diff --git a/build2/qt/HGImgFmt/HGImgFmt.pro b/build2/qt/HGImgFmt/HGImgFmt.pro
index d0af4edb..e68edca8 100644
--- a/build2/qt/HGImgFmt/HGImgFmt.pro
+++ b/build2/qt/HGImgFmt/HGImgFmt.pro
@@ -132,6 +132,10 @@ unix {
     LIBS += $$PWD/../../../third_party/libzip/$${MY_OS}/$${MY_ARCH}/lib/libzip.a
     LIBS += $$PWD/../../../third_party/tinyxml2/$${MY_OS}/$${MY_ARCH}/lib/libtinyxml2.a
     LIBS += $$PWD/../../../third_party/zlib/$${MY_OS}/$${MY_ARCH}/lib/libz.a
+    contains(QT_ARCH, sw_64) {
+        LIBS += $$PWD/../../../third_party/openssl/$${MY_OS}/$${MY_ARCH}/lib/libcrypto.a
+        LIBS += $$PWD/../../../third_party/openssl/$${MY_OS}/$${MY_ARCH}/lib/libssl.a
+    }
 }

 INCLUDEPATH += $$PWD/../../../utility
diff --git a/build2/qt/HGVersion/HGVersion.pro b/build2/qt/HGVersion/HGVersion.pro
index 96d7d98e..cae9383d 100644
--- a/build2/qt/HGVersion/HGVersion.pro
+++ b/build2/qt/HGVersion/HGVersion.pro
@@ -92,6 +92,7 @@ unix {

     CONFIG += unversioned_libname unversioned_soname
     QMAKE_CXXFLAGS += -fvisibility=hidden
+    QMAKE_CFLAGS += -fvisibility=hidden
     QMAKE_LFLAGS += -static-libstdc++ -static-libgcc
     QMAKE_LFLAGS += -Wl,-rpath,\'\$\$ORIGIN\' -Wl,--exclude-libs,ALL
     QMAKE_LFLAGS += -z defs -B direct
diff --git a/build2/qt/HGWebService/HGWebService.pro b/build2/qt/HGWebService/HGWebService.pro
index fa24d844..95e0b5cd 100644
--- a/build2/qt/HGWebService/HGWebService.pro
+++ b/build2/qt/HGWebService/HGWebService.pro
@@ -111,6 +111,10 @@ unix {
     LIBS += $$PWD/../../../third_party/libzip/$${MY_OS}/$${MY_ARCH}/lib/libzip.a
     LIBS += $$PWD/../../../third_party/zlib/$${MY_OS}/$${MY_ARCH}/lib/libz.a
     LIBS += $$PWD/../../../third_party/libcurl/$${MY_OS}/$${MY_ARCH}/lib/libcurl.a
+    contains(QT_ARCH, sw_64) {
+        LIBS += $$PWD/../../../third_party/openssl/$${MY_OS}/$${MY_ARCH}/lib/libcrypto.a
+        LIBS += $$PWD/../../../third_party/openssl/$${MY_OS}/$${MY_ARCH}/lib/libssl.a
+    }
 }

 INCLUDEPATH += $$PWD/../../../modules
diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/allheaders.h b/third_party/leptonica/uos/sw_64/include/leptonica/allheaders.h
new file mode 100644
index 00000000..10d67806
--- /dev/null
+++ b/third_party/leptonica/uos/sw_64/include/leptonica/allheaders.h
@@ -0,0 +1,2735 @@
+/*====================================================================*
+ -  Copyright (C) 2001 Leptonica.  All rights reserved.
+ -
+ -  Redistribution and use in source and binary forms, with or without
+ -  modification, are permitted provided that the following conditions
+ -  are met:
+ -  1. Redistributions of source code must retain the above copyright
+ -     notice, this list of conditions and the following disclaimer.
+ -  2. Redistributions in binary form must reproduce the above
+ -     copyright notice, this list of conditions and the following
+ -     disclaimer in the documentation and/or other materials
+ -     provided with the distribution.
+ -
+ -  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ -  ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ -  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ -  A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + +#ifndef LEPTONICA_ALLHEADERS_H +#define LEPTONICA_ALLHEADERS_H + + +#define LIBLEPT_MAJOR_VERSION 1 +#define LIBLEPT_MINOR_VERSION 74 +#define LIBLEPT_PATCH_VERSION 4 + +#include "alltypes.h" + +#ifndef NO_PROTOS +/* + * These prototypes were autogen'd by xtractprotos, v. 1.5 + */ +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +LEPT_DLL extern PIX * pixCleanBackgroundToWhite ( PIX *pixs, PIX *pixim, PIX *pixg, l_float32 gamma, l_int32 blackval, l_int32 whiteval ); +LEPT_DLL extern PIX * pixBackgroundNormSimple ( PIX *pixs, PIX *pixim, PIX *pixg ); +LEPT_DLL extern PIX * pixBackgroundNorm ( PIX *pixs, PIX *pixim, PIX *pixg, l_int32 sx, l_int32 sy, l_int32 thresh, l_int32 mincount, l_int32 bgval, l_int32 smoothx, l_int32 smoothy ); +LEPT_DLL extern PIX * pixBackgroundNormMorph ( PIX *pixs, PIX *pixim, l_int32 reduction, l_int32 size, l_int32 bgval ); +LEPT_DLL extern l_int32 pixBackgroundNormGrayArray ( PIX *pixs, PIX *pixim, l_int32 sx, l_int32 sy, l_int32 thresh, l_int32 mincount, l_int32 bgval, l_int32 smoothx, l_int32 smoothy, PIX **ppixd ); +LEPT_DLL extern l_int32 pixBackgroundNormRGBArrays ( PIX *pixs, PIX *pixim, PIX *pixg, l_int32 sx, l_int32 sy, l_int32 thresh, l_int32 mincount, l_int32 bgval, l_int32 smoothx, l_int32 smoothy, PIX **ppixr, PIX **ppixg, PIX **ppixb ); +LEPT_DLL extern l_int32 pixBackgroundNormGrayArrayMorph ( PIX *pixs, PIX *pixim, l_int32 reduction, l_int32 size, l_int32 bgval, PIX **ppixd ); +LEPT_DLL extern l_int32 pixBackgroundNormRGBArraysMorph ( PIX *pixs, PIX *pixim, l_int32 reduction, l_int32 size, l_int32 bgval, PIX **ppixr, PIX **ppixg, PIX **ppixb ); +LEPT_DLL extern l_int32 pixGetBackgroundGrayMap ( PIX *pixs, PIX *pixim, l_int32 sx, l_int32 sy, l_int32 thresh, l_int32 mincount, PIX **ppixd ); +LEPT_DLL extern l_int32 pixGetBackgroundRGBMap ( PIX *pixs, PIX *pixim, PIX *pixg, l_int32 sx, l_int32 sy, l_int32 thresh, l_int32 mincount, PIX **ppixmr, PIX **ppixmg, PIX **ppixmb ); +LEPT_DLL extern l_int32 pixGetBackgroundGrayMapMorph ( PIX *pixs, PIX *pixim, l_int32 reduction, l_int32 size, PIX **ppixm ); +LEPT_DLL extern l_int32 pixGetBackgroundRGBMapMorph ( PIX *pixs, PIX *pixim, l_int32 reduction, l_int32 size, PIX **ppixmr, PIX **ppixmg, PIX **ppixmb ); +LEPT_DLL extern l_int32 pixFillMapHoles ( PIX *pix, l_int32 nx, l_int32 ny, l_int32 filltype ); +LEPT_DLL extern PIX * pixExtendByReplication ( PIX *pixs, l_int32 addw, l_int32 addh ); +LEPT_DLL extern l_int32 pixSmoothConnectedRegions ( PIX *pixs, PIX *pixm, l_int32 factor ); +LEPT_DLL extern PIX * pixGetInvBackgroundMap ( PIX *pixs, l_int32 bgval, l_int32 smoothx, l_int32 smoothy ); +LEPT_DLL extern PIX * pixApplyInvBackgroundGrayMap ( PIX *pixs, PIX *pixm, l_int32 sx, l_int32 sy ); +LEPT_DLL extern PIX * pixApplyInvBackgroundRGBMap ( PIX *pixs, PIX *pixmr, PIX *pixmg, PIX *pixmb, l_int32 sx, l_int32 sy ); +LEPT_DLL extern PIX * pixApplyVariableGrayMap ( PIX *pixs, PIX *pixg, l_int32 target ); 
+LEPT_DLL extern PIX * pixGlobalNormRGB ( PIX *pixd, PIX *pixs, l_int32 rval, l_int32 gval, l_int32 bval, l_int32 mapval ); +LEPT_DLL extern PIX * pixGlobalNormNoSatRGB ( PIX *pixd, PIX *pixs, l_int32 rval, l_int32 gval, l_int32 bval, l_int32 factor, l_float32 rank ); +LEPT_DLL extern l_int32 pixThresholdSpreadNorm ( PIX *pixs, l_int32 filtertype, l_int32 edgethresh, l_int32 smoothx, l_int32 smoothy, l_float32 gamma, l_int32 minval, l_int32 maxval, l_int32 targetthresh, PIX **ppixth, PIX **ppixb, PIX **ppixd ); +LEPT_DLL extern PIX * pixBackgroundNormFlex ( PIX *pixs, l_int32 sx, l_int32 sy, l_int32 smoothx, l_int32 smoothy, l_int32 delta ); +LEPT_DLL extern PIX * pixContrastNorm ( PIX *pixd, PIX *pixs, l_int32 sx, l_int32 sy, l_int32 mindiff, l_int32 smoothx, l_int32 smoothy ); +LEPT_DLL extern l_int32 pixMinMaxTiles ( PIX *pixs, l_int32 sx, l_int32 sy, l_int32 mindiff, l_int32 smoothx, l_int32 smoothy, PIX **ppixmin, PIX **ppixmax ); +LEPT_DLL extern l_int32 pixSetLowContrast ( PIX *pixs1, PIX *pixs2, l_int32 mindiff ); +LEPT_DLL extern PIX * pixLinearTRCTiled ( PIX *pixd, PIX *pixs, l_int32 sx, l_int32 sy, PIX *pixmin, PIX *pixmax ); +LEPT_DLL extern PIX * pixAffineSampledPta ( PIX *pixs, PTA *ptad, PTA *ptas, l_int32 incolor ); +LEPT_DLL extern PIX * pixAffineSampled ( PIX *pixs, l_float32 *vc, l_int32 incolor ); +LEPT_DLL extern PIX * pixAffinePta ( PIX *pixs, PTA *ptad, PTA *ptas, l_int32 incolor ); +LEPT_DLL extern PIX * pixAffine ( PIX *pixs, l_float32 *vc, l_int32 incolor ); +LEPT_DLL extern PIX * pixAffinePtaColor ( PIX *pixs, PTA *ptad, PTA *ptas, l_uint32 colorval ); +LEPT_DLL extern PIX * pixAffineColor ( PIX *pixs, l_float32 *vc, l_uint32 colorval ); +LEPT_DLL extern PIX * pixAffinePtaGray ( PIX *pixs, PTA *ptad, PTA *ptas, l_uint8 grayval ); +LEPT_DLL extern PIX * pixAffineGray ( PIX *pixs, l_float32 *vc, l_uint8 grayval ); +LEPT_DLL extern PIX * pixAffinePtaWithAlpha ( PIX *pixs, PTA *ptad, PTA *ptas, PIX *pixg, l_float32 fract, l_int32 border ); +LEPT_DLL extern l_int32 getAffineXformCoeffs ( PTA *ptas, PTA *ptad, l_float32 **pvc ); +LEPT_DLL extern l_int32 affineInvertXform ( l_float32 *vc, l_float32 **pvci ); +LEPT_DLL extern l_int32 affineXformSampledPt ( l_float32 *vc, l_int32 x, l_int32 y, l_int32 *pxp, l_int32 *pyp ); +LEPT_DLL extern l_int32 affineXformPt ( l_float32 *vc, l_int32 x, l_int32 y, l_float32 *pxp, l_float32 *pyp ); +LEPT_DLL extern l_int32 linearInterpolatePixelColor ( l_uint32 *datas, l_int32 wpls, l_int32 w, l_int32 h, l_float32 x, l_float32 y, l_uint32 colorval, l_uint32 *pval ); +LEPT_DLL extern l_int32 linearInterpolatePixelGray ( l_uint32 *datas, l_int32 wpls, l_int32 w, l_int32 h, l_float32 x, l_float32 y, l_int32 grayval, l_int32 *pval ); +LEPT_DLL extern l_int32 gaussjordan ( l_float32 **a, l_float32 *b, l_int32 n ); +LEPT_DLL extern PIX * pixAffineSequential ( PIX *pixs, PTA *ptad, PTA *ptas, l_int32 bw, l_int32 bh ); +LEPT_DLL extern l_float32 * createMatrix2dTranslate ( l_float32 transx, l_float32 transy ); +LEPT_DLL extern l_float32 * createMatrix2dScale ( l_float32 scalex, l_float32 scaley ); +LEPT_DLL extern l_float32 * createMatrix2dRotate ( l_float32 xc, l_float32 yc, l_float32 angle ); +LEPT_DLL extern PTA * ptaTranslate ( PTA *ptas, l_float32 transx, l_float32 transy ); +LEPT_DLL extern PTA * ptaScale ( PTA *ptas, l_float32 scalex, l_float32 scaley ); +LEPT_DLL extern PTA * ptaRotate ( PTA *ptas, l_float32 xc, l_float32 yc, l_float32 angle ); +LEPT_DLL extern BOXA * boxaTranslate ( BOXA *boxas, l_float32 transx, l_float32 transy ); 
+LEPT_DLL extern BOXA * boxaScale ( BOXA *boxas, l_float32 scalex, l_float32 scaley ); +LEPT_DLL extern BOXA * boxaRotate ( BOXA *boxas, l_float32 xc, l_float32 yc, l_float32 angle ); +LEPT_DLL extern PTA * ptaAffineTransform ( PTA *ptas, l_float32 *mat ); +LEPT_DLL extern BOXA * boxaAffineTransform ( BOXA *boxas, l_float32 *mat ); +LEPT_DLL extern l_int32 l_productMatVec ( l_float32 *mat, l_float32 *vecs, l_float32 *vecd, l_int32 size ); +LEPT_DLL extern l_int32 l_productMat2 ( l_float32 *mat1, l_float32 *mat2, l_float32 *matd, l_int32 size ); +LEPT_DLL extern l_int32 l_productMat3 ( l_float32 *mat1, l_float32 *mat2, l_float32 *mat3, l_float32 *matd, l_int32 size ); +LEPT_DLL extern l_int32 l_productMat4 ( l_float32 *mat1, l_float32 *mat2, l_float32 *mat3, l_float32 *mat4, l_float32 *matd, l_int32 size ); +LEPT_DLL extern l_int32 l_getDataBit ( void *line, l_int32 n ); +LEPT_DLL extern void l_setDataBit ( void *line, l_int32 n ); +LEPT_DLL extern void l_clearDataBit ( void *line, l_int32 n ); +LEPT_DLL extern void l_setDataBitVal ( void *line, l_int32 n, l_int32 val ); +LEPT_DLL extern l_int32 l_getDataDibit ( void *line, l_int32 n ); +LEPT_DLL extern void l_setDataDibit ( void *line, l_int32 n, l_int32 val ); +LEPT_DLL extern void l_clearDataDibit ( void *line, l_int32 n ); +LEPT_DLL extern l_int32 l_getDataQbit ( void *line, l_int32 n ); +LEPT_DLL extern void l_setDataQbit ( void *line, l_int32 n, l_int32 val ); +LEPT_DLL extern void l_clearDataQbit ( void *line, l_int32 n ); +LEPT_DLL extern l_int32 l_getDataByte ( void *line, l_int32 n ); +LEPT_DLL extern void l_setDataByte ( void *line, l_int32 n, l_int32 val ); +LEPT_DLL extern l_int32 l_getDataTwoBytes ( void *line, l_int32 n ); +LEPT_DLL extern void l_setDataTwoBytes ( void *line, l_int32 n, l_int32 val ); +LEPT_DLL extern l_int32 l_getDataFourBytes ( void *line, l_int32 n ); +LEPT_DLL extern void l_setDataFourBytes ( void *line, l_int32 n, l_int32 val ); +LEPT_DLL extern char * barcodeDispatchDecoder ( char *barstr, l_int32 format, l_int32 debugflag ); +LEPT_DLL extern l_int32 barcodeFormatIsSupported ( l_int32 format ); +LEPT_DLL extern NUMA * pixFindBaselines ( PIX *pixs, PTA **ppta, PIXA *pixadb ); +LEPT_DLL extern PIX * pixDeskewLocal ( PIX *pixs, l_int32 nslices, l_int32 redsweep, l_int32 redsearch, l_float32 sweeprange, l_float32 sweepdelta, l_float32 minbsdelta ); +LEPT_DLL extern l_int32 pixGetLocalSkewTransform ( PIX *pixs, l_int32 nslices, l_int32 redsweep, l_int32 redsearch, l_float32 sweeprange, l_float32 sweepdelta, l_float32 minbsdelta, PTA **pptas, PTA **pptad ); +LEPT_DLL extern NUMA * pixGetLocalSkewAngles ( PIX *pixs, l_int32 nslices, l_int32 redsweep, l_int32 redsearch, l_float32 sweeprange, l_float32 sweepdelta, l_float32 minbsdelta, l_float32 *pa, l_float32 *pb, l_int32 debug ); +LEPT_DLL extern L_BBUFFER * bbufferCreate ( l_uint8 *indata, l_int32 nalloc ); +LEPT_DLL extern void bbufferDestroy ( L_BBUFFER **pbb ); +LEPT_DLL extern l_uint8 * bbufferDestroyAndSaveData ( L_BBUFFER **pbb, size_t *pnbytes ); +LEPT_DLL extern l_int32 bbufferRead ( L_BBUFFER *bb, l_uint8 *src, l_int32 nbytes ); +LEPT_DLL extern l_int32 bbufferReadStream ( L_BBUFFER *bb, FILE *fp, l_int32 nbytes ); +LEPT_DLL extern l_int32 bbufferExtendArray ( L_BBUFFER *bb, l_int32 nbytes ); +LEPT_DLL extern l_int32 bbufferWrite ( L_BBUFFER *bb, l_uint8 *dest, size_t nbytes, size_t *pnout ); +LEPT_DLL extern l_int32 bbufferWriteStream ( L_BBUFFER *bb, FILE *fp, size_t nbytes, size_t *pnout ); +LEPT_DLL extern PIX * pixBilateral ( PIX *pixs, 
l_float32 spatial_stdev, l_float32 range_stdev, l_int32 ncomps, l_int32 reduction ); +LEPT_DLL extern PIX * pixBilateralGray ( PIX *pixs, l_float32 spatial_stdev, l_float32 range_stdev, l_int32 ncomps, l_int32 reduction ); +LEPT_DLL extern PIX * pixBilateralExact ( PIX *pixs, L_KERNEL *spatial_kel, L_KERNEL *range_kel ); +LEPT_DLL extern PIX * pixBilateralGrayExact ( PIX *pixs, L_KERNEL *spatial_kel, L_KERNEL *range_kel ); +LEPT_DLL extern PIX* pixBlockBilateralExact ( PIX *pixs, l_float32 spatial_stdev, l_float32 range_stdev ); +LEPT_DLL extern L_KERNEL * makeRangeKernel ( l_float32 range_stdev ); +LEPT_DLL extern PIX * pixBilinearSampledPta ( PIX *pixs, PTA *ptad, PTA *ptas, l_int32 incolor ); +LEPT_DLL extern PIX * pixBilinearSampled ( PIX *pixs, l_float32 *vc, l_int32 incolor ); +LEPT_DLL extern PIX * pixBilinearPta ( PIX *pixs, PTA *ptad, PTA *ptas, l_int32 incolor ); +LEPT_DLL extern PIX * pixBilinear ( PIX *pixs, l_float32 *vc, l_int32 incolor ); +LEPT_DLL extern PIX * pixBilinearPtaColor ( PIX *pixs, PTA *ptad, PTA *ptas, l_uint32 colorval ); +LEPT_DLL extern PIX * pixBilinearColor ( PIX *pixs, l_float32 *vc, l_uint32 colorval ); +LEPT_DLL extern PIX * pixBilinearPtaGray ( PIX *pixs, PTA *ptad, PTA *ptas, l_uint8 grayval ); +LEPT_DLL extern PIX * pixBilinearGray ( PIX *pixs, l_float32 *vc, l_uint8 grayval ); +LEPT_DLL extern PIX * pixBilinearPtaWithAlpha ( PIX *pixs, PTA *ptad, PTA *ptas, PIX *pixg, l_float32 fract, l_int32 border ); +LEPT_DLL extern l_int32 getBilinearXformCoeffs ( PTA *ptas, PTA *ptad, l_float32 **pvc ); +LEPT_DLL extern l_int32 bilinearXformSampledPt ( l_float32 *vc, l_int32 x, l_int32 y, l_int32 *pxp, l_int32 *pyp ); +LEPT_DLL extern l_int32 bilinearXformPt ( l_float32 *vc, l_int32 x, l_int32 y, l_float32 *pxp, l_float32 *pyp ); +LEPT_DLL extern l_int32 pixOtsuAdaptiveThreshold ( PIX *pixs, l_int32 sx, l_int32 sy, l_int32 smoothx, l_int32 smoothy, l_float32 scorefract, PIX **ppixth, PIX **ppixd ); +LEPT_DLL extern PIX * pixOtsuThreshOnBackgroundNorm ( PIX *pixs, PIX *pixim, l_int32 sx, l_int32 sy, l_int32 thresh, l_int32 mincount, l_int32 bgval, l_int32 smoothx, l_int32 smoothy, l_float32 scorefract, l_int32 *pthresh ); +LEPT_DLL extern PIX * pixMaskedThreshOnBackgroundNorm ( PIX *pixs, PIX *pixim, l_int32 sx, l_int32 sy, l_int32 thresh, l_int32 mincount, l_int32 smoothx, l_int32 smoothy, l_float32 scorefract, l_int32 *pthresh ); +LEPT_DLL extern l_int32 pixSauvolaBinarizeTiled ( PIX *pixs, l_int32 whsize, l_float32 factor, l_int32 nx, l_int32 ny, PIX **ppixth, PIX **ppixd ); +LEPT_DLL extern l_int32 pixSauvolaBinarize ( PIX *pixs, l_int32 whsize, l_float32 factor, l_int32 addborder, PIX **ppixm, PIX **ppixsd, PIX **ppixth, PIX **ppixd ); +LEPT_DLL extern PIX * pixSauvolaGetThreshold ( PIX *pixm, PIX *pixms, l_float32 factor, PIX **ppixsd ); +LEPT_DLL extern PIX * pixApplyLocalThreshold ( PIX *pixs, PIX *pixth, l_int32 redfactor ); +LEPT_DLL extern l_int32 pixThresholdByConnComp ( PIX *pixs, PIX *pixm, l_int32 start, l_int32 end, l_int32 incr, l_float32 thresh48, l_float32 threshdiff, l_int32 *pglobthresh, PIX **ppixd, l_int32 debugflag ); +LEPT_DLL extern PIX * pixExpandBinaryReplicate ( PIX *pixs, l_int32 xfact, l_int32 yfact ); +LEPT_DLL extern PIX * pixExpandBinaryPower2 ( PIX *pixs, l_int32 factor ); +LEPT_DLL extern PIX * pixReduceBinary2 ( PIX *pixs, l_uint8 *intab ); +LEPT_DLL extern PIX * pixReduceRankBinaryCascade ( PIX *pixs, l_int32 level1, l_int32 level2, l_int32 level3, l_int32 level4 ); +LEPT_DLL extern PIX * pixReduceRankBinary2 ( PIX *pixs, 
l_int32 level, l_uint8 *intab ); +LEPT_DLL extern l_uint8 * makeSubsampleTab2x ( void ); +LEPT_DLL extern PIX * pixBlend ( PIX *pixs1, PIX *pixs2, l_int32 x, l_int32 y, l_float32 fract ); +LEPT_DLL extern PIX * pixBlendMask ( PIX *pixd, PIX *pixs1, PIX *pixs2, l_int32 x, l_int32 y, l_float32 fract, l_int32 type ); +LEPT_DLL extern PIX * pixBlendGray ( PIX *pixd, PIX *pixs1, PIX *pixs2, l_int32 x, l_int32 y, l_float32 fract, l_int32 type, l_int32 transparent, l_uint32 transpix ); +LEPT_DLL extern PIX * pixBlendGrayInverse ( PIX *pixd, PIX *pixs1, PIX *pixs2, l_int32 x, l_int32 y, l_float32 fract ); +LEPT_DLL extern PIX * pixBlendColor ( PIX *pixd, PIX *pixs1, PIX *pixs2, l_int32 x, l_int32 y, l_float32 fract, l_int32 transparent, l_uint32 transpix ); +LEPT_DLL extern PIX * pixBlendColorByChannel ( PIX *pixd, PIX *pixs1, PIX *pixs2, l_int32 x, l_int32 y, l_float32 rfract, l_float32 gfract, l_float32 bfract, l_int32 transparent, l_uint32 transpix ); +LEPT_DLL extern PIX * pixBlendGrayAdapt ( PIX *pixd, PIX *pixs1, PIX *pixs2, l_int32 x, l_int32 y, l_float32 fract, l_int32 shift ); +LEPT_DLL extern PIX * pixFadeWithGray ( PIX *pixs, PIX *pixb, l_float32 factor, l_int32 type ); +LEPT_DLL extern PIX * pixBlendHardLight ( PIX *pixd, PIX *pixs1, PIX *pixs2, l_int32 x, l_int32 y, l_float32 fract ); +LEPT_DLL extern l_int32 pixBlendCmap ( PIX *pixs, PIX *pixb, l_int32 x, l_int32 y, l_int32 sindex ); +LEPT_DLL extern PIX * pixBlendWithGrayMask ( PIX *pixs1, PIX *pixs2, PIX *pixg, l_int32 x, l_int32 y ); +LEPT_DLL extern PIX * pixBlendBackgroundToColor ( PIX *pixd, PIX *pixs, BOX *box, l_uint32 color, l_float32 gamma, l_int32 minval, l_int32 maxval ); +LEPT_DLL extern PIX * pixMultiplyByColor ( PIX *pixd, PIX *pixs, BOX *box, l_uint32 color ); +LEPT_DLL extern PIX * pixAlphaBlendUniform ( PIX *pixs, l_uint32 color ); +LEPT_DLL extern PIX * pixAddAlphaToBlend ( PIX *pixs, l_float32 fract, l_int32 invert ); +LEPT_DLL extern PIX * pixSetAlphaOverWhite ( PIX *pixs ); +LEPT_DLL extern L_BMF * bmfCreate ( const char *dir, l_int32 fontsize ); +LEPT_DLL extern void bmfDestroy ( L_BMF **pbmf ); +LEPT_DLL extern PIX * bmfGetPix ( L_BMF *bmf, char chr ); +LEPT_DLL extern l_int32 bmfGetWidth ( L_BMF *bmf, char chr, l_int32 *pw ); +LEPT_DLL extern l_int32 bmfGetBaseline ( L_BMF *bmf, char chr, l_int32 *pbaseline ); +LEPT_DLL extern PIXA * pixaGetFont ( const char *dir, l_int32 fontsize, l_int32 *pbl0, l_int32 *pbl1, l_int32 *pbl2 ); +LEPT_DLL extern l_int32 pixaSaveFont ( const char *indir, const char *outdir, l_int32 fontsize ); +LEPT_DLL extern PIX * pixReadStreamBmp ( FILE *fp ); +LEPT_DLL extern PIX * pixReadMemBmp ( const l_uint8 *cdata, size_t size ); +LEPT_DLL extern l_int32 pixWriteStreamBmp ( FILE *fp, PIX *pix ); +LEPT_DLL extern l_int32 pixWriteMemBmp ( l_uint8 **pfdata, size_t *pfsize, PIX *pixs ); +LEPT_DLL extern PIXA * l_bootnum_gen1 ( void ); +LEPT_DLL extern PIXA * l_bootnum_gen2 ( void ); +LEPT_DLL extern PIXA * l_bootnum_gen3 ( void ); +LEPT_DLL extern BOX * boxCreate ( l_int32 x, l_int32 y, l_int32 w, l_int32 h ); +LEPT_DLL extern BOX * boxCreateValid ( l_int32 x, l_int32 y, l_int32 w, l_int32 h ); +LEPT_DLL extern BOX * boxCopy ( BOX *box ); +LEPT_DLL extern BOX * boxClone ( BOX *box ); +LEPT_DLL extern void boxDestroy ( BOX **pbox ); +LEPT_DLL extern l_int32 boxGetGeometry ( BOX *box, l_int32 *px, l_int32 *py, l_int32 *pw, l_int32 *ph ); +LEPT_DLL extern l_int32 boxSetGeometry ( BOX *box, l_int32 x, l_int32 y, l_int32 w, l_int32 h ); +LEPT_DLL extern l_int32 boxGetSideLocations ( BOX *box, 
l_int32 *pl, l_int32 *pr, l_int32 *pt, l_int32 *pb ); +LEPT_DLL extern l_int32 boxSetSideLocations ( BOX *box, l_int32 l, l_int32 r, l_int32 t, l_int32 b ); +LEPT_DLL extern l_int32 boxGetRefcount ( BOX *box ); +LEPT_DLL extern l_int32 boxChangeRefcount ( BOX *box, l_int32 delta ); +LEPT_DLL extern l_int32 boxIsValid ( BOX *box, l_int32 *pvalid ); +LEPT_DLL extern BOXA * boxaCreate ( l_int32 n ); +LEPT_DLL extern BOXA * boxaCopy ( BOXA *boxa, l_int32 copyflag ); +LEPT_DLL extern void boxaDestroy ( BOXA **pboxa ); +LEPT_DLL extern l_int32 boxaAddBox ( BOXA *boxa, BOX *box, l_int32 copyflag ); +LEPT_DLL extern l_int32 boxaExtendArray ( BOXA *boxa ); +LEPT_DLL extern l_int32 boxaExtendArrayToSize ( BOXA *boxa, l_int32 size ); +LEPT_DLL extern l_int32 boxaGetCount ( BOXA *boxa ); +LEPT_DLL extern l_int32 boxaGetValidCount ( BOXA *boxa ); +LEPT_DLL extern BOX * boxaGetBox ( BOXA *boxa, l_int32 index, l_int32 accessflag ); +LEPT_DLL extern BOX * boxaGetValidBox ( BOXA *boxa, l_int32 index, l_int32 accessflag ); +LEPT_DLL extern NUMA * boxaFindInvalidBoxes ( BOXA *boxa ); +LEPT_DLL extern l_int32 boxaGetBoxGeometry ( BOXA *boxa, l_int32 index, l_int32 *px, l_int32 *py, l_int32 *pw, l_int32 *ph ); +LEPT_DLL extern l_int32 boxaIsFull ( BOXA *boxa, l_int32 *pfull ); +LEPT_DLL extern l_int32 boxaReplaceBox ( BOXA *boxa, l_int32 index, BOX *box ); +LEPT_DLL extern l_int32 boxaInsertBox ( BOXA *boxa, l_int32 index, BOX *box ); +LEPT_DLL extern l_int32 boxaRemoveBox ( BOXA *boxa, l_int32 index ); +LEPT_DLL extern l_int32 boxaRemoveBoxAndSave ( BOXA *boxa, l_int32 index, BOX **pbox ); +LEPT_DLL extern BOXA * boxaSaveValid ( BOXA *boxas, l_int32 copyflag ); +LEPT_DLL extern l_int32 boxaInitFull ( BOXA *boxa, BOX *box ); +LEPT_DLL extern l_int32 boxaClear ( BOXA *boxa ); +LEPT_DLL extern BOXAA * boxaaCreate ( l_int32 n ); +LEPT_DLL extern BOXAA * boxaaCopy ( BOXAA *baas, l_int32 copyflag ); +LEPT_DLL extern void boxaaDestroy ( BOXAA **pbaa ); +LEPT_DLL extern l_int32 boxaaAddBoxa ( BOXAA *baa, BOXA *ba, l_int32 copyflag ); +LEPT_DLL extern l_int32 boxaaExtendArray ( BOXAA *baa ); +LEPT_DLL extern l_int32 boxaaExtendArrayToSize ( BOXAA *baa, l_int32 size ); +LEPT_DLL extern l_int32 boxaaGetCount ( BOXAA *baa ); +LEPT_DLL extern l_int32 boxaaGetBoxCount ( BOXAA *baa ); +LEPT_DLL extern BOXA * boxaaGetBoxa ( BOXAA *baa, l_int32 index, l_int32 accessflag ); +LEPT_DLL extern BOX * boxaaGetBox ( BOXAA *baa, l_int32 iboxa, l_int32 ibox, l_int32 accessflag ); +LEPT_DLL extern l_int32 boxaaInitFull ( BOXAA *baa, BOXA *boxa ); +LEPT_DLL extern l_int32 boxaaExtendWithInit ( BOXAA *baa, l_int32 maxindex, BOXA *boxa ); +LEPT_DLL extern l_int32 boxaaReplaceBoxa ( BOXAA *baa, l_int32 index, BOXA *boxa ); +LEPT_DLL extern l_int32 boxaaInsertBoxa ( BOXAA *baa, l_int32 index, BOXA *boxa ); +LEPT_DLL extern l_int32 boxaaRemoveBoxa ( BOXAA *baa, l_int32 index ); +LEPT_DLL extern l_int32 boxaaAddBox ( BOXAA *baa, l_int32 index, BOX *box, l_int32 accessflag ); +LEPT_DLL extern BOXAA * boxaaReadFromFiles ( const char *dirname, const char *substr, l_int32 first, l_int32 nfiles ); +LEPT_DLL extern BOXAA * boxaaRead ( const char *filename ); +LEPT_DLL extern BOXAA * boxaaReadStream ( FILE *fp ); +LEPT_DLL extern BOXAA * boxaaReadMem ( const l_uint8 *data, size_t size ); +LEPT_DLL extern l_int32 boxaaWrite ( const char *filename, BOXAA *baa ); +LEPT_DLL extern l_int32 boxaaWriteStream ( FILE *fp, BOXAA *baa ); +LEPT_DLL extern l_int32 boxaaWriteMem ( l_uint8 **pdata, size_t *psize, BOXAA *baa ); +LEPT_DLL extern BOXA * boxaRead ( 
const char *filename ); +LEPT_DLL extern BOXA * boxaReadStream ( FILE *fp ); +LEPT_DLL extern BOXA * boxaReadMem ( const l_uint8 *data, size_t size ); +LEPT_DLL extern l_int32 boxaWrite ( const char *filename, BOXA *boxa ); +LEPT_DLL extern l_int32 boxaWriteStream ( FILE *fp, BOXA *boxa ); +LEPT_DLL extern l_int32 boxaWriteMem ( l_uint8 **pdata, size_t *psize, BOXA *boxa ); +LEPT_DLL extern l_int32 boxPrintStreamInfo ( FILE *fp, BOX *box ); +LEPT_DLL extern l_int32 boxContains ( BOX *box1, BOX *box2, l_int32 *presult ); +LEPT_DLL extern l_int32 boxIntersects ( BOX *box1, BOX *box2, l_int32 *presult ); +LEPT_DLL extern BOXA * boxaContainedInBox ( BOXA *boxas, BOX *box ); +LEPT_DLL extern l_int32 boxaContainedInBoxCount ( BOXA *boxa, BOX *box, l_int32 *pcount ); +LEPT_DLL extern l_int32 boxaContainedInBoxa ( BOXA *boxa1, BOXA *boxa2, l_int32 *pcontained ); +LEPT_DLL extern BOXA * boxaIntersectsBox ( BOXA *boxas, BOX *box ); +LEPT_DLL extern l_int32 boxaIntersectsBoxCount ( BOXA *boxa, BOX *box, l_int32 *pcount ); +LEPT_DLL extern BOXA * boxaClipToBox ( BOXA *boxas, BOX *box ); +LEPT_DLL extern BOXA * boxaCombineOverlaps ( BOXA *boxas, PIXA *pixadb ); +LEPT_DLL extern l_int32 boxaCombineOverlapsInPair ( BOXA *boxas1, BOXA *boxas2, BOXA **pboxad1, BOXA **pboxad2, PIXA *pixadb ); +LEPT_DLL extern BOX * boxOverlapRegion ( BOX *box1, BOX *box2 ); +LEPT_DLL extern BOX * boxBoundingRegion ( BOX *box1, BOX *box2 ); +LEPT_DLL extern l_int32 boxOverlapFraction ( BOX *box1, BOX *box2, l_float32 *pfract ); +LEPT_DLL extern l_int32 boxOverlapArea ( BOX *box1, BOX *box2, l_int32 *parea ); +LEPT_DLL extern BOXA * boxaHandleOverlaps ( BOXA *boxas, l_int32 op, l_int32 range, l_float32 min_overlap, l_float32 max_ratio, NUMA **pnamap ); +LEPT_DLL extern l_int32 boxSeparationDistance ( BOX *box1, BOX *box2, l_int32 *ph_sep, l_int32 *pv_sep ); +LEPT_DLL extern l_int32 boxCompareSize ( BOX *box1, BOX *box2, l_int32 type, l_int32 *prel ); +LEPT_DLL extern l_int32 boxContainsPt ( BOX *box, l_float32 x, l_float32 y, l_int32 *pcontains ); +LEPT_DLL extern BOX * boxaGetNearestToPt ( BOXA *boxa, l_int32 x, l_int32 y ); +LEPT_DLL extern BOX * boxaGetNearestToLine ( BOXA *boxa, l_int32 x, l_int32 y ); +LEPT_DLL extern l_int32 boxGetCenter ( BOX *box, l_float32 *pcx, l_float32 *pcy ); +LEPT_DLL extern l_int32 boxIntersectByLine ( BOX *box, l_int32 x, l_int32 y, l_float32 slope, l_int32 *px1, l_int32 *py1, l_int32 *px2, l_int32 *py2, l_int32 *pn ); +LEPT_DLL extern BOX * boxClipToRectangle ( BOX *box, l_int32 wi, l_int32 hi ); +LEPT_DLL extern l_int32 boxClipToRectangleParams ( BOX *box, l_int32 w, l_int32 h, l_int32 *pxstart, l_int32 *pystart, l_int32 *pxend, l_int32 *pyend, l_int32 *pbw, l_int32 *pbh ); +LEPT_DLL extern BOX * boxRelocateOneSide ( BOX *boxd, BOX *boxs, l_int32 loc, l_int32 sideflag ); +LEPT_DLL extern BOXA * boxaAdjustSides ( BOXA *boxas, l_int32 delleft, l_int32 delright, l_int32 deltop, l_int32 delbot ); +LEPT_DLL extern BOX * boxAdjustSides ( BOX *boxd, BOX *boxs, l_int32 delleft, l_int32 delright, l_int32 deltop, l_int32 delbot ); +LEPT_DLL extern BOXA * boxaSetSide ( BOXA *boxad, BOXA *boxas, l_int32 side, l_int32 val, l_int32 thresh ); +LEPT_DLL extern BOXA * boxaAdjustWidthToTarget ( BOXA *boxad, BOXA *boxas, l_int32 sides, l_int32 target, l_int32 thresh ); +LEPT_DLL extern BOXA * boxaAdjustHeightToTarget ( BOXA *boxad, BOXA *boxas, l_int32 sides, l_int32 target, l_int32 thresh ); +LEPT_DLL extern l_int32 boxEqual ( BOX *box1, BOX *box2, l_int32 *psame ); +LEPT_DLL extern l_int32 boxaEqual ( BOXA 
*boxa1, BOXA *boxa2, l_int32 maxdist, NUMA **pnaindex, l_int32 *psame ); +LEPT_DLL extern l_int32 boxSimilar ( BOX *box1, BOX *box2, l_int32 leftdiff, l_int32 rightdiff, l_int32 topdiff, l_int32 botdiff, l_int32 *psimilar ); +LEPT_DLL extern l_int32 boxaSimilar ( BOXA *boxa1, BOXA *boxa2, l_int32 leftdiff, l_int32 rightdiff, l_int32 topdiff, l_int32 botdiff, l_int32 debug, l_int32 *psimilar, NUMA **pnasim ); +LEPT_DLL extern l_int32 boxaJoin ( BOXA *boxad, BOXA *boxas, l_int32 istart, l_int32 iend ); +LEPT_DLL extern l_int32 boxaaJoin ( BOXAA *baad, BOXAA *baas, l_int32 istart, l_int32 iend ); +LEPT_DLL extern l_int32 boxaSplitEvenOdd ( BOXA *boxa, l_int32 fillflag, BOXA **pboxae, BOXA **pboxao ); +LEPT_DLL extern BOXA * boxaMergeEvenOdd ( BOXA *boxae, BOXA *boxao, l_int32 fillflag ); +LEPT_DLL extern BOXA * boxaTransform ( BOXA *boxas, l_int32 shiftx, l_int32 shifty, l_float32 scalex, l_float32 scaley ); +LEPT_DLL extern BOX * boxTransform ( BOX *box, l_int32 shiftx, l_int32 shifty, l_float32 scalex, l_float32 scaley ); +LEPT_DLL extern BOXA * boxaTransformOrdered ( BOXA *boxas, l_int32 shiftx, l_int32 shifty, l_float32 scalex, l_float32 scaley, l_int32 xcen, l_int32 ycen, l_float32 angle, l_int32 order ); +LEPT_DLL extern BOX * boxTransformOrdered ( BOX *boxs, l_int32 shiftx, l_int32 shifty, l_float32 scalex, l_float32 scaley, l_int32 xcen, l_int32 ycen, l_float32 angle, l_int32 order ); +LEPT_DLL extern BOXA * boxaRotateOrth ( BOXA *boxas, l_int32 w, l_int32 h, l_int32 rotation ); +LEPT_DLL extern BOX * boxRotateOrth ( BOX *box, l_int32 w, l_int32 h, l_int32 rotation ); +LEPT_DLL extern BOXA * boxaSort ( BOXA *boxas, l_int32 sorttype, l_int32 sortorder, NUMA **pnaindex ); +LEPT_DLL extern BOXA * boxaBinSort ( BOXA *boxas, l_int32 sorttype, l_int32 sortorder, NUMA **pnaindex ); +LEPT_DLL extern BOXA * boxaSortByIndex ( BOXA *boxas, NUMA *naindex ); +LEPT_DLL extern BOXAA * boxaSort2d ( BOXA *boxas, NUMAA **pnaad, l_int32 delta1, l_int32 delta2, l_int32 minh1 ); +LEPT_DLL extern BOXAA * boxaSort2dByIndex ( BOXA *boxas, NUMAA *naa ); +LEPT_DLL extern l_int32 boxaExtractAsNuma ( BOXA *boxa, NUMA **pnal, NUMA **pnat, NUMA **pnar, NUMA **pnab, NUMA **pnaw, NUMA **pnah, l_int32 keepinvalid ); +LEPT_DLL extern l_int32 boxaExtractAsPta ( BOXA *boxa, PTA **pptal, PTA **pptat, PTA **pptar, PTA **pptab, PTA **pptaw, PTA **pptah, l_int32 keepinvalid ); +LEPT_DLL extern l_int32 boxaGetRankVals ( BOXA *boxa, l_float32 fract, l_int32 *px, l_int32 *py, l_int32 *pw, l_int32 *ph ); +LEPT_DLL extern l_int32 boxaGetMedianVals ( BOXA *boxa, l_int32 *px, l_int32 *py, l_int32 *pw, l_int32 *ph ); +LEPT_DLL extern l_int32 boxaGetAverageSize ( BOXA *boxa, l_float32 *pw, l_float32 *ph ); +LEPT_DLL extern l_int32 boxaaGetExtent ( BOXAA *baa, l_int32 *pw, l_int32 *ph, BOX **pbox, BOXA **pboxa ); +LEPT_DLL extern BOXA * boxaaFlattenToBoxa ( BOXAA *baa, NUMA **pnaindex, l_int32 copyflag ); +LEPT_DLL extern BOXA * boxaaFlattenAligned ( BOXAA *baa, l_int32 num, BOX *fillerbox, l_int32 copyflag ); +LEPT_DLL extern BOXAA * boxaEncapsulateAligned ( BOXA *boxa, l_int32 num, l_int32 copyflag ); +LEPT_DLL extern BOXAA * boxaaTranspose ( BOXAA *baas ); +LEPT_DLL extern l_int32 boxaaAlignBox ( BOXAA *baa, BOX *box, l_int32 delta, l_int32 *pindex ); +LEPT_DLL extern PIX * pixMaskConnComp ( PIX *pixs, l_int32 connectivity, BOXA **pboxa ); +LEPT_DLL extern PIX * pixMaskBoxa ( PIX *pixd, PIX *pixs, BOXA *boxa, l_int32 op ); +LEPT_DLL extern PIX * pixPaintBoxa ( PIX *pixs, BOXA *boxa, l_uint32 val ); +LEPT_DLL extern PIX * 
pixSetBlackOrWhiteBoxa ( PIX *pixs, BOXA *boxa, l_int32 op ); +LEPT_DLL extern PIX * pixPaintBoxaRandom ( PIX *pixs, BOXA *boxa ); +LEPT_DLL extern PIX * pixBlendBoxaRandom ( PIX *pixs, BOXA *boxa, l_float32 fract ); +LEPT_DLL extern PIX * pixDrawBoxa ( PIX *pixs, BOXA *boxa, l_int32 width, l_uint32 val ); +LEPT_DLL extern PIX * pixDrawBoxaRandom ( PIX *pixs, BOXA *boxa, l_int32 width ); +LEPT_DLL extern PIX * boxaaDisplay ( PIX *pixs, BOXAA *baa, l_int32 linewba, l_int32 linewb, l_uint32 colorba, l_uint32 colorb, l_int32 w, l_int32 h ); +LEPT_DLL extern PIXA * pixaDisplayBoxaa ( PIXA *pixas, BOXAA *baa, l_int32 colorflag, l_int32 width ); +LEPT_DLL extern BOXA * pixSplitIntoBoxa ( PIX *pixs, l_int32 minsum, l_int32 skipdist, l_int32 delta, l_int32 maxbg, l_int32 maxcomps, l_int32 remainder ); +LEPT_DLL extern BOXA * pixSplitComponentIntoBoxa ( PIX *pix, BOX *box, l_int32 minsum, l_int32 skipdist, l_int32 delta, l_int32 maxbg, l_int32 maxcomps, l_int32 remainder ); +LEPT_DLL extern BOXA * makeMosaicStrips ( l_int32 w, l_int32 h, l_int32 direction, l_int32 size ); +LEPT_DLL extern l_int32 boxaCompareRegions ( BOXA *boxa1, BOXA *boxa2, l_int32 areathresh, l_int32 *pnsame, l_float32 *pdiffarea, l_float32 *pdiffxor, PIX **ppixdb ); +LEPT_DLL extern BOX * pixSelectLargeULComp ( PIX *pixs, l_float32 areaslop, l_int32 yslop, l_int32 connectivity ); +LEPT_DLL extern BOX * boxaSelectLargeULBox ( BOXA *boxas, l_float32 areaslop, l_int32 yslop ); +LEPT_DLL extern BOXA * boxaSelectRange ( BOXA *boxas, l_int32 first, l_int32 last, l_int32 copyflag ); +LEPT_DLL extern BOXAA * boxaaSelectRange ( BOXAA *baas, l_int32 first, l_int32 last, l_int32 copyflag ); +LEPT_DLL extern BOXA * boxaSelectBySize ( BOXA *boxas, l_int32 width, l_int32 height, l_int32 type, l_int32 relation, l_int32 *pchanged ); +LEPT_DLL extern NUMA * boxaMakeSizeIndicator ( BOXA *boxa, l_int32 width, l_int32 height, l_int32 type, l_int32 relation ); +LEPT_DLL extern BOXA * boxaSelectByArea ( BOXA *boxas, l_int32 area, l_int32 relation, l_int32 *pchanged ); +LEPT_DLL extern NUMA * boxaMakeAreaIndicator ( BOXA *boxa, l_int32 area, l_int32 relation ); +LEPT_DLL extern BOXA * boxaSelectByWHRatio ( BOXA *boxas, l_float32 ratio, l_int32 relation, l_int32 *pchanged ); +LEPT_DLL extern NUMA * boxaMakeWHRatioIndicator ( BOXA *boxa, l_float32 ratio, l_int32 relation ); +LEPT_DLL extern BOXA * boxaSelectWithIndicator ( BOXA *boxas, NUMA *na, l_int32 *pchanged ); +LEPT_DLL extern BOXA * boxaPermutePseudorandom ( BOXA *boxas ); +LEPT_DLL extern BOXA * boxaPermuteRandom ( BOXA *boxad, BOXA *boxas ); +LEPT_DLL extern l_int32 boxaSwapBoxes ( BOXA *boxa, l_int32 i, l_int32 j ); +LEPT_DLL extern PTA * boxaConvertToPta ( BOXA *boxa, l_int32 ncorners ); +LEPT_DLL extern BOXA * ptaConvertToBoxa ( PTA *pta, l_int32 ncorners ); +LEPT_DLL extern PTA * boxConvertToPta ( BOX *box, l_int32 ncorners ); +LEPT_DLL extern BOX * ptaConvertToBox ( PTA *pta ); +LEPT_DLL extern BOXA * boxaSmoothSequenceLS ( BOXA *boxas, l_float32 factor, l_int32 subflag, l_int32 maxdiff, l_int32 debug ); +LEPT_DLL extern BOXA * boxaSmoothSequenceMedian ( BOXA *boxas, l_int32 halfwin, l_int32 subflag, l_int32 maxdiff, l_int32 debug ); +LEPT_DLL extern BOXA * boxaLinearFit ( BOXA *boxas, l_float32 factor, l_int32 debug ); +LEPT_DLL extern BOXA * boxaWindowedMedian ( BOXA *boxas, l_int32 halfwin, l_int32 debug ); +LEPT_DLL extern BOXA * boxaModifyWithBoxa ( BOXA *boxas, BOXA *boxam, l_int32 subflag, l_int32 maxdiff ); +LEPT_DLL extern BOXA * boxaConstrainSize ( BOXA *boxas, l_int32 width, 
l_int32 widthflag, l_int32 height, l_int32 heightflag ); +LEPT_DLL extern BOXA * boxaReconcileEvenOddHeight ( BOXA *boxas, l_int32 sides, l_int32 delh, l_int32 op, l_float32 factor, l_int32 start ); +LEPT_DLL extern BOXA * boxaReconcilePairWidth ( BOXA *boxas, l_int32 delw, l_int32 op, l_float32 factor, NUMA *na ); +LEPT_DLL extern l_int32 boxaPlotSides ( BOXA *boxa, const char *plotname, NUMA **pnal, NUMA **pnat, NUMA **pnar, NUMA **pnab, PIX **ppixd ); +LEPT_DLL extern l_int32 boxaPlotSizes ( BOXA *boxa, const char *plotname, NUMA **pnaw, NUMA **pnah, PIX **ppixd ); +LEPT_DLL extern BOXA * boxaFillSequence ( BOXA *boxas, l_int32 useflag, l_int32 debug ); +LEPT_DLL extern l_int32 boxaGetExtent ( BOXA *boxa, l_int32 *pw, l_int32 *ph, BOX **pbox ); +LEPT_DLL extern l_int32 boxaGetCoverage ( BOXA *boxa, l_int32 wc, l_int32 hc, l_int32 exactflag, l_float32 *pfract ); +LEPT_DLL extern l_int32 boxaaSizeRange ( BOXAA *baa, l_int32 *pminw, l_int32 *pminh, l_int32 *pmaxw, l_int32 *pmaxh ); +LEPT_DLL extern l_int32 boxaSizeRange ( BOXA *boxa, l_int32 *pminw, l_int32 *pminh, l_int32 *pmaxw, l_int32 *pmaxh ); +LEPT_DLL extern l_int32 boxaLocationRange ( BOXA *boxa, l_int32 *pminx, l_int32 *pminy, l_int32 *pmaxx, l_int32 *pmaxy ); +LEPT_DLL extern l_int32 boxaGetSizes ( BOXA *boxa, NUMA **pnaw, NUMA **pnah ); +LEPT_DLL extern l_int32 boxaGetArea ( BOXA *boxa, l_int32 *parea ); +LEPT_DLL extern PIX * boxaDisplayTiled ( BOXA *boxas, PIXA *pixa, l_int32 maxwidth, l_int32 linewidth, l_float32 scalefactor, l_int32 background, l_int32 spacing, l_int32 border ); +LEPT_DLL extern L_BYTEA * l_byteaCreate ( size_t nbytes ); +LEPT_DLL extern L_BYTEA * l_byteaInitFromMem ( l_uint8 *data, size_t size ); +LEPT_DLL extern L_BYTEA * l_byteaInitFromFile ( const char *fname ); +LEPT_DLL extern L_BYTEA * l_byteaInitFromStream ( FILE *fp ); +LEPT_DLL extern L_BYTEA * l_byteaCopy ( L_BYTEA *bas, l_int32 copyflag ); +LEPT_DLL extern void l_byteaDestroy ( L_BYTEA **pba ); +LEPT_DLL extern size_t l_byteaGetSize ( L_BYTEA *ba ); +LEPT_DLL extern l_uint8 * l_byteaGetData ( L_BYTEA *ba, size_t *psize ); +LEPT_DLL extern l_uint8 * l_byteaCopyData ( L_BYTEA *ba, size_t *psize ); +LEPT_DLL extern l_int32 l_byteaAppendData ( L_BYTEA *ba, l_uint8 *newdata, size_t newbytes ); +LEPT_DLL extern l_int32 l_byteaAppendString ( L_BYTEA *ba, char *str ); +LEPT_DLL extern l_int32 l_byteaJoin ( L_BYTEA *ba1, L_BYTEA **pba2 ); +LEPT_DLL extern l_int32 l_byteaSplit ( L_BYTEA *ba1, size_t splitloc, L_BYTEA **pba2 ); +LEPT_DLL extern l_int32 l_byteaFindEachSequence ( L_BYTEA *ba, l_uint8 *sequence, l_int32 seqlen, L_DNA **pda ); +LEPT_DLL extern l_int32 l_byteaWrite ( const char *fname, L_BYTEA *ba, size_t startloc, size_t endloc ); +LEPT_DLL extern l_int32 l_byteaWriteStream ( FILE *fp, L_BYTEA *ba, size_t startloc, size_t endloc ); +LEPT_DLL extern CCBORDA * ccbaCreate ( PIX *pixs, l_int32 n ); +LEPT_DLL extern void ccbaDestroy ( CCBORDA **pccba ); +LEPT_DLL extern CCBORD * ccbCreate ( PIX *pixs ); +LEPT_DLL extern void ccbDestroy ( CCBORD **pccb ); +LEPT_DLL extern l_int32 ccbaAddCcb ( CCBORDA *ccba, CCBORD *ccb ); +LEPT_DLL extern l_int32 ccbaGetCount ( CCBORDA *ccba ); +LEPT_DLL extern CCBORD * ccbaGetCcb ( CCBORDA *ccba, l_int32 index ); +LEPT_DLL extern CCBORDA * pixGetAllCCBorders ( PIX *pixs ); +LEPT_DLL extern CCBORD * pixGetCCBorders ( PIX *pixs, BOX *box ); +LEPT_DLL extern PTAA * pixGetOuterBordersPtaa ( PIX *pixs ); +LEPT_DLL extern PTA * pixGetOuterBorderPta ( PIX *pixs, BOX *box ); +LEPT_DLL extern l_int32 pixGetOuterBorder ( 
CCBORD *ccb, PIX *pixs, BOX *box ); +LEPT_DLL extern l_int32 pixGetHoleBorder ( CCBORD *ccb, PIX *pixs, BOX *box, l_int32 xs, l_int32 ys ); +LEPT_DLL extern l_int32 findNextBorderPixel ( l_int32 w, l_int32 h, l_uint32 *data, l_int32 wpl, l_int32 px, l_int32 py, l_int32 *pqpos, l_int32 *pnpx, l_int32 *pnpy ); +LEPT_DLL extern void locateOutsideSeedPixel ( l_int32 fpx, l_int32 fpy, l_int32 spx, l_int32 spy, l_int32 *pxs, l_int32 *pys ); +LEPT_DLL extern l_int32 ccbaGenerateGlobalLocs ( CCBORDA *ccba ); +LEPT_DLL extern l_int32 ccbaGenerateStepChains ( CCBORDA *ccba ); +LEPT_DLL extern l_int32 ccbaStepChainsToPixCoords ( CCBORDA *ccba, l_int32 coordtype ); +LEPT_DLL extern l_int32 ccbaGenerateSPGlobalLocs ( CCBORDA *ccba, l_int32 ptsflag ); +LEPT_DLL extern l_int32 ccbaGenerateSinglePath ( CCBORDA *ccba ); +LEPT_DLL extern PTA * getCutPathForHole ( PIX *pix, PTA *pta, BOX *boxinner, l_int32 *pdir, l_int32 *plen ); +LEPT_DLL extern PIX * ccbaDisplayBorder ( CCBORDA *ccba ); +LEPT_DLL extern PIX * ccbaDisplaySPBorder ( CCBORDA *ccba ); +LEPT_DLL extern PIX * ccbaDisplayImage1 ( CCBORDA *ccba ); +LEPT_DLL extern PIX * ccbaDisplayImage2 ( CCBORDA *ccba ); +LEPT_DLL extern l_int32 ccbaWrite ( const char *filename, CCBORDA *ccba ); +LEPT_DLL extern l_int32 ccbaWriteStream ( FILE *fp, CCBORDA *ccba ); +LEPT_DLL extern CCBORDA * ccbaRead ( const char *filename ); +LEPT_DLL extern CCBORDA * ccbaReadStream ( FILE *fp ); +LEPT_DLL extern l_int32 ccbaWriteSVG ( const char *filename, CCBORDA *ccba ); +LEPT_DLL extern char * ccbaWriteSVGString ( const char *filename, CCBORDA *ccba ); +LEPT_DLL extern PIXA * pixaThinConnected ( PIXA *pixas, l_int32 type, l_int32 connectivity, l_int32 maxiters ); +LEPT_DLL extern PIX * pixThinConnected ( PIX *pixs, l_int32 type, l_int32 connectivity, l_int32 maxiters ); +LEPT_DLL extern PIX * pixThinConnectedBySet ( PIX *pixs, l_int32 type, SELA *sela, l_int32 maxiters ); +LEPT_DLL extern SELA * selaMakeThinSets ( l_int32 index, l_int32 debug ); +LEPT_DLL extern l_int32 jbCorrelation ( const char *dirin, l_float32 thresh, l_float32 weight, l_int32 components, const char *rootname, l_int32 firstpage, l_int32 npages, l_int32 renderflag ); +LEPT_DLL extern l_int32 jbRankHaus ( const char *dirin, l_int32 size, l_float32 rank, l_int32 components, const char *rootname, l_int32 firstpage, l_int32 npages, l_int32 renderflag ); +LEPT_DLL extern JBCLASSER * jbWordsInTextlines ( const char *dirin, l_int32 reduction, l_int32 maxwidth, l_int32 maxheight, l_float32 thresh, l_float32 weight, NUMA **pnatl, l_int32 firstpage, l_int32 npages ); +LEPT_DLL extern l_int32 pixGetWordsInTextlines ( PIX *pixs, l_int32 reduction, l_int32 minwidth, l_int32 minheight, l_int32 maxwidth, l_int32 maxheight, BOXA **pboxad, PIXA **ppixad, NUMA **pnai ); +LEPT_DLL extern l_int32 pixGetWordBoxesInTextlines ( PIX *pixs, l_int32 reduction, l_int32 minwidth, l_int32 minheight, l_int32 maxwidth, l_int32 maxheight, BOXA **pboxad, NUMA **pnai ); +LEPT_DLL extern NUMAA * boxaExtractSortedPattern ( BOXA *boxa, NUMA *na ); +LEPT_DLL extern l_int32 numaaCompareImagesByBoxes ( NUMAA *naa1, NUMAA *naa2, l_int32 nperline, l_int32 nreq, l_int32 maxshiftx, l_int32 maxshifty, l_int32 delx, l_int32 dely, l_int32 *psame, l_int32 debugflag ); +LEPT_DLL extern l_int32 pixColorContent ( PIX *pixs, l_int32 rwhite, l_int32 gwhite, l_int32 bwhite, l_int32 mingray, PIX **ppixr, PIX **ppixg, PIX **ppixb ); +LEPT_DLL extern PIX * pixColorMagnitude ( PIX *pixs, l_int32 rwhite, l_int32 gwhite, l_int32 bwhite, l_int32 type ); +LEPT_DLL 
extern PIX * pixMaskOverColorPixels ( PIX *pixs, l_int32 threshdiff, l_int32 mindist ); +LEPT_DLL extern PIX * pixMaskOverColorRange ( PIX *pixs, l_int32 rmin, l_int32 rmax, l_int32 gmin, l_int32 gmax, l_int32 bmin, l_int32 bmax ); +LEPT_DLL extern l_int32 pixColorFraction ( PIX *pixs, l_int32 darkthresh, l_int32 lightthresh, l_int32 diffthresh, l_int32 factor, l_float32 *ppixfract, l_float32 *pcolorfract ); +LEPT_DLL extern l_int32 pixFindColorRegions ( PIX *pixs, PIX *pixm, l_int32 factor, l_int32 lightthresh, l_int32 darkthresh, l_int32 mindiff, l_int32 colordiff, l_float32 edgefract, l_float32 *pcolorfract, PIX **pcolormask1, PIX **pcolormask2, PIXA *pixadb ); +LEPT_DLL extern l_int32 pixNumSignificantGrayColors ( PIX *pixs, l_int32 darkthresh, l_int32 lightthresh, l_float32 minfract, l_int32 factor, l_int32 *pncolors ); +LEPT_DLL extern l_int32 pixColorsForQuantization ( PIX *pixs, l_int32 thresh, l_int32 *pncolors, l_int32 *piscolor, l_int32 debug ); +LEPT_DLL extern l_int32 pixNumColors ( PIX *pixs, l_int32 factor, l_int32 *pncolors ); +LEPT_DLL extern l_int32 pixGetMostPopulatedColors ( PIX *pixs, l_int32 sigbits, l_int32 factor, l_int32 ncolors, l_uint32 **parray, PIXCMAP **pcmap ); +LEPT_DLL extern PIX * pixSimpleColorQuantize ( PIX *pixs, l_int32 sigbits, l_int32 factor, l_int32 ncolors ); +LEPT_DLL extern NUMA * pixGetRGBHistogram ( PIX *pixs, l_int32 sigbits, l_int32 factor ); +LEPT_DLL extern l_int32 makeRGBIndexTables ( l_uint32 **prtab, l_uint32 **pgtab, l_uint32 **pbtab, l_int32 sigbits ); +LEPT_DLL extern l_int32 getRGBFromIndex ( l_uint32 index, l_int32 sigbits, l_int32 *prval, l_int32 *pgval, l_int32 *pbval ); +LEPT_DLL extern l_int32 pixHasHighlightRed ( PIX *pixs, l_int32 factor, l_float32 fract, l_float32 fthresh, l_int32 *phasred, l_float32 *pratio, PIX **ppixdb ); +LEPT_DLL extern PIX * pixColorGrayRegions ( PIX *pixs, BOXA *boxa, l_int32 type, l_int32 thresh, l_int32 rval, l_int32 gval, l_int32 bval ); +LEPT_DLL extern l_int32 pixColorGray ( PIX *pixs, BOX *box, l_int32 type, l_int32 thresh, l_int32 rval, l_int32 gval, l_int32 bval ); +LEPT_DLL extern PIX * pixColorGrayMasked ( PIX *pixs, PIX *pixm, l_int32 type, l_int32 thresh, l_int32 rval, l_int32 gval, l_int32 bval ); +LEPT_DLL extern PIX * pixSnapColor ( PIX *pixd, PIX *pixs, l_uint32 srcval, l_uint32 dstval, l_int32 diff ); +LEPT_DLL extern PIX * pixSnapColorCmap ( PIX *pixd, PIX *pixs, l_uint32 srcval, l_uint32 dstval, l_int32 diff ); +LEPT_DLL extern PIX * pixLinearMapToTargetColor ( PIX *pixd, PIX *pixs, l_uint32 srcval, l_uint32 dstval ); +LEPT_DLL extern l_int32 pixelLinearMapToTargetColor ( l_uint32 scolor, l_uint32 srcmap, l_uint32 dstmap, l_uint32 *pdcolor ); +LEPT_DLL extern PIX * pixShiftByComponent ( PIX *pixd, PIX *pixs, l_uint32 srcval, l_uint32 dstval ); +LEPT_DLL extern l_int32 pixelShiftByComponent ( l_int32 rval, l_int32 gval, l_int32 bval, l_uint32 srcval, l_uint32 dstval, l_uint32 *ppixel ); +LEPT_DLL extern l_int32 pixelFractionalShift ( l_int32 rval, l_int32 gval, l_int32 bval, l_float32 fraction, l_uint32 *ppixel ); +LEPT_DLL extern PIXCMAP * pixcmapCreate ( l_int32 depth ); +LEPT_DLL extern PIXCMAP * pixcmapCreateRandom ( l_int32 depth, l_int32 hasblack, l_int32 haswhite ); +LEPT_DLL extern PIXCMAP * pixcmapCreateLinear ( l_int32 d, l_int32 nlevels ); +LEPT_DLL extern PIXCMAP * pixcmapCopy ( PIXCMAP *cmaps ); +LEPT_DLL extern void pixcmapDestroy ( PIXCMAP **pcmap ); +LEPT_DLL extern l_int32 pixcmapAddColor ( PIXCMAP *cmap, l_int32 rval, l_int32 gval, l_int32 bval ); +LEPT_DLL extern 
l_int32 pixcmapAddRGBA ( PIXCMAP *cmap, l_int32 rval, l_int32 gval, l_int32 bval, l_int32 aval ); +LEPT_DLL extern l_int32 pixcmapAddNewColor ( PIXCMAP *cmap, l_int32 rval, l_int32 gval, l_int32 bval, l_int32 *pindex ); +LEPT_DLL extern l_int32 pixcmapAddNearestColor ( PIXCMAP *cmap, l_int32 rval, l_int32 gval, l_int32 bval, l_int32 *pindex ); +LEPT_DLL extern l_int32 pixcmapUsableColor ( PIXCMAP *cmap, l_int32 rval, l_int32 gval, l_int32 bval, l_int32 *pusable ); +LEPT_DLL extern l_int32 pixcmapAddBlackOrWhite ( PIXCMAP *cmap, l_int32 color, l_int32 *pindex ); +LEPT_DLL extern l_int32 pixcmapSetBlackAndWhite ( PIXCMAP *cmap, l_int32 setblack, l_int32 setwhite ); +LEPT_DLL extern l_int32 pixcmapGetCount ( PIXCMAP *cmap ); +LEPT_DLL extern l_int32 pixcmapGetFreeCount ( PIXCMAP *cmap ); +LEPT_DLL extern l_int32 pixcmapGetDepth ( PIXCMAP *cmap ); +LEPT_DLL extern l_int32 pixcmapGetMinDepth ( PIXCMAP *cmap, l_int32 *pmindepth ); +LEPT_DLL extern l_int32 pixcmapClear ( PIXCMAP *cmap ); +LEPT_DLL extern l_int32 pixcmapGetColor ( PIXCMAP *cmap, l_int32 index, l_int32 *prval, l_int32 *pgval, l_int32 *pbval ); +LEPT_DLL extern l_int32 pixcmapGetColor32 ( PIXCMAP *cmap, l_int32 index, l_uint32 *pval32 ); +LEPT_DLL extern l_int32 pixcmapGetRGBA ( PIXCMAP *cmap, l_int32 index, l_int32 *prval, l_int32 *pgval, l_int32 *pbval, l_int32 *paval ); +LEPT_DLL extern l_int32 pixcmapGetRGBA32 ( PIXCMAP *cmap, l_int32 index, l_uint32 *pval32 ); +LEPT_DLL extern l_int32 pixcmapResetColor ( PIXCMAP *cmap, l_int32 index, l_int32 rval, l_int32 gval, l_int32 bval ); +LEPT_DLL extern l_int32 pixcmapSetAlpha ( PIXCMAP *cmap, l_int32 index, l_int32 aval ); +LEPT_DLL extern l_int32 pixcmapGetIndex ( PIXCMAP *cmap, l_int32 rval, l_int32 gval, l_int32 bval, l_int32 *pindex ); +LEPT_DLL extern l_int32 pixcmapHasColor ( PIXCMAP *cmap, l_int32 *pcolor ); +LEPT_DLL extern l_int32 pixcmapIsOpaque ( PIXCMAP *cmap, l_int32 *popaque ); +LEPT_DLL extern l_int32 pixcmapIsBlackAndWhite ( PIXCMAP *cmap, l_int32 *pblackwhite ); +LEPT_DLL extern l_int32 pixcmapCountGrayColors ( PIXCMAP *cmap, l_int32 *pngray ); +LEPT_DLL extern l_int32 pixcmapGetRankIntensity ( PIXCMAP *cmap, l_float32 rankval, l_int32 *pindex ); +LEPT_DLL extern l_int32 pixcmapGetNearestIndex ( PIXCMAP *cmap, l_int32 rval, l_int32 gval, l_int32 bval, l_int32 *pindex ); +LEPT_DLL extern l_int32 pixcmapGetNearestGrayIndex ( PIXCMAP *cmap, l_int32 val, l_int32 *pindex ); +LEPT_DLL extern l_int32 pixcmapGetDistanceToColor ( PIXCMAP *cmap, l_int32 index, l_int32 rval, l_int32 gval, l_int32 bval, l_int32 *pdist ); +LEPT_DLL extern l_int32 pixcmapGetRangeValues ( PIXCMAP *cmap, l_int32 select, l_int32 *pminval, l_int32 *pmaxval, l_int32 *pminindex, l_int32 *pmaxindex ); +LEPT_DLL extern PIXCMAP * pixcmapGrayToColor ( l_uint32 color ); +LEPT_DLL extern PIXCMAP * pixcmapColorToGray ( PIXCMAP *cmaps, l_float32 rwt, l_float32 gwt, l_float32 bwt ); +LEPT_DLL extern PIXCMAP * pixcmapConvertTo4 ( PIXCMAP *cmaps ); +LEPT_DLL extern PIXCMAP * pixcmapConvertTo8 ( PIXCMAP *cmaps ); +LEPT_DLL extern PIXCMAP * pixcmapRead ( const char *filename ); +LEPT_DLL extern PIXCMAP * pixcmapReadStream ( FILE *fp ); +LEPT_DLL extern PIXCMAP * pixcmapReadMem ( const l_uint8 *data, size_t size ); +LEPT_DLL extern l_int32 pixcmapWrite ( const char *filename, PIXCMAP *cmap ); +LEPT_DLL extern l_int32 pixcmapWriteStream ( FILE *fp, PIXCMAP *cmap ); +LEPT_DLL extern l_int32 pixcmapWriteMem ( l_uint8 **pdata, size_t *psize, PIXCMAP *cmap ); +LEPT_DLL extern l_int32 pixcmapToArrays ( PIXCMAP *cmap, l_int32 
**prmap, l_int32 **pgmap, l_int32 **pbmap, l_int32 **pamap ); +LEPT_DLL extern l_int32 pixcmapToRGBTable ( PIXCMAP *cmap, l_uint32 **ptab, l_int32 *pncolors ); +LEPT_DLL extern l_int32 pixcmapSerializeToMemory ( PIXCMAP *cmap, l_int32 cpc, l_int32 *pncolors, l_uint8 **pdata ); +LEPT_DLL extern PIXCMAP * pixcmapDeserializeFromMemory ( l_uint8 *data, l_int32 cpc, l_int32 ncolors ); +LEPT_DLL extern char * pixcmapConvertToHex ( l_uint8 *data, l_int32 ncolors ); +LEPT_DLL extern l_int32 pixcmapGammaTRC ( PIXCMAP *cmap, l_float32 gamma, l_int32 minval, l_int32 maxval ); +LEPT_DLL extern l_int32 pixcmapContrastTRC ( PIXCMAP *cmap, l_float32 factor ); +LEPT_DLL extern l_int32 pixcmapShiftIntensity ( PIXCMAP *cmap, l_float32 fraction ); +LEPT_DLL extern l_int32 pixcmapShiftByComponent ( PIXCMAP *cmap, l_uint32 srcval, l_uint32 dstval ); +LEPT_DLL extern PIX * pixColorMorph ( PIX *pixs, l_int32 type, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixOctreeColorQuant ( PIX *pixs, l_int32 colors, l_int32 ditherflag ); +LEPT_DLL extern PIX * pixOctreeColorQuantGeneral ( PIX *pixs, l_int32 colors, l_int32 ditherflag, l_float32 validthresh, l_float32 colorthresh ); +LEPT_DLL extern l_int32 makeRGBToIndexTables ( l_uint32 **prtab, l_uint32 **pgtab, l_uint32 **pbtab, l_int32 cqlevels ); +LEPT_DLL extern void getOctcubeIndexFromRGB ( l_int32 rval, l_int32 gval, l_int32 bval, l_uint32 *rtab, l_uint32 *gtab, l_uint32 *btab, l_uint32 *pindex ); +LEPT_DLL extern PIX * pixOctreeQuantByPopulation ( PIX *pixs, l_int32 level, l_int32 ditherflag ); +LEPT_DLL extern PIX * pixOctreeQuantNumColors ( PIX *pixs, l_int32 maxcolors, l_int32 subsample ); +LEPT_DLL extern PIX * pixOctcubeQuantMixedWithGray ( PIX *pixs, l_int32 depth, l_int32 graylevels, l_int32 delta ); +LEPT_DLL extern PIX * pixFixedOctcubeQuant256 ( PIX *pixs, l_int32 ditherflag ); +LEPT_DLL extern PIX * pixFewColorsOctcubeQuant1 ( PIX *pixs, l_int32 level ); +LEPT_DLL extern PIX * pixFewColorsOctcubeQuant2 ( PIX *pixs, l_int32 level, NUMA *na, l_int32 ncolors, l_int32 *pnerrors ); +LEPT_DLL extern PIX * pixFewColorsOctcubeQuantMixed ( PIX *pixs, l_int32 level, l_int32 darkthresh, l_int32 lightthresh, l_int32 diffthresh, l_float32 minfract, l_int32 maxspan ); +LEPT_DLL extern PIX * pixFixedOctcubeQuantGenRGB ( PIX *pixs, l_int32 level ); +LEPT_DLL extern PIX * pixQuantFromCmap ( PIX *pixs, PIXCMAP *cmap, l_int32 mindepth, l_int32 level, l_int32 metric ); +LEPT_DLL extern PIX * pixOctcubeQuantFromCmap ( PIX *pixs, PIXCMAP *cmap, l_int32 mindepth, l_int32 level, l_int32 metric ); +LEPT_DLL extern NUMA * pixOctcubeHistogram ( PIX *pixs, l_int32 level, l_int32 *pncolors ); +LEPT_DLL extern l_int32 * pixcmapToOctcubeLUT ( PIXCMAP *cmap, l_int32 level, l_int32 metric ); +LEPT_DLL extern l_int32 pixRemoveUnusedColors ( PIX *pixs ); +LEPT_DLL extern l_int32 pixNumberOccupiedOctcubes ( PIX *pix, l_int32 level, l_int32 mincount, l_float32 minfract, l_int32 *pncolors ); +LEPT_DLL extern PIX * pixMedianCutQuant ( PIX *pixs, l_int32 ditherflag ); +LEPT_DLL extern PIX * pixMedianCutQuantGeneral ( PIX *pixs, l_int32 ditherflag, l_int32 outdepth, l_int32 maxcolors, l_int32 sigbits, l_int32 maxsub, l_int32 checkbw ); +LEPT_DLL extern PIX * pixMedianCutQuantMixed ( PIX *pixs, l_int32 ncolor, l_int32 ngray, l_int32 darkthresh, l_int32 lightthresh, l_int32 diffthresh ); +LEPT_DLL extern PIX * pixFewColorsMedianCutQuantMixed ( PIX *pixs, l_int32 ncolor, l_int32 ngray, l_int32 maxncolors, l_int32 darkthresh, l_int32 lightthresh, l_int32 diffthresh ); +LEPT_DLL extern 
l_int32 * pixMedianCutHisto ( PIX *pixs, l_int32 sigbits, l_int32 subsample ); +LEPT_DLL extern PIX * pixColorSegment ( PIX *pixs, l_int32 maxdist, l_int32 maxcolors, l_int32 selsize, l_int32 finalcolors, l_int32 debugflag ); +LEPT_DLL extern PIX * pixColorSegmentCluster ( PIX *pixs, l_int32 maxdist, l_int32 maxcolors, l_int32 debugflag ); +LEPT_DLL extern l_int32 pixAssignToNearestColor ( PIX *pixd, PIX *pixs, PIX *pixm, l_int32 level, l_int32 *countarray ); +LEPT_DLL extern l_int32 pixColorSegmentClean ( PIX *pixs, l_int32 selsize, l_int32 *countarray ); +LEPT_DLL extern l_int32 pixColorSegmentRemoveColors ( PIX *pixd, PIX *pixs, l_int32 finalcolors ); +LEPT_DLL extern PIX * pixConvertRGBToHSV ( PIX *pixd, PIX *pixs ); +LEPT_DLL extern PIX * pixConvertHSVToRGB ( PIX *pixd, PIX *pixs ); +LEPT_DLL extern l_int32 convertRGBToHSV ( l_int32 rval, l_int32 gval, l_int32 bval, l_int32 *phval, l_int32 *psval, l_int32 *pvval ); +LEPT_DLL extern l_int32 convertHSVToRGB ( l_int32 hval, l_int32 sval, l_int32 vval, l_int32 *prval, l_int32 *pgval, l_int32 *pbval ); +LEPT_DLL extern l_int32 pixcmapConvertRGBToHSV ( PIXCMAP *cmap ); +LEPT_DLL extern l_int32 pixcmapConvertHSVToRGB ( PIXCMAP *cmap ); +LEPT_DLL extern PIX * pixConvertRGBToHue ( PIX *pixs ); +LEPT_DLL extern PIX * pixConvertRGBToSaturation ( PIX *pixs ); +LEPT_DLL extern PIX * pixConvertRGBToValue ( PIX *pixs ); +LEPT_DLL extern PIX * pixMakeRangeMaskHS ( PIX *pixs, l_int32 huecenter, l_int32 huehw, l_int32 satcenter, l_int32 sathw, l_int32 regionflag ); +LEPT_DLL extern PIX * pixMakeRangeMaskHV ( PIX *pixs, l_int32 huecenter, l_int32 huehw, l_int32 valcenter, l_int32 valhw, l_int32 regionflag ); +LEPT_DLL extern PIX * pixMakeRangeMaskSV ( PIX *pixs, l_int32 satcenter, l_int32 sathw, l_int32 valcenter, l_int32 valhw, l_int32 regionflag ); +LEPT_DLL extern PIX * pixMakeHistoHS ( PIX *pixs, l_int32 factor, NUMA **pnahue, NUMA **pnasat ); +LEPT_DLL extern PIX * pixMakeHistoHV ( PIX *pixs, l_int32 factor, NUMA **pnahue, NUMA **pnaval ); +LEPT_DLL extern PIX * pixMakeHistoSV ( PIX *pixs, l_int32 factor, NUMA **pnasat, NUMA **pnaval ); +LEPT_DLL extern l_int32 pixFindHistoPeaksHSV ( PIX *pixs, l_int32 type, l_int32 width, l_int32 height, l_int32 npeaks, l_float32 erasefactor, PTA **ppta, NUMA **pnatot, PIXA **ppixa ); +LEPT_DLL extern PIX * displayHSVColorRange ( l_int32 hval, l_int32 sval, l_int32 vval, l_int32 huehw, l_int32 sathw, l_int32 nsamp, l_int32 factor ); +LEPT_DLL extern PIX * pixConvertRGBToYUV ( PIX *pixd, PIX *pixs ); +LEPT_DLL extern PIX * pixConvertYUVToRGB ( PIX *pixd, PIX *pixs ); +LEPT_DLL extern l_int32 convertRGBToYUV ( l_int32 rval, l_int32 gval, l_int32 bval, l_int32 *pyval, l_int32 *puval, l_int32 *pvval ); +LEPT_DLL extern l_int32 convertYUVToRGB ( l_int32 yval, l_int32 uval, l_int32 vval, l_int32 *prval, l_int32 *pgval, l_int32 *pbval ); +LEPT_DLL extern l_int32 pixcmapConvertRGBToYUV ( PIXCMAP *cmap ); +LEPT_DLL extern l_int32 pixcmapConvertYUVToRGB ( PIXCMAP *cmap ); +LEPT_DLL extern FPIXA * pixConvertRGBToXYZ ( PIX *pixs ); +LEPT_DLL extern PIX * fpixaConvertXYZToRGB ( FPIXA *fpixa ); +LEPT_DLL extern l_int32 convertRGBToXYZ ( l_int32 rval, l_int32 gval, l_int32 bval, l_float32 *pfxval, l_float32 *pfyval, l_float32 *pfzval ); +LEPT_DLL extern l_int32 convertXYZToRGB ( l_float32 fxval, l_float32 fyval, l_float32 fzval, l_int32 blackout, l_int32 *prval, l_int32 *pgval, l_int32 *pbval ); +LEPT_DLL extern FPIXA * fpixaConvertXYZToLAB ( FPIXA *fpixas ); +LEPT_DLL extern FPIXA * fpixaConvertLABToXYZ ( FPIXA *fpixas ); 
+LEPT_DLL extern l_int32 convertXYZToLAB ( l_float32 xval, l_float32 yval, l_float32 zval, l_float32 *plval, l_float32 *paval, l_float32 *pbval ); +LEPT_DLL extern l_int32 convertLABToXYZ ( l_float32 lval, l_float32 aval, l_float32 bval, l_float32 *pxval, l_float32 *pyval, l_float32 *pzval ); +LEPT_DLL extern FPIXA * pixConvertRGBToLAB ( PIX *pixs ); +LEPT_DLL extern PIX * fpixaConvertLABToRGB ( FPIXA *fpixa ); +LEPT_DLL extern l_int32 convertRGBToLAB ( l_int32 rval, l_int32 gval, l_int32 bval, l_float32 *pflval, l_float32 *pfaval, l_float32 *pfbval ); +LEPT_DLL extern l_int32 convertLABToRGB ( l_float32 flval, l_float32 faval, l_float32 fbval, l_int32 *prval, l_int32 *pgval, l_int32 *pbval ); +LEPT_DLL extern l_int32 pixEqual ( PIX *pix1, PIX *pix2, l_int32 *psame ); +LEPT_DLL extern l_int32 pixEqualWithAlpha ( PIX *pix1, PIX *pix2, l_int32 use_alpha, l_int32 *psame ); +LEPT_DLL extern l_int32 pixEqualWithCmap ( PIX *pix1, PIX *pix2, l_int32 *psame ); +LEPT_DLL extern l_int32 cmapEqual ( PIXCMAP *cmap1, PIXCMAP *cmap2, l_int32 ncomps, l_int32 *psame ); +LEPT_DLL extern l_int32 pixUsesCmapColor ( PIX *pixs, l_int32 *pcolor ); +LEPT_DLL extern l_int32 pixCorrelationBinary ( PIX *pix1, PIX *pix2, l_float32 *pval ); +LEPT_DLL extern PIX * pixDisplayDiffBinary ( PIX *pix1, PIX *pix2 ); +LEPT_DLL extern l_int32 pixCompareBinary ( PIX *pix1, PIX *pix2, l_int32 comptype, l_float32 *pfract, PIX **ppixdiff ); +LEPT_DLL extern l_int32 pixCompareGrayOrRGB ( PIX *pix1, PIX *pix2, l_int32 comptype, l_int32 plottype, l_int32 *psame, l_float32 *pdiff, l_float32 *prmsdiff, PIX **ppixdiff ); +LEPT_DLL extern l_int32 pixCompareGray ( PIX *pix1, PIX *pix2, l_int32 comptype, l_int32 plottype, l_int32 *psame, l_float32 *pdiff, l_float32 *prmsdiff, PIX **ppixdiff ); +LEPT_DLL extern l_int32 pixCompareRGB ( PIX *pix1, PIX *pix2, l_int32 comptype, l_int32 plottype, l_int32 *psame, l_float32 *pdiff, l_float32 *prmsdiff, PIX **ppixdiff ); +LEPT_DLL extern l_int32 pixCompareTiled ( PIX *pix1, PIX *pix2, l_int32 sx, l_int32 sy, l_int32 type, PIX **ppixdiff ); +LEPT_DLL extern NUMA * pixCompareRankDifference ( PIX *pix1, PIX *pix2, l_int32 factor ); +LEPT_DLL extern l_int32 pixTestForSimilarity ( PIX *pix1, PIX *pix2, l_int32 factor, l_int32 mindiff, l_float32 maxfract, l_float32 maxave, l_int32 *psimilar, l_int32 printstats ); +LEPT_DLL extern l_int32 pixGetDifferenceStats ( PIX *pix1, PIX *pix2, l_int32 factor, l_int32 mindiff, l_float32 *pfractdiff, l_float32 *pavediff, l_int32 printstats ); +LEPT_DLL extern NUMA * pixGetDifferenceHistogram ( PIX *pix1, PIX *pix2, l_int32 factor ); +LEPT_DLL extern l_int32 pixGetPerceptualDiff ( PIX *pixs1, PIX *pixs2, l_int32 sampling, l_int32 dilation, l_int32 mindiff, l_float32 *pfract, PIX **ppixdiff1, PIX **ppixdiff2 ); +LEPT_DLL extern l_int32 pixGetPSNR ( PIX *pix1, PIX *pix2, l_int32 factor, l_float32 *ppsnr ); +LEPT_DLL extern l_int32 pixaComparePhotoRegionsByHisto ( PIXA *pixa, l_float32 minratio, l_float32 textthresh, l_int32 factor, l_int32 nx, l_int32 ny, l_float32 simthresh, NUMA **pnai, l_float32 **pscores, PIX **ppixd ); +LEPT_DLL extern l_int32 pixComparePhotoRegionsByHisto ( PIX *pix1, PIX *pix2, BOX *box1, BOX *box2, l_float32 minratio, l_int32 factor, l_int32 nx, l_int32 ny, l_float32 *pscore, l_int32 debugflag ); +LEPT_DLL extern l_int32 pixGenPhotoHistos ( PIX *pixs, BOX *box, l_int32 factor, l_float32 thresh, l_int32 nx, l_int32 ny, NUMAA **pnaa, l_int32 *pw, l_int32 *ph, l_int32 debugflag ); +LEPT_DLL extern PIX * pixPadToCenterCentroid ( PIX *pixs, l_int32 
factor ); +LEPT_DLL extern l_int32 pixCentroid8 ( PIX *pixs, l_int32 factor, l_float32 *pcx, l_float32 *pcy ); +LEPT_DLL extern l_int32 pixDecideIfPhotoImage ( PIX *pix, l_int32 factor, l_int32 nx, l_int32 ny, l_float32 thresh, NUMAA **pnaa, PIXA *pixadebug ); +LEPT_DLL extern l_int32 compareTilesByHisto ( NUMAA *naa1, NUMAA *naa2, l_float32 minratio, l_int32 w1, l_int32 h1, l_int32 w2, l_int32 h2, l_float32 *pscore, PIXA *pixadebug ); +LEPT_DLL extern l_int32 pixCompareGrayByHisto ( PIX *pix1, PIX *pix2, BOX *box1, BOX *box2, l_float32 minratio, l_int32 maxgray, l_int32 factor, l_int32 nx, l_int32 ny, l_float32 *pscore, l_int32 debugflag ); +LEPT_DLL extern l_int32 pixCropAlignedToCentroid ( PIX *pix1, PIX *pix2, l_int32 factor, BOX **pbox1, BOX **pbox2 ); +LEPT_DLL extern l_uint8 * l_compressGrayHistograms ( NUMAA *naa, l_int32 w, l_int32 h, size_t *psize ); +LEPT_DLL extern NUMAA * l_uncompressGrayHistograms ( l_uint8 *bytea, size_t size, l_int32 *pw, l_int32 *ph ); +LEPT_DLL extern l_int32 pixCompareWithTranslation ( PIX *pix1, PIX *pix2, l_int32 thresh, l_int32 *pdelx, l_int32 *pdely, l_float32 *pscore, l_int32 debugflag ); +LEPT_DLL extern l_int32 pixBestCorrelation ( PIX *pix1, PIX *pix2, l_int32 area1, l_int32 area2, l_int32 etransx, l_int32 etransy, l_int32 maxshift, l_int32 *tab8, l_int32 *pdelx, l_int32 *pdely, l_float32 *pscore, l_int32 debugflag ); +LEPT_DLL extern BOXA * pixConnComp ( PIX *pixs, PIXA **ppixa, l_int32 connectivity ); +LEPT_DLL extern BOXA * pixConnCompPixa ( PIX *pixs, PIXA **ppixa, l_int32 connectivity ); +LEPT_DLL extern BOXA * pixConnCompBB ( PIX *pixs, l_int32 connectivity ); +LEPT_DLL extern l_int32 pixCountConnComp ( PIX *pixs, l_int32 connectivity, l_int32 *pcount ); +LEPT_DLL extern l_int32 nextOnPixelInRaster ( PIX *pixs, l_int32 xstart, l_int32 ystart, l_int32 *px, l_int32 *py ); +LEPT_DLL extern l_int32 nextOnPixelInRasterLow ( l_uint32 *data, l_int32 w, l_int32 h, l_int32 wpl, l_int32 xstart, l_int32 ystart, l_int32 *px, l_int32 *py ); +LEPT_DLL extern BOX * pixSeedfillBB ( PIX *pixs, L_STACK *stack, l_int32 x, l_int32 y, l_int32 connectivity ); +LEPT_DLL extern BOX * pixSeedfill4BB ( PIX *pixs, L_STACK *stack, l_int32 x, l_int32 y ); +LEPT_DLL extern BOX * pixSeedfill8BB ( PIX *pixs, L_STACK *stack, l_int32 x, l_int32 y ); +LEPT_DLL extern l_int32 pixSeedfill ( PIX *pixs, L_STACK *stack, l_int32 x, l_int32 y, l_int32 connectivity ); +LEPT_DLL extern l_int32 pixSeedfill4 ( PIX *pixs, L_STACK *stack, l_int32 x, l_int32 y ); +LEPT_DLL extern l_int32 pixSeedfill8 ( PIX *pixs, L_STACK *stack, l_int32 x, l_int32 y ); +LEPT_DLL extern l_int32 convertFilesTo1bpp ( const char *dirin, const char *substr, l_int32 upscaling, l_int32 thresh, l_int32 firstpage, l_int32 npages, const char *dirout, l_int32 outformat ); +LEPT_DLL extern PIX * pixBlockconv ( PIX *pix, l_int32 wc, l_int32 hc ); +LEPT_DLL extern PIX * pixBlockconvGray ( PIX *pixs, PIX *pixacc, l_int32 wc, l_int32 hc ); +LEPT_DLL extern PIX * pixBlockconvAccum ( PIX *pixs ); +LEPT_DLL extern PIX * pixBlockconvGrayUnnormalized ( PIX *pixs, l_int32 wc, l_int32 hc ); +LEPT_DLL extern PIX * pixBlockconvTiled ( PIX *pix, l_int32 wc, l_int32 hc, l_int32 nx, l_int32 ny ); +LEPT_DLL extern PIX * pixBlockconvGrayTile ( PIX *pixs, PIX *pixacc, l_int32 wc, l_int32 hc ); +LEPT_DLL extern l_int32 pixWindowedStats ( PIX *pixs, l_int32 wc, l_int32 hc, l_int32 hasborder, PIX **ppixm, PIX **ppixms, FPIX **pfpixv, FPIX **pfpixrv ); +LEPT_DLL extern PIX * pixWindowedMean ( PIX *pixs, l_int32 wc, l_int32 hc, l_int32 
hasborder, l_int32 normflag ); +LEPT_DLL extern PIX * pixWindowedMeanSquare ( PIX *pixs, l_int32 wc, l_int32 hc, l_int32 hasborder ); +LEPT_DLL extern l_int32 pixWindowedVariance ( PIX *pixm, PIX *pixms, FPIX **pfpixv, FPIX **pfpixrv ); +LEPT_DLL extern DPIX * pixMeanSquareAccum ( PIX *pixs ); +LEPT_DLL extern PIX * pixBlockrank ( PIX *pixs, PIX *pixacc, l_int32 wc, l_int32 hc, l_float32 rank ); +LEPT_DLL extern PIX * pixBlocksum ( PIX *pixs, PIX *pixacc, l_int32 wc, l_int32 hc ); +LEPT_DLL extern PIX * pixCensusTransform ( PIX *pixs, l_int32 halfsize, PIX *pixacc ); +LEPT_DLL extern PIX * pixConvolve ( PIX *pixs, L_KERNEL *kel, l_int32 outdepth, l_int32 normflag ); +LEPT_DLL extern PIX * pixConvolveSep ( PIX *pixs, L_KERNEL *kelx, L_KERNEL *kely, l_int32 outdepth, l_int32 normflag ); +LEPT_DLL extern PIX * pixConvolveRGB ( PIX *pixs, L_KERNEL *kel ); +LEPT_DLL extern PIX * pixConvolveRGBSep ( PIX *pixs, L_KERNEL *kelx, L_KERNEL *kely ); +LEPT_DLL extern FPIX * fpixConvolve ( FPIX *fpixs, L_KERNEL *kel, l_int32 normflag ); +LEPT_DLL extern FPIX * fpixConvolveSep ( FPIX *fpixs, L_KERNEL *kelx, L_KERNEL *kely, l_int32 normflag ); +LEPT_DLL extern PIX * pixConvolveWithBias ( PIX *pixs, L_KERNEL *kel1, L_KERNEL *kel2, l_int32 force8, l_int32 *pbias ); +LEPT_DLL extern void l_setConvolveSampling ( l_int32 xfact, l_int32 yfact ); +LEPT_DLL extern PIX * pixAddGaussianNoise ( PIX *pixs, l_float32 stdev ); +LEPT_DLL extern l_float32 gaussDistribSampling ( ); +LEPT_DLL extern l_int32 pixCorrelationScore ( PIX *pix1, PIX *pix2, l_int32 area1, l_int32 area2, l_float32 delx, l_float32 dely, l_int32 maxdiffw, l_int32 maxdiffh, l_int32 *tab, l_float32 *pscore ); +LEPT_DLL extern l_int32 pixCorrelationScoreThresholded ( PIX *pix1, PIX *pix2, l_int32 area1, l_int32 area2, l_float32 delx, l_float32 dely, l_int32 maxdiffw, l_int32 maxdiffh, l_int32 *tab, l_int32 *downcount, l_float32 score_threshold ); +LEPT_DLL extern l_int32 pixCorrelationScoreSimple ( PIX *pix1, PIX *pix2, l_int32 area1, l_int32 area2, l_float32 delx, l_float32 dely, l_int32 maxdiffw, l_int32 maxdiffh, l_int32 *tab, l_float32 *pscore ); +LEPT_DLL extern l_int32 pixCorrelationScoreShifted ( PIX *pix1, PIX *pix2, l_int32 area1, l_int32 area2, l_int32 delx, l_int32 dely, l_int32 *tab, l_float32 *pscore ); +LEPT_DLL extern L_DEWARP * dewarpCreate ( PIX *pixs, l_int32 pageno ); +LEPT_DLL extern L_DEWARP * dewarpCreateRef ( l_int32 pageno, l_int32 refpage ); +LEPT_DLL extern void dewarpDestroy ( L_DEWARP **pdew ); +LEPT_DLL extern L_DEWARPA * dewarpaCreate ( l_int32 nptrs, l_int32 sampling, l_int32 redfactor, l_int32 minlines, l_int32 maxdist ); +LEPT_DLL extern L_DEWARPA * dewarpaCreateFromPixacomp ( PIXAC *pixac, l_int32 useboth, l_int32 sampling, l_int32 minlines, l_int32 maxdist ); +LEPT_DLL extern void dewarpaDestroy ( L_DEWARPA **pdewa ); +LEPT_DLL extern l_int32 dewarpaDestroyDewarp ( L_DEWARPA *dewa, l_int32 pageno ); +LEPT_DLL extern l_int32 dewarpaInsertDewarp ( L_DEWARPA *dewa, L_DEWARP *dew ); +LEPT_DLL extern L_DEWARP * dewarpaGetDewarp ( L_DEWARPA *dewa, l_int32 index ); +LEPT_DLL extern l_int32 dewarpaSetCurvatures ( L_DEWARPA *dewa, l_int32 max_linecurv, l_int32 min_diff_linecurv, l_int32 max_diff_linecurv, l_int32 max_edgecurv, l_int32 max_diff_edgecurv, l_int32 max_edgeslope ); +LEPT_DLL extern l_int32 dewarpaUseBothArrays ( L_DEWARPA *dewa, l_int32 useboth ); +LEPT_DLL extern l_int32 dewarpaSetCheckColumns ( L_DEWARPA *dewa, l_int32 check_columns ); +LEPT_DLL extern l_int32 dewarpaSetMaxDistance ( L_DEWARPA *dewa, l_int32 
maxdist ); +LEPT_DLL extern L_DEWARP * dewarpRead ( const char *filename ); +LEPT_DLL extern L_DEWARP * dewarpReadStream ( FILE *fp ); +LEPT_DLL extern L_DEWARP * dewarpReadMem ( const l_uint8 *data, size_t size ); +LEPT_DLL extern l_int32 dewarpWrite ( const char *filename, L_DEWARP *dew ); +LEPT_DLL extern l_int32 dewarpWriteStream ( FILE *fp, L_DEWARP *dew ); +LEPT_DLL extern l_int32 dewarpWriteMem ( l_uint8 **pdata, size_t *psize, L_DEWARP *dew ); +LEPT_DLL extern L_DEWARPA * dewarpaRead ( const char *filename ); +LEPT_DLL extern L_DEWARPA * dewarpaReadStream ( FILE *fp ); +LEPT_DLL extern L_DEWARPA * dewarpaReadMem ( const l_uint8 *data, size_t size ); +LEPT_DLL extern l_int32 dewarpaWrite ( const char *filename, L_DEWARPA *dewa ); +LEPT_DLL extern l_int32 dewarpaWriteStream ( FILE *fp, L_DEWARPA *dewa ); +LEPT_DLL extern l_int32 dewarpaWriteMem ( l_uint8 **pdata, size_t *psize, L_DEWARPA *dewa ); +LEPT_DLL extern l_int32 dewarpBuildPageModel ( L_DEWARP *dew, const char *debugfile ); +LEPT_DLL extern l_int32 dewarpFindVertDisparity ( L_DEWARP *dew, PTAA *ptaa, l_int32 rotflag ); +LEPT_DLL extern l_int32 dewarpFindHorizDisparity ( L_DEWARP *dew, PTAA *ptaa ); +LEPT_DLL extern PTAA * dewarpGetTextlineCenters ( PIX *pixs, l_int32 debugflag ); +LEPT_DLL extern PTAA * dewarpRemoveShortLines ( PIX *pixs, PTAA *ptaas, l_float32 fract, l_int32 debugflag ); +LEPT_DLL extern l_int32 dewarpFindHorizSlopeDisparity ( L_DEWARP *dew, PIX *pixb, l_float32 fractthresh, l_int32 parity ); +LEPT_DLL extern l_int32 dewarpBuildLineModel ( L_DEWARP *dew, l_int32 opensize, const char *debugfile ); +LEPT_DLL extern l_int32 dewarpaModelStatus ( L_DEWARPA *dewa, l_int32 pageno, l_int32 *pvsuccess, l_int32 *phsuccess ); +LEPT_DLL extern l_int32 dewarpaApplyDisparity ( L_DEWARPA *dewa, l_int32 pageno, PIX *pixs, l_int32 grayin, l_int32 x, l_int32 y, PIX **ppixd, const char *debugfile ); +LEPT_DLL extern l_int32 dewarpaApplyDisparityBoxa ( L_DEWARPA *dewa, l_int32 pageno, PIX *pixs, BOXA *boxas, l_int32 mapdir, l_int32 x, l_int32 y, BOXA **pboxad, const char *debugfile ); +LEPT_DLL extern l_int32 dewarpMinimize ( L_DEWARP *dew ); +LEPT_DLL extern l_int32 dewarpPopulateFullRes ( L_DEWARP *dew, PIX *pix, l_int32 x, l_int32 y ); +LEPT_DLL extern l_int32 dewarpSinglePage ( PIX *pixs, l_int32 thresh, l_int32 adaptive, l_int32 useboth, l_int32 check_columns, PIX **ppixd, L_DEWARPA **pdewa, l_int32 debug ); +LEPT_DLL extern l_int32 dewarpSinglePageInit ( PIX *pixs, l_int32 thresh, l_int32 adaptive, l_int32 useboth, l_int32 check_columns, PIX **ppixb, L_DEWARPA **pdewa ); +LEPT_DLL extern l_int32 dewarpSinglePageRun ( PIX *pixs, PIX *pixb, L_DEWARPA *dewa, PIX **ppixd, l_int32 debug ); +LEPT_DLL extern l_int32 dewarpaListPages ( L_DEWARPA *dewa ); +LEPT_DLL extern l_int32 dewarpaSetValidModels ( L_DEWARPA *dewa, l_int32 notests, l_int32 debug ); +LEPT_DLL extern l_int32 dewarpaInsertRefModels ( L_DEWARPA *dewa, l_int32 notests, l_int32 debug ); +LEPT_DLL extern l_int32 dewarpaStripRefModels ( L_DEWARPA *dewa ); +LEPT_DLL extern l_int32 dewarpaRestoreModels ( L_DEWARPA *dewa ); +LEPT_DLL extern l_int32 dewarpaInfo ( FILE *fp, L_DEWARPA *dewa ); +LEPT_DLL extern l_int32 dewarpaModelStats ( L_DEWARPA *dewa, l_int32 *pnnone, l_int32 *pnvsuccess, l_int32 *pnvvalid, l_int32 *pnhsuccess, l_int32 *pnhvalid, l_int32 *pnref ); +LEPT_DLL extern l_int32 dewarpaShowArrays ( L_DEWARPA *dewa, l_float32 scalefact, l_int32 first, l_int32 last ); +LEPT_DLL extern l_int32 dewarpDebug ( L_DEWARP *dew, const char *subdirs, l_int32 index ); 
+LEPT_DLL extern l_int32 dewarpShowResults ( L_DEWARPA *dewa, SARRAY *sa, BOXA *boxa, l_int32 firstpage, l_int32 lastpage, const char *pdfout ); +LEPT_DLL extern L_DNA * l_dnaCreate ( l_int32 n ); +LEPT_DLL extern L_DNA * l_dnaCreateFromIArray ( l_int32 *iarray, l_int32 size ); +LEPT_DLL extern L_DNA * l_dnaCreateFromDArray ( l_float64 *darray, l_int32 size, l_int32 copyflag ); +LEPT_DLL extern L_DNA * l_dnaMakeSequence ( l_float64 startval, l_float64 increment, l_int32 size ); +LEPT_DLL extern void l_dnaDestroy ( L_DNA **pda ); +LEPT_DLL extern L_DNA * l_dnaCopy ( L_DNA *da ); +LEPT_DLL extern L_DNA * l_dnaClone ( L_DNA *da ); +LEPT_DLL extern l_int32 l_dnaEmpty ( L_DNA *da ); +LEPT_DLL extern l_int32 l_dnaAddNumber ( L_DNA *da, l_float64 val ); +LEPT_DLL extern l_int32 l_dnaInsertNumber ( L_DNA *da, l_int32 index, l_float64 val ); +LEPT_DLL extern l_int32 l_dnaRemoveNumber ( L_DNA *da, l_int32 index ); +LEPT_DLL extern l_int32 l_dnaReplaceNumber ( L_DNA *da, l_int32 index, l_float64 val ); +LEPT_DLL extern l_int32 l_dnaGetCount ( L_DNA *da ); +LEPT_DLL extern l_int32 l_dnaSetCount ( L_DNA *da, l_int32 newcount ); +LEPT_DLL extern l_int32 l_dnaGetDValue ( L_DNA *da, l_int32 index, l_float64 *pval ); +LEPT_DLL extern l_int32 l_dnaGetIValue ( L_DNA *da, l_int32 index, l_int32 *pival ); +LEPT_DLL extern l_int32 l_dnaSetValue ( L_DNA *da, l_int32 index, l_float64 val ); +LEPT_DLL extern l_int32 l_dnaShiftValue ( L_DNA *da, l_int32 index, l_float64 diff ); +LEPT_DLL extern l_int32 * l_dnaGetIArray ( L_DNA *da ); +LEPT_DLL extern l_float64 * l_dnaGetDArray ( L_DNA *da, l_int32 copyflag ); +LEPT_DLL extern l_int32 l_dnaGetRefcount ( L_DNA *da ); +LEPT_DLL extern l_int32 l_dnaChangeRefcount ( L_DNA *da, l_int32 delta ); +LEPT_DLL extern l_int32 l_dnaGetParameters ( L_DNA *da, l_float64 *pstartx, l_float64 *pdelx ); +LEPT_DLL extern l_int32 l_dnaSetParameters ( L_DNA *da, l_float64 startx, l_float64 delx ); +LEPT_DLL extern l_int32 l_dnaCopyParameters ( L_DNA *dad, L_DNA *das ); +LEPT_DLL extern L_DNA * l_dnaRead ( const char *filename ); +LEPT_DLL extern L_DNA * l_dnaReadStream ( FILE *fp ); +LEPT_DLL extern l_int32 l_dnaWrite ( const char *filename, L_DNA *da ); +LEPT_DLL extern l_int32 l_dnaWriteStream ( FILE *fp, L_DNA *da ); +LEPT_DLL extern L_DNAA * l_dnaaCreate ( l_int32 n ); +LEPT_DLL extern L_DNAA * l_dnaaCreateFull ( l_int32 nptr, l_int32 n ); +LEPT_DLL extern l_int32 l_dnaaTruncate ( L_DNAA *daa ); +LEPT_DLL extern void l_dnaaDestroy ( L_DNAA **pdaa ); +LEPT_DLL extern l_int32 l_dnaaAddDna ( L_DNAA *daa, L_DNA *da, l_int32 copyflag ); +LEPT_DLL extern l_int32 l_dnaaGetCount ( L_DNAA *daa ); +LEPT_DLL extern l_int32 l_dnaaGetDnaCount ( L_DNAA *daa, l_int32 index ); +LEPT_DLL extern l_int32 l_dnaaGetNumberCount ( L_DNAA *daa ); +LEPT_DLL extern L_DNA * l_dnaaGetDna ( L_DNAA *daa, l_int32 index, l_int32 accessflag ); +LEPT_DLL extern l_int32 l_dnaaReplaceDna ( L_DNAA *daa, l_int32 index, L_DNA *da ); +LEPT_DLL extern l_int32 l_dnaaGetValue ( L_DNAA *daa, l_int32 i, l_int32 j, l_float64 *pval ); +LEPT_DLL extern l_int32 l_dnaaAddNumber ( L_DNAA *daa, l_int32 index, l_float64 val ); +LEPT_DLL extern L_DNAA * l_dnaaRead ( const char *filename ); +LEPT_DLL extern L_DNAA * l_dnaaReadStream ( FILE *fp ); +LEPT_DLL extern l_int32 l_dnaaWrite ( const char *filename, L_DNAA *daa ); +LEPT_DLL extern l_int32 l_dnaaWriteStream ( FILE *fp, L_DNAA *daa ); +LEPT_DLL extern l_int32 l_dnaJoin ( L_DNA *dad, L_DNA *das, l_int32 istart, l_int32 iend ); +LEPT_DLL extern L_DNA * l_dnaaFlattenToDna ( L_DNAA *daa 
); +LEPT_DLL extern NUMA * l_dnaConvertToNuma ( L_DNA *da ); +LEPT_DLL extern L_DNA * numaConvertToDna ( NUMA *na ); +LEPT_DLL extern L_DNA * l_dnaUnionByAset ( L_DNA *da1, L_DNA *da2 ); +LEPT_DLL extern L_DNA * l_dnaRemoveDupsByAset ( L_DNA *das ); +LEPT_DLL extern L_DNA * l_dnaIntersectionByAset ( L_DNA *da1, L_DNA *da2 ); +LEPT_DLL extern L_ASET * l_asetCreateFromDna ( L_DNA *da ); +LEPT_DLL extern L_DNA * l_dnaDiffAdjValues ( L_DNA *das ); +LEPT_DLL extern L_DNAHASH * l_dnaHashCreate ( l_int32 nbuckets, l_int32 initsize ); +LEPT_DLL extern void l_dnaHashDestroy ( L_DNAHASH **pdahash ); +LEPT_DLL extern l_int32 l_dnaHashGetCount ( L_DNAHASH *dahash ); +LEPT_DLL extern l_int32 l_dnaHashGetTotalCount ( L_DNAHASH *dahash ); +LEPT_DLL extern L_DNA * l_dnaHashGetDna ( L_DNAHASH *dahash, l_uint64 key, l_int32 copyflag ); +LEPT_DLL extern l_int32 l_dnaHashAdd ( L_DNAHASH *dahash, l_uint64 key, l_float64 value ); +LEPT_DLL extern L_DNAHASH * l_dnaHashCreateFromDna ( L_DNA *da ); +LEPT_DLL extern l_int32 l_dnaRemoveDupsByHash ( L_DNA *das, L_DNA **pdad, L_DNAHASH **pdahash ); +LEPT_DLL extern l_int32 l_dnaMakeHistoByHash ( L_DNA *das, L_DNAHASH **pdahash, L_DNA **pdav, L_DNA **pdac ); +LEPT_DLL extern L_DNA * l_dnaIntersectionByHash ( L_DNA *da1, L_DNA *da2 ); +LEPT_DLL extern l_int32 l_dnaFindValByHash ( L_DNA *da, L_DNAHASH *dahash, l_float64 val, l_int32 *pindex ); +LEPT_DLL extern PIX * pixMorphDwa_2 ( PIX *pixd, PIX *pixs, l_int32 operation, char *selname ); +LEPT_DLL extern PIX * pixFMorphopGen_2 ( PIX *pixd, PIX *pixs, l_int32 operation, char *selname ); +LEPT_DLL extern l_int32 fmorphopgen_low_2 ( l_uint32 *datad, l_int32 w, l_int32 h, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_int32 index ); +LEPT_DLL extern PIX * pixSobelEdgeFilter ( PIX *pixs, l_int32 orientflag ); +LEPT_DLL extern PIX * pixTwoSidedEdgeFilter ( PIX *pixs, l_int32 orientflag ); +LEPT_DLL extern l_int32 pixMeasureEdgeSmoothness ( PIX *pixs, l_int32 side, l_int32 minjump, l_int32 minreversal, l_float32 *pjpl, l_float32 *pjspl, l_float32 *prpl, const char *debugfile ); +LEPT_DLL extern NUMA * pixGetEdgeProfile ( PIX *pixs, l_int32 side, const char *debugfile ); +LEPT_DLL extern l_int32 pixGetLastOffPixelInRun ( PIX *pixs, l_int32 x, l_int32 y, l_int32 direction, l_int32 *ploc ); +LEPT_DLL extern l_int32 pixGetLastOnPixelInRun ( PIX *pixs, l_int32 x, l_int32 y, l_int32 direction, l_int32 *ploc ); +LEPT_DLL extern char * encodeBase64 ( l_uint8 *inarray, l_int32 insize, l_int32 *poutsize ); +LEPT_DLL extern l_uint8 * decodeBase64 ( const char *inarray, l_int32 insize, l_int32 *poutsize ); +LEPT_DLL extern char * encodeAscii85 ( l_uint8 *inarray, l_int32 insize, l_int32 *poutsize ); +LEPT_DLL extern l_uint8 * decodeAscii85 ( char *inarray, l_int32 insize, l_int32 *poutsize ); +LEPT_DLL extern char * reformatPacked64 ( char *inarray, l_int32 insize, l_int32 leadspace, l_int32 linechars, l_int32 addquotes, l_int32 *poutsize ); +LEPT_DLL extern PIX * pixGammaTRC ( PIX *pixd, PIX *pixs, l_float32 gamma, l_int32 minval, l_int32 maxval ); +LEPT_DLL extern PIX * pixGammaTRCMasked ( PIX *pixd, PIX *pixs, PIX *pixm, l_float32 gamma, l_int32 minval, l_int32 maxval ); +LEPT_DLL extern PIX * pixGammaTRCWithAlpha ( PIX *pixd, PIX *pixs, l_float32 gamma, l_int32 minval, l_int32 maxval ); +LEPT_DLL extern NUMA * numaGammaTRC ( l_float32 gamma, l_int32 minval, l_int32 maxval ); +LEPT_DLL extern PIX * pixContrastTRC ( PIX *pixd, PIX *pixs, l_float32 factor ); +LEPT_DLL extern PIX * pixContrastTRCMasked ( PIX *pixd, PIX *pixs, PIX *pixm, 
l_float32 factor ); +LEPT_DLL extern NUMA * numaContrastTRC ( l_float32 factor ); +LEPT_DLL extern PIX * pixEqualizeTRC ( PIX *pixd, PIX *pixs, l_float32 fract, l_int32 factor ); +LEPT_DLL extern NUMA * numaEqualizeTRC ( PIX *pix, l_float32 fract, l_int32 factor ); +LEPT_DLL extern l_int32 pixTRCMap ( PIX *pixs, PIX *pixm, NUMA *na ); +LEPT_DLL extern PIX * pixUnsharpMasking ( PIX *pixs, l_int32 halfwidth, l_float32 fract ); +LEPT_DLL extern PIX * pixUnsharpMaskingGray ( PIX *pixs, l_int32 halfwidth, l_float32 fract ); +LEPT_DLL extern PIX * pixUnsharpMaskingFast ( PIX *pixs, l_int32 halfwidth, l_float32 fract, l_int32 direction ); +LEPT_DLL extern PIX * pixUnsharpMaskingGrayFast ( PIX *pixs, l_int32 halfwidth, l_float32 fract, l_int32 direction ); +LEPT_DLL extern PIX * pixUnsharpMaskingGray1D ( PIX *pixs, l_int32 halfwidth, l_float32 fract, l_int32 direction ); +LEPT_DLL extern PIX * pixUnsharpMaskingGray2D ( PIX *pixs, l_int32 halfwidth, l_float32 fract ); +LEPT_DLL extern PIX * pixModifyHue ( PIX *pixd, PIX *pixs, l_float32 fract ); +LEPT_DLL extern PIX * pixModifySaturation ( PIX *pixd, PIX *pixs, l_float32 fract ); +LEPT_DLL extern l_int32 pixMeasureSaturation ( PIX *pixs, l_int32 factor, l_float32 *psat ); +LEPT_DLL extern PIX * pixModifyBrightness ( PIX *pixd, PIX *pixs, l_float32 fract ); +LEPT_DLL extern PIX * pixColorShiftRGB ( PIX *pixs, l_float32 rfract, l_float32 gfract, l_float32 bfract ); +LEPT_DLL extern PIX * pixMultConstantColor ( PIX *pixs, l_float32 rfact, l_float32 gfact, l_float32 bfact ); +LEPT_DLL extern PIX * pixMultMatrixColor ( PIX *pixs, L_KERNEL *kel ); +LEPT_DLL extern PIX * pixHalfEdgeByBandpass ( PIX *pixs, l_int32 sm1h, l_int32 sm1v, l_int32 sm2h, l_int32 sm2v ); +LEPT_DLL extern l_int32 fhmtautogen ( SELA *sela, l_int32 fileindex, const char *filename ); +LEPT_DLL extern l_int32 fhmtautogen1 ( SELA *sela, l_int32 fileindex, const char *filename ); +LEPT_DLL extern l_int32 fhmtautogen2 ( SELA *sela, l_int32 fileindex, const char *filename ); +LEPT_DLL extern PIX * pixHMTDwa_1 ( PIX *pixd, PIX *pixs, const char *selname ); +LEPT_DLL extern PIX * pixFHMTGen_1 ( PIX *pixd, PIX *pixs, const char *selname ); +LEPT_DLL extern l_int32 fhmtgen_low_1 ( l_uint32 *datad, l_int32 w, l_int32 h, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_int32 index ); +LEPT_DLL extern l_int32 pixItalicWords ( PIX *pixs, BOXA *boxaw, PIX *pixw, BOXA **pboxa, l_int32 debugflag ); +LEPT_DLL extern l_int32 pixOrientDetect ( PIX *pixs, l_float32 *pupconf, l_float32 *pleftconf, l_int32 mincount, l_int32 debug ); +LEPT_DLL extern l_int32 makeOrientDecision ( l_float32 upconf, l_float32 leftconf, l_float32 minupconf, l_float32 minratio, l_int32 *porient, l_int32 debug ); +LEPT_DLL extern l_int32 pixUpDownDetect ( PIX *pixs, l_float32 *pconf, l_int32 mincount, l_int32 debug ); +LEPT_DLL extern l_int32 pixUpDownDetectGeneral ( PIX *pixs, l_float32 *pconf, l_int32 mincount, l_int32 npixels, l_int32 debug ); +LEPT_DLL extern l_int32 pixOrientDetectDwa ( PIX *pixs, l_float32 *pupconf, l_float32 *pleftconf, l_int32 mincount, l_int32 debug ); +LEPT_DLL extern l_int32 pixUpDownDetectDwa ( PIX *pixs, l_float32 *pconf, l_int32 mincount, l_int32 debug ); +LEPT_DLL extern l_int32 pixUpDownDetectGeneralDwa ( PIX *pixs, l_float32 *pconf, l_int32 mincount, l_int32 npixels, l_int32 debug ); +LEPT_DLL extern l_int32 pixMirrorDetect ( PIX *pixs, l_float32 *pconf, l_int32 mincount, l_int32 debug ); +LEPT_DLL extern l_int32 pixMirrorDetectDwa ( PIX *pixs, l_float32 *pconf, l_int32 mincount, l_int32 debug ); 
+LEPT_DLL extern PIX * pixFlipFHMTGen ( PIX *pixd, PIX *pixs, char *selname ); +LEPT_DLL extern l_int32 fmorphautogen ( SELA *sela, l_int32 fileindex, const char *filename ); +LEPT_DLL extern l_int32 fmorphautogen1 ( SELA *sela, l_int32 fileindex, const char *filename ); +LEPT_DLL extern l_int32 fmorphautogen2 ( SELA *sela, l_int32 fileindex, const char *filename ); +LEPT_DLL extern PIX * pixMorphDwa_1 ( PIX *pixd, PIX *pixs, l_int32 operation, char *selname ); +LEPT_DLL extern PIX * pixFMorphopGen_1 ( PIX *pixd, PIX *pixs, l_int32 operation, char *selname ); +LEPT_DLL extern l_int32 fmorphopgen_low_1 ( l_uint32 *datad, l_int32 w, l_int32 h, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_int32 index ); +LEPT_DLL extern FPIX * fpixCreate ( l_int32 width, l_int32 height ); +LEPT_DLL extern FPIX * fpixCreateTemplate ( FPIX *fpixs ); +LEPT_DLL extern FPIX * fpixClone ( FPIX *fpix ); +LEPT_DLL extern FPIX * fpixCopy ( FPIX *fpixd, FPIX *fpixs ); +LEPT_DLL extern l_int32 fpixResizeImageData ( FPIX *fpixd, FPIX *fpixs ); +LEPT_DLL extern void fpixDestroy ( FPIX **pfpix ); +LEPT_DLL extern l_int32 fpixGetDimensions ( FPIX *fpix, l_int32 *pw, l_int32 *ph ); +LEPT_DLL extern l_int32 fpixSetDimensions ( FPIX *fpix, l_int32 w, l_int32 h ); +LEPT_DLL extern l_int32 fpixGetWpl ( FPIX *fpix ); +LEPT_DLL extern l_int32 fpixSetWpl ( FPIX *fpix, l_int32 wpl ); +LEPT_DLL extern l_int32 fpixGetRefcount ( FPIX *fpix ); +LEPT_DLL extern l_int32 fpixChangeRefcount ( FPIX *fpix, l_int32 delta ); +LEPT_DLL extern l_int32 fpixGetResolution ( FPIX *fpix, l_int32 *pxres, l_int32 *pyres ); +LEPT_DLL extern l_int32 fpixSetResolution ( FPIX *fpix, l_int32 xres, l_int32 yres ); +LEPT_DLL extern l_int32 fpixCopyResolution ( FPIX *fpixd, FPIX *fpixs ); +LEPT_DLL extern l_float32 * fpixGetData ( FPIX *fpix ); +LEPT_DLL extern l_int32 fpixSetData ( FPIX *fpix, l_float32 *data ); +LEPT_DLL extern l_int32 fpixGetPixel ( FPIX *fpix, l_int32 x, l_int32 y, l_float32 *pval ); +LEPT_DLL extern l_int32 fpixSetPixel ( FPIX *fpix, l_int32 x, l_int32 y, l_float32 val ); +LEPT_DLL extern FPIXA * fpixaCreate ( l_int32 n ); +LEPT_DLL extern FPIXA * fpixaCopy ( FPIXA *fpixa, l_int32 copyflag ); +LEPT_DLL extern void fpixaDestroy ( FPIXA **pfpixa ); +LEPT_DLL extern l_int32 fpixaAddFPix ( FPIXA *fpixa, FPIX *fpix, l_int32 copyflag ); +LEPT_DLL extern l_int32 fpixaGetCount ( FPIXA *fpixa ); +LEPT_DLL extern l_int32 fpixaChangeRefcount ( FPIXA *fpixa, l_int32 delta ); +LEPT_DLL extern FPIX * fpixaGetFPix ( FPIXA *fpixa, l_int32 index, l_int32 accesstype ); +LEPT_DLL extern l_int32 fpixaGetFPixDimensions ( FPIXA *fpixa, l_int32 index, l_int32 *pw, l_int32 *ph ); +LEPT_DLL extern l_float32 * fpixaGetData ( FPIXA *fpixa, l_int32 index ); +LEPT_DLL extern l_int32 fpixaGetPixel ( FPIXA *fpixa, l_int32 index, l_int32 x, l_int32 y, l_float32 *pval ); +LEPT_DLL extern l_int32 fpixaSetPixel ( FPIXA *fpixa, l_int32 index, l_int32 x, l_int32 y, l_float32 val ); +LEPT_DLL extern DPIX * dpixCreate ( l_int32 width, l_int32 height ); +LEPT_DLL extern DPIX * dpixCreateTemplate ( DPIX *dpixs ); +LEPT_DLL extern DPIX * dpixClone ( DPIX *dpix ); +LEPT_DLL extern DPIX * dpixCopy ( DPIX *dpixd, DPIX *dpixs ); +LEPT_DLL extern l_int32 dpixResizeImageData ( DPIX *dpixd, DPIX *dpixs ); +LEPT_DLL extern void dpixDestroy ( DPIX **pdpix ); +LEPT_DLL extern l_int32 dpixGetDimensions ( DPIX *dpix, l_int32 *pw, l_int32 *ph ); +LEPT_DLL extern l_int32 dpixSetDimensions ( DPIX *dpix, l_int32 w, l_int32 h ); +LEPT_DLL extern l_int32 dpixGetWpl ( DPIX *dpix ); +LEPT_DLL 
extern l_int32 dpixSetWpl ( DPIX *dpix, l_int32 wpl ); +LEPT_DLL extern l_int32 dpixGetRefcount ( DPIX *dpix ); +LEPT_DLL extern l_int32 dpixChangeRefcount ( DPIX *dpix, l_int32 delta ); +LEPT_DLL extern l_int32 dpixGetResolution ( DPIX *dpix, l_int32 *pxres, l_int32 *pyres ); +LEPT_DLL extern l_int32 dpixSetResolution ( DPIX *dpix, l_int32 xres, l_int32 yres ); +LEPT_DLL extern l_int32 dpixCopyResolution ( DPIX *dpixd, DPIX *dpixs ); +LEPT_DLL extern l_float64 * dpixGetData ( DPIX *dpix ); +LEPT_DLL extern l_int32 dpixSetData ( DPIX *dpix, l_float64 *data ); +LEPT_DLL extern l_int32 dpixGetPixel ( DPIX *dpix, l_int32 x, l_int32 y, l_float64 *pval ); +LEPT_DLL extern l_int32 dpixSetPixel ( DPIX *dpix, l_int32 x, l_int32 y, l_float64 val ); +LEPT_DLL extern FPIX * fpixRead ( const char *filename ); +LEPT_DLL extern FPIX * fpixReadStream ( FILE *fp ); +LEPT_DLL extern FPIX * fpixReadMem ( const l_uint8 *data, size_t size ); +LEPT_DLL extern l_int32 fpixWrite ( const char *filename, FPIX *fpix ); +LEPT_DLL extern l_int32 fpixWriteStream ( FILE *fp, FPIX *fpix ); +LEPT_DLL extern l_int32 fpixWriteMem ( l_uint8 **pdata, size_t *psize, FPIX *fpix ); +LEPT_DLL extern FPIX * fpixEndianByteSwap ( FPIX *fpixd, FPIX *fpixs ); +LEPT_DLL extern DPIX * dpixRead ( const char *filename ); +LEPT_DLL extern DPIX * dpixReadStream ( FILE *fp ); +LEPT_DLL extern DPIX * dpixReadMem ( const l_uint8 *data, size_t size ); +LEPT_DLL extern l_int32 dpixWrite ( const char *filename, DPIX *dpix ); +LEPT_DLL extern l_int32 dpixWriteStream ( FILE *fp, DPIX *dpix ); +LEPT_DLL extern l_int32 dpixWriteMem ( l_uint8 **pdata, size_t *psize, DPIX *dpix ); +LEPT_DLL extern DPIX * dpixEndianByteSwap ( DPIX *dpixd, DPIX *dpixs ); +LEPT_DLL extern l_int32 fpixPrintStream ( FILE *fp, FPIX *fpix, l_int32 factor ); +LEPT_DLL extern FPIX * pixConvertToFPix ( PIX *pixs, l_int32 ncomps ); +LEPT_DLL extern DPIX * pixConvertToDPix ( PIX *pixs, l_int32 ncomps ); +LEPT_DLL extern PIX * fpixConvertToPix ( FPIX *fpixs, l_int32 outdepth, l_int32 negvals, l_int32 errorflag ); +LEPT_DLL extern PIX * fpixDisplayMaxDynamicRange ( FPIX *fpixs ); +LEPT_DLL extern DPIX * fpixConvertToDPix ( FPIX *fpix ); +LEPT_DLL extern PIX * dpixConvertToPix ( DPIX *dpixs, l_int32 outdepth, l_int32 negvals, l_int32 errorflag ); +LEPT_DLL extern FPIX * dpixConvertToFPix ( DPIX *dpix ); +LEPT_DLL extern l_int32 fpixGetMin ( FPIX *fpix, l_float32 *pminval, l_int32 *pxminloc, l_int32 *pyminloc ); +LEPT_DLL extern l_int32 fpixGetMax ( FPIX *fpix, l_float32 *pmaxval, l_int32 *pxmaxloc, l_int32 *pymaxloc ); +LEPT_DLL extern l_int32 dpixGetMin ( DPIX *dpix, l_float64 *pminval, l_int32 *pxminloc, l_int32 *pyminloc ); +LEPT_DLL extern l_int32 dpixGetMax ( DPIX *dpix, l_float64 *pmaxval, l_int32 *pxmaxloc, l_int32 *pymaxloc ); +LEPT_DLL extern FPIX * fpixScaleByInteger ( FPIX *fpixs, l_int32 factor ); +LEPT_DLL extern DPIX * dpixScaleByInteger ( DPIX *dpixs, l_int32 factor ); +LEPT_DLL extern FPIX * fpixLinearCombination ( FPIX *fpixd, FPIX *fpixs1, FPIX *fpixs2, l_float32 a, l_float32 b ); +LEPT_DLL extern l_int32 fpixAddMultConstant ( FPIX *fpix, l_float32 addc, l_float32 multc ); +LEPT_DLL extern DPIX * dpixLinearCombination ( DPIX *dpixd, DPIX *dpixs1, DPIX *dpixs2, l_float32 a, l_float32 b ); +LEPT_DLL extern l_int32 dpixAddMultConstant ( DPIX *dpix, l_float64 addc, l_float64 multc ); +LEPT_DLL extern l_int32 fpixSetAllArbitrary ( FPIX *fpix, l_float32 inval ); +LEPT_DLL extern l_int32 dpixSetAllArbitrary ( DPIX *dpix, l_float64 inval ); +LEPT_DLL extern FPIX * 
fpixAddBorder ( FPIX *fpixs, l_int32 left, l_int32 right, l_int32 top, l_int32 bot ); +LEPT_DLL extern FPIX * fpixRemoveBorder ( FPIX *fpixs, l_int32 left, l_int32 right, l_int32 top, l_int32 bot ); +LEPT_DLL extern FPIX * fpixAddMirroredBorder ( FPIX *fpixs, l_int32 left, l_int32 right, l_int32 top, l_int32 bot ); +LEPT_DLL extern FPIX * fpixAddContinuedBorder ( FPIX *fpixs, l_int32 left, l_int32 right, l_int32 top, l_int32 bot ); +LEPT_DLL extern FPIX * fpixAddSlopeBorder ( FPIX *fpixs, l_int32 left, l_int32 right, l_int32 top, l_int32 bot ); +LEPT_DLL extern l_int32 fpixRasterop ( FPIX *fpixd, l_int32 dx, l_int32 dy, l_int32 dw, l_int32 dh, FPIX *fpixs, l_int32 sx, l_int32 sy ); +LEPT_DLL extern FPIX * fpixRotateOrth ( FPIX *fpixs, l_int32 quads ); +LEPT_DLL extern FPIX * fpixRotate180 ( FPIX *fpixd, FPIX *fpixs ); +LEPT_DLL extern FPIX * fpixRotate90 ( FPIX *fpixs, l_int32 direction ); +LEPT_DLL extern FPIX * fpixFlipLR ( FPIX *fpixd, FPIX *fpixs ); +LEPT_DLL extern FPIX * fpixFlipTB ( FPIX *fpixd, FPIX *fpixs ); +LEPT_DLL extern FPIX * fpixAffinePta ( FPIX *fpixs, PTA *ptad, PTA *ptas, l_int32 border, l_float32 inval ); +LEPT_DLL extern FPIX * fpixAffine ( FPIX *fpixs, l_float32 *vc, l_float32 inval ); +LEPT_DLL extern FPIX * fpixProjectivePta ( FPIX *fpixs, PTA *ptad, PTA *ptas, l_int32 border, l_float32 inval ); +LEPT_DLL extern FPIX * fpixProjective ( FPIX *fpixs, l_float32 *vc, l_float32 inval ); +LEPT_DLL extern l_int32 linearInterpolatePixelFloat ( l_float32 *datas, l_int32 w, l_int32 h, l_float32 x, l_float32 y, l_float32 inval, l_float32 *pval ); +LEPT_DLL extern PIX * fpixThresholdToPix ( FPIX *fpix, l_float32 thresh ); +LEPT_DLL extern FPIX * pixComponentFunction ( PIX *pix, l_float32 rnum, l_float32 gnum, l_float32 bnum, l_float32 rdenom, l_float32 gdenom, l_float32 bdenom ); +LEPT_DLL extern PIX * pixReadStreamGif ( FILE *fp ); +LEPT_DLL extern l_int32 pixWriteStreamGif ( FILE *fp, PIX *pix ); +LEPT_DLL extern PIX * pixReadMemGif ( const l_uint8 *cdata, size_t size ); +LEPT_DLL extern l_int32 pixWriteMemGif ( l_uint8 **pdata, size_t *psize, PIX *pix ); +LEPT_DLL extern GPLOT * gplotCreate ( const char *rootname, l_int32 outformat, const char *title, const char *xlabel, const char *ylabel ); +LEPT_DLL extern void gplotDestroy ( GPLOT **pgplot ); +LEPT_DLL extern l_int32 gplotAddPlot ( GPLOT *gplot, NUMA *nax, NUMA *nay, l_int32 plotstyle, const char *plottitle ); +LEPT_DLL extern l_int32 gplotSetScaling ( GPLOT *gplot, l_int32 scaling ); +LEPT_DLL extern l_int32 gplotMakeOutput ( GPLOT *gplot ); +LEPT_DLL extern l_int32 gplotGenCommandFile ( GPLOT *gplot ); +LEPT_DLL extern l_int32 gplotGenDataFiles ( GPLOT *gplot ); +LEPT_DLL extern l_int32 gplotSimple1 ( NUMA *na, l_int32 outformat, const char *outroot, const char *title ); +LEPT_DLL extern l_int32 gplotSimple2 ( NUMA *na1, NUMA *na2, l_int32 outformat, const char *outroot, const char *title ); +LEPT_DLL extern l_int32 gplotSimpleN ( NUMAA *naa, l_int32 outformat, const char *outroot, const char *title ); +LEPT_DLL extern l_int32 gplotSimpleXY1 ( NUMA *nax, NUMA *nay, l_int32 plotstyle, l_int32 outformat, const char *outroot, const char *title ); +LEPT_DLL extern l_int32 gplotSimpleXY2 ( NUMA *nax, NUMA *nay1, NUMA *nay2, l_int32 plotstyle, l_int32 outformat, const char *outroot, const char *title ); +LEPT_DLL extern l_int32 gplotSimpleXYN ( NUMA *nax, NUMAA *naay, l_int32 plotstyle, l_int32 outformat, const char *outroot, const char *title ); +LEPT_DLL extern GPLOT * gplotRead ( const char *filename ); +LEPT_DLL extern 
l_int32 gplotWrite ( const char *filename, GPLOT *gplot ); +LEPT_DLL extern PTA * generatePtaLine ( l_int32 x1, l_int32 y1, l_int32 x2, l_int32 y2 ); +LEPT_DLL extern PTA * generatePtaWideLine ( l_int32 x1, l_int32 y1, l_int32 x2, l_int32 y2, l_int32 width ); +LEPT_DLL extern PTA * generatePtaBox ( BOX *box, l_int32 width ); +LEPT_DLL extern PTA * generatePtaBoxa ( BOXA *boxa, l_int32 width, l_int32 removedups ); +LEPT_DLL extern PTA * generatePtaHashBox ( BOX *box, l_int32 spacing, l_int32 width, l_int32 orient, l_int32 outline ); +LEPT_DLL extern PTA * generatePtaHashBoxa ( BOXA *boxa, l_int32 spacing, l_int32 width, l_int32 orient, l_int32 outline, l_int32 removedups ); +LEPT_DLL extern PTAA * generatePtaaBoxa ( BOXA *boxa ); +LEPT_DLL extern PTAA * generatePtaaHashBoxa ( BOXA *boxa, l_int32 spacing, l_int32 width, l_int32 orient, l_int32 outline ); +LEPT_DLL extern PTA * generatePtaPolyline ( PTA *ptas, l_int32 width, l_int32 closeflag, l_int32 removedups ); +LEPT_DLL extern PTA * generatePtaGrid ( l_int32 w, l_int32 h, l_int32 nx, l_int32 ny, l_int32 width ); +LEPT_DLL extern PTA * convertPtaLineTo4cc ( PTA *ptas ); +LEPT_DLL extern PTA * generatePtaFilledCircle ( l_int32 radius ); +LEPT_DLL extern PTA * generatePtaFilledSquare ( l_int32 side ); +LEPT_DLL extern PTA * generatePtaLineFromPt ( l_int32 x, l_int32 y, l_float64 length, l_float64 radang ); +LEPT_DLL extern l_int32 locatePtRadially ( l_int32 xr, l_int32 yr, l_float64 dist, l_float64 radang, l_float64 *px, l_float64 *py ); +LEPT_DLL extern l_int32 pixRenderPlotFromNuma ( PIX **ppix, NUMA *na, l_int32 plotloc, l_int32 linewidth, l_int32 max, l_uint32 color ); +LEPT_DLL extern PTA * makePlotPtaFromNuma ( NUMA *na, l_int32 size, l_int32 plotloc, l_int32 linewidth, l_int32 max ); +LEPT_DLL extern l_int32 pixRenderPlotFromNumaGen ( PIX **ppix, NUMA *na, l_int32 orient, l_int32 linewidth, l_int32 refpos, l_int32 max, l_int32 drawref, l_uint32 color ); +LEPT_DLL extern PTA * makePlotPtaFromNumaGen ( NUMA *na, l_int32 orient, l_int32 linewidth, l_int32 refpos, l_int32 max, l_int32 drawref ); +LEPT_DLL extern l_int32 pixRenderPta ( PIX *pix, PTA *pta, l_int32 op ); +LEPT_DLL extern l_int32 pixRenderPtaArb ( PIX *pix, PTA *pta, l_uint8 rval, l_uint8 gval, l_uint8 bval ); +LEPT_DLL extern l_int32 pixRenderPtaBlend ( PIX *pix, PTA *pta, l_uint8 rval, l_uint8 gval, l_uint8 bval, l_float32 fract ); +LEPT_DLL extern l_int32 pixRenderLine ( PIX *pix, l_int32 x1, l_int32 y1, l_int32 x2, l_int32 y2, l_int32 width, l_int32 op ); +LEPT_DLL extern l_int32 pixRenderLineArb ( PIX *pix, l_int32 x1, l_int32 y1, l_int32 x2, l_int32 y2, l_int32 width, l_uint8 rval, l_uint8 gval, l_uint8 bval ); +LEPT_DLL extern l_int32 pixRenderLineBlend ( PIX *pix, l_int32 x1, l_int32 y1, l_int32 x2, l_int32 y2, l_int32 width, l_uint8 rval, l_uint8 gval, l_uint8 bval, l_float32 fract ); +LEPT_DLL extern l_int32 pixRenderBox ( PIX *pix, BOX *box, l_int32 width, l_int32 op ); +LEPT_DLL extern l_int32 pixRenderBoxArb ( PIX *pix, BOX *box, l_int32 width, l_uint8 rval, l_uint8 gval, l_uint8 bval ); +LEPT_DLL extern l_int32 pixRenderBoxBlend ( PIX *pix, BOX *box, l_int32 width, l_uint8 rval, l_uint8 gval, l_uint8 bval, l_float32 fract ); +LEPT_DLL extern l_int32 pixRenderBoxa ( PIX *pix, BOXA *boxa, l_int32 width, l_int32 op ); +LEPT_DLL extern l_int32 pixRenderBoxaArb ( PIX *pix, BOXA *boxa, l_int32 width, l_uint8 rval, l_uint8 gval, l_uint8 bval ); +LEPT_DLL extern l_int32 pixRenderBoxaBlend ( PIX *pix, BOXA *boxa, l_int32 width, l_uint8 rval, l_uint8 gval, l_uint8 bval, 
l_float32 fract, l_int32 removedups ); +LEPT_DLL extern l_int32 pixRenderHashBox ( PIX *pix, BOX *box, l_int32 spacing, l_int32 width, l_int32 orient, l_int32 outline, l_int32 op ); +LEPT_DLL extern l_int32 pixRenderHashBoxArb ( PIX *pix, BOX *box, l_int32 spacing, l_int32 width, l_int32 orient, l_int32 outline, l_int32 rval, l_int32 gval, l_int32 bval ); +LEPT_DLL extern l_int32 pixRenderHashBoxBlend ( PIX *pix, BOX *box, l_int32 spacing, l_int32 width, l_int32 orient, l_int32 outline, l_int32 rval, l_int32 gval, l_int32 bval, l_float32 fract ); +LEPT_DLL extern l_int32 pixRenderHashMaskArb ( PIX *pix, PIX *pixm, l_int32 x, l_int32 y, l_int32 spacing, l_int32 width, l_int32 orient, l_int32 outline, l_int32 rval, l_int32 gval, l_int32 bval ); +LEPT_DLL extern l_int32 pixRenderHashBoxa ( PIX *pix, BOXA *boxa, l_int32 spacing, l_int32 width, l_int32 orient, l_int32 outline, l_int32 op ); +LEPT_DLL extern l_int32 pixRenderHashBoxaArb ( PIX *pix, BOXA *boxa, l_int32 spacing, l_int32 width, l_int32 orient, l_int32 outline, l_int32 rval, l_int32 gval, l_int32 bval ); +LEPT_DLL extern l_int32 pixRenderHashBoxaBlend ( PIX *pix, BOXA *boxa, l_int32 spacing, l_int32 width, l_int32 orient, l_int32 outline, l_int32 rval, l_int32 gval, l_int32 bval, l_float32 fract ); +LEPT_DLL extern l_int32 pixRenderPolyline ( PIX *pix, PTA *ptas, l_int32 width, l_int32 op, l_int32 closeflag ); +LEPT_DLL extern l_int32 pixRenderPolylineArb ( PIX *pix, PTA *ptas, l_int32 width, l_uint8 rval, l_uint8 gval, l_uint8 bval, l_int32 closeflag ); +LEPT_DLL extern l_int32 pixRenderPolylineBlend ( PIX *pix, PTA *ptas, l_int32 width, l_uint8 rval, l_uint8 gval, l_uint8 bval, l_float32 fract, l_int32 closeflag, l_int32 removedups ); +LEPT_DLL extern l_int32 pixRenderGridArb ( PIX *pix, l_int32 nx, l_int32 ny, l_int32 width, l_uint8 rval, l_uint8 gval, l_uint8 bval ); +LEPT_DLL extern PIX * pixRenderRandomCmapPtaa ( PIX *pix, PTAA *ptaa, l_int32 polyflag, l_int32 width, l_int32 closeflag ); +LEPT_DLL extern PIX * pixRenderPolygon ( PTA *ptas, l_int32 width, l_int32 *pxmin, l_int32 *pymin ); +LEPT_DLL extern PIX * pixFillPolygon ( PIX *pixs, PTA *pta, l_int32 xmin, l_int32 ymin ); +LEPT_DLL extern PIX * pixRenderContours ( PIX *pixs, l_int32 startval, l_int32 incr, l_int32 outdepth ); +LEPT_DLL extern PIX * fpixAutoRenderContours ( FPIX *fpix, l_int32 ncontours ); +LEPT_DLL extern PIX * fpixRenderContours ( FPIX *fpixs, l_float32 incr, l_float32 proxim ); +LEPT_DLL extern PTA * pixGeneratePtaBoundary ( PIX *pixs, l_int32 width ); +LEPT_DLL extern PIX * pixErodeGray ( PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixDilateGray ( PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixOpenGray ( PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixCloseGray ( PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixErodeGray3 ( PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixDilateGray3 ( PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixOpenGray3 ( PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixCloseGray3 ( PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixDitherToBinary ( PIX *pixs ); +LEPT_DLL extern PIX * pixDitherToBinarySpec ( PIX *pixs, l_int32 lowerclip, l_int32 upperclip ); +LEPT_DLL extern PIX * pixThresholdToBinary ( PIX *pixs, l_int32 thresh ); +LEPT_DLL extern PIX * pixVarThresholdToBinary ( PIX *pixs, PIX *pixg ); +LEPT_DLL extern PIX * pixAdaptThresholdToBinary ( PIX *pixs, PIX 
*pixm, l_float32 gamma ); +LEPT_DLL extern PIX * pixAdaptThresholdToBinaryGen ( PIX *pixs, PIX *pixm, l_float32 gamma, l_int32 blackval, l_int32 whiteval, l_int32 thresh ); +LEPT_DLL extern PIX * pixGenerateMaskByValue ( PIX *pixs, l_int32 val, l_int32 usecmap ); +LEPT_DLL extern PIX * pixGenerateMaskByBand ( PIX *pixs, l_int32 lower, l_int32 upper, l_int32 inband, l_int32 usecmap ); +LEPT_DLL extern PIX * pixDitherTo2bpp ( PIX *pixs, l_int32 cmapflag ); +LEPT_DLL extern PIX * pixDitherTo2bppSpec ( PIX *pixs, l_int32 lowerclip, l_int32 upperclip, l_int32 cmapflag ); +LEPT_DLL extern PIX * pixThresholdTo2bpp ( PIX *pixs, l_int32 nlevels, l_int32 cmapflag ); +LEPT_DLL extern PIX * pixThresholdTo4bpp ( PIX *pixs, l_int32 nlevels, l_int32 cmapflag ); +LEPT_DLL extern PIX * pixThresholdOn8bpp ( PIX *pixs, l_int32 nlevels, l_int32 cmapflag ); +LEPT_DLL extern PIX * pixThresholdGrayArb ( PIX *pixs, const char *edgevals, l_int32 outdepth, l_int32 use_average, l_int32 setblack, l_int32 setwhite ); +LEPT_DLL extern l_int32 * makeGrayQuantIndexTable ( l_int32 nlevels ); +LEPT_DLL extern l_int32 * makeGrayQuantTargetTable ( l_int32 nlevels, l_int32 depth ); +LEPT_DLL extern l_int32 makeGrayQuantTableArb ( NUMA *na, l_int32 outdepth, l_int32 **ptab, PIXCMAP **pcmap ); +LEPT_DLL extern l_int32 makeGrayQuantColormapArb ( PIX *pixs, l_int32 *tab, l_int32 outdepth, PIXCMAP **pcmap ); +LEPT_DLL extern PIX * pixGenerateMaskByBand32 ( PIX *pixs, l_uint32 refval, l_int32 delm, l_int32 delp, l_float32 fractm, l_float32 fractp ); +LEPT_DLL extern PIX * pixGenerateMaskByDiscr32 ( PIX *pixs, l_uint32 refval1, l_uint32 refval2, l_int32 distflag ); +LEPT_DLL extern PIX * pixGrayQuantFromHisto ( PIX *pixd, PIX *pixs, PIX *pixm, l_float32 minfract, l_int32 maxsize ); +LEPT_DLL extern PIX * pixGrayQuantFromCmap ( PIX *pixs, PIXCMAP *cmap, l_int32 mindepth ); +LEPT_DLL extern void ditherToBinaryLow ( l_uint32 *datad, l_int32 w, l_int32 h, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_uint32 *bufs1, l_uint32 *bufs2, l_int32 lowerclip, l_int32 upperclip ); +LEPT_DLL extern void ditherToBinaryLineLow ( l_uint32 *lined, l_int32 w, l_uint32 *bufs1, l_uint32 *bufs2, l_int32 lowerclip, l_int32 upperclip, l_int32 lastlineflag ); +LEPT_DLL extern void thresholdToBinaryLow ( l_uint32 *datad, l_int32 w, l_int32 h, l_int32 wpld, l_uint32 *datas, l_int32 d, l_int32 wpls, l_int32 thresh ); +LEPT_DLL extern void thresholdToBinaryLineLow ( l_uint32 *lined, l_int32 w, l_uint32 *lines, l_int32 d, l_int32 thresh ); +LEPT_DLL extern void ditherTo2bppLow ( l_uint32 *datad, l_int32 w, l_int32 h, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_uint32 *bufs1, l_uint32 *bufs2, l_int32 *tabval, l_int32 *tab38, l_int32 *tab14 ); +LEPT_DLL extern void ditherTo2bppLineLow ( l_uint32 *lined, l_int32 w, l_uint32 *bufs1, l_uint32 *bufs2, l_int32 *tabval, l_int32 *tab38, l_int32 *tab14, l_int32 lastlineflag ); +LEPT_DLL extern l_int32 make8To2DitherTables ( l_int32 **ptabval, l_int32 **ptab38, l_int32 **ptab14, l_int32 cliptoblack, l_int32 cliptowhite ); +LEPT_DLL extern void thresholdTo2bppLow ( l_uint32 *datad, l_int32 h, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_int32 *tab ); +LEPT_DLL extern void thresholdTo4bppLow ( l_uint32 *datad, l_int32 h, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_int32 *tab ); +LEPT_DLL extern L_HEAP * lheapCreate ( l_int32 nalloc, l_int32 direction ); +LEPT_DLL extern void lheapDestroy ( L_HEAP **plh, l_int32 freeflag ); +LEPT_DLL extern l_int32 lheapAdd ( L_HEAP *lh, void *item ); +LEPT_DLL extern void * 
lheapRemove ( L_HEAP *lh ); +LEPT_DLL extern l_int32 lheapGetCount ( L_HEAP *lh ); +LEPT_DLL extern l_int32 lheapSwapUp ( L_HEAP *lh, l_int32 index ); +LEPT_DLL extern l_int32 lheapSwapDown ( L_HEAP *lh ); +LEPT_DLL extern l_int32 lheapSort ( L_HEAP *lh ); +LEPT_DLL extern l_int32 lheapSortStrictOrder ( L_HEAP *lh ); +LEPT_DLL extern l_int32 lheapPrint ( FILE *fp, L_HEAP *lh ); +LEPT_DLL extern JBCLASSER * jbRankHausInit ( l_int32 components, l_int32 maxwidth, l_int32 maxheight, l_int32 size, l_float32 rank ); +LEPT_DLL extern JBCLASSER * jbCorrelationInit ( l_int32 components, l_int32 maxwidth, l_int32 maxheight, l_float32 thresh, l_float32 weightfactor ); +LEPT_DLL extern JBCLASSER * jbCorrelationInitWithoutComponents ( l_int32 components, l_int32 maxwidth, l_int32 maxheight, l_float32 thresh, l_float32 weightfactor ); +LEPT_DLL extern l_int32 jbAddPages ( JBCLASSER *classer, SARRAY *safiles ); +LEPT_DLL extern l_int32 jbAddPage ( JBCLASSER *classer, PIX *pixs ); +LEPT_DLL extern l_int32 jbAddPageComponents ( JBCLASSER *classer, PIX *pixs, BOXA *boxas, PIXA *pixas ); +LEPT_DLL extern l_int32 jbClassifyRankHaus ( JBCLASSER *classer, BOXA *boxa, PIXA *pixas ); +LEPT_DLL extern l_int32 pixHaustest ( PIX *pix1, PIX *pix2, PIX *pix3, PIX *pix4, l_float32 delx, l_float32 dely, l_int32 maxdiffw, l_int32 maxdiffh ); +LEPT_DLL extern l_int32 pixRankHaustest ( PIX *pix1, PIX *pix2, PIX *pix3, PIX *pix4, l_float32 delx, l_float32 dely, l_int32 maxdiffw, l_int32 maxdiffh, l_int32 area1, l_int32 area3, l_float32 rank, l_int32 *tab8 ); +LEPT_DLL extern l_int32 jbClassifyCorrelation ( JBCLASSER *classer, BOXA *boxa, PIXA *pixas ); +LEPT_DLL extern l_int32 jbGetComponents ( PIX *pixs, l_int32 components, l_int32 maxwidth, l_int32 maxheight, BOXA **pboxad, PIXA **ppixad ); +LEPT_DLL extern l_int32 pixWordMaskByDilation ( PIX *pixs, l_int32 maxdil, PIX **ppixm, l_int32 *psize ); +LEPT_DLL extern l_int32 pixWordBoxesByDilation ( PIX *pixs, l_int32 maxdil, l_int32 minwidth, l_int32 minheight, l_int32 maxwidth, l_int32 maxheight, BOXA **pboxa, l_int32 *psize ); +LEPT_DLL extern PIXA * jbAccumulateComposites ( PIXAA *pixaa, NUMA **pna, PTA **pptat ); +LEPT_DLL extern PIXA * jbTemplatesFromComposites ( PIXA *pixac, NUMA *na ); +LEPT_DLL extern JBCLASSER * jbClasserCreate ( l_int32 method, l_int32 components ); +LEPT_DLL extern void jbClasserDestroy ( JBCLASSER **pclasser ); +LEPT_DLL extern JBDATA * jbDataSave ( JBCLASSER *classer ); +LEPT_DLL extern void jbDataDestroy ( JBDATA **pdata ); +LEPT_DLL extern l_int32 jbDataWrite ( const char *rootout, JBDATA *jbdata ); +LEPT_DLL extern JBDATA * jbDataRead ( const char *rootname ); +LEPT_DLL extern PIXA * jbDataRender ( JBDATA *data, l_int32 debugflag ); +LEPT_DLL extern l_int32 jbGetULCorners ( JBCLASSER *classer, PIX *pixs, BOXA *boxa ); +LEPT_DLL extern l_int32 jbGetLLCorners ( JBCLASSER *classer ); +LEPT_DLL extern l_int32 readHeaderJp2k ( const char *filename, l_int32 *pw, l_int32 *ph, l_int32 *pbps, l_int32 *pspp ); +LEPT_DLL extern l_int32 freadHeaderJp2k ( FILE *fp, l_int32 *pw, l_int32 *ph, l_int32 *pbps, l_int32 *pspp ); +LEPT_DLL extern l_int32 readHeaderMemJp2k ( const l_uint8 *data, size_t size, l_int32 *pw, l_int32 *ph, l_int32 *pbps, l_int32 *pspp ); +LEPT_DLL extern l_int32 fgetJp2kResolution ( FILE *fp, l_int32 *pxres, l_int32 *pyres ); +LEPT_DLL extern PIX * pixReadJp2k ( const char *filename, l_uint32 reduction, BOX *box, l_int32 hint, l_int32 debug ); +LEPT_DLL extern PIX * pixReadStreamJp2k ( FILE *fp, l_uint32 reduction, BOX *box, l_int32 
hint, l_int32 debug ); +LEPT_DLL extern l_int32 pixWriteJp2k ( const char *filename, PIX *pix, l_int32 quality, l_int32 nlevels, l_int32 hint, l_int32 debug ); +LEPT_DLL extern l_int32 pixWriteStreamJp2k ( FILE *fp, PIX *pix, l_int32 quality, l_int32 nlevels, l_int32 hint, l_int32 debug ); +LEPT_DLL extern PIX * pixReadMemJp2k ( const l_uint8 *data, size_t size, l_uint32 reduction, BOX *box, l_int32 hint, l_int32 debug ); +LEPT_DLL extern l_int32 pixWriteMemJp2k ( l_uint8 **pdata, size_t *psize, PIX *pix, l_int32 quality, l_int32 nlevels, l_int32 hint, l_int32 debug ); +LEPT_DLL extern PIX * pixReadJpeg ( const char *filename, l_int32 cmapflag, l_int32 reduction, l_int32 *pnwarn, l_int32 hint ); +LEPT_DLL extern PIX * pixReadStreamJpeg ( FILE *fp, l_int32 cmapflag, l_int32 reduction, l_int32 *pnwarn, l_int32 hint ); +LEPT_DLL extern l_int32 readHeaderJpeg ( const char *filename, l_int32 *pw, l_int32 *ph, l_int32 *pspp, l_int32 *pycck, l_int32 *pcmyk ); +LEPT_DLL extern l_int32 freadHeaderJpeg ( FILE *fp, l_int32 *pw, l_int32 *ph, l_int32 *pspp, l_int32 *pycck, l_int32 *pcmyk ); +LEPT_DLL extern l_int32 fgetJpegResolution ( FILE *fp, l_int32 *pxres, l_int32 *pyres ); +LEPT_DLL extern l_int32 fgetJpegComment ( FILE *fp, l_uint8 **pcomment ); +LEPT_DLL extern l_int32 pixWriteJpeg ( const char *filename, PIX *pix, l_int32 quality, l_int32 progressive ); +LEPT_DLL extern l_int32 pixWriteStreamJpeg ( FILE *fp, PIX *pixs, l_int32 quality, l_int32 progressive ); +LEPT_DLL extern PIX * pixReadMemJpeg ( const l_uint8 *data, size_t size, l_int32 cmflag, l_int32 reduction, l_int32 *pnwarn, l_int32 hint ); +LEPT_DLL extern l_int32 readHeaderMemJpeg ( const l_uint8 *data, size_t size, l_int32 *pw, l_int32 *ph, l_int32 *pspp, l_int32 *pycck, l_int32 *pcmyk ); +LEPT_DLL extern l_int32 pixWriteMemJpeg ( l_uint8 **pdata, size_t *psize, PIX *pix, l_int32 quality, l_int32 progressive ); +LEPT_DLL extern l_int32 pixSetChromaSampling ( PIX *pix, l_int32 sampling ); +LEPT_DLL extern L_KERNEL * kernelCreate ( l_int32 height, l_int32 width ); +LEPT_DLL extern void kernelDestroy ( L_KERNEL **pkel ); +LEPT_DLL extern L_KERNEL * kernelCopy ( L_KERNEL *kels ); +LEPT_DLL extern l_int32 kernelGetElement ( L_KERNEL *kel, l_int32 row, l_int32 col, l_float32 *pval ); +LEPT_DLL extern l_int32 kernelSetElement ( L_KERNEL *kel, l_int32 row, l_int32 col, l_float32 val ); +LEPT_DLL extern l_int32 kernelGetParameters ( L_KERNEL *kel, l_int32 *psy, l_int32 *psx, l_int32 *pcy, l_int32 *pcx ); +LEPT_DLL extern l_int32 kernelSetOrigin ( L_KERNEL *kel, l_int32 cy, l_int32 cx ); +LEPT_DLL extern l_int32 kernelGetSum ( L_KERNEL *kel, l_float32 *psum ); +LEPT_DLL extern l_int32 kernelGetMinMax ( L_KERNEL *kel, l_float32 *pmin, l_float32 *pmax ); +LEPT_DLL extern L_KERNEL * kernelNormalize ( L_KERNEL *kels, l_float32 normsum ); +LEPT_DLL extern L_KERNEL * kernelInvert ( L_KERNEL *kels ); +LEPT_DLL extern l_float32 ** create2dFloatArray ( l_int32 sy, l_int32 sx ); +LEPT_DLL extern L_KERNEL * kernelRead ( const char *fname ); +LEPT_DLL extern L_KERNEL * kernelReadStream ( FILE *fp ); +LEPT_DLL extern l_int32 kernelWrite ( const char *fname, L_KERNEL *kel ); +LEPT_DLL extern l_int32 kernelWriteStream ( FILE *fp, L_KERNEL *kel ); +LEPT_DLL extern L_KERNEL * kernelCreateFromString ( l_int32 h, l_int32 w, l_int32 cy, l_int32 cx, const char *kdata ); +LEPT_DLL extern L_KERNEL * kernelCreateFromFile ( const char *filename ); +LEPT_DLL extern L_KERNEL * kernelCreateFromPix ( PIX *pix, l_int32 cy, l_int32 cx ); +LEPT_DLL extern PIX * 
kernelDisplayInPix ( L_KERNEL *kel, l_int32 size, l_int32 gthick ); +LEPT_DLL extern NUMA * parseStringForNumbers ( const char *str, const char *seps ); +LEPT_DLL extern L_KERNEL * makeFlatKernel ( l_int32 height, l_int32 width, l_int32 cy, l_int32 cx ); +LEPT_DLL extern L_KERNEL * makeGaussianKernel ( l_int32 halfheight, l_int32 halfwidth, l_float32 stdev, l_float32 max ); +LEPT_DLL extern l_int32 makeGaussianKernelSep ( l_int32 halfheight, l_int32 halfwidth, l_float32 stdev, l_float32 max, L_KERNEL **pkelx, L_KERNEL **pkely ); +LEPT_DLL extern L_KERNEL * makeDoGKernel ( l_int32 halfheight, l_int32 halfwidth, l_float32 stdev, l_float32 ratio ); +LEPT_DLL extern char * getImagelibVersions ( ); +LEPT_DLL extern void listDestroy ( DLLIST **phead ); +LEPT_DLL extern l_int32 listAddToHead ( DLLIST **phead, void *data ); +LEPT_DLL extern l_int32 listAddToTail ( DLLIST **phead, DLLIST **ptail, void *data ); +LEPT_DLL extern l_int32 listInsertBefore ( DLLIST **phead, DLLIST *elem, void *data ); +LEPT_DLL extern l_int32 listInsertAfter ( DLLIST **phead, DLLIST *elem, void *data ); +LEPT_DLL extern void * listRemoveElement ( DLLIST **phead, DLLIST *elem ); +LEPT_DLL extern void * listRemoveFromHead ( DLLIST **phead ); +LEPT_DLL extern void * listRemoveFromTail ( DLLIST **phead, DLLIST **ptail ); +LEPT_DLL extern DLLIST * listFindElement ( DLLIST *head, void *data ); +LEPT_DLL extern DLLIST * listFindTail ( DLLIST *head ); +LEPT_DLL extern l_int32 listGetCount ( DLLIST *head ); +LEPT_DLL extern l_int32 listReverse ( DLLIST **phead ); +LEPT_DLL extern l_int32 listJoin ( DLLIST **phead1, DLLIST **phead2 ); +LEPT_DLL extern L_AMAP * l_amapCreate ( l_int32 keytype ); +LEPT_DLL extern RB_TYPE * l_amapFind ( L_AMAP *m, RB_TYPE key ); +LEPT_DLL extern void l_amapInsert ( L_AMAP *m, RB_TYPE key, RB_TYPE value ); +LEPT_DLL extern void l_amapDelete ( L_AMAP *m, RB_TYPE key ); +LEPT_DLL extern void l_amapDestroy ( L_AMAP **pm ); +LEPT_DLL extern L_AMAP_NODE * l_amapGetFirst ( L_AMAP *m ); +LEPT_DLL extern L_AMAP_NODE * l_amapGetNext ( L_AMAP_NODE *n ); +LEPT_DLL extern L_AMAP_NODE * l_amapGetLast ( L_AMAP *m ); +LEPT_DLL extern L_AMAP_NODE * l_amapGetPrev ( L_AMAP_NODE *n ); +LEPT_DLL extern l_int32 l_amapSize ( L_AMAP *m ); +LEPT_DLL extern L_ASET * l_asetCreate ( l_int32 keytype ); +LEPT_DLL extern RB_TYPE * l_asetFind ( L_ASET *s, RB_TYPE key ); +LEPT_DLL extern void l_asetInsert ( L_ASET *s, RB_TYPE key ); +LEPT_DLL extern void l_asetDelete ( L_ASET *s, RB_TYPE key ); +LEPT_DLL extern void l_asetDestroy ( L_ASET **ps ); +LEPT_DLL extern L_ASET_NODE * l_asetGetFirst ( L_ASET *s ); +LEPT_DLL extern L_ASET_NODE * l_asetGetNext ( L_ASET_NODE *n ); +LEPT_DLL extern L_ASET_NODE * l_asetGetLast ( L_ASET *s ); +LEPT_DLL extern L_ASET_NODE * l_asetGetPrev ( L_ASET_NODE *n ); +LEPT_DLL extern l_int32 l_asetSize ( L_ASET *s ); +LEPT_DLL extern PIX * generateBinaryMaze ( l_int32 w, l_int32 h, l_int32 xi, l_int32 yi, l_float32 wallps, l_float32 ranis ); +LEPT_DLL extern PTA * pixSearchBinaryMaze ( PIX *pixs, l_int32 xi, l_int32 yi, l_int32 xf, l_int32 yf, PIX **ppixd ); +LEPT_DLL extern PTA * pixSearchGrayMaze ( PIX *pixs, l_int32 xi, l_int32 yi, l_int32 xf, l_int32 yf, PIX **ppixd ); +LEPT_DLL extern PIX * pixDilate ( PIX *pixd, PIX *pixs, SEL *sel ); +LEPT_DLL extern PIX * pixErode ( PIX *pixd, PIX *pixs, SEL *sel ); +LEPT_DLL extern PIX * pixHMT ( PIX *pixd, PIX *pixs, SEL *sel ); +LEPT_DLL extern PIX * pixOpen ( PIX *pixd, PIX *pixs, SEL *sel ); +LEPT_DLL extern PIX * pixClose ( PIX *pixd, PIX *pixs, SEL *sel ); 
+LEPT_DLL extern PIX * pixCloseSafe ( PIX *pixd, PIX *pixs, SEL *sel ); +LEPT_DLL extern PIX * pixOpenGeneralized ( PIX *pixd, PIX *pixs, SEL *sel ); +LEPT_DLL extern PIX * pixCloseGeneralized ( PIX *pixd, PIX *pixs, SEL *sel ); +LEPT_DLL extern PIX * pixDilateBrick ( PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixErodeBrick ( PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixOpenBrick ( PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixCloseBrick ( PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixCloseSafeBrick ( PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern l_int32 selectComposableSels ( l_int32 size, l_int32 direction, SEL **psel1, SEL **psel2 ); +LEPT_DLL extern l_int32 selectComposableSizes ( l_int32 size, l_int32 *pfactor1, l_int32 *pfactor2 ); +LEPT_DLL extern PIX * pixDilateCompBrick ( PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixErodeCompBrick ( PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixOpenCompBrick ( PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixCloseCompBrick ( PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixCloseSafeCompBrick ( PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern void resetMorphBoundaryCondition ( l_int32 bc ); +LEPT_DLL extern l_uint32 getMorphBorderPixelColor ( l_int32 type, l_int32 depth ); +LEPT_DLL extern PIX * pixExtractBoundary ( PIX *pixs, l_int32 type ); +LEPT_DLL extern PIX * pixMorphSequenceMasked ( PIX *pixs, PIX *pixm, const char *sequence, l_int32 dispsep ); +LEPT_DLL extern PIX * pixMorphSequenceByComponent ( PIX *pixs, const char *sequence, l_int32 connectivity, l_int32 minw, l_int32 minh, BOXA **pboxa ); +LEPT_DLL extern PIXA * pixaMorphSequenceByComponent ( PIXA *pixas, const char *sequence, l_int32 minw, l_int32 minh ); +LEPT_DLL extern PIX * pixMorphSequenceByRegion ( PIX *pixs, PIX *pixm, const char *sequence, l_int32 connectivity, l_int32 minw, l_int32 minh, BOXA **pboxa ); +LEPT_DLL extern PIXA * pixaMorphSequenceByRegion ( PIX *pixs, PIXA *pixam, const char *sequence, l_int32 minw, l_int32 minh ); +LEPT_DLL extern PIX * pixUnionOfMorphOps ( PIX *pixs, SELA *sela, l_int32 type ); +LEPT_DLL extern PIX * pixIntersectionOfMorphOps ( PIX *pixs, SELA *sela, l_int32 type ); +LEPT_DLL extern PIX * pixSelectiveConnCompFill ( PIX *pixs, l_int32 connectivity, l_int32 minw, l_int32 minh ); +LEPT_DLL extern l_int32 pixRemoveMatchedPattern ( PIX *pixs, PIX *pixp, PIX *pixe, l_int32 x0, l_int32 y0, l_int32 dsize ); +LEPT_DLL extern PIX * pixDisplayMatchedPattern ( PIX *pixs, PIX *pixp, PIX *pixe, l_int32 x0, l_int32 y0, l_uint32 color, l_float32 scale, l_int32 nlevels ); +LEPT_DLL extern PIXA * pixaExtendByMorph ( PIXA *pixas, l_int32 type, l_int32 niters, SEL *sel, l_int32 include ); +LEPT_DLL extern PIXA * pixaExtendByScaling ( PIXA *pixas, NUMA *nasc, l_int32 type, l_int32 include ); +LEPT_DLL extern PIX * pixSeedfillMorph ( PIX *pixs, PIX *pixm, l_int32 maxiters, l_int32 connectivity ); +LEPT_DLL extern NUMA * pixRunHistogramMorph ( PIX *pixs, l_int32 runtype, l_int32 direction, l_int32 maxsize ); +LEPT_DLL extern PIX * pixTophat ( PIX *pixs, l_int32 hsize, l_int32 vsize, l_int32 type ); +LEPT_DLL extern PIX * pixHDome ( PIX *pixs, l_int32 height, l_int32 connectivity ); +LEPT_DLL extern PIX * pixFastTophat ( PIX *pixs, l_int32 xsize, l_int32 
ysize, l_int32 type ); +LEPT_DLL extern PIX * pixMorphGradient ( PIX *pixs, l_int32 hsize, l_int32 vsize, l_int32 smoothing ); +LEPT_DLL extern PTA * pixaCentroids ( PIXA *pixa ); +LEPT_DLL extern l_int32 pixCentroid ( PIX *pix, l_int32 *centtab, l_int32 *sumtab, l_float32 *pxave, l_float32 *pyave ); +LEPT_DLL extern PIX * pixDilateBrickDwa ( PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixErodeBrickDwa ( PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixOpenBrickDwa ( PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixCloseBrickDwa ( PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixDilateCompBrickDwa ( PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixErodeCompBrickDwa ( PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixOpenCompBrickDwa ( PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixCloseCompBrickDwa ( PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixDilateCompBrickExtendDwa ( PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixErodeCompBrickExtendDwa ( PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixOpenCompBrickExtendDwa ( PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern PIX * pixCloseCompBrickExtendDwa ( PIX *pixd, PIX *pixs, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern l_int32 getExtendedCompositeParameters ( l_int32 size, l_int32 *pn, l_int32 *pextra, l_int32 *pactualsize ); +LEPT_DLL extern PIX * pixMorphSequence ( PIX *pixs, const char *sequence, l_int32 dispsep ); +LEPT_DLL extern PIX * pixMorphCompSequence ( PIX *pixs, const char *sequence, l_int32 dispsep ); +LEPT_DLL extern PIX * pixMorphSequenceDwa ( PIX *pixs, const char *sequence, l_int32 dispsep ); +LEPT_DLL extern PIX * pixMorphCompSequenceDwa ( PIX *pixs, const char *sequence, l_int32 dispsep ); +LEPT_DLL extern l_int32 morphSequenceVerify ( SARRAY *sa ); +LEPT_DLL extern PIX * pixGrayMorphSequence ( PIX *pixs, const char *sequence, l_int32 dispsep, l_int32 dispy ); +LEPT_DLL extern PIX * pixColorMorphSequence ( PIX *pixs, const char *sequence, l_int32 dispsep, l_int32 dispy ); +LEPT_DLL extern NUMA * numaCreate ( l_int32 n ); +LEPT_DLL extern NUMA * numaCreateFromIArray ( l_int32 *iarray, l_int32 size ); +LEPT_DLL extern NUMA * numaCreateFromFArray ( l_float32 *farray, l_int32 size, l_int32 copyflag ); +LEPT_DLL extern NUMA * numaCreateFromString ( const char *str ); +LEPT_DLL extern void numaDestroy ( NUMA **pna ); +LEPT_DLL extern NUMA * numaCopy ( NUMA *na ); +LEPT_DLL extern NUMA * numaClone ( NUMA *na ); +LEPT_DLL extern l_int32 numaEmpty ( NUMA *na ); +LEPT_DLL extern l_int32 numaAddNumber ( NUMA *na, l_float32 val ); +LEPT_DLL extern l_int32 numaInsertNumber ( NUMA *na, l_int32 index, l_float32 val ); +LEPT_DLL extern l_int32 numaRemoveNumber ( NUMA *na, l_int32 index ); +LEPT_DLL extern l_int32 numaReplaceNumber ( NUMA *na, l_int32 index, l_float32 val ); +LEPT_DLL extern l_int32 numaGetCount ( NUMA *na ); +LEPT_DLL extern l_int32 numaSetCount ( NUMA *na, l_int32 newcount ); +LEPT_DLL extern l_int32 numaGetFValue ( NUMA *na, l_int32 index, l_float32 *pval ); +LEPT_DLL extern l_int32 numaGetIValue ( NUMA *na, l_int32 index, l_int32 *pival ); +LEPT_DLL extern l_int32 numaSetValue ( NUMA *na, l_int32 index, l_float32 val ); +LEPT_DLL extern l_int32 numaShiftValue ( NUMA *na, l_int32 
index, l_float32 diff ); +LEPT_DLL extern l_int32 * numaGetIArray ( NUMA *na ); +LEPT_DLL extern l_float32 * numaGetFArray ( NUMA *na, l_int32 copyflag ); +LEPT_DLL extern l_int32 numaGetRefcount ( NUMA *na ); +LEPT_DLL extern l_int32 numaChangeRefcount ( NUMA *na, l_int32 delta ); +LEPT_DLL extern l_int32 numaGetParameters ( NUMA *na, l_float32 *pstartx, l_float32 *pdelx ); +LEPT_DLL extern l_int32 numaSetParameters ( NUMA *na, l_float32 startx, l_float32 delx ); +LEPT_DLL extern l_int32 numaCopyParameters ( NUMA *nad, NUMA *nas ); +LEPT_DLL extern SARRAY * numaConvertToSarray ( NUMA *na, l_int32 size1, l_int32 size2, l_int32 addzeros, l_int32 type ); +LEPT_DLL extern NUMA * numaRead ( const char *filename ); +LEPT_DLL extern NUMA * numaReadStream ( FILE *fp ); +LEPT_DLL extern NUMA * numaReadMem ( const l_uint8 *data, size_t size ); +LEPT_DLL extern l_int32 numaWrite ( const char *filename, NUMA *na ); +LEPT_DLL extern l_int32 numaWriteStream ( FILE *fp, NUMA *na ); +LEPT_DLL extern l_int32 numaWriteMem ( l_uint8 **pdata, size_t *psize, NUMA *na ); +LEPT_DLL extern NUMAA * numaaCreate ( l_int32 n ); +LEPT_DLL extern NUMAA * numaaCreateFull ( l_int32 nptr, l_int32 n ); +LEPT_DLL extern l_int32 numaaTruncate ( NUMAA *naa ); +LEPT_DLL extern void numaaDestroy ( NUMAA **pnaa ); +LEPT_DLL extern l_int32 numaaAddNuma ( NUMAA *naa, NUMA *na, l_int32 copyflag ); +LEPT_DLL extern l_int32 numaaGetCount ( NUMAA *naa ); +LEPT_DLL extern l_int32 numaaGetNumaCount ( NUMAA *naa, l_int32 index ); +LEPT_DLL extern l_int32 numaaGetNumberCount ( NUMAA *naa ); +LEPT_DLL extern NUMA ** numaaGetPtrArray ( NUMAA *naa ); +LEPT_DLL extern NUMA * numaaGetNuma ( NUMAA *naa, l_int32 index, l_int32 accessflag ); +LEPT_DLL extern l_int32 numaaReplaceNuma ( NUMAA *naa, l_int32 index, NUMA *na ); +LEPT_DLL extern l_int32 numaaGetValue ( NUMAA *naa, l_int32 i, l_int32 j, l_float32 *pfval, l_int32 *pival ); +LEPT_DLL extern l_int32 numaaAddNumber ( NUMAA *naa, l_int32 index, l_float32 val ); +LEPT_DLL extern NUMAA * numaaRead ( const char *filename ); +LEPT_DLL extern NUMAA * numaaReadStream ( FILE *fp ); +LEPT_DLL extern NUMAA * numaaReadMem ( const l_uint8 *data, size_t size ); +LEPT_DLL extern l_int32 numaaWrite ( const char *filename, NUMAA *naa ); +LEPT_DLL extern l_int32 numaaWriteStream ( FILE *fp, NUMAA *naa ); +LEPT_DLL extern l_int32 numaaWriteMem ( l_uint8 **pdata, size_t *psize, NUMAA *naa ); +LEPT_DLL extern NUMA * numaArithOp ( NUMA *nad, NUMA *na1, NUMA *na2, l_int32 op ); +LEPT_DLL extern NUMA * numaLogicalOp ( NUMA *nad, NUMA *na1, NUMA *na2, l_int32 op ); +LEPT_DLL extern NUMA * numaInvert ( NUMA *nad, NUMA *nas ); +LEPT_DLL extern l_int32 numaSimilar ( NUMA *na1, NUMA *na2, l_float32 maxdiff, l_int32 *psimilar ); +LEPT_DLL extern l_int32 numaAddToNumber ( NUMA *na, l_int32 index, l_float32 val ); +LEPT_DLL extern l_int32 numaGetMin ( NUMA *na, l_float32 *pminval, l_int32 *piminloc ); +LEPT_DLL extern l_int32 numaGetMax ( NUMA *na, l_float32 *pmaxval, l_int32 *pimaxloc ); +LEPT_DLL extern l_int32 numaGetSum ( NUMA *na, l_float32 *psum ); +LEPT_DLL extern NUMA * numaGetPartialSums ( NUMA *na ); +LEPT_DLL extern l_int32 numaGetSumOnInterval ( NUMA *na, l_int32 first, l_int32 last, l_float32 *psum ); +LEPT_DLL extern l_int32 numaHasOnlyIntegers ( NUMA *na, l_int32 maxsamples, l_int32 *pallints ); +LEPT_DLL extern NUMA * numaSubsample ( NUMA *nas, l_int32 subfactor ); +LEPT_DLL extern NUMA * numaMakeDelta ( NUMA *nas ); +LEPT_DLL extern NUMA * numaMakeSequence ( l_float32 startval, l_float32 increment, 
l_int32 size ); +LEPT_DLL extern NUMA * numaMakeConstant ( l_float32 val, l_int32 size ); +LEPT_DLL extern NUMA * numaMakeAbsValue ( NUMA *nad, NUMA *nas ); +LEPT_DLL extern NUMA * numaAddBorder ( NUMA *nas, l_int32 left, l_int32 right, l_float32 val ); +LEPT_DLL extern NUMA * numaAddSpecifiedBorder ( NUMA *nas, l_int32 left, l_int32 right, l_int32 type ); +LEPT_DLL extern NUMA * numaRemoveBorder ( NUMA *nas, l_int32 left, l_int32 right ); +LEPT_DLL extern l_int32 numaCountNonzeroRuns ( NUMA *na, l_int32 *pcount ); +LEPT_DLL extern l_int32 numaGetNonzeroRange ( NUMA *na, l_float32 eps, l_int32 *pfirst, l_int32 *plast ); +LEPT_DLL extern l_int32 numaGetCountRelativeToZero ( NUMA *na, l_int32 type, l_int32 *pcount ); +LEPT_DLL extern NUMA * numaClipToInterval ( NUMA *nas, l_int32 first, l_int32 last ); +LEPT_DLL extern NUMA * numaMakeThresholdIndicator ( NUMA *nas, l_float32 thresh, l_int32 type ); +LEPT_DLL extern NUMA * numaUniformSampling ( NUMA *nas, l_int32 nsamp ); +LEPT_DLL extern NUMA * numaReverse ( NUMA *nad, NUMA *nas ); +LEPT_DLL extern NUMA * numaLowPassIntervals ( NUMA *nas, l_float32 thresh, l_float32 maxn ); +LEPT_DLL extern NUMA * numaThresholdEdges ( NUMA *nas, l_float32 thresh1, l_float32 thresh2, l_float32 maxn ); +LEPT_DLL extern l_int32 numaGetSpanValues ( NUMA *na, l_int32 span, l_int32 *pstart, l_int32 *pend ); +LEPT_DLL extern l_int32 numaGetEdgeValues ( NUMA *na, l_int32 edge, l_int32 *pstart, l_int32 *pend, l_int32 *psign ); +LEPT_DLL extern l_int32 numaInterpolateEqxVal ( l_float32 startx, l_float32 deltax, NUMA *nay, l_int32 type, l_float32 xval, l_float32 *pyval ); +LEPT_DLL extern l_int32 numaInterpolateArbxVal ( NUMA *nax, NUMA *nay, l_int32 type, l_float32 xval, l_float32 *pyval ); +LEPT_DLL extern l_int32 numaInterpolateEqxInterval ( l_float32 startx, l_float32 deltax, NUMA *nasy, l_int32 type, l_float32 x0, l_float32 x1, l_int32 npts, NUMA **pnax, NUMA **pnay ); +LEPT_DLL extern l_int32 numaInterpolateArbxInterval ( NUMA *nax, NUMA *nay, l_int32 type, l_float32 x0, l_float32 x1, l_int32 npts, NUMA **pnadx, NUMA **pnady ); +LEPT_DLL extern l_int32 numaFitMax ( NUMA *na, l_float32 *pmaxval, NUMA *naloc, l_float32 *pmaxloc ); +LEPT_DLL extern l_int32 numaDifferentiateInterval ( NUMA *nax, NUMA *nay, l_float32 x0, l_float32 x1, l_int32 npts, NUMA **pnadx, NUMA **pnady ); +LEPT_DLL extern l_int32 numaIntegrateInterval ( NUMA *nax, NUMA *nay, l_float32 x0, l_float32 x1, l_int32 npts, l_float32 *psum ); +LEPT_DLL extern l_int32 numaSortGeneral ( NUMA *na, NUMA **pnasort, NUMA **pnaindex, NUMA **pnainvert, l_int32 sortorder, l_int32 sorttype ); +LEPT_DLL extern NUMA * numaSortAutoSelect ( NUMA *nas, l_int32 sortorder ); +LEPT_DLL extern NUMA * numaSortIndexAutoSelect ( NUMA *nas, l_int32 sortorder ); +LEPT_DLL extern l_int32 numaChooseSortType ( NUMA *nas ); +LEPT_DLL extern NUMA * numaSort ( NUMA *naout, NUMA *nain, l_int32 sortorder ); +LEPT_DLL extern NUMA * numaBinSort ( NUMA *nas, l_int32 sortorder ); +LEPT_DLL extern NUMA * numaGetSortIndex ( NUMA *na, l_int32 sortorder ); +LEPT_DLL extern NUMA * numaGetBinSortIndex ( NUMA *nas, l_int32 sortorder ); +LEPT_DLL extern NUMA * numaSortByIndex ( NUMA *nas, NUMA *naindex ); +LEPT_DLL extern l_int32 numaIsSorted ( NUMA *nas, l_int32 sortorder, l_int32 *psorted ); +LEPT_DLL extern l_int32 numaSortPair ( NUMA *nax, NUMA *nay, l_int32 sortorder, NUMA **pnasx, NUMA **pnasy ); +LEPT_DLL extern NUMA * numaInvertMap ( NUMA *nas ); +LEPT_DLL extern NUMA * numaPseudorandomSequence ( l_int32 size, l_int32 seed ); +LEPT_DLL 
extern NUMA * numaRandomPermutation ( NUMA *nas, l_int32 seed ); +LEPT_DLL extern l_int32 numaGetRankValue ( NUMA *na, l_float32 fract, NUMA *nasort, l_int32 usebins, l_float32 *pval ); +LEPT_DLL extern l_int32 numaGetMedian ( NUMA *na, l_float32 *pval ); +LEPT_DLL extern l_int32 numaGetBinnedMedian ( NUMA *na, l_int32 *pval ); +LEPT_DLL extern l_int32 numaGetMode ( NUMA *na, l_float32 *pval, l_int32 *pcount ); +LEPT_DLL extern l_int32 numaGetMedianVariation ( NUMA *na, l_float32 *pmedval, l_float32 *pmedvar ); +LEPT_DLL extern l_int32 numaJoin ( NUMA *nad, NUMA *nas, l_int32 istart, l_int32 iend ); +LEPT_DLL extern l_int32 numaaJoin ( NUMAA *naad, NUMAA *naas, l_int32 istart, l_int32 iend ); +LEPT_DLL extern NUMA * numaaFlattenToNuma ( NUMAA *naa ); +LEPT_DLL extern NUMA * numaErode ( NUMA *nas, l_int32 size ); +LEPT_DLL extern NUMA * numaDilate ( NUMA *nas, l_int32 size ); +LEPT_DLL extern NUMA * numaOpen ( NUMA *nas, l_int32 size ); +LEPT_DLL extern NUMA * numaClose ( NUMA *nas, l_int32 size ); +LEPT_DLL extern NUMA * numaTransform ( NUMA *nas, l_float32 shift, l_float32 scale ); +LEPT_DLL extern l_int32 numaSimpleStats ( NUMA *na, l_int32 first, l_int32 last, l_float32 *pmean, l_float32 *pvar, l_float32 *prvar ); +LEPT_DLL extern l_int32 numaWindowedStats ( NUMA *nas, l_int32 wc, NUMA **pnam, NUMA **pnams, NUMA **pnav, NUMA **pnarv ); +LEPT_DLL extern NUMA * numaWindowedMean ( NUMA *nas, l_int32 wc ); +LEPT_DLL extern NUMA * numaWindowedMeanSquare ( NUMA *nas, l_int32 wc ); +LEPT_DLL extern l_int32 numaWindowedVariance ( NUMA *nam, NUMA *nams, NUMA **pnav, NUMA **pnarv ); +LEPT_DLL extern NUMA * numaWindowedMedian ( NUMA *nas, l_int32 halfwin ); +LEPT_DLL extern NUMA * numaConvertToInt ( NUMA *nas ); +LEPT_DLL extern NUMA * numaMakeHistogram ( NUMA *na, l_int32 maxbins, l_int32 *pbinsize, l_int32 *pbinstart ); +LEPT_DLL extern NUMA * numaMakeHistogramAuto ( NUMA *na, l_int32 maxbins ); +LEPT_DLL extern NUMA * numaMakeHistogramClipped ( NUMA *na, l_float32 binsize, l_float32 maxsize ); +LEPT_DLL extern NUMA * numaRebinHistogram ( NUMA *nas, l_int32 newsize ); +LEPT_DLL extern NUMA * numaNormalizeHistogram ( NUMA *nas, l_float32 tsum ); +LEPT_DLL extern l_int32 numaGetStatsUsingHistogram ( NUMA *na, l_int32 maxbins, l_float32 *pmin, l_float32 *pmax, l_float32 *pmean, l_float32 *pvariance, l_float32 *pmedian, l_float32 rank, l_float32 *prval, NUMA **phisto ); +LEPT_DLL extern l_int32 numaGetHistogramStats ( NUMA *nahisto, l_float32 startx, l_float32 deltax, l_float32 *pxmean, l_float32 *pxmedian, l_float32 *pxmode, l_float32 *pxvariance ); +LEPT_DLL extern l_int32 numaGetHistogramStatsOnInterval ( NUMA *nahisto, l_float32 startx, l_float32 deltax, l_int32 ifirst, l_int32 ilast, l_float32 *pxmean, l_float32 *pxmedian, l_float32 *pxmode, l_float32 *pxvariance ); +LEPT_DLL extern l_int32 numaMakeRankFromHistogram ( l_float32 startx, l_float32 deltax, NUMA *nasy, l_int32 npts, NUMA **pnax, NUMA **pnay ); +LEPT_DLL extern l_int32 numaHistogramGetRankFromVal ( NUMA *na, l_float32 rval, l_float32 *prank ); +LEPT_DLL extern l_int32 numaHistogramGetValFromRank ( NUMA *na, l_float32 rank, l_float32 *prval ); +LEPT_DLL extern l_int32 numaDiscretizeRankAndIntensity ( NUMA *na, l_int32 nbins, NUMA **pnarbin, NUMA **pnam, NUMA **pnar, NUMA **pnabb ); +LEPT_DLL extern l_int32 numaGetRankBinValues ( NUMA *na, l_int32 nbins, NUMA **pnarbin, NUMA **pnam ); +LEPT_DLL extern l_int32 numaSplitDistribution ( NUMA *na, l_float32 scorefract, l_int32 *psplitindex, l_float32 *pave1, l_float32 *pave2, l_float32 
*pnum1, l_float32 *pnum2, NUMA **pnascore ); +LEPT_DLL extern l_int32 grayHistogramsToEMD ( NUMAA *naa1, NUMAA *naa2, NUMA **pnad ); +LEPT_DLL extern l_int32 numaEarthMoverDistance ( NUMA *na1, NUMA *na2, l_float32 *pdist ); +LEPT_DLL extern l_int32 grayInterHistogramStats ( NUMAA *naa, l_int32 wc, NUMA **pnam, NUMA **pnams, NUMA **pnav, NUMA **pnarv ); +LEPT_DLL extern NUMA * numaFindPeaks ( NUMA *nas, l_int32 nmax, l_float32 fract1, l_float32 fract2 ); +LEPT_DLL extern NUMA * numaFindExtrema ( NUMA *nas, l_float32 delta, NUMA **pnav ); +LEPT_DLL extern l_int32 numaCountReversals ( NUMA *nas, l_float32 minreversal, l_int32 *pnr, l_float32 *pnrpl ); +LEPT_DLL extern l_int32 numaSelectCrossingThreshold ( NUMA *nax, NUMA *nay, l_float32 estthresh, l_float32 *pbestthresh ); +LEPT_DLL extern NUMA * numaCrossingsByThreshold ( NUMA *nax, NUMA *nay, l_float32 thresh ); +LEPT_DLL extern NUMA * numaCrossingsByPeaks ( NUMA *nax, NUMA *nay, l_float32 delta ); +LEPT_DLL extern l_int32 numaEvalBestHaarParameters ( NUMA *nas, l_float32 relweight, l_int32 nwidth, l_int32 nshift, l_float32 minwidth, l_float32 maxwidth, l_float32 *pbestwidth, l_float32 *pbestshift, l_float32 *pbestscore ); +LEPT_DLL extern l_int32 numaEvalHaarSum ( NUMA *nas, l_float32 width, l_float32 shift, l_float32 relweight, l_float32 *pscore ); +LEPT_DLL extern NUMA * genConstrainedNumaInRange ( l_int32 first, l_int32 last, l_int32 nmax, l_int32 use_pairs ); +LEPT_DLL extern l_int32 pixGetRegionsBinary ( PIX *pixs, PIX **ppixhm, PIX **ppixtm, PIX **ppixtb, PIXA *pixadb ); +LEPT_DLL extern PIX * pixGenHalftoneMask ( PIX *pixs, PIX **ppixtext, l_int32 *phtfound, l_int32 debug ); +LEPT_DLL extern PIX * pixGenerateHalftoneMask ( PIX *pixs, PIX **ppixtext, l_int32 *phtfound, PIXA *pixadb ); +LEPT_DLL extern PIX * pixGenTextlineMask ( PIX *pixs, PIX **ppixvws, l_int32 *ptlfound, PIXA *pixadb ); +LEPT_DLL extern PIX * pixGenTextblockMask ( PIX *pixs, PIX *pixvws, PIXA *pixadb ); +LEPT_DLL extern BOX * pixFindPageForeground ( PIX *pixs, l_int32 threshold, l_int32 mindist, l_int32 erasedist, l_int32 pagenum, l_int32 showmorph, l_int32 display, const char *pdfdir ); +LEPT_DLL extern l_int32 pixSplitIntoCharacters ( PIX *pixs, l_int32 minw, l_int32 minh, BOXA **pboxa, PIXA **ppixa, PIX **ppixdebug ); +LEPT_DLL extern BOXA * pixSplitComponentWithProfile ( PIX *pixs, l_int32 delta, l_int32 mindel, PIX **ppixdebug ); +LEPT_DLL extern PIXA * pixExtractTextlines ( PIX *pixs, l_int32 maxw, l_int32 maxh, l_int32 minw, l_int32 minh, l_int32 adjw, l_int32 adjh, PIXA *pixadb ); +LEPT_DLL extern PIXA * pixExtractRawTextlines ( PIX *pixs, l_int32 maxw, l_int32 maxh, l_int32 adjw, l_int32 adjh, PIXA *pixadb ); +LEPT_DLL extern l_int32 pixCountTextColumns ( PIX *pixs, l_float32 deltafract, l_float32 peakfract, l_float32 clipfract, l_int32 *pncols, PIXA *pixadb ); +LEPT_DLL extern l_int32 pixDecideIfText ( PIX *pixs, BOX *box, l_int32 *pistext, PIXA *pixadb ); +LEPT_DLL extern l_int32 pixFindThreshFgExtent ( PIX *pixs, l_int32 thresh, l_int32 *ptop, l_int32 *pbot ); +LEPT_DLL extern l_int32 pixDecideIfTable ( PIX *pixs, BOX *box, l_int32 *pistable, PIXA *pixadb ); +LEPT_DLL extern PIX * pixPrepare1bpp ( PIX *pixs, BOX *box, l_float32 cropfract, l_int32 outres ); +LEPT_DLL extern l_int32 pixEstimateBackground ( PIX *pixs, l_int32 darkthresh, l_float32 edgecrop, l_int32 *pbg ); +LEPT_DLL extern l_int32 pixFindLargeRectangles ( PIX *pixs, l_int32 polarity, l_int32 nrect, BOXA **pboxa, PIX **ppixdb ); +LEPT_DLL extern l_int32 pixFindLargestRectangle ( PIX *pixs, 
l_int32 polarity, BOX **pbox, PIX **ppixdb ); +LEPT_DLL extern l_int32 pixSetSelectCmap ( PIX *pixs, BOX *box, l_int32 sindex, l_int32 rval, l_int32 gval, l_int32 bval ); +LEPT_DLL extern l_int32 pixColorGrayRegionsCmap ( PIX *pixs, BOXA *boxa, l_int32 type, l_int32 rval, l_int32 gval, l_int32 bval ); +LEPT_DLL extern l_int32 pixColorGrayCmap ( PIX *pixs, BOX *box, l_int32 type, l_int32 rval, l_int32 gval, l_int32 bval ); +LEPT_DLL extern l_int32 pixColorGrayMaskedCmap ( PIX *pixs, PIX *pixm, l_int32 type, l_int32 rval, l_int32 gval, l_int32 bval ); +LEPT_DLL extern l_int32 addColorizedGrayToCmap ( PIXCMAP *cmap, l_int32 type, l_int32 rval, l_int32 gval, l_int32 bval, NUMA **pna ); +LEPT_DLL extern l_int32 pixSetSelectMaskedCmap ( PIX *pixs, PIX *pixm, l_int32 x, l_int32 y, l_int32 sindex, l_int32 rval, l_int32 gval, l_int32 bval ); +LEPT_DLL extern l_int32 pixSetMaskedCmap ( PIX *pixs, PIX *pixm, l_int32 x, l_int32 y, l_int32 rval, l_int32 gval, l_int32 bval ); +LEPT_DLL extern char * parseForProtos ( const char *filein, const char *prestring ); +LEPT_DLL extern BOXA * boxaGetWhiteblocks ( BOXA *boxas, BOX *box, l_int32 sortflag, l_int32 maxboxes, l_float32 maxoverlap, l_int32 maxperim, l_float32 fract, l_int32 maxpops ); +LEPT_DLL extern BOXA * boxaPruneSortedOnOverlap ( BOXA *boxas, l_float32 maxoverlap ); +LEPT_DLL extern l_int32 convertFilesToPdf ( const char *dirname, const char *substr, l_int32 res, l_float32 scalefactor, l_int32 type, l_int32 quality, const char *title, const char *fileout ); +LEPT_DLL extern l_int32 saConvertFilesToPdf ( SARRAY *sa, l_int32 res, l_float32 scalefactor, l_int32 type, l_int32 quality, const char *title, const char *fileout ); +LEPT_DLL extern l_int32 saConvertFilesToPdfData ( SARRAY *sa, l_int32 res, l_float32 scalefactor, l_int32 type, l_int32 quality, const char *title, l_uint8 **pdata, size_t *pnbytes ); +LEPT_DLL extern l_int32 selectDefaultPdfEncoding ( PIX *pix, l_int32 *ptype ); +LEPT_DLL extern l_int32 convertUnscaledFilesToPdf ( const char *dirname, const char *substr, const char *title, const char *fileout ); +LEPT_DLL extern l_int32 saConvertUnscaledFilesToPdf ( SARRAY *sa, const char *title, const char *fileout ); +LEPT_DLL extern l_int32 saConvertUnscaledFilesToPdfData ( SARRAY *sa, const char *title, l_uint8 **pdata, size_t *pnbytes ); +LEPT_DLL extern l_int32 convertUnscaledToPdfData ( const char *fname, const char *title, l_uint8 **pdata, size_t *pnbytes ); +LEPT_DLL extern l_int32 pixaConvertToPdf ( PIXA *pixa, l_int32 res, l_float32 scalefactor, l_int32 type, l_int32 quality, const char *title, const char *fileout ); +LEPT_DLL extern l_int32 pixaConvertToPdfData ( PIXA *pixa, l_int32 res, l_float32 scalefactor, l_int32 type, l_int32 quality, const char *title, l_uint8 **pdata, size_t *pnbytes ); +LEPT_DLL extern l_int32 convertToPdf ( const char *filein, l_int32 type, l_int32 quality, const char *fileout, l_int32 x, l_int32 y, l_int32 res, const char *title, L_PDF_DATA **plpd, l_int32 position ); +LEPT_DLL extern l_int32 convertImageDataToPdf ( l_uint8 *imdata, size_t size, l_int32 type, l_int32 quality, const char *fileout, l_int32 x, l_int32 y, l_int32 res, const char *title, L_PDF_DATA **plpd, l_int32 position ); +LEPT_DLL extern l_int32 convertToPdfData ( const char *filein, l_int32 type, l_int32 quality, l_uint8 **pdata, size_t *pnbytes, l_int32 x, l_int32 y, l_int32 res, const char *title, L_PDF_DATA **plpd, l_int32 position ); +LEPT_DLL extern l_int32 convertImageDataToPdfData ( l_uint8 *imdata, size_t size, l_int32 type, 
l_int32 quality, l_uint8 **pdata, size_t *pnbytes, l_int32 x, l_int32 y, l_int32 res, const char *title, L_PDF_DATA **plpd, l_int32 position ); +LEPT_DLL extern l_int32 pixConvertToPdf ( PIX *pix, l_int32 type, l_int32 quality, const char *fileout, l_int32 x, l_int32 y, l_int32 res, const char *title, L_PDF_DATA **plpd, l_int32 position ); +LEPT_DLL extern l_int32 pixWriteStreamPdf ( FILE *fp, PIX *pix, l_int32 res, const char *title ); +LEPT_DLL extern l_int32 pixWriteMemPdf ( l_uint8 **pdata, size_t *pnbytes, PIX *pix, l_int32 res, const char *title ); +LEPT_DLL extern l_int32 convertSegmentedFilesToPdf ( const char *dirname, const char *substr, l_int32 res, l_int32 type, l_int32 thresh, BOXAA *baa, l_int32 quality, l_float32 scalefactor, const char *title, const char *fileout ); +LEPT_DLL extern BOXAA * convertNumberedMasksToBoxaa ( const char *dirname, const char *substr, l_int32 numpre, l_int32 numpost ); +LEPT_DLL extern l_int32 convertToPdfSegmented ( const char *filein, l_int32 res, l_int32 type, l_int32 thresh, BOXA *boxa, l_int32 quality, l_float32 scalefactor, const char *title, const char *fileout ); +LEPT_DLL extern l_int32 pixConvertToPdfSegmented ( PIX *pixs, l_int32 res, l_int32 type, l_int32 thresh, BOXA *boxa, l_int32 quality, l_float32 scalefactor, const char *title, const char *fileout ); +LEPT_DLL extern l_int32 convertToPdfDataSegmented ( const char *filein, l_int32 res, l_int32 type, l_int32 thresh, BOXA *boxa, l_int32 quality, l_float32 scalefactor, const char *title, l_uint8 **pdata, size_t *pnbytes ); +LEPT_DLL extern l_int32 pixConvertToPdfDataSegmented ( PIX *pixs, l_int32 res, l_int32 type, l_int32 thresh, BOXA *boxa, l_int32 quality, l_float32 scalefactor, const char *title, l_uint8 **pdata, size_t *pnbytes ); +LEPT_DLL extern l_int32 concatenatePdf ( const char *dirname, const char *substr, const char *fileout ); +LEPT_DLL extern l_int32 saConcatenatePdf ( SARRAY *sa, const char *fileout ); +LEPT_DLL extern l_int32 ptraConcatenatePdf ( L_PTRA *pa, const char *fileout ); +LEPT_DLL extern l_int32 concatenatePdfToData ( const char *dirname, const char *substr, l_uint8 **pdata, size_t *pnbytes ); +LEPT_DLL extern l_int32 saConcatenatePdfToData ( SARRAY *sa, l_uint8 **pdata, size_t *pnbytes ); +LEPT_DLL extern l_int32 pixConvertToPdfData ( PIX *pix, l_int32 type, l_int32 quality, l_uint8 **pdata, size_t *pnbytes, l_int32 x, l_int32 y, l_int32 res, const char *title, L_PDF_DATA **plpd, l_int32 position ); +LEPT_DLL extern l_int32 ptraConcatenatePdfToData ( L_PTRA *pa_data, SARRAY *sa, l_uint8 **pdata, size_t *pnbytes ); +LEPT_DLL extern l_int32 convertTiffMultipageToPdf ( const char *filein, const char *fileout ); +LEPT_DLL extern l_int32 l_generateCIDataForPdf ( const char *fname, PIX *pix, l_int32 quality, L_COMP_DATA **pcid ); +LEPT_DLL extern L_COMP_DATA * l_generateFlateDataPdf ( const char *fname, PIX *pixs ); +LEPT_DLL extern L_COMP_DATA * l_generateJpegData ( const char *fname, l_int32 ascii85flag ); +LEPT_DLL extern l_int32 l_generateCIData ( const char *fname, l_int32 type, l_int32 quality, l_int32 ascii85, L_COMP_DATA **pcid ); +LEPT_DLL extern l_int32 pixGenerateCIData ( PIX *pixs, l_int32 type, l_int32 quality, l_int32 ascii85, L_COMP_DATA **pcid ); +LEPT_DLL extern L_COMP_DATA * l_generateFlateData ( const char *fname, l_int32 ascii85flag ); +LEPT_DLL extern L_COMP_DATA * l_generateG4Data ( const char *fname, l_int32 ascii85flag ); +LEPT_DLL extern l_int32 cidConvertToPdfData ( L_COMP_DATA *cid, const char *title, l_uint8 **pdata, size_t *pnbytes ); 
+LEPT_DLL extern void l_CIDataDestroy ( L_COMP_DATA **pcid ); +LEPT_DLL extern void l_pdfSetG4ImageMask ( l_int32 flag ); +LEPT_DLL extern void l_pdfSetDateAndVersion ( l_int32 flag ); +LEPT_DLL extern void setPixMemoryManager ( alloc_fn allocator, dealloc_fn deallocator ); +LEPT_DLL extern PIX * pixCreate ( l_int32 width, l_int32 height, l_int32 depth ); +LEPT_DLL extern PIX * pixCreateNoInit ( l_int32 width, l_int32 height, l_int32 depth ); +LEPT_DLL extern PIX * pixCreateTemplate ( PIX *pixs ); +LEPT_DLL extern PIX * pixCreateTemplateNoInit ( PIX *pixs ); +LEPT_DLL extern PIX * pixCreateHeader ( l_int32 width, l_int32 height, l_int32 depth ); +LEPT_DLL extern PIX * pixClone ( PIX *pixs ); +LEPT_DLL extern void pixDestroy ( PIX **ppix ); +LEPT_DLL extern PIX * pixCopy ( PIX *pixd, PIX *pixs ); +LEPT_DLL extern l_int32 pixResizeImageData ( PIX *pixd, PIX *pixs ); +LEPT_DLL extern l_int32 pixCopyColormap ( PIX *pixd, PIX *pixs ); +LEPT_DLL extern l_int32 pixSizesEqual ( PIX *pix1, PIX *pix2 ); +LEPT_DLL extern l_int32 pixTransferAllData ( PIX *pixd, PIX **ppixs, l_int32 copytext, l_int32 copyformat ); +LEPT_DLL extern l_int32 pixSwapAndDestroy ( PIX **ppixd, PIX **ppixs ); +LEPT_DLL extern l_int32 pixGetWidth ( PIX *pix ); +LEPT_DLL extern l_int32 pixSetWidth ( PIX *pix, l_int32 width ); +LEPT_DLL extern l_int32 pixGetHeight ( PIX *pix ); +LEPT_DLL extern l_int32 pixSetHeight ( PIX *pix, l_int32 height ); +LEPT_DLL extern l_int32 pixGetDepth ( PIX *pix ); +LEPT_DLL extern l_int32 pixSetDepth ( PIX *pix, l_int32 depth ); +LEPT_DLL extern l_int32 pixGetDimensions ( PIX *pix, l_int32 *pw, l_int32 *ph, l_int32 *pd ); +LEPT_DLL extern l_int32 pixSetDimensions ( PIX *pix, l_int32 w, l_int32 h, l_int32 d ); +LEPT_DLL extern l_int32 pixCopyDimensions ( PIX *pixd, PIX *pixs ); +LEPT_DLL extern l_int32 pixGetSpp ( PIX *pix ); +LEPT_DLL extern l_int32 pixSetSpp ( PIX *pix, l_int32 spp ); +LEPT_DLL extern l_int32 pixCopySpp ( PIX *pixd, PIX *pixs ); +LEPT_DLL extern l_int32 pixGetWpl ( PIX *pix ); +LEPT_DLL extern l_int32 pixSetWpl ( PIX *pix, l_int32 wpl ); +LEPT_DLL extern l_int32 pixGetRefcount ( PIX *pix ); +LEPT_DLL extern l_int32 pixChangeRefcount ( PIX *pix, l_int32 delta ); +LEPT_DLL extern l_int32 pixGetXRes ( PIX *pix ); +LEPT_DLL extern l_int32 pixSetXRes ( PIX *pix, l_int32 res ); +LEPT_DLL extern l_int32 pixGetYRes ( PIX *pix ); +LEPT_DLL extern l_int32 pixSetYRes ( PIX *pix, l_int32 res ); +LEPT_DLL extern l_int32 pixGetResolution ( PIX *pix, l_int32 *pxres, l_int32 *pyres ); +LEPT_DLL extern l_int32 pixSetResolution ( PIX *pix, l_int32 xres, l_int32 yres ); +LEPT_DLL extern l_int32 pixCopyResolution ( PIX *pixd, PIX *pixs ); +LEPT_DLL extern l_int32 pixScaleResolution ( PIX *pix, l_float32 xscale, l_float32 yscale ); +LEPT_DLL extern l_int32 pixGetInputFormat ( PIX *pix ); +LEPT_DLL extern l_int32 pixSetInputFormat ( PIX *pix, l_int32 informat ); +LEPT_DLL extern l_int32 pixCopyInputFormat ( PIX *pixd, PIX *pixs ); +LEPT_DLL extern l_int32 pixSetSpecial ( PIX *pix, l_int32 special ); +LEPT_DLL extern char * pixGetText ( PIX *pix ); +LEPT_DLL extern l_int32 pixSetText ( PIX *pix, const char *textstring ); +LEPT_DLL extern l_int32 pixAddText ( PIX *pix, const char *textstring ); +LEPT_DLL extern l_int32 pixCopyText ( PIX *pixd, PIX *pixs ); +LEPT_DLL extern PIXCMAP * pixGetColormap ( PIX *pix ); +LEPT_DLL extern l_int32 pixSetColormap ( PIX *pix, PIXCMAP *colormap ); +LEPT_DLL extern l_int32 pixDestroyColormap ( PIX *pix ); +LEPT_DLL extern l_uint32 * pixGetData ( PIX *pix ); +LEPT_DLL 
extern l_int32 pixSetData ( PIX *pix, l_uint32 *data ); +LEPT_DLL extern l_uint32 * pixExtractData ( PIX *pixs ); +LEPT_DLL extern l_int32 pixFreeData ( PIX *pix ); +LEPT_DLL extern void ** pixGetLinePtrs ( PIX *pix, l_int32 *psize ); +LEPT_DLL extern l_int32 pixPrintStreamInfo ( FILE *fp, PIX *pix, const char *text ); +LEPT_DLL extern l_int32 pixGetPixel ( PIX *pix, l_int32 x, l_int32 y, l_uint32 *pval ); +LEPT_DLL extern l_int32 pixSetPixel ( PIX *pix, l_int32 x, l_int32 y, l_uint32 val ); +LEPT_DLL extern l_int32 pixGetRGBPixel ( PIX *pix, l_int32 x, l_int32 y, l_int32 *prval, l_int32 *pgval, l_int32 *pbval ); +LEPT_DLL extern l_int32 pixSetRGBPixel ( PIX *pix, l_int32 x, l_int32 y, l_int32 rval, l_int32 gval, l_int32 bval ); +LEPT_DLL extern l_int32 pixGetRandomPixel ( PIX *pix, l_uint32 *pval, l_int32 *px, l_int32 *py ); +LEPT_DLL extern l_int32 pixClearPixel ( PIX *pix, l_int32 x, l_int32 y ); +LEPT_DLL extern l_int32 pixFlipPixel ( PIX *pix, l_int32 x, l_int32 y ); +LEPT_DLL extern void setPixelLow ( l_uint32 *line, l_int32 x, l_int32 depth, l_uint32 val ); +LEPT_DLL extern l_int32 pixGetBlackOrWhiteVal ( PIX *pixs, l_int32 op, l_uint32 *pval ); +LEPT_DLL extern l_int32 pixClearAll ( PIX *pix ); +LEPT_DLL extern l_int32 pixSetAll ( PIX *pix ); +LEPT_DLL extern l_int32 pixSetAllGray ( PIX *pix, l_int32 grayval ); +LEPT_DLL extern l_int32 pixSetAllArbitrary ( PIX *pix, l_uint32 val ); +LEPT_DLL extern l_int32 pixSetBlackOrWhite ( PIX *pixs, l_int32 op ); +LEPT_DLL extern l_int32 pixSetComponentArbitrary ( PIX *pix, l_int32 comp, l_int32 val ); +LEPT_DLL extern l_int32 pixClearInRect ( PIX *pix, BOX *box ); +LEPT_DLL extern l_int32 pixSetInRect ( PIX *pix, BOX *box ); +LEPT_DLL extern l_int32 pixSetInRectArbitrary ( PIX *pix, BOX *box, l_uint32 val ); +LEPT_DLL extern l_int32 pixBlendInRect ( PIX *pixs, BOX *box, l_uint32 val, l_float32 fract ); +LEPT_DLL extern l_int32 pixSetPadBits ( PIX *pix, l_int32 val ); +LEPT_DLL extern l_int32 pixSetPadBitsBand ( PIX *pix, l_int32 by, l_int32 bh, l_int32 val ); +LEPT_DLL extern l_int32 pixSetOrClearBorder ( PIX *pixs, l_int32 left, l_int32 right, l_int32 top, l_int32 bot, l_int32 op ); +LEPT_DLL extern l_int32 pixSetBorderVal ( PIX *pixs, l_int32 left, l_int32 right, l_int32 top, l_int32 bot, l_uint32 val ); +LEPT_DLL extern l_int32 pixSetBorderRingVal ( PIX *pixs, l_int32 dist, l_uint32 val ); +LEPT_DLL extern l_int32 pixSetMirroredBorder ( PIX *pixs, l_int32 left, l_int32 right, l_int32 top, l_int32 bot ); +LEPT_DLL extern PIX * pixCopyBorder ( PIX *pixd, PIX *pixs, l_int32 left, l_int32 right, l_int32 top, l_int32 bot ); +LEPT_DLL extern PIX * pixAddBorder ( PIX *pixs, l_int32 npix, l_uint32 val ); +LEPT_DLL extern PIX * pixAddBlackOrWhiteBorder ( PIX *pixs, l_int32 left, l_int32 right, l_int32 top, l_int32 bot, l_int32 op ); +LEPT_DLL extern PIX * pixAddBorderGeneral ( PIX *pixs, l_int32 left, l_int32 right, l_int32 top, l_int32 bot, l_uint32 val ); +LEPT_DLL extern PIX * pixRemoveBorder ( PIX *pixs, l_int32 npix ); +LEPT_DLL extern PIX * pixRemoveBorderGeneral ( PIX *pixs, l_int32 left, l_int32 right, l_int32 top, l_int32 bot ); +LEPT_DLL extern PIX * pixRemoveBorderToSize ( PIX *pixs, l_int32 wd, l_int32 hd ); +LEPT_DLL extern PIX * pixAddMirroredBorder ( PIX *pixs, l_int32 left, l_int32 right, l_int32 top, l_int32 bot ); +LEPT_DLL extern PIX * pixAddRepeatedBorder ( PIX *pixs, l_int32 left, l_int32 right, l_int32 top, l_int32 bot ); +LEPT_DLL extern PIX * pixAddMixedBorder ( PIX *pixs, l_int32 left, l_int32 right, l_int32 top, l_int32 
bot ); +LEPT_DLL extern PIX * pixAddContinuedBorder ( PIX *pixs, l_int32 left, l_int32 right, l_int32 top, l_int32 bot ); +LEPT_DLL extern l_int32 pixShiftAndTransferAlpha ( PIX *pixd, PIX *pixs, l_float32 shiftx, l_float32 shifty ); +LEPT_DLL extern PIX * pixDisplayLayersRGBA ( PIX *pixs, l_uint32 val, l_int32 maxw ); +LEPT_DLL extern PIX * pixCreateRGBImage ( PIX *pixr, PIX *pixg, PIX *pixb ); +LEPT_DLL extern PIX * pixGetRGBComponent ( PIX *pixs, l_int32 comp ); +LEPT_DLL extern l_int32 pixSetRGBComponent ( PIX *pixd, PIX *pixs, l_int32 comp ); +LEPT_DLL extern PIX * pixGetRGBComponentCmap ( PIX *pixs, l_int32 comp ); +LEPT_DLL extern l_int32 pixCopyRGBComponent ( PIX *pixd, PIX *pixs, l_int32 comp ); +LEPT_DLL extern l_int32 composeRGBPixel ( l_int32 rval, l_int32 gval, l_int32 bval, l_uint32 *ppixel ); +LEPT_DLL extern l_int32 composeRGBAPixel ( l_int32 rval, l_int32 gval, l_int32 bval, l_int32 aval, l_uint32 *ppixel ); +LEPT_DLL extern void extractRGBValues ( l_uint32 pixel, l_int32 *prval, l_int32 *pgval, l_int32 *pbval ); +LEPT_DLL extern void extractRGBAValues ( l_uint32 pixel, l_int32 *prval, l_int32 *pgval, l_int32 *pbval, l_int32 *paval ); +LEPT_DLL extern l_int32 extractMinMaxComponent ( l_uint32 pixel, l_int32 type ); +LEPT_DLL extern l_int32 pixGetRGBLine ( PIX *pixs, l_int32 row, l_uint8 *bufr, l_uint8 *bufg, l_uint8 *bufb ); +LEPT_DLL extern PIX * pixEndianByteSwapNew ( PIX *pixs ); +LEPT_DLL extern l_int32 pixEndianByteSwap ( PIX *pixs ); +LEPT_DLL extern l_int32 lineEndianByteSwap ( l_uint32 *datad, l_uint32 *datas, l_int32 wpl ); +LEPT_DLL extern PIX * pixEndianTwoByteSwapNew ( PIX *pixs ); +LEPT_DLL extern l_int32 pixEndianTwoByteSwap ( PIX *pixs ); +LEPT_DLL extern l_int32 pixGetRasterData ( PIX *pixs, l_uint8 **pdata, size_t *pnbytes ); +LEPT_DLL extern l_int32 pixAlphaIsOpaque ( PIX *pix, l_int32 *popaque ); +LEPT_DLL extern l_uint8 ** pixSetupByteProcessing ( PIX *pix, l_int32 *pw, l_int32 *ph ); +LEPT_DLL extern l_int32 pixCleanupByteProcessing ( PIX *pix, l_uint8 **lineptrs ); +LEPT_DLL extern void l_setAlphaMaskBorder ( l_float32 val1, l_float32 val2 ); +LEPT_DLL extern l_int32 pixSetMasked ( PIX *pixd, PIX *pixm, l_uint32 val ); +LEPT_DLL extern l_int32 pixSetMaskedGeneral ( PIX *pixd, PIX *pixm, l_uint32 val, l_int32 x, l_int32 y ); +LEPT_DLL extern l_int32 pixCombineMasked ( PIX *pixd, PIX *pixs, PIX *pixm ); +LEPT_DLL extern l_int32 pixCombineMaskedGeneral ( PIX *pixd, PIX *pixs, PIX *pixm, l_int32 x, l_int32 y ); +LEPT_DLL extern l_int32 pixPaintThroughMask ( PIX *pixd, PIX *pixm, l_int32 x, l_int32 y, l_uint32 val ); +LEPT_DLL extern l_int32 pixPaintSelfThroughMask ( PIX *pixd, PIX *pixm, l_int32 x, l_int32 y, l_int32 searchdir, l_int32 mindist, l_int32 tilesize, l_int32 ntiles, l_int32 distblend ); +LEPT_DLL extern PIX * pixMakeMaskFromVal ( PIX *pixs, l_int32 val ); +LEPT_DLL extern PIX * pixMakeMaskFromLUT ( PIX *pixs, l_int32 *tab ); +LEPT_DLL extern PIX * pixMakeArbMaskFromRGB ( PIX *pixs, l_float32 rc, l_float32 gc, l_float32 bc, l_float32 thresh ); +LEPT_DLL extern PIX * pixSetUnderTransparency ( PIX *pixs, l_uint32 val, l_int32 debug ); +LEPT_DLL extern PIX * pixMakeAlphaFromMask ( PIX *pixs, l_int32 dist, BOX **pbox ); +LEPT_DLL extern l_int32 pixGetColorNearMaskBoundary ( PIX *pixs, PIX *pixm, BOX *box, l_int32 dist, l_uint32 *pval, l_int32 debug ); +LEPT_DLL extern PIX * pixInvert ( PIX *pixd, PIX *pixs ); +LEPT_DLL extern PIX * pixOr ( PIX *pixd, PIX *pixs1, PIX *pixs2 ); +LEPT_DLL extern PIX * pixAnd ( PIX *pixd, PIX *pixs1, PIX *pixs2 ); 
+LEPT_DLL extern PIX * pixXor ( PIX *pixd, PIX *pixs1, PIX *pixs2 ); +LEPT_DLL extern PIX * pixSubtract ( PIX *pixd, PIX *pixs1, PIX *pixs2 ); +LEPT_DLL extern l_int32 pixZero ( PIX *pix, l_int32 *pempty ); +LEPT_DLL extern l_int32 pixForegroundFraction ( PIX *pix, l_float32 *pfract ); +LEPT_DLL extern NUMA * pixaCountPixels ( PIXA *pixa ); +LEPT_DLL extern l_int32 pixCountPixels ( PIX *pix, l_int32 *pcount, l_int32 *tab8 ); +LEPT_DLL extern NUMA * pixCountByRow ( PIX *pix, BOX *box ); +LEPT_DLL extern NUMA * pixCountByColumn ( PIX *pix, BOX *box ); +LEPT_DLL extern NUMA * pixCountPixelsByRow ( PIX *pix, l_int32 *tab8 ); +LEPT_DLL extern NUMA * pixCountPixelsByColumn ( PIX *pix ); +LEPT_DLL extern l_int32 pixCountPixelsInRow ( PIX *pix, l_int32 row, l_int32 *pcount, l_int32 *tab8 ); +LEPT_DLL extern NUMA * pixGetMomentByColumn ( PIX *pix, l_int32 order ); +LEPT_DLL extern l_int32 pixThresholdPixelSum ( PIX *pix, l_int32 thresh, l_int32 *pabove, l_int32 *tab8 ); +LEPT_DLL extern l_int32 * makePixelSumTab8 ( void ); +LEPT_DLL extern l_int32 * makePixelCentroidTab8 ( void ); +LEPT_DLL extern NUMA * pixAverageByRow ( PIX *pix, BOX *box, l_int32 type ); +LEPT_DLL extern NUMA * pixAverageByColumn ( PIX *pix, BOX *box, l_int32 type ); +LEPT_DLL extern l_int32 pixAverageInRect ( PIX *pix, BOX *box, l_float32 *pave ); +LEPT_DLL extern NUMA * pixVarianceByRow ( PIX *pix, BOX *box ); +LEPT_DLL extern NUMA * pixVarianceByColumn ( PIX *pix, BOX *box ); +LEPT_DLL extern l_int32 pixVarianceInRect ( PIX *pix, BOX *box, l_float32 *prootvar ); +LEPT_DLL extern NUMA * pixAbsDiffByRow ( PIX *pix, BOX *box ); +LEPT_DLL extern NUMA * pixAbsDiffByColumn ( PIX *pix, BOX *box ); +LEPT_DLL extern l_int32 pixAbsDiffInRect ( PIX *pix, BOX *box, l_int32 dir, l_float32 *pabsdiff ); +LEPT_DLL extern l_int32 pixAbsDiffOnLine ( PIX *pix, l_int32 x1, l_int32 y1, l_int32 x2, l_int32 y2, l_float32 *pabsdiff ); +LEPT_DLL extern l_int32 pixCountArbInRect ( PIX *pixs, BOX *box, l_int32 val, l_int32 factor, l_int32 *pcount ); +LEPT_DLL extern PIX * pixMirroredTiling ( PIX *pixs, l_int32 w, l_int32 h ); +LEPT_DLL extern l_int32 pixFindRepCloseTile ( PIX *pixs, BOX *box, l_int32 searchdir, l_int32 mindist, l_int32 tsize, l_int32 ntiles, BOX **pboxtile, l_int32 debug ); +LEPT_DLL extern NUMA * pixGetGrayHistogram ( PIX *pixs, l_int32 factor ); +LEPT_DLL extern NUMA * pixGetGrayHistogramMasked ( PIX *pixs, PIX *pixm, l_int32 x, l_int32 y, l_int32 factor ); +LEPT_DLL extern NUMA * pixGetGrayHistogramInRect ( PIX *pixs, BOX *box, l_int32 factor ); +LEPT_DLL extern NUMAA * pixGetGrayHistogramTiled ( PIX *pixs, l_int32 factor, l_int32 nx, l_int32 ny ); +LEPT_DLL extern l_int32 pixGetColorHistogram ( PIX *pixs, l_int32 factor, NUMA **pnar, NUMA **pnag, NUMA **pnab ); +LEPT_DLL extern l_int32 pixGetColorHistogramMasked ( PIX *pixs, PIX *pixm, l_int32 x, l_int32 y, l_int32 factor, NUMA **pnar, NUMA **pnag, NUMA **pnab ); +LEPT_DLL extern NUMA * pixGetCmapHistogram ( PIX *pixs, l_int32 factor ); +LEPT_DLL extern NUMA * pixGetCmapHistogramMasked ( PIX *pixs, PIX *pixm, l_int32 x, l_int32 y, l_int32 factor ); +LEPT_DLL extern NUMA * pixGetCmapHistogramInRect ( PIX *pixs, BOX *box, l_int32 factor ); +LEPT_DLL extern l_int32 pixCountRGBColors ( PIX *pixs ); +LEPT_DLL extern L_AMAP * pixGetColorAmapHistogram ( PIX *pixs, l_int32 factor ); +LEPT_DLL extern l_int32 amapGetCountForColor ( L_AMAP *amap, l_uint32 val ); +LEPT_DLL extern l_int32 pixGetRankValue ( PIX *pixs, l_int32 factor, l_float32 rank, l_uint32 *pvalue ); +LEPT_DLL extern l_int32 
pixGetRankValueMaskedRGB ( PIX *pixs, PIX *pixm, l_int32 x, l_int32 y, l_int32 factor, l_float32 rank, l_float32 *prval, l_float32 *pgval, l_float32 *pbval ); +LEPT_DLL extern l_int32 pixGetRankValueMasked ( PIX *pixs, PIX *pixm, l_int32 x, l_int32 y, l_int32 factor, l_float32 rank, l_float32 *pval, NUMA **pna ); +LEPT_DLL extern l_int32 pixGetAverageValue ( PIX *pixs, l_int32 factor, l_int32 type, l_uint32 *pvalue ); +LEPT_DLL extern l_int32 pixGetAverageMaskedRGB ( PIX *pixs, PIX *pixm, l_int32 x, l_int32 y, l_int32 factor, l_int32 type, l_float32 *prval, l_float32 *pgval, l_float32 *pbval ); +LEPT_DLL extern l_int32 pixGetAverageMasked ( PIX *pixs, PIX *pixm, l_int32 x, l_int32 y, l_int32 factor, l_int32 type, l_float32 *pval ); +LEPT_DLL extern l_int32 pixGetAverageTiledRGB ( PIX *pixs, l_int32 sx, l_int32 sy, l_int32 type, PIX **ppixr, PIX **ppixg, PIX **ppixb ); +LEPT_DLL extern PIX * pixGetAverageTiled ( PIX *pixs, l_int32 sx, l_int32 sy, l_int32 type ); +LEPT_DLL extern l_int32 pixRowStats ( PIX *pixs, BOX *box, NUMA **pnamean, NUMA **pnamedian, NUMA **pnamode, NUMA **pnamodecount, NUMA **pnavar, NUMA **pnarootvar ); +LEPT_DLL extern l_int32 pixColumnStats ( PIX *pixs, BOX *box, NUMA **pnamean, NUMA **pnamedian, NUMA **pnamode, NUMA **pnamodecount, NUMA **pnavar, NUMA **pnarootvar ); +LEPT_DLL extern l_int32 pixGetRangeValues ( PIX *pixs, l_int32 factor, l_int32 color, l_int32 *pminval, l_int32 *pmaxval ); +LEPT_DLL extern l_int32 pixGetExtremeValue ( PIX *pixs, l_int32 factor, l_int32 type, l_int32 *prval, l_int32 *pgval, l_int32 *pbval, l_int32 *pgrayval ); +LEPT_DLL extern l_int32 pixGetMaxValueInRect ( PIX *pixs, BOX *box, l_uint32 *pmaxval, l_int32 *pxmax, l_int32 *pymax ); +LEPT_DLL extern l_int32 pixGetBinnedComponentRange ( PIX *pixs, l_int32 nbins, l_int32 factor, l_int32 color, l_int32 *pminval, l_int32 *pmaxval, l_uint32 **pcarray, l_int32 fontsize ); +LEPT_DLL extern l_int32 pixGetRankColorArray ( PIX *pixs, l_int32 nbins, l_int32 type, l_int32 factor, l_uint32 **pcarray, l_int32 debugflag, l_int32 fontsize ); +LEPT_DLL extern l_int32 pixGetBinnedColor ( PIX *pixs, PIX *pixg, l_int32 factor, l_int32 nbins, NUMA *nalut, l_uint32 **pcarray, l_int32 debugflag ); +LEPT_DLL extern PIX * pixDisplayColorArray ( l_uint32 *carray, l_int32 ncolors, l_int32 side, l_int32 ncols, l_int32 fontsize ); +LEPT_DLL extern PIX * pixRankBinByStrip ( PIX *pixs, l_int32 direction, l_int32 size, l_int32 nbins, l_int32 type ); +LEPT_DLL extern PIX * pixaGetAlignedStats ( PIXA *pixa, l_int32 type, l_int32 nbins, l_int32 thresh ); +LEPT_DLL extern l_int32 pixaExtractColumnFromEachPix ( PIXA *pixa, l_int32 col, PIX *pixd ); +LEPT_DLL extern l_int32 pixGetRowStats ( PIX *pixs, l_int32 type, l_int32 nbins, l_int32 thresh, l_float32 *colvect ); +LEPT_DLL extern l_int32 pixGetColumnStats ( PIX *pixs, l_int32 type, l_int32 nbins, l_int32 thresh, l_float32 *rowvect ); +LEPT_DLL extern l_int32 pixSetPixelColumn ( PIX *pix, l_int32 col, l_float32 *colvect ); +LEPT_DLL extern l_int32 pixThresholdForFgBg ( PIX *pixs, l_int32 factor, l_int32 thresh, l_int32 *pfgval, l_int32 *pbgval ); +LEPT_DLL extern l_int32 pixSplitDistributionFgBg ( PIX *pixs, l_float32 scorefract, l_int32 factor, l_int32 *pthresh, l_int32 *pfgval, l_int32 *pbgval, PIX **ppixdb ); +LEPT_DLL extern l_int32 pixaFindDimensions ( PIXA *pixa, NUMA **pnaw, NUMA **pnah ); +LEPT_DLL extern l_int32 pixFindAreaPerimRatio ( PIX *pixs, l_int32 *tab, l_float32 *pfract ); +LEPT_DLL extern NUMA * pixaFindPerimToAreaRatio ( PIXA *pixa ); +LEPT_DLL extern 
l_int32 pixFindPerimToAreaRatio ( PIX *pixs, l_int32 *tab, l_float32 *pfract ); +LEPT_DLL extern NUMA * pixaFindPerimSizeRatio ( PIXA *pixa ); +LEPT_DLL extern l_int32 pixFindPerimSizeRatio ( PIX *pixs, l_int32 *tab, l_float32 *pratio ); +LEPT_DLL extern NUMA * pixaFindAreaFraction ( PIXA *pixa ); +LEPT_DLL extern l_int32 pixFindAreaFraction ( PIX *pixs, l_int32 *tab, l_float32 *pfract ); +LEPT_DLL extern NUMA * pixaFindAreaFractionMasked ( PIXA *pixa, PIX *pixm, l_int32 debug ); +LEPT_DLL extern l_int32 pixFindAreaFractionMasked ( PIX *pixs, BOX *box, PIX *pixm, l_int32 *tab, l_float32 *pfract ); +LEPT_DLL extern NUMA * pixaFindWidthHeightRatio ( PIXA *pixa ); +LEPT_DLL extern NUMA * pixaFindWidthHeightProduct ( PIXA *pixa ); +LEPT_DLL extern l_int32 pixFindOverlapFraction ( PIX *pixs1, PIX *pixs2, l_int32 x2, l_int32 y2, l_int32 *tab, l_float32 *pratio, l_int32 *pnoverlap ); +LEPT_DLL extern BOXA * pixFindRectangleComps ( PIX *pixs, l_int32 dist, l_int32 minw, l_int32 minh ); +LEPT_DLL extern l_int32 pixConformsToRectangle ( PIX *pixs, BOX *box, l_int32 dist, l_int32 *pconforms ); +LEPT_DLL extern PIXA * pixClipRectangles ( PIX *pixs, BOXA *boxa ); +LEPT_DLL extern PIX * pixClipRectangle ( PIX *pixs, BOX *box, BOX **pboxc ); +LEPT_DLL extern PIX * pixClipMasked ( PIX *pixs, PIX *pixm, l_int32 x, l_int32 y, l_uint32 outval ); +LEPT_DLL extern l_int32 pixCropToMatch ( PIX *pixs1, PIX *pixs2, PIX **ppixd1, PIX **ppixd2 ); +LEPT_DLL extern PIX * pixCropToSize ( PIX *pixs, l_int32 w, l_int32 h ); +LEPT_DLL extern PIX * pixResizeToMatch ( PIX *pixs, PIX *pixt, l_int32 w, l_int32 h ); +LEPT_DLL extern PIX * pixMakeFrameMask ( l_int32 w, l_int32 h, l_float32 hf1, l_float32 hf2, l_float32 vf1, l_float32 vf2 ); +LEPT_DLL extern l_int32 pixFractionFgInMask ( PIX *pix1, PIX *pix2, l_float32 *pfract ); +LEPT_DLL extern l_int32 pixClipToForeground ( PIX *pixs, PIX **ppixd, BOX **pbox ); +LEPT_DLL extern l_int32 pixTestClipToForeground ( PIX *pixs, l_int32 *pcanclip ); +LEPT_DLL extern l_int32 pixClipBoxToForeground ( PIX *pixs, BOX *boxs, PIX **ppixd, BOX **pboxd ); +LEPT_DLL extern l_int32 pixScanForForeground ( PIX *pixs, BOX *box, l_int32 scanflag, l_int32 *ploc ); +LEPT_DLL extern l_int32 pixClipBoxToEdges ( PIX *pixs, BOX *boxs, l_int32 lowthresh, l_int32 highthresh, l_int32 maxwidth, l_int32 factor, PIX **ppixd, BOX **pboxd ); +LEPT_DLL extern l_int32 pixScanForEdge ( PIX *pixs, BOX *box, l_int32 lowthresh, l_int32 highthresh, l_int32 maxwidth, l_int32 factor, l_int32 scanflag, l_int32 *ploc ); +LEPT_DLL extern NUMA * pixExtractOnLine ( PIX *pixs, l_int32 x1, l_int32 y1, l_int32 x2, l_int32 y2, l_int32 factor ); +LEPT_DLL extern l_float32 pixAverageOnLine ( PIX *pixs, l_int32 x1, l_int32 y1, l_int32 x2, l_int32 y2, l_int32 factor ); +LEPT_DLL extern NUMA * pixAverageIntensityProfile ( PIX *pixs, l_float32 fract, l_int32 dir, l_int32 first, l_int32 last, l_int32 factor1, l_int32 factor2 ); +LEPT_DLL extern NUMA * pixReversalProfile ( PIX *pixs, l_float32 fract, l_int32 dir, l_int32 first, l_int32 last, l_int32 minreversal, l_int32 factor1, l_int32 factor2 ); +LEPT_DLL extern l_int32 pixWindowedVarianceOnLine ( PIX *pixs, l_int32 dir, l_int32 loc, l_int32 c1, l_int32 c2, l_int32 size, NUMA **pnad ); +LEPT_DLL extern l_int32 pixMinMaxNearLine ( PIX *pixs, l_int32 x1, l_int32 y1, l_int32 x2, l_int32 y2, l_int32 dist, l_int32 direction, NUMA **pnamin, NUMA **pnamax, l_float32 *pminave, l_float32 *pmaxave ); +LEPT_DLL extern PIX * pixRankRowTransform ( PIX *pixs ); +LEPT_DLL extern PIX * 
pixRankColumnTransform ( PIX *pixs ); +LEPT_DLL extern PIXA * pixaCreate ( l_int32 n ); +LEPT_DLL extern PIXA * pixaCreateFromPix ( PIX *pixs, l_int32 n, l_int32 cellw, l_int32 cellh ); +LEPT_DLL extern PIXA * pixaCreateFromBoxa ( PIX *pixs, BOXA *boxa, l_int32 *pcropwarn ); +LEPT_DLL extern PIXA * pixaSplitPix ( PIX *pixs, l_int32 nx, l_int32 ny, l_int32 borderwidth, l_uint32 bordercolor ); +LEPT_DLL extern void pixaDestroy ( PIXA **ppixa ); +LEPT_DLL extern PIXA * pixaCopy ( PIXA *pixa, l_int32 copyflag ); +LEPT_DLL extern l_int32 pixaAddPix ( PIXA *pixa, PIX *pix, l_int32 copyflag ); +LEPT_DLL extern l_int32 pixaAddBox ( PIXA *pixa, BOX *box, l_int32 copyflag ); +LEPT_DLL extern l_int32 pixaExtendArrayToSize ( PIXA *pixa, l_int32 size ); +LEPT_DLL extern l_int32 pixaGetCount ( PIXA *pixa ); +LEPT_DLL extern l_int32 pixaChangeRefcount ( PIXA *pixa, l_int32 delta ); +LEPT_DLL extern PIX * pixaGetPix ( PIXA *pixa, l_int32 index, l_int32 accesstype ); +LEPT_DLL extern l_int32 pixaGetPixDimensions ( PIXA *pixa, l_int32 index, l_int32 *pw, l_int32 *ph, l_int32 *pd ); +LEPT_DLL extern BOXA * pixaGetBoxa ( PIXA *pixa, l_int32 accesstype ); +LEPT_DLL extern l_int32 pixaGetBoxaCount ( PIXA *pixa ); +LEPT_DLL extern BOX * pixaGetBox ( PIXA *pixa, l_int32 index, l_int32 accesstype ); +LEPT_DLL extern l_int32 pixaGetBoxGeometry ( PIXA *pixa, l_int32 index, l_int32 *px, l_int32 *py, l_int32 *pw, l_int32 *ph ); +LEPT_DLL extern l_int32 pixaSetBoxa ( PIXA *pixa, BOXA *boxa, l_int32 accesstype ); +LEPT_DLL extern PIX ** pixaGetPixArray ( PIXA *pixa ); +LEPT_DLL extern l_int32 pixaVerifyDepth ( PIXA *pixa, l_int32 *pmaxdepth ); +LEPT_DLL extern l_int32 pixaIsFull ( PIXA *pixa, l_int32 *pfullpa, l_int32 *pfullba ); +LEPT_DLL extern l_int32 pixaCountText ( PIXA *pixa, l_int32 *pntext ); +LEPT_DLL extern l_int32 pixaSetText ( PIXA *pixa, SARRAY *sa ); +LEPT_DLL extern void *** pixaGetLinePtrs ( PIXA *pixa, l_int32 *psize ); +LEPT_DLL extern l_int32 pixaWriteStreamInfo ( FILE *fp, PIXA *pixa ); +LEPT_DLL extern l_int32 pixaReplacePix ( PIXA *pixa, l_int32 index, PIX *pix, BOX *box ); +LEPT_DLL extern l_int32 pixaInsertPix ( PIXA *pixa, l_int32 index, PIX *pixs, BOX *box ); +LEPT_DLL extern l_int32 pixaRemovePix ( PIXA *pixa, l_int32 index ); +LEPT_DLL extern l_int32 pixaRemovePixAndSave ( PIXA *pixa, l_int32 index, PIX **ppix, BOX **pbox ); +LEPT_DLL extern l_int32 pixaInitFull ( PIXA *pixa, PIX *pix, BOX *box ); +LEPT_DLL extern l_int32 pixaClear ( PIXA *pixa ); +LEPT_DLL extern l_int32 pixaJoin ( PIXA *pixad, PIXA *pixas, l_int32 istart, l_int32 iend ); +LEPT_DLL extern PIXA * pixaInterleave ( PIXA *pixa1, PIXA *pixa2, l_int32 copyflag ); +LEPT_DLL extern l_int32 pixaaJoin ( PIXAA *paad, PIXAA *paas, l_int32 istart, l_int32 iend ); +LEPT_DLL extern PIXAA * pixaaCreate ( l_int32 n ); +LEPT_DLL extern PIXAA * pixaaCreateFromPixa ( PIXA *pixa, l_int32 n, l_int32 type, l_int32 copyflag ); +LEPT_DLL extern void pixaaDestroy ( PIXAA **ppaa ); +LEPT_DLL extern l_int32 pixaaAddPixa ( PIXAA *paa, PIXA *pixa, l_int32 copyflag ); +LEPT_DLL extern l_int32 pixaaExtendArray ( PIXAA *paa ); +LEPT_DLL extern l_int32 pixaaAddPix ( PIXAA *paa, l_int32 index, PIX *pix, BOX *box, l_int32 copyflag ); +LEPT_DLL extern l_int32 pixaaAddBox ( PIXAA *paa, BOX *box, l_int32 copyflag ); +LEPT_DLL extern l_int32 pixaaGetCount ( PIXAA *paa, NUMA **pna ); +LEPT_DLL extern PIXA * pixaaGetPixa ( PIXAA *paa, l_int32 index, l_int32 accesstype ); +LEPT_DLL extern BOXA * pixaaGetBoxa ( PIXAA *paa, l_int32 accesstype ); +LEPT_DLL extern PIX * 
pixaaGetPix ( PIXAA *paa, l_int32 index, l_int32 ipix, l_int32 accessflag ); +LEPT_DLL extern l_int32 pixaaVerifyDepth ( PIXAA *paa, l_int32 *pmaxdepth ); +LEPT_DLL extern l_int32 pixaaIsFull ( PIXAA *paa, l_int32 *pfull ); +LEPT_DLL extern l_int32 pixaaInitFull ( PIXAA *paa, PIXA *pixa ); +LEPT_DLL extern l_int32 pixaaReplacePixa ( PIXAA *paa, l_int32 index, PIXA *pixa ); +LEPT_DLL extern l_int32 pixaaClear ( PIXAA *paa ); +LEPT_DLL extern l_int32 pixaaTruncate ( PIXAA *paa ); +LEPT_DLL extern PIXA * pixaRead ( const char *filename ); +LEPT_DLL extern PIXA * pixaReadStream ( FILE *fp ); +LEPT_DLL extern PIXA * pixaReadMem ( const l_uint8 *data, size_t size ); +LEPT_DLL extern l_int32 pixaWrite ( const char *filename, PIXA *pixa ); +LEPT_DLL extern l_int32 pixaWriteStream ( FILE *fp, PIXA *pixa ); +LEPT_DLL extern l_int32 pixaWriteMem ( l_uint8 **pdata, size_t *psize, PIXA *pixa ); +LEPT_DLL extern PIXA * pixaReadBoth ( const char *filename ); +LEPT_DLL extern PIXAA * pixaaReadFromFiles ( const char *dirname, const char *substr, l_int32 first, l_int32 nfiles ); +LEPT_DLL extern PIXAA * pixaaRead ( const char *filename ); +LEPT_DLL extern PIXAA * pixaaReadStream ( FILE *fp ); +LEPT_DLL extern PIXAA * pixaaReadMem ( const l_uint8 *data, size_t size ); +LEPT_DLL extern l_int32 pixaaWrite ( const char *filename, PIXAA *paa ); +LEPT_DLL extern l_int32 pixaaWriteStream ( FILE *fp, PIXAA *paa ); +LEPT_DLL extern l_int32 pixaaWriteMem ( l_uint8 **pdata, size_t *psize, PIXAA *paa ); +LEPT_DLL extern PIXACC * pixaccCreate ( l_int32 w, l_int32 h, l_int32 negflag ); +LEPT_DLL extern PIXACC * pixaccCreateFromPix ( PIX *pix, l_int32 negflag ); +LEPT_DLL extern void pixaccDestroy ( PIXACC **ppixacc ); +LEPT_DLL extern PIX * pixaccFinal ( PIXACC *pixacc, l_int32 outdepth ); +LEPT_DLL extern PIX * pixaccGetPix ( PIXACC *pixacc ); +LEPT_DLL extern l_int32 pixaccGetOffset ( PIXACC *pixacc ); +LEPT_DLL extern l_int32 pixaccAdd ( PIXACC *pixacc, PIX *pix ); +LEPT_DLL extern l_int32 pixaccSubtract ( PIXACC *pixacc, PIX *pix ); +LEPT_DLL extern l_int32 pixaccMultConst ( PIXACC *pixacc, l_float32 factor ); +LEPT_DLL extern l_int32 pixaccMultConstAccumulate ( PIXACC *pixacc, PIX *pix, l_float32 factor ); +LEPT_DLL extern PIX * pixSelectBySize ( PIX *pixs, l_int32 width, l_int32 height, l_int32 connectivity, l_int32 type, l_int32 relation, l_int32 *pchanged ); +LEPT_DLL extern PIXA * pixaSelectBySize ( PIXA *pixas, l_int32 width, l_int32 height, l_int32 type, l_int32 relation, l_int32 *pchanged ); +LEPT_DLL extern NUMA * pixaMakeSizeIndicator ( PIXA *pixa, l_int32 width, l_int32 height, l_int32 type, l_int32 relation ); +LEPT_DLL extern PIX * pixSelectByPerimToAreaRatio ( PIX *pixs, l_float32 thresh, l_int32 connectivity, l_int32 type, l_int32 *pchanged ); +LEPT_DLL extern PIXA * pixaSelectByPerimToAreaRatio ( PIXA *pixas, l_float32 thresh, l_int32 type, l_int32 *pchanged ); +LEPT_DLL extern PIX * pixSelectByPerimSizeRatio ( PIX *pixs, l_float32 thresh, l_int32 connectivity, l_int32 type, l_int32 *pchanged ); +LEPT_DLL extern PIXA * pixaSelectByPerimSizeRatio ( PIXA *pixas, l_float32 thresh, l_int32 type, l_int32 *pchanged ); +LEPT_DLL extern PIX * pixSelectByAreaFraction ( PIX *pixs, l_float32 thresh, l_int32 connectivity, l_int32 type, l_int32 *pchanged ); +LEPT_DLL extern PIXA * pixaSelectByAreaFraction ( PIXA *pixas, l_float32 thresh, l_int32 type, l_int32 *pchanged ); +LEPT_DLL extern PIX * pixSelectByWidthHeightRatio ( PIX *pixs, l_float32 thresh, l_int32 connectivity, l_int32 type, l_int32 *pchanged ); 
+LEPT_DLL extern PIXA * pixaSelectByWidthHeightRatio ( PIXA *pixas, l_float32 thresh, l_int32 type, l_int32 *pchanged ); +LEPT_DLL extern PIXA * pixaSelectByNumConnComp ( PIXA *pixas, l_int32 nmin, l_int32 nmax, l_int32 connectivity, l_int32 *pchanged ); +LEPT_DLL extern PIXA * pixaSelectWithIndicator ( PIXA *pixas, NUMA *na, l_int32 *pchanged ); +LEPT_DLL extern l_int32 pixRemoveWithIndicator ( PIX *pixs, PIXA *pixa, NUMA *na ); +LEPT_DLL extern l_int32 pixAddWithIndicator ( PIX *pixs, PIXA *pixa, NUMA *na ); +LEPT_DLL extern PIXA * pixaSelectWithString ( PIXA *pixas, const char *str, l_int32 *perror ); +LEPT_DLL extern PIX * pixaRenderComponent ( PIX *pixs, PIXA *pixa, l_int32 index ); +LEPT_DLL extern PIXA * pixaSort ( PIXA *pixas, l_int32 sorttype, l_int32 sortorder, NUMA **pnaindex, l_int32 copyflag ); +LEPT_DLL extern PIXA * pixaBinSort ( PIXA *pixas, l_int32 sorttype, l_int32 sortorder, NUMA **pnaindex, l_int32 copyflag ); +LEPT_DLL extern PIXA * pixaSortByIndex ( PIXA *pixas, NUMA *naindex, l_int32 copyflag ); +LEPT_DLL extern PIXAA * pixaSort2dByIndex ( PIXA *pixas, NUMAA *naa, l_int32 copyflag ); +LEPT_DLL extern PIXA * pixaSelectRange ( PIXA *pixas, l_int32 first, l_int32 last, l_int32 copyflag ); +LEPT_DLL extern PIXAA * pixaaSelectRange ( PIXAA *paas, l_int32 first, l_int32 last, l_int32 copyflag ); +LEPT_DLL extern PIXAA * pixaaScaleToSize ( PIXAA *paas, l_int32 wd, l_int32 hd ); +LEPT_DLL extern PIXAA * pixaaScaleToSizeVar ( PIXAA *paas, NUMA *nawd, NUMA *nahd ); +LEPT_DLL extern PIXA * pixaScaleToSize ( PIXA *pixas, l_int32 wd, l_int32 hd ); +LEPT_DLL extern PIXA * pixaScaleToSizeRel ( PIXA *pixas, l_int32 delw, l_int32 delh ); +LEPT_DLL extern PIXA * pixaScale ( PIXA *pixas, l_float32 scalex, l_float32 scaley ); +LEPT_DLL extern PIXA * pixaAddBorderGeneral ( PIXA *pixad, PIXA *pixas, l_int32 left, l_int32 right, l_int32 top, l_int32 bot, l_uint32 val ); +LEPT_DLL extern PIXA * pixaaFlattenToPixa ( PIXAA *paa, NUMA **pnaindex, l_int32 copyflag ); +LEPT_DLL extern l_int32 pixaaSizeRange ( PIXAA *paa, l_int32 *pminw, l_int32 *pminh, l_int32 *pmaxw, l_int32 *pmaxh ); +LEPT_DLL extern l_int32 pixaSizeRange ( PIXA *pixa, l_int32 *pminw, l_int32 *pminh, l_int32 *pmaxw, l_int32 *pmaxh ); +LEPT_DLL extern PIXA * pixaClipToPix ( PIXA *pixas, PIX *pixs ); +LEPT_DLL extern l_int32 pixaClipToForeground ( PIXA *pixas, PIXA **ppixad, BOXA **pboxa ); +LEPT_DLL extern l_int32 pixaGetRenderingDepth ( PIXA *pixa, l_int32 *pdepth ); +LEPT_DLL extern l_int32 pixaHasColor ( PIXA *pixa, l_int32 *phascolor ); +LEPT_DLL extern l_int32 pixaAnyColormaps ( PIXA *pixa, l_int32 *phascmap ); +LEPT_DLL extern l_int32 pixaGetDepthInfo ( PIXA *pixa, l_int32 *pmaxdepth, l_int32 *psame ); +LEPT_DLL extern PIXA * pixaConvertToSameDepth ( PIXA *pixas ); +LEPT_DLL extern l_int32 pixaEqual ( PIXA *pixa1, PIXA *pixa2, l_int32 maxdist, NUMA **pnaindex, l_int32 *psame ); +LEPT_DLL extern PIXA * pixaRotateOrth ( PIXA *pixas, l_int32 rotation ); +LEPT_DLL extern l_int32 pixaSetFullSizeBoxa ( PIXA *pixa ); +LEPT_DLL extern PIX * pixaDisplay ( PIXA *pixa, l_int32 w, l_int32 h ); +LEPT_DLL extern PIX * pixaDisplayOnColor ( PIXA *pixa, l_int32 w, l_int32 h, l_uint32 bgcolor ); +LEPT_DLL extern PIX * pixaDisplayRandomCmap ( PIXA *pixa, l_int32 w, l_int32 h ); +LEPT_DLL extern PIX * pixaDisplayLinearly ( PIXA *pixas, l_int32 direction, l_float32 scalefactor, l_int32 background, l_int32 spacing, l_int32 border, BOXA **pboxa ); +LEPT_DLL extern PIX * pixaDisplayOnLattice ( PIXA *pixa, l_int32 cellw, l_int32 cellh, l_int32 
*pncols, BOXA **pboxa ); +LEPT_DLL extern PIX * pixaDisplayUnsplit ( PIXA *pixa, l_int32 nx, l_int32 ny, l_int32 borderwidth, l_uint32 bordercolor ); +LEPT_DLL extern PIX * pixaDisplayTiled ( PIXA *pixa, l_int32 maxwidth, l_int32 background, l_int32 spacing ); +LEPT_DLL extern PIX * pixaDisplayTiledInRows ( PIXA *pixa, l_int32 outdepth, l_int32 maxwidth, l_float32 scalefactor, l_int32 background, l_int32 spacing, l_int32 border ); +LEPT_DLL extern PIX * pixaDisplayTiledInColumns ( PIXA *pixas, l_int32 nx, l_float32 scalefactor, l_int32 spacing, l_int32 border ); +LEPT_DLL extern PIX * pixaDisplayTiledAndScaled ( PIXA *pixa, l_int32 outdepth, l_int32 tilewidth, l_int32 ncols, l_int32 background, l_int32 spacing, l_int32 border ); +LEPT_DLL extern PIX * pixaDisplayTiledWithText ( PIXA *pixa, l_int32 maxwidth, l_float32 scalefactor, l_int32 spacing, l_int32 border, l_int32 fontsize, l_uint32 textcolor ); +LEPT_DLL extern PIX * pixaDisplayTiledByIndex ( PIXA *pixa, NUMA *na, l_int32 width, l_int32 spacing, l_int32 border, l_int32 fontsize, l_uint32 textcolor ); +LEPT_DLL extern PIX * pixaaDisplay ( PIXAA *paa, l_int32 w, l_int32 h ); +LEPT_DLL extern PIX * pixaaDisplayByPixa ( PIXAA *paa, l_int32 xspace, l_int32 yspace, l_int32 maxw ); +LEPT_DLL extern PIXA * pixaaDisplayTiledAndScaled ( PIXAA *paa, l_int32 outdepth, l_int32 tilewidth, l_int32 ncols, l_int32 background, l_int32 spacing, l_int32 border ); +LEPT_DLL extern PIXA * pixaConvertTo1 ( PIXA *pixas, l_int32 thresh ); +LEPT_DLL extern PIXA * pixaConvertTo8 ( PIXA *pixas, l_int32 cmapflag ); +LEPT_DLL extern PIXA * pixaConvertTo8Color ( PIXA *pixas, l_int32 dither ); +LEPT_DLL extern PIXA * pixaConvertTo32 ( PIXA *pixas ); +LEPT_DLL extern PIXA * pixaConstrainedSelect ( PIXA *pixas, l_int32 first, l_int32 last, l_int32 nmax, l_int32 use_pairs, l_int32 copyflag ); +LEPT_DLL extern l_int32 pixaSelectToPdf ( PIXA *pixas, l_int32 first, l_int32 last, l_int32 res, l_float32 scalefactor, l_int32 type, l_int32 quality, l_uint32 color, l_int32 fontsize, const char *fileout ); +LEPT_DLL extern PIXA * pixaDisplayMultiTiled ( PIXA *pixas, l_int32 nx, l_int32 ny, l_int32 maxw, l_int32 maxh, l_float32 scalefactor, l_int32 spacing, l_int32 border ); +LEPT_DLL extern l_int32 pixaSplitIntoFiles ( PIXA *pixas, l_int32 nsplit, l_float32 scale, l_int32 outwidth, l_int32 write_pixa, l_int32 write_pix, l_int32 write_pdf ); +LEPT_DLL extern l_int32 convertToNUpFiles ( const char *dir, const char *substr, l_int32 nx, l_int32 ny, l_int32 tw, l_int32 spacing, l_int32 border, l_int32 fontsize, const char *outdir ); +LEPT_DLL extern PIXA * convertToNUpPixa ( const char *dir, const char *substr, l_int32 nx, l_int32 ny, l_int32 tw, l_int32 spacing, l_int32 border, l_int32 fontsize ); +LEPT_DLL extern PIXA * pixaConvertToNUpPixa ( PIXA *pixas, SARRAY *sa, l_int32 nx, l_int32 ny, l_int32 tw, l_int32 spacing, l_int32 border, l_int32 fontsize ); +LEPT_DLL extern l_int32 pixaCompareInPdf ( PIXA *pixa1, PIXA *pixa2, l_int32 nx, l_int32 ny, l_int32 tw, l_int32 spacing, l_int32 border, l_int32 fontsize, const char *fileout ); +LEPT_DLL extern l_int32 pmsCreate ( size_t minsize, size_t smallest, NUMA *numalloc, const char *logfile ); +LEPT_DLL extern void pmsDestroy ( ); +LEPT_DLL extern void * pmsCustomAlloc ( size_t nbytes ); +LEPT_DLL extern void pmsCustomDealloc ( void *data ); +LEPT_DLL extern void * pmsGetAlloc ( size_t nbytes ); +LEPT_DLL extern l_int32 pmsGetLevelForAlloc ( size_t nbytes, l_int32 *plevel ); +LEPT_DLL extern l_int32 pmsGetLevelForDealloc ( void *data, 
l_int32 *plevel ); +LEPT_DLL extern void pmsLogInfo ( ); +LEPT_DLL extern l_int32 pixAddConstantGray ( PIX *pixs, l_int32 val ); +LEPT_DLL extern l_int32 pixMultConstantGray ( PIX *pixs, l_float32 val ); +LEPT_DLL extern PIX * pixAddGray ( PIX *pixd, PIX *pixs1, PIX *pixs2 ); +LEPT_DLL extern PIX * pixSubtractGray ( PIX *pixd, PIX *pixs1, PIX *pixs2 ); +LEPT_DLL extern PIX * pixThresholdToValue ( PIX *pixd, PIX *pixs, l_int32 threshval, l_int32 setval ); +LEPT_DLL extern PIX * pixInitAccumulate ( l_int32 w, l_int32 h, l_uint32 offset ); +LEPT_DLL extern PIX * pixFinalAccumulate ( PIX *pixs, l_uint32 offset, l_int32 depth ); +LEPT_DLL extern PIX * pixFinalAccumulateThreshold ( PIX *pixs, l_uint32 offset, l_uint32 threshold ); +LEPT_DLL extern l_int32 pixAccumulate ( PIX *pixd, PIX *pixs, l_int32 op ); +LEPT_DLL extern l_int32 pixMultConstAccumulate ( PIX *pixs, l_float32 factor, l_uint32 offset ); +LEPT_DLL extern PIX * pixAbsDifference ( PIX *pixs1, PIX *pixs2 ); +LEPT_DLL extern PIX * pixAddRGB ( PIX *pixs1, PIX *pixs2 ); +LEPT_DLL extern PIX * pixMinOrMax ( PIX *pixd, PIX *pixs1, PIX *pixs2, l_int32 type ); +LEPT_DLL extern PIX * pixMaxDynamicRange ( PIX *pixs, l_int32 type ); +LEPT_DLL extern PIX * pixMaxDynamicRangeRGB ( PIX *pixs, l_int32 type ); +LEPT_DLL extern l_uint32 linearScaleRGBVal ( l_uint32 sval, l_float32 factor ); +LEPT_DLL extern l_uint32 logScaleRGBVal ( l_uint32 sval, l_float32 *tab, l_float32 factor ); +LEPT_DLL extern l_float32 * makeLogBase2Tab ( void ); +LEPT_DLL extern l_float32 getLogBase2 ( l_int32 val, l_float32 *logtab ); +LEPT_DLL extern PIXC * pixcompCreateFromPix ( PIX *pix, l_int32 comptype ); +LEPT_DLL extern PIXC * pixcompCreateFromString ( l_uint8 *data, size_t size, l_int32 copyflag ); +LEPT_DLL extern PIXC * pixcompCreateFromFile ( const char *filename, l_int32 comptype ); +LEPT_DLL extern void pixcompDestroy ( PIXC **ppixc ); +LEPT_DLL extern PIXC * pixcompCopy ( PIXC *pixcs ); +LEPT_DLL extern l_int32 pixcompGetDimensions ( PIXC *pixc, l_int32 *pw, l_int32 *ph, l_int32 *pd ); +LEPT_DLL extern l_int32 pixcompDetermineFormat ( l_int32 comptype, l_int32 d, l_int32 cmapflag, l_int32 *pformat ); +LEPT_DLL extern PIX * pixCreateFromPixcomp ( PIXC *pixc ); +LEPT_DLL extern PIXAC * pixacompCreate ( l_int32 n ); +LEPT_DLL extern PIXAC * pixacompCreateWithInit ( l_int32 n, l_int32 offset, PIX *pix, l_int32 comptype ); +LEPT_DLL extern PIXAC * pixacompCreateFromPixa ( PIXA *pixa, l_int32 comptype, l_int32 accesstype ); +LEPT_DLL extern PIXAC * pixacompCreateFromFiles ( const char *dirname, const char *substr, l_int32 comptype ); +LEPT_DLL extern PIXAC * pixacompCreateFromSA ( SARRAY *sa, l_int32 comptype ); +LEPT_DLL extern void pixacompDestroy ( PIXAC **ppixac ); +LEPT_DLL extern l_int32 pixacompAddPix ( PIXAC *pixac, PIX *pix, l_int32 comptype ); +LEPT_DLL extern l_int32 pixacompAddPixcomp ( PIXAC *pixac, PIXC *pixc, l_int32 copyflag ); +LEPT_DLL extern l_int32 pixacompReplacePix ( PIXAC *pixac, l_int32 index, PIX *pix, l_int32 comptype ); +LEPT_DLL extern l_int32 pixacompReplacePixcomp ( PIXAC *pixac, l_int32 index, PIXC *pixc ); +LEPT_DLL extern l_int32 pixacompAddBox ( PIXAC *pixac, BOX *box, l_int32 copyflag ); +LEPT_DLL extern l_int32 pixacompGetCount ( PIXAC *pixac ); +LEPT_DLL extern PIXC * pixacompGetPixcomp ( PIXAC *pixac, l_int32 index, l_int32 copyflag ); +LEPT_DLL extern PIX * pixacompGetPix ( PIXAC *pixac, l_int32 index ); +LEPT_DLL extern l_int32 pixacompGetPixDimensions ( PIXAC *pixac, l_int32 index, l_int32 *pw, l_int32 *ph, l_int32 *pd ); 
+LEPT_DLL extern BOXA * pixacompGetBoxa ( PIXAC *pixac, l_int32 accesstype ); +LEPT_DLL extern l_int32 pixacompGetBoxaCount ( PIXAC *pixac ); +LEPT_DLL extern BOX * pixacompGetBox ( PIXAC *pixac, l_int32 index, l_int32 accesstype ); +LEPT_DLL extern l_int32 pixacompGetBoxGeometry ( PIXAC *pixac, l_int32 index, l_int32 *px, l_int32 *py, l_int32 *pw, l_int32 *ph ); +LEPT_DLL extern l_int32 pixacompGetOffset ( PIXAC *pixac ); +LEPT_DLL extern l_int32 pixacompSetOffset ( PIXAC *pixac, l_int32 offset ); +LEPT_DLL extern PIXA * pixaCreateFromPixacomp ( PIXAC *pixac, l_int32 accesstype ); +LEPT_DLL extern l_int32 pixacompJoin ( PIXAC *pixacd, PIXAC *pixacs, l_int32 istart, l_int32 iend ); +LEPT_DLL extern PIXAC * pixacompInterleave ( PIXAC *pixac1, PIXAC *pixac2 ); +LEPT_DLL extern PIXAC * pixacompRead ( const char *filename ); +LEPT_DLL extern PIXAC * pixacompReadStream ( FILE *fp ); +LEPT_DLL extern PIXAC * pixacompReadMem ( const l_uint8 *data, size_t size ); +LEPT_DLL extern l_int32 pixacompWrite ( const char *filename, PIXAC *pixac ); +LEPT_DLL extern l_int32 pixacompWriteStream ( FILE *fp, PIXAC *pixac ); +LEPT_DLL extern l_int32 pixacompWriteMem ( l_uint8 **pdata, size_t *psize, PIXAC *pixac ); +LEPT_DLL extern l_int32 pixacompConvertToPdf ( PIXAC *pixac, l_int32 res, l_float32 scalefactor, l_int32 type, l_int32 quality, const char *title, const char *fileout ); +LEPT_DLL extern l_int32 pixacompConvertToPdfData ( PIXAC *pixac, l_int32 res, l_float32 scalefactor, l_int32 type, l_int32 quality, const char *title, l_uint8 **pdata, size_t *pnbytes ); +LEPT_DLL extern l_int32 pixacompWriteStreamInfo ( FILE *fp, PIXAC *pixac, const char *text ); +LEPT_DLL extern l_int32 pixcompWriteStreamInfo ( FILE *fp, PIXC *pixc, const char *text ); +LEPT_DLL extern PIX * pixacompDisplayTiledAndScaled ( PIXAC *pixac, l_int32 outdepth, l_int32 tilewidth, l_int32 ncols, l_int32 background, l_int32 spacing, l_int32 border ); +LEPT_DLL extern PIX * pixThreshold8 ( PIX *pixs, l_int32 d, l_int32 nlevels, l_int32 cmapflag ); +LEPT_DLL extern PIX * pixRemoveColormapGeneral ( PIX *pixs, l_int32 type, l_int32 ifnocmap ); +LEPT_DLL extern PIX * pixRemoveColormap ( PIX *pixs, l_int32 type ); +LEPT_DLL extern l_int32 pixAddGrayColormap8 ( PIX *pixs ); +LEPT_DLL extern PIX * pixAddMinimalGrayColormap8 ( PIX *pixs ); +LEPT_DLL extern PIX * pixConvertRGBToLuminance ( PIX *pixs ); +LEPT_DLL extern PIX * pixConvertRGBToGray ( PIX *pixs, l_float32 rwt, l_float32 gwt, l_float32 bwt ); +LEPT_DLL extern PIX * pixConvertRGBToGrayFast ( PIX *pixs ); +LEPT_DLL extern PIX * pixConvertRGBToGrayMinMax ( PIX *pixs, l_int32 type ); +LEPT_DLL extern PIX * pixConvertRGBToGraySatBoost ( PIX *pixs, l_int32 refval ); +LEPT_DLL extern PIX * pixConvertRGBToGrayArb ( PIX *pixs, l_float32 rc, l_float32 gc, l_float32 bc ); +LEPT_DLL extern PIX * pixConvertRGBToBinaryArb ( PIX *pixs, l_float32 rc, l_float32 gc, l_float32 bc, l_int32 thresh, l_int32 relation ); +LEPT_DLL extern PIX * pixConvertGrayToColormap ( PIX *pixs ); +LEPT_DLL extern PIX * pixConvertGrayToColormap8 ( PIX *pixs, l_int32 mindepth ); +LEPT_DLL extern PIX * pixColorizeGray ( PIX *pixs, l_uint32 color, l_int32 cmapflag ); +LEPT_DLL extern PIX * pixConvertRGBToColormap ( PIX *pixs, l_int32 ditherflag ); +LEPT_DLL extern PIX * pixConvertCmapTo1 ( PIX *pixs ); +LEPT_DLL extern l_int32 pixQuantizeIfFewColors ( PIX *pixs, l_int32 maxcolors, l_int32 mingraycolors, l_int32 octlevel, PIX **ppixd ); +LEPT_DLL extern PIX * pixConvert16To8 ( PIX *pixs, l_int32 type ); +LEPT_DLL extern PIX * 
pixConvertGrayToFalseColor ( PIX *pixs, l_float32 gamma ); +LEPT_DLL extern PIX * pixUnpackBinary ( PIX *pixs, l_int32 depth, l_int32 invert ); +LEPT_DLL extern PIX * pixConvert1To16 ( PIX *pixd, PIX *pixs, l_uint16 val0, l_uint16 val1 ); +LEPT_DLL extern PIX * pixConvert1To32 ( PIX *pixd, PIX *pixs, l_uint32 val0, l_uint32 val1 ); +LEPT_DLL extern PIX * pixConvert1To2Cmap ( PIX *pixs ); +LEPT_DLL extern PIX * pixConvert1To2 ( PIX *pixd, PIX *pixs, l_int32 val0, l_int32 val1 ); +LEPT_DLL extern PIX * pixConvert1To4Cmap ( PIX *pixs ); +LEPT_DLL extern PIX * pixConvert1To4 ( PIX *pixd, PIX *pixs, l_int32 val0, l_int32 val1 ); +LEPT_DLL extern PIX * pixConvert1To8Cmap ( PIX *pixs ); +LEPT_DLL extern PIX * pixConvert1To8 ( PIX *pixd, PIX *pixs, l_uint8 val0, l_uint8 val1 ); +LEPT_DLL extern PIX * pixConvert2To8 ( PIX *pixs, l_uint8 val0, l_uint8 val1, l_uint8 val2, l_uint8 val3, l_int32 cmapflag ); +LEPT_DLL extern PIX * pixConvert4To8 ( PIX *pixs, l_int32 cmapflag ); +LEPT_DLL extern PIX * pixConvert8To16 ( PIX *pixs, l_int32 leftshift ); +LEPT_DLL extern PIX * pixConvertTo1 ( PIX *pixs, l_int32 threshold ); +LEPT_DLL extern PIX * pixConvertTo1BySampling ( PIX *pixs, l_int32 factor, l_int32 threshold ); +LEPT_DLL extern PIX * pixConvertTo8 ( PIX *pixs, l_int32 cmapflag ); +LEPT_DLL extern PIX * pixConvertTo8BySampling ( PIX *pixs, l_int32 factor, l_int32 cmapflag ); +LEPT_DLL extern PIX * pixConvertTo8Color ( PIX *pixs, l_int32 dither ); +LEPT_DLL extern PIX * pixConvertTo16 ( PIX *pixs ); +LEPT_DLL extern PIX * pixConvertTo32 ( PIX *pixs ); +LEPT_DLL extern PIX * pixConvertTo32BySampling ( PIX *pixs, l_int32 factor ); +LEPT_DLL extern PIX * pixConvert8To32 ( PIX *pixs ); +LEPT_DLL extern PIX * pixConvertTo8Or32 ( PIX *pixs, l_int32 copyflag, l_int32 warnflag ); +LEPT_DLL extern PIX * pixConvert24To32 ( PIX *pixs ); +LEPT_DLL extern PIX * pixConvert32To24 ( PIX *pixs ); +LEPT_DLL extern PIX * pixConvert32To16 ( PIX *pixs, l_int32 type ); +LEPT_DLL extern PIX * pixConvert32To8 ( PIX *pixs, l_int32 type16, l_int32 type8 ); +LEPT_DLL extern PIX * pixRemoveAlpha ( PIX *pixs ); +LEPT_DLL extern PIX * pixAddAlphaTo1bpp ( PIX *pixd, PIX *pixs ); +LEPT_DLL extern PIX * pixConvertLossless ( PIX *pixs, l_int32 d ); +LEPT_DLL extern PIX * pixConvertForPSWrap ( PIX *pixs ); +LEPT_DLL extern PIX * pixConvertToSubpixelRGB ( PIX *pixs, l_float32 scalex, l_float32 scaley, l_int32 order ); +LEPT_DLL extern PIX * pixConvertGrayToSubpixelRGB ( PIX *pixs, l_float32 scalex, l_float32 scaley, l_int32 order ); +LEPT_DLL extern PIX * pixConvertColorToSubpixelRGB ( PIX *pixs, l_float32 scalex, l_float32 scaley, l_int32 order ); +LEPT_DLL extern void l_setNeutralBoostVal ( l_int32 val ); +LEPT_DLL extern PIX * pixConnCompTransform ( PIX *pixs, l_int32 connect, l_int32 depth ); +LEPT_DLL extern PIX * pixConnCompAreaTransform ( PIX *pixs, l_int32 connect ); +LEPT_DLL extern l_int32 pixConnCompIncrInit ( PIX *pixs, l_int32 conn, PIX **ppixd, PTAA **pptaa, l_int32 *pncc ); +LEPT_DLL extern l_int32 pixConnCompIncrAdd ( PIX *pixs, PTAA *ptaa, l_int32 *pncc, l_float32 x, l_float32 y, l_int32 debug ); +LEPT_DLL extern l_int32 pixGetSortedNeighborValues ( PIX *pixs, l_int32 x, l_int32 y, l_int32 conn, l_int32 **pneigh, l_int32 *pnvals ); +LEPT_DLL extern PIX * pixLocToColorTransform ( PIX *pixs ); +LEPT_DLL extern PIXTILING * pixTilingCreate ( PIX *pixs, l_int32 nx, l_int32 ny, l_int32 w, l_int32 h, l_int32 xoverlap, l_int32 yoverlap ); +LEPT_DLL extern void pixTilingDestroy ( PIXTILING **ppt ); +LEPT_DLL extern l_int32 
pixTilingGetCount ( PIXTILING *pt, l_int32 *pnx, l_int32 *pny ); +LEPT_DLL extern l_int32 pixTilingGetSize ( PIXTILING *pt, l_int32 *pw, l_int32 *ph ); +LEPT_DLL extern PIX * pixTilingGetTile ( PIXTILING *pt, l_int32 i, l_int32 j ); +LEPT_DLL extern l_int32 pixTilingNoStripOnPaint ( PIXTILING *pt ); +LEPT_DLL extern l_int32 pixTilingPaintTile ( PIX *pixd, l_int32 i, l_int32 j, PIX *pixs, PIXTILING *pt ); +LEPT_DLL extern PIX * pixReadStreamPng ( FILE *fp ); +LEPT_DLL extern l_int32 readHeaderPng ( const char *filename, l_int32 *pw, l_int32 *ph, l_int32 *pbps, l_int32 *pspp, l_int32 *piscmap ); +LEPT_DLL extern l_int32 freadHeaderPng ( FILE *fp, l_int32 *pw, l_int32 *ph, l_int32 *pbps, l_int32 *pspp, l_int32 *piscmap ); +LEPT_DLL extern l_int32 readHeaderMemPng ( const l_uint8 *data, size_t size, l_int32 *pw, l_int32 *ph, l_int32 *pbps, l_int32 *pspp, l_int32 *piscmap ); +LEPT_DLL extern l_int32 fgetPngResolution ( FILE *fp, l_int32 *pxres, l_int32 *pyres ); +LEPT_DLL extern l_int32 isPngInterlaced ( const char *filename, l_int32 *pinterlaced ); +LEPT_DLL extern l_int32 fgetPngColormapInfo ( FILE *fp, PIXCMAP **pcmap, l_int32 *ptransparency ); +LEPT_DLL extern l_int32 pixWritePng ( const char *filename, PIX *pix, l_float32 gamma ); +LEPT_DLL extern l_int32 pixWriteStreamPng ( FILE *fp, PIX *pix, l_float32 gamma ); +LEPT_DLL extern l_int32 pixSetZlibCompression ( PIX *pix, l_int32 compval ); +LEPT_DLL extern void l_pngSetReadStrip16To8 ( l_int32 flag ); +LEPT_DLL extern PIX * pixReadMemPng ( const l_uint8 *filedata, size_t filesize ); +LEPT_DLL extern l_int32 pixWriteMemPng ( l_uint8 **pfiledata, size_t *pfilesize, PIX *pix, l_float32 gamma ); +LEPT_DLL extern PIX * pixReadStreamPnm ( FILE *fp ); +LEPT_DLL extern l_int32 readHeaderPnm ( const char *filename, l_int32 *pw, l_int32 *ph, l_int32 *pd, l_int32 *ptype, l_int32 *pbps, l_int32 *pspp ); +LEPT_DLL extern l_int32 freadHeaderPnm ( FILE *fp, l_int32 *pw, l_int32 *ph, l_int32 *pd, l_int32 *ptype, l_int32 *pbps, l_int32 *pspp ); +LEPT_DLL extern l_int32 pixWriteStreamPnm ( FILE *fp, PIX *pix ); +LEPT_DLL extern l_int32 pixWriteStreamAsciiPnm ( FILE *fp, PIX *pix ); +LEPT_DLL extern l_int32 pixWriteStreamPam ( FILE *fp, PIX *pix ); +LEPT_DLL extern PIX * pixReadMemPnm ( const l_uint8 *data, size_t size ); +LEPT_DLL extern l_int32 readHeaderMemPnm ( const l_uint8 *data, size_t size, l_int32 *pw, l_int32 *ph, l_int32 *pd, l_int32 *ptype, l_int32 *pbps, l_int32 *pspp ); +LEPT_DLL extern l_int32 pixWriteMemPnm ( l_uint8 **pdata, size_t *psize, PIX *pix ); +LEPT_DLL extern l_int32 pixWriteMemPam ( l_uint8 **pdata, size_t *psize, PIX *pix ); +LEPT_DLL extern PIX * pixProjectiveSampledPta ( PIX *pixs, PTA *ptad, PTA *ptas, l_int32 incolor ); +LEPT_DLL extern PIX * pixProjectiveSampled ( PIX *pixs, l_float32 *vc, l_int32 incolor ); +LEPT_DLL extern PIX * pixProjectivePta ( PIX *pixs, PTA *ptad, PTA *ptas, l_int32 incolor ); +LEPT_DLL extern PIX * pixProjective ( PIX *pixs, l_float32 *vc, l_int32 incolor ); +LEPT_DLL extern PIX * pixProjectivePtaColor ( PIX *pixs, PTA *ptad, PTA *ptas, l_uint32 colorval ); +LEPT_DLL extern PIX * pixProjectiveColor ( PIX *pixs, l_float32 *vc, l_uint32 colorval ); +LEPT_DLL extern PIX * pixProjectivePtaGray ( PIX *pixs, PTA *ptad, PTA *ptas, l_uint8 grayval ); +LEPT_DLL extern PIX * pixProjectiveGray ( PIX *pixs, l_float32 *vc, l_uint8 grayval ); +LEPT_DLL extern PIX * pixProjectivePtaWithAlpha ( PIX *pixs, PTA *ptad, PTA *ptas, PIX *pixg, l_float32 fract, l_int32 border ); +LEPT_DLL extern l_int32 
getProjectiveXformCoeffs ( PTA *ptas, PTA *ptad, l_float32 **pvc ); +LEPT_DLL extern l_int32 projectiveXformSampledPt ( l_float32 *vc, l_int32 x, l_int32 y, l_int32 *pxp, l_int32 *pyp ); +LEPT_DLL extern l_int32 projectiveXformPt ( l_float32 *vc, l_int32 x, l_int32 y, l_float32 *pxp, l_float32 *pyp ); +LEPT_DLL extern l_int32 convertFilesToPS ( const char *dirin, const char *substr, l_int32 res, const char *fileout ); +LEPT_DLL extern l_int32 sarrayConvertFilesToPS ( SARRAY *sa, l_int32 res, const char *fileout ); +LEPT_DLL extern l_int32 convertFilesFittedToPS ( const char *dirin, const char *substr, l_float32 xpts, l_float32 ypts, const char *fileout ); +LEPT_DLL extern l_int32 sarrayConvertFilesFittedToPS ( SARRAY *sa, l_float32 xpts, l_float32 ypts, const char *fileout ); +LEPT_DLL extern l_int32 writeImageCompressedToPSFile ( const char *filein, const char *fileout, l_int32 res, l_int32 *pfirstfile, l_int32 *pindex ); +LEPT_DLL extern l_int32 convertSegmentedPagesToPS ( const char *pagedir, const char *pagestr, l_int32 page_numpre, const char *maskdir, const char *maskstr, l_int32 mask_numpre, l_int32 numpost, l_int32 maxnum, l_float32 textscale, l_float32 imagescale, l_int32 threshold, const char *fileout ); +LEPT_DLL extern l_int32 pixWriteSegmentedPageToPS ( PIX *pixs, PIX *pixm, l_float32 textscale, l_float32 imagescale, l_int32 threshold, l_int32 pageno, const char *fileout ); +LEPT_DLL extern l_int32 pixWriteMixedToPS ( PIX *pixb, PIX *pixc, l_float32 scale, l_int32 pageno, const char *fileout ); +LEPT_DLL extern l_int32 convertToPSEmbed ( const char *filein, const char *fileout, l_int32 level ); +LEPT_DLL extern l_int32 pixaWriteCompressedToPS ( PIXA *pixa, const char *fileout, l_int32 res, l_int32 level ); +LEPT_DLL extern l_int32 pixWritePSEmbed ( const char *filein, const char *fileout ); +LEPT_DLL extern l_int32 pixWriteStreamPS ( FILE *fp, PIX *pix, BOX *box, l_int32 res, l_float32 scale ); +LEPT_DLL extern char * pixWriteStringPS ( PIX *pixs, BOX *box, l_int32 res, l_float32 scale ); +LEPT_DLL extern char * generateUncompressedPS ( char *hexdata, l_int32 w, l_int32 h, l_int32 d, l_int32 psbpl, l_int32 bps, l_float32 xpt, l_float32 ypt, l_float32 wpt, l_float32 hpt, l_int32 boxflag ); +LEPT_DLL extern void getScaledParametersPS ( BOX *box, l_int32 wpix, l_int32 hpix, l_int32 res, l_float32 scale, l_float32 *pxpt, l_float32 *pypt, l_float32 *pwpt, l_float32 *phpt ); +LEPT_DLL extern void convertByteToHexAscii ( l_uint8 byteval, char *pnib1, char *pnib2 ); +LEPT_DLL extern l_int32 convertJpegToPSEmbed ( const char *filein, const char *fileout ); +LEPT_DLL extern l_int32 convertJpegToPS ( const char *filein, const char *fileout, const char *operation, l_int32 x, l_int32 y, l_int32 res, l_float32 scale, l_int32 pageno, l_int32 endpage ); +LEPT_DLL extern l_int32 convertJpegToPSString ( const char *filein, char **poutstr, l_int32 *pnbytes, l_int32 x, l_int32 y, l_int32 res, l_float32 scale, l_int32 pageno, l_int32 endpage ); +LEPT_DLL extern char * generateJpegPS ( const char *filein, L_COMP_DATA *cid, l_float32 xpt, l_float32 ypt, l_float32 wpt, l_float32 hpt, l_int32 pageno, l_int32 endpage ); +LEPT_DLL extern l_int32 convertG4ToPSEmbed ( const char *filein, const char *fileout ); +LEPT_DLL extern l_int32 convertG4ToPS ( const char *filein, const char *fileout, const char *operation, l_int32 x, l_int32 y, l_int32 res, l_float32 scale, l_int32 pageno, l_int32 maskflag, l_int32 endpage ); +LEPT_DLL extern l_int32 convertG4ToPSString ( const char *filein, char **poutstr, l_int32 
*pnbytes, l_int32 x, l_int32 y, l_int32 res, l_float32 scale, l_int32 pageno, l_int32 maskflag, l_int32 endpage ); +LEPT_DLL extern char * generateG4PS ( const char *filein, L_COMP_DATA *cid, l_float32 xpt, l_float32 ypt, l_float32 wpt, l_float32 hpt, l_int32 maskflag, l_int32 pageno, l_int32 endpage ); +LEPT_DLL extern l_int32 convertTiffMultipageToPS ( const char *filein, const char *fileout, l_float32 fillfract ); +LEPT_DLL extern l_int32 convertFlateToPSEmbed ( const char *filein, const char *fileout ); +LEPT_DLL extern l_int32 convertFlateToPS ( const char *filein, const char *fileout, const char *operation, l_int32 x, l_int32 y, l_int32 res, l_float32 scale, l_int32 pageno, l_int32 endpage ); +LEPT_DLL extern l_int32 convertFlateToPSString ( const char *filein, char **poutstr, l_int32 *pnbytes, l_int32 x, l_int32 y, l_int32 res, l_float32 scale, l_int32 pageno, l_int32 endpage ); +LEPT_DLL extern char * generateFlatePS ( const char *filein, L_COMP_DATA *cid, l_float32 xpt, l_float32 ypt, l_float32 wpt, l_float32 hpt, l_int32 pageno, l_int32 endpage ); +LEPT_DLL extern l_int32 pixWriteMemPS ( l_uint8 **pdata, size_t *psize, PIX *pix, BOX *box, l_int32 res, l_float32 scale ); +LEPT_DLL extern l_int32 getResLetterPage ( l_int32 w, l_int32 h, l_float32 fillfract ); +LEPT_DLL extern l_int32 getResA4Page ( l_int32 w, l_int32 h, l_float32 fillfract ); +LEPT_DLL extern void l_psWriteBoundingBox ( l_int32 flag ); +LEPT_DLL extern PTA * ptaCreate ( l_int32 n ); +LEPT_DLL extern PTA * ptaCreateFromNuma ( NUMA *nax, NUMA *nay ); +LEPT_DLL extern void ptaDestroy ( PTA **ppta ); +LEPT_DLL extern PTA * ptaCopy ( PTA *pta ); +LEPT_DLL extern PTA * ptaCopyRange ( PTA *ptas, l_int32 istart, l_int32 iend ); +LEPT_DLL extern PTA * ptaClone ( PTA *pta ); +LEPT_DLL extern l_int32 ptaEmpty ( PTA *pta ); +LEPT_DLL extern l_int32 ptaAddPt ( PTA *pta, l_float32 x, l_float32 y ); +LEPT_DLL extern l_int32 ptaInsertPt ( PTA *pta, l_int32 index, l_int32 x, l_int32 y ); +LEPT_DLL extern l_int32 ptaRemovePt ( PTA *pta, l_int32 index ); +LEPT_DLL extern l_int32 ptaGetRefcount ( PTA *pta ); +LEPT_DLL extern l_int32 ptaChangeRefcount ( PTA *pta, l_int32 delta ); +LEPT_DLL extern l_int32 ptaGetCount ( PTA *pta ); +LEPT_DLL extern l_int32 ptaGetPt ( PTA *pta, l_int32 index, l_float32 *px, l_float32 *py ); +LEPT_DLL extern l_int32 ptaGetIPt ( PTA *pta, l_int32 index, l_int32 *px, l_int32 *py ); +LEPT_DLL extern l_int32 ptaSetPt ( PTA *pta, l_int32 index, l_float32 x, l_float32 y ); +LEPT_DLL extern l_int32 ptaGetArrays ( PTA *pta, NUMA **pnax, NUMA **pnay ); +LEPT_DLL extern PTA * ptaRead ( const char *filename ); +LEPT_DLL extern PTA * ptaReadStream ( FILE *fp ); +LEPT_DLL extern PTA * ptaReadMem ( const l_uint8 *data, size_t size ); +LEPT_DLL extern l_int32 ptaWrite ( const char *filename, PTA *pta, l_int32 type ); +LEPT_DLL extern l_int32 ptaWriteStream ( FILE *fp, PTA *pta, l_int32 type ); +LEPT_DLL extern l_int32 ptaWriteMem ( l_uint8 **pdata, size_t *psize, PTA *pta, l_int32 type ); +LEPT_DLL extern PTAA * ptaaCreate ( l_int32 n ); +LEPT_DLL extern void ptaaDestroy ( PTAA **pptaa ); +LEPT_DLL extern l_int32 ptaaAddPta ( PTAA *ptaa, PTA *pta, l_int32 copyflag ); +LEPT_DLL extern l_int32 ptaaGetCount ( PTAA *ptaa ); +LEPT_DLL extern PTA * ptaaGetPta ( PTAA *ptaa, l_int32 index, l_int32 accessflag ); +LEPT_DLL extern l_int32 ptaaGetPt ( PTAA *ptaa, l_int32 ipta, l_int32 jpt, l_float32 *px, l_float32 *py ); +LEPT_DLL extern l_int32 ptaaInitFull ( PTAA *ptaa, PTA *pta ); +LEPT_DLL extern l_int32 ptaaReplacePta ( PTAA 
*ptaa, l_int32 index, PTA *pta ); +LEPT_DLL extern l_int32 ptaaAddPt ( PTAA *ptaa, l_int32 ipta, l_float32 x, l_float32 y ); +LEPT_DLL extern l_int32 ptaaTruncate ( PTAA *ptaa ); +LEPT_DLL extern PTAA * ptaaRead ( const char *filename ); +LEPT_DLL extern PTAA * ptaaReadStream ( FILE *fp ); +LEPT_DLL extern PTAA * ptaaReadMem ( const l_uint8 *data, size_t size ); +LEPT_DLL extern l_int32 ptaaWrite ( const char *filename, PTAA *ptaa, l_int32 type ); +LEPT_DLL extern l_int32 ptaaWriteStream ( FILE *fp, PTAA *ptaa, l_int32 type ); +LEPT_DLL extern l_int32 ptaaWriteMem ( l_uint8 **pdata, size_t *psize, PTAA *ptaa, l_int32 type ); +LEPT_DLL extern PTA * ptaSubsample ( PTA *ptas, l_int32 subfactor ); +LEPT_DLL extern l_int32 ptaJoin ( PTA *ptad, PTA *ptas, l_int32 istart, l_int32 iend ); +LEPT_DLL extern l_int32 ptaaJoin ( PTAA *ptaad, PTAA *ptaas, l_int32 istart, l_int32 iend ); +LEPT_DLL extern PTA * ptaReverse ( PTA *ptas, l_int32 type ); +LEPT_DLL extern PTA * ptaTranspose ( PTA *ptas ); +LEPT_DLL extern PTA * ptaCyclicPerm ( PTA *ptas, l_int32 xs, l_int32 ys ); +LEPT_DLL extern PTA * ptaSelectRange ( PTA *ptas, l_int32 first, l_int32 last ); +LEPT_DLL extern BOX * ptaGetBoundingRegion ( PTA *pta ); +LEPT_DLL extern l_int32 ptaGetRange ( PTA *pta, l_float32 *pminx, l_float32 *pmaxx, l_float32 *pminy, l_float32 *pmaxy ); +LEPT_DLL extern PTA * ptaGetInsideBox ( PTA *ptas, BOX *box ); +LEPT_DLL extern PTA * pixFindCornerPixels ( PIX *pixs ); +LEPT_DLL extern l_int32 ptaContainsPt ( PTA *pta, l_int32 x, l_int32 y ); +LEPT_DLL extern l_int32 ptaTestIntersection ( PTA *pta1, PTA *pta2 ); +LEPT_DLL extern PTA * ptaTransform ( PTA *ptas, l_int32 shiftx, l_int32 shifty, l_float32 scalex, l_float32 scaley ); +LEPT_DLL extern l_int32 ptaPtInsidePolygon ( PTA *pta, l_float32 x, l_float32 y, l_int32 *pinside ); +LEPT_DLL extern l_float32 l_angleBetweenVectors ( l_float32 x1, l_float32 y1, l_float32 x2, l_float32 y2 ); +LEPT_DLL extern l_int32 ptaGetMinMax ( PTA *pta, l_float32 *pxmin, l_float32 *pymin, l_float32 *pxmax, l_float32 *pymax ); +LEPT_DLL extern PTA * ptaSelectByValue ( PTA *ptas, l_float32 xth, l_float32 yth, l_int32 type, l_int32 relation ); +LEPT_DLL extern PTA * ptaCropToMask ( PTA *ptas, PIX *pixm ); +LEPT_DLL extern l_int32 ptaGetLinearLSF ( PTA *pta, l_float32 *pa, l_float32 *pb, NUMA **pnafit ); +LEPT_DLL extern l_int32 ptaGetQuadraticLSF ( PTA *pta, l_float32 *pa, l_float32 *pb, l_float32 *pc, NUMA **pnafit ); +LEPT_DLL extern l_int32 ptaGetCubicLSF ( PTA *pta, l_float32 *pa, l_float32 *pb, l_float32 *pc, l_float32 *pd, NUMA **pnafit ); +LEPT_DLL extern l_int32 ptaGetQuarticLSF ( PTA *pta, l_float32 *pa, l_float32 *pb, l_float32 *pc, l_float32 *pd, l_float32 *pe, NUMA **pnafit ); +LEPT_DLL extern l_int32 ptaNoisyLinearLSF ( PTA *pta, l_float32 factor, PTA **pptad, l_float32 *pa, l_float32 *pb, l_float32 *pmederr, NUMA **pnafit ); +LEPT_DLL extern l_int32 ptaNoisyQuadraticLSF ( PTA *pta, l_float32 factor, PTA **pptad, l_float32 *pa, l_float32 *pb, l_float32 *pc, l_float32 *pmederr, NUMA **pnafit ); +LEPT_DLL extern l_int32 applyLinearFit ( l_float32 a, l_float32 b, l_float32 x, l_float32 *py ); +LEPT_DLL extern l_int32 applyQuadraticFit ( l_float32 a, l_float32 b, l_float32 c, l_float32 x, l_float32 *py ); +LEPT_DLL extern l_int32 applyCubicFit ( l_float32 a, l_float32 b, l_float32 c, l_float32 d, l_float32 x, l_float32 *py ); +LEPT_DLL extern l_int32 applyQuarticFit ( l_float32 a, l_float32 b, l_float32 c, l_float32 d, l_float32 e, l_float32 x, l_float32 *py ); +LEPT_DLL extern 
l_int32 pixPlotAlongPta ( PIX *pixs, PTA *pta, l_int32 outformat, const char *title ); +LEPT_DLL extern PTA * ptaGetPixelsFromPix ( PIX *pixs, BOX *box ); +LEPT_DLL extern PIX * pixGenerateFromPta ( PTA *pta, l_int32 w, l_int32 h ); +LEPT_DLL extern PTA * ptaGetBoundaryPixels ( PIX *pixs, l_int32 type ); +LEPT_DLL extern PTAA * ptaaGetBoundaryPixels ( PIX *pixs, l_int32 type, l_int32 connectivity, BOXA **pboxa, PIXA **ppixa ); +LEPT_DLL extern PTAA * ptaaIndexLabeledPixels ( PIX *pixs, l_int32 *pncc ); +LEPT_DLL extern PTA * ptaGetNeighborPixLocs ( PIX *pixs, l_int32 x, l_int32 y, l_int32 conn ); +LEPT_DLL extern PTA * numaConvertToPta1 ( NUMA *na ); +LEPT_DLL extern PTA * numaConvertToPta2 ( NUMA *nax, NUMA *nay ); +LEPT_DLL extern l_int32 ptaConvertToNuma ( PTA *pta, NUMA **pnax, NUMA **pnay ); +LEPT_DLL extern PIX * pixDisplayPta ( PIX *pixd, PIX *pixs, PTA *pta ); +LEPT_DLL extern PIX * pixDisplayPtaaPattern ( PIX *pixd, PIX *pixs, PTAA *ptaa, PIX *pixp, l_int32 cx, l_int32 cy ); +LEPT_DLL extern PIX * pixDisplayPtaPattern ( PIX *pixd, PIX *pixs, PTA *pta, PIX *pixp, l_int32 cx, l_int32 cy, l_uint32 color ); +LEPT_DLL extern PTA * ptaReplicatePattern ( PTA *ptas, PIX *pixp, PTA *ptap, l_int32 cx, l_int32 cy, l_int32 w, l_int32 h ); +LEPT_DLL extern PIX * pixDisplayPtaa ( PIX *pixs, PTAA *ptaa ); +LEPT_DLL extern PTA * ptaSort ( PTA *ptas, l_int32 sorttype, l_int32 sortorder, NUMA **pnaindex ); +LEPT_DLL extern l_int32 ptaGetSortIndex ( PTA *ptas, l_int32 sorttype, l_int32 sortorder, NUMA **pnaindex ); +LEPT_DLL extern PTA * ptaSortByIndex ( PTA *ptas, NUMA *naindex ); +LEPT_DLL extern PTAA * ptaaSortByIndex ( PTAA *ptaas, NUMA *naindex ); +LEPT_DLL extern l_int32 ptaGetRankValue ( PTA *pta, l_float32 fract, PTA *ptasort, l_int32 sorttype, l_float32 *pval ); +LEPT_DLL extern PTA * ptaUnionByAset ( PTA *pta1, PTA *pta2 ); +LEPT_DLL extern PTA * ptaRemoveDupsByAset ( PTA *ptas ); +LEPT_DLL extern PTA * ptaIntersectionByAset ( PTA *pta1, PTA *pta2 ); +LEPT_DLL extern L_ASET * l_asetCreateFromPta ( PTA *pta ); +LEPT_DLL extern PTA * ptaUnionByHash ( PTA *pta1, PTA *pta2 ); +LEPT_DLL extern l_int32 ptaRemoveDupsByHash ( PTA *ptas, PTA **pptad, L_DNAHASH **pdahash ); +LEPT_DLL extern PTA * ptaIntersectionByHash ( PTA *pta1, PTA *pta2 ); +LEPT_DLL extern l_int32 ptaFindPtByHash ( PTA *pta, L_DNAHASH *dahash, l_int32 x, l_int32 y, l_int32 *pindex ); +LEPT_DLL extern L_DNAHASH * l_dnaHashCreateFromPta ( PTA *pta ); +LEPT_DLL extern L_PTRA * ptraCreate ( l_int32 n ); +LEPT_DLL extern void ptraDestroy ( L_PTRA **ppa, l_int32 freeflag, l_int32 warnflag ); +LEPT_DLL extern l_int32 ptraAdd ( L_PTRA *pa, void *item ); +LEPT_DLL extern l_int32 ptraInsert ( L_PTRA *pa, l_int32 index, void *item, l_int32 shiftflag ); +LEPT_DLL extern void * ptraRemove ( L_PTRA *pa, l_int32 index, l_int32 flag ); +LEPT_DLL extern void * ptraRemoveLast ( L_PTRA *pa ); +LEPT_DLL extern void * ptraReplace ( L_PTRA *pa, l_int32 index, void *item, l_int32 freeflag ); +LEPT_DLL extern l_int32 ptraSwap ( L_PTRA *pa, l_int32 index1, l_int32 index2 ); +LEPT_DLL extern l_int32 ptraCompactArray ( L_PTRA *pa ); +LEPT_DLL extern l_int32 ptraReverse ( L_PTRA *pa ); +LEPT_DLL extern l_int32 ptraJoin ( L_PTRA *pa1, L_PTRA *pa2 ); +LEPT_DLL extern l_int32 ptraGetMaxIndex ( L_PTRA *pa, l_int32 *pmaxindex ); +LEPT_DLL extern l_int32 ptraGetActualCount ( L_PTRA *pa, l_int32 *pcount ); +LEPT_DLL extern void * ptraGetPtrToItem ( L_PTRA *pa, l_int32 index ); +LEPT_DLL extern L_PTRAA * ptraaCreate ( l_int32 n ); +LEPT_DLL extern void 
ptraaDestroy ( L_PTRAA **ppaa, l_int32 freeflag, l_int32 warnflag ); +LEPT_DLL extern l_int32 ptraaGetSize ( L_PTRAA *paa, l_int32 *psize ); +LEPT_DLL extern l_int32 ptraaInsertPtra ( L_PTRAA *paa, l_int32 index, L_PTRA *pa ); +LEPT_DLL extern L_PTRA * ptraaGetPtra ( L_PTRAA *paa, l_int32 index, l_int32 accessflag ); +LEPT_DLL extern L_PTRA * ptraaFlattenToPtra ( L_PTRAA *paa ); +LEPT_DLL extern l_int32 pixQuadtreeMean ( PIX *pixs, l_int32 nlevels, PIX *pix_ma, FPIXA **pfpixa ); +LEPT_DLL extern l_int32 pixQuadtreeVariance ( PIX *pixs, l_int32 nlevels, PIX *pix_ma, DPIX *dpix_msa, FPIXA **pfpixa_v, FPIXA **pfpixa_rv ); +LEPT_DLL extern l_int32 pixMeanInRectangle ( PIX *pixs, BOX *box, PIX *pixma, l_float32 *pval ); +LEPT_DLL extern l_int32 pixVarianceInRectangle ( PIX *pixs, BOX *box, PIX *pix_ma, DPIX *dpix_msa, l_float32 *pvar, l_float32 *prvar ); +LEPT_DLL extern BOXAA * boxaaQuadtreeRegions ( l_int32 w, l_int32 h, l_int32 nlevels ); +LEPT_DLL extern l_int32 quadtreeGetParent ( FPIXA *fpixa, l_int32 level, l_int32 x, l_int32 y, l_float32 *pval ); +LEPT_DLL extern l_int32 quadtreeGetChildren ( FPIXA *fpixa, l_int32 level, l_int32 x, l_int32 y, l_float32 *pval00, l_float32 *pval10, l_float32 *pval01, l_float32 *pval11 ); +LEPT_DLL extern l_int32 quadtreeMaxLevels ( l_int32 w, l_int32 h ); +LEPT_DLL extern PIX * fpixaDisplayQuadtree ( FPIXA *fpixa, l_int32 factor, l_int32 fontsize ); +LEPT_DLL extern L_QUEUE * lqueueCreate ( l_int32 nalloc ); +LEPT_DLL extern void lqueueDestroy ( L_QUEUE **plq, l_int32 freeflag ); +LEPT_DLL extern l_int32 lqueueAdd ( L_QUEUE *lq, void *item ); +LEPT_DLL extern void * lqueueRemove ( L_QUEUE *lq ); +LEPT_DLL extern l_int32 lqueueGetCount ( L_QUEUE *lq ); +LEPT_DLL extern l_int32 lqueuePrint ( FILE *fp, L_QUEUE *lq ); +LEPT_DLL extern PIX * pixRankFilter ( PIX *pixs, l_int32 wf, l_int32 hf, l_float32 rank ); +LEPT_DLL extern PIX * pixRankFilterRGB ( PIX *pixs, l_int32 wf, l_int32 hf, l_float32 rank ); +LEPT_DLL extern PIX * pixRankFilterGray ( PIX *pixs, l_int32 wf, l_int32 hf, l_float32 rank ); +LEPT_DLL extern PIX * pixMedianFilter ( PIX *pixs, l_int32 wf, l_int32 hf ); +LEPT_DLL extern PIX * pixRankFilterWithScaling ( PIX *pixs, l_int32 wf, l_int32 hf, l_float32 rank, l_float32 scalefactor ); +LEPT_DLL extern L_RBTREE * l_rbtreeCreate ( l_int32 keytype ); +LEPT_DLL extern RB_TYPE * l_rbtreeLookup ( L_RBTREE *t, RB_TYPE key ); +LEPT_DLL extern void l_rbtreeInsert ( L_RBTREE *t, RB_TYPE key, RB_TYPE value ); +LEPT_DLL extern void l_rbtreeDelete ( L_RBTREE *t, RB_TYPE key ); +LEPT_DLL extern void l_rbtreeDestroy ( L_RBTREE **pt ); +LEPT_DLL extern L_RBTREE_NODE * l_rbtreeGetFirst ( L_RBTREE *t ); +LEPT_DLL extern L_RBTREE_NODE * l_rbtreeGetNext ( L_RBTREE_NODE *n ); +LEPT_DLL extern L_RBTREE_NODE * l_rbtreeGetLast ( L_RBTREE *t ); +LEPT_DLL extern L_RBTREE_NODE * l_rbtreeGetPrev ( L_RBTREE_NODE *n ); +LEPT_DLL extern l_int32 l_rbtreeGetCount ( L_RBTREE *t ); +LEPT_DLL extern void l_rbtreePrint ( FILE *fp, L_RBTREE *t ); +LEPT_DLL extern SARRAY * pixProcessBarcodes ( PIX *pixs, l_int32 format, l_int32 method, SARRAY **psaw, l_int32 debugflag ); +LEPT_DLL extern PIXA * pixExtractBarcodes ( PIX *pixs, l_int32 debugflag ); +LEPT_DLL extern SARRAY * pixReadBarcodes ( PIXA *pixa, l_int32 format, l_int32 method, SARRAY **psaw, l_int32 debugflag ); +LEPT_DLL extern NUMA * pixReadBarcodeWidths ( PIX *pixs, l_int32 method, l_int32 debugflag ); +LEPT_DLL extern BOXA * pixLocateBarcodes ( PIX *pixs, l_int32 thresh, PIX **ppixb, PIX **ppixm ); +LEPT_DLL extern PIX * 
pixDeskewBarcode ( PIX *pixs, PIX *pixb, BOX *box, l_int32 margin, l_int32 threshold, l_float32 *pangle, l_float32 *pconf ); +LEPT_DLL extern NUMA * pixExtractBarcodeWidths1 ( PIX *pixs, l_float32 thresh, l_float32 binfract, NUMA **pnaehist, NUMA **pnaohist, l_int32 debugflag ); +LEPT_DLL extern NUMA * pixExtractBarcodeWidths2 ( PIX *pixs, l_float32 thresh, l_float32 *pwidth, NUMA **pnac, l_int32 debugflag ); +LEPT_DLL extern NUMA * pixExtractBarcodeCrossings ( PIX *pixs, l_float32 thresh, l_int32 debugflag ); +LEPT_DLL extern NUMA * numaQuantizeCrossingsByWidth ( NUMA *nas, l_float32 binfract, NUMA **pnaehist, NUMA **pnaohist, l_int32 debugflag ); +LEPT_DLL extern NUMA * numaQuantizeCrossingsByWindow ( NUMA *nas, l_float32 ratio, l_float32 *pwidth, l_float32 *pfirstloc, NUMA **pnac, l_int32 debugflag ); +LEPT_DLL extern PIXA * pixaReadFiles ( const char *dirname, const char *substr ); +LEPT_DLL extern PIXA * pixaReadFilesSA ( SARRAY *sa ); +LEPT_DLL extern PIX * pixRead ( const char *filename ); +LEPT_DLL extern PIX * pixReadWithHint ( const char *filename, l_int32 hint ); +LEPT_DLL extern PIX * pixReadIndexed ( SARRAY *sa, l_int32 index ); +LEPT_DLL extern PIX * pixReadStream ( FILE *fp, l_int32 hint ); +LEPT_DLL extern l_int32 pixReadHeader ( const char *filename, l_int32 *pformat, l_int32 *pw, l_int32 *ph, l_int32 *pbps, l_int32 *pspp, l_int32 *piscmap ); +LEPT_DLL extern l_int32 findFileFormat ( const char *filename, l_int32 *pformat ); +LEPT_DLL extern l_int32 findFileFormatStream ( FILE *fp, l_int32 *pformat ); +LEPT_DLL extern l_int32 findFileFormatBuffer ( const l_uint8 *buf, l_int32 *pformat ); +LEPT_DLL extern l_int32 fileFormatIsTiff ( FILE *fp ); +LEPT_DLL extern PIX * pixReadMem ( const l_uint8 *data, size_t size ); +LEPT_DLL extern l_int32 pixReadHeaderMem ( const l_uint8 *data, size_t size, l_int32 *pformat, l_int32 *pw, l_int32 *ph, l_int32 *pbps, l_int32 *pspp, l_int32 *piscmap ); +LEPT_DLL extern l_int32 writeImageFileInfo ( const char *filename, FILE *fpout, l_int32 headeronly ); +LEPT_DLL extern l_int32 ioFormatTest ( const char *filename ); +LEPT_DLL extern L_RECOG * recogCreateFromRecog ( L_RECOG *recs, l_int32 scalew, l_int32 scaleh, l_int32 linew, l_int32 threshold, l_int32 maxyshift ); +LEPT_DLL extern L_RECOG * recogCreateFromPixa ( PIXA *pixa, l_int32 scalew, l_int32 scaleh, l_int32 linew, l_int32 threshold, l_int32 maxyshift ); +LEPT_DLL extern L_RECOG * recogCreateFromPixaNoFinish ( PIXA *pixa, l_int32 scalew, l_int32 scaleh, l_int32 linew, l_int32 threshold, l_int32 maxyshift ); +LEPT_DLL extern L_RECOG * recogCreate ( l_int32 scalew, l_int32 scaleh, l_int32 linew, l_int32 threshold, l_int32 maxyshift ); +LEPT_DLL extern void recogDestroy ( L_RECOG **precog ); +LEPT_DLL extern l_int32 recogGetCount ( L_RECOG *recog ); +LEPT_DLL extern l_int32 recogSetParams ( L_RECOG *recog, l_int32 type, l_int32 min_nopad, l_float32 max_wh_ratio, l_float32 max_ht_ratio ); +LEPT_DLL extern l_int32 recogGetClassIndex ( L_RECOG *recog, l_int32 val, char *text, l_int32 *pindex ); +LEPT_DLL extern l_int32 recogStringToIndex ( L_RECOG *recog, char *text, l_int32 *pindex ); +LEPT_DLL extern l_int32 recogGetClassString ( L_RECOG *recog, l_int32 index, char **pcharstr ); +LEPT_DLL extern l_int32 l_convertCharstrToInt ( const char *str, l_int32 *pval ); +LEPT_DLL extern L_RECOG * recogRead ( const char *filename ); +LEPT_DLL extern L_RECOG * recogReadStream ( FILE *fp ); +LEPT_DLL extern L_RECOG * recogReadMem ( const l_uint8 *data, size_t size ); +LEPT_DLL extern l_int32 recogWrite ( 
const char *filename, L_RECOG *recog ); +LEPT_DLL extern l_int32 recogWriteStream ( FILE *fp, L_RECOG *recog ); +LEPT_DLL extern l_int32 recogWriteMem ( l_uint8 **pdata, size_t *psize, L_RECOG *recog ); +LEPT_DLL extern PIXA * recogExtractPixa ( L_RECOG *recog ); +LEPT_DLL extern BOXA * recogDecode ( L_RECOG *recog, PIX *pixs, l_int32 nlevels, PIX **ppixdb ); +LEPT_DLL extern l_int32 recogCreateDid ( L_RECOG *recog, PIX *pixs ); +LEPT_DLL extern l_int32 recogDestroyDid ( L_RECOG *recog ); +LEPT_DLL extern l_int32 recogDidExists ( L_RECOG *recog ); +LEPT_DLL extern L_RDID * recogGetDid ( L_RECOG *recog ); +LEPT_DLL extern l_int32 recogSetChannelParams ( L_RECOG *recog, l_int32 nlevels ); +LEPT_DLL extern l_int32 recogIdentifyMultiple ( L_RECOG *recog, PIX *pixs, l_int32 minh, l_int32 skipsplit, BOXA **pboxa, PIXA **ppixa, PIX **ppixdb, l_int32 debugsplit ); +LEPT_DLL extern l_int32 recogSplitIntoCharacters ( L_RECOG *recog, PIX *pixs, l_int32 minh, l_int32 skipsplit, BOXA **pboxa, PIXA **ppixa, l_int32 debug ); +LEPT_DLL extern l_int32 recogCorrelationBestRow ( L_RECOG *recog, PIX *pixs, BOXA **pboxa, NUMA **pnascore, NUMA **pnaindex, SARRAY **psachar, l_int32 debug ); +LEPT_DLL extern l_int32 recogCorrelationBestChar ( L_RECOG *recog, PIX *pixs, BOX **pbox, l_float32 *pscore, l_int32 *pindex, char **pcharstr, PIX **ppixdb ); +LEPT_DLL extern l_int32 recogIdentifyPixa ( L_RECOG *recog, PIXA *pixa, PIX **ppixdb ); +LEPT_DLL extern l_int32 recogIdentifyPix ( L_RECOG *recog, PIX *pixs, PIX **ppixdb ); +LEPT_DLL extern l_int32 recogSkipIdentify ( L_RECOG *recog ); +LEPT_DLL extern void rchaDestroy ( L_RCHA **prcha ); +LEPT_DLL extern void rchDestroy ( L_RCH **prch ); +LEPT_DLL extern l_int32 rchaExtract ( L_RCHA *rcha, NUMA **pnaindex, NUMA **pnascore, SARRAY **psatext, NUMA **pnasample, NUMA **pnaxloc, NUMA **pnayloc, NUMA **pnawidth ); +LEPT_DLL extern l_int32 rchExtract ( L_RCH *rch, l_int32 *pindex, l_float32 *pscore, char **ptext, l_int32 *psample, l_int32 *pxloc, l_int32 *pyloc, l_int32 *pwidth ); +LEPT_DLL extern PIX * recogProcessToIdentify ( L_RECOG *recog, PIX *pixs, l_int32 pad ); +LEPT_DLL extern SARRAY * recogExtractNumbers ( L_RECOG *recog, BOXA *boxas, l_float32 scorethresh, l_int32 spacethresh, BOXAA **pbaa, NUMAA **pnaa ); +LEPT_DLL extern PIXA * showExtractNumbers ( PIX *pixs, SARRAY *sa, BOXAA *baa, NUMAA *naa, PIX **ppixdb ); +LEPT_DLL extern l_int32 recogTrainLabeled ( L_RECOG *recog, PIX *pixs, BOX *box, char *text, l_int32 debug ); +LEPT_DLL extern l_int32 recogProcessLabeled ( L_RECOG *recog, PIX *pixs, BOX *box, char *text, PIX **ppix ); +LEPT_DLL extern l_int32 recogAddSample ( L_RECOG *recog, PIX *pix, l_int32 debug ); +LEPT_DLL extern PIX * recogModifyTemplate ( L_RECOG *recog, PIX *pixs ); +LEPT_DLL extern l_int32 recogAverageSamples ( L_RECOG **precog, l_int32 debug ); +LEPT_DLL extern l_int32 pixaAccumulateSamples ( PIXA *pixa, PTA *pta, PIX **ppixd, l_float32 *px, l_float32 *py ); +LEPT_DLL extern l_int32 recogTrainingFinished ( L_RECOG **precog, l_int32 modifyflag, l_int32 minsize, l_float32 minfract ); +LEPT_DLL extern PIXA * recogFilterPixaBySize ( PIXA *pixas, l_int32 setsize, l_int32 maxkeep, l_float32 max_ht_ratio, NUMA **pna ); +LEPT_DLL extern PIXAA * recogSortPixaByClass ( PIXA *pixa, l_int32 setsize ); +LEPT_DLL extern l_int32 recogRemoveOutliers1 ( L_RECOG **precog, l_float32 minscore, l_int32 mintarget, l_int32 minsize, PIX **ppixsave, PIX **ppixrem ); +LEPT_DLL extern PIXA * pixaRemoveOutliers1 ( PIXA *pixas, l_float32 minscore, l_int32 mintarget, 
l_int32 minsize, PIX **ppixsave, PIX **ppixrem ); +LEPT_DLL extern l_int32 recogRemoveOutliers2 ( L_RECOG **precog, l_float32 minscore, l_int32 minsize, PIX **ppixsave, PIX **ppixrem ); +LEPT_DLL extern PIXA * pixaRemoveOutliers2 ( PIXA *pixas, l_float32 minscore, l_int32 minsize, PIX **ppixsave, PIX **ppixrem ); +LEPT_DLL extern PIXA * recogTrainFromBoot ( L_RECOG *recogboot, PIXA *pixas, l_float32 minscore, l_int32 threshold, l_int32 debug ); +LEPT_DLL extern l_int32 recogPadDigitTrainingSet ( L_RECOG **precog, l_int32 scaleh, l_int32 linew ); +LEPT_DLL extern l_int32 recogIsPaddingNeeded ( L_RECOG *recog, SARRAY **psa ); +LEPT_DLL extern PIXA * recogAddDigitPadTemplates ( L_RECOG *recog, SARRAY *sa ); +LEPT_DLL extern L_RECOG * recogMakeBootDigitRecog ( l_int32 scaleh, l_int32 linew, l_int32 maxyshift, l_int32 debug ); +LEPT_DLL extern PIXA * recogMakeBootDigitTemplates ( l_int32 debug ); +LEPT_DLL extern l_int32 recogShowContent ( FILE *fp, L_RECOG *recog, l_int32 index, l_int32 display ); +LEPT_DLL extern l_int32 recogDebugAverages ( L_RECOG **precog, l_int32 debug ); +LEPT_DLL extern l_int32 recogShowAverageTemplates ( L_RECOG *recog ); +LEPT_DLL extern l_int32 recogShowMatchesInRange ( L_RECOG *recog, PIXA *pixa, l_float32 minscore, l_float32 maxscore, l_int32 display ); +LEPT_DLL extern PIX * recogShowMatch ( L_RECOG *recog, PIX *pix1, PIX *pix2, BOX *box, l_int32 index, l_float32 score ); +LEPT_DLL extern l_int32 regTestSetup ( l_int32 argc, char **argv, L_REGPARAMS **prp ); +LEPT_DLL extern l_int32 regTestCleanup ( L_REGPARAMS *rp ); +LEPT_DLL extern l_int32 regTestCompareValues ( L_REGPARAMS *rp, l_float32 val1, l_float32 val2, l_float32 delta ); +LEPT_DLL extern l_int32 regTestCompareStrings ( L_REGPARAMS *rp, l_uint8 *string1, size_t bytes1, l_uint8 *string2, size_t bytes2 ); +LEPT_DLL extern l_int32 regTestComparePix ( L_REGPARAMS *rp, PIX *pix1, PIX *pix2 ); +LEPT_DLL extern l_int32 regTestCompareSimilarPix ( L_REGPARAMS *rp, PIX *pix1, PIX *pix2, l_int32 mindiff, l_float32 maxfract, l_int32 printstats ); +LEPT_DLL extern l_int32 regTestCheckFile ( L_REGPARAMS *rp, const char *localname ); +LEPT_DLL extern l_int32 regTestCompareFiles ( L_REGPARAMS *rp, l_int32 index1, l_int32 index2 ); +LEPT_DLL extern l_int32 regTestWritePixAndCheck ( L_REGPARAMS *rp, PIX *pix, l_int32 format ); +LEPT_DLL extern char * regTestGenLocalFilename ( L_REGPARAMS *rp, l_int32 index, l_int32 format ); +LEPT_DLL extern l_int32 pixRasterop ( PIX *pixd, l_int32 dx, l_int32 dy, l_int32 dw, l_int32 dh, l_int32 op, PIX *pixs, l_int32 sx, l_int32 sy ); +LEPT_DLL extern l_int32 pixRasteropVip ( PIX *pixd, l_int32 bx, l_int32 bw, l_int32 vshift, l_int32 incolor ); +LEPT_DLL extern l_int32 pixRasteropHip ( PIX *pixd, l_int32 by, l_int32 bh, l_int32 hshift, l_int32 incolor ); +LEPT_DLL extern PIX * pixTranslate ( PIX *pixd, PIX *pixs, l_int32 hshift, l_int32 vshift, l_int32 incolor ); +LEPT_DLL extern l_int32 pixRasteropIP ( PIX *pixd, l_int32 hshift, l_int32 vshift, l_int32 incolor ); +LEPT_DLL extern l_int32 pixRasteropFullImage ( PIX *pixd, PIX *pixs, l_int32 op ); +LEPT_DLL extern void rasteropVipLow ( l_uint32 *data, l_int32 pixw, l_int32 pixh, l_int32 depth, l_int32 wpl, l_int32 x, l_int32 w, l_int32 shift ); +LEPT_DLL extern void rasteropHipLow ( l_uint32 *data, l_int32 pixh, l_int32 depth, l_int32 wpl, l_int32 y, l_int32 h, l_int32 shift ); +LEPT_DLL extern void shiftDataHorizontalLow ( l_uint32 *datad, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_int32 shift ); +LEPT_DLL extern void rasteropUniLow 
( l_uint32 *datad, l_int32 dpixw, l_int32 dpixh, l_int32 depth, l_int32 dwpl, l_int32 dx, l_int32 dy, l_int32 dw, l_int32 dh, l_int32 op ); +LEPT_DLL extern void rasteropLow ( l_uint32 *datad, l_int32 dpixw, l_int32 dpixh, l_int32 depth, l_int32 dwpl, l_int32 dx, l_int32 dy, l_int32 dw, l_int32 dh, l_int32 op, l_uint32 *datas, l_int32 spixw, l_int32 spixh, l_int32 swpl, l_int32 sx, l_int32 sy ); +LEPT_DLL extern PIX * pixRotate ( PIX *pixs, l_float32 angle, l_int32 type, l_int32 incolor, l_int32 width, l_int32 height ); +LEPT_DLL extern PIX * pixEmbedForRotation ( PIX *pixs, l_float32 angle, l_int32 incolor, l_int32 width, l_int32 height ); +LEPT_DLL extern PIX * pixRotateBySampling ( PIX *pixs, l_int32 xcen, l_int32 ycen, l_float32 angle, l_int32 incolor ); +LEPT_DLL extern PIX * pixRotateBinaryNice ( PIX *pixs, l_float32 angle, l_int32 incolor ); +LEPT_DLL extern PIX * pixRotateWithAlpha ( PIX *pixs, l_float32 angle, PIX *pixg, l_float32 fract ); +LEPT_DLL extern PIX * pixRotateAM ( PIX *pixs, l_float32 angle, l_int32 incolor ); +LEPT_DLL extern PIX * pixRotateAMColor ( PIX *pixs, l_float32 angle, l_uint32 colorval ); +LEPT_DLL extern PIX * pixRotateAMGray ( PIX *pixs, l_float32 angle, l_uint8 grayval ); +LEPT_DLL extern PIX * pixRotateAMCorner ( PIX *pixs, l_float32 angle, l_int32 incolor ); +LEPT_DLL extern PIX * pixRotateAMColorCorner ( PIX *pixs, l_float32 angle, l_uint32 fillval ); +LEPT_DLL extern PIX * pixRotateAMGrayCorner ( PIX *pixs, l_float32 angle, l_uint8 grayval ); +LEPT_DLL extern PIX * pixRotateAMColorFast ( PIX *pixs, l_float32 angle, l_uint32 colorval ); +LEPT_DLL extern void rotateAMColorLow ( l_uint32 *datad, l_int32 w, l_int32 h, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_float32 angle, l_uint32 colorval ); +LEPT_DLL extern void rotateAMGrayLow ( l_uint32 *datad, l_int32 w, l_int32 h, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_float32 angle, l_uint8 grayval ); +LEPT_DLL extern void rotateAMColorCornerLow ( l_uint32 *datad, l_int32 w, l_int32 h, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_float32 angle, l_uint32 colorval ); +LEPT_DLL extern void rotateAMGrayCornerLow ( l_uint32 *datad, l_int32 w, l_int32 h, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_float32 angle, l_uint8 grayval ); +LEPT_DLL extern void rotateAMColorFastLow ( l_uint32 *datad, l_int32 w, l_int32 h, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_float32 angle, l_uint32 colorval ); +LEPT_DLL extern PIX * pixRotateOrth ( PIX *pixs, l_int32 quads ); +LEPT_DLL extern PIX * pixRotate180 ( PIX *pixd, PIX *pixs ); +LEPT_DLL extern PIX * pixRotate90 ( PIX *pixs, l_int32 direction ); +LEPT_DLL extern PIX * pixFlipLR ( PIX *pixd, PIX *pixs ); +LEPT_DLL extern PIX * pixFlipTB ( PIX *pixd, PIX *pixs ); +LEPT_DLL extern PIX * pixRotateShear ( PIX *pixs, l_int32 xcen, l_int32 ycen, l_float32 angle, l_int32 incolor ); +LEPT_DLL extern PIX * pixRotate2Shear ( PIX *pixs, l_int32 xcen, l_int32 ycen, l_float32 angle, l_int32 incolor ); +LEPT_DLL extern PIX * pixRotate3Shear ( PIX *pixs, l_int32 xcen, l_int32 ycen, l_float32 angle, l_int32 incolor ); +LEPT_DLL extern l_int32 pixRotateShearIP ( PIX *pixs, l_int32 xcen, l_int32 ycen, l_float32 angle, l_int32 incolor ); +LEPT_DLL extern PIX * pixRotateShearCenter ( PIX *pixs, l_float32 angle, l_int32 incolor ); +LEPT_DLL extern l_int32 pixRotateShearCenterIP ( PIX *pixs, l_float32 angle, l_int32 incolor ); +LEPT_DLL extern PIX * pixStrokeWidthTransform ( PIX *pixs, l_int32 color, l_int32 depth, l_int32 nangles ); +LEPT_DLL extern PIX * pixRunlengthTransform ( 
PIX *pixs, l_int32 color, l_int32 direction, l_int32 depth ); +LEPT_DLL extern l_int32 pixFindHorizontalRuns ( PIX *pix, l_int32 y, l_int32 *xstart, l_int32 *xend, l_int32 *pn ); +LEPT_DLL extern l_int32 pixFindVerticalRuns ( PIX *pix, l_int32 x, l_int32 *ystart, l_int32 *yend, l_int32 *pn ); +LEPT_DLL extern NUMA * pixFindMaxRuns ( PIX *pix, l_int32 direction, NUMA **pnastart ); +LEPT_DLL extern l_int32 pixFindMaxHorizontalRunOnLine ( PIX *pix, l_int32 y, l_int32 *pxstart, l_int32 *psize ); +LEPT_DLL extern l_int32 pixFindMaxVerticalRunOnLine ( PIX *pix, l_int32 x, l_int32 *pystart, l_int32 *psize ); +LEPT_DLL extern l_int32 runlengthMembershipOnLine ( l_int32 *buffer, l_int32 size, l_int32 depth, l_int32 *start, l_int32 *end, l_int32 n ); +LEPT_DLL extern l_int32 * makeMSBitLocTab ( l_int32 bitval ); +LEPT_DLL extern SARRAY * sarrayCreate ( l_int32 n ); +LEPT_DLL extern SARRAY * sarrayCreateInitialized ( l_int32 n, char *initstr ); +LEPT_DLL extern SARRAY * sarrayCreateWordsFromString ( const char *string ); +LEPT_DLL extern SARRAY * sarrayCreateLinesFromString ( const char *string, l_int32 blankflag ); +LEPT_DLL extern void sarrayDestroy ( SARRAY **psa ); +LEPT_DLL extern SARRAY * sarrayCopy ( SARRAY *sa ); +LEPT_DLL extern SARRAY * sarrayClone ( SARRAY *sa ); +LEPT_DLL extern l_int32 sarrayAddString ( SARRAY *sa, char *string, l_int32 copyflag ); +LEPT_DLL extern char * sarrayRemoveString ( SARRAY *sa, l_int32 index ); +LEPT_DLL extern l_int32 sarrayReplaceString ( SARRAY *sa, l_int32 index, char *newstr, l_int32 copyflag ); +LEPT_DLL extern l_int32 sarrayClear ( SARRAY *sa ); +LEPT_DLL extern l_int32 sarrayGetCount ( SARRAY *sa ); +LEPT_DLL extern char ** sarrayGetArray ( SARRAY *sa, l_int32 *pnalloc, l_int32 *pn ); +LEPT_DLL extern char * sarrayGetString ( SARRAY *sa, l_int32 index, l_int32 copyflag ); +LEPT_DLL extern l_int32 sarrayGetRefcount ( SARRAY *sa ); +LEPT_DLL extern l_int32 sarrayChangeRefcount ( SARRAY *sa, l_int32 delta ); +LEPT_DLL extern char * sarrayToString ( SARRAY *sa, l_int32 addnlflag ); +LEPT_DLL extern char * sarrayToStringRange ( SARRAY *sa, l_int32 first, l_int32 nstrings, l_int32 addnlflag ); +LEPT_DLL extern l_int32 sarrayJoin ( SARRAY *sa1, SARRAY *sa2 ); +LEPT_DLL extern l_int32 sarrayAppendRange ( SARRAY *sa1, SARRAY *sa2, l_int32 start, l_int32 end ); +LEPT_DLL extern l_int32 sarrayPadToSameSize ( SARRAY *sa1, SARRAY *sa2, char *padstring ); +LEPT_DLL extern SARRAY * sarrayConvertWordsToLines ( SARRAY *sa, l_int32 linesize ); +LEPT_DLL extern l_int32 sarraySplitString ( SARRAY *sa, const char *str, const char *separators ); +LEPT_DLL extern SARRAY * sarraySelectBySubstring ( SARRAY *sain, const char *substr ); +LEPT_DLL extern SARRAY * sarraySelectByRange ( SARRAY *sain, l_int32 first, l_int32 last ); +LEPT_DLL extern l_int32 sarrayParseRange ( SARRAY *sa, l_int32 start, l_int32 *pactualstart, l_int32 *pend, l_int32 *pnewstart, const char *substr, l_int32 loc ); +LEPT_DLL extern SARRAY * sarrayRead ( const char *filename ); +LEPT_DLL extern SARRAY * sarrayReadStream ( FILE *fp ); +LEPT_DLL extern SARRAY * sarrayReadMem ( const l_uint8 *data, size_t size ); +LEPT_DLL extern l_int32 sarrayWrite ( const char *filename, SARRAY *sa ); +LEPT_DLL extern l_int32 sarrayWriteStream ( FILE *fp, SARRAY *sa ); +LEPT_DLL extern l_int32 sarrayWriteMem ( l_uint8 **pdata, size_t *psize, SARRAY *sa ); +LEPT_DLL extern l_int32 sarrayAppend ( const char *filename, SARRAY *sa ); +LEPT_DLL extern SARRAY * getNumberedPathnamesInDirectory ( const char *dirname, const char 
*substr, l_int32 numpre, l_int32 numpost, l_int32 maxnum ); +LEPT_DLL extern SARRAY * getSortedPathnamesInDirectory ( const char *dirname, const char *substr, l_int32 first, l_int32 nfiles ); +LEPT_DLL extern SARRAY * convertSortedToNumberedPathnames ( SARRAY *sa, l_int32 numpre, l_int32 numpost, l_int32 maxnum ); +LEPT_DLL extern SARRAY * getFilenamesInDirectory ( const char *dirname ); +LEPT_DLL extern SARRAY * sarraySort ( SARRAY *saout, SARRAY *sain, l_int32 sortorder ); +LEPT_DLL extern SARRAY * sarraySortByIndex ( SARRAY *sain, NUMA *naindex ); +LEPT_DLL extern l_int32 stringCompareLexical ( const char *str1, const char *str2 ); +LEPT_DLL extern SARRAY * sarrayUnionByAset ( SARRAY *sa1, SARRAY *sa2 ); +LEPT_DLL extern SARRAY * sarrayRemoveDupsByAset ( SARRAY *sas ); +LEPT_DLL extern SARRAY * sarrayIntersectionByAset ( SARRAY *sa1, SARRAY *sa2 ); +LEPT_DLL extern L_ASET * l_asetCreateFromSarray ( SARRAY *sa ); +LEPT_DLL extern l_int32 sarrayRemoveDupsByHash ( SARRAY *sas, SARRAY **psad, L_DNAHASH **pdahash ); +LEPT_DLL extern SARRAY * sarrayIntersectionByHash ( SARRAY *sa1, SARRAY *sa2 ); +LEPT_DLL extern l_int32 sarrayFindStringByHash ( SARRAY *sa, L_DNAHASH *dahash, const char *str, l_int32 *pindex ); +LEPT_DLL extern L_DNAHASH * l_dnaHashCreateFromSarray ( SARRAY *sa ); +LEPT_DLL extern SARRAY * sarrayGenerateIntegers ( l_int32 n ); +LEPT_DLL extern PIX * pixScale ( PIX *pixs, l_float32 scalex, l_float32 scaley ); +LEPT_DLL extern PIX * pixScaleToSizeRel ( PIX *pixs, l_int32 delw, l_int32 delh ); +LEPT_DLL extern PIX * pixScaleToSize ( PIX *pixs, l_int32 wd, l_int32 hd ); +LEPT_DLL extern PIX * pixScaleGeneral ( PIX *pixs, l_float32 scalex, l_float32 scaley, l_float32 sharpfract, l_int32 sharpwidth ); +LEPT_DLL extern PIX * pixScaleLI ( PIX *pixs, l_float32 scalex, l_float32 scaley ); +LEPT_DLL extern PIX * pixScaleColorLI ( PIX *pixs, l_float32 scalex, l_float32 scaley ); +LEPT_DLL extern PIX * pixScaleColor2xLI ( PIX *pixs ); +LEPT_DLL extern PIX * pixScaleColor4xLI ( PIX *pixs ); +LEPT_DLL extern PIX * pixScaleGrayLI ( PIX *pixs, l_float32 scalex, l_float32 scaley ); +LEPT_DLL extern PIX * pixScaleGray2xLI ( PIX *pixs ); +LEPT_DLL extern PIX * pixScaleGray4xLI ( PIX *pixs ); +LEPT_DLL extern PIX * pixScaleBySampling ( PIX *pixs, l_float32 scalex, l_float32 scaley ); +LEPT_DLL extern PIX * pixScaleBySamplingToSize ( PIX *pixs, l_int32 wd, l_int32 hd ); +LEPT_DLL extern PIX * pixScaleByIntSampling ( PIX *pixs, l_int32 factor ); +LEPT_DLL extern PIX * pixScaleRGBToGrayFast ( PIX *pixs, l_int32 factor, l_int32 color ); +LEPT_DLL extern PIX * pixScaleRGBToBinaryFast ( PIX *pixs, l_int32 factor, l_int32 thresh ); +LEPT_DLL extern PIX * pixScaleGrayToBinaryFast ( PIX *pixs, l_int32 factor, l_int32 thresh ); +LEPT_DLL extern PIX * pixScaleSmooth ( PIX *pix, l_float32 scalex, l_float32 scaley ); +LEPT_DLL extern PIX * pixScaleRGBToGray2 ( PIX *pixs, l_float32 rwt, l_float32 gwt, l_float32 bwt ); +LEPT_DLL extern PIX * pixScaleAreaMap ( PIX *pix, l_float32 scalex, l_float32 scaley ); +LEPT_DLL extern PIX * pixScaleAreaMap2 ( PIX *pix ); +LEPT_DLL extern PIX * pixScaleBinary ( PIX *pixs, l_float32 scalex, l_float32 scaley ); +LEPT_DLL extern PIX * pixScaleToGray ( PIX *pixs, l_float32 scalefactor ); +LEPT_DLL extern PIX * pixScaleToGrayFast ( PIX *pixs, l_float32 scalefactor ); +LEPT_DLL extern PIX * pixScaleToGray2 ( PIX *pixs ); +LEPT_DLL extern PIX * pixScaleToGray3 ( PIX *pixs ); +LEPT_DLL extern PIX * pixScaleToGray4 ( PIX *pixs ); +LEPT_DLL extern PIX * pixScaleToGray6 ( PIX *pixs ); 
+LEPT_DLL extern PIX * pixScaleToGray8 ( PIX *pixs ); +LEPT_DLL extern PIX * pixScaleToGray16 ( PIX *pixs ); +LEPT_DLL extern PIX * pixScaleToGrayMipmap ( PIX *pixs, l_float32 scalefactor ); +LEPT_DLL extern PIX * pixScaleMipmap ( PIX *pixs1, PIX *pixs2, l_float32 scale ); +LEPT_DLL extern PIX * pixExpandReplicate ( PIX *pixs, l_int32 factor ); +LEPT_DLL extern PIX * pixScaleGray2xLIThresh ( PIX *pixs, l_int32 thresh ); +LEPT_DLL extern PIX * pixScaleGray2xLIDither ( PIX *pixs ); +LEPT_DLL extern PIX * pixScaleGray4xLIThresh ( PIX *pixs, l_int32 thresh ); +LEPT_DLL extern PIX * pixScaleGray4xLIDither ( PIX *pixs ); +LEPT_DLL extern PIX * pixScaleGrayMinMax ( PIX *pixs, l_int32 xfact, l_int32 yfact, l_int32 type ); +LEPT_DLL extern PIX * pixScaleGrayMinMax2 ( PIX *pixs, l_int32 type ); +LEPT_DLL extern PIX * pixScaleGrayRankCascade ( PIX *pixs, l_int32 level1, l_int32 level2, l_int32 level3, l_int32 level4 ); +LEPT_DLL extern PIX * pixScaleGrayRank2 ( PIX *pixs, l_int32 rank ); +LEPT_DLL extern l_int32 pixScaleAndTransferAlpha ( PIX *pixd, PIX *pixs, l_float32 scalex, l_float32 scaley ); +LEPT_DLL extern PIX * pixScaleWithAlpha ( PIX *pixs, l_float32 scalex, l_float32 scaley, PIX *pixg, l_float32 fract ); +LEPT_DLL extern void scaleColorLILow ( l_uint32 *datad, l_int32 wd, l_int32 hd, l_int32 wpld, l_uint32 *datas, l_int32 ws, l_int32 hs, l_int32 wpls ); +LEPT_DLL extern void scaleGrayLILow ( l_uint32 *datad, l_int32 wd, l_int32 hd, l_int32 wpld, l_uint32 *datas, l_int32 ws, l_int32 hs, l_int32 wpls ); +LEPT_DLL extern void scaleColor2xLILow ( l_uint32 *datad, l_int32 wpld, l_uint32 *datas, l_int32 ws, l_int32 hs, l_int32 wpls ); +LEPT_DLL extern void scaleColor2xLILineLow ( l_uint32 *lined, l_int32 wpld, l_uint32 *lines, l_int32 ws, l_int32 wpls, l_int32 lastlineflag ); +LEPT_DLL extern void scaleGray2xLILow ( l_uint32 *datad, l_int32 wpld, l_uint32 *datas, l_int32 ws, l_int32 hs, l_int32 wpls ); +LEPT_DLL extern void scaleGray2xLILineLow ( l_uint32 *lined, l_int32 wpld, l_uint32 *lines, l_int32 ws, l_int32 wpls, l_int32 lastlineflag ); +LEPT_DLL extern void scaleGray4xLILow ( l_uint32 *datad, l_int32 wpld, l_uint32 *datas, l_int32 ws, l_int32 hs, l_int32 wpls ); +LEPT_DLL extern void scaleGray4xLILineLow ( l_uint32 *lined, l_int32 wpld, l_uint32 *lines, l_int32 ws, l_int32 wpls, l_int32 lastlineflag ); +LEPT_DLL extern l_int32 scaleBySamplingLow ( l_uint32 *datad, l_int32 wd, l_int32 hd, l_int32 wpld, l_uint32 *datas, l_int32 ws, l_int32 hs, l_int32 d, l_int32 wpls ); +LEPT_DLL extern l_int32 scaleSmoothLow ( l_uint32 *datad, l_int32 wd, l_int32 hd, l_int32 wpld, l_uint32 *datas, l_int32 ws, l_int32 hs, l_int32 d, l_int32 wpls, l_int32 size ); +LEPT_DLL extern void scaleRGBToGray2Low ( l_uint32 *datad, l_int32 wd, l_int32 hd, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_float32 rwt, l_float32 gwt, l_float32 bwt ); +LEPT_DLL extern void scaleColorAreaMapLow ( l_uint32 *datad, l_int32 wd, l_int32 hd, l_int32 wpld, l_uint32 *datas, l_int32 ws, l_int32 hs, l_int32 wpls ); +LEPT_DLL extern void scaleGrayAreaMapLow ( l_uint32 *datad, l_int32 wd, l_int32 hd, l_int32 wpld, l_uint32 *datas, l_int32 ws, l_int32 hs, l_int32 wpls ); +LEPT_DLL extern void scaleAreaMapLow2 ( l_uint32 *datad, l_int32 wd, l_int32 hd, l_int32 wpld, l_uint32 *datas, l_int32 d, l_int32 wpls ); +LEPT_DLL extern l_int32 scaleBinaryLow ( l_uint32 *datad, l_int32 wd, l_int32 hd, l_int32 wpld, l_uint32 *datas, l_int32 ws, l_int32 hs, l_int32 wpls ); +LEPT_DLL extern void scaleToGray2Low ( l_uint32 *datad, l_int32 wd, l_int32 
hd, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_uint32 *sumtab, l_uint8 *valtab ); +LEPT_DLL extern l_uint32 * makeSumTabSG2 ( void ); +LEPT_DLL extern l_uint8 * makeValTabSG2 ( void ); +LEPT_DLL extern void scaleToGray3Low ( l_uint32 *datad, l_int32 wd, l_int32 hd, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_uint32 *sumtab, l_uint8 *valtab ); +LEPT_DLL extern l_uint32 * makeSumTabSG3 ( void ); +LEPT_DLL extern l_uint8 * makeValTabSG3 ( void ); +LEPT_DLL extern void scaleToGray4Low ( l_uint32 *datad, l_int32 wd, l_int32 hd, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_uint32 *sumtab, l_uint8 *valtab ); +LEPT_DLL extern l_uint32 * makeSumTabSG4 ( void ); +LEPT_DLL extern l_uint8 * makeValTabSG4 ( void ); +LEPT_DLL extern void scaleToGray6Low ( l_uint32 *datad, l_int32 wd, l_int32 hd, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_int32 *tab8, l_uint8 *valtab ); +LEPT_DLL extern l_uint8 * makeValTabSG6 ( void ); +LEPT_DLL extern void scaleToGray8Low ( l_uint32 *datad, l_int32 wd, l_int32 hd, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_int32 *tab8, l_uint8 *valtab ); +LEPT_DLL extern l_uint8 * makeValTabSG8 ( void ); +LEPT_DLL extern void scaleToGray16Low ( l_uint32 *datad, l_int32 wd, l_int32 hd, l_int32 wpld, l_uint32 *datas, l_int32 wpls, l_int32 *tab8 ); +LEPT_DLL extern l_int32 scaleMipmapLow ( l_uint32 *datad, l_int32 wd, l_int32 hd, l_int32 wpld, l_uint32 *datas1, l_int32 wpls1, l_uint32 *datas2, l_int32 wpls2, l_float32 red ); +LEPT_DLL extern PIX * pixSeedfillBinary ( PIX *pixd, PIX *pixs, PIX *pixm, l_int32 connectivity ); +LEPT_DLL extern PIX * pixSeedfillBinaryRestricted ( PIX *pixd, PIX *pixs, PIX *pixm, l_int32 connectivity, l_int32 xmax, l_int32 ymax ); +LEPT_DLL extern PIX * pixHolesByFilling ( PIX *pixs, l_int32 connectivity ); +LEPT_DLL extern PIX * pixFillClosedBorders ( PIX *pixs, l_int32 connectivity ); +LEPT_DLL extern PIX * pixExtractBorderConnComps ( PIX *pixs, l_int32 connectivity ); +LEPT_DLL extern PIX * pixRemoveBorderConnComps ( PIX *pixs, l_int32 connectivity ); +LEPT_DLL extern PIX * pixFillBgFromBorder ( PIX *pixs, l_int32 connectivity ); +LEPT_DLL extern PIX * pixFillHolesToBoundingRect ( PIX *pixs, l_int32 minsize, l_float32 maxhfract, l_float32 minfgfract ); +LEPT_DLL extern l_int32 pixSeedfillGray ( PIX *pixs, PIX *pixm, l_int32 connectivity ); +LEPT_DLL extern l_int32 pixSeedfillGrayInv ( PIX *pixs, PIX *pixm, l_int32 connectivity ); +LEPT_DLL extern l_int32 pixSeedfillGraySimple ( PIX *pixs, PIX *pixm, l_int32 connectivity ); +LEPT_DLL extern l_int32 pixSeedfillGrayInvSimple ( PIX *pixs, PIX *pixm, l_int32 connectivity ); +LEPT_DLL extern PIX * pixSeedfillGrayBasin ( PIX *pixb, PIX *pixm, l_int32 delta, l_int32 connectivity ); +LEPT_DLL extern PIX * pixDistanceFunction ( PIX *pixs, l_int32 connectivity, l_int32 outdepth, l_int32 boundcond ); +LEPT_DLL extern PIX * pixSeedspread ( PIX *pixs, l_int32 connectivity ); +LEPT_DLL extern l_int32 pixLocalExtrema ( PIX *pixs, l_int32 maxmin, l_int32 minmax, PIX **ppixmin, PIX **ppixmax ); +LEPT_DLL extern l_int32 pixSelectedLocalExtrema ( PIX *pixs, l_int32 mindist, PIX **ppixmin, PIX **ppixmax ); +LEPT_DLL extern PIX * pixFindEqualValues ( PIX *pixs1, PIX *pixs2 ); +LEPT_DLL extern l_int32 pixSelectMinInConnComp ( PIX *pixs, PIX *pixm, PTA **ppta, NUMA **pnav ); +LEPT_DLL extern PIX * pixRemoveSeededComponents ( PIX *pixd, PIX *pixs, PIX *pixm, l_int32 connectivity, l_int32 bordersize ); +LEPT_DLL extern void seedfillBinaryLow ( l_uint32 *datas, l_int32 hs, l_int32 wpls, l_uint32 *datam, l_int32 hm, 
l_int32 wplm, l_int32 connectivity ); +LEPT_DLL extern void seedfillGrayLow ( l_uint32 *datas, l_int32 w, l_int32 h, l_int32 wpls, l_uint32 *datam, l_int32 wplm, l_int32 connectivity ); +LEPT_DLL extern void seedfillGrayInvLow ( l_uint32 *datas, l_int32 w, l_int32 h, l_int32 wpls, l_uint32 *datam, l_int32 wplm, l_int32 connectivity ); +LEPT_DLL extern void seedfillGrayLowSimple ( l_uint32 *datas, l_int32 w, l_int32 h, l_int32 wpls, l_uint32 *datam, l_int32 wplm, l_int32 connectivity ); +LEPT_DLL extern void seedfillGrayInvLowSimple ( l_uint32 *datas, l_int32 w, l_int32 h, l_int32 wpls, l_uint32 *datam, l_int32 wplm, l_int32 connectivity ); +LEPT_DLL extern void distanceFunctionLow ( l_uint32 *datad, l_int32 w, l_int32 h, l_int32 d, l_int32 wpld, l_int32 connectivity ); +LEPT_DLL extern void seedspreadLow ( l_uint32 *datad, l_int32 w, l_int32 h, l_int32 wpld, l_uint32 *datat, l_int32 wplt, l_int32 connectivity ); +LEPT_DLL extern SELA * selaCreate ( l_int32 n ); +LEPT_DLL extern void selaDestroy ( SELA **psela ); +LEPT_DLL extern SEL * selCreate ( l_int32 height, l_int32 width, const char *name ); +LEPT_DLL extern void selDestroy ( SEL **psel ); +LEPT_DLL extern SEL * selCopy ( SEL *sel ); +LEPT_DLL extern SEL * selCreateBrick ( l_int32 h, l_int32 w, l_int32 cy, l_int32 cx, l_int32 type ); +LEPT_DLL extern SEL * selCreateComb ( l_int32 factor1, l_int32 factor2, l_int32 direction ); +LEPT_DLL extern l_int32 ** create2dIntArray ( l_int32 sy, l_int32 sx ); +LEPT_DLL extern l_int32 selaAddSel ( SELA *sela, SEL *sel, const char *selname, l_int32 copyflag ); +LEPT_DLL extern l_int32 selaGetCount ( SELA *sela ); +LEPT_DLL extern SEL * selaGetSel ( SELA *sela, l_int32 i ); +LEPT_DLL extern char * selGetName ( SEL *sel ); +LEPT_DLL extern l_int32 selSetName ( SEL *sel, const char *name ); +LEPT_DLL extern l_int32 selaFindSelByName ( SELA *sela, const char *name, l_int32 *pindex, SEL **psel ); +LEPT_DLL extern l_int32 selGetElement ( SEL *sel, l_int32 row, l_int32 col, l_int32 *ptype ); +LEPT_DLL extern l_int32 selSetElement ( SEL *sel, l_int32 row, l_int32 col, l_int32 type ); +LEPT_DLL extern l_int32 selGetParameters ( SEL *sel, l_int32 *psy, l_int32 *psx, l_int32 *pcy, l_int32 *pcx ); +LEPT_DLL extern l_int32 selSetOrigin ( SEL *sel, l_int32 cy, l_int32 cx ); +LEPT_DLL extern l_int32 selGetTypeAtOrigin ( SEL *sel, l_int32 *ptype ); +LEPT_DLL extern char * selaGetBrickName ( SELA *sela, l_int32 hsize, l_int32 vsize ); +LEPT_DLL extern char * selaGetCombName ( SELA *sela, l_int32 size, l_int32 direction ); +LEPT_DLL extern l_int32 getCompositeParameters ( l_int32 size, l_int32 *psize1, l_int32 *psize2, char **pnameh1, char **pnameh2, char **pnamev1, char **pnamev2 ); +LEPT_DLL extern SARRAY * selaGetSelnames ( SELA *sela ); +LEPT_DLL extern l_int32 selFindMaxTranslations ( SEL *sel, l_int32 *pxp, l_int32 *pyp, l_int32 *pxn, l_int32 *pyn ); +LEPT_DLL extern SEL * selRotateOrth ( SEL *sel, l_int32 quads ); +LEPT_DLL extern SELA * selaRead ( const char *fname ); +LEPT_DLL extern SELA * selaReadStream ( FILE *fp ); +LEPT_DLL extern SEL * selRead ( const char *fname ); +LEPT_DLL extern SEL * selReadStream ( FILE *fp ); +LEPT_DLL extern l_int32 selaWrite ( const char *fname, SELA *sela ); +LEPT_DLL extern l_int32 selaWriteStream ( FILE *fp, SELA *sela ); +LEPT_DLL extern l_int32 selWrite ( const char *fname, SEL *sel ); +LEPT_DLL extern l_int32 selWriteStream ( FILE *fp, SEL *sel ); +LEPT_DLL extern SEL * selCreateFromString ( const char *text, l_int32 h, l_int32 w, const char *name ); +LEPT_DLL extern 
char * selPrintToString ( SEL *sel ); +LEPT_DLL extern SELA * selaCreateFromFile ( const char *filename ); +LEPT_DLL extern SEL * selCreateFromPta ( PTA *pta, l_int32 cy, l_int32 cx, const char *name ); +LEPT_DLL extern SEL * selCreateFromPix ( PIX *pix, l_int32 cy, l_int32 cx, const char *name ); +LEPT_DLL extern SEL * selReadFromColorImage ( const char *pathname ); +LEPT_DLL extern SEL * selCreateFromColorPix ( PIX *pixs, char *selname ); +LEPT_DLL extern PIX * selDisplayInPix ( SEL *sel, l_int32 size, l_int32 gthick ); +LEPT_DLL extern PIX * selaDisplayInPix ( SELA *sela, l_int32 size, l_int32 gthick, l_int32 spacing, l_int32 ncols ); +LEPT_DLL extern SELA * selaAddBasic ( SELA *sela ); +LEPT_DLL extern SELA * selaAddHitMiss ( SELA *sela ); +LEPT_DLL extern SELA * selaAddDwaLinear ( SELA *sela ); +LEPT_DLL extern SELA * selaAddDwaCombs ( SELA *sela ); +LEPT_DLL extern SELA * selaAddCrossJunctions ( SELA *sela, l_float32 hlsize, l_float32 mdist, l_int32 norient, l_int32 debugflag ); +LEPT_DLL extern SELA * selaAddTJunctions ( SELA *sela, l_float32 hlsize, l_float32 mdist, l_int32 norient, l_int32 debugflag ); +LEPT_DLL extern SELA * sela4ccThin ( SELA *sela ); +LEPT_DLL extern SELA * sela8ccThin ( SELA *sela ); +LEPT_DLL extern SELA * sela4and8ccThin ( SELA *sela ); +LEPT_DLL extern SEL * pixGenerateSelWithRuns ( PIX *pixs, l_int32 nhlines, l_int32 nvlines, l_int32 distance, l_int32 minlength, l_int32 toppix, l_int32 botpix, l_int32 leftpix, l_int32 rightpix, PIX **ppixe ); +LEPT_DLL extern SEL * pixGenerateSelRandom ( PIX *pixs, l_float32 hitfract, l_float32 missfract, l_int32 distance, l_int32 toppix, l_int32 botpix, l_int32 leftpix, l_int32 rightpix, PIX **ppixe ); +LEPT_DLL extern SEL * pixGenerateSelBoundary ( PIX *pixs, l_int32 hitdist, l_int32 missdist, l_int32 hitskip, l_int32 missskip, l_int32 topflag, l_int32 botflag, l_int32 leftflag, l_int32 rightflag, PIX **ppixe ); +LEPT_DLL extern NUMA * pixGetRunCentersOnLine ( PIX *pixs, l_int32 x, l_int32 y, l_int32 minlength ); +LEPT_DLL extern NUMA * pixGetRunsOnLine ( PIX *pixs, l_int32 x1, l_int32 y1, l_int32 x2, l_int32 y2 ); +LEPT_DLL extern PTA * pixSubsampleBoundaryPixels ( PIX *pixs, l_int32 skip ); +LEPT_DLL extern l_int32 adjacentOnPixelInRaster ( PIX *pixs, l_int32 x, l_int32 y, l_int32 *pxa, l_int32 *pya ); +LEPT_DLL extern PIX * pixDisplayHitMissSel ( PIX *pixs, SEL *sel, l_int32 scalefactor, l_uint32 hitcolor, l_uint32 misscolor ); +LEPT_DLL extern PIX * pixHShear ( PIX *pixd, PIX *pixs, l_int32 yloc, l_float32 radang, l_int32 incolor ); +LEPT_DLL extern PIX * pixVShear ( PIX *pixd, PIX *pixs, l_int32 xloc, l_float32 radang, l_int32 incolor ); +LEPT_DLL extern PIX * pixHShearCorner ( PIX *pixd, PIX *pixs, l_float32 radang, l_int32 incolor ); +LEPT_DLL extern PIX * pixVShearCorner ( PIX *pixd, PIX *pixs, l_float32 radang, l_int32 incolor ); +LEPT_DLL extern PIX * pixHShearCenter ( PIX *pixd, PIX *pixs, l_float32 radang, l_int32 incolor ); +LEPT_DLL extern PIX * pixVShearCenter ( PIX *pixd, PIX *pixs, l_float32 radang, l_int32 incolor ); +LEPT_DLL extern l_int32 pixHShearIP ( PIX *pixs, l_int32 yloc, l_float32 radang, l_int32 incolor ); +LEPT_DLL extern l_int32 pixVShearIP ( PIX *pixs, l_int32 xloc, l_float32 radang, l_int32 incolor ); +LEPT_DLL extern PIX * pixHShearLI ( PIX *pixs, l_int32 yloc, l_float32 radang, l_int32 incolor ); +LEPT_DLL extern PIX * pixVShearLI ( PIX *pixs, l_int32 xloc, l_float32 radang, l_int32 incolor ); +LEPT_DLL extern PIX * pixDeskewBoth ( PIX *pixs, l_int32 redsearch ); +LEPT_DLL extern PIX * 
pixDeskew ( PIX *pixs, l_int32 redsearch ); +LEPT_DLL extern PIX * pixFindSkewAndDeskew ( PIX *pixs, l_int32 redsearch, l_float32 *pangle, l_float32 *pconf ); +LEPT_DLL extern PIX * pixDeskewGeneral ( PIX *pixs, l_int32 redsweep, l_float32 sweeprange, l_float32 sweepdelta, l_int32 redsearch, l_int32 thresh, l_float32 *pangle, l_float32 *pconf ); +LEPT_DLL extern l_int32 pixFindSkew ( PIX *pixs, l_float32 *pangle, l_float32 *pconf ); +LEPT_DLL extern l_int32 pixFindSkewSweep ( PIX *pixs, l_float32 *pangle, l_int32 reduction, l_float32 sweeprange, l_float32 sweepdelta ); +LEPT_DLL extern l_int32 pixFindSkewSweepAndSearch ( PIX *pixs, l_float32 *pangle, l_float32 *pconf, l_int32 redsweep, l_int32 redsearch, l_float32 sweeprange, l_float32 sweepdelta, l_float32 minbsdelta ); +LEPT_DLL extern l_int32 pixFindSkewSweepAndSearchScore ( PIX *pixs, l_float32 *pangle, l_float32 *pconf, l_float32 *pendscore, l_int32 redsweep, l_int32 redsearch, l_float32 sweepcenter, l_float32 sweeprange, l_float32 sweepdelta, l_float32 minbsdelta ); +LEPT_DLL extern l_int32 pixFindSkewSweepAndSearchScorePivot ( PIX *pixs, l_float32 *pangle, l_float32 *pconf, l_float32 *pendscore, l_int32 redsweep, l_int32 redsearch, l_float32 sweepcenter, l_float32 sweeprange, l_float32 sweepdelta, l_float32 minbsdelta, l_int32 pivot ); +LEPT_DLL extern l_int32 pixFindSkewOrthogonalRange ( PIX *pixs, l_float32 *pangle, l_float32 *pconf, l_int32 redsweep, l_int32 redsearch, l_float32 sweeprange, l_float32 sweepdelta, l_float32 minbsdelta, l_float32 confprior ); +LEPT_DLL extern l_int32 pixFindDifferentialSquareSum ( PIX *pixs, l_float32 *psum ); +LEPT_DLL extern l_int32 pixFindNormalizedSquareSum ( PIX *pixs, l_float32 *phratio, l_float32 *pvratio, l_float32 *pfract ); +LEPT_DLL extern PIX * pixReadStreamSpix ( FILE *fp ); +LEPT_DLL extern l_int32 readHeaderSpix ( const char *filename, l_int32 *pwidth, l_int32 *pheight, l_int32 *pbps, l_int32 *pspp, l_int32 *piscmap ); +LEPT_DLL extern l_int32 freadHeaderSpix ( FILE *fp, l_int32 *pwidth, l_int32 *pheight, l_int32 *pbps, l_int32 *pspp, l_int32 *piscmap ); +LEPT_DLL extern l_int32 sreadHeaderSpix ( const l_uint32 *data, l_int32 *pwidth, l_int32 *pheight, l_int32 *pbps, l_int32 *pspp, l_int32 *piscmap ); +LEPT_DLL extern l_int32 pixWriteStreamSpix ( FILE *fp, PIX *pix ); +LEPT_DLL extern PIX * pixReadMemSpix ( const l_uint8 *data, size_t size ); +LEPT_DLL extern l_int32 pixWriteMemSpix ( l_uint8 **pdata, size_t *psize, PIX *pix ); +LEPT_DLL extern l_int32 pixSerializeToMemory ( PIX *pixs, l_uint32 **pdata, size_t *pnbytes ); +LEPT_DLL extern PIX * pixDeserializeFromMemory ( const l_uint32 *data, size_t nbytes ); +LEPT_DLL extern L_STACK * lstackCreate ( l_int32 nalloc ); +LEPT_DLL extern void lstackDestroy ( L_STACK **plstack, l_int32 freeflag ); +LEPT_DLL extern l_int32 lstackAdd ( L_STACK *lstack, void *item ); +LEPT_DLL extern void * lstackRemove ( L_STACK *lstack ); +LEPT_DLL extern l_int32 lstackGetCount ( L_STACK *lstack ); +LEPT_DLL extern l_int32 lstackPrint ( FILE *fp, L_STACK *lstack ); +LEPT_DLL extern L_STRCODE * strcodeCreate ( l_int32 fileno ); +LEPT_DLL extern l_int32 strcodeCreateFromFile ( const char *filein, l_int32 fileno, const char *outdir ); +LEPT_DLL extern l_int32 strcodeGenerate ( L_STRCODE *strcode, const char *filein, const char *type ); +LEPT_DLL extern l_int32 strcodeFinalize ( L_STRCODE **pstrcode, const char *outdir ); +LEPT_DLL extern l_int32 l_getStructStrFromFile ( const char *filename, l_int32 field, char **pstr ); +LEPT_DLL extern l_int32 
pixFindStrokeLength ( PIX *pixs, l_int32 *tab8, l_int32 *plength ); +LEPT_DLL extern l_int32 pixFindStrokeWidth ( PIX *pixs, l_float32 thresh, l_int32 *tab8, l_float32 *pwidth, NUMA **pnahisto ); +LEPT_DLL extern NUMA * pixaFindStrokeWidth ( PIXA *pixa, l_float32 thresh, l_int32 *tab8, l_int32 debug ); +LEPT_DLL extern PIXA * pixaModifyStrokeWidth ( PIXA *pixas, l_float32 targetw ); +LEPT_DLL extern PIX * pixModifyStrokeWidth ( PIX *pixs, l_float32 width, l_float32 targetw ); +LEPT_DLL extern PIXA * pixaSetStrokeWidth ( PIXA *pixas, l_int32 width, l_int32 thinfirst, l_int32 connectivity ); +LEPT_DLL extern PIX * pixSetStrokeWidth ( PIX *pixs, l_int32 width, l_int32 thinfirst, l_int32 connectivity ); +LEPT_DLL extern l_int32 * sudokuReadFile ( const char *filename ); +LEPT_DLL extern l_int32 * sudokuReadString ( const char *str ); +LEPT_DLL extern L_SUDOKU * sudokuCreate ( l_int32 *array ); +LEPT_DLL extern void sudokuDestroy ( L_SUDOKU **psud ); +LEPT_DLL extern l_int32 sudokuSolve ( L_SUDOKU *sud ); +LEPT_DLL extern l_int32 sudokuTestUniqueness ( l_int32 *array, l_int32 *punique ); +LEPT_DLL extern L_SUDOKU * sudokuGenerate ( l_int32 *array, l_int32 seed, l_int32 minelems, l_int32 maxtries ); +LEPT_DLL extern l_int32 sudokuOutput ( L_SUDOKU *sud, l_int32 arraytype ); +LEPT_DLL extern PIX * pixAddSingleTextblock ( PIX *pixs, L_BMF *bmf, const char *textstr, l_uint32 val, l_int32 location, l_int32 *poverflow ); +LEPT_DLL extern PIX * pixAddTextlines ( PIX *pixs, L_BMF *bmf, const char *textstr, l_uint32 val, l_int32 location ); +LEPT_DLL extern l_int32 pixSetTextblock ( PIX *pixs, L_BMF *bmf, const char *textstr, l_uint32 val, l_int32 x0, l_int32 y0, l_int32 wtext, l_int32 firstindent, l_int32 *poverflow ); +LEPT_DLL extern l_int32 pixSetTextline ( PIX *pixs, L_BMF *bmf, const char *textstr, l_uint32 val, l_int32 x0, l_int32 y0, l_int32 *pwidth, l_int32 *poverflow ); +LEPT_DLL extern PIXA * pixaAddTextNumber ( PIXA *pixas, L_BMF *bmf, NUMA *na, l_uint32 val, l_int32 location ); +LEPT_DLL extern PIXA * pixaAddTextlines ( PIXA *pixas, L_BMF *bmf, SARRAY *sa, l_uint32 val, l_int32 location ); +LEPT_DLL extern l_int32 pixaAddPixWithText ( PIXA *pixa, PIX *pixs, l_int32 reduction, L_BMF *bmf, const char *textstr, l_uint32 val, l_int32 location ); +LEPT_DLL extern SARRAY * bmfGetLineStrings ( L_BMF *bmf, const char *textstr, l_int32 maxw, l_int32 firstindent, l_int32 *ph ); +LEPT_DLL extern NUMA * bmfGetWordWidths ( L_BMF *bmf, const char *textstr, SARRAY *sa ); +LEPT_DLL extern l_int32 bmfGetStringWidth ( L_BMF *bmf, const char *textstr, l_int32 *pw ); +LEPT_DLL extern SARRAY * splitStringToParagraphs ( char *textstr, l_int32 splitflag ); +LEPT_DLL extern PIX * pixReadTiff ( const char *filename, l_int32 n ); +LEPT_DLL extern PIX * pixReadStreamTiff ( FILE *fp, l_int32 n ); +LEPT_DLL extern l_int32 pixWriteTiff ( const char *filename, PIX *pix, l_int32 comptype, const char *modestr ); +LEPT_DLL extern l_int32 pixWriteTiffCustom ( const char *filename, PIX *pix, l_int32 comptype, const char *modestr, NUMA *natags, SARRAY *savals, SARRAY *satypes, NUMA *nasizes ); +LEPT_DLL extern l_int32 pixWriteStreamTiff ( FILE *fp, PIX *pix, l_int32 comptype ); +LEPT_DLL extern l_int32 pixWriteStreamTiffWA ( FILE *fp, PIX *pix, l_int32 comptype, const char *modestr ); +LEPT_DLL extern PIX * pixReadFromMultipageTiff ( const char *fname, size_t *poffset ); +LEPT_DLL extern PIXA * pixaReadMultipageTiff ( const char *filename ); +LEPT_DLL extern l_int32 pixaWriteMultipageTiff ( const char *fname, PIXA *pixa ); 
+LEPT_DLL extern l_int32 writeMultipageTiff ( const char *dirin, const char *substr, const char *fileout ); +LEPT_DLL extern l_int32 writeMultipageTiffSA ( SARRAY *sa, const char *fileout ); +LEPT_DLL extern l_int32 fprintTiffInfo ( FILE *fpout, const char *tiffile ); +LEPT_DLL extern l_int32 tiffGetCount ( FILE *fp, l_int32 *pn ); +LEPT_DLL extern l_int32 getTiffResolution ( FILE *fp, l_int32 *pxres, l_int32 *pyres ); +LEPT_DLL extern l_int32 readHeaderTiff ( const char *filename, l_int32 n, l_int32 *pwidth, l_int32 *pheight, l_int32 *pbps, l_int32 *pspp, l_int32 *pres, l_int32 *pcmap, l_int32 *pformat ); +LEPT_DLL extern l_int32 freadHeaderTiff ( FILE *fp, l_int32 n, l_int32 *pwidth, l_int32 *pheight, l_int32 *pbps, l_int32 *pspp, l_int32 *pres, l_int32 *pcmap, l_int32 *pformat ); +LEPT_DLL extern l_int32 readHeaderMemTiff ( const l_uint8 *cdata, size_t size, l_int32 n, l_int32 *pwidth, l_int32 *pheight, l_int32 *pbps, l_int32 *pspp, l_int32 *pres, l_int32 *pcmap, l_int32 *pformat ); +LEPT_DLL extern l_int32 findTiffCompression ( FILE *fp, l_int32 *pcomptype ); +LEPT_DLL extern l_int32 extractG4DataFromFile ( const char *filein, l_uint8 **pdata, size_t *pnbytes, l_int32 *pw, l_int32 *ph, l_int32 *pminisblack ); +LEPT_DLL extern PIX * pixReadMemTiff ( const l_uint8 *cdata, size_t size, l_int32 n ); +LEPT_DLL extern PIX * pixReadMemFromMultipageTiff ( const l_uint8 *cdata, size_t size, size_t *poffset ); +LEPT_DLL extern PIXA * pixaReadMemMultipageTiff ( const l_uint8 *data, size_t size ); +LEPT_DLL extern l_int32 pixaWriteMemMultipageTiff ( l_uint8 **pdata, size_t *psize, PIXA *pixa ); +LEPT_DLL extern l_int32 pixWriteMemTiff ( l_uint8 **pdata, size_t *psize, PIX *pix, l_int32 comptype ); +LEPT_DLL extern l_int32 pixWriteMemTiffCustom ( l_uint8 **pdata, size_t *psize, PIX *pix, l_int32 comptype, NUMA *natags, SARRAY *savals, SARRAY *satypes, NUMA *nasizes ); +LEPT_DLL extern l_int32 setMsgSeverity ( l_int32 newsev ); +LEPT_DLL extern l_int32 returnErrorInt ( const char *msg, const char *procname, l_int32 ival ); +LEPT_DLL extern l_float32 returnErrorFloat ( const char *msg, const char *procname, l_float32 fval ); +LEPT_DLL extern void * returnErrorPtr ( const char *msg, const char *procname, void *pval ); +LEPT_DLL extern l_int32 filesAreIdentical ( const char *fname1, const char *fname2, l_int32 *psame ); +LEPT_DLL extern l_uint16 convertOnLittleEnd16 ( l_uint16 shortin ); +LEPT_DLL extern l_uint16 convertOnBigEnd16 ( l_uint16 shortin ); +LEPT_DLL extern l_uint32 convertOnLittleEnd32 ( l_uint32 wordin ); +LEPT_DLL extern l_uint32 convertOnBigEnd32 ( l_uint32 wordin ); +LEPT_DLL extern l_int32 fileCorruptByDeletion ( const char *filein, l_float32 loc, l_float32 size, const char *fileout ); +LEPT_DLL extern l_int32 fileCorruptByMutation ( const char *filein, l_float32 loc, l_float32 size, const char *fileout ); +LEPT_DLL extern l_int32 genRandomIntegerInRange ( l_int32 range, l_int32 seed, l_int32 *pval ); +LEPT_DLL extern l_int32 lept_roundftoi ( l_float32 fval ); +LEPT_DLL extern l_int32 l_hashStringToUint64 ( const char *str, l_uint64 *phash ); +LEPT_DLL extern l_int32 l_hashPtToUint64 ( l_int32 x, l_int32 y, l_uint64 *phash ); +LEPT_DLL extern l_int32 l_hashFloat64ToUint64 ( l_int32 nbuckets, l_float64 val, l_uint64 *phash ); +LEPT_DLL extern l_int32 findNextLargerPrime ( l_int32 start, l_uint32 *pprime ); +LEPT_DLL extern l_int32 lept_isPrime ( l_uint64 n, l_int32 *pis_prime, l_uint32 *pfactor ); +LEPT_DLL extern l_uint32 convertBinaryToGrayCode ( l_uint32 val ); +LEPT_DLL extern 
l_uint32 convertGrayCodeToBinary ( l_uint32 val ); +LEPT_DLL extern char * getLeptonicaVersion ( ); +LEPT_DLL extern void startTimer ( void ); +LEPT_DLL extern l_float32 stopTimer ( void ); +LEPT_DLL extern L_TIMER startTimerNested ( void ); +LEPT_DLL extern l_float32 stopTimerNested ( L_TIMER rusage_start ); +LEPT_DLL extern void l_getCurrentTime ( l_int32 *sec, l_int32 *usec ); +LEPT_DLL extern L_WALLTIMER * startWallTimer ( void ); +LEPT_DLL extern l_float32 stopWallTimer ( L_WALLTIMER **ptimer ); +LEPT_DLL extern char * l_getFormattedDate ( ); +LEPT_DLL extern char * stringNew ( const char *src ); +LEPT_DLL extern l_int32 stringCopy ( char *dest, const char *src, l_int32 n ); +LEPT_DLL extern l_int32 stringReplace ( char **pdest, const char *src ); +LEPT_DLL extern l_int32 stringLength ( const char *src, size_t size ); +LEPT_DLL extern l_int32 stringCat ( char *dest, size_t size, const char *src ); +LEPT_DLL extern char * stringConcatNew ( const char *first, ... ); +LEPT_DLL extern char * stringJoin ( const char *src1, const char *src2 ); +LEPT_DLL extern l_int32 stringJoinIP ( char **psrc1, const char *src2 ); +LEPT_DLL extern char * stringReverse ( const char *src ); +LEPT_DLL extern char * strtokSafe ( char *cstr, const char *seps, char **psaveptr ); +LEPT_DLL extern l_int32 stringSplitOnToken ( char *cstr, const char *seps, char **phead, char **ptail ); +LEPT_DLL extern char * stringRemoveChars ( const char *src, const char *remchars ); +LEPT_DLL extern l_int32 stringFindSubstr ( const char *src, const char *sub, l_int32 *ploc ); +LEPT_DLL extern char * stringReplaceSubstr ( const char *src, const char *sub1, const char *sub2, l_int32 *pfound, l_int32 *ploc ); +LEPT_DLL extern char * stringReplaceEachSubstr ( const char *src, const char *sub1, const char *sub2, l_int32 *pcount ); +LEPT_DLL extern L_DNA * arrayFindEachSequence ( const l_uint8 *data, size_t datalen, const l_uint8 *sequence, size_t seqlen ); +LEPT_DLL extern l_int32 arrayFindSequence ( const l_uint8 *data, size_t datalen, const l_uint8 *sequence, size_t seqlen, l_int32 *poffset, l_int32 *pfound ); +LEPT_DLL extern void * reallocNew ( void **pindata, l_int32 oldsize, l_int32 newsize ); +LEPT_DLL extern l_uint8 * l_binaryRead ( const char *filename, size_t *pnbytes ); +LEPT_DLL extern l_uint8 * l_binaryReadStream ( FILE *fp, size_t *pnbytes ); +LEPT_DLL extern l_uint8 * l_binaryReadSelect ( const char *filename, size_t start, size_t nbytes, size_t *pnread ); +LEPT_DLL extern l_uint8 * l_binaryReadSelectStream ( FILE *fp, size_t start, size_t nbytes, size_t *pnread ); +LEPT_DLL extern l_int32 l_binaryWrite ( const char *filename, const char *operation, void *data, size_t nbytes ); +LEPT_DLL extern size_t nbytesInFile ( const char *filename ); +LEPT_DLL extern size_t fnbytesInFile ( FILE *fp ); +LEPT_DLL extern l_uint8 * l_binaryCopy ( l_uint8 *datas, size_t size ); +LEPT_DLL extern l_int32 fileCopy ( const char *srcfile, const char *newfile ); +LEPT_DLL extern l_int32 fileConcatenate ( const char *srcfile, const char *destfile ); +LEPT_DLL extern l_int32 fileAppendString ( const char *filename, const char *str ); +LEPT_DLL extern FILE * fopenReadStream ( const char *filename ); +LEPT_DLL extern FILE * fopenWriteStream ( const char *filename, const char *modestring ); +LEPT_DLL extern FILE * fopenReadFromMemory ( const l_uint8 *data, size_t size ); +LEPT_DLL extern FILE * fopenWriteWinTempfile ( ); +LEPT_DLL extern FILE * lept_fopen ( const char *filename, const char *mode ); +LEPT_DLL extern l_int32 lept_fclose ( FILE 
*fp ); +LEPT_DLL extern void * lept_calloc ( size_t nmemb, size_t size ); +LEPT_DLL extern void lept_free ( void *ptr ); +LEPT_DLL extern l_int32 lept_mkdir ( const char *subdir ); +LEPT_DLL extern l_int32 lept_rmdir ( const char *subdir ); +LEPT_DLL extern void lept_direxists ( const char *dir, l_int32 *pexists ); +LEPT_DLL extern l_int32 lept_rm_match ( const char *subdir, const char *substr ); +LEPT_DLL extern l_int32 lept_rm ( const char *subdir, const char *tail ); +LEPT_DLL extern l_int32 lept_rmfile ( const char *filepath ); +LEPT_DLL extern l_int32 lept_mv ( const char *srcfile, const char *newdir, const char *newtail, char **pnewpath ); +LEPT_DLL extern l_int32 lept_cp ( const char *srcfile, const char *newdir, const char *newtail, char **pnewpath ); +LEPT_DLL extern l_int32 splitPathAtDirectory ( const char *pathname, char **pdir, char **ptail ); +LEPT_DLL extern l_int32 splitPathAtExtension ( const char *pathname, char **pbasename, char **pextension ); +LEPT_DLL extern char * pathJoin ( const char *dir, const char *fname ); +LEPT_DLL extern char * appendSubdirs ( const char *basedir, const char *subdirs ); +LEPT_DLL extern l_int32 convertSepCharsInPath ( char *path, l_int32 type ); +LEPT_DLL extern char * genPathname ( const char *dir, const char *fname ); +LEPT_DLL extern l_int32 makeTempDirname ( char *result, size_t nbytes, const char *subdir ); +LEPT_DLL extern l_int32 modifyTrailingSlash ( char *path, size_t nbytes, l_int32 flag ); +LEPT_DLL extern char * l_makeTempFilename ( ); +LEPT_DLL extern l_int32 extractNumberFromFilename ( const char *fname, l_int32 numpre, l_int32 numpost ); +LEPT_DLL extern PIX * pixSimpleCaptcha ( PIX *pixs, l_int32 border, l_int32 nterms, l_uint32 seed, l_uint32 color, l_int32 cmapflag ); +LEPT_DLL extern PIX * pixRandomHarmonicWarp ( PIX *pixs, l_float32 xmag, l_float32 ymag, l_float32 xfreq, l_float32 yfreq, l_int32 nx, l_int32 ny, l_uint32 seed, l_int32 grayval ); +LEPT_DLL extern PIX * pixWarpStereoscopic ( PIX *pixs, l_int32 zbend, l_int32 zshiftt, l_int32 zshiftb, l_int32 ybendt, l_int32 ybendb, l_int32 redleft ); +LEPT_DLL extern PIX * pixStretchHorizontal ( PIX *pixs, l_int32 dir, l_int32 type, l_int32 hmax, l_int32 operation, l_int32 incolor ); +LEPT_DLL extern PIX * pixStretchHorizontalSampled ( PIX *pixs, l_int32 dir, l_int32 type, l_int32 hmax, l_int32 incolor ); +LEPT_DLL extern PIX * pixStretchHorizontalLI ( PIX *pixs, l_int32 dir, l_int32 type, l_int32 hmax, l_int32 incolor ); +LEPT_DLL extern PIX * pixQuadraticVShear ( PIX *pixs, l_int32 dir, l_int32 vmaxt, l_int32 vmaxb, l_int32 operation, l_int32 incolor ); +LEPT_DLL extern PIX * pixQuadraticVShearSampled ( PIX *pixs, l_int32 dir, l_int32 vmaxt, l_int32 vmaxb, l_int32 incolor ); +LEPT_DLL extern PIX * pixQuadraticVShearLI ( PIX *pixs, l_int32 dir, l_int32 vmaxt, l_int32 vmaxb, l_int32 incolor ); +LEPT_DLL extern PIX * pixStereoFromPair ( PIX *pix1, PIX *pix2, l_float32 rwt, l_float32 gwt, l_float32 bwt ); +LEPT_DLL extern L_WSHED * wshedCreate ( PIX *pixs, PIX *pixm, l_int32 mindepth, l_int32 debugflag ); +LEPT_DLL extern void wshedDestroy ( L_WSHED **pwshed ); +LEPT_DLL extern l_int32 wshedApply ( L_WSHED *wshed ); +LEPT_DLL extern l_int32 wshedBasins ( L_WSHED *wshed, PIXA **ppixa, NUMA **pnalevels ); +LEPT_DLL extern PIX * wshedRenderFill ( L_WSHED *wshed ); +LEPT_DLL extern PIX * wshedRenderColors ( L_WSHED *wshed ); +LEPT_DLL extern PIX * pixReadStreamWebP ( FILE *fp ); +LEPT_DLL extern PIX * pixReadMemWebP ( const l_uint8 *filedata, size_t filesize ); +LEPT_DLL extern 
l_int32 readHeaderWebP ( const char *filename, l_int32 *pw, l_int32 *ph, l_int32 *pspp ); +LEPT_DLL extern l_int32 readHeaderMemWebP ( const l_uint8 *data, size_t size, l_int32 *pw, l_int32 *ph, l_int32 *pspp ); +LEPT_DLL extern l_int32 pixWriteWebP ( const char *filename, PIX *pixs, l_int32 quality, l_int32 lossless ); +LEPT_DLL extern l_int32 pixWriteStreamWebP ( FILE *fp, PIX *pixs, l_int32 quality, l_int32 lossless ); +LEPT_DLL extern l_int32 pixWriteMemWebP ( l_uint8 **pencdata, size_t *pencsize, PIX *pixs, l_int32 quality, l_int32 lossless ); +LEPT_DLL extern l_int32 pixaWriteFiles ( const char *rootname, PIXA *pixa, l_int32 format ); +LEPT_DLL extern l_int32 pixWrite ( const char *fname, PIX *pix, l_int32 format ); +LEPT_DLL extern l_int32 pixWriteAutoFormat ( const char *filename, PIX *pix ); +LEPT_DLL extern l_int32 pixWriteStream ( FILE *fp, PIX *pix, l_int32 format ); +LEPT_DLL extern l_int32 pixWriteImpliedFormat ( const char *filename, PIX *pix, l_int32 quality, l_int32 progressive ); +LEPT_DLL extern l_int32 pixChooseOutputFormat ( PIX *pix ); +LEPT_DLL extern l_int32 getImpliedFileFormat ( const char *filename ); +LEPT_DLL extern l_int32 pixGetAutoFormat ( PIX *pix, l_int32 *pformat ); +LEPT_DLL extern const char * getFormatExtension ( l_int32 format ); +LEPT_DLL extern l_int32 pixWriteMem ( l_uint8 **pdata, size_t *psize, PIX *pix, l_int32 format ); +LEPT_DLL extern l_int32 l_fileDisplay ( const char *fname, l_int32 x, l_int32 y, l_float32 scale ); +LEPT_DLL extern l_int32 pixDisplay ( PIX *pixs, l_int32 x, l_int32 y ); +LEPT_DLL extern l_int32 pixDisplayWithTitle ( PIX *pixs, l_int32 x, l_int32 y, const char *title, l_int32 dispflag ); +LEPT_DLL extern l_int32 pixSaveTiled ( PIX *pixs, PIXA *pixa, l_float32 scalefactor, l_int32 newrow, l_int32 space, l_int32 dp ); +LEPT_DLL extern l_int32 pixSaveTiledOutline ( PIX *pixs, PIXA *pixa, l_float32 scalefactor, l_int32 newrow, l_int32 space, l_int32 linewidth, l_int32 dp ); +LEPT_DLL extern l_int32 pixSaveTiledWithText ( PIX *pixs, PIXA *pixa, l_int32 outwidth, l_int32 newrow, l_int32 space, l_int32 linewidth, L_BMF *bmf, const char *textstr, l_uint32 val, l_int32 location ); +LEPT_DLL extern void l_chooseDisplayProg ( l_int32 selection ); +LEPT_DLL extern l_int32 pixDisplayWrite ( PIX *pixs, l_int32 reduction ); +LEPT_DLL extern l_int32 pixDisplayWriteFormat ( PIX *pixs, l_int32 reduction, l_int32 format ); +LEPT_DLL extern l_int32 pixDisplayMultiple ( l_int32 res, l_float32 scalefactor, const char *fileout ); +LEPT_DLL extern l_uint8 * zlibCompress ( l_uint8 *datain, size_t nin, size_t *pnout ); +LEPT_DLL extern l_uint8 * zlibUncompress ( l_uint8 *datain, size_t nin, size_t *pnout ); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* NO_PROTOS */ + + +#endif /* LEPTONICA_ALLHEADERS_H */ + diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/alltypes.h b/third_party/leptonica/uos/sw_64/include/leptonica/alltypes.h new file mode 100644 index 00000000..a84c0bfa --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/alltypes.h @@ -0,0 +1,66 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. 
Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + +#ifndef LEPTONICA_ALLTYPES_H +#define LEPTONICA_ALLTYPES_H + + /* Standard */ +#include +#include +#include + + /* General and configuration defs */ +#include "endianness.h" +#include "environ.h" + + /* Generic and non-image-specific containers */ +#include "array.h" +#include "bbuffer.h" +#include "heap.h" +#include "list.h" +#include "ptra.h" +#include "queue.h" +#include "rbtree.h" +#include "stack.h" + + /* Imaging */ +#include "arrayaccess.h" +#include "bmf.h" +#include "ccbord.h" +#include "dewarp.h" +#include "gplot.h" +#include "imageio.h" +#include "jbclass.h" +#include "morph.h" +#include "pix.h" +#include "recog.h" +#include "regutils.h" +#include "stringcode.h" +#include "sudoku.h" +#include "watershed.h" + + +#endif /* LEPTONICA_ALLTYPES_H */ diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/array.h b/third_party/leptonica/uos/sw_64/include/leptonica/array.h new file mode 100644 index 00000000..fa5074fd --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/array.h @@ -0,0 +1,159 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *====================================================================*/ + +#ifndef LEPTONICA_ARRAY_H +#define LEPTONICA_ARRAY_H + +/*! + * \file array.h + * + *
+ *  Contains the following structs:
+ *      struct Numa
+ *      struct Numaa
+ *      struct L_Dna
+ *      struct L_Dnaa
+ *      struct L_DnaHash
+ *      struct Sarray
+ *      struct L_Bytea
+ *
+ *  Contains definitions for:
+ *      Numa interpolation flags
+ *      Numa and FPix border flags
+ *      Numa data type conversion to string
+ * 
+ */ + + +/*------------------------------------------------------------------------* + * Array Structs * + *------------------------------------------------------------------------*/ + +/*! Numa version for serialization */ +#define NUMA_VERSION_NUMBER 1 + + /*! Number array: an array of floats */ +struct Numa +{ + l_int32 nalloc; /*!< size of allocated number array */ + l_int32 n; /*!< number of numbers saved */ + l_int32 refcount; /*!< reference count (1 if no clones) */ + l_float32 startx; /*!< x value assigned to array[0] */ + l_float32 delx; /*!< change in x value as i --> i + 1 */ + l_float32 *array; /*!< number array */ +}; +typedef struct Numa NUMA; + + /*! Array of number arrays */ +struct Numaa +{ + l_int32 nalloc; /*!< size of allocated ptr array */ + l_int32 n; /*!< number of Numa saved */ + struct Numa **numa; /*!< array of Numa */ +}; +typedef struct Numaa NUMAA; + +/*! Dna version for serialization */ +#define DNA_VERSION_NUMBER 1 + + /*! Double number array: an array of doubles */ +struct L_Dna +{ + l_int32 nalloc; /*!< size of allocated number array */ + l_int32 n; /*!< number of numbers saved */ + l_int32 refcount; /*!< reference count (1 if no clones) */ + l_float64 startx; /*!< x value assigned to array[0] */ + l_float64 delx; /*!< change in x value as i --> i + 1 */ + l_float64 *array; /*!< number array */ +}; +typedef struct L_Dna L_DNA; + + /*! Array of double number arrays */ +struct L_Dnaa +{ + l_int32 nalloc; /*!< size of allocated ptr array */ + l_int32 n; /*!< number of L_Dna saved */ + struct L_Dna **dna; /*!< array of L_Dna */ +}; +typedef struct L_Dnaa L_DNAA; + + /*! A hash table of Dnas */ +struct L_DnaHash +{ + l_int32 nbuckets; + l_int32 initsize; /*!< initial size of each dna that is made */ + struct L_Dna **dna; /*!< array of L_Dna */ +}; +typedef struct L_DnaHash L_DNAHASH; + +/*! Sarray version for serialization */ +#define SARRAY_VERSION_NUMBER 1 + + /*! String array: an array of C strings */ +struct Sarray +{ + l_int32 nalloc; /*!< size of allocated ptr array */ + l_int32 n; /*!< number of strings allocated */ + l_int32 refcount; /*!< reference count (1 if no clones) */ + char **array; /*!< string array */ +}; +typedef struct Sarray SARRAY; + + /*! Byte array (analogous to C++ "string") */ +struct L_Bytea +{ + size_t nalloc; /*!< number of bytes allocated in data array */ + size_t size; /*!< number of bytes presently used */ + l_int32 refcount; /*!< reference count (1 if no clones) */ + l_uint8 *data; /*!< data array */ +}; +typedef struct L_Bytea L_BYTEA; + + +/*------------------------------------------------------------------------* + * Array flags * + *------------------------------------------------------------------------*/ + /*! Flags for interpolation in Numa */ +enum { + L_LINEAR_INTERP = 1, /*!< linear */ + L_QUADRATIC_INTERP = 2 /*!< quadratic */ +}; + + /*! Flags for added borders in Numa and Fpix */ +enum { + L_CONTINUED_BORDER = 1, /*!< extended with same value */ + L_SLOPE_BORDER = 2, /*!< extended with constant normal derivative */ + L_MIRRORED_BORDER = 3 /*!< mirrored */ +}; + + /*! 
Flags for data type converted from Numa */ +enum { + L_INTEGER_VALUE = 1, /*!< convert to integer */ + L_FLOAT_VALUE = 2 /*!< convert to float */ +}; + + +#endif /* LEPTONICA_ARRAY_H */ diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/arrayaccess.h b/third_party/leptonica/uos/sw_64/include/leptonica/arrayaccess.h new file mode 100644 index 00000000..87861264 --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/arrayaccess.h @@ -0,0 +1,264 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + +#ifndef LEPTONICA_ARRAY_ACCESS_H +#define LEPTONICA_ARRAY_ACCESS_H + +/*! + * \file arrayaccess.h + * + *
+ *  1, 2, 4, 8, 16 and 32 bit data access within an array of 32-bit words
+ *
+ *  This is used primarily to access 1, 2, 4, 8, 16 and 32 bit pixels
+ *  in a line of image data, represented as an array of 32-bit words.
+ *
+ *     pdata:  pointer to first 32-bit word in the array
+ *     n:      index of the pixel in the array
+ *
+ *  Function calls for these accessors are defined in arrayaccess.c.
+ *
+ *  However, for efficiency we use the inline macros for all accesses.
+ *  Even though the 2 and 4 bit set* accessors are more complicated,
+ *  they are about 10% faster than the function calls.
+ *
+ *  The 32 bit access is just a cast and ptr arithmetic.  We include
+ *  it so that the input ptr can be void*.
+ *
+ *  At the end of this file is code for invoking the function calls
+ *  instead of inlining.
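+ *
+ *  As a point of reference, here is a minimal usage sketch (an editorial
+ *  addition, not upstream Leptonica text).  The accessors operate on one
+ *  raster line of pix data at a time; pixGetData() and pixGetWpl() are
+ *  assumed from pix.h / allheaders.h:
+ *
+ *      l_uint32  *data = pixGetData(pixs);     // 32-bit word array
+ *      l_int32    wpl  = pixGetWpl(pixs);      // words per raster line
+ *      l_uint32  *line = data + y * wpl;       // start of line y
+ *      l_int32    v = GET_DATA_QBIT(line, x);  // 4 bpp pixel at column x
+ *      SET_DATA_QBIT(line, x, 15 - v);         // write back its inverse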
+ *
+ *  The macro SET_DATA_BIT_VAL(pdata, n, val) is a bit slower than
+ *      if (val == 0)
+ *          CLEAR_DATA_BIT(pdata, n);
+ *      else
+ *          SET_DATA_BIT(pdata, n);
+ *
+ *  Some compilers complain when the SET macros are surrounded by
+ *  parentheses, because parens require an evaluation and it is not
+ *  defined for SET macros.  If SET_DATA_QBIT were defined as a
+ *  compound macro, in analogy to l_setDataQbit(), it requires
+ *  surrounding braces:
+ *     #define  SET_DATA_QBIT(pdata, n, val) \
+ *        {l_uint32 *_TEMP_WORD_PTR_; \
+ *         _TEMP_WORD_PTR_ = (l_uint32 *)(pdata) + ((n) >> 3); \
+ *         *_TEMP_WORD_PTR_ &= ~(0xf0000000 >> (4 * ((n) & 7))); \
+ *         *_TEMP_WORD_PTR_ |= (((val) & 15) << (28 - 4 * ((n) & 7)));}
+ *  but if used in an if/else
+ *      if (x)
+ *         SET_DATA_QBIT(...);
+ *      else
+ *         ...
+ *  the compiler sees
+ *      if (x)
+ *         {......};
+ *      else
+ *         ...
+ *  The semicolon comes after the brace and will not compile.
+ *  This can be fixed in the call by either omitting the semicolon
+ *  or requiring another set of braces around SET_DATA_QBIT(), but
+ *  both these options break compatibility with current code, and
+ *  require special attention by anyone using the macros.
+ *
+ *  There are (at least) two ways to fix this in the macro definitions,
+ *  suggested by Dave Bryan.
+ *  (1) Surround the braces in the macro above with
+ *         do {....} while(0)
+ *      Then the semicolon just terminates the expression.
+ *  (2) Reduce the blocks to a single expression; e.g.,
+ *         *((l_uint32 *)(pdata) + ((n) >> 3)) = \
+ *           *((l_uint32 *)(pdata) + ((n) >> 3)) \
+ *           & ~(0xf0000000 >> (4 * ((n) & 7))) \
+ *           | (((val) & 15) << (28 - 4 * ((n) & 7)))
+ *      This appears to cause redundant computation, but the compiler
+ *      should evaluate the common subexpression only once.
+ *  All these methods have the same performance, giving about 300M
+ *  SET_DATA_QBIT operations per second on a fast 64 bit system.
+ *  Using the function calls instead of the macros results in about 250M
+ *  SET_DATA_QBIT operations per second, a performance hit of nearly 20%.
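+ *
+ *  For concreteness, the do/while(0) form of fix (1) applied to
+ *  SET_DATA_QBIT would read as follows (an editorial sketch; the macro
+ *  actually defined below uses the single-expression form of fix (2)):
+ *     #define  SET_DATA_QBIT(pdata, n, val) \
+ *        do { \
+ *            l_uint32 *_TEMP_WORD_PTR_; \
+ *            _TEMP_WORD_PTR_ = (l_uint32 *)(pdata) + ((n) >> 3); \
+ *            *_TEMP_WORD_PTR_ &= ~(0xf0000000 >> (4 * ((n) & 7))); \
+ *            *_TEMP_WORD_PTR_ |= (((val) & 15) << (28 - 4 * ((n) & 7))); \
+ *        } while (0)
+ *  With this form the trailing semicolon in "SET_DATA_QBIT(...);" simply
+ *  terminates the statement, so the if/else problem above does not arise.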
+ * 
+ */ + +#define USE_INLINE_ACCESSORS 1 + +#if USE_INLINE_ACCESSORS + + /*=============================================================*/ + /* Faster: use in line accessors */ + /*=============================================================*/ + + /*--------------------------------------------------* + * 1 bit access * + *--------------------------------------------------*/ +/*! 1 bit access - get */ +#define GET_DATA_BIT(pdata, n) \ + ((*((l_uint32 *)(pdata) + ((n) >> 5)) >> (31 - ((n) & 31))) & 1) + +/*! 1 bit access - set */ +#define SET_DATA_BIT(pdata, n) \ + *((l_uint32 *)(pdata) + ((n) >> 5)) |= (0x80000000 >> ((n) & 31)) + +/*! 1 bit access - clear */ +#define CLEAR_DATA_BIT(pdata, n) \ + *((l_uint32 *)(pdata) + ((n) >> 5)) &= ~(0x80000000 >> ((n) & 31)) + +/*! 1 bit access - set value (0 or 1) */ +#define SET_DATA_BIT_VAL(pdata, n, val) \ + *((l_uint32 *)(pdata) + ((n) >> 5)) = \ + ((*((l_uint32 *)(pdata) + ((n) >> 5)) \ + & (~(0x80000000 >> ((n) & 31)))) \ + | ((val) << (31 - ((n) & 31)))) + + /*--------------------------------------------------* + * 2 bit access * + *--------------------------------------------------*/ +/*! 2 bit access - get */ +#define GET_DATA_DIBIT(pdata, n) \ + ((*((l_uint32 *)(pdata) + ((n) >> 4)) >> (2 * (15 - ((n) & 15)))) & 3) + +/*! 2 bit access - set value (0 ... 3) */ +#define SET_DATA_DIBIT(pdata, n, val) \ + *((l_uint32 *)(pdata) + ((n) >> 4)) = \ + ((*((l_uint32 *)(pdata) + ((n) >> 4)) \ + & (~(0xc0000000 >> (2 * ((n) & 15))))) \ + | (((val) & 3) << (30 - 2 * ((n) & 15)))) + +/*! 2 bit access - clear */ +#define CLEAR_DATA_DIBIT(pdata, n) \ + *((l_uint32 *)(pdata) + ((n) >> 4)) &= ~(0xc0000000 >> (2 * ((n) & 15))) + + + /*--------------------------------------------------* + * 4 bit access * + *--------------------------------------------------*/ +/*! 4 bit access - get */ +#define GET_DATA_QBIT(pdata, n) \ + ((*((l_uint32 *)(pdata) + ((n) >> 3)) >> (4 * (7 - ((n) & 7)))) & 0xf) + +/*! 4 bit access - set value (0 ... 15) */ +#define SET_DATA_QBIT(pdata, n, val) \ + *((l_uint32 *)(pdata) + ((n) >> 3)) = \ + ((*((l_uint32 *)(pdata) + ((n) >> 3)) \ + & (~(0xf0000000 >> (4 * ((n) & 7))))) \ + | (((val) & 15) << (28 - 4 * ((n) & 7)))) + +/*! 4 bit access - clear */ +#define CLEAR_DATA_QBIT(pdata, n) \ + *((l_uint32 *)(pdata) + ((n) >> 3)) &= ~(0xf0000000 >> (4 * ((n) & 7))) + + + /*--------------------------------------------------* + * 8 bit access * + *--------------------------------------------------*/ +#ifdef L_BIG_ENDIAN +/*! 8 bit access - get */ +#define GET_DATA_BYTE(pdata, n) \ + (*((l_uint8 *)(pdata) + (n))) +#else /* L_LITTLE_ENDIAN */ +/*! 8 bit access - get */ +#define GET_DATA_BYTE(pdata, n) \ + (*(l_uint8 *)((l_uintptr_t)((l_uint8 *)(pdata) + (n)) ^ 3)) +#endif /* L_BIG_ENDIAN */ + +#ifdef L_BIG_ENDIAN +/*! 8 bit access - set value (0 ... 255) */ +#define SET_DATA_BYTE(pdata, n, val) \ + *((l_uint8 *)(pdata) + (n)) = (val) +#else /* L_LITTLE_ENDIAN */ +/*! 8 bit access - set value (0 ... 255) */ +#define SET_DATA_BYTE(pdata, n, val) \ + *(l_uint8 *)((l_uintptr_t)((l_uint8 *)(pdata) + (n)) ^ 3) = (val) +#endif /* L_BIG_ENDIAN */ + + + /*--------------------------------------------------* + * 16 bit access * + *--------------------------------------------------*/ +#ifdef L_BIG_ENDIAN +/*! 16 bit access - get */ +#define GET_DATA_TWO_BYTES(pdata, n) \ + (*((l_uint16 *)(pdata) + (n))) +#else /* L_LITTLE_ENDIAN */ +/*! 
16 bit access - get */ +#define GET_DATA_TWO_BYTES(pdata, n) \ + (*(l_uint16 *)((l_uintptr_t)((l_uint16 *)(pdata) + (n)) ^ 2)) +#endif /* L_BIG_ENDIAN */ + +#ifdef L_BIG_ENDIAN +/*! 16 bit access - set value (0 ... 65535) */ +#define SET_DATA_TWO_BYTES(pdata, n, val) \ + *((l_uint16 *)(pdata) + (n)) = (val) +#else /* L_LITTLE_ENDIAN */ +/*! 16 bit access - set value (0 ... 65535) */ +#define SET_DATA_TWO_BYTES(pdata, n, val) \ + *(l_uint16 *)((l_uintptr_t)((l_uint16 *)(pdata) + (n)) ^ 2) = (val) +#endif /* L_BIG_ENDIAN */ + + + /*--------------------------------------------------* + * 32 bit access * + *--------------------------------------------------*/ +/*! 32 bit access - get */ +#define GET_DATA_FOUR_BYTES(pdata, n) \ + (*((l_uint32 *)(pdata) + (n))) + +/*! 32 bit access - set (0 ... 4294967295) */ +#define SET_DATA_FOUR_BYTES(pdata, n, val) \ + *((l_uint32 *)(pdata) + (n)) = (val) + + +#else + + /*=============================================================*/ + /* Slower: use function calls for all accessors */ + /*=============================================================*/ + +#define GET_DATA_BIT(pdata, n) l_getDataBit(pdata, n) +#define SET_DATA_BIT(pdata, n) l_setDataBit(pdata, n) +#define CLEAR_DATA_BIT(pdata, n) l_clearDataBit(pdata, n) +#define SET_DATA_BIT_VAL(pdata, n, val) l_setDataBitVal(pdata, n, val) + +#define GET_DATA_DIBIT(pdata, n) l_getDataDibit(pdata, n) +#define SET_DATA_DIBIT(pdata, n, val) l_setDataDibit(pdata, n, val) +#define CLEAR_DATA_DIBIT(pdata, n) l_clearDataDibit(pdata, n) + +#define GET_DATA_QBIT(pdata, n) l_getDataQbit(pdata, n) +#define SET_DATA_QBIT(pdata, n, val) l_setDataQbit(pdata, n, val) +#define CLEAR_DATA_QBIT(pdata, n) l_clearDataQbit(pdata, n) + +#define GET_DATA_BYTE(pdata, n) l_getDataByte(pdata, n) +#define SET_DATA_BYTE(pdata, n, val) l_setDataByte(pdata, n, val) + +#define GET_DATA_TWO_BYTES(pdata, n) l_getDataTwoBytes(pdata, n) +#define SET_DATA_TWO_BYTES(pdata, n, val) l_setDataTwoBytes(pdata, n, val) + +#define GET_DATA_FOUR_BYTES(pdata, n) l_getDataFourBytes(pdata, n) +#define SET_DATA_FOUR_BYTES(pdata, n, val) l_setDataFourBytes(pdata, n, val) + +#endif /* USE_INLINE_ACCESSORS */ + + +#endif /* LEPTONICA_ARRAY_ACCESS_H */ diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/bbuffer.h b/third_party/leptonica/uos/sw_64/include/leptonica/bbuffer.h new file mode 100644 index 00000000..945cbb0f --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/bbuffer.h @@ -0,0 +1,60 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + +#ifndef LEPTONICA_BBUFFER_H +#define LEPTONICA_BBUFFER_H + +/*! + * \file bbuffer.h + * + *
+ *      Expandable byte buffer for reading data in from memory and
+ *      writing data out to other memory.
+ *
+ *      This implements a queue of bytes, so data read in is put
+ *      on the "back" of the queue (i.e., the end of the byte array)
+ *      and data written out is taken from the "front" of the queue
+ *      (i.e., from an index marker "nwritten" that is initially set at
+ *      the beginning of the array.)  As usual with expandable
+ *      arrays, we keep the size of the allocated array and the
+ *      number of bytes that have been read into the array.
+ *
+ *      For implementation details, see bbuffer.c.
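+ *
+ *      A compact restatement (an editorial note, not upstream text),
+ *      in terms of the fields defined below: the bytes still queued
+ *      for output are
+ *
+ *          array[nwritten], ..., array[n - 1]     (n - nwritten bytes)
+ *
+ *      with 0 <= nwritten <= n <= nalloc; reading appends at index n
+ *      (reallocating when n would exceed nalloc), and writing consumes
+ *      from index nwritten.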
+ * 
+ */ + +/*! Expandable byte buffer for memory read/write operations */ +struct L_ByteBuffer +{ + l_int32 nalloc; /*!< size of allocated byte array */ + l_int32 n; /*!< number of bytes read into to the array */ + l_int32 nwritten; /*!< number of bytes written from the array */ + l_uint8 *array; /*!< byte array */ +}; +typedef struct L_ByteBuffer L_BBUFFER; + + +#endif /* LEPTONICA_BBUFFER_H */ diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/bilateral.h b/third_party/leptonica/uos/sw_64/include/leptonica/bilateral.h new file mode 100644 index 00000000..e5b5bbdd --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/bilateral.h @@ -0,0 +1,136 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + +#ifndef LEPTONICA_BILATERAL_H +#define LEPTONICA_BILATERAL_H + +/*! + * \file bilateral.h + * + *
+ *  Contains the following struct
+ *      struct L_Bilateral
+ *
+ *
+ *  For a tutorial introduction to bilateral filters, which apply a
+ *  gaussian blur to smooth parts of the image while preserving edges, see
+ *    http://people.csail.mit.edu/sparis/bf_course/slides/03_definition_bf.pdf
+ *
+ *  We give an implementation of a bilateral filtering algorithm given in:
+ *    "Real-Time O(1) Bilateral Filtering," by Yang, Tan and Ahuja, CVPR 2009
+ *  which is at:
+ *    http://vision.ai.uiuc.edu/~qyang6/publications/cvpr-09-qingxiong-yang.pdf
+ *  This is based on an earlier algorithm by Sylvain Paris and Frédo Durand:
+ *    http://people.csail.mit.edu/sparis/publi/2006/eccv/
+ *               Paris_06_Fast_Approximation.pdf
+ *
+ *  The kernel of the filter is a product of a spatial gaussian and a
+ *  monotonically decreasing function of the difference in intensity
+ *  between the source pixel and the neighboring pixel.  The intensity
+ *  part of the filter gives higher influence for pixels with intensities
+ *  that are near to the source pixel, and the spatial part of the
+ *  filter gives higher weight to pixels that are near the source pixel.
+ *  This combination smooths in relatively uniform regions, while
+ *  maintaining edges.
+ *
+ *  The advantage of the approach of Yang et al is that it is separable,
+ *  so the computation time is linear in the gaussian filter size.
+ *  Furthermore, it is possible to do much of the computation at a reduced
+ *  scale, which gives a good approximation to the full resolution version
+ *  but greatly speeds it up.
+ *
+ *  The bilateral filtered value at x is:
+ *
+ *            sum[y in N(x)]: spatial(|y - x|) * range(|I(x) - I(y)|) * I(y)
+ *    I'(x) = --------------------------------------------------------------
+ *            sum[y in N(x)]: spatial(|y - x|) * range(|I(x) - I(y)|)
+ *
+ *  where I() is the input image, I'() is the filtered image, N(x) is the
+ *  set of pixels around x in the filter support, and spatial() and range()
+ *  are gaussian functions:
+ *          spatial(x) = exp(-x^2 / (2 * s_s^2))
+ *          range(x) = exp(-x^2 / (2 * s_r^2))
+ *  and s_s and s_r are the standard deviations of the two gaussians.
+ *
+ *  Yang et al use a separable approximation to this, by defining a set
+ *  of related but separable functions J(k,x), that we call Principal
+ *  Bilateral Components (PBC):
+ *
+ *             sum[y in N(x)]: spatial(|y - x|) * range(|k - I(y)|) * I(y)
+ *    J(k,x) = -----------------------------------------------------------
+ *             sum[y in N(x)]: spatial(|y - x|) * range(|k - I(y)|)
+ *
+ *  which are computed quickly for a set of n values k[p], p = 0 ... n-1.
+ *  Then each output pixel is found using a linear interpolation:
+ *
+ *    I'(x) = (1 - q) * J(k[p],x) + q * J(k[p+1],x)
+ *
+ *  where J(k[p],x) and J(k[p+1],x) are PBC for which
+ *    k[p] <= I(x) and k[p+1] >= I(x), and
+ *    q = (I(x) - k[p]) / (k[p+1] - k[p]).
+ *
+ *  We can also subsample I(x), create subsampled versions of J(k,x),
+ *  which are then interpolated between for I'(x).
+ *
+ *  We generate 'pixsc', by optionally downscaling the input image
+ *  (using area mapping by the factor 'reduction'), and then adding
+ *  a mirrored border to avoid boundary cases.  This is then used
+ *  to compute 'ncomps' PBCs.
+ *
+ *  The 'spatial_stdev' is also downscaled by 'reduction'.  The size
+ *  of the 'spatial' array is 4 * (reduced 'spatial_stdev') + 1.
+ *  The size of the 'range' array is 256.
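+ *
+ *  Connecting the interpolation step to the struct below (an editorial
+ *  note, not upstream text): for an 8 bpp source pixel of intensity v,
+ *
+ *      p = kindex[v],    q = kfract[v] = (v - k[p]) / (k[p+1] - k[p]),
+ *      I'(x) = (1 - q) * J(k[p],x) + q * J(k[p+1],x),
+ *
+ *  where the J(k[p],*) intermediates are the PBC images held in 'pixac'
+ *  and the k values are stored in 'nc'.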
+ * 
+ */ + + +/*------------------------------------------------------------------------* + * Bilateral filter * + *------------------------------------------------------------------------*/ + +/*! Bilateral filter */ +struct L_Bilateral +{ + struct Pix *pixs; /*!< clone of source pix */ + struct Pix *pixsc; /*!< downscaled pix with mirrored border */ + l_int32 reduction; /*!< 1, 2 or 4x for intermediates */ + l_float32 spatial_stdev; /*!< stdev of spatial gaussian */ + l_float32 range_stdev; /*!< stdev of range gaussian */ + l_float32 *spatial; /*!< 1D gaussian spatial kernel */ + l_float32 *range; /*!< one-sided gaussian range kernel */ + l_int32 minval; /*!< min value in 8 bpp pix */ + l_int32 maxval; /*!< max value in 8 bpp pix */ + l_int32 ncomps; /*!< number of intermediate results */ + l_int32 *nc; /*!< set of k values (size ncomps) */ + l_int32 *kindex; /*!< mapping from intensity to lower k */ + l_float32 *kfract; /*!< mapping from intensity to fract k */ + struct Pixa *pixac; /*!< intermediate result images (PBC) */ + l_uint32 ***lineset; /*!< lineptrs for pixac */ +}; +typedef struct L_Bilateral L_BILATERAL; + + +#endif /* LEPTONICA_BILATERAL_H */ diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/bmf.h b/third_party/leptonica/uos/sw_64/include/leptonica/bmf.h new file mode 100644 index 00000000..0fdd300f --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/bmf.h @@ -0,0 +1,63 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + +#ifndef LEPTONICA_BMF_H +#define LEPTONICA_BMF_H + +/*! + * \file bmf.h + * + * Simple data structure to hold bitmap fonts and related data + */ + + /*! Constants for deciding when text block is divided into paragraphs */ +enum { + SPLIT_ON_LEADING_WHITE = 1, /*!< tab or space at beginning of line */ + SPLIT_ON_BLANK_LINE = 2, /*!< newline with optional white space */ + SPLIT_ON_BOTH = 3 /*!< leading white space or newline */ +}; + + +/*! 
Data structure to hold bitmap fonts and related data */ +struct L_Bmf +{ + struct Pixa *pixa; /*!< pixa of bitmaps for 93 characters */ + l_int32 size; /*!< font size (in points at 300 ppi) */ + char *directory; /*!< directory containing font bitmaps */ + l_int32 baseline1; /*!< baseline offset for ascii 33 - 57 */ + l_int32 baseline2; /*!< baseline offset for ascii 58 - 91 */ + l_int32 baseline3; /*!< baseline offset for ascii 93 - 126 */ + l_int32 lineheight; /*!< max height of line of chars */ + l_int32 kernwidth; /*!< pixel dist between char bitmaps */ + l_int32 spacewidth; /*!< pixel dist between word bitmaps */ + l_int32 vertlinesep; /*!< extra vertical space between text lines */ + l_int32 *fonttab; /*!< table mapping ascii --> font index */ + l_int32 *baselinetab; /*!< table mapping ascii --> baseline offset */ + l_int32 *widthtab; /*!< table mapping ascii --> char width */ +}; +typedef struct L_Bmf L_BMF; + +#endif /* LEPTONICA_BMF_H */ diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/bmfdata.h b/third_party/leptonica/uos/sw_64/include/leptonica/bmfdata.h new file mode 100644 index 00000000..30e2b5ad --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/bmfdata.h @@ -0,0 +1,636 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + +/*! + * \file bmfdata.h + * + *
+ *  This file contains data for constructing the bitmap fonts.
+ *
+ *  The fontdata string holds all 9 sets of bitmap fonts in a base64
+ *  encoding of a pixacomp representation of the tiff compressed images.
+ *  It was generated by prog/genfonts and pasted in.  This allows
+ *  the use of the bitmap fonts for image labelling without accessing
+ *  stored versions of either the tiff images for each set, or the pixa
+ *  of the 95 printable character images that was derived from the tiff image.
+ *
+ *  In use, to get the bmf for a specific font size, from the encoded
+ *  string in this file, call
+ *      bmfCreate(NULL, fontsize);
+ * 
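+ *
+ *  As a usage sketch, assuming the usual Leptonica calls
+ *  pixAddSingleTextblock() and bmfDestroy(), and a caller-supplied
+ *  input image "pixs" (all names here are illustrative):
+ *
+ *      L_BMF *bmf = bmfCreate(NULL, 8);    // built from fontdata_8 below
+ *      PIX *pixd = pixAddSingleTextblock(pixs, bmf, "page label",
+ *                                        0x00000000, L_ADD_BELOW, NULL);
+ *      bmfDestroy(&bmf);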
+ */ + +#ifndef LEPTONICA_BMFDATA_H +#define LEPTONICA_BMFDATA_H + +#define NUM_FONTS 9 +static const char *inputfonts[] = {"chars-4.tif", "chars-6.tif", + "chars-8.tif", "chars-10.tif", + "chars-12.tif", "chars-14.tif", + "chars-16.tif", "chars-18.tif", + "chars-20.tif"}; +static const char *outputfonts[] = {"chars-4.pa", "chars-6.pa", + "chars-8.pa", "chars-10.pa", + "chars-12.pa", "chars-14.pa", + "chars-16.pa", "chars-18.pa", + "chars-20.pa"}; +static const l_int32 baselines[NUM_FONTS][3] = {{11, 12, 12}, {18, 18, 18}, + {24, 24, 24}, {30, 30, 30}, + {36, 36, 36}, {42, 42, 42}, + {48, 48, 48}, {54, 54, 54}, + {60, 60, 60}}; + +static const char fontdata_4[] = + "SUkqACYFAAAmoHICP///////////////////////kFcchgc45Bgc45AgcgxBY5DY5DY5Agcg" + "jkM45A8GocgxBA8M45BfCGgchhzOQxZBiNe/CDQRT6RQ+k4QV6BHcgvBBjCC+KoSjQI7wjj/" + "16I+EUPTpV0rI4LilVtAjjyPuR58jg3CRd6dJkcDMCj+v//qlVsMgQPVY6vugih9Lr/8RCF+" + "OqUUK6C/fHFV9RStf8MulG10fKcN6X+lXOBg+GexX71wxSPCf4/+kE0uR5zE0rtfCFg3oIp0" + "R+GF5DSmQaMS/oG1xen0X2wyh8WXwoI46VPt/kNYcf9J4h/pUHB///2H+t+lkCByDj/r9ZBX" + "H1BAtUr7u/IEOQanrS0eByO16tpVaSWtaEVsNiG66WrBgg05wM4bCYNWDCWIiDCER6HGhERE" + "RER3ZHBfXjaSQ7iOP/////////////////////////////////////////////////////+Q" + "JgK95DIDRZAjCDccgRMhn4g5yC9CD0IL+QxhuIfCCYQTC4IJhBiyLBB7J4QX4gvQgxxBehBi" + "yGDkPhdkEw1kPZY5cEHck5BIJOQc9aI+wjE7DL7RdsMu2GXoZehGDYaDCDQaDSCDQdIOGEEX" + "bDLzCLthl5ojzkeL0NMJhNNbVoJ6kclXuggyOGfugnw3vugv/0u+9IN7pBvdJ//brT3VtdLy" + "B4NxyGsOPRnv9R7xx3/9L+EU/3/f4jj/t+3TdDvkFZyC7hYdKkCCKHQI76SW/pD/6XCKdAin" + "29L9L6/9eEUOrD0kv8IIMNKkq/j/zD5h+P4r//99LfBKcDR9utK62NLxEIIhnmGGlpek3Lz/" + "jj5cv/ul7f+EvimH///0l6CENpfrHt/y9l7kr/4RT/f7f+PwRTkG7/tpav26XtrxoVI5/vSx" + "xsP/7ful7fdd1tv/7FRoj//DLgQZgQCFhlYlfv1kx9//28mPx/7ruu3/t9K3pEh/IKzkF3DL" + "g2BENDtBr9Jh4S12H/+3+17GwwltpbZBx0u0unr0v9IMjhrBYYpO0KZmDikMJsYTCDCeE2Gh" + "p6DTdiEE2KCdo8GcNj3pJsJofjiIiIiIiIiI4iIiIiIhhCIiIiIiIr1SMwyQbOkEiGQCvd4i" + "I//////////////////////////////////////////////////////+QVo7IEDkGwchpOQV" + "nIa0ENKCGhyC7kHchocgZschnHIMPtKk7oIP7ulv6f9Yj5DIDaH/3gjjr///+rI4aiIEXngg" + "RZBfCBEWQXsofKggu5DD5Y+Qw5UHghiCoIEYQw5VkCMIO5TkF7shhzOQxZ4IJZxy3IO5nIJZ" + "4IP//1iiPOGd0R+iPQgR3TQIIXZ3/S7BBnezui87MOiPbKHRHqftNNXvTTUjy/9JkcFjTpOk" + "9NsKmFTu+Etppw06VtMjhhO0OLCd3S+rSdIUvyDD+Iha8fQ//+K//3/+D/vbQRT7d9LsjhgI" + "7nH8Ivf/lw0bS/4RT////7f//pfq+lhr6/v/Yf/t//3/+D/sO2NNhpfiP66Xat8L/2//3S0r" + "XIMD/rvUEd9Isf/4Mp5wCDgYBlOzgO0fB3aem2mmnYTtipwCAZQ6DnAXDgynapwk20h/+IiI" + "iIy9ERxEREREZHDLiIiIiIjjj6kNWdP//qP/pMjhq8bSXwojsGkEwmliIiP/////////////" + "/////////////////////////wAQAQ4AAAEDAAEAAACSAwAAAQEDAAEAAAA2AgAAAgEDAAEA" + "AAABAAAAAwEDAAEAAAAEAAAABgEDAAEAAAABAAAAEQEEAAEAAAAIAAAAEgEDAAEAAAABAAAA" + "FQEDAAEAAAABAAAAFgEDAAEAAAA2AgAAFwEEAAEAAAAeBQAAGgEFAAEAAADUBQAAGwEFAAEA" + "AADcBQAAHAEDAAEAAAABAAAAKAEDAAEAAAACAAAAAAAAAAAAwBIAAAQAAADAEgAABAA="; + +static const char fontdata_6[] = + "SUkqAMoGAAAmoHVf///////////////////////////////IZAUfsgeBdyGdyDjkMgI+QPKC" + "GIO5AhzOgyGiCMcgYtUrIKHohowhschs4hnwgXcgRQhsgguQQXwhov6/QYQI7qgRUUk2QIfV" + "F5hQmmugqCMTCBHj/9F8j9JuknWm7rSbCBFPLtou2sjhlBSOKkE3Qf3+kv9fpcMQaXY9PTwR" + "T6WvpX/0v19aVbeQ0D6X7+v/X//QIQfj6xSS4QLS3xx69IVtL/EQy8CvbSqhq4I7//pJeVnT" + "Dr/+Niloufj9fpJLxalYrDtdr2DGk/etf6CDrkduzQkw21/w2prRfYZcNbj1+kQMQuL03hF5" + "sQRT+CEMMj7pAjuk/5DVDINfr+k9b06Stj+GXgW6pN9/kNsdL/XQg/+nSx/0v20vxSv0v/S3" + "/yDA/19sV/6WkQ0D5DY/6+lkDyf/SX9h65BRBDTdJ/StLILuk2lWkl399U2kw0Thpa0r7S0U" + "A7S20rSVtJL/iGrFMSPJv+qYoEaA+KBA4pikmKCWIiDVCINaQ0KiIiIiIoFhoRfSodbS1xbp" + "Id0hx8f///////////////////////////////////////////////////IHMFnMgTA0hyGQ" 
+ "G45DLcg0jkQfyGQDNxBv5DLcg3QQ2EEHDIEaEHDIaDkMTJzIeZBJkEmTwh5kNmEPhB7ITCGi" + "ZDOghsmQ0IIbJhHUEMzPAh8jYOeIuRsEZFHCZEHBDhdoww1DLm0bOGXGwZccGXHCMDgwQMED" + "BAwQMEi4ZwQdAg2GEEbYYZc2EbYYZcwwjB5dmDgwQMIMJoNbQNqHuRxF6I7YQIN+6BBrDf+E" + "E//pf3oEG9tAg3vC9//126bQWlXh0gyODd+l7fXwv/0u1gio0m90m916x9uu60nXXyB4G7kN" + "tx6JwU9oEU/4944qP/pcEU8EU+37f7f4j/q6q2tpDXhYaShBBDer1XfJD5IdL/0vtf9L9L//" + "ergin9JukvIHk5BiAggw+kn1fSr///9L3r2/fS30of9r1exWqXp4QQYaWl9XH/a2vH+l9/t/" + "6X58mgN//r07dJe04QRDYGGGgvpVeXb/jj5gT8X7r7f+CX6CDD/bp6bXY/xEIIQw16Xq8N/y" + "5ZcvT/Lp/de3/j+2QMd/r/p0l6CDdf0h73//ZF7/w37r99/fuD/vVq9SP3S9hpd+lLj/6444" + "a/9v7r39L0tt/7Xq9b0vDDIbAwQQu2ElKHq/fr3f/2/dfb39/b/V6jjSb1Io/hhiEFbEECFK" + "r/euRR+//28ivxXt913XZBcf/jaevr8geTkCHDDCCIF3bEk9XpN6X7f/7f7+xtpbaW+l2l9K" + "3pfpqGGEErBhJfCTBk4wl+wf/7f9fsMJba7cMJbDSa9JvSX2sPCwxCQYQaFBikIQQwQMMYIG" + "CBggeCBsNCgg3CBhBuGKBA2KBA24hAgbFdOlYIGh+NCIiIiIiIiI4iIiIhxEGCERERERER9L" + "GHfVBF0Tgtg0dSBoDTYk+h40PiP/////////////////////////////////////////////" + "//////5A887IHkOQbLIE8EFaCGvBBmsgosgaDcg3HIbHwaIbIvVVIZTkGHVUtv9IOHRHBU+D" + "g5DJBx//QRTr69fr/+3X+I+v/pa//v/9N0Q2XnshsshsjIaMyGjMhlOQIHycZAhyDUOQy+IZ" + "xzWQUWUOQYc7kGMyGdyTkH41kH4scnZB4JwQxhrIYp/64hF56DCLzBF4aLzQNF8+DyuCguuF" + "Kw/ApXIvMFTCI7FhU0XmgYUL/ap0tow3/6TdN2XCTpB0rVJqJHmHD6BYbNhoDEjzSbDDLhJo" + "NnHSdQ4cMJoMJQ0DpBphVC//x9v/ScMEkwqf9Lpp6dJum18cQwX3V9XXWv/pN9OkKX/9f6X1" + "1/TpdX+6umrDdRSS2yBGFv4iQZu/9D//4r//f/58CP3XI/p7pL9F9peEYv/zAF8NL/hFP///" + "/t/utrrutN6SQYr0F//7Ff+3////g3/11dJ+l+I/+ld7ey4KP+3//fpX5DOOD/3sb8j+6X/9" + "en1+v/b//dLr//Vuo0rY0ib//aphKGYdtAinbLfROC//Yf/8NKGEmwvaUOwvtK3SX/7DPcUG" + "NjhsUEHhBwwg8JuEGEGEHDCDhhiopiCKcIOKeJHTd8JNuh/+IiIiIsubERxEREREZcNKIiIi" + "IiNDj+En/X/IbQdf/+Cj/9Npd6SXq3WLDSrwSEdigkEGCDrEREf/////////////////////" + "///////4AIAIAA4AAAEDAAEAAABBBAAAAQEDAAEAAAA6AgAAAgEDAAEAAAABAAAAAwEDAAEA" + "AAAEAAAABgEDAAEAAAABAAAAEQEEAAEAAAAIAAAAEgEDAAEAAAABAAAAFQEDAAEAAAABAAAA" + "FgEDAAEAAAA6AgAAFwEEAAEAAADBBgAAGgEFAAEAAAB4BwAAGwEFAAEAAACABwAAHAEDAAEA" + "AAABAAAAKAEDAAEAAAACAAAAAAAAAAAAwBIAAAQAAADAEgAABAA="; + +static const char fontdata_8[] = + "SUkqALIIAAAmoHcGf/////////////////////////////////kMgMsfUgeDaOQLjkHHIZAN" + "T5A8K5AiDQQ0OW7kMqCEHIZthNJkcMwuGQG8g34gYcgo8go4hmwQIDIGIIL1EGOIKO1/wRmG" + "cvBqEX3S3dBGJhUwmlQSpGINF2/9cIxkfa9U+k2Q2OlpNgqaNzWwgWk2k33Veluk2q6STadJ" + "U2jHlzcJtZcGlS4RJOt9f9f9L62GMw+vC0np5HXS/0n/6Vf9dapwxpdj7rr6Wl/f//v9dJLa" + "kG76X/XXpf//v/j62kl4I2i4ZVd8caX8UrS/xEgvV7aVMUP19f615+S7/6BmGXBh70tK21ev" + "60lxefkmGla/8WxVZM9Y31/RDYOEl5uappMV/1sGKhNfYX/1EOuEHiR57DbXfUMOieIxwZgN" + "vjpfrI7a9XQdJF9sSOv+QL+qLzSt//9IW6x6tUg21+Q2qpHnS3Tf5BtTkNSi/06710rYpeDM" + "MuBi6pNq3+QZX6/S0J8DHdUn8f+v3S/Fb9L/63r8hnH9f26/rS0sgXj9fXpV+vuP9X9Igofy" + "DD1el6WQPCR/pL+w7XIZUEGx660nS3V0vSrv/qm0m2UBr61T7S0dAd13XSTdBL+r0l6YYX+t" + "JtK1hhK7CTDCSthJLpeIpIMUGJHaf9rYohsQsQiBhDEIMQtiECCxESCjKESKPdDQqIiIiIig" + "sGhF1Wh16pfbSSrFtKh3odkcHWI/////////////////////////////////////////////" + "////5A7AyfkDqG265DJBRxDKmQanIZWpDKDIOnIaBhB05BQGQwgkcgiCCIIIglxBEEG/kGPI" + "J5DzIN6EG+pDKoQ2akDFCGBBBDkdCCUI5kE8iuRfIPxCwCZBHIYGMFhMI2w8M42COFBnCDIN" + "7JWQz2SsEcKQzwDBENEENkENkQRDRANwQNgwQRthhnDYRthgzZhhGG5cjZQYIGXDOCBhNYYW" + "k2rMBNcu2ECBhptBtAgdoGHQPQdFwTv+l6T4QIGG0Gwi4UOg2gg0777dNXg2gg9Qq+m0g37p" + "eG/8Jf/pd96Cb7Sb9f//1pvbS0vV0rT9L3/0v/0vWCKjV91fdJ//dK/0n1Xx6eXX0vvHGv/0" + "uXTkde9Jv0m//6+/T20rSevIZCggrxpErPFpX+O36j/6C/X2//7/Ecf95dUnSdIUvCsNLCCC" + "I6vvpL+RR8ij//pe3++lfpev+2l1ffdJeQPCOQ0OEEw9Un6+q3/0v/S/S9v/S/q//tfYp1S9" + 
"NMIIMNKkq1uwS////0vb/b9+t9KZg0fdL3Wm0v/CCDBpdfvF/wwsMLx/pfpff+Evz+ygMr9+" + "ldPdJe00EEQbpww0tV0rmDf8cfNhfxD9/2/8/foEw//f/Y0vEQQQgw6+l3wb/mB5gfoP8wn9" + "pe/+P4bBv90vfvS9Ag2l10lff++//7fv+3/3+Qau/vtK0kXTaX6bq9ePe9L/shZ/+39pfff/" + "th/3S9/+vhhL/SkcJ//HHBr/2/f9v0vS23/vdL0m9LwwwgmRwb20R1SW/f/d//b+0vff2/b/" + "3r70m9LwwyDdOEENsHpHH3+9LIUfv/9vIUff9vuvryGcf9dY2KX1IUfwYMQgnFik0r1b0v2/" + "/2++K+9tLbXbuu+Oum9L8geEchogMMEEQzXbFBb9N6Wvf/7f7+xvX1t6+k0+k/X6ahhhAk2G" + "kt6TZDj4S/b//b0v92GEttLb0tgwvTS3pL/QbQWGDBL7CQYMFTCVhbDBrffbaYW2r3YYSthh" + "K7gwguKr0m9Jfaw8JoMQgQYIMIQgxCQhAhkHQGIRBhBI5BEZBhAYaGCB4IGQSmGIRBugMQiG" + "hDDiiCg4YT+EoZDOhD8aERERERERERxERERDiIMIRERERERH1xb+qQfpJBF2UAZhn9EDUFTK" + "B7xoQYSB7Qjj/////////////////////////////////////////////////kDxf7IHgQOQ" + "VbIH1kCSyCrZA8cEMyCBqHcgYcgYfIHh7IF4TChVCkM1yGhwoVe+loHBwi8gdNMOHS2/tL6H" + "/yGSCkP/6BFOvrtNeE//Sv9cR+v/p1////W6////p1zZkNnZAv2bCDcchsHyLGQ2DmwnZAuO" + "bCBfiBcc3EGochoHNBAjsg3HIQcguOSHLHLHIJMm5LiC7kMocmOWOWOQXciv/62JDZPQZBv5" + "DYhF5z4Zy8yr0yDGEGM1yDGJoMgxyYRiDIEYmQboIYxNF2HPg8lkaH6hMjhDjQ//p0Xb0XmE" + "YmEYcJNhNJj0Xn+gtUXqL3ReaQbVF5ou1qk4TVQwgYQYWDCDoIMIMKXH/9bSbig6CDoIOlyO" + "jAbFVthw+gsG4qwbbSsGKDYQQcMSPJRSBwd6dPbSfpL/6f6tdXqx1YVf6XTCevem168GYDR9" + "fSutLS/9WxeuqrV/9/wl/7pXXXQ/91p7pXjSW5DRhFH+sLuor///6C//33X4P91bl1pjdJKt" + "hovBr4iQPKn/x/X/F////7NAz/v0tavW9aYaXhG3/+YDM2l/zCf///+3+9e3TvSTeglDFegv" + "//bS/9v//+vw3/q3Wt6pf0PpfV3+xX/t//3635DNv9utb0R9t1X4/+vreyOGZ/2//+uvyGx3" + "/16elvVIjH//Xp3/X/2//3X3//WKjjSeNb/+10rtWyMfX/2//7q0rX6u1d2kraSr/3RdYaTD" + "LdsIv2GvJAZ/+w//2GErCCbCLr2EoNiR161b0l/9g0HI6FBimKg2KCB2CBwwQPBA2wQMEDBA" + "4MEDhhiFFBisETgwITTCg2vCTDaQ//ERERERZg2IjiIiIiIzAa8REREREccfwgg/9f6X+v+Q" + "ZK///0x/+m0sF0q9W0sW6XyGSGkOkI7YSr4rYhAkEGCDrFhCI4//////////////////////" + "///////////8AEAEDgAAAQMAAQAAAP8EAAABAQMAAQAAAFUCAAACAQMAAQAAAAEAAAADAQMA" + "AQAAAAQAAAAGAQMAAQAAAAEAAAARAQQAAQAAAAgAAAASAQMAAQAAAAEAAAAVAQMAAQAAAAEA" + "AAAWAQMAAQAAAFUCAAAXAQQAAQAAAKoIAAAaAQUAAQAAAGAJAAAbAQUAAQAAAGgJAAAcAQMA" + "AQAAAAEAAAAoAQMAAQAAAAIAAAAAAAAAAADAEgAABAAAAMASAAAEAA=="; + +static const char fontdata_10[] = + "SUkqAGwKAAAmoFQGz///////////////////////////5DIBocgZg0PkDwy3JvkFdyB4Qchl" + "DkGB7yB5OnZBQ5J8hmckQ0rBNUyDSOkQWnIZXkMqZBrghs0INDkM/kdkDfsLqqhGYKDEHp0k" + "G0HkFEwoQaaqCcWQzzCMMPXfwg0m0gi89KyCgekkYmCpppYQKgjc0m//0Yy8/16VtP0EGwqN" + "to22ugtBBtJv2vpLdJtJJ1SbTpJKwjnoOgg2swGmFLgiStb3+lXf/69v1bYLpuuR1pLVX//X" + "r/S60mwYorKXH/dfS69J/2vX/9UvYyGU699PXXpa/3//4+l1S2EcXqvXHX1qr/8RIMCP17SS" + "pwggnqvj1XpClpf1+3SWlS2l/v6S+btbr/IKbknv62KH2Fel/VJeEGlTDS/1W9tJKiGL8f/1" + "Sri83qxVr/sQ2K1JBpXel/RAuOFXm29On//YMUk/dhf+qEOuEHQtWG2v+w9GEwZuXj1/Uuw1" + "6bnzaSDtF1/wbSI+Sdx/X9IQ6WPCb0YbYr38MvvCMTVv8gqlyGsR/pX/ukkHaS8gqiMOkk2l" + "f/pfpOlvXSTYa/9/b2/yBO9f9cTQMzuu4/RBSgnHpJe2l+KX6Wv6ST1j//7f/2lpdf/pfkM8" + "el+xVr0/pEMofIZV16+v//9tda/pdZAh1vS+sge4/0kv3fyGbBBVeutK126dLtJLuq+ttJuH" + "+FTV/SOR19dJPSWqr6SX2gyx+ur7S0LbS20n/oJf8PS20mwjeNtf0noINYMJBBwwk2kk2kEF" + "texFJBiExCYXXTWwwkCBrEIEDimGEErDCQILERBgsQwgafFRSDEIRDCEMIMUIYhQWQyAaHER" + "bSrERER/0q90tfukqxbWh3odtLbSxH//////////////////////////////////////////" + "////yBTDMpkFsFhyB4YOQyAboILYFByB4hyB4vkMgCIK4iOQsFWQ07IZxyBEeQyQ1PINNLIZ" + "icEDIMeWcgoBkFy4IGQIIIoZByCDhkHIInkMEEDFCGyhBJkFzggyDcYCDINxgQMgwoIIGRDk" + "EIIp0O0MhjrIPyZDCj0GCD4aOEHEN3CPDDaDTQaapp6bwjxByc2EeIOTmGEcbw1TTT7ppJ1U" + "4B46aPGGmQabJeECIJZDPZEmDNhIM2JQIHBggwQMEDBAwSBAwQNo4DdkCHQIGyCiw2gQNkFF" + "htBB5cZwWGCIMOGCBhBglBggdBA6U2Ca5c2EbDvwbSayCZh8Ogg+/6C329JvbSb3SD777/q3" + 
"TdQq9INoIN/oL2/9J//S7W9IN9pBvv//tJ720m0tL/SbT3X2/9L/9L+XXSvdK90v//1p0nrS" + "+npuXX0vb66X/9Ll0176b/b///eu++1/yGQxyBwOOk63+++ONV/6X8uu3r+l/iOP2t6uk9Cl" + "4WHqR8e7r6SH/Uf/S+19v3/f/96dGF7q0kvCw0qCBAn6vpff//pe9e39/3pX/a9XTaTql5A9" + "wQ2QEmHWgmKer6X8iPkR1/9L7X30vSS///991bpL1TCCDBpKv76Vb/9f+l719+/W+lD/erXW" + "K0v7wggw0qS9K4YIL////QX3+3/pfpMoBq/a9XTTapfWCCIFy4MNL694g/44+P9fdL2/8Jfn" + "mzoGZ96dX+6S92ggsMNLS9bmyD///i/v9v/P/6BMP+/r22KS8RCBCGGl+teDf84POD82DH79" + "1//5HDL+Gw3+6/a/XhBBhpddK+/9PT//N7/r2/8b9yGpT/q1ek2l9BBuvS6vu9f+yDuRj/+3" + "9r7ff/2D/2r16MLpfT9+kh7/X/xf/t+9e39fW2/71q2qV6XsML+qV//jjkCM/9h/a+36+u2/" + "/9dU3peGDCCbdtalw/2/93/9v3r/f2/b/20r71frwwyGWXBBVbaL8JK/+l9//t/a+33X1//7" + "G+levhh4QIXYqKNFX7fWQR9v/9vIO+9e3uu2ltkND/rHUaTekQw/hhiEE2IpK+l6///7elx+" + "33X+313TXX6X5A9uQUQGGEEQa4tKr9vS/b//b/a9jbS20tvX16dJvS/TChgwgk2Gkr6TDILj" + "4S/Yf/7f/+2ltpfdbaX6Tfr90GwgtsJd4JNhcEtLb//b/r3YaWw0tu0uDBJp9fSX/B4WGeNB" + "NNCEGZkghCCGEGGZlCDCDCDwg2GhhN0GE3YYJBBsMEEEGw4YJBBsV00kw0Gh+1QeE0xCCDBB" + "hBMQkCChBsQggwQYQeEG2FBA8IGCBuGIQQYYoINuIQINr8JWCBr4qIiDCERBhCIgygDw1IiI" + "tCLhghBghEGEIMJrxER+hEaERDiIiPpaB/0g/SIGwCcdJFzOgGgr6jEGvGgamgH2EL4j////" + "//////////////////////////////////////////+QP6EDob+QPBoHIElkDw9kCyyBJBA8" + "F7INVkDYDEZDLjyGVCZBXmCqQZPIaUENEAoKlt5A8sTSfV00/S2/6BwdF3D+Dg//pr6Q/+QW" + "wbj//MKvrtNeC/9JN1/iP//+vr//+k3////9r///+k9ZeECzPy+IZY5BuP5AuOXhHhDKHL4g" + "tOXxBowscg3HLjIGByHHIG9CMci+Qzv/+3BEMyeEGQMUCGQLzyBimgwUgRmRewVNBgqDIZXg" + "qYQsFTIEUyGzAUgucuippgmRLIOcuhDFX/pYhPTChGHCNzROBBuKAXpgoLoLBU0wVMIwwwVN" + "Fzgqow2icEgoYIGCDBYMK0EGEDClxP/7YRtvl20YOgg6CDYVBNaMXfQXovNGK6MUIJt0XbCT" + "WqCDhX336B6apJL/0ug3bpB0nSsGbDZZsNghBsHB9BYNhiE2GIQbSbBsNoJwYkergzYN4P1p" + "9pXXX/q3vTaWrr6V1/pf9at02vTX/t7fTaT+l/9Y/rr0370/6XTT0/fr44/6WnuukKpdkFFk" + "K/pN+9DWv//6C//S/rq/7+XVJum9Kt0DXxEF9V///9f/991+ZgY+6Tf8VrQSww0YwaXkDwOE" + "f/H3X/H////sH/+k2k1dJN6SQYrwjj//Ng1dL/m0////9h/t1/tvpN6SQa9Av//ev/b////w" + "3/rpN6ekrelQ+v//sMJf+3///X4N/3t+lt6X4+l6V33hiF/7f/9+t+D/ulr6L70q////+XBp" + "/7f//XX5BQO/9/TdJNvpER//16d1fS/9v/919//1emONK71r//0rtb1/9h//3Wla/XrHWrxS" + "S//YRdbpsijtourZFfT/9v/9+0E2vrZ3hourW0k26X/7aWgwgmGFYaVsMJJzWBDtPTYaaYTt" + "O20oaTYRhUGnUUxV76V0kF/9ioOXQpigxUNiggbYQOGEDwg3CBggwg4MIHDYaCimIWEHDCCa" + "ah9OrDeP/2ENBoNMIQwhbERxkcMgYqbQTCxDEJpoX8RocfxEREUYE4jiOIiIj/2En/r/IG5d" + "J/1/////H69JtLIH9NJf3S6uq9ISh0CxdL8gt46iO2kl6FbYSCQIMIHWGISCTCbWIiI/////" + "/////////////////////////wAQAQ4AAAEDAAEAAACoBQAAAQEDAAEAAABCAgAAAgEDAAEA" + "AAABAAAAAwEDAAEAAAAEAAAABgEDAAEAAAABAAAAEQEEAAEAAAAIAAAAEgEDAAEAAAABAAAA" + "FQEDAAEAAAABAAAAFgEDAAEAAABCAgAAFwEEAAEAAABkCgAAGgEFAAEAAAAaCwAAGwEFAAEA" + "AAAiCwAAHAEDAAEAAAABAAAAKAEDAAEAAAACAAAAAAAAAAAAwBIAAAQAAADAEgAABAA="; + +static const char fontdata_12[] = + "SUkqAFAMAAAmoFsNP/////////////////////////////////kMgNpyBoLGQPBocjfIEkED" + "wU3ILjrkDxwmnkGmKIa+ENfFshpj0Qy5kNIcg0UIHhxyCjCLhDSHIa9kG8yGZPCqpAvBK4YR" + "oCU0km4PTChBkMqgJxhMhnCBBhB6u/QIoBubbpPSb0gjbYKmEH4S0bNo43/rhBpNqjHpKyBh" + "/SDYVNNLCBUkG0EG//0Yi7fdJOqt3S02CzjaPNroLSdJv6qtLDS2qT1TaaVLo5UEDwQb5gGx" + "TAYXdf/ql9PS+t3rVwurp0XXS6SdW+v9f9fpJwxRcUrj7/9JUv/7v1X/Wkl2DGv9aTpel16X" + "v66/6/pbkMyK79/S+tf2///H6tJLbBHv6/4/66Vpf4iQYUfqulXhAioHSrx6S9If//9uq0kk" + "tL/f0v9K0v/v62KHbq9f60vNNdhpX+QJ4JXe6pV7X1+qSXhB0kw0tf6Ye2l0RNFxb1/oEF8W" + "pf0xC/14gwxCSTXv6/yBiiXON4Qattr/sGOmtcL/0oNeEDappMO1+thpIxyIRuOl+kjDdcJ4" + "lzemwwjC/4byL6TbNgp//6ENpY3CDpBG5sV/qQaCEgjc0rfyDKTIbWiX6T+9WqCDbVbkGRRL" + "t6Tav/1/pWl9PShsNL14dJK6b/1X9LXLHf1Scf//bVv8gtRVfpPEX71vXRAnslG6SX2l+K39" + 
"a/qlrjX/+3/1paX/pb1+Qbj+l+2la/+lkM26/9L1T/+26/Sf1IZg9f6X//0l+xT1/6VrkNDp" + "N0vSWQPOOvX+2/yGlBBkdetLr/WrVLTX+km0m2H+Cp1a6RB3b+0n1eku/9L+0DLHtLpNXrQu" + "0t6tKrUJfXD0knpgwQt/+rSTW0EnYSbpW0kF/weEtsJMTcF/Tqw0iBepYYSIZurDCTDCSsMJ" + "BLa1DEQkgxCYQa0taoMV8QriExVMQiCjsREGFiGEGm8aHaEQYQsIMIQwoWQyA2nER6pIRERH" + "3Vf26pf0kq9v1xbSSHdKFtpDt11WI///////////////////////////////////////////" + "/kC0GD5AzAxBA8DCCGQCoQQMw0yCB4EEEDwYoQyA1YNxDuQ8Hwg2YQ24vIZILHkNQ+QaS4IG" + "QzqyGWkILkwQMhs1ITUg+pB9SD6kJQhjUhmHIGDkMUIZyAgyBgGEGQMBAgZDPQhaEEqIQggm" + "hCoQ1QyBFqQX5MgwGQl1hBgg7hhHyBw/CPkD///vCPEHDCPEHDRxhx/r+CeE6i5wDwxTCPkG" + "pDSmT9GwSQ0TIzkMuZF8homR+EcB2Q2eQI8g38g38g3+cBQfDUaPgoZDZYQIGGQMTJTCBAwy" + "BiZKaBA+QI4hnsGfAgEDBWQe00CbWvRttGwR7CDYQQdhEE9hA0wgaQQdpppppBNPTtIINsIN" + "oINsINpPLhDgmmnaaVyGzkgepgCPwg2EEGHe2k+GHvuk//pdrek3uk3uk//6/t02lSX7aTa+" + "l4f/Sf/0v70m9tJvbX/967SbV60vS0nvdL2/9Kv/S9b0n9J//3+9td0m0tL90m5dfX2/9L/9" + "Ll0+XT9vfb3Sr/3S/ur9J8erX9L7xxX/9L+XXb1/X/f6/+6dJ0q/IZAdyBY+pCQ9X+O/0P/o" + "L7X36v6v8Rx+/RhVbW0hS8LD6BBny1fpL/X/0vevb1f1f/90r/un0vCw0lRyddXr9//+l9r9" + "/f96V/3ule6TaSXkDzggogJMHVIJjdX6/yFfIV//0vf9vS9JL//dL3Suuv00wggw1Vf7wku/" + "+l/6X2l7f//pQ//691bVL1sEEGGlpVpeEFX///6Xv+/vpb6TB/36t7FaSX+EEDDqkv3iv//h" + "hf0vtL2/9L8IKdQ0/uk39U3SXvhBEMomGGgv+rg/44+P9ff+/8JfnOynBp/f1q+qXtMIIFhh" + "paXq84Qf//8X9pe3/nP/BBv961b7Yr8RCCww0vSXvITv58efH5wNH79/2/9hfuG/9ev3S8II" + "QwaX9Je3/CDwg//zif2l7/4/tkNQP9vbXpPS8IINpdfvvf///7fv+339/kNqf+l7a20l8IN1" + "fpJX36/9kGCP/Df6Xt//7Yf+/r0Y//v+lx7/X/3/7f3/fpeltv+9at0lel8MEt/ST9/33chs" + "//2/evb39/b/9f1pvS8MMIJvbRHWpgMfv8cbD/+39r79/f7/t02l6vpeGGQaSYQT3YXX/9L/" + "/9v3r2/r62//X29K9Lww8IIXYrCR4Sv2/9v/9h5Bgftfb3XbXbINx/1/rpX8gw/hg8IKwwmI" + "S76V6WQXf//29divvuvrbuu9uo46vS/DDEIJsWkkr9vS12//2//29tLbrtV+o3dJvS/IHnBA" + "vYMMEEQ04bFLfpvS62//2/39jettLfrdWqpX0v0woYYQSbaS3pNkM4+l+3/+3/Xu2l2lt69p" + "fpXr+tBhhArbCVPhJhhcJft//t67+7DS20tu62GvT030v+G0FsMJLagkygWmRaYLsNdf21BV" + "q12GEsMMJd2EtgwSafX0gv9B4WGfMIEUAgNCgxSEIhlkyC+oZoOQY0IXQhjXIZ9GDQyGEOCI" + "YYKAIsGCRAvoydogX0YcGEiGXoxX0CTBkC+iH7Sh4TQYhJqgQYSBLhiCu/t1vTtwxCsMQrbY" + "hWwunSbv8aERDCERBghEQZIA8GWIiNCLhghBghEGCEGF+IiP0IjQiJA8C+CIiK64QP6pB+kk" + "gf+i4zUBoDN0iBKb0INfCigak4HhI0QMw1IvYQjj////////////////////////////////" + "////////////kD9BA6hrjkM2CGYP5DIDUggeBiyB9hBYsgeGVBDVggbQ2ZiVHkGiCB4rkDfy" + "B4bJqQN5kNdyCiCBEyDVNBbeQPHyqqqqaf/e6aRBYsgeBfEXcgUYnZDRZDUtLb/90hf//9NL" + "1/8gtgsP/8xtfS2mvBf/X/8R//6ptfX+v/Xr///+m1////V////9K0iGb/kMz8g0fkD4fyB4" + "ZxyG3MhmjkDwUp5DMHIYHIHgTj//uwQTycyDTMhl0wnhPLmQy4BcheyBeC5kfgpcwQYKXMg1" + "0M5DZBPAg8FBSBBBM5DCCK5EoQx5C4QcgmcguI/9KxT0wQYQ0bmiQGgwyGBFMhsmQInpZDPN" + "NBkNk00cYZAiaDCGQXmFRttEgHkWbuune7//7hGDeEGEbOEbOEEGwqQfT10C9NNU0EG1QYRs" + "uqQcL4YIGCBgkyFsG0CDBAwUwFX/pXQfRt0EGggg6V6TWjDZBRZDZmlkFFow2jDkFGIw2k5D" + "RiMG0EGiGy1p1Bwd6fp0n6S/+n24hBtXSDpNgzYF84CgQg3voLiEGIQbYhBtJtiEDaTxLuuQ" + "0W76991paX/rdPCdLp/0un/S6rp+6dLhP//WtNq36//TY+366X71/pdNPWr02vjtft72rpdV" + "SXZAxhBx/X66f9v/f8Jf+9X/1Y/62i602lqKXug0/pv9RS1///QX/6/pfD/br3WKbpJBbaDS" + "8RIHgYPv/DC//+v//7/ygDH/dbprVIJYbRuBhLwRmv/x9pf8X//v/7B/6V17vShh4QVBj8I8" + "f/4L6/5tP////Yf7fq2vfTeqQa9Av/5wNS2l/7f///+G/9J66vVK9KgYXpf/+w0v/b///r8G" + "/2+9+26Sf8fX6u/2K/9v/+/W/Iav/6WlaSL71S/H69f7wwv/b//66/D///pb0v//16vouGp/" + "2//3X/yGU7+rdOrGrd9EKP/+vttr6/+3//daTf/36xVJNukkv/66Xe3pf+3///Wv16sfpXGl" + "//aLraTbYRhYZCPp/+3/+2laTYX1u0XWmnV9L/+wl3CbIjsMJbDCXIwG//Yf/7aVoKGEbXus" + "zthLfqm2kl/9iFMwXBhJhhJiFMwzjIMEWQYRBkMEZBghhkEIIYIMRMwwDg2GlDCTELIMaQwS" + "ioqZgY7glB6H/7XL4pimlYVtp3fbV3dp2xCimF6EJ2uq92v/2hoMIMINCGEIbERxDBCIiIhh" + 
"TeEGsQwmgwhd6EccfsREREIwE4jiOIiIjX+Egf//1f9f8gVq6/6////S1H0vSb8gfo0v90vu" + "v0m4WLrXkFsGsdRHtJL7S2GCCr4rDFEDwUYQyQ0yCCqGlhgqXaxERH//////////////////" + "/////////////////////4AIAIAOAAABAwABAAAAYwYAAAEBAwABAAAAeAIAAAIBAwABAAAA" + "AQAAAAMBAwABAAAABAAAAAYBAwABAAAAAQAAABEBBAABAAAACAAAABIBAwABAAAAAQAAABUB" + "AwABAAAAAQAAABYBAwABAAAAeAIAABcBBAABAAAASAwAABoBBQABAAAA/gwAABsBBQABAAAA" + "Bg0AABwBAwABAAAAAQAAACgBAwABAAAAAgAAAAAAAAAAAMASAAAEAAAAwBIAAAQA"; + +static const char fontdata_14[] = + "SUkqAKINAAAmoCAz/////////////////////////yGQBw/kMgGYcgw5DJBpvIHg1wR3kCuC" + "B4NFhbrIHiwnZAxZFjIafUQ2+BJJshrRkGnyGtBBqmQ05kNqyBcQQ1YINyZBRMhpfhf1CMwz" + "S5hqg9W4aggwoIGCDCWC4QYIPXrwR1BQm6Wkm6pGzYKmn2EFQRsgwjhB/9UjeXg0m1RifVkM" + "t1VBNhUGE1pAtBBtBN//hBYdboJOkk2nVJNgj3R4s8b8JUk6TftfpYfdafV09VbQXCDcEHWX" + "BWCmAIraTf/9eldL0ld1VcLp6bRddKkqff91Vf9fXbDeqtwum0v9L11v/+v+uqSwxR+rx/3S" + "9LS+vfqtf9da7DHr+/pel/79f1/9dKr5Boha9Lr/9L1/a/8fXSqsI/ev/HS9Kkrrv/IZ0n9V" + "aSXYIEU467ePX6j2v+I/tqulSulfX+qX0ldf/e9U6Q9wr1X6pfJ+u2l/kFqyO/tJYr2vr/qv" + "BA9JhpX/XeG0qqtq9f1SS9NIl3DS1/pg8MQlyJWuP/9JfF4QaTFN//EMaVd36/SIZrhNLnCe" + "EGob1/2U4bUJ/cLX/iDXQQb06Ydr0uw6RvZCaePX6V106EwdK2GF38NqQnJOzgE/1/SkcbS2" + "nhBtQjc2JfX6kGrSgjDDW3/r+hDfi3CekEG2v62XmoQTdN/kDgCIKtS/pOl+2qQba/IHCTD0" + "rat//X6Ta/XSuGEl/htaur/0v9et91SbH/+l1evIH0a/pOhJAaf0t/ogtWRY3Wm9v/GutLX/" + "S0sdfpfbS/X9L/0t/r9L9v/pv63r19L8gXH//tL9ddKiDVn9fX19JfbFPXXWkQan+npekv//" + "99df0tLIbHW+vXIHjj11S6bf8hrWQJHp/Sb/rVfS01/rddu/BUH2lpaW2k9JNpJa63pJX3D6" + "6TX9IoZddrf+gvrvS3psIMk7/9N1odpbpOkraQS/70km0mGEcxWvWrpJqwwknDCCbSStJL+o" + "PCW2EmKDXWtUwwkQy06xCINQyKYaWGGEECC2vDEQkgxBMINN/TSsV9bCYhJMUCBYiJBppiGC" + "DC0hxoMIRBghYIMIQwULIZAHDiIvpKIiIj91X7qtfdUvuklXtrS4t0o+lC20h263SxH/////" + "////////////////////////////////////yBlyPyBmCy5A8NUMhkrQgaA6CB4NKCB4ZhyG" + "QBxZCDkHcg8EUcg3cgr35BbB5kGw6kNRQQ1QZAgwQaBogwBkGgGQ0VkPWQxWQxWQxWQShBes" + "g0oINBBDCCDcMhmJyGWrIaichmKwQMhoEyD1kEDIPUQQiPjIMTIaOIL0IKMIEDc8B4WCBggd" + "sMIMMgYZkOCDDQYQaDCDShoNwg7QQMMGEDYYQeGE0GEGg0mGk1uutMIPBnthGYRAzwIGQaMO" + "nIKMPWEZhiQL8DBEMrgYIhldOBlngbcEDZDKgIzEYM8EYRmIyGbhCURwJwZ4C5gFAIGEGCwY" + "QNoEHSr7CMxA03ISYQIgxjkGJ5BiMgvCBB6apqkqtK9AgYbg2gQMPBsIINTAU8FT70/T0G1m" + "A2L5gbRwF34dBB8N/4QT/+gv70E3toJveuv/XT20m6pfSDhBBhp7aT4b/pBV/6Xa3oIN7oIN" + "7aT/+3X7aTpaX02k/ul7f+k//pf+k/aT+v1/+qT1daX/TaML6Xt/6X/6XMJowswnre63vX/7" + "ave2rpaXi6Tffpff///hL/9vSb9Jv1//6/0m168hkA3H0np/r3xxS//S9tL2/f9/xHH/tGF2" + "6ehXwpA/foh7bW/Ue/Uf/S//b0r9K//20vtK0rSS8LDpIEzZ19Vv9f+l9pf+/7//+9e6vpeF" + "hrhHmR/at6r/r/6Xv+3r9L9X+2lq3t1aSXkDyggYgJMHSSCjf+vvIO+Qd//0v0vb6/q9f/79" + "LSbSr00wggw10mtJ9Kt/+v/QXt/t/ev6V//pPtpevqmEEGGlr/eEl//X/0v0vb1fpX6Yf7aT" + "98baSX3ggQYaSSXpPhAv///9L2/2/9L8JSQCr/+vadJL/CCDDS6r7j//+P9L9L//S/CTNYa/" + "3S1dJq+vpoIIg0AQYaWv1yXDZ+OP/0/b/b/wl+ZDIgNP999+6S+00EFhh116vOCB///xf6Xt" + "/5Z/4Jh//pe3el4iEFhhpaql3g3//OAX/ft/t/8L9wb/bSferYpLwghBg0F9aT7f84D5wH//" + "Ob/S9v/H9shr1f/1arpeEEGGvX97f1///t+3///7kFU/7pWr6MJtV4QINpeqST7////7f6Xt" + "9/f2Df9//7r8IPX1xfd6/9kNGn/t+3+39fW2//ulaSTel9+36Xu//7//t/17e/v7b/tpe+k3" + "pfDBf1pf+scchld/7ftr7fr2u7//1ev14MMIJvdUpgGH96/b/+3//919d/71a9U3peGGEE7d" + "yOqSX79e//7ftpe3v/7f/avuqV6+GDINYEEEO2EnCW39/9//t//t91t1t/09aV6vpeGHhArY" + "qKLtL6fSyGd9//28hoftL2/X12yDd69bX/Sb0iGx/DDwQTYaYSW3rel/f/7f7/t7dbdf/f8b" + "1V9fhhiEE2IpL9N6/t//hv+K9vbXtdv/V6qNX0vyB5QQy7DDCCINsWtPq3pft//sPXf/tLet" + "vS26jd0r1/TBIGGEEm2l3pN6X7f/7f9extpbaW3a9r1Svpfrhgwgk20l9JhkNj4S12H/+3+/" + "u2lsNL+uwk19N6S/dBhhBbDCVN4JMMJYIL9h//t6XXuw0ttLbhhLYYS/Svpf8PBYYMIJO0KY" + 
"MFQhIUmwYVNNPTbQ03TTdhhBJsMJJtwwkmxVNOraaH9JB4TTFEFAZDGqCDEIIIg0AZBisMUQ" + "z1kPWQxXkNlbBhSC+mQlRDGmGKIZVYZQwiGVWwcQiDTW0/QJQZDKrX2sPCaBgvRTg2BIhA0u" + "GS4KP+/te4YLDEL2Fhr+n/xoREGCERIKgYiJBVDERxERxEODBCDBCIMEIMF04iI+oiNCIkDw" + "1bEREfrCB/WEH60gf0qMMH6VIIGU4GoKfSIEsGKCDV9UQNA9IeNA1JAHnhD4j///////////" + "//////////////////////////////+QPkEDMFW+yGQBPBA8NSAmQZ4IHhqQQ2oIEoDFkGuC" + "GlHkDwN4ILMyB4NM1ILMyB4NMyGrNLYeQPF4g14kFC4UgqQQLwFCpbe9pEGbiB4NfIu5As5N" + "Mg34hr9X+qu6Qd1t3Xb+0vUf//9G1/S+vIGYZj//tr67TXhf/S6/xH1//bX///9L/X///bX/" + "//9Lr///9Jtf/////8l/kNTiHwg2f/+k3LhpGgZclMhqeQaJ5Bp/INU9BkGiCBeMgnZDLgIM" + "IMhmwgyDXMg1QSmQ1KE3IF4JYQUHyGbBBdyBGhJBDXchrcQfCC4ZGggwE//xCDwgwQMIYIPJ" + "OCD0wUF1yCj00wVMEDBUGEMFCgg8gY8h+8hjRSEQE1//9JsJ6YUKEcMMIYRsjqBFMhsOC6BY" + "KmmQ0HTRsgwUINSDB1RgcI6BiCgz4OCBnwSDBBtAgz4OCmARf/thGxvTCOFCODoINhJJrRg3" + "+gvRt0YN6MGwgg3phGxVqkGgvvvbh6dqkv/S6D6MDaCDoIHS9J9BByBjCDfNLIGJhtJyBfEE" + "HSbIKMRgVoIHIKMVJ1IaMIJnTrTaTpaX/7e8Qm0mknSbIN8VnAMCn/S6YpuKem4hB0uJdpcg" + "oz3+9tb//9Lq6DpaTr9XV/hBdV1avTaXQff+61S66pL/9t3r/6b1en/S6aenW/Xof/dW/bSd" + "dL8gpD+lj7aTrr//+l//T02vVj/1ownTaV0KSW2QzMv6b/xr///0F//39ff9r1r060luEDXx" + "ELuq///+l/+vv/B/vTa3TFeqWw0DS8hkBoI/+Gv1/xf/+/r7JAZn7+n2m6Sr0bMMJeQyAXmb" + "P/j7X/v////Z1Av90v19UmHhBJBj8I8P/8iAMXr/nE////9h/3tpN03dJN/QYXoL/+cBs2l/" + "7f///+G/3S/W3XfSSBr0vr/2GEv/b///r8H//W6+kr9ofS//9iF/7f///+Q16f39Poum3pfj" + "6X93+GC/9v/+61vwf90m10lb1S//9L+9mA1v+3///X7/39N6T3SX//07r6X/t//v+/kMt3/d" + "LX0rdVId//11u9vS/9v/+0tK//19jikm+q//16bbX1/9v/9/rX69YqnVtvS//tdL0XWyDj6/" + "+3//aVpNr39our/XFJf/6L+GgmGQo7aW2vf/t//t1DSsIwvpWW8NL6pJt0l/9sJcMJMMKwwl" + "sMLyXAv/2H/+2lDCCYaX2lFMVbTurdKl/7EKDiExTFScNAogRrDIMazQMHUGJAjVsg+pDGpt" + "JOCHUQ0DQGEopiFkCKoYSdqThlfBKD0P/60Y07WGFt/+wuv9iFCDXxCaa3pqnf/8MIWgYQME" + "DCEMEIcRHFghEREQwU5BBhYhhNBhDT4jQ4/iIiIhGw7xHEcRERH/0g/9f4Sf//yB+Bf+l/6X" + "/9f/+ra+PVfXWCf/q2uC6r9NoLpuq9RHHS/IGeOltpV9rtpJehWwwSIHg08EDCDrDEKECDIM" + "tVYYIfaxER/////////////////////////////+ACACAA4AAAEDAAEAAAATBwAAAQEDAAEA" + "AABKAgAAAgEDAAEAAAABAAAAAwEDAAEAAAAEAAAABgEDAAEAAAABAAAAEQEEAAEAAAAIAAAA" + "EgEDAAEAAAABAAAAFQEDAAEAAAABAAAAFgEDAAEAAABKAgAAFwEEAAEAAACZDQAAGgEFAAEA" + "AABQDgAAGwEFAAEAAABYDgAAHAEDAAEAAAABAAAAKAEDAAEAAAACAAAAAAAAAAAAwBIAAAQA" + "AADAEgAABAA="; + +static const char fontdata_16[] = + "SUkqAHAPAAAmoCQP/////////////////////////////////IZJx0QyQzjkM45DJA3vIHhr" + "2RbyB9BA8Gy00/IHg8XZDMsiXkGzqIK/Akk2Q2nSINUyG25DVoQ1aEGSCGUoINjkFEyGPIZU" + "yGrPBVXqwQahNUm4PCBhQQYQMFwQcYIGED131IZoaNsOk6SbVII4bBQgwmlhAtHDDCOEH79Y" + "QNINqnrZBoHrQQbCpp+EFSCDYQQb/1wjkXbSekbfSbT9JsFTR82uEFpOk3/+gsOtqk6STadJ" + "LYR9Z4bhBv0FSTdX9fpYf6SeltP6cILhBtBOswCkpsNFdX666S+m1/p7pJbgtJ6bRddBVVNp" + "X++v69LpK2G164XT1/pa/v79a/69dWGKJ2krY+3ul6XS6V/69f9a0uGP/rX/Wkv//9f9fSps" + "Ol/vWl6Wv7/X//1pa6kGu9f/0vS69f+v8fW6S8Izf6/xr/1uu99yGga/qtaSbBH1HS28fS9I" + "atf8R/dVdJLwlf/6S+q9f/fdVpD9PpL9VXkvqmGl//uqxCW2r//18EDVbSv8gerIl3tpVW7C" + "vS/VKvQekw0tevb7SVrx//pBJcXRH9MNBf/yhQxCrIUZXf/0kvahA1Ypv/qIMMJQmv+l+pBp" + "cIOueG8J0w9f1ZLgyJNVuC/9JCDXhB9NWG2v1sNQjnIWvx0v6uug3EwTSu19cMNIh/SsGcF/" + "/6UuNpdaBB8I5hsMI2lv4N4QaTeP6X6iG1xbptJBBtiF/5DU1SCON07//9But61SDtfkFgal" + "29INrf5BZEyDInS/S1/bpINtJf4dJK1b/0v9JuvrVXBhf+303Tf//6Wu+9U2P/ukv3X6pdaT" + "oSGDZ9JXrogerIl79Orf5A8S6/0v/Wtev9Jb3S/FJ/S1/pXrH//2//v0t69fX/0v20v0tdKi" + "Gl36/X0Qyn/+20nr+tIhpj/v16XS/SX8f6X9L5BQ9dL0lr//7Vr7+k2l6V9euQPDx/pJdNv+" + "Q2o7rS62/VdUsJ//trbD/BSBPiWulf6T0k3SXfrpJdWw3rVPetIhiel3V/0gv+9LdWwgyKP/" + "qlfobaW2k6STapa9XpJXTDCH/XulrDSuwk3S6QS3pYelthJibabS10m0kGsMIJOwk2ltpBBd" + 
"LyjggkgxCaDX9PtpEMwGsUQ0xDEJsJJMNBBBbXgxFYYTCYT/tbFfC4TELDFEMueIiQa0JCGC" + "Bq6FIUgwhEGCEMIMIQwUFkMk3ERdaxEREf60vbVL/qkvbSX9+ku7SS8W0qHekttIdtLbS3ax" + "H//////////////////////////////////////8gMBZD1yBoDQ5A8GXQhkg31IGgFAZA8G0" + "MgeGsQQyQ2oIG45AkvyC2GvMgqoTIa6QhtBCGgbINQqQYFCDWoIbBBBBBDAghgQQwIIOgguI" + "INYZDTIIYIIGKgREA0EwDYRANBMBqgyGgoIYGEMVEHrIY0IYqyC+hAiZBvMhg5DL4gQLMzA8" + "PBAyGsn4MIHIqGZoED//9bwQcGCDgwgf/64J9pcLCYQOyG0kBGgeQboIQgg1AZBQYCMweQLz" + "IGJkMuZDLmQy5o+GWZgqOZgYZDNxHwoZBpORaI+FDINJyKdHhNENlCBjAZoBgEDNAzyGzNHA" + "zuv7CNBA1Z8I0CB2CIMHZ4GEzwLwgQO00001CadJtoIIGHBA2EEDDYIG0EDzYc+HtNNU1dEC" + "9EgdJmwUL5smEfBh24NhIO4N4fDoIP/6Xe+gg27aCDe2k01u+364eg3wkvQQbQQYfugnww9/" + "Sa/9L1vSb20m90n//p/tJ0v+nQTa7aTW3/pP/6Xfek3uk3tpfX7/dNq3Wl+2kG79L2/9L/9L" + "1vSb7Sb///tK1V6tJUvS0nRhd0vf/S//CXcwswnpPuk+6X///tpOlpfugm+/r2/9L/9LmFX3" + "6b+m/3/9unutJv68dbS/X28cV//S+69vW/W/X//XRhdv0tfIZAaQ5A8Ufp9/r/6//QXuv30/" + "q/4jj7/2raTdCl4WHpEH5tb6Ue/Uf/S+69vX+v/+6tf0nVLwsOlBM3dP9b/X/pe6+39/3//q" + "9PTdWkl4WDWkeb/vSX/X/0vuvb1fpX0r/br79XqvIHhYIZdhWHWEE6TaT//kF3yC7//pe6+/" + "XrX//rpatpWkvhNMIIMNUko/vS9v/r/0vuvv7670r/er3punVL7UEEGHXvpegq////S9/2//" + "/ph/3ut+k3SX9hAgw0tKvfCS//r/6X2l7er6SvpQ/9enVjtKvXCCDDSSS9bhggX///+l7/t/" + "6X4SZ1BW+3X/T6++EEQaBMMNL/p4h/668f6C+0vf/S/CTIgGz+ut01aSX00EFhg0tV+4P+OP" + "/0/f+/8JfmIYP96un23SS9poILDDS6rSeeCB///xf6Xt/5ZH8Ew/73XXvS8RCCwYaX6XeDf/" + "88GX+H7f7f+wX7hh/69XVsVXgghDDrX0vb/ngfPA//57f6+3/j+2Q2hH717+6+EEGGl0l77f" + "++//7ft17f/+2QV9f7W19PS8IEGHX6S3v9b7//t/r//9bkFNH709NqjabSXwg2v/T93///+3" + "7de33Xf2G/7/6S9L4Qer1SQvf1/7IN6v/b/X2//9sP+66V9N9fa79V+/X+9/+37de3v7+2//" + "Xvrevwwv6pX/+OOQzJ/+3+vt+l6W//e2ukk+l4YMIJ7fVGwz/vX7D/+37df9/f2/7pdXpN6X" + "hhhArfRdUqf36///b/X29//b/7/f768MMhqiYIJrbS0Et/f+//7ft17fpb1t/7paWqT6+GHh" + "BC22lpU/vpff/7f6+339r/6X33SV6RBRHhh4QVsUxCJ2t9XrkG77f/7eQUPt17e9b1tkC8V/" + "exv76VeDB4QThhMJa9W+v//9vX/77S7S2/73Sr0m9L8MMQgmxGlf70tdv/9v+K99v39vS3X9" + "ikr6/IHhYINEBhhBEFS7S70m9L9v/9v9/b3S20v/umKrV9fwmChhggSbaVP03hLrv/9v+vY7" + "S7S29L136b0v7UMMILYYSW9WGQLvpft//sPS3/bS20tu67S90r0l/oMMIJNtL8JMMJYS1ww/" + "/2//3YaW2lt2lthWqpX0v1w2gsMMElbwSYMElIOfW2Gt3fbarbXuGwgrYaCu7CVsGEv0r6C+" + "6QPC2DCSpoQgxoQkNWDCqq6txrppuwYSUMMElbgwknFe6tpof1h4TQYhEDGpBisIIMIIIg1C" + "hBgQGIRDQIIIIIYEZBuIDBhSC9TRDjCD1OxCIZohiEQzRDBxCINYwNNNUCTBkMsQvtUHhNBh" + "eiXBVClWGrwZCAX/7r/4YWGFuGFhhf1/44iIMEIiDOoZIaDUGQEQiIuIhwYISCmGIgwQhgvx" + "ER9IRHERIHgrwIiI11hGgGwCzroO+qCB+loP9JGCNQGwGXpECYGYPSCBkuBsBt9Q0qBr0ooS" + "GciHjQMJHQDx6IGobv8IRx///////////////////////////////////////yB49PIZIsED" + "wZIIHgxxA8rIHgqWQVrIEsM2yGnZDUvyGQoIM8yB4KnhSB/MgeDZMhtTCWw8geCTIamBIFIH" + "g2IUgzEEFeCGXAKC1t7rXrpp+v9WpA+4geCryMHIHvk0yBfiCp1b7ql6Q/+vf2vr///o4tel" + "015AzBmj/6tf9prwv/q/64j4X/0rS//r/vf9f//0rX///+m////9df///6b////1dL///+rg" + "iGpTIvkG2ZDS/IaX5DUpkpkNOCGXGQf8hmOR+QTyGnBKZDXoQ04I5kNqhJyGVBLiBc+QanIZ" + "4IZ4ISCOCOCDa5BUwgvxBeCJBFciuQz8Qxf/q4gg8EDBAwgeCB4IPCBgoLrkC/BBhBgoQMED" + "BQgYIGFBQoI1gokMzgWOMg9VkKGQwdY44//qwnphQhhHDDR1BQbJnnpkFCCGdGlkM6EGgyDc" + "hMI4QZBuhNDIEIVGx0ageQqAZoGAQMEoMJuCDBBhL/6unphHChGxwgg2FCCY9P9AvCp6aCDe" + "gwjg1qEg0F9pphbCB0mg1MBhf/Vo2K9GyYQQcIIHScKrWjZMgY8go/QWQUejhsI4bIGPRsmk" + "2QL+jZNAg5BR/ThSBHkMe9PbtpPX/+r0H0EG0g6TpXLx4MtPEJ3fS2IJiE7EJ0m3QINhIO6p" + "Pu/6039aX/pXvEJtLSDpNj8+GWn/S6aenVuKDpcS5pXIF+9tJu1dJ1pL/6em6etf9J6b/QXT" + "TdNpPTa9P//61aXWl/63/q6Wl/1/pdV19/XQ//dft039Vv9Nj03/7evv+l7vuk9Nr9j+6ujC" + "aTpaQqvZBp4gQ/q/6Qpa///hL///S6v/q/7SvSrcINP6t9////9Bf/97/3+2vTdN06SSWw0D" + "S8RIHgrU9f///0v/1//ZQDX/3XVj9IILDDQa+QyAatP15OJ+v+L//39fmoMz91dfTdbegpsg" + 
"0vIZAZlDd/8ff/3////sH+66tpPqkw8JJBivCPj//KgDF0v+eT////2/+nr3fSb0kga8IF//" + "PBt7S/9v//+vwb/bW1bq7SSfqg16X1/7df+3////hv/r9b9K/wwvS//9hhL/2////8g2Eft+" + "l0rdfq0P/93+GIX/t//39X5BUn/pN7ejG9Uvx9L0v/Bgv/b//61/D/39apNvSX//7v3y4bf/" + "b//f6/IZkP+6Wr0t2qX//1d74S/9v//X2//19+1Sf0iGH//XX1vS/9v/+60v//bWK6Stuv//" + "+m219f/b//f1tfrtetjpvVJf/sLpdq3r/7f/9pXTa/+sbWk2xSX/9owtoJttGFhkHfT/9v/9" + "urQTa9pWSHbRftbS+lX/thBcNJhkOOwwlsMJcqwyv+w//20rCUMI2v9pwwl9aTbS//xXDCCb" + "CsQrY1hra6sGtrrbaUNBMQtbSYpit/VvSBf/ak4ZzCFMbUkBsRDPU2QYrlAOawzyBFbkHrIP" + "WauUA5rCFAbGlEINZAhPDCCpqUBmp2gSg9D/9hdNNBrDC2//YXX+xCnkmF8U01vXTtf/hoaB" + "hAwQaEMEIOIjiyOGQCwCwwgYWIMEGgwh/EccfoREREI2CPEcRxEREa/wgg///hJ/6/0n/X/I" + "HiiX///pdfT+n/+tpePX9fhfX1bSyB49NKvptL7/1IHg1wEYA1CxdKvSEdtJLyBmDU/2l/YS" + "2wkl8eGGEEQPDXcJBBhBpYYhMQgQMgUVwsGaAeCsF7WIiI//////////////////////////" + "////////wAQAQA4AAAEDAAEAAADOBwAAAQEDAAEAAAB3AgAAAgEDAAEAAAABAAAAAwEDAAEA" + "AAAEAAAABgEDAAEAAAABAAAAEQEEAAEAAAAIAAAAEgEDAAEAAAABAAAAFQEDAAEAAAABAAAA" + "FgEDAAEAAAB3AgAAFwEEAAEAAABoDwAAGgEFAAEAAAAeEAAAGwEFAAEAAAAmEAAAHAEDAAEA" + "AAABAAAAKAEDAAEAAAACAAAAAAAAAAAAwBIAAAQAAADAEgAABAA="; + +static const char fontdata_18[] = + "SUkqAEARAAAmoCq/////////////////////////////////+QyXe5DJDVchncgthMyB4NFk" + "TMgeJBA8FKE06yB9ad5DbxIgScCpNkFYdSGnQgrOQbKENqhA3ghmWQz2QVRyBxZDMoQbJ4XU" + "g0YQl4IHhBhUm4OggwoIGCBhYwQZBuJggYIHhf1CJwazjaSdJNpqEGFQaYWgSwmg9d6yGanQ" + "Qb10m+gjxMKEGEGlhAtHhhhHyf/4QaVpIw3rZBpelQQbCpp+EFSCDaCDe/XSOMwbSfSDpJN3" + "1TYKqMyraC0nQTfT/pYaW0gv06dKk4Iz8+K4Qb9BUk2k/+ugt9+npbTXVtBcINwnWYAnTNg3" + "77f1+v1aS+k3dVXC6em0YXSqkrSv99UvX/S8N6q3C6dJ/0tLff/r9/S6pJsN0RB6rH2/S9JL" + "XX/r0v//WwxRfqt6XvXpaX1fv9f+lqlThjrf+tfS//////6pbdf910vrS9X+tdf9LSWsKQ1L" + "pfr/9fuv/f/H+1rcIzH+v8aS9LS17/yGwU96S0klsEf2OvePpfj3X+I/2v0l4Svr/S/SStf3" + "/9JwkwqevXf9L+m6/rvdLihW6vpfpL8jmktpf5A8WyEu+6She16/rSXggekwwgr/XvbXr2E3" + "1/SSXgmpHPYaWv+3tpJNEGt/H1/0viHhBpMU3/SyGoYhaZCg/v/0gSS7oINWtf9ifDVCSC6T" + "/X+Qa9Pnx1Tph3/0yEAkpr3Ba/pQaXBBvTUMNtf9hpI4mD+PX9JpdBvRwnV2F/2HhBOQxhFj" + "9f1mBh/TxBB0gjiDYYRxL1wbSIP6Tdj/X9QgbSxdJtQgg2xX/kNVPhA6t/1/SF/unpINwvel" + "DMbSCOG1b/IM4vX6TaW9OqQOGvogzCmD6TaT/5BSEL+k6X7aSTbSX+G0km+/9f+r/r0nDBf+" + "/TpN/9V1paWRjv6VNj/+kt7deQPBe9f0nQj99XS8geWEJe/6t/61fpf+lv8Kv0qb7S/FX9LX" + "9a6x1/X2/+v0v/pXX/0v20v110t69fr6IZcf/vv9daVENXt+vX/9L7bS6/9SGo//S9Ja/1/s" + "Va6t+ldZAu6V6X1/+l9tf11069//pZA9RX6/T/5BWhddK63p0krSSwnr9JOtsP8hteQLMpv0" + "tLtL1fS7/177Yb1qmvWk3X7aXaSWv/SXTcHr0nTa6IGBDXbSvSeku+r0ttJhhFIBO/XXS1tL" + "tJ0u1CXroPSSdWwj3f/ptJNbQSbaTaSTaQQXXw8JbDCCiE1117DSIZsBBlusMJENSAg4YQTD" + "CSsMIIILetkNQgkmKDQYVr00rFRGrEKJrDOKYqmKCC2FqDEVgwmEGnodqmvhbCYShhAgsREh" + "phpCDBBr0hUMIRBhCGCDCEMKCyGScCItpdCIiI/6S+9VX2uvvSX9qlXvWuraSVYtpUO9Jdqw" + "ttIdtLbS2mFiP///////////////////////////////////+QEwate8gaApwQPArqIZINtZ" + "A1DU1IHgpaEDwZCCC3wQPA4jyC2DJMgpIyG0BkFdQQUCZDUKSBAuQ1CZAuDIYBkFwZDAMguD" + "IYDIMAyGpqQ19CGNCGYnINYMg1DRBUBkGsbBEYDchgQQwwQYLlOGCIBc1A5GAxIEVENnkMqZ" + "BihBp4hoK5OB4KJBbBU1IbYxODBEaDORcGpYIH//63hA4MIHBhBxX/+QXDUJrwsIMIPDYRoC" + "MMGHDhGgOGGCDBBggYQYLDBBuEHDCCMw4YYYRmHYYaPBA3DBBhMEGEwSYYV9112EGpBXoQiA" + "gQNEDFCD6EGs5B6EDByCUCBA5AxQhmUIZtCGbQhmUR8Gg0BSonDMZBrwIEDZBqOQiAgQMMg1" + "nIQgIINZBvQg0YDNAzQgYLIKMdHAb0p8C69MIoCB3QdBB6IaEMIhsthEM9oIGmmmmmqaurpB" + "NsIG0E2wgbSDzAOZg9qnp9yGUdIN1BV84NozBh7hsJB3BvfdBP/6W6tukGHhtIMPDaT/7v6a" + "Qbw2lST8INhAgbT3QT7f+En/9LvvSb2wk3vXX/dfuk/pekHSb+0vDf+k//pevpP9P6T9P/uv" + "aVpf/aCDae6Xt/6X/4QX96Te2k3t//7pd09OlpfTpN/r7f/X/6XazCek36Tfpf/3+rat1per" + 
"03ML9Vv/r/+lzCza7et7re//+62vaTrpeOk636X3/pf/pfaXt9P9P///dPe9XX/pXT+vt44/" + "/6Xv/et+t+uOP/zCpNpNpCl5DIBocgeDj9Ot/Q/6Q/+l917+/7/j+6Wt+rpJeFh6RDH3T9Lt" + "+v/pe6+3pfpf/9/vt1el4WDrCDNzrev/1/6X3Xt/f99f7q2l1pOlXhYapBH2//qv//9L3X2/" + "fpX1v+6+1bStJfCw60E6t0/r+QIfIEOv/oL7r719f6//6fb30vIM9kM2wQQYaSQSf1vS9v//" + "/S919vr+vW/3S1/SbSS9BoMIIMNLqK6fSr/+v/S+/3++u+r/vvum0ukvVMIEGGuvreEF////" + "pe6Xt++kr6TD/enp16bX+8IIMOtL6fBBL+uq/+l9/t6+v6UH/a6+x2kl/hBBg0kl9eIL////" + "S/S+/9L8JMpwZf/03tPSX1hBEGsXDDS6r7lQGX/XXj/S9v9v/S/BBSoDb+6WvVWqXu0EFgw0" + "Fr6Twf8cf/p/pe/+EvzaMH/e36tulX1ggsMNLr+58IH///F+3+3/k9P4IMP966b9ul9oMIIL" + "DDS+kqfIPT/+fBo/f6Xt/8L9oN/669NxpeIhBCGGlqut7f8+GM+GP/z6fv/f+wX7YN/39Wqb" + "SXhAgw6/6fb/w+H//b+69v/H9sgyDP3SbV/0vCCDBpdaS+39f//7fuvt//7kDjT+//Ta/CCD" + "aXpfe////+w/uvfuu67B/3rq6Rvel8IP/0k/d6773/4b919v//bDf+urql6Xwgem/WL3f/9y" + "BhP/7f3X3v7+2/7f1+3pfDC/qkv/X/3/7fuvt+v+7/9Nq3SX18ML71V/f445Bqp/7f3Xt/Xp" + "dv/a/6Tevhhggm71WbBv/1+w//t+6+/f37f+9daq3peGGEFvouqSX79e//7f3+3v7+2/+urf" + "XpeGGEE3bfpd/f///t/pff19bf+3TddJN6XgwZDXFwghd0nCST++l9//t+3+32va/+l+uq3/" + "hh4QVthKIRPO+r/3//byBiPS9vet62yGU9f2vt6T6RDKjww8IJtimkkn1b0sgXjv/9v9v9vt" + "Lutv+9666pvS/DDwQLDCYKv/0v2//2/4r32/ddv911Y2Nb0vwwxCCbEV76b0utv/9v9/vdLt" + "L7S7pivSfr8gz2Qa4DDCCIMsNpJPq3r///t6XXt91t1t/69aV6/pkMzwwYQSDYaXfTelrt//" + "t/v7G2ltpbeltpe1vpV9qDDCCVsJK+kwyGaPpft//t/17tpbaW312rVaTekvrQYYIJNhpVeE" + "mwuEF+w//2/3920ttL7S20t9K9L+8MNBbDCS+kwYSUJa7f/7D16/bS20tu0uDBfaV9Jf0g8F" + "sGEltQSYYLZB0KFTDDVNNPTbUFTdNN2GEk2GEk24YSTYpqtK01C/WHhYYhINNCEGKCEJDTDB" + "gqaaem2hp6abhhhBJsGEEmw0GEk2v1YacfvQeE0GEQzKyGcQEEGEECIahQQIBhiCINwZDAMg" + "uDyCgGwYUgwnaIOIIYTuxCINQMMUQagcOIRDUBhhbuCTBkMwGvsLDwg0GF8hAZQUgQNnDJMM" + "r9b1/uGFhgtwwthNVVf+KQiIMEIiDBCIgynDIBRERGhFwYISBPqIkNGogwX4iI/QiOIiQPBZ" + "cRER9cIzA2gb+qCB/WEH60g/rQNdJJGxlOMjgbQaOpAmg2D1CBj+oaQg1egoogahpaXjQMKa" + "gPBjRA1Dbv4Q1sIcf////////////////////////////////////IFmpA0AkvUhkhlQQPAr" + "gIMgeFsgeBxBAkggTA0rIa9kNe/IZAJBA8vIHgT+FIHjmQPBS8gyeEth5A9HIa2BIgfkEDwU" + "iFIHxBA8FUghtkJbeHtL/IElkMu/S291UgeL5A8Cf0YOQPHNNMhleQZP/+6d0g4dNN3rfpV9" + "If/Xv00vX/yBoGoP/84n+l7+v/q6/7VeC//V167CDXhf/V1/xH//6tf/3/9J69f//9tf///0" + "v////2////9LX////br///9LQIg2UyE8gqTIa08hqzyDZTIsyGu5BofyDU5F8hiZDVgizIbZ" + "yGs5EmQVqEVyGa5F4hlnyGlBDZBA8G1yDJhA8NQC/9W2CBB4IGCBhA8EDwQPCBgpDK7ILzwU" + "IGEGChAwQMFCBhA1BQoI1hlQUFIaE5F0IOnUgmpBc5BjyDFZE0//rEIPCYQYQwnNQGHpkMs5" + "BufXIFzhNMhlnTCDIGDhMIZDYdQuSsgwcg5/77///VtPQYUI2OEeGwoR4iIDRTChdAsKmmFT" + "CPjYVBhHBYVUcFhDBQUgXoQI4hsopBjg2jYFzQCDwNzYLr/6sI4L0wjwsIIHQQbBUEGsJ/oL" + "008JoIMPTQQa1QQcF9qmug6TtV/+k9BvRwdBBoJB0m9J9HBshl+QMfoLIZfo4VHCZDL9HBtB" + "NkC/o4NhBByBj1UOkQ2eQY+6bvbV/S//bp9Ag2gg6TpWQL8k1EJ3fS2IQYhOxCdJ3QIG0ndG" + "3rmI8DTd/Wm60qS/9Lp4p10nSfVngzQg/6XCYTwg6txTpcQnVx/6e90m66//b7ptL3W+rq/0" + "F003V03Ta9Nf+2utWlrpf+l709Ol/77/hL/3Xp66f/+r1bS61//bHq1/SfXr/S6p69utfHH9" + "906em8UktshpYQ2P6X7+v/+/6Xv7pe2vV/2lzH7S1S/CBw/2/0hr7//9Bf/7/X3/9+k2r1SW" + "2g0vS//1///CX//e/8H+2lpXvVqkltoNLxEgeBORvr/9f+v/6//shhp/991iulBAsGDRww18" + "hkArV+vx9//F//7+vynBo+3WldNN6QMPBAkgwYS8ETv/9el/3////sP/W999JJh6SQYrwjMX" + "/8pAy7f/zyf///+w//dLSt+m9Kg16Bf/z4K+0v/b////w3+3X3vapfpA16X1/7aX/t///1+D" + "f+tq9JvSvpYYXpf/+wwgv/b////wb/bp6tbvSv2h9L+7/Yr/2//7/vyCuR/r/oxvSS/H/1/8" + "ML/2//+tL8H/erSel36/9L6/vBgv/b//f/8P/a/apN2kl//+22r6MArf9v//+n8g1O//TpPS" + "Tb6//+l/4S/9v/+0tb//Vtb7VK70iC8f/r1drel/7f//9f/69R/vpJf//q2+3pf+3/+3Wtr9" + "er7T0k231//YXS7X1/9v//StJtf+1j40rikq//RhbQTDbRhWyBA+n/7f/7faCte0rtG1qnSb" + "df/20uGk3YS4YXv/2//20oaTYRtf7Juwwgt3pXpJf/sILgwgmGQsMMILYYXlIGZ/2H/+2lDB" + 
"BMMJfDSjiuvSt0l/9irMPFMUxVsUnDTtPTYaaYTtO2GEopiFoM0WEmqTenVukP/6jmEnakgC" + "6IaE7DIMDJIDBThokNCdyC6cgunLGSGCEYHJANxChMLIaBEQqakMGn0CTB6/+wtIMINBrYLf" + "/2F/+wp5INegmmFtNPW//4NDQMIMEDQgwhBsRHEMjhkhAsGEDCxBhBoGEP4jQ4/iIiIhHATi" + "OI4iIiP/hBA3/X+Eg///pf9f6T/r/ZA8OLf//+l16T1///a6/S+k9aj//a5Arq0q9JtL//wY" + "YQLdtKvSEcdV8gaIddtKvTS20kvj2GEkvrYYIIgeCsOEggyB6sIOtiFFEFsGRPE6AZgsLDCY" + "XsFkDYDScREcf////////////////////////////////////4AIAIAADgAAAQMAAQAAAIEI" + "AAABAQMAAQAAAIsCAAACAQMAAQAAAAEAAAADAQMAAQAAAAQAAAAGAQMAAQAAAAEAAAARAQQA" + "AQAAAAgAAAASAQMAAQAAAAEAAAAVAQMAAQAAAAEAAAAWAQMAAQAAAIsCAAAXAQQAAQAAADcR" + "AAAaAQUAAQAAAO4RAAAbAQUAAQAAAPYRAAAcAQMAAQAAAAEAAAAoAQMAAQAAAAIAAAAAAAAA" + "AADAEgAABAAAAMASAAAEAA=="; + +static const char fontdata_20[] = + "SUkqABATAAAmoDgf////////////////////////////+QyQy7IGwGXPIZILLkNA/kDwVrIW" + "3IHgvBA8FqE00sgeC9pp5BWhIFSvIHhpOQPDToQK3ILYb01TTINOELmCJwypBY8FVsgy2kQ1" + "6BSCocEDBSDQBEFfCBcWINJwQeF/qDCDSCD0m4eCBhSDZWEGFwTwQMIPC1VKQa6keMPTpJu8" + "IMKEGmuECwg0fIP3dcIGgg2kE9JukkeGwqDQaWECwj42EEG//wiRhpN6ON0lZDSetBBsFTXw" + "gqQQNoJv/9HnJetpIJ1201SSbCpo0JroLQTdP/+EFh6b1ekm060mwRp5mNwQb8JUrat//1uu" + "kk+laeklhBaBA6QdZsCsKcAwqdK/qukv3/pXuvbgum4TaMLpUq3T7u9KltVaS61bfpcLp6/p" + "Kqp1vr1/1/qlcNpJK2wvfdL0tf3//X/S+qsGMjvrHpuuvS6XS//6//SVWw0c6X/q6+lr/f//" + "/pfXhj1b/9L0uv3+mvX/9JJLyGtiX9PX+uvr+/VePpf7YR9f/XGkvqquv/1X+u0klwUi3pL/" + "/66Wvf+Qbi/uqWklVhGaY/Xj6XpD3X+I/37pVuCT/36SX6Vpf1/0lpQl6vS/qv0lbr/720u0" + "h7hX+/6XkWfVpfv+6pRVbXpL+kvggaqw0r/IHgmELd7aWwldq//SSrwg6qGEtfpW9hoJKu2v" + "S/0kvCakWisNL/Xg8MV5DNp43/9IL4h4QaTEJv/4wwSSkEUf6X6gklrhB0+v+yXBmprW//0Q" + "06l5mK0k1DDv/5JgUIJrvBf+lB1wgbwnTDbX/Yejyh1sdL/tLhPo8TW9fqgw0kmyC/Eu//0k" + "cGH9NxBA2kEGwwjaX6hvIYmleP6X6UEG0tp0n0cbYYS+lyGqESCDSd//+hbXF4TaQSBuK/8h" + "tJ8I8w0m/9L9J673VINsL3+YbSQQbSv8gflZA4Cf6TaW9dJBsNJeQPyjD6Te3//+k/+2lTtf" + "+G0ltW/+kv0rS+vScGC/9+nr/1+utcijv6STY1/6S3t1/SX6vEfuu6/7/q3+QPDZi7fpa/0r" + "/RA8PhC56VN7a/il+lr+tdY/+vt1//pf+krr/6X9/6WulvX/1//+2k9f1pUQ1/30vS6ohmv6" + "X22tr/6RBsH//9dV+v9uv0m/SC110vVf/pfsV/9aTrkDELel6X//9tfpfpXX06/pZA8Hj/SS" + "6d+sgy3uul+39JWklhB//utsN/BSB4b4lddNpeler6X1rpJX3DtcKn/pdbaTpVpJaa1f/TcP" + "+k0m0tItXaX16t0l+vpJOkw0GQg71r7+h2l2k2lVqEv+Hpba2EeRrn/S9patpJthJulbSQX/" + "D0km0mdWmKtfdJtJNYYSuGEmwkraQS/4eEttBMQg09de2kQaUiygwiGuTk5ptKmGEEEFtexE" + "JJimgwv+kmDBLWK2ITBgkrBgkCC2qwYWDBBhBp0hSDWGIXwuExCoMQgQLERIauiQhggwr8Ug" + "whEGCEMEGEIYUFkMkMyMgeC/EMu+qwZwMgMo4B4axwDg8B4axwG0LxEW1SxEREfetL9Uv26p" + "L9JfVvpL20kv+qSxbSUd1S26YW2kO2ltpb1tNYj/////////////////////////////////" + "5AWBqr/IGoNMEMgk5DJBaGEDYGKyB4FlZA8CjQgtgYghkJ/IGYMUIHCLIKgkCKsC5QCHQNcl" + "oaCWBqlIGYVAMFWGCoBcqwXKgCDoDBUBrmoFQ1Bg6g1EQGmVYaREwZCrDSIoCqDIGKEDGhDC" + "ghnGEMAyBCchgvZDRWQUYQzaENEyGlMhsBfBAyCuMWRYFYjYaBKcNPQIiYF//9reEHIuG0HB" + "hA4r//IMEZBcOuaAeGnCDBA+wg4Pwgf//94QcMIOGEH/+uE1tetMIOyDIKBFAPIKCwhpi5DK" + "FQigGEQzCCGaQQzCCGaQQzCEaBmkgFNokBlshpzCJwcMhpaEQgInBwyGloRA0fD8g3IINPkD" + "CCDTMgoRkDE7R8NDqvTCJAQNQzMEYQIhsoGZgQ2aBns0BiEaAXtNNNNIJp6baQIG2aAu0CBt" + "mgLsIIPtNO01YaIGEUQMISnwy9do0Ah3g6CD5BQG5BRbkG9oIO00000gmm0naCCDDcNhBBhu" + "G6DzYYNAxap91oNpN1BV84Kwggw120EHwb38JB//S/vSb20m90nrf+vVtJtKkvQQbQQbvcJN" + "Yb/0E//pdrekG9tIN7aT7/v970/pfToIN+2gvb/pL/9L+9Jv0m/S//tLWm1bS/90m19L2/9J" + "//S770n3Sb2+v/fb/SetL0m0E97r7f+l/+l6za9X3X6//3Xat02lpf6TaNr9e3/1/+lzac2u" + "3pN7pN71//rrTaT1pfT0336Xv/r/+l+v3q/q///71362vXjq6T+vt44pf/pe3Xt9X9X6v/9q" + "2u3Tpa/pX39ff//6C/X29X9X/Ecff5tV0m6FLyGQCm5A8ND9Pv0kO/pD/6XvXv9/3//1902l" + "apeFg9SGKE/tL////0vtfb0n9J//7XSfb3pLwsOqCe+3qvfr/6XvXt/fXfX+9P/pWlXhYapB" + 
"GZv0vqt/r/0vtfv//1v+1vbSbSdV8Fh1oJtb76X///S969vV9Vfr/f0v6bpLyB4eyDVYSDDS" + "0unSfS/yGeMhnhf/S+19+vqvW/+v9tdJL0GEwggw9JJ//S+///9L3r2/v++r/tdJtXSbWvWw" + "gQYaWsfTelW//X/pfa+3/pfpMP96b+rrpL7TCCDBpaXr4QS////oL+vvV/V9KH/f/sU2kv+E" + "EGGlpV7eCC//1/9L2/2/9L9JmoFn7paTf6pf4QQYaSX6XEwGn//4/0v0vf/S/CCkICn//0mm" + "6S/wQRDTKwYaWl/cgQZn8Lhf/X2/2/8JfkKdlICt+1dJvabSr6aCCww6+tJ4P+OP/0/0vb/y" + "xwQX5tWD/er/v196CCww0tf+Zh3///F+3/f+1/BMP+//bbSS9hBoILDDS6VJJvBh//zMGv9/" + "pe3/wX7Qb/dLSbS40vEQgWGDS//w3///37f7/8F+2Df/v/bVeEEIYaWte3hv6mYEZmBH/5nv" + "9fb/x/bIKYT9tbX09Lwggw0v0kvf/ff/9v3r2//9yCwn/66bSV18EEDDrpf+////9v7X77ru" + "tsgtB///zadJfCDaX9JN73r/3/7fvXt//9h/20tL0ndL4Qer0lj7fr/yGZZBI/9v7X3//22/" + "/fb1vr4Qff6T3//3i//b969vf39sP+2trpJN6XwwX/X/////2/tfb9L0t//rp+vX4YX3SSv7" + "6xxshpI/9h+9e339/b//96t6XhhhBPeqSNgY++v4f/2/v+//9v/bSdLpJvrwwYQVu3LrX/fX" + "u//t+6XvvW9bb//e2qXpeGGEE3elqrf+v3/+39/t9r37/7df+3peGGQ2ysIJp20sIKv2+v//" + "2/0vb+vS7/9dL0vXww8ECG2wk4SW+vX2//w37f7e37+3Xpe+2kk3peGHhBOGKiieU/t9ZDLj" + "//28hmR6X36XaW2Qy4+9tLS9W+iDRHhg8IKwwmEq3revW3/+3+3+3t+3W///G/Xpfgw8IJsW" + "Cqn6b0tdv/9vXivfdL/vrdW640m9fwwxCCbFL9X0v7//b639vuttLb17qOqW9L8geHshp2GG" + "CCIG92q31D0v2//2/69vbS20tvr13dJ9L9MFDDCCCbaVPq3pft//t/v8baXpbeu3tV031+mE" + "gwwgVtpLek3hL9v/9v/920tuvtLtL6W9JfvDBhBK2Et9JhkM2NLXb//b12ve0thpbd1tha3S" + "fX/wwwgrYYSSvCTDCWCX7D//b//dhpbYS27CXaTtaV6S/pA6Cwwwl+CQYMElIEB6Ww17/bVd" + "rtw2Ethpd2lsMJf7ekF/w8LYMIJO0ITIwXEJDTDBqmqemw409Ndgwgkwwwgk24MJJs1DTW0k" + "2mh/0HhUGQyQIGCDoMUEECkNCgM1iyGxQQYVkMKMgXUMMJAgbQIiAzg3ZqJEGpQwYJEGpQw5" + "0JENRWxXWCwyDWo/aw8JoMQgQMhsBggQYSBAuGJIAxrrYYUgQIwUhgQ4YhYYhbDQhbX4SYev" + "sJIPBNBgvkmBwCkaBU4ZAgzf+/v7hgsMLcGFhhU71/40IiDCERILIgREgsCBEcREaEQ4MEJA" + "sTiIMEIME9UIiP5BbBq8hkhnmQWy/EgeDIOQUuIZAuEDwZuIHgz0ER9IRHEWZgSgVf0ED9LQ" + "f1QQP0tB/pI5g/SpBEQuSe+iByA3HqEDH6UMKINfUUQNgJXfCoGFNYHhlxhAwvogbAzB/hCO" + "P/////////////////////////////////+QPFrIGoF8IHYG3PIZIbdkMg4CZA8CIIZAb2QJ" + "7IE0NbMg255DIAw5A8PMhkLwpA8H8geBZMgpzCCww/CkDy0IHgVhqQPFDIHgcaEFXRLbyB4L" + "0INvL/kFiyDRnpbe6aXrhNP17+0iB4PMhkL0bKQPB/tMhlTIKc//XfSD67vW/2l6Q/+vfqvr" + "///zyf6W7XkDUo/9LS/6/C//Ta+uGgwvBf+lev8R//+rX/+v/V/1///q1////V////9K0v//" + "/9v////S/////bX///9LnA2ycNKQkyDLMg2/kG2eQ26ZEoQ2oINT+Q0nIkyDEyDa5EoQVzkN" + "qCEmQZTkRyDTBF4hmoyGq5Aw5A8FNyBxf/03BAg8EDIbWoIHggeQ19cIGFBcgwfBQgYQMFCB" + "ggYKCBhA1ChMEU4ZkFBSGwQRKiC4yyD1EGEENHkFxhEqIaBv/1cQg8JhBhDCDwg9Mg1UIGKa" + "4KmmQaKJhAwVBhDIGKKE8g0UIEU9V7X//6sJ6DChQjw2ERAFzxEGGygyBjQho00CyCgMJpkD" + "AaDCPjZAvog1IaNFR4WiMB5BNCBhBDP5BQjIEUtHAUEgGAX/6ujYr0wjwsI8LCCDYSQQfX9B" + "emE9UEG9MI8L1QQcL7TTSYaIGDpMINTYEL/6sJA+jg2gg4QQOk2FSawjxP9BejxtHieEeJpB" + "h6ODaBA16QcF96fp96SS/9XpvhB0EHQQdJvSfQINkM2ZDKnoLIZvoEHQTZDLmgQbSchlzQIN" + "hIOQy5qr5DRMhnn/e2ldVX/1e+kG0nSdLIZXqzMGgU7vpcUxCdinSbdIOk7o2eshl+7uk602" + "k/pf+r08QnWldW9XhP+lwmnhOtxCbS4hPv/ff9aWv/pX9Wk6T19XTf6XTTdN03TpdOv/06aT" + "aT+l/+33ul1rf//oL/9fbX6/9tf20rS6/9LF61169Xr/S6rr2666HH/rTp0nxSS2yGpxAu/7" + "ft/6v/v+l7vul7a/f77c2q33SX4Qafpf9Cl///4QX/+//V/2vXulqqW6DX6b/X///9L///S+" + "H/1q2k2k9JJbaBpeIgu///X/pf/97/sH+2rdfFWtILDDQa+QyQ31/XX//9P/9f/5qDU/1rdN" + "PpJbaPEQzXkMgCsR6/H3/8f/+/r7IgGn7/Tq1dJBh4QVBivBFB///S/7////2H/aVr16qw8J" + "JBrwjQT/8gYZt6/59P////Yf7703tvST6SQNegX/8zBT2l/7f//9fhv+6/Sbtav6DXhBfX/t" + "pf+3////hv/XSvTekn6UGF6X//tpf+3////g3+9N7/6V6tD//d/sMJf+3///35Bk0/tddJNt" + "V/H0v//Yhf+3//daX5AkR+/03o3vpL//0v28ML/2////8H/tK10km7Wv/S/d/wzCBf+3/+/1" + "+/771elvSX//1d/pD/7f//WrfkGuP+0tX1b9IgRH//r63hL/2//7Xr/f1+/SpttJf/9em730" + "v/b//f1v/7V6er0r9L//XSttb1/9v//rptfr+1jY0ntUq/+197X1/9v/+0rS1/9XqqVtiqX/" + 
"9bQVtowrZDYPr/7D//b7SYa9pW2jCxu6V6//thG/aTdhLbC9/+3/+6VoK1/yKPYS3rSbaS//" + "hhLhhJhkMOwwlw15AgaP+3//aVpMMEc/aVhOGEuulfS//iFoMIJhhWKthhJYd2urBra922lB" + "ggmK1tJimK/eraSBf/asqIpgwVqGwYJBA2GCDhhA8IG2EDBAwQODBBw2GlFMLCKcDDCVVBu1" + "hJh6H/7Cjm0ExQanQMoUQ2F7IEF50DBLhokNghyGC5DBcEDnQHIgEOgFwYIKE1kNgOUGE0wo" + "OtoKw9f/a6DQaDC2Cw//sF/+xCn0gwvimg1vXW0//hhDQMIMEDQgwhDYiOLLhkhlBYMEDCxD" + "BBoMEO+I44/iIkCThILGBHgTiQyQaOxHIHh+EREa/xFBEM58fH/sIJ/6/wk///pP+l/kDwIK" + "////X/3r+uvStf///8ev0nrC+v9pZA8WvSr0g2l//7aXtpfqJTgi4GwGeP16QjtpJeQNAanX" + "tL+0tsJJehWwwgklX2GEgkmg6wxCBIgZiCCB+DrDChfCyBtDOdkcDMMcMLEREf//////////" + "////////////////////4AIAIAAOAAABAwABAAAATAkAAAEBAwABAAAAcwIAAAIBAwABAAAA" + "AQAAAAMBAwABAAAABAAAAAYBAwABAAAAAQAAABEBBAABAAAACAAAABIBAwABAAAAAQAAABUB" + "AwABAAAAAQAAABYBAwABAAAAcwIAABcBBAABAAAABxMAABoBBQABAAAAvhMAABsBBQABAAAA" + "xhMAABwBAwABAAAAAQAAACgBAwABAAAAAgAAAAAAAAAAAMASAAAEAAAAwBIAAAQA"; + +#endif /* LEPTONICA_BMFDATA_H */ + + diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/bmp.h b/third_party/leptonica/uos/sw_64/include/leptonica/bmp.h new file mode 100644 index 00000000..c0ce8fbe --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/bmp.h @@ -0,0 +1,95 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + +#ifndef LEPTONICA_BMP_H +#define LEPTONICA_BMP_H + +/*! + * \file bmp.h + * + *
+ * This file is here to describe the fields in the header of
+ * the BMP file.  These fields are not used directly in Leptonica.
+ * The only things we use are the sizes of these two headers.
+ * Furthermore, because of potential namespace conflicts with
+ * the typedefs and defined sizes, we have changed the names
+ * to protect anyone who may also need to use the original definitions.
+ * Thanks to J. D. Bryan for pointing out the potential problems when
+ * developing on Win32 compatible systems.
+ * 
+ */ + +/*-------------------------------------------------------------* + * BMP file header * + *-------------------------------------------------------------*/ + +/*! BMP file header */ +struct BMP_FileHeader +{ + l_int16 bfType; /*!< file type; must be "BM" */ + l_int16 bfSize; /*!< length of the file; + sizeof(BMP_FileHeader) + + sizeof(BMP_InfoHeader) + + size of color table + + size of DIB bits */ + l_int16 bfFill1; /*!< remainder of the bfSize field */ + l_int16 bfReserved1; /*!< don't care (set to 0) */ + l_int16 bfReserved2; /*!< don't care (set to 0) */ + l_int16 bfOffBits; /*!< offset from beginning of file */ + l_int16 bfFill2; /*!< remainder of the bfOffBits field */ +}; +typedef struct BMP_FileHeader BMP_FH; + +/*! Number of bytes in a BMP file header */ +#define BMP_FHBYTES sizeof(BMP_FH) + + +/*-------------------------------------------------------------* + * BMP info header * + *-------------------------------------------------------------*/ + +/*! BMP info header */ +struct BMP_InfoHeader +{ + l_int32 biSize; /*!< size of the BMP_InfoHeader struct */ + l_int32 biWidth; /*!< bitmap width in pixels */ + l_int32 biHeight; /*!< bitmap height in pixels */ + l_int16 biPlanes; /*!< number of bitmap planes */ + l_int16 biBitCount; /*!< number of bits per pixel */ + l_int32 biCompression; /*!< compress format (0 == uncompressed) */ + l_int32 biSizeImage; /*!< size of image in bytes */ + l_int32 biXPelsPerMeter; /*!< pixels per meter in x direction */ + l_int32 biYPelsPerMeter; /*!< pixels per meter in y direction */ + l_int32 biClrUsed; /*!< number of colors used */ + l_int32 biClrImportant; /*!< number of important colors used */ +}; +typedef struct BMP_InfoHeader BMP_IH; + +/*! Number of bytes in a BMP info header */ +#define BMP_IHBYTES sizeof(BMP_IH) + + +#endif /* LEPTONICA_BMP_H */ diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/ccbord.h b/third_party/leptonica/uos/sw_64/include/leptonica/ccbord.h new file mode 100644 index 00000000..dcd37817 --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/ccbord.h @@ -0,0 +1,119 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *====================================================================*/ + +#ifndef LEPTONICA_CCBORD_H +#define LEPTONICA_CCBORD_H + +/*! + * \file ccbord.h + * + *
+ *           CCBord:   represents a single connected component
+ *           CCBorda:  an array of CCBord
+ * 
+ */ + + /*! Use in ccbaStepChainsToPixCoords() */ +enum { + CCB_LOCAL_COORDS = 1, + CCB_GLOBAL_COORDS = 2 +}; + + /*! Use in ccbaGenerateSPGlobalLocs() */ +enum { + CCB_SAVE_ALL_PTS = 1, + CCB_SAVE_TURNING_PTS = 2 +}; + + + /*! + *
+     * CCBord contains:
+     *
+     *    (1) a minimally-clipped bitmap of the component (pix),
+     *    (2) a boxa consisting of:
+     *          for the primary component:
+     *                (xul, yul) pixel location in global coords
+     *                (w, h) of the bitmap
+     *          for the hole components:
+     *                (x, y) in relative coordinates in primary component
+     *                (w, h) of the hole border (which is 2 pixels
+     *                       larger in each direction than the hole itself)
+     *    (3) a pta ('start') of the initial border pixel location for each
+     *        closed curve, all in relative coordinates of the primary
+     *        component.  This is given for the primary component,
+     *        followed by the hole components, if any.
+     *    (4) a refcount of the ccbord; used internally when a ccbord
+     *        is accessed from a ccborda (array of ccbord)
+     *    (5) a ptaa for the chain code for the border in relative
+     *        coordinates, where the first pta is the exterior border
+     *        and all other pta are for interior borders (holes)
+     *    (6) a ptaa for the global pixel loc rendition of the border,
+     *        where the first pta is the exterior border and all other
+     *        pta are for interior borders (holes).
+     *        This is derived from the local or step chain code.
+     *    (7) a numaa for the chain code for the border as orientation
+     *        directions between successive border pixels, where
+     *        the first numa is the exterior border and all other
+     *        numa are for interior borders (holes).  This is derived
+     *        from the local chain code.  The 8 directions are 0 - 7.
+     *    (8) a pta for a single chain for each c.c., comprised of outer
+     *        and hole borders, plus cut paths between them, all in
+     *        local coords.
+     *    (9) a pta for a single chain for each c.c., comprised of outer
+     *        and hole borders, plus cut paths between them, all in
+     *        global coords.
+     * 
+ */ +struct CCBord +{ + struct Pix *pix; /*!< component bitmap (min size) */ + struct Boxa *boxa; /*!< regions of each closed curve */ + struct Pta *start; /*!< initial border pixel locations */ + l_int32 refcount; /*!< number of handles; start at 1 */ + struct Ptaa *local; /*!< ptaa of chain pixels (local) */ + struct Ptaa *global; /*!< ptaa of chain pixels (global) */ + struct Numaa *step; /*!< numaa of chain code (step dir) */ + struct Pta *splocal; /*!< pta of single chain (local) */ + struct Pta *spglobal; /*!< pta of single chain (global) */ +}; +typedef struct CCBord CCBORD; + +/*! Array of CCBord */ +struct CCBorda +{ + struct Pix *pix; /*!< input pix (may be null) */ + l_int32 w; /*!< width of pix */ + l_int32 h; /*!< height of pix */ + l_int32 n; /*!< number of ccbord in ptr array */ + l_int32 nalloc; /*!< number of ccbord ptrs allocated */ + struct CCBord **ccb; /*!< ccb ptr array */ +}; +typedef struct CCBorda CCBORDA; + + +#endif /* LEPTONICA_CCBORD_H */ + diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/dewarp.h b/third_party/leptonica/uos/sw_64/include/leptonica/dewarp.h new file mode 100644 index 00000000..37bfb632 --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/dewarp.h @@ -0,0 +1,191 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + +#ifndef LEPTONICA_DEWARP_H +#define LEPTONICA_DEWARP_H + +/*! + * \file dewarp.h + * + *
+ *     Data structure to hold arrays and results for generating
+ *     horizontal and vertical disparity arrays based on textlines.
+ *     Each disparity array is two-dimensional.  The vertical disparity
+ *     array gives a vertical displacement, relative to the lowest point
+ *     in the textlines.  The horizontal disparity array gives a horizontal
+ *     displacement, relative to the minimum values (for even pages)
+ *     or maximum values (for odd pages) of the left and right ends of
+ *     full textlines.  Horizontal alignment always involves translations
+ *     away from the book gutter.
+ *
+ *     We have intentionally separated the process of building models
+ *     from the rendering process that uses the models.  For any page,
+ *     the building operation either creates an actual model (that is,
+ *     a model with at least the vertical disparity being computed, and
+ *     for which the 'success' flag is set) or fails to create a model.
+ *     However, at rendering time, a page can have one of two different
+ *     types of models.
+ *     (1) A valid model is an actual model that meets the rendering
+ *         constraints, which are limits on model curvature parameters.
+ *         See dewarpaTestForValidModel() for details.
+ *         Valid models are identified by dewarpaInsertRefModels(),
+ *         which sets the 'vvalid' and 'hvalid' fields.  Only valid
+ *         models are used for rendering.
+ *     (2) A reference model is used by a page that doesn't have
+ *         a valid model, but has a nearby valid model of the same
+ *         parity (even/odd page) that it can use.  The range in pages
+ *         to search for a valid model is given by the 'maxdist' field.
+ *
+ *     At the rendering stage, vertical and horizontal disparities are
+ *     treated differently.  It is somewhat more robust to generate
+ *     vertical disparity models (VDM) than horizontal disparity
+ *     models (HDM). A valid VDM is required for any correction to
+ *     be made; if a valid VDM is not available, just use the input
+ *     image.  Otherwise, assuming it is available, the use of the
+ *     HDM is controlled by two fields: 'useboth' and 'check_columns'.
+ *       (a) With useboth == 0, we use only the VDM.
+ *       (b) With useboth == 1, we require using the VDM and, if a valid
+ *           horizontal disparity model (HDM) is available, we also use it.
+ *       (c) With check_columns == 1, check for multiple columns and if
+ *           true, only use the VDM, even if a valid HDM is available.
+ *           Note that 'check_columns' takes precedence over 'useboth'
+ *           when there is more than 1 column of text.  By default,
+ *           check_columns == 0.
+ *
+ *     The 'maxdist' parameter is input when the dewarpa is created.
+ *     The other rendering parameters have default values given in dewarp1.c.
+ *     All parameters used by rendering can be set (or reset) using accessors.
+ *
+ *     After dewarping, use of the VDM will cause all points on each
+ *     altered curve to have a y-value equal to the minimum.  Use of
+ *     the HDM will cause the left and right edges of the textlines
+ *     to be vertically aligned if they had been typeset flush-left
+ *     and flush-right, respectively.
+ *
+ *     The sampled disparity arrays are expanded to full resolution,
+ *     using linear interpolation, and this is further expanded
+ *     by slope continuation to the right and below if the image
+ *     is larger than the full resolution disparity arrays.  Then
+ *     the disparity correction can be applied to the input image.
+ *     If the input pix are 2x reduced, the expansion from sampled
+ *     to full res uses the product of (sampling) * (redfactor).
+ *
+ *     The most accurate results are produced at full resolution, and
+ *     this is generally recommended.
+ * 
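+ *     Added illustration (not part of the original header): a minimal
+ *     build-then-render sketch, assuming the usual dewarp prototypes
+ *     declared in allheaders.h (dewarpaCreate, dewarpCreate,
+ *     dewarpaInsertDewarp, dewarpBuildPageModel, dewarpaInsertRefModels,
+ *     dewarpaUseBothArrays, dewarpaApplyDisparity); pixb and pageno are
+ *     placeholders for a 1 bpp page image and its page number.
+ *
+ *         L_DEWARPA *dewa = dewarpaCreate(10, 0, 1, 15, 20);
+ *                            // nptrs, sampling, redfactor, minlines, maxdist
+ *         dewarpaUseBothArrays(dewa, 1);       // sets 'useboth'
+ *         L_DEWARP *dew = dewarpCreate(pixb, pageno);
+ *         dewarpaInsertDewarp(dewa, dew);
+ *         dewarpBuildPageModel(dew, NULL);     // may or may not succeed
+ *         dewarpaInsertRefModels(dewa, 0, 0);  // mark valid and ref models
+ *         PIX *pixd = NULL;
+ *         dewarpaApplyDisparity(dewa, pageno, pixb, 255, 0, 0, &pixd, NULL);
+ *
+ *     If neither a valid nor a reference model exists for the page, the
+ *     rendering step falls back to the input image, as described above.
+ *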
+ */ + + /*! Dewarp version for serialization + *
+     * Note on versioning of the serialization of this data structure:
+     * The dewarping utility and the stored data can be expected to change.
+     * In most situations, the serialized version is ephemeral -- it is
+     * not needed after being used.  No functions will be provided to
+     * convert between different versions.
+     * 
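+     * For example (an added sketch; assumes the standard dewarpaWrite()
+     * and dewarpaRead() functions, with a placeholder file name):
+     *     dewarpaWrite("/tmp/lept/models.dewa", dewa);
+     *     L_DEWARPA *dewa2 = dewarpaRead("/tmp/lept/models.dewa");
+     *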
+ */ +#define DEWARP_VERSION_NUMBER 4 + +/*! Data structure to hold a number of Dewarp */ +struct L_Dewarpa +{ + l_int32 nalloc; /*!< size of dewarp ptr array */ + l_int32 maxpage; /*!< maximum page number in array */ + struct L_Dewarp **dewarp; /*!< array of ptrs to page dewarp */ + struct L_Dewarp **dewarpcache; /*!< array of ptrs to cached dewarps */ + struct Numa *namodels; /*!< list of page numbers for pages */ + /*!< with page models */ + struct Numa *napages; /*!< list of page numbers with either */ + /*!< page models or ref page models */ + l_int32 redfactor; /*!< reduction factor of input: 1 or 2 */ + l_int32 sampling; /*!< disparity arrays sampling factor */ + l_int32 minlines; /*!< min number of long lines required */ + l_int32 maxdist; /*!< max distance for getting ref page */ + l_int32 max_linecurv; /*!< maximum abs line curvature, */ + /*!< in micro-units */ + l_int32 min_diff_linecurv; /*!< minimum abs diff line */ + /*!< curvature in micro-units */ + l_int32 max_diff_linecurv; /*!< maximum abs diff line */ + /*!< curvature in micro-units */ + l_int32 max_edgeslope; /*!< maximum abs left or right edge */ + /*!< slope, in milli-units */ + l_int32 max_edgecurv; /*!< maximum abs left or right edge */ + /*!< curvature, in micro-units */ + l_int32 max_diff_edgecurv; /*!< maximum abs diff left-right */ + /*!< edge curvature, in micro-units */ + l_int32 useboth; /*!< use both disparity arrays if */ + /*!< available; only vertical otherwise */ + l_int32 check_columns; /*!< if there are multiple columns, */ + /*!< only use the vertical disparity */ + /*!< array */ + l_int32 modelsready; /*!< invalid models have been removed */ + /*!< and refs built against valid set */ +}; +typedef struct L_Dewarpa L_DEWARPA; + + +/*! Data structure for a single dewarp */ +struct L_Dewarp +{ + struct L_Dewarpa *dewa; /*!< ptr to parent (not owned) */ + struct Pix *pixs; /*!< source pix, 1 bpp */ + struct FPix *sampvdispar; /*!< sampled vert disparity array */ + struct FPix *samphdispar; /*!< sampled horiz disparity array */ + struct FPix *sampydispar; /*!< sampled slope h-disparity array */ + struct FPix *fullvdispar; /*!< full vert disparity array */ + struct FPix *fullhdispar; /*!< full horiz disparity array */ + struct FPix *fullydispar; /*!< full slope h-disparity array */ + struct Numa *namidys; /*!< sorted y val of midpoint each line */ + struct Numa *nacurves; /*!< sorted curvature of each line */ + l_int32 w; /*!< width of source image */ + l_int32 h; /*!< height of source image */ + l_int32 pageno; /*!< page number; important for reuse */ + l_int32 sampling; /*!< sampling factor of disparity arrays */ + l_int32 redfactor; /*!< reduction factor of pixs: 1 or 2 */ + l_int32 minlines; /*!< min number of long lines required */ + l_int32 nlines; /*!< number of long lines found */ + l_int32 mincurv; /*!< min line curvature in micro-units */ + l_int32 maxcurv; /*!< max line curvature in micro-units */ + l_int32 leftslope; /*!< left edge slope in milli-units */ + l_int32 rightslope; /*!< right edge slope in milli-units */ + l_int32 leftcurv; /*!< left edge curvature in micro-units */ + l_int32 rightcurv; /*!< right edge curvature in micro-units*/ + l_int32 nx; /*!< number of sampling pts in x-dir */ + l_int32 ny; /*!< number of sampling pts in y-dir */ + l_int32 hasref; /*!< 0 if normal; 1 if has a refpage */ + l_int32 refpage; /*!< page with disparity model to use */ + l_int32 vsuccess; /*!< sets to 1 if vert disparity builds */ + l_int32 hsuccess; /*!< sets to 1 if horiz disparity builds */ + 
l_int32 ysuccess; /*!< sets to 1 if slope disparity builds */ + l_int32 vvalid; /*!< sets to 1 if valid vert disparity */ + l_int32 hvalid; /*!< sets to 1 if valid horiz disparity */ + l_int32 skip_horiz; /*!< if 1, skip horiz disparity */ + /*!< correction */ + l_int32 debug; /*!< set to 1 if debug output requested */ +}; +typedef struct L_Dewarp L_DEWARP; + +#endif /* LEPTONICA_DEWARP_H */ diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/endianness.h b/third_party/leptonica/uos/sw_64/include/leptonica/endianness.h new file mode 100644 index 00000000..8cdc060d --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/endianness.h @@ -0,0 +1,11 @@ +#if !defined (L_BIG_ENDIAN) && !defined (L_LITTLE_ENDIAN) +# if defined (__APPLE_CC__) +# ifdef __BIG_ENDIAN__ +# define L_BIG_ENDIAN +# else +# define L_LITTLE_ENDIAN +# endif +# else +# define L_LITTLE_ENDIAN +# endif +#endif diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/environ.h b/third_party/leptonica/uos/sw_64/include/leptonica/environ.h new file mode 100644 index 00000000..ebb669af --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/environ.h @@ -0,0 +1,509 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + +#ifndef LEPTONICA_ENVIRON_H +#define LEPTONICA_ENVIRON_H + +/*------------------------------------------------------------------------* + * Defines and includes differ for Unix and Windows. Also for Windows, * + * differentiate between conditionals based on platform and compiler. 
* + * For platforms: * + * _WIN32 => Windows, 32- or 64-bit * + * _WIN64 => Windows, 64-bit only * + * __CYGWIN__ => Cygwin * + * For compilers: * + * __GNUC__ => gcc * + * _MSC_VER => msvc * + *------------------------------------------------------------------------*/ + +/* MS VC++ does not provide stdint.h, so define the missing types here */ + + +#ifndef _MSC_VER +#include + +#else +/* Note that _WIN32 is defined for both 32 and 64 bit applications, + whereas _WIN64 is defined only for the latter */ + +#ifdef _WIN64 +typedef __int64 intptr_t; +typedef unsigned __int64 uintptr_t; +#else +typedef int intptr_t; +typedef unsigned int uintptr_t; +#endif + +/* VC++6 doesn't seem to have powf, expf. */ +#if (_MSC_VER < 1400) +#define powf(x, y) (float)pow((double)(x), (double)(y)) +#define expf(x) (float)exp((double)(x)) +#endif + +#endif /* _MSC_VER */ + +/* Windows specifics */ +#ifdef _WIN32 + /* DLL EXPORTS and IMPORTS */ + #if defined(LIBLEPT_EXPORTS) + #define LEPT_DLL __declspec(dllexport) + #elif defined(LIBLEPT_IMPORTS) + #define LEPT_DLL __declspec(dllimport) + #else + #define LEPT_DLL + #endif +#else /* non-Windows specifics */ + #include + #define LEPT_DLL +#endif /* _WIN32 */ + +typedef intptr_t l_intptr_t; +typedef uintptr_t l_uintptr_t; + + +/*--------------------------------------------------------------------* + * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!* + * USER CONFIGURABLE * + * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!* + * Environment variables with I/O libraries * + * Manual Configuration Only: NOT AUTO_CONF * + *--------------------------------------------------------------------*/ +/* + * Leptonica provides interfaces to link to several external image + * I/O libraries, plus zlib. Setting any of these to 0 here causes + * non-functioning stubs to be linked. + */ +#if !defined(HAVE_CONFIG_H) && !defined(ANDROID_BUILD) && !defined(OS_IOS) +#define HAVE_LIBJPEG 1 +#define HAVE_LIBTIFF 1 +#define HAVE_LIBPNG 1 +#define HAVE_LIBZ 1 +#define HAVE_LIBGIF 0 +#define HAVE_LIBUNGIF 0 +#define HAVE_LIBWEBP 0 +#define HAVE_LIBJP2K 0 + + /* Leptonica supports both OpenJPEG 2.0 and 2.1. If you have a + * version of openjpeg (HAVE_LIBJP2K) that is not 2.1, set the + * path to the openjpeg.h header in angle brackets here. */ +#define LIBJP2K_HEADER +#endif /* ! HAVE_CONFIG_H etc. */ + +/* + * On linux systems, you can do I/O between Pix and memory. Specifically, + * you can compress (write compressed data to memory from a Pix) and + * uncompress (read from compressed data in memory to a Pix). + * For jpeg, png, jp2k, gif, pnm and bmp, these use the non-posix GNU + * functions fmemopen() and open_memstream(). These functions are not + * available on other systems. + * To use these functions in linux, you must define HAVE_FMEMOPEN to 1. + * To use them on MacOS, which does not support these functions, set it to 0. + */ +#if !defined(HAVE_CONFIG_H) && !defined(ANDROID_BUILD) && !defined(OS_IOS) && \ + !defined(_WIN32) +#define HAVE_FMEMOPEN 1 +#endif /* ! HAVE_CONFIG_H etc. 
*/ + + +/*--------------------------------------------------------------------* + * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!* + * USER CONFIGURABLE * + * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!* + * Environ variables for image I/O without external libraries * + *--------------------------------------------------------------------*/ +/* + * Leptonica supplies I/O support without using external libraries for: + * * image read/write for bmp, pnm + * * header read for jp2k + * * image wrapping write for pdf and ps. + * Setting any of these to 0 causes non-functioning stubs to be linked. + */ +#define USE_BMPIO 1 +#define USE_PNMIO 1 +#define USE_JP2KHEADER 1 +#define USE_PDFIO 1 +#define USE_PSIO 1 + + +/*--------------------------------------------------------------------* + * It is desirable on Windows to have all temp files written to the same + * subdirectory of the Windows directory, because files under + * persist after reboot, and the regression tests write a lot of files. + * We write all test files to /tmp/lept or subdirectories of /tmp/lept. + * Windows temp files are specified as in unix, but have the translation + * /tmp/lept/xxx --> /lept/xxx + *--------------------------------------------------------------------*/ + + +/*--------------------------------------------------------------------* + * Built-in types * + *--------------------------------------------------------------------*/ +typedef signed char l_int8; /*!< signed 8-bit value */ +typedef unsigned char l_uint8; /*!< unsigned 8-bit value */ +typedef short l_int16; /*!< signed 16-bit value */ +typedef unsigned short l_uint16; /*!< unsigned 16-bit value */ +typedef int l_int32; /*!< signed 32-bit value */ +typedef unsigned int l_uint32; /*!< unsigned 32-bit value */ +typedef float l_float32; /*!< 32-bit floating point value */ +typedef double l_float64; /*!< 64-bit floating point value */ +#ifdef COMPILER_MSVC +typedef __int64 l_int64; /*!< signed 64-bit value */ +typedef unsigned __int64 l_uint64; /*!< unsigned 64-bit value */ +#else +typedef long long l_int64; /*!< signed 64-bit value */ +typedef unsigned long long l_uint64; /*!< unsigned 64-bit value */ +#endif /* COMPILER_MSVC */ + + +/*------------------------------------------------------------------------* + * Standard macros * + *------------------------------------------------------------------------*/ +#ifndef L_MIN +/*! Minimum of %x and %y */ +#define L_MIN(x,y) (((x) < (y)) ? (x) : (y)) +#endif + +#ifndef L_MAX +/*! Maximum of %x and %y */ +#define L_MAX(x,y) (((x) > (y)) ? (x) : (y)) +#endif + +#ifndef L_ABS +/*! Absoulute value of %x */ +#define L_ABS(x) (((x) < 0) ? (-1 * (x)) : (x)) +#endif + +#ifndef L_SIGN +/*! Sign of %x */ +#define L_SIGN(x) (((x) < 0) ? -1 : 1) +#endif + +#ifndef UNDEF +/*! Undefined value */ +#define UNDEF -1 +#endif + +#ifndef NULL +/*! NULL value */ +#define NULL 0 +#endif + +#ifndef TRUE +/*! True value */ +#define TRUE 1 +#endif + +#ifndef FALSE +/*! False value */ +#define FALSE 0 +#endif + + +/*--------------------------------------------------------------------* + * Environment variables for endian dependence * + *--------------------------------------------------------------------*/ +/* + * To control conditional compilation, one of two variables + * + * L_LITTLE_ENDIAN (e.g., for Intel X86) + * L_BIG_ENDIAN (e.g., for Sun SPARC, Mac Power PC) + * + * is defined when the GCC compiler is invoked. + * All code should compile properly for both hardware architectures. 
+ */ + + +/*------------------------------------------------------------------------* + * Simple search state variables * + *------------------------------------------------------------------------*/ + +/*! Simple search state variables */ +enum { + L_NOT_FOUND = 0, + L_FOUND = 1 +}; + + +/*------------------------------------------------------------------------* + * Path separator conversion * + *------------------------------------------------------------------------*/ + +/*! Path separator conversion */ +enum { + UNIX_PATH_SEPCHAR = 0, + WIN_PATH_SEPCHAR = 1 +}; + + +/*------------------------------------------------------------------------* + * Timing structs * + *------------------------------------------------------------------------*/ +typedef void *L_TIMER; + +/*! Timing struct */ +struct L_WallTimer { + l_int32 start_sec; + l_int32 start_usec; + l_int32 stop_sec; + l_int32 stop_usec; +}; +typedef struct L_WallTimer L_WALLTIMER; + + +/*------------------------------------------------------------------------* + * Standard memory allocation * + * * + * These specify the memory management functions that are used * + * on all heap data except for Pix. Memory management for Pix * + * also defaults to malloc and free. See pix1.c for details. * + *------------------------------------------------------------------------*/ +#define LEPT_MALLOC(blocksize) malloc(blocksize) +#define LEPT_CALLOC(numelem, elemsize) calloc(numelem, elemsize) +#define LEPT_REALLOC(ptr, blocksize) realloc(ptr, blocksize) +#define LEPT_FREE(ptr) free(ptr) + + +/*------------------------------------------------------------------------* + * Control printing of error, warning, and info messages * + * * + * To omit all messages to stderr, simply define NO_CONSOLE_IO on the * + * command line. For finer grained control, we have a mechanism * + * based on the message severity level. The following assumes that * + * NO_CONSOLE_IO is not defined. * + * * + * Messages are printed if the message severity is greater than or equal * + * to the current severity threshold. The current severity threshold * + * is the greater of the compile-time severity, which is the minimum * + * severity that can be reported, and the run-time severity, which is * + * the severity threshold at the moment. * + * * + * The compile-time threshold determines which messages are compiled * + * into the library for potential printing. Messages below the * + * compile-time threshold are omitted and can never be printed. The * + * default compile-time threshold is L_SEVERITY_INFO, but this may be * + * overridden by defining MINIMUM_SEVERITY to the desired enumeration * + * identifier on the compiler command line. Defining NO_CONSOLE_IO on * + * the command line is the same as setting MINIMUM_SEVERITY to * + * L_SEVERITY_NONE. * + * * + * The run-time threshold determines which messages are printed during * + * library execution. It defaults to the compile-time threshold but * + * may be changed either statically by defining DEFAULT_SEVERITY to * + * the desired enumeration identifier on the compiler command line, or * + * dynamically by calling setMsgSeverity() to specify a new threshold. * + * The run-time threshold may also be set from the value of the * + * environment variable LEPT_MSG_SEVERITY by calling setMsgSeverity() * + * and specifying L_SEVERITY_EXTERNAL. 
* + * * + * In effect, the compile-time threshold setting says, "Generate code * + * to permit messages of equal or greater severity than this to be * + * printed, if desired," whereas the run-time threshold setting says, * + * "Print messages that have an equal or greater severity than this." * + *------------------------------------------------------------------------*/ + +/*! Control printing of error, warning and info messages */ +enum { + L_SEVERITY_EXTERNAL = 0, /* Get the severity from the environment */ + L_SEVERITY_ALL = 1, /* Lowest severity: print all messages */ + L_SEVERITY_DEBUG = 2, /* Print debugging and higher messages */ + L_SEVERITY_INFO = 3, /* Print informational and higher messages */ + L_SEVERITY_WARNING = 4, /* Print warning and higher messages */ + L_SEVERITY_ERROR = 5, /* Print error and higher messages */ + L_SEVERITY_NONE = 6 /* Highest severity: print no messages */ +}; + +/* No message less than the compile-time threshold will ever be + * reported, regardless of the current run-time threshold. This allows + * selection of the set of messages to include in the library. For + * example, setting the threshold to L_SEVERITY_WARNING eliminates all + * informational messages from the library. With that setting, both + * warning and error messages would be printed unless setMsgSeverity() + * was called, or DEFAULT_SEVERITY was redefined, to set the run-time + * severity to L_SEVERITY_ERROR. In that case, only error messages + * would be printed. + * + * This mechanism makes the library smaller and faster, by eliminating + * undesired message reporting and the associated run-time overhead for + * message threshold checking, because code for messages whose severity + * is lower than MINIMUM_SEVERITY won't be generated. + * + * A production library might typically permit ERROR messages to be + * generated, and a development library might permit DEBUG and higher. + * The actual messages printed (as opposed to generated) would depend + * on the current run-time severity threshold. + * + * This is a complex mechanism and a few examples may help. + * (1) No output permitted under any circumstances. + * Use: -DNO_CONSOLE_IO or -DMINIMUM_SEVERITY=6 + * (2) Suppose you want to only allow error messages, and you don't + * want to permit info or warning messages at runtime. + * Use: -DMINIMUM_SEVERITY=5 + * (3) Suppose you want to only allow error messages by default, + * but you will permit this to be over-ridden at runtime. + * Use: -DDEFAULT_SEVERITY=5 + * and to allow info and warning override: + * setMsgSeverity(L_SEVERITY_INFO); + */ + +#ifdef NO_CONSOLE_IO + #undef MINIMUM_SEVERITY + #undef DEFAULT_SEVERITY + + #define MINIMUM_SEVERITY L_SEVERITY_NONE /*!< Compile-time default */ + #define DEFAULT_SEVERITY L_SEVERITY_NONE /*!< Run-time default */ + +#else + #ifndef MINIMUM_SEVERITY + #define MINIMUM_SEVERITY L_SEVERITY_INFO /*!< Compile-time default */ + #endif + + #ifndef DEFAULT_SEVERITY + #define DEFAULT_SEVERITY MINIMUM_SEVERITY /*!< Run-time default */ + #endif +#endif + + +/*! The run-time message severity threshold is defined in utils.c. */ +LEPT_DLL extern l_int32 LeptMsgSeverity; + +/* + *
+ *  Usage
+ *  =====
+ *  Messages are of two types.
+ *
+ *  (1) The messages
+ *      ERROR_INT(a,b,c)       : returns l_int32
+ *      ERROR_FLOAT(a,b,c)     : returns l_float32
+ *      ERROR_PTR(a,b,c)       : returns void*
+ *  are used to return from functions and take a fixed set of parameters:
+ *      a : <message string>
+ *      b : procName
+ *      c : <return value from function>
+ *  where procName is the name of the local variable naming the function.
+ *
+ *  (2) The purely informational L_* messages
+ *      L_ERROR(a,...)
+ *      L_WARNING(a,...)
+ *      L_INFO(a,...)
+ *  do not take a return value, but they take at least two parameters:
+ *      a  :  <message string> with optional format conversions
+ *      v1 : procName    (this must be included as the first vararg)
+ *      v2, ... :  optional varargs to match format converters in the message
+ *
+ *  To return an error from a function that returns void, use:
+ *      L_ERROR(<message string>, procName, [...])
+ *      return;
+ *
+ *  Implementation details
+ *  ======================
+ *  Messages are defined with the IF_SEV macro.  The first parameter is
+ *  the message severity, the second is the function to call if the
+ *  message is to be printed, and the third is the return value if the
+ *  message is to be suppressed.  For example, we might have an
+ *  informational message defined as:
+ *
+ *    IF_SEV(L_SEVERITY_INFO, fprintf(.......), 0)
+ *
+ *  The macro expands into a conditional.  Because the first comparison
+ *  is between two constants, an optimizing compiler will remove either
+ *  the comparison (if it's true) or the entire macro expansion (if it
+ *  is false).  This means that there is no run-time overhead for
+ *  messages whose severity falls below the minimum specified at compile
+ *  time, and for others the overhead is one (not two) comparisons.
+ *
+ *  The L_nnn() macros below do not return a value, but because the
+ *  conditional operator requires one for the false condition, we
+ *  specify a void expression.
+ * 
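+ *  Added illustration (not part of the original header): a typical
+ *  function body using these macros.  pixCheckInput is a hypothetical
+ *  name; the error-return helpers (returnErrorInt etc.) are assumed to
+ *  be those declared in allheaders.h.
+ *
+ *      l_int32
+ *      pixCheckInput(PIX      *pixs,
+ *                    l_int32   factor)
+ *      {
+ *          PROCNAME("pixCheckInput");
+ *
+ *          if (!pixs)
+ *              return ERROR_INT("pixs not defined", procName, 1);
+ *          if (factor < 1) {
+ *              L_WARNING("factor = %d < 1; setting to 1\n", procName, factor);
+ *              factor = 1;
+ *          }
+ *          L_INFO("input ok\n", procName);
+ *          return 0;
+ *      }
+ *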
+ */ + +#ifdef NO_CONSOLE_IO + + #define PROCNAME(name) + #define ERROR_INT(a,b,c) ((l_int32)(c)) + #define ERROR_FLOAT(a,b,c) ((l_float32)(c)) + #define ERROR_PTR(a,b,c) ((void *)(c)) + #define L_ERROR(a,...) + #define L_WARNING(a,...) + #define L_INFO(a,...) + +#else + + #define PROCNAME(name) static const char procName[] = name + #define IF_SEV(l,t,f) \ + ((l) >= MINIMUM_SEVERITY && (l) >= LeptMsgSeverity ? (t) : (f)) + + #define ERROR_INT(a,b,c) \ + IF_SEV(L_SEVERITY_ERROR, returnErrorInt((a),(b),(c)), (l_int32)(c)) + #define ERROR_FLOAT(a,b,c) \ + IF_SEV(L_SEVERITY_ERROR, returnErrorFloat((a),(b),(c)), (l_float32)(c)) + #define ERROR_PTR(a,b,c) \ + IF_SEV(L_SEVERITY_ERROR, returnErrorPtr((a),(b),(c)), (void *)(c)) + + #define L_ERROR(a,...) \ + IF_SEV(L_SEVERITY_ERROR, \ + (void)fprintf(stderr, "Error in %s: " a, __VA_ARGS__), \ + (void)0) + #define L_WARNING(a,...) \ + IF_SEV(L_SEVERITY_WARNING, \ + (void)fprintf(stderr, "Warning in %s: " a, __VA_ARGS__), \ + (void)0) + #define L_INFO(a,...) \ + IF_SEV(L_SEVERITY_INFO, \ + (void)fprintf(stderr, "Info in %s: " a, __VA_ARGS__), \ + (void)0) + +#if 0 /* Alternative method for controlling L_* message output */ + #define L_ERROR(a,...) \ + { if (L_SEVERITY_ERROR >= MINIMUM_SEVERITY && \ + L_SEVERITY_ERROR >= LeptMsgSeverity) \ + fprintf(stderr, "Error in %s: " a, __VA_ARGS__) \ + } + #define L_WARNING(a,...) \ + { if (L_SEVERITY_WARNING >= MINIMUM_SEVERITY && \ + L_SEVERITY_WARNING >= LeptMsgSeverity) \ + fprintf(stderr, "Warning in %s: " a, __VA_ARGS__) \ + } + #define L_INFO(a,...) \ + { if (L_SEVERITY_INFO >= MINIMUM_SEVERITY && \ + L_SEVERITY_INFO >= LeptMsgSeverity) \ + fprintf(stderr, "Info in %s: " a, __VA_ARGS__) \ + } +#endif + +#endif /* NO_CONSOLE_IO */ + + +/*------------------------------------------------------------------------* + * snprintf() renamed in MSVC (pre-VS2015) * + *------------------------------------------------------------------------*/ +#if defined _MSC_VER && _MSC_VER < 1900 +#define snprintf(buf, size, ...) _snprintf_s(buf, size, _TRUNCATE, __VA_ARGS__) +#endif + + +#endif /* LEPTONICA_ENVIRON_H */ diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/gplot.h b/third_party/leptonica/uos/sw_64/include/leptonica/gplot.h new file mode 100644 index 00000000..9d74eafb --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/gplot.h @@ -0,0 +1,96 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + +#ifndef LEPTONICA_GPLOT_H +#define LEPTONICA_GPLOT_H + +/*! + * \file gplot.h + * + *
+ *   Data structures and parameters for generating gnuplot files
+ *
+ *   We used to support X11 output, but recent versions of gnuplot do not
+ *   support the X11 terminal.  To get display to your screen, use
+ *   GPLOT_PNG output; e.g.,
+ *       gplotSimple1(na, GPLOT_PNG, "/tmp/someroot", ...);
+ *       l_fileDisplay("/tmp/someroot.png", ...);
+ * 
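+ *   A slightly fuller added sketch (assumes the usual gplotCreate,
+ *   gplotAddPlot, gplotMakeOutput and gplotDestroy prototypes; nax and
+ *   nay are placeholder Numa arrays of x and y values):
+ *
+ *       GPLOT *gplot = gplotCreate("/tmp/someroot", GPLOT_PNG,
+ *                                  "example plot", "x", "y");
+ *       gplotAddPlot(gplot, nax, nay, GPLOT_LINES, "curve 1");
+ *       gplotMakeOutput(gplot);     // runs gnuplot; writes /tmp/someroot.png
+ *       gplotDestroy(&gplot);
+ *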
+ */ + +#define GPLOT_VERSION_NUMBER 1 + +#define NUM_GPLOT_STYLES 5 +enum GPLOT_STYLE { + GPLOT_LINES = 0, + GPLOT_POINTS = 1, + GPLOT_IMPULSES = 2, + GPLOT_LINESPOINTS = 3, + GPLOT_DOTS = 4 +}; + +#define NUM_GPLOT_OUTPUTS 6 +enum GPLOT_OUTPUT { + GPLOT_NONE = 0, + GPLOT_PNG = 1, + GPLOT_PS = 2, + GPLOT_EPS = 3, + GPLOT_LATEX = 4 +}; + +enum GPLOT_SCALING { + GPLOT_LINEAR_SCALE = 0, /*!< default */ + GPLOT_LOG_SCALE_X = 1, + GPLOT_LOG_SCALE_Y = 2, + GPLOT_LOG_SCALE_X_Y = 3 +}; + +extern const char *gplotstylenames[]; /*!< used in gnuplot cmd file */ +extern const char *gplotfilestyles[]; /*!< used in simple file input */ +extern const char *gplotfileoutputs[]; /*!< used in simple file input */ + +/*! Data structure for generating gnuplot files */ +struct GPlot +{ + char *rootname; /*!< for cmd, data, output */ + char *cmdname; /*!< command file name */ + struct Sarray *cmddata; /*!< command file contents */ + struct Sarray *datanames; /*!< data file names */ + struct Sarray *plotdata; /*!< plot data (1 string/file) */ + struct Sarray *plottitles; /*!< title for each individual plot */ + struct Numa *plotstyles; /*!< plot style for individual plots */ + l_int32 nplots; /*!< current number of plots */ + char *outname; /*!< output file name */ + l_int32 outformat; /*!< GPLOT_OUTPUT values */ + l_int32 scaling; /*!< GPLOT_SCALING values */ + char *title; /*!< optional */ + char *xlabel; /*!< optional x axis label */ + char *ylabel; /*!< optional y axis label */ +}; +typedef struct GPlot GPLOT; + + +#endif /* LEPTONICA_GPLOT_H */ diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/heap.h b/third_party/leptonica/uos/sw_64/include/leptonica/heap.h new file mode 100644 index 00000000..d39b06b9 --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/heap.h @@ -0,0 +1,87 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + +#ifndef LEPTONICA_HEAP_H +#define LEPTONICA_HEAP_H + +/*! + * \file heap.h + * + *
+ *      Expandable priority queue configured as a heap for arbitrary void* data
+ *
+ *      The L_Heap is used to implement a priority queue.  The elements
+ *      in the heap are ordered in either increasing or decreasing key value.
+ *      The key is a float field 'keyval' that is required to be
+ *      contained in the elements of the queue.
+ *
+ *      The heap is a simple binary tree with the following constraints:
+ *         - the key of each node is >= the keys of the two children
+ *         - the tree is complete, meaning that each level (1, 2, 4, ...)
+ *           is filled and the last level is filled from left to right
+ *
+ *      The tree structure is implicit in the queue array, with the
+ *      array elements numbered as a breadth-first search of the tree
+ *      from left to right.  It is thus guaranteed that the largest
+ *      (or smallest) key belongs to the first element in the array.
+ *
+ *      Heap sort is used to sort the array.  Once an array has been
+ *      sorted as a heap, it is convenient to use it as a priority queue,
+ *      because the min (or max) elements are always at the root of
+ *      the tree (element 0), and once removed, the heap can be
+ *      resorted in not more than log[n] steps, where n is the number
+ *      of elements on the heap.  Likewise, if an arbitrary element is
+ *      added to the end of the array A, the sorted heap can be restored
+ *      in not more than log[n] steps.
+ *
+ *      An L_Heap differs from an L_Queue in that the elements in the former
+ *      are sorted by a key.  Internally, the array is maintained
+ *      as a queue, with a pointer to the end of the array.  The
+ *      head of the array always remains at array[0].  The array is
+ *      maintained (sorted) as a heap.  When an item is removed from
+ *      the head, the last item takes its place (thus reducing the
+ *      array length by 1), and this is followed by array element
+ *      swaps to restore the heap property.   When an item is added,
+ *      it goes at the end of the array, and is swapped up to restore
+ *      the heap.  If the ptr array is full, adding another item causes
+ *      the ptr array size to double.
+ *
+ *      For further implementation details, see heap.c.
+ * 
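+ *      Added usage sketch (assumes the usual lheapCreate, lheapAdd,
+ *      lheapRemove and lheapDestroy prototypes, and that the stored
+ *      struct places the l_float32 key in its first field):
+ *
+ *          struct HItem { l_float32 keyval; l_int32 id; };
+ *          L_HEAP *lh = lheapCreate(0, L_SORT_INCREASING);
+ *          struct HItem *item =
+ *              (struct HItem *)LEPT_CALLOC(1, sizeof(struct HItem));
+ *          item->keyval = 3.5;
+ *          lheapAdd(lh, item);                    // swapped up to keep heap order
+ *          struct HItem *top = (struct HItem *)lheapRemove(lh);  // smallest key
+ *          LEPT_FREE(top);
+ *          lheapDestroy(&lh, TRUE);               // TRUE: free remaining items
+ *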
+ */ + +/*! Heap of arbitrary void* data */ +struct L_Heap +{ + l_int32 nalloc; /*!< size of allocated ptr array */ + l_int32 n; /*!< number of elements stored in the heap */ + void **array; /*!< ptr array */ + l_int32 direction; /*!< L_SORT_INCREASING or L_SORT_DECREASING */ +}; +typedef struct L_Heap L_HEAP; + + +#endif /* LEPTONICA_HEAP_H */ diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/imageio.h b/third_party/leptonica/uos/sw_64/include/leptonica/imageio.h new file mode 100644 index 00000000..fb7456d4 --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/imageio.h @@ -0,0 +1,237 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + +/*! + * \file imageio.h + * + *
+ *  General features of image I/O in leptonica
+ *
+ *  At present, there are 9 file formats for images that can be read
+ *  and written:
+ *      png (requires libpng, libz)
+ *      jpeg (requires libjpeg)
+ *      tiff (requires libtiff, libz)
+ *      gif (requires libgif)
+ *      webp (requires libwebp)
+ *      jp2 (requires libopenjp2)
+ *      bmp (no library required)
+ *      pnm (no library required)
+ *      spix (no library required)
+ *  Additionally, there are two file formats for writing (only) images:
+ *      PostScript (requires libpng, libz, libjpeg, libtiff)
+ *      pdf (requires libpng, libz, libjpeg, libtiff)
+ *
+ *  For all 9 read/write formats, leptonica provides interconversion
+ *  between pix (with raster data) and formatted image data:
+ *      Conversion from pix (typically compression):
+ *          pixWrite():        pix --> file
+ *          pixWriteStream():  pix --> filestream (aka FILE*)
+ *          pixWriteMem():     pix --> memory buffer
+ *      Conversion to pix (typically decompression):
+ *          pixRead():         file --> pix
+ *          pixReadStream():   filestream --> pix
+ *          pixReadMem():      memory buffer --> pix
+ *
+ *  Conversions for which the image data is not compressed are:
+ *     * uncompressed tiff   (IFF_TIFF)
+ *     * bmp
+ *     * pnm
+ *     * spix (fast serialization that copies the pix raster data)
+ *
+ *  The image header (metadata) information can be read from either
+ *  the compressed file or a memory buffer, for all 9 formats.
+ * 
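+ *  Added round-trip sketch using the generic calls listed above
+ *  ("test.png" is a placeholder file name):
+ *
+ *      PIX *pix1 = pixRead("test.png");                   // file --> pix
+ *      l_uint8 *data = NULL;
+ *      size_t nbytes = 0;
+ *      pixWriteMem(&data, &nbytes, pix1, IFF_JFIF_JPEG);  // pix --> memory
+ *      PIX *pix2 = pixReadMem(data, nbytes);              // memory --> pix
+ *      lept_free(data);
+ *      pixDestroy(&pix1);
+ *      pixDestroy(&pix2);
+ *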
+ */ + +#ifndef LEPTONICA_IMAGEIO_H +#define LEPTONICA_IMAGEIO_H + +/* --------------------------------------------------------------- * + * Image file format types * + * --------------------------------------------------------------- */ +/* + * The IFF_DEFAULT flag is used to write the file out in the + * same (input) file format that the pix was read from. If the pix + * was not read from file, the input format field will be + * IFF_UNKNOWN and the output file format will be chosen to + * be compressed and lossless; namely, IFF_TIFF_G4 for d = 1 + * and IFF_PNG for everything else. + * + * In the future, new format types that have defined extensions + * will be added before IFF_DEFAULT, and will be kept in sync with + * the file format extensions in writefile.c. The positions of + * file formats before IFF_DEFAULT will remain invariant. + */ + +/*! Image file format types */ +enum { + IFF_UNKNOWN = 0, + IFF_BMP = 1, + IFF_JFIF_JPEG = 2, + IFF_PNG = 3, + IFF_TIFF = 4, + IFF_TIFF_PACKBITS = 5, + IFF_TIFF_RLE = 6, + IFF_TIFF_G3 = 7, + IFF_TIFF_G4 = 8, + IFF_TIFF_LZW = 9, + IFF_TIFF_ZIP = 10, + IFF_PNM = 11, + IFF_PS = 12, + IFF_GIF = 13, + IFF_JP2 = 14, + IFF_WEBP = 15, + IFF_LPDF = 16, + IFF_DEFAULT = 17, + IFF_SPIX = 18 +}; + + +/* --------------------------------------------------------------- * + * Format header ids * + * --------------------------------------------------------------- */ + +/*! Format header ids */ +enum { + BMP_ID = 0x4d42, /*!< BM - for bitmaps */ + TIFF_BIGEND_ID = 0x4d4d, /*!< MM - for 'motorola' */ + TIFF_LITTLEEND_ID = 0x4949 /*!< II - for 'intel' */ +}; + + +/* --------------------------------------------------------------- * + * Hinting bit flags in jpeg reader * + * --------------------------------------------------------------- */ + +/*! Hinting bit flags in jpeg reader */ +enum { + L_JPEG_READ_LUMINANCE = 1, /*!< only want luminance data; no chroma */ + L_JPEG_FAIL_ON_BAD_DATA = 2 /*!< don't return possibly damaged pix */ +}; + + +/* --------------------------------------------------------------- * + * Pdf formatted encoding types * + * --------------------------------------------------------------- */ + +/*! Pdf formatted encoding types */ +enum { + L_DEFAULT_ENCODE = 0, /*!< use default encoding based on image */ + L_JPEG_ENCODE = 1, /*!< use dct encoding: 8 and 32 bpp, no cmap */ + L_G4_ENCODE = 2, /*!< use ccitt g4 fax encoding: 1 bpp */ + L_FLATE_ENCODE = 3, /*!< use flate encoding: any depth, cmap ok */ + L_JP2K_ENCODE = 4 /*!< use jp2k encoding: 8 and 32 bpp, no cmap */ +}; + + +/* --------------------------------------------------------------- * + * Compressed image data * + * --------------------------------------------------------------- */ +/* + * In use, either datacomp or data85 will be produced, depending + * on whether the data needs to be ascii85 encoded. PostScript + * requires ascii85 encoding; pdf does not. + * + * For the colormap (flate compression only), PostScript uses ascii85 + * encoding and pdf uses a bracketed array of space-separated + * hex-encoded rgb triples. Only tiff g4 (type == L_G4_ENCODE) uses + * the minisblack field. + */ + +/*! 
Compressed image data */ +struct L_Compressed_Data +{ + l_int32 type; /*!< encoding type: L_JPEG_ENCODE, etc */ + l_uint8 *datacomp; /*!< gzipped raster data */ + size_t nbytescomp; /*!< number of compressed bytes */ + char *data85; /*!< ascii85-encoded gzipped raster data */ + size_t nbytes85; /*!< number of ascii85 encoded bytes */ + char *cmapdata85; /*!< ascii85-encoded uncompressed cmap */ + char *cmapdatahex; /*!< hex pdf array for the cmap */ + l_int32 ncolors; /*!< number of colors in cmap */ + l_int32 w; /*!< image width */ + l_int32 h; /*!< image height */ + l_int32 bps; /*!< bits/sample; typ. 1, 2, 4 or 8 */ + l_int32 spp; /*!< samples/pixel; typ. 1 or 3 */ + l_int32 minisblack; /*!< tiff g4 photometry */ + l_int32 predictor; /*!< flate data has PNG predictors */ + size_t nbytes; /*!< number of uncompressed raster bytes */ + l_int32 res; /*!< resolution (ppi) */ +}; +typedef struct L_Compressed_Data L_COMP_DATA; + + +/* ------------------------------------------------------------------------- * + * Pdf multi image flags * + * ------------------------------------------------------------------------- */ + +/*! Pdf multi image flags */ +enum { + L_FIRST_IMAGE = 1, /*!< first image to be used */ + L_NEXT_IMAGE = 2, /*!< intermediate image; not first or last */ + L_LAST_IMAGE = 3 /*!< last image to be used */ +}; + + +/* ------------------------------------------------------------------------- * + * Intermediate pdf generation data * + * ------------------------------------------------------------------------- */ +/* + * This accumulates data for generating a pdf of a single page consisting + * of an arbitrary number of images. + * + * None of the strings have a trailing newline. + */ + +/*! Intermediate pdf generation data */ +struct L_Pdf_Data +{ + char *title; /*!< optional title for pdf */ + l_int32 n; /*!< number of images */ + l_int32 ncmap; /*!< number of colormaps */ + struct L_Ptra *cida; /*!< array of compressed image data */ + char *id; /*!< %PDF-1.2 id string */ + char *obj1; /*!< catalog string */ + char *obj2; /*!< metadata string */ + char *obj3; /*!< pages string */ + char *obj4; /*!< page string (variable data) */ + char *obj5; /*!< content string (variable data) */ + char *poststream; /*!< post-binary-stream string */ + char *trailer; /*!< trailer string (variable data) */ + struct Pta *xy; /*!< store (xpt, ypt) array */ + struct Pta *wh; /*!< store (wpt, hpt) array */ + struct Box *mediabox; /*!< bounding region for all images */ + struct Sarray *saprex; /*!< pre-binary-stream xobject strings */ + struct Sarray *sacmap; /*!< colormap pdf object strings */ + struct L_Dna *objsize; /*!< sizes of each pdf string object */ + struct L_Dna *objloc; /*!< location of each pdf string object */ + l_int32 xrefloc; /*!< location of xref */ +}; +typedef struct L_Pdf_Data L_PDF_DATA; + + +#endif /* LEPTONICA_IMAGEIO_H */ diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/jbclass.h b/third_party/leptonica/uos/sw_64/include/leptonica/jbclass.h new file mode 100644 index 00000000..84a48a93 --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/jbclass.h @@ -0,0 +1,141 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. 
Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + +#ifndef LEPTONICA_JBCLASS_H +#define LEPTONICA_JBCLASS_H + +/*! + * \file jbclass.h + * + * JbClasser + * JbData + */ + + + /*! + *
+     * The JbClasser struct holds all the data accumulated during the
+     * classification process that can be used for a compressed
+     * jbig2-type representation of a set of images.  This is created
+     * in an initialization process and added to as the selected components
+     * on each successive page are analyzed.
+     * 
+ */ +struct JbClasser +{ + struct Sarray *safiles; /*!< input page image file names */ + l_int32 method; /*!< JB_RANKHAUS, JB_CORRELATION */ + l_int32 components; /*!< JB_CONN_COMPS, JB_CHARACTERS or */ + /*!< JB_WORDS */ + l_int32 maxwidth; /*!< max component width allowed */ + l_int32 maxheight; /*!< max component height allowed */ + l_int32 npages; /*!< number of pages already processed */ + l_int32 baseindex; /*!< number components already processed */ + /*!< on fully processed pages */ + struct Numa *nacomps; /*!< number of components on each page */ + l_int32 sizehaus; /*!< size of square struct elem for haus */ + l_float32 rankhaus; /*!< rank val of haus match, each way */ + l_float32 thresh; /*!< thresh value for correlation score */ + l_float32 weightfactor; /*!< corrects thresh value for heaver */ + /*!< components; use 0 for no correction */ + struct Numa *naarea; /*!< w * h of each template, without */ + /*!< extra border pixels */ + l_int32 w; /*!< max width of original src images */ + l_int32 h; /*!< max height of original src images */ + l_int32 nclass; /*!< current number of classes */ + l_int32 keep_pixaa; /*!< If zero, pixaa isn't filled */ + struct Pixaa *pixaa; /*!< instances for each class; unbordered */ + struct Pixa *pixat; /*!< templates for each class; bordered */ + /*!< and not dilated */ + struct Pixa *pixatd; /*!< templates for each class; bordered */ + /*!< and dilated */ + struct L_DnaHash *dahash; /*!< Hash table to find templates by size */ + struct Numa *nafgt; /*!< fg areas of undilated templates; */ + /*!< only used for rank < 1.0 */ + struct Pta *ptac; /*!< centroids of all bordered cc */ + struct Pta *ptact; /*!< centroids of all bordered template cc */ + struct Numa *naclass; /*!< array of class ids for each component */ + struct Numa *napage; /*!< array of page nums for each component */ + struct Pta *ptaul; /*!< array of UL corners at which the */ + /*!< template is to be placed for each */ + /*!< component */ + struct Pta *ptall; /*!< similar to ptaul, but for LL corners */ +}; +typedef struct JbClasser JBCLASSER; + + + /*! + *
+     * The JbData struct holds all the data required for
+     * the compressed jbig2-type representation of a set of images.
+     * The data can be written to file, read back, and used
+     * to regenerate an approximate version of the original,
+     * which differs in two ways from the original:
+     *   (1) It uses a template image for each c.c. instead of the
+     *       original instance, for each occurrence on each page.
+     *   (2) It discards components with either a height or width larger
+     *       than the maxima, given here by the lattice dimensions
+     *       used for storing the templates.
+     * 
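+     * Added end-to-end sketch (assumes the usual jbclass.c functions:
+     * jbCorrelationInit, jbAddPages, jbDataCreate, jbDataSave,
+     * jbDataDestroy, jbClasserDestroy; safiles is a placeholder Sarray
+     * of page image file names):
+     *
+     *     JBCLASSER *classer =
+     *         jbCorrelationInit(JB_CHARACTERS, 0, 0, 0.8f, 0.6f);
+     *     jbAddPages(classer, safiles);        // classify components per page
+     *     JBDATA *jbdata = jbDataCreate(classer);   // build template composite
+     *     jbDataSave(jbdata, "/tmp/jbroot");   // writes .templates.png and .data
+     *     jbDataDestroy(&jbdata);
+     *     jbClasserDestroy(&classer);
+     *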
+ */ +struct JbData +{ + struct Pix *pix; /*!< template composite for all classes */ + l_int32 npages; /*!< number of pages */ + l_int32 w; /*!< max width of original page images */ + l_int32 h; /*!< max height of original page images */ + l_int32 nclass; /*!< number of classes */ + l_int32 latticew; /*!< lattice width for template composite */ + l_int32 latticeh; /*!< lattice height for template composite */ + struct Numa *naclass; /*!< array of class ids for each component */ + struct Numa *napage; /*!< array of page nums for each component */ + struct Pta *ptaul; /*!< array of UL corners at which the */ + /*!< template is to be placed for each */ + /*!< component */ +}; +typedef struct JbData JBDATA; + + + /*! Classifier methods */ +enum { + JB_RANKHAUS = 0, + JB_CORRELATION = 1 +}; + + /*! For jbGetComponents(): type of component to extract from images */ +enum { + JB_CONN_COMPS = 0, + JB_CHARACTERS = 1, + JB_WORDS = 2 +}; + + /*! These parameters are used for naming the two files + * in which the jbig2-like compressed data is stored. */ +#define JB_TEMPLATE_EXT ".templates.png" +#define JB_DATA_EXT ".data" + + +#endif /* LEPTONICA_JBCLASS_H */ diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/leptwin.h b/third_party/leptonica/uos/sw_64/include/leptonica/leptwin.h new file mode 100644 index 00000000..451da6b6 --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/leptwin.h @@ -0,0 +1,45 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *====================================================================*/ + +#ifdef _WIN32 +#ifndef LEPTONICA_LEPTWIN_H +#define LEPTONICA_LEPTWIN_H + +#include "allheaders.h" +#include + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +LEPT_DLL extern HBITMAP pixGetWindowsHBITMAP( PIX *pixs ); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* LEPTONICA_LEPTWIN_H */ +#endif /* _WIN32 */ diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/list.h b/third_party/leptonica/uos/sw_64/include/leptonica/list.h new file mode 100644 index 00000000..d207e79f --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/list.h @@ -0,0 +1,90 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + + +#ifndef LEPTONICA_LIST_H +#define LEPTONICA_LIST_H + +/*! + * \file list.h + * + *
+ *       Cell for double-linked lists
+ *
+ *       This allows composition of a list of cells with
+ *           prev, next and data pointers.  Generic data
+ *           structures hang on the list cell data pointers.
+ *
+ *       The list is not circular because that would add much
+ *           complexity in traversing the list under general
+ *           conditions where list cells can be added and removed.
+ *           The only disadvantage of not having the head point to
+ *           the last cell is that the list must be traversed to
+ *           find its tail.  However, this traversal is fast, and
+ *           the listRemoveFromTail() function updates the tail
+ *           so there is no searching overhead with repeated use.
+ *
+ *       The list macros are used to run through a list, and their
+ *       use is encouraged.  They are invoked, e.g., as
+ *
+ *             DLLIST  *head, *elem;
+ *             ...
+ *             L_BEGIN_LIST_FORWARD(head, elem)
+ *                 <do something with elem->data>
+ *             L_END_LIST
+ * 
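+ *       A fuller added sketch (assumes the usual list.c functions
+ *       listAddToTail and listDestroy; pix1 and pix2 are placeholder
+ *       PIX pointers stored as the generic data):
+ *
+ *             DLLIST *head = NULL, *tail = NULL, *elem;
+ *             listAddToTail(&head, &tail, pix1);
+ *             listAddToTail(&head, &tail, pix2);
+ *             L_BEGIN_LIST_FORWARD(head, elem)
+ *                 pixDestroy((PIX **)&elem->data);
+ *             L_END_LIST
+ *             listDestroy(&head);
+ *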
+ */ + +struct DoubleLinkedList +{ + struct DoubleLinkedList *prev; + struct DoubleLinkedList *next; + void *data; +}; +typedef struct DoubleLinkedList DLLIST; + + + /*! Simple list traverse macro - forward */ +#define L_BEGIN_LIST_FORWARD(head, element) \ + { \ + DLLIST *_leptvar_nextelem_; \ + for ((element) = (head); (element); (element) = _leptvar_nextelem_) { \ + _leptvar_nextelem_ = (element)->next; + + + /*! Simple list traverse macro - reverse */ +#define L_BEGIN_LIST_REVERSE(tail, element) \ + { \ + DLLIST *_leptvar_prevelem_; \ + for ((element) = (tail); (element); (element) = _leptvar_prevelem_) { \ + _leptvar_prevelem_ = (element)->prev; + + + /*! Simple list traverse macro - end of a list traverse */ +#define L_END_LIST }} + + +#endif /* LEPTONICA_LIST_H */ diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/morph.h b/third_party/leptonica/uos/sw_64/include/leptonica/morph.h new file mode 100644 index 00000000..c0cd7987 --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/morph.h @@ -0,0 +1,248 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + +#ifndef LEPTONICA_MORPH_H +#define LEPTONICA_MORPH_H + +/*! + * \file morph.h + * + *
+ *  Contains the following structs:
+ *      struct Sel
+ *      struct Sela
+ *      struct Kernel
+ *
+ *  Contains definitions for:
+ *      morphological b.c. flags
+ *      structuring element types
+ *      runlength flags for granulometry
+ *      direction flags for grayscale morphology
+ *      morphological operation flags
+ *      standard border size
+ *      grayscale intensity scaling flags
+ *      morphological tophat flags
+ *      arithmetic and logical operator flags
+ *      grayscale morphology selection flags
+ *      distance function b.c. flags
+ *      image comparison flags
+ *      color content flags
+ * 
+ */ + +/*-------------------------------------------------------------------------* + * Sel and Sel array * + *-------------------------------------------------------------------------*/ +#define SEL_VERSION_NUMBER 1 + +/*! Selection */ +struct Sel +{ + l_int32 sy; /*!< sel height */ + l_int32 sx; /*!< sel width */ + l_int32 cy; /*!< y location of sel origin */ + l_int32 cx; /*!< x location of sel origin */ + l_int32 **data; /*!< {0,1,2}; data[i][j] in [row][col] order */ + char *name; /*!< used to find sel by name */ +}; +typedef struct Sel SEL; + +/*! Array of Sel */ +struct Sela +{ + l_int32 n; /*!< number of sel actually stored */ + l_int32 nalloc; /*!< size of allocated ptr array */ + struct Sel **sel; /*!< sel ptr array */ +}; +typedef struct Sela SELA; + + +/*-------------------------------------------------------------------------* + * Kernel * + *-------------------------------------------------------------------------*/ +#define KERNEL_VERSION_NUMBER 2 + +/*! Kernel */ +struct L_Kernel +{ + l_int32 sy; /*!< kernel height */ + l_int32 sx; /*!< kernel width */ + l_int32 cy; /*!< y location of kernel origin */ + l_int32 cx; /*!< x location of kernel origin */ + l_float32 **data; /*!< data[i][j] in [row][col] order */ +}; +typedef struct L_Kernel L_KERNEL; + + +/*-------------------------------------------------------------------------* + * Morphological boundary condition flags * + * * + * Two types of boundary condition for erosion. * + * The global variable MORPH_BC takes on one of these two values. * + * See notes in morph.c for usage. * + *-------------------------------------------------------------------------*/ + +/*! Morphological boundary condition flags */ +enum { + SYMMETRIC_MORPH_BC = 0, + ASYMMETRIC_MORPH_BC = 1 +}; + +/*-------------------------------------------------------------------------* + * Structuring element types * + *-------------------------------------------------------------------------*/ + +/*! Structuring element types */ +enum { + SEL_DONT_CARE = 0, + SEL_HIT = 1, + SEL_MISS = 2 +}; + +/*-------------------------------------------------------------------------* + * Runlength flags for granulometry * + *-------------------------------------------------------------------------*/ + +/*! Runlength flags for granulometry */ +enum { + L_RUN_OFF = 0, + L_RUN_ON = 1 +}; + +/*-------------------------------------------------------------------------* + * Direction flags for grayscale morphology, granulometry, * + * composable Sels, convolution, etc. * + *-------------------------------------------------------------------------*/ + +/*! Direction flags */ +enum { + L_HORIZ = 1, + L_VERT = 2, + L_BOTH_DIRECTIONS = 3 +}; + +/*-------------------------------------------------------------------------* + * Morphological operation flags * + *-------------------------------------------------------------------------*/ + +/*! Morphological operation flags */ +enum { + L_MORPH_DILATE = 1, + L_MORPH_ERODE = 2, + L_MORPH_OPEN = 3, + L_MORPH_CLOSE = 4, + L_MORPH_HMT = 5 +}; + +/*-------------------------------------------------------------------------* + * Grayscale intensity scaling flags * + *-------------------------------------------------------------------------*/ + +/*! Grayscale intensity scaling flags */ +enum { + L_LINEAR_SCALE = 1, + L_LOG_SCALE = 2 +}; + +/*-------------------------------------------------------------------------* + * Morphological tophat flags * + *-------------------------------------------------------------------------*/ + +/*! 
Morphological tophat flags */ +enum { + L_TOPHAT_WHITE = 0, + L_TOPHAT_BLACK = 1 +}; + +/*-------------------------------------------------------------------------* + * Arithmetic and logical operator flags * + * (use on grayscale images and Numas) * + *-------------------------------------------------------------------------*/ + +/*! Arithmetic and logical operator flags */ +enum { + L_ARITH_ADD = 1, + L_ARITH_SUBTRACT = 2, + L_ARITH_MULTIPLY = 3, /* on numas only */ + L_ARITH_DIVIDE = 4, /* on numas only */ + L_UNION = 5, /* on numas only */ + L_INTERSECTION = 6, /* on numas only */ + L_SUBTRACTION = 7, /* on numas only */ + L_EXCLUSIVE_OR = 8 /* on numas only */ +}; + +/*-------------------------------------------------------------------------* + * Min/max selection flags * + *-------------------------------------------------------------------------*/ + +/*! Min/max selection flags */ +enum { + L_CHOOSE_MIN = 1, /* useful in a downscaling "erosion" */ + L_CHOOSE_MAX = 2, /* useful in a downscaling "dilation" */ + L_CHOOSE_MAXDIFF = 3, /* useful in a downscaling contrast */ + L_CHOOSE_MIN_BOOST = 4, /* use a modification of the min value */ + L_CHOOSE_MAX_BOOST = 5 /* use a modification of the max value */ +}; + +/*-------------------------------------------------------------------------* + * Distance function b.c. flags * + *-------------------------------------------------------------------------*/ + +/*! Distance function b.c. flags */ +enum { + L_BOUNDARY_BG = 1, /* assume bg outside image */ + L_BOUNDARY_FG = 2 /* assume fg outside image */ +}; + +/*-------------------------------------------------------------------------* + * Image comparison flags * + *-------------------------------------------------------------------------*/ + +/*! Image comparison flags */ +enum { + L_COMPARE_XOR = 1, + L_COMPARE_SUBTRACT = 2, + L_COMPARE_ABS_DIFF = 3 +}; + +/*-------------------------------------------------------------------------* + * Color content flags * + *-------------------------------------------------------------------------*/ + +/*! Color content flags */ +enum { + L_MAX_DIFF_FROM_AVERAGE_2 = 1, + L_MAX_MIN_DIFF_FROM_2 = 2, + L_MAX_DIFF = 3 +}; + +/*-------------------------------------------------------------------------* + * Standard size of border added around images for special processing * + *-------------------------------------------------------------------------*/ +static const l_int32 ADDED_BORDER = 32; /*!< pixels, not bits */ + + +#endif /* LEPTONICA_MORPH_H */ diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/pix.h b/third_party/leptonica/uos/sw_64/include/leptonica/pix.h new file mode 100644 index 00000000..c0dc5548 --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/pix.h @@ -0,0 +1,1284 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. 
+ - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + +#ifndef LEPTONICA_PIX_H +#define LEPTONICA_PIX_H + +/*! + * \file pix.h + * + *
+ *   Valid image types in leptonica:
+ *       Pix: 1 bpp, with and without colormap
+ *       Pix: 2 bpp, with and without colormap
+ *       Pix: 4 bpp, with and without colormap
+ *       Pix: 8 bpp, with and without colormap
+ *       Pix: 16 bpp (1 spp)
+ *       Pix: 32 bpp (rgb, 3 spp)
+ *       Pix: 32 bpp (rgba, 4 spp)
+ *       FPix: 32 bpp float
+ *       DPix: 64 bpp double
+ *       Notes:
+ *          (1) The only valid Pix image type with alpha is rgba.
+ *              In particular, the alpha component is not used in
+ *              cmapped images.
+ *          (2) PixComp can hold any Pix with IFF_PNG encoding.
+ *
+ *   Contents:
+ *
+ *   (1) This file defines most of the image-related structs used in leptonica:
+ *         struct Pix
+ *         struct PixColormap
+ *         struct RGBA_Quad
+ *         struct Pixa
+ *         struct Pixaa
+ *         struct Box
+ *         struct Boxa
+ *         struct Boxaa
+ *         struct Pta
+ *         struct Ptaa
+ *         struct Pixacc
+ *         struct PixTiling
+ *         struct FPix
+ *         struct FPixa
+ *         struct DPix
+ *         struct PixComp
+ *         struct PixaComp
+ *
+ *   (2) This file has definitions for:
+ *         Colors for RGB
+ *         Colors for drawing boxes
+ *         Perceptual color weights
+ *         Colormap conversion flags
+ *         Rasterop bit flags
+ *         Structure access flags (for insert, copy, clone, copy-clone)
+ *         Sorting flags (by type and direction)
+ *         Blending flags
+ *         Graphics pixel setting flags
+ *         Size filtering flags
+ *         Color component selection flags
+ *         16-bit conversion flags
+ *         Rotation and shear flags
+ *         Affine transform order flags
+ *         Grayscale filling flags
+ *         Flags for setting to white or black
+ *         Flags for getting white or black pixel value
+ *         Flags for 8 and 16 bit pixel sums
+ *         Dithering flags
+ *         Distance flags
+ *         Statistical measures
+ *         Set selection flags
+ *         Text orientation flags
+ *         Edge orientation flags
+ *         Line orientation flags
+ *         Scan direction flags
+ *         Box size adjustment flags
+ *         Flags for selecting box boundaries from two choices
+ *         Handling overlapping bounding boxes in boxa
+ *         Flags for replacing invalid boxes
+ *         Horizontal warp
+ *         Pixel selection for resampling
+ *         Thinning flags
+ *         Runlength flags
+ *         Edge filter flags
+ *         Subpixel color component ordering in LCD display
+ *         HSV histogram flags
+ *         Region flags (inclusion, exclusion)
+ *         Flags for adding text to a pix
+ *         Flags for plotting on a pix
+ *         Flags for selecting display program
+ *         Flags in the 'special' pix field for non-default operations
+ *         Handling negative values in conversion to unsigned int
+ *         Relative to zero flags
+ *         Flags for adding or removing trailing slash from string
+ *
+ *   (3) This file has typedefs for the pix allocator and deallocator functions
+ *         alloc_fn()
+ *         dealloc_fn().
+ * 
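As a small illustration of item (3), functions with these signatures can stand in for malloc/free when managing Pix data; the sketch below only defines such a pair and binds them to the typedefs (registering them with leptonica's pix memory manager is assumed behaviour and not shown here).

#include <stdio.h>
#include <stdlib.h>

/* a logging allocator pair matching the alloc_fn / dealloc_fn typedefs
 * declared at the end of this header (assumed to be in scope here) */
static void *log_alloc(size_t nbytes)
{
    fprintf(stderr, "pix alloc: %zu bytes\n", nbytes);
    return malloc(nbytes);
}

static void log_dealloc(void *ptr)
{
    free(ptr);
}

static alloc_fn   pix_allocator   = log_alloc;    /* could be handed to the pix */
static dealloc_fn pix_deallocator = log_dealloc;  /* memory manager hook        */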
+ */ + + +/*-------------------------------------------------------------------------* + * Basic Pix * + *-------------------------------------------------------------------------*/ + /* The 'special' field is by default 0, but it can hold integers + * that direct non-default actions, e.g., in png and jpeg I/O. */ + +/*! Basic Pix */ +struct Pix +{ + l_uint32 w; /*!< width in pixels */ + l_uint32 h; /*!< height in pixels */ + l_uint32 d; /*!< depth in bits (bpp) */ + l_uint32 spp; /*!< number of samples per pixel */ + l_uint32 wpl; /*!< 32-bit words/line */ + l_uint32 refcount; /*!< reference count (1 if no clones) */ + l_int32 xres; /*!< image res (ppi) in x direction */ + /*!< (use 0 if unknown) */ + l_int32 yres; /*!< image res (ppi) in y direction */ + /*!< (use 0 if unknown) */ + l_int32 informat; /*!< input file format, IFF_* */ + l_int32 special; /*!< special instructions for I/O, etc */ + char *text; /*!< text string associated with pix */ + struct PixColormap *colormap; /*!< colormap (may be null) */ + l_uint32 *data; /*!< the image data */ +}; +typedef struct Pix PIX; + +/*! Colormap of a Pix */ +struct PixColormap +{ + void *array; /*!< colormap table (array of RGBA_QUAD) */ + l_int32 depth; /*!< of pix (1, 2, 4 or 8 bpp) */ + l_int32 nalloc; /*!< number of color entries allocated */ + l_int32 n; /*!< number of color entries used */ +}; +typedef struct PixColormap PIXCMAP; + + + /*! Colormap table entry (after the BMP version). + * Note that the BMP format stores the colormap table exactly + * as it appears here, with color samples being stored sequentially, + * in the order (b,g,r,a). */ +struct RGBA_Quad +{ + l_uint8 blue; /*!< blue value */ + l_uint8 green; /*!< green value */ + l_uint8 red; /*!< red value */ + l_uint8 alpha; /*!< alpha value */ +}; +typedef struct RGBA_Quad RGBA_QUAD; + + + +/*-------------------------------------------------------------------------* + * Colors for 32 bpp * + *-------------------------------------------------------------------------*/ +/*
+ *  Notes:
+ *      (1) These are the byte indices for colors in 32 bpp images.
+ *          They are used through the GET/SET_DATA_BYTE accessors.
+ *          The 4th byte, typically known as the "alpha channel" and used
+ *          for blending, is used to a small extent in leptonica.
+ *      (2) Do not change these values!  If you redefine them, functions
+ *          that have the shifts hardcoded for efficiency and conciseness
+ *          (instead of using the constants below) will break.  These
+ *          functions are labelled with "***"  next to their names at
+ *          the top of the files in which they are defined.
+ *      (3) The shifts to extract the red, green, blue and alpha components
+ *          from a 32 bit pixel are defined here.
+ * 
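A short sketch of the extraction described in note (3); the shift amounts mirror what L_RED_SHIFT through L_ALPHA_SHIFT (defined just below) work out to for the byte indices above (24, 16, 8, 0), and the sample pixel value is made up.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t pixel32 = 0x11223344;                        /* r=0x11 g=0x22 b=0x33 a=0x44 */
    unsigned red   = (unsigned)((pixel32 >> 24) & 0xff);  /* L_RED_SHIFT   */
    unsigned green = (unsigned)((pixel32 >> 16) & 0xff);  /* L_GREEN_SHIFT */
    unsigned blue  = (unsigned)((pixel32 >>  8) & 0xff);  /* L_BLUE_SHIFT  */
    unsigned alpha = (unsigned)( pixel32        & 0xff);  /* L_ALPHA_SHIFT */
    printf("r=%02x g=%02x b=%02x a=%02x\n", red, green, blue, alpha);
    return 0;
}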
+ */ + +/*! Colors for 32 bpp */ +enum { + COLOR_RED = 0, /*!< red color index in RGBA_QUAD */ + COLOR_GREEN = 1, /*!< green color index in RGBA_QUAD */ + COLOR_BLUE = 2, /*!< blue color index in RGBA_QUAD */ + L_ALPHA_CHANNEL = 3 /*!< alpha value index in RGBA_QUAD */ +}; + +static const l_int32 L_RED_SHIFT = + 8 * (sizeof(l_uint32) - 1 - COLOR_RED); /* 24 */ +static const l_int32 L_GREEN_SHIFT = + 8 * (sizeof(l_uint32) - 1 - COLOR_GREEN); /* 16 */ +static const l_int32 L_BLUE_SHIFT = + 8 * (sizeof(l_uint32) - 1 - COLOR_BLUE); /* 8 */ +static const l_int32 L_ALPHA_SHIFT = + 8 * (sizeof(l_uint32) - 1 - L_ALPHA_CHANNEL); /* 0 */ + + +/*-------------------------------------------------------------------------* + * Colors for drawing boxes * + *-------------------------------------------------------------------------*/ +/*! Colors for drawing boxes */ +enum { + L_DRAW_RED = 0, /*!< draw in red */ + L_DRAW_GREEN = 1, /*!< draw in green */ + L_DRAW_BLUE = 2, /*!< draw in blue */ + L_DRAW_SPECIFIED = 3, /*!< draw specified color */ + L_DRAW_RGB = 4, /*!< draw as sequence of r,g,b */ + L_DRAW_RANDOM = 5 /*!< draw randomly chosen colors */ +}; + + +/*-------------------------------------------------------------------------* + * Perceptual color weights * + *-------------------------------------------------------------------------*/ +/*
+ *  Notes:
+ *      (1) These perceptual weighting factors are ad-hoc, but they do
+ *          add up to 1.  They are unlike, for example, the weighting
+ *          factors for converting RGB to luminance, or more specifically
+ *          to Y in the YUV colorspace; those numbers come from the
+ *          International Telecommunications Union, via ITU-R.
+ * 
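For illustration, a single "perceptual gray" value can be formed from an RGB triple with the three weights defined just below (0.3, 0.5, 0.2); the sample color is arbitrary.

#include <stdio.h>

int main(void)
{
    int r = 200, g = 120, b = 40;
    float gray = 0.3f * r + 0.5f * g + 0.2f * b;   /* weights sum to 1.0 */
    printf("gray = %.1f\n", gray);                 /* 60 + 60 + 8 = 128.0 */
    return 0;
}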
+ */ +static const l_float32 L_RED_WEIGHT = 0.3; /*!< Percept. weight for red */ +static const l_float32 L_GREEN_WEIGHT = 0.5; /*!< Percept. weight for green */ +static const l_float32 L_BLUE_WEIGHT = 0.2; /*!< Percept. weight for blue */ + + +/*-------------------------------------------------------------------------* + * Flags for colormap conversion * + *-------------------------------------------------------------------------*/ +/*! Flags for colormap conversion */ +enum { + REMOVE_CMAP_TO_BINARY = 0, /*!< remove colormap for conv to 1 bpp */ + REMOVE_CMAP_TO_GRAYSCALE = 1, /*!< remove colormap for conv to 8 bpp */ + REMOVE_CMAP_TO_FULL_COLOR = 2, /*!< remove colormap for conv to 32 bpp */ + REMOVE_CMAP_WITH_ALPHA = 3, /*!< remove colormap and alpha */ + REMOVE_CMAP_BASED_ON_SRC = 4 /*!< remove depending on src format */ +}; + + +/*------------------------------------------------------------------------* + *! + *
+ * The following operation bit flags have been modified from
+ * Sun's pixrect.h.
+ *
+ * The 'op' in 'rasterop' is represented by an integer
+ * composed with Boolean functions using the set of five integers
+ * given below.  The integers, and the op codes resulting from
+ * boolean expressions on them, need only be in the range from 0 to 15.
+ * The function is applied on a per-pixel basis.
+ *
+ * Examples: the op code representing ORing the src and dest
+ * is computed using the bit OR, as PIX_SRC | PIX_DST;  the op
+ * code representing XORing src and dest is found from
+ * PIX_SRC ^ PIX_DST;  the op code representing ANDing src and dest
+ * is found from PIX_SRC & PIX_DST.  Note that
+ * PIX_NOT(PIX_CLR) = PIX_SET, and v.v., as they must be.
+ *
+ * We use the following set of definitions:
+ *
+ *      #define   PIX_SRC      0xc
+ *      #define   PIX_DST      0xa
+ *      #define   PIX_NOT(op)  (op) ^ 0xf
+ *      #define   PIX_CLR      0x0
+ *      #define   PIX_SET      0xf
+ *
+ * These definitions differ from Sun's, in that Sun left-shifted
+ * each value by 1 pixel, and used the least significant bit as a
+ * flag for the "pseudo-operation" of clipping.  We don't need
+ * this bit, because it is both efficient and safe ALWAYS to clip
+ * the rectangles to the src and dest images, which is what we do.
+ * See the notes in rop.h on the general choice of these bit flags.
+ *
+ * [If for some reason you need compatibility with Sun's xview package,
+ * you can adopt the original Sun definitions to avoid redefinition conflicts:
+ *
+ *      #define   PIX_SRC      (0xc << 1)
+ *      #define   PIX_DST      (0xa << 1)
+ *      #define   PIX_NOT(op)  ((op) ^ 0x1e)
+ *      #define   PIX_CLR      (0x0 << 1)
+ *      #define   PIX_SET      (0xf << 1)
+ * ]
+ *
+ * We have, for reference, the following 16 unique op flags:
+ *
+ *      PIX_CLR                           0000             0x0
+ *      PIX_SET                           1111             0xf
+ *      PIX_SRC                           1100             0xc
+ *      PIX_DST                           1010             0xa
+ *      PIX_NOT(PIX_SRC)                  0011             0x3
+ *      PIX_NOT(PIX_DST)                  0101             0x5
+ *      PIX_SRC | PIX_DST                 1110             0xe
+ *      PIX_SRC & PIX_DST                 1000             0x8
+ *      PIX_SRC ^ PIX_DST                 0110             0x6
+ *      PIX_NOT(PIX_SRC) | PIX_DST        1011             0xb
+ *      PIX_NOT(PIX_SRC) & PIX_DST        0010             0x2
+ *      PIX_SRC | PIX_NOT(PIX_DST)        1101             0xd
+ *      PIX_SRC & PIX_NOT(PIX_DST)        0100             0x4
+ *      PIX_NOT(PIX_SRC | PIX_DST)        0001             0x1
+ *      PIX_NOT(PIX_SRC & PIX_DST)        0111             0x7
+ *      PIX_NOT(PIX_SRC ^ PIX_DST)        1001             0x9
+ *
+ * 
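A quick sketch that reproduces a few rows of the table above from the quoted definitions; the local EX_* names are used here purely as stand-ins for PIX_SRC, PIX_DST and PIX_NOT to avoid clashing with the real macros.

#include <stdio.h>

#define EX_SRC      0xc
#define EX_DST      0xa
#define EX_NOT(op)  ((op) ^ 0xf)

int main(void)
{
    printf("SRC | DST      = 0x%x\n", EX_SRC | EX_DST);         /* 0xe, PIX_PAINT    */
    printf("SRC & DST      = 0x%x\n", EX_SRC & EX_DST);         /* 0x8, PIX_MASK     */
    printf("SRC ^ DST      = 0x%x\n", EX_SRC ^ EX_DST);         /* 0x6, PIX_XOR      */
    printf("DST & NOT(SRC) = 0x%x\n", EX_DST & EX_NOT(EX_SRC)); /* 0x2, PIX_SUBTRACT */
    printf("NOT(CLR)       = 0x%x\n", EX_NOT(0x0));             /* 0xf, PIX_SET      */
    return 0;
}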
+ *-------------------------------------------------------------------------*/ + +#define PIX_SRC (0xc) /*!< use source pixels */ +#define PIX_DST (0xa) /*!< use destination pixels */ +#define PIX_NOT(op) ((op) ^ 0x0f) /*!< invert operation %op */ +#define PIX_CLR (0x0) /*!< clear pixels */ +#define PIX_SET (0xf) /*!< set pixels */ + +#define PIX_PAINT (PIX_SRC | PIX_DST) /*!< paint = src | dst */ +#define PIX_MASK (PIX_SRC & PIX_DST) /*!< mask = src & dst */ +#define PIX_SUBTRACT (PIX_DST & PIX_NOT(PIX_SRC)) /*!< subtract = */ + /*!< src & !dst */ +#define PIX_XOR (PIX_SRC ^ PIX_DST) /*!< xor = src ^ dst */ + + +/*-------------------------------------------------------------------------* + *
+ *   Important Notes:
+ *
+ *       (1) The image data is stored in a single contiguous
+ *           array of l_uint32, into which the pixels are packed.
+ *           By "packed" we mean that there are no unused bits
+ *           between pixels, except for end-of-line padding to
+ *           satisfy item (2) below.
+ *
+ *       (2) Every image raster line begins on a 32-bit word
+ *           boundary within this array.
+ *
+ *       (3) Pix image data is stored in 32-bit units, with the
+ *           pixels ordered from left to right in the image being
+ *           stored in order from the MSB to LSB within the word,
+ *           for both big-endian and little-endian machines.
+ *           This is the natural ordering for big-endian machines,
+ *           as successive bytes are stored and fetched progressively
+ *           to the right.  However, for little-endians, when storing
+ *           we re-order the bytes from this byte stream order, and
+ *           reshuffle again for byte access on 32-bit entities.
+ *           So if the bytes come in sequence from left to right, we
+ *           store them on little-endians in byte order:
+ *                3 2 1 0 7 6 5 4 ...
+ *           This MSB to LSB ordering allows left and right shift
+ *           operations on 32 bit words to move the pixels properly.
+ *
+ *       (4) We use 32 bit pixels for both RGB and RGBA color images.
+ *           The A (alpha) byte is ignored in most leptonica functions
+ *           operating on color images.  Within each 4 byte pixel, the
+ *           colors are ordered from MSB to LSB, as follows:
+ *
+ *                |  MSB  |  2nd MSB  |  3rd MSB  |  LSB  |
+ *                   red      green       blue      alpha
+ *                    0         1           2         3   (big-endian)
+ *                    3         2           1         0   (little-endian)
+ *
+ *           Because we use MSB to LSB ordering within the 32-bit word,
+ *           the individual 8-bit samples can be accessed with
+ *           GET_DATA_BYTE and SET_DATA_BYTE macros, using the
+ *              (implicitly big-endian) ordering
+ *                 red:    byte 0  (MSB)
+ *                 green:  byte 1  (2nd MSB)
+ *                 blue:   byte 2  (3rd MSB)
+ *                 alpha:  byte 3  (LSB)
+ *
+ *           The specific color assignment is made in this file,
+ *           through the definitions of COLOR_RED, etc.  Then the R, G,
+ *           B and A sample values can be retrieved using
+ *                 redval = GET_DATA_BYTE(&pixel, COLOR_RED);
+ *                 greenval = GET_DATA_BYTE(&pixel, COLOR_GREEN);
+ *                 blueval = GET_DATA_BYTE(&pixel, COLOR_BLUE);
+ *                 alphaval = GET_DATA_BYTE(&pixel, L_ALPHA_CHANNEL);
+ *           and they can be set with
+ *                 SET_DATA_BYTE(&pixel, COLOR_RED, redval);
+ *                 SET_DATA_BYTE(&pixel, COLOR_GREEN, greenval);
+ *                 SET_DATA_BYTE(&pixel, COLOR_BLUE, blueval);
+ *                 SET_DATA_BYTE(&pixel, L_ALPHA_CHANNEL, alphaval);
+ *
+ *           For extra speed we extract these components directly
+ *           by shifting and masking, explicitly using the values in
+ *           L_RED_SHIFT, etc.:
+ *                 (pixel32 >> L_RED_SHIFT) & 0xff;         (red)
+ *                 (pixel32 >> L_GREEN_SHIFT) & 0xff;       (green)
+ *                 (pixel32 >> L_BLUE_SHIFT) & 0xff;        (blue)
+ *                 (pixel32 >> L_ALPHA_SHIFT) & 0xff;       (alpha)
+ *           All these operations work properly on both big- and little-endians.
+ *
+ *           For a few situations, these color shift values are hard-coded.
+ *           Changing the RGB color component ordering through the assignments
+ *           in this file will cause functions marked with "***" to fail.
+ *
+ *       (5) A reference count is held within each pix, giving the
+ *           number of ptrs to the pix.  When a pixClone() call
+ *           is made, the ref count is increased by 1, and
+ *           when a pixDestroy() call is made, the reference count
+ *           of the pix is decremented.  The pix is only destroyed
+ *           when the reference count goes to zero.
+ *
+ *       (6) The version numbers (below) are used in the serialization
+ *           of these data structures.  They are placed in the files,
+ *           and rarely (if ever) change.  Provision is currently made for
+ *           backward compatibility in reading from boxaa version 2.
+ *
+ *       (7) The serialization dependencies are as follows:
+ *               pixaa  :  pixa  :  boxa
+ *               boxaa  :  boxa
+ *           So, for example, pixaa and boxaa can be changed without
+ *           forcing a change in pixa or boxa.  However, if pixa is
+ *           changed, it forces a change in pixaa, and if boxa is
+ *           changed, it forces a change in the other three.
+ *           We define four version numbers:
+ *               PIXAA_VERSION_NUMBER
+ *               PIXA_VERSION_NUMBER
+ *               BOXAA_VERSION_NUMBER
+ *               BOXA_VERSION_NUMBER
+ * 
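A worked example of notes (1) and (2): each raster line is padded out to a whole number of 32-bit words, so the wpl field is the bit width of one line rounded up to a multiple of 32 and divided by 32, and the data block holds h such lines; the image dimensions below are arbitrary.

#include <stdio.h>

int main(void)
{
    unsigned w = 1000, h = 600, d = 1;           /* 1 bpp, 1000 x 600 image */
    unsigned wpl   = (w * d + 31) / 32;          /* 1000 bits -> 32 words (1024 bits) per line */
    unsigned bytes = 4 * wpl * h;                /* size of the l_uint32 data array in bytes */
    printf("wpl = %u, data bytes = %u\n", wpl, bytes);   /* wpl = 32, 76800 bytes */
    return 0;
}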
+ *-------------------------------------------------------------------------*/ + + + +/*-------------------------------------------------------------------------* + * Array of pix * + *-------------------------------------------------------------------------*/ + + /* Serialization for primary data structures */ +#define PIXAA_VERSION_NUMBER 2 /*!< Version for Pixaa serialization */ +#define PIXA_VERSION_NUMBER 2 /*!< Version for Pixa serialization */ +#define BOXA_VERSION_NUMBER 2 /*!< Version for Boxa serialization */ +#define BOXAA_VERSION_NUMBER 3 /*!< Version for Boxaa serialization */ + +/*! Array of pix */ +struct Pixa +{ + l_int32 n; /*!< number of Pix in ptr array */ + l_int32 nalloc; /*!< number of Pix ptrs allocated */ + l_uint32 refcount; /*!< reference count (1 if no clones) */ + struct Pix **pix; /*!< the array of ptrs to pix */ + struct Boxa *boxa; /*!< array of boxes */ +}; +typedef struct Pixa PIXA; + +/*! Array of arrays of pix */ +struct Pixaa +{ + l_int32 n; /*!< number of Pixa in ptr array */ + l_int32 nalloc; /*!< number of Pixa ptrs allocated */ + struct Pixa **pixa; /*!< array of ptrs to pixa */ + struct Boxa *boxa; /*!< array of boxes */ +}; +typedef struct Pixaa PIXAA; + + +/*-------------------------------------------------------------------------* + * Basic rectangle and rectangle arrays * + *-------------------------------------------------------------------------*/ + +/*! Basic rectangle */ +struct Box +{ + l_int32 x; /*!< left coordinate */ + l_int32 y; /*!< top coordinate */ + l_int32 w; /*!< box width */ + l_int32 h; /*!< box height */ + l_uint32 refcount; /*!< reference count (1 if no clones) */ + +}; +typedef struct Box BOX; + +/*! Array of Box */ +struct Boxa +{ + l_int32 n; /*!< number of box in ptr array */ + l_int32 nalloc; /*!< number of box ptrs allocated */ + l_uint32 refcount; /*!< reference count (1 if no clones) */ + struct Box **box; /*!< box ptr array */ +}; +typedef struct Boxa BOXA; + +/*! Array of Boxa */ +struct Boxaa +{ + l_int32 n; /*!< number of boxa in ptr array */ + l_int32 nalloc; /*!< number of boxa ptrs allocated */ + struct Boxa **boxa; /*!< boxa ptr array */ +}; +typedef struct Boxaa BOXAA; + + +/*-------------------------------------------------------------------------* + * Array of points * + *-------------------------------------------------------------------------*/ +#define PTA_VERSION_NUMBER 1 /*!< Version for Pta serialization */ + +/*! Array of points */ +struct Pta +{ + l_int32 n; /*!< actual number of pts */ + l_int32 nalloc; /*!< size of allocated arrays */ + l_uint32 refcount; /*!< reference count (1 if no clones) */ + l_float32 *x, *y; /*!< arrays of floats */ +}; +typedef struct Pta PTA; + + +/*-------------------------------------------------------------------------* + * Array of Pta * + *-------------------------------------------------------------------------*/ + +/*! Array of Pta */ +struct Ptaa +{ + l_int32 n; /*!< number of pta in ptr array */ + l_int32 nalloc; /*!< number of pta ptrs allocated */ + struct Pta **pta; /*!< pta ptr array */ +}; +typedef struct Ptaa PTAA; + + +/*-------------------------------------------------------------------------* + * Pix accumulator container * + *-------------------------------------------------------------------------*/ + +/*! 
Pix accumulator container */ +struct Pixacc +{ + l_int32 w; /*!< array width */ + l_int32 h; /*!< array height */ + l_int32 offset; /*!< used to allow negative */ + /*!< intermediate results */ + struct Pix *pix; /*!< the 32 bit accumulator pix */ +}; +typedef struct Pixacc PIXACC; + + +/*-------------------------------------------------------------------------* + * Pix tiling * + *-------------------------------------------------------------------------*/ + +/*! Pix tiling */ +struct PixTiling +{ + struct Pix *pix; /*!< input pix (a clone) */ + l_int32 nx; /*!< number of tiles horizontally */ + l_int32 ny; /*!< number of tiles vertically */ + l_int32 w; /*!< tile width */ + l_int32 h; /*!< tile height */ + l_int32 xoverlap; /*!< overlap on left and right */ + l_int32 yoverlap; /*!< overlap on top and bottom */ + l_int32 strip; /*!< strip for paint; default is TRUE */ +}; +typedef struct PixTiling PIXTILING; + + +/*-------------------------------------------------------------------------* + * FPix: pix with float array * + *-------------------------------------------------------------------------*/ +#define FPIX_VERSION_NUMBER 2 /*!< Version for FPix serialization */ + +/*! Pix with float array */ +struct FPix +{ + l_int32 w; /*!< width in pixels */ + l_int32 h; /*!< height in pixels */ + l_int32 wpl; /*!< 32-bit words/line */ + l_uint32 refcount; /*!< reference count (1 if no clones) */ + l_int32 xres; /*!< image res (ppi) in x direction */ + /*!< (use 0 if unknown) */ + l_int32 yres; /*!< image res (ppi) in y direction */ + /*!< (use 0 if unknown) */ + l_float32 *data; /*!< the float image data */ +}; +typedef struct FPix FPIX; + +/*! Array of FPix */ +struct FPixa +{ + l_int32 n; /*!< number of fpix in ptr array */ + l_int32 nalloc; /*!< number of fpix ptrs allocated */ + l_uint32 refcount; /*!< reference count (1 if no clones) */ + struct FPix **fpix; /*!< the array of ptrs to fpix */ +}; +typedef struct FPixa FPIXA; + + +/*-------------------------------------------------------------------------* + * DPix: pix with double array * + *-------------------------------------------------------------------------*/ +#define DPIX_VERSION_NUMBER 2 /*!< Version for DPix serialization */ + +/*! Pix with double array */ +struct DPix +{ + l_int32 w; /*!< width in pixels */ + l_int32 h; /*!< height in pixels */ + l_int32 wpl; /*!< 32-bit words/line */ + l_uint32 refcount; /*!< reference count (1 if no clones) */ + l_int32 xres; /*!< image res (ppi) in x direction */ + /*!< (use 0 if unknown) */ + l_int32 yres; /*!< image res (ppi) in y direction */ + /*!< (use 0 if unknown) */ + l_float64 *data; /*!< the double image data */ +}; +typedef struct DPix DPIX; + + +/*-------------------------------------------------------------------------* + * PixComp: compressed pix * + *-------------------------------------------------------------------------*/ + +/*! 
Compressed Pix */ +struct PixComp +{ + l_int32 w; /*!< width in pixels */ + l_int32 h; /*!< height in pixels */ + l_int32 d; /*!< depth in bits */ + l_int32 xres; /*!< image res (ppi) in x direction */ + /*!< (use 0 if unknown) */ + l_int32 yres; /*!< image res (ppi) in y direction */ + /*!< (use 0 if unknown) */ + l_int32 comptype; /*!< compressed format (IFF_TIFF_G4, */ + /*!< IFF_PNG, IFF_JFIF_JPEG) */ + char *text; /*!< text string associated with pix */ + l_int32 cmapflag; /*!< flag (1 for cmap, 0 otherwise) */ + l_uint8 *data; /*!< the compressed image data */ + size_t size; /*!< size of the data array */ +}; +typedef struct PixComp PIXC; + + +/*-------------------------------------------------------------------------* + * PixaComp: array of compressed pix * + *-------------------------------------------------------------------------*/ +#define PIXACOMP_VERSION_NUMBER 2 /*!< Version for PixaComp serialization */ + +/*! Array of compressed pix */ +struct PixaComp +{ + l_int32 n; /*!< number of PixComp in ptr array */ + l_int32 nalloc; /*!< number of PixComp ptrs allocated */ + l_int32 offset; /*!< indexing offset into ptr array */ + struct PixComp **pixc; /*!< the array of ptrs to PixComp */ + struct Boxa *boxa; /*!< array of boxes */ +}; +typedef struct PixaComp PIXAC; + + +/*-------------------------------------------------------------------------* + * Access and storage flags * + *-------------------------------------------------------------------------*/ +/* + *
+ *  For Pix, Box, Pta and Numa, there are 3 standard methods for handling
+ *  the retrieval or insertion of a struct:
+ *     (1) direct insertion (Don't do this if there is another handle
+ *                           somewhere to this same struct!)
+ *     (2) copy (Always safe, sets up a refcount of 1 on the new object.
+ *               Can be undesirable if very large, such as an image or
+ *               an array of images.)
+ *     (3) clone (Makes another handle to the same struct, and bumps the
+ *                refcount up by 1.  Safe to do unless you're changing
+ *                data through one of the handles but don't want those
+ *                changes to be seen by the other handle.)
+ *
+ *  For Pixa and Boxa, which are structs that hold an array of clonable
+ *  structs, there is an additional method:
+ *     (4) copy-clone (Makes a new higher-level struct with a refcount
+ *                     of 1, but clones all the structs in the array.)
+ *
+ *  Unlike the other structs, when retrieving a string from an Sarray,
+ *  you are allowed to get a handle without a copy or clone (i.e., that
+ *  you don't own!).  You must not free or insert such a string!
+ *  Specifically, for an Sarray, the copyflag for retrieval is either:
+ *         L_COPY or L_NOCOPY
+ *  and for insertion, the copyflag is either:
+ *         L_COPY or one of {L_INSERT, L_NOCOPY} (the latter are equivalent
+ *                                                 for insertion)
+ * 
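A minimal sketch of flags (2) and (3) in terms of the Pix calls declared in allheaders.h (pixCreate, pixCopy, pixClone, pixDestroy); reading pix->refcount directly is only to make the counting visible.

#include <stdio.h>
#include "allheaders.h"

int main(void)
{
    PIX *pixs     = pixCreate(100, 100, 8);  /* refcount 1 */
    PIX *pixcopy  = pixCopy(NULL, pixs);     /* independent copy with its own refcount of 1 */
    PIX *pixclone = pixClone(pixs);          /* same struct; pixs->refcount is now 2 */

    printf("refcount after clone: %u\n", (unsigned)pixs->refcount);   /* 2 */

    pixDestroy(&pixclone);                   /* decrements the shared refcount to 1 */
    pixDestroy(&pixs);                       /* refcount reaches 0: image data is freed */
    pixDestroy(&pixcopy);                    /* frees the independent copy */
    return 0;
}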
+ */ + +/*! Access and storage flags */ +enum { + L_NOCOPY = 0, /*!< do not copy the object; do not delete the ptr */ + L_COPY = 1, /*!< make/use a copy of the object */ + L_CLONE = 2, /*!< make/use clone (ref count) of the object */ + L_COPY_CLONE = 3 /*!< make a new object and fill each object in the */ + /*!< array(s) with clones */ +}; +static const l_int32 L_INSERT = 0; /*!< stuff it in; no copy or clone */ + + +/*----------------------------------------------------------------------------* + * Sort flags * + *----------------------------------------------------------------------------*/ + +/*! Sort mode flags */ +enum { + L_SHELL_SORT = 1, /*!< use shell sort */ + L_BIN_SORT = 2 /*!< use bin sort */ +}; + +/*! Sort order flags */ +enum { + L_SORT_INCREASING = 1, /*!< sort in increasing order */ + L_SORT_DECREASING = 2 /*!< sort in decreasing order */ +}; + +/*! Sort type flags */ +enum { + L_SORT_BY_X = 1, /*!< sort box or c.c. by left edge location */ + L_SORT_BY_Y = 2, /*!< sort box or c.c. by top edge location */ + L_SORT_BY_RIGHT = 3, /*!< sort box or c.c. by right edge location */ + L_SORT_BY_BOT = 4, /*!< sort box or c.c. by bot edge location */ + L_SORT_BY_WIDTH = 5, /*!< sort box or c.c. by width */ + L_SORT_BY_HEIGHT = 6, /*!< sort box or c.c. by height */ + L_SORT_BY_MIN_DIMENSION = 7, /*!< sort box or c.c. by min dimension */ + L_SORT_BY_MAX_DIMENSION = 8, /*!< sort box or c.c. by max dimension */ + L_SORT_BY_PERIMETER = 9, /*!< sort box or c.c. by perimeter */ + L_SORT_BY_AREA = 10, /*!< sort box or c.c. by area */ + L_SORT_BY_ASPECT_RATIO = 11 /*!< sort box or c.c. by width/height ratio */ +}; + + +/*---------------------------------------------------------------------------* + * Blend flags * + *---------------------------------------------------------------------------*/ + +/*! Blend flags */ +enum { + L_BLEND_WITH_INVERSE = 1, /*!< add some of src inverse to itself */ + L_BLEND_TO_WHITE = 2, /*!< shift src colors towards white */ + L_BLEND_TO_BLACK = 3, /*!< shift src colors towards black */ + L_BLEND_GRAY = 4, /*!< blend src directly with blender */ + L_BLEND_GRAY_WITH_INVERSE = 5 /*!< add amount of src inverse to itself, */ + /*!< based on blender pix value */ +}; + +enum { + L_PAINT_LIGHT = 1, /*!< colorize non-black pixels */ + L_PAINT_DARK = 2 /*!< colorize non-white pixels */ +}; + + +/*-------------------------------------------------------------------------* + * Graphics pixel setting * + *-------------------------------------------------------------------------*/ + +/*! Graphics pixel setting */ +enum { + L_SET_PIXELS = 1, /*!< set all bits in each pixel to 1 */ + L_CLEAR_PIXELS = 2, /*!< set all bits in each pixel to 0 */ + L_FLIP_PIXELS = 3 /*!< flip all bits in each pixel */ +}; + + +/*-------------------------------------------------------------------------* + * Size and location filter flags * + *-------------------------------------------------------------------------*/ + +/*! Location filter flags */ +enum { + L_SELECT_WIDTH = 1, /*!< width must satisfy constraint */ + L_SELECT_HEIGHT = 2, /*!< height must satisfy constraint */ + L_SELECT_XVAL = 3, /*!< x value satisfy constraint */ + L_SELECT_YVAL = 4, /*!< y value must satisfy constraint */ + L_SELECT_IF_EITHER = 5, /*!< either width or height (or xval */ + /*!< or yval) can satisfy */ + L_SELECT_IF_BOTH = 6 /*!< both width and height (or xval */ + /*!< and yval must satisfy */ +}; + +/*! 
Size filter flags */ +enum { + L_SELECT_IF_LT = 1, /*!< save if value is less than threshold */ + L_SELECT_IF_GT = 2, /*!< save if value is more than threshold */ + L_SELECT_IF_LTE = 3, /*!< save if value is <= to the threshold */ + L_SELECT_IF_GTE = 4 /*!< save if value is >= to the threshold */ +}; + + +/*-------------------------------------------------------------------------* + * Color component selection flags * + *-------------------------------------------------------------------------*/ + +/*! Color component selection flags */ +enum { + L_SELECT_RED = 1, /*!< use red component */ + L_SELECT_GREEN = 2, /*!< use green component */ + L_SELECT_BLUE = 3, /*!< use blue component */ + L_SELECT_MIN = 4, /*!< use min color component */ + L_SELECT_MAX = 5, /*!< use max color component */ + L_SELECT_AVERAGE = 6, /*!< use average of color components */ + L_SELECT_HUE = 7, /*!< use hue value (in HSV color space) */ + L_SELECT_SATURATION = 8 /*!< use saturation value (in HSV space) */ +}; + + +/*-------------------------------------------------------------------------* + * 16-bit conversion flags * + *-------------------------------------------------------------------------*/ + +/*! 16-bit conversion flags */ +enum { + L_LS_BYTE = 1, /*!< use LSB */ + L_MS_BYTE = 2, /*!< use MSB */ + L_AUTO_BYTE = 3, /*!< use LSB if max(val) < 256; else MSB */ + L_CLIP_TO_FF = 4, /*!< use max(val, 255) */ + L_LS_TWO_BYTES = 5, /*!< use two LSB */ + L_MS_TWO_BYTES = 6, /*!< use two MSB */ + L_CLIP_TO_FFFF = 7 /*!< use max(val, 65535) */ +}; + + +/*-------------------------------------------------------------------------* + * Rotate and shear flags * + *-------------------------------------------------------------------------*/ + +/*! Rotate flags */ +enum { + L_ROTATE_AREA_MAP = 1, /*!< use area map rotation, if possible */ + L_ROTATE_SHEAR = 2, /*!< use shear rotation */ + L_ROTATE_SAMPLING = 3 /*!< use sampling */ +}; + +/*! Background flags */ +enum { + L_BRING_IN_WHITE = 1, /*!< bring in white pixels from the outside */ + L_BRING_IN_BLACK = 2 /*!< bring in black pixels from the outside */ +}; + +/*! Shear flags */ +enum { + L_SHEAR_ABOUT_CORNER = 1, /*!< shear image about UL corner */ + L_SHEAR_ABOUT_CENTER = 2 /*!< shear image about center */ +}; + + +/*-------------------------------------------------------------------------* + * Affine transform order flags * + *-------------------------------------------------------------------------*/ + +/*! Affine transform order flags */ +enum { + L_TR_SC_RO = 1, /*!< translate, scale, rotate */ + L_SC_RO_TR = 2, /*!< scale, rotate, translate */ + L_RO_TR_SC = 3, /*!< rotate, translate, scale */ + L_TR_RO_SC = 4, /*!< translate, rotate, scale */ + L_RO_SC_TR = 5, /*!< rotate, scale, translate */ + L_SC_TR_RO = 6 /*!< scale, translate, rotate */ +}; + + +/*-------------------------------------------------------------------------* + * Grayscale filling flags * + *-------------------------------------------------------------------------*/ + +/*! Grayscale filling flags */ +enum { + L_FILL_WHITE = 1, /*!< fill white pixels (e.g, in fg map) */ + L_FILL_BLACK = 2 /*!< fill black pixels (e.g., in bg map) */ +}; + + +/*-------------------------------------------------------------------------* + * Flags for setting to white or black * + *-------------------------------------------------------------------------*/ + +/*! 
Flags for setting to white or black */ +enum { + L_SET_WHITE = 1, /*!< set pixels to white */ + L_SET_BLACK = 2 /*!< set pixels to black */ +}; + + +/*-------------------------------------------------------------------------* + * Flags for getting white or black value * + *-------------------------------------------------------------------------*/ + +/*! Flags for getting white or black value */ +enum { + L_GET_WHITE_VAL = 1, /*!< get white pixel value */ + L_GET_BLACK_VAL = 2 /*!< get black pixel value */ +}; + + +/*-------------------------------------------------------------------------* + * Flags for 8 bit and 16 bit pixel sums * + *-------------------------------------------------------------------------*/ + +/*! Flags for 8 bit and 16 bit pixel sums */ +enum { + L_WHITE_IS_MAX = 1, /*!< white pixels are 0xff or 0xffff; black are 0 */ + L_BLACK_IS_MAX = 2 /*!< black pixels are 0xff or 0xffff; white are 0 */ +}; + + +/*-------------------------------------------------------------------------* + * Dither parameters * + * If within this grayscale distance from black or white, * + * do not propagate excess or deficit to neighboring pixels. * + *-------------------------------------------------------------------------*/ + +/*! Dither parameters */ +enum { + DEFAULT_CLIP_LOWER_1 = 10, /*!< dist to black with no prop; 1 bpp */ + DEFAULT_CLIP_UPPER_1 = 10, /*!< dist to black with no prop; 1 bpp */ + DEFAULT_CLIP_LOWER_2 = 5, /*!< dist to black with no prop; 2 bpp */ + DEFAULT_CLIP_UPPER_2 = 5 /*!< dist to black with no prop; 2 bpp */ +}; + + +/*-------------------------------------------------------------------------* + * Distance flags * + *-------------------------------------------------------------------------*/ + +/*! Distance flags */ +enum { + L_MANHATTAN_DISTANCE = 1, /*!< L1 distance (e.g., in color space) */ + L_EUCLIDEAN_DISTANCE = 2 /*!< L2 distance */ +}; + + +/*-------------------------------------------------------------------------* + * Statistical measures * + *-------------------------------------------------------------------------*/ + +/*! Statistical measures */ +enum { + L_MEAN_ABSVAL = 1, /*!< average of abs values */ + L_MEDIAN_VAL = 2, /*!< median value of set */ + L_MODE_VAL = 3, /*!< mode value of set */ + L_MODE_COUNT = 4, /*!< mode count of set */ + L_ROOT_MEAN_SQUARE = 5, /*!< rms of values */ + L_STANDARD_DEVIATION = 6, /*!< standard deviation from mean */ + L_VARIANCE = 7 /*!< variance of values */ +}; + + +/*-------------------------------------------------------------------------* + * Set selection flags * + *-------------------------------------------------------------------------*/ + +/*! Set selection flags */ +enum { + L_CHOOSE_CONSECUTIVE = 1, /*!< select 'n' consecutive */ + L_CHOOSE_SKIP_BY = 2 /*!< select at intervals of 'n' */ +}; + + +/*-------------------------------------------------------------------------* + * Text orientation flags * + *-------------------------------------------------------------------------*/ + +/*! 
Text orientation flags */ +enum { + L_TEXT_ORIENT_UNKNOWN = 0, /*!< low confidence on text orientation */ + L_TEXT_ORIENT_UP = 1, /*!< portrait, text rightside-up */ + L_TEXT_ORIENT_LEFT = 2, /*!< landscape, text up to left */ + L_TEXT_ORIENT_DOWN = 3, /*!< portrait, text upside-down */ + L_TEXT_ORIENT_RIGHT = 4 /*!< landscape, text up to right */ +}; + + +/*-------------------------------------------------------------------------* + * Edge orientation flags * + *-------------------------------------------------------------------------*/ + +/*! Edge orientation flags */ +enum { + L_HORIZONTAL_EDGES = 0, /*!< filters for horizontal edges */ + L_VERTICAL_EDGES = 1, /*!< filters for vertical edges */ + L_ALL_EDGES = 2 /*!< filters for all edges */ +}; + + +/*-------------------------------------------------------------------------* + * Line orientation flags * + *-------------------------------------------------------------------------*/ + +/*! Line orientation flags */ +enum { + L_HORIZONTAL_LINE = 0, /*!< horizontal line */ + L_POS_SLOPE_LINE = 1, /*!< 45 degree line with positive slope */ + L_VERTICAL_LINE = 2, /*!< vertical line */ + L_NEG_SLOPE_LINE = 3, /*!< 45 degree line with negative slope */ + L_OBLIQUE_LINE = 4 /*!< neither horizontal nor vertical */ +}; + + +/*-------------------------------------------------------------------------* + * Scan direction flags * + *-------------------------------------------------------------------------*/ + +/*! Scan direction flags */ +enum { + L_FROM_LEFT = 0, /*!< scan from left */ + L_FROM_RIGHT = 1, /*!< scan from right */ + L_FROM_TOP = 2, /*!< scan from top */ + L_FROM_BOT = 3, /*!< scan from bottom */ + L_SCAN_NEGATIVE = 4, /*!< scan in negative direction */ + L_SCAN_POSITIVE = 5, /*!< scan in positive direction */ + L_SCAN_BOTH = 6, /*!< scan in both directions */ + L_SCAN_HORIZONTAL = 7, /*!< horizontal scan (direction unimportant) */ + L_SCAN_VERTICAL = 8 /*!< vertical scan (direction unimportant) */ +}; + + +/*-------------------------------------------------------------------------* + * Box size adjustment and location flags * + *-------------------------------------------------------------------------*/ + +/*! Box size adjustment and location flags */ +enum { + L_ADJUST_SKIP = 0, /*!< do not adjust */ + L_ADJUST_LEFT = 1, /*!< adjust left edge */ + L_ADJUST_RIGHT = 2, /*!< adjust right edge */ + L_ADJUST_LEFT_AND_RIGHT = 3, /*!< adjust both left and right edges */ + L_ADJUST_TOP = 4, /*!< adjust top edge */ + L_ADJUST_BOT = 5, /*!< adjust bottom edge */ + L_ADJUST_TOP_AND_BOT = 6, /*!< adjust both top and bottom edges */ + L_ADJUST_CHOOSE_MIN = 7, /*!< choose the min median value */ + L_ADJUST_CHOOSE_MAX = 8, /*!< choose the max median value */ + L_SET_LEFT = 9, /*!< set left side to a given value */ + L_SET_RIGHT = 10, /*!< set right side to a given value */ + L_SET_TOP = 11, /*!< set top side to a given value */ + L_SET_BOT = 12, /*!< set bottom side to a given value */ + L_GET_LEFT = 13, /*!< get left side location */ + L_GET_RIGHT = 14, /*!< get right side location */ + L_GET_TOP = 15, /*!< get top side location */ + L_GET_BOT = 16 /*!< get bottom side location */ +}; + + +/*-------------------------------------------------------------------------* + * Flags for selecting box boundaries from two choices * + *-------------------------------------------------------------------------*/ + +/*! 
Flags for selecting box boundaries from two choices */ +enum { + L_USE_MINSIZE = 1, /*!< use boundaries giving min size */ + L_USE_MAXSIZE = 2, /*!< use boundaries giving max size */ + L_SUB_ON_BIG_DIFF = 3, /*!< substitute boundary if big abs diff */ + L_USE_CAPPED_MIN = 4, /*!< substitute boundary with capped min */ + L_USE_CAPPED_MAX = 5 /*!< substitute boundary with capped max */ +}; + +/*-------------------------------------------------------------------------* + * Handling overlapping bounding boxes in boxa * + *-------------------------------------------------------------------------*/ + +/*! Handling overlapping bounding boxes in Boxa */ +enum { + L_COMBINE = 1, /*!< resize to bounding region; remove smaller */ + L_REMOVE_SMALL = 2 /*!< only remove smaller */ +}; + +/*-------------------------------------------------------------------------* + * Flags for replacing invalid boxes * + *-------------------------------------------------------------------------*/ + +/*! Flags for replacing invalid boxes */ +enum { + L_USE_ALL_BOXES = 1, /*!< consider all boxes in the sequence */ + L_USE_SAME_PARITY_BOXES = 2 /*!< consider boxes with the same parity */ +}; + +/*-------------------------------------------------------------------------* + * Horizontal warp * + *-------------------------------------------------------------------------*/ + +/*! Horizonal warp direction */ +enum { + L_WARP_TO_LEFT = 1, /*!< increasing stretch or contraction to left */ + L_WARP_TO_RIGHT = 2 /*!< increasing stretch or contraction to right */ +}; + +/*! Horizonal warp stretch mode */ +enum { + L_LINEAR_WARP = 1, /*!< stretch or contraction grows linearly */ + L_QUADRATIC_WARP = 2 /*!< stretch or contraction grows quadratically */ +}; + + +/*-------------------------------------------------------------------------* + * Pixel selection for resampling * + *-------------------------------------------------------------------------*/ + +/*! Pixel selection for resampling */ +enum { + L_INTERPOLATED = 1, /*!< linear interpolation from src pixels */ + L_SAMPLED = 2 /*!< nearest src pixel sampling only */ +}; + + +/*-------------------------------------------------------------------------* + * Thinning flags * + *-------------------------------------------------------------------------*/ + +/*! Thinning flags */ +enum { + L_THIN_FG = 1, /*!< thin foreground of 1 bpp image */ + L_THIN_BG = 2 /*!< thin background of 1 bpp image */ +}; + + +/*-------------------------------------------------------------------------* + * Runlength flags * + *-------------------------------------------------------------------------*/ + +/*! Runlength flags */ +enum { + L_HORIZONTAL_RUNS = 0, /*!< determine runlengths of horizontal runs */ + L_VERTICAL_RUNS = 1 /*!< determine runlengths of vertical runs */ +}; + + +/*-------------------------------------------------------------------------* + * Edge filter flags * + *-------------------------------------------------------------------------*/ + +/*! Edge filter flags */ +enum { + L_SOBEL_EDGE = 1, /*!< Sobel edge filter */ + L_TWO_SIDED_EDGE = 2 /*!< Two-sided edge filter */ +}; + + +/*-------------------------------------------------------------------------* + * Subpixel color component ordering in LCD display * + *-------------------------------------------------------------------------*/ + +/*! 
Subpixel color component ordering in LC display */ +enum { + L_SUBPIXEL_ORDER_RGB = 1, /*!< sensor order left-to-right RGB */ + L_SUBPIXEL_ORDER_BGR = 2, /*!< sensor order left-to-right BGR */ + L_SUBPIXEL_ORDER_VRGB = 3, /*!< sensor order top-to-bottom RGB */ + L_SUBPIXEL_ORDER_VBGR = 4 /*!< sensor order top-to-bottom BGR */ +}; + + +/*-------------------------------------------------------------------------* + * HSV histogram flags * + *-------------------------------------------------------------------------*/ + +/*! HSV histogram flags */ +enum { + L_HS_HISTO = 1, /*!< Use hue-saturation histogram */ + L_HV_HISTO = 2, /*!< Use hue-value histogram */ + L_SV_HISTO = 3 /*!< Use saturation-value histogram */ +}; + + +/*-------------------------------------------------------------------------* + * Region flags (inclusion, exclusion) * + *-------------------------------------------------------------------------*/ + +/*! Region flags (inclusion, exclusion) */ +enum { + L_INCLUDE_REGION = 1, /*!< Use hue-saturation histogram */ + L_EXCLUDE_REGION = 2 /*!< Use hue-value histogram */ +}; + + +/*-------------------------------------------------------------------------* + * Flags for adding text to a pix * + *-------------------------------------------------------------------------*/ + +/*! Flags for adding text to a Pix */ +enum { + L_ADD_ABOVE = 1, /*!< Add text above the image */ + L_ADD_BELOW = 2, /*!< Add text below the image */ + L_ADD_LEFT = 3, /*!< Add text to the left of the image */ + L_ADD_RIGHT = 4, /*!< Add text to the right of the image */ + L_ADD_AT_TOP = 5, /*!< Add text over the top of the image */ + L_ADD_AT_BOT = 6, /*!< Add text over the bottom of the image */ + L_ADD_AT_LEFT = 7, /*!< Add text over left side of the image */ + L_ADD_AT_RIGHT = 8 /*!< Add text over right side of the image */ +}; + + +/*-------------------------------------------------------------------------* + * Flags for plotting on a pix * + *-------------------------------------------------------------------------*/ + +/*! Flags for plotting on a Pix */ +enum { + L_PLOT_AT_TOP = 1, /*!< Plot horizontally at top */ + L_PLOT_AT_MID_HORIZ = 2, /*!< Plot horizontally at middle */ + L_PLOT_AT_BOT = 3, /*!< Plot horizontally at bottom */ + L_PLOT_AT_LEFT = 4, /*!< Plot vertically at left */ + L_PLOT_AT_MID_VERT = 5, /*!< Plot vertically at middle */ + L_PLOT_AT_RIGHT = 6 /*!< Plot vertically at right */ +}; + + +/*-------------------------------------------------------------------------* + * Flags for selecting display program * + *-------------------------------------------------------------------------*/ + +/*! Flags for selecting display program */ +enum { + L_DISPLAY_WITH_XZGV = 1, /*!< Use xzgv with pixDisplay() */ + L_DISPLAY_WITH_XLI = 2, /*!< Use xli with pixDisplay() */ + L_DISPLAY_WITH_XV = 3, /*!< Use xv with pixDisplay() */ + L_DISPLAY_WITH_IV = 4, /*!< Use irfvanview (win) with pixDisplay() */ + L_DISPLAY_WITH_OPEN = 5 /*!< Use open (apple) with pixDisplay() */ +}; + +/*-------------------------------------------------------------------------* + * Flag(s) used in the 'special' pix field for non-default operations * + * - 0 is default for chroma sampling in jpeg * + * - 10-19 are used for zlib compression in png write * + * - 4 and 8 are used for specifying connectivity in labelling * + *-------------------------------------------------------------------------*/ + +/*! 
Flags used in Pix::special */ +enum { + L_NO_CHROMA_SAMPLING_JPEG = 1 /*!< Write full resolution chroma */ +}; + + +/*-------------------------------------------------------------------------* + * Handling negative values in conversion to unsigned int * + *-------------------------------------------------------------------------*/ + +/*! Handling negative values in conversion to unsigned int */ +enum { + L_CLIP_TO_ZERO = 1, /*!< Clip negative values to 0 */ + L_TAKE_ABSVAL = 2 /*!< Convert to positive using L_ABS() */ +}; + + +/*-------------------------------------------------------------------------* + * Relative to zero flags * + *-------------------------------------------------------------------------*/ + +/*! Relative to zero flags */ +enum { + L_LESS_THAN_ZERO = 1, /*!< Choose values less than zero */ + L_EQUAL_TO_ZERO = 2, /*!< Choose values equal to zero */ + L_GREATER_THAN_ZERO = 3 /*!< Choose values greater than zero */ +}; + + +/*-------------------------------------------------------------------------* + * Flags for adding or removing traling slash from string * + *-------------------------------------------------------------------------*/ + +/*! Flags for adding or removing traling slash from string */ +enum { + L_ADD_TRAIL_SLASH = 1, /*!< Add trailing slash to string */ + L_REMOVE_TRAIL_SLASH = 2 /*!< Remove trailing slash from string */ +}; + + +/*-------------------------------------------------------------------------* + * Pix allocator and deallocator function types * + *-------------------------------------------------------------------------*/ +/*! Allocator function type */ +typedef void *(*alloc_fn)(size_t); + +/*! Deallocator function type */ +typedef void (*dealloc_fn)(void *); + + +#endif /* LEPTONICA_PIX_H */ diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/ptra.h b/third_party/leptonica/uos/sw_64/include/leptonica/ptra.h new file mode 100644 index 00000000..2de841cc --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/ptra.h @@ -0,0 +1,95 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *====================================================================*/ + +#ifndef LEPTONICA_PTRA_H +#define LEPTONICA_PTRA_H + +/*! + * \file ptra.h + * + *
+ *  Contains the following structs:
+ *      struct L_Ptra
+ *      struct L_Ptraa
+ *
+ *  Contains definitions for:
+ *      L_Ptra compaction flags for removal
+ *      L_Ptra shifting flags for insert
+ *      L_Ptraa accessor flags
+ * 
+ */ + + +/*------------------------------------------------------------------------* + * Generic Ptr Array Structs * + *------------------------------------------------------------------------*/ + + /*! Generic pointer array */ +struct L_Ptra +{ + l_int32 nalloc; /*!< size of allocated ptr array */ + l_int32 imax; /*!< greatest valid index */ + l_int32 nactual; /*!< actual number of stored elements */ + void **array; /*!< ptr array */ +}; +typedef struct L_Ptra L_PTRA; + + + /*! Array of generic pointer arrays */ +struct L_Ptraa +{ + l_int32 nalloc; /*!< size of allocated ptr array */ + struct L_Ptra **ptra; /*!< array of ptra */ +}; +typedef struct L_Ptraa L_PTRAA; + + + +/*------------------------------------------------------------------------* + * Array flags * + *------------------------------------------------------------------------*/ + + /*! Flags for removal from L_Ptra */ +enum { + L_NO_COMPACTION = 1, /*!< null the pointer only */ + L_COMPACTION = 2 /*!< compact the array */ +}; + + /*! Flags for insertion into L_Ptra */ +enum { + L_AUTO_DOWNSHIFT = 0, /*!< choose based on number of holes */ + L_MIN_DOWNSHIFT = 1, /*!< downshifts min # of ptrs below insert */ + L_FULL_DOWNSHIFT = 2 /*!< downshifts all ptrs below insert */ +}; + + /*! Accessor flags for L_Ptraa */ +enum { + L_HANDLE_ONLY = 0, /*!< ptr to L_Ptra; caller can inspect only */ + L_REMOVE = 1 /*!< caller owns; destroy or save in L_Ptraa */ +}; + + +#endif /* LEPTONICA_PTRA_H */ diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/queue.h b/third_party/leptonica/uos/sw_64/include/leptonica/queue.h new file mode 100644 index 00000000..fd380e83 --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/queue.h @@ -0,0 +1,77 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + +#ifndef LEPTONICA_QUEUE_H +#define LEPTONICA_QUEUE_H + +/*! + * \file queue.h + * + *
+ *      Expandable pointer queue for arbitrary void* data.
+ *
+ *      The L_Queue is a fifo that implements a queue of void* pointers.
+ *      It can be used to hold a queue of any type of struct.
+ *
+ *      Internally, it maintains two counters:
+ *          nhead:  location of head (in ptrs) from the beginning
+ *                  of the array.
+ *          nelem:  number of ptr elements stored in the queue.
+ *
+ *      The element at the head of the queue, which is the next to
+ *      be removed, is array[nhead].  The location at the tail of the
+ *      queue to which the next element will be added is
+ *      array[nhead + nelem].
+ *
+ *      As items are added to the queue, nelem increases.
+ *      As items are removed, nhead increases and nelem decreases.
+ *      Any time the tail reaches the end of the allocated array,
+ *      all the pointers are shifted to the left, so that the head
+ *      is at the beginning of the array.
+ *      If the array becomes more than 3/4 full, it doubles in size.
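+ *
+ *      For illustration only, a minimal caller sketch using the queue
+ *      accessors declared in allheaders.h (lqueueCreate, lqueueAdd,
+ *      lqueueRemove, lqueueDestroy); this is an assumed usage pattern,
+ *      not part of this header:
+ *          L_QUEUE *lq = lqueueCreate(0);    // 0: use default initial size
+ *          lqueueAdd(lq, (void *)item);      // item: any caller data; nelem grows
+ *          void *next = lqueueRemove(lq);    // head item; nhead up, nelem down
+ *          lqueueDestroy(&lq, 0);            // 0: do not free stored items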
+ *
+ *      The auxiliary stack can be used in a wrapper for re-using
+ *      items popped from the queue.  It is not made by default.
+ *
+ *      For further implementation details, see queue.c.
+ * 
+ */ + +/*! Expandable pointer queue for arbitrary void* data */ +struct L_Queue +{ + l_int32 nalloc; /*!< size of allocated ptr array */ + l_int32 nhead; /*!< location of head (in ptrs) from the */ + /*!< beginning of the array */ + l_int32 nelem; /*!< number of elements stored in the queue */ + void **array; /*!< ptr array */ + struct L_Stack *stack; /*!< auxiliary stack */ + +}; +typedef struct L_Queue L_QUEUE; + + +#endif /* LEPTONICA_QUEUE_H */ diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/rbtree.h b/third_party/leptonica/uos/sw_64/include/leptonica/rbtree.h new file mode 100644 index 00000000..a7fbffef --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/rbtree.h @@ -0,0 +1,90 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + +/* + * Modified from the excellent code here: + * http://en.literateprograms.org/Red-black_tree_(C)?oldid=19567 + * which has been placed in the public domain under the Creative Commons + * CC0 1.0 waiver (http://creativecommons.org/publicdomain/zero/1.0/). + * + * When the key is generated from a hash (e.g., string --> uint64), + * there is always the possibility of having collisions, but to make + * the collision probability very low requires using a large hash. + * For that reason, the key types are 64 bit quantities, which will result + * in a negligible probabililty of collisions for millions of hashed values. + * Using 8 byte keys instead of 4 byte keys requires a little more + * storage, but the simplification in being able to ignore collisions + * with the red-black trees for most applications is worth it. + */ + +#ifndef LEPTONICA_RBTREE_H +#define LEPTONICA_RBTREE_H + + /*! The three valid key types for red-black trees, maps and sets. */ +enum { + L_INT_TYPE = 1, + L_UINT_TYPE = 2, + L_FLOAT_TYPE = 3 +}; + + /*! + * Storage for keys and values for red-black trees, maps and sets. + *
+     * Note:
+     *   (1) Keys and values of the valid key types are all 64-bit
+     *   (2) (void *) can be used for values but not for keys.
+     * 
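+     *   For illustration, a sketch of how this union is used as a key and
+     *   value with the associative-map wrappers (l_amapCreate, l_amapInsert,
+     *   l_amapFind) declared in allheaders.h; an assumed usage pattern,
+     *   not part of this header:
+     *       L_AMAP *m = l_amapCreate(L_INT_TYPE);
+     *       RB_TYPE key, value, *pval;
+     *       key.itype = 42;  value.itype = 7;
+     *       l_amapInsert(m, key, value);
+     *       pval = l_amapFind(m, key);   // pval->itype == 7 if the key is found
+     *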
+ */ +union Rb_Type { + l_int64 itype; + l_uint64 utype; + l_float64 ftype; + void *ptype; +}; +typedef union Rb_Type RB_TYPE; + +struct L_Rbtree { + struct L_Rbtree_Node *root; + l_int32 keytype; +}; +typedef struct L_Rbtree L_RBTREE; +typedef struct L_Rbtree L_AMAP; /* hide underlying implementation for map */ +typedef struct L_Rbtree L_ASET; /* hide underlying implementation for set */ + +struct L_Rbtree_Node { + union Rb_Type key; + union Rb_Type value; + struct L_Rbtree_Node *left; + struct L_Rbtree_Node *right; + struct L_Rbtree_Node *parent; + l_int32 color; +}; +typedef struct L_Rbtree_Node L_RBTREE_NODE; +typedef struct L_Rbtree_Node L_AMAP_NODE; /* hide tree implementation */ +typedef struct L_Rbtree_Node L_ASET_NODE; /* hide tree implementation */ + + +#endif /* LEPTONICA_RBTREE_H */ diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/readbarcode.h b/third_party/leptonica/uos/sw_64/include/leptonica/readbarcode.h new file mode 100644 index 00000000..7452f236 --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/readbarcode.h @@ -0,0 +1,239 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + +#ifndef LEPTONICA_READBARCODE_H +#define LEPTONICA_READBARCODE_H + + /* ----------------------------------------------------------------- * + * Flags for method of extracting barcode widths * + * ----------------------------------------------------------------- */ + +/*! Flags for method of extracting barcode widths */ +enum { + L_USE_WIDTHS = 1, /*!< use histogram of barcode widths */ + L_USE_WINDOWS = 2 /*!< find best window for decoding transitions */ +}; + + /* ----------------------------------------------------------------- * + * Flags for barcode formats * + * These are used both to identify a barcode format and to identify * + * the decoding method to use on a barcode. * + * ----------------------------------------------------------------- */ + +/*! 
Flags for barcode formats */ +enum { + L_BF_UNKNOWN = 0, /*!< unknown format */ + L_BF_ANY = 1, /*!< try decoding with all known formats */ + L_BF_CODE128 = 2, /*!< decode with Code128 format */ + L_BF_EAN8 = 3, /*!< decode with EAN8 format */ + L_BF_EAN13 = 4, /*!< decode with EAN13 format */ + L_BF_CODE2OF5 = 5, /*!< decode with Code 2 of 5 format */ + L_BF_CODEI2OF5 = 6, /*!< decode with Interleaved 2 of 5 format */ + L_BF_CODE39 = 7, /*!< decode with Code39 format */ + L_BF_CODE93 = 8, /*!< decode with Code93 format */ + L_BF_CODABAR = 9, /*!< decode with Code93 format */ + L_BF_UPCA = 10 /*!< decode with UPC A format */ +}; + + /* ----------------------------------------------------------------- * + * Currently supported formats * + * Update these arrays as new formats are added. * + * ----------------------------------------------------------------- */ + +/*! Currently supported formats */ +static const l_int32 SupportedBarcodeFormat[] = { + L_BF_CODE2OF5, + L_BF_CODEI2OF5, + L_BF_CODE93, + L_BF_CODE39, + L_BF_CODABAR, + L_BF_UPCA, + L_BF_EAN13 +}; + +/*! Currently supported format names */ +static const char *SupportedBarcodeFormatName[] = { + "Code2of5", + "CodeI2of5", + "Code93", + "Code39", + "Codabar", + "Upca", + "Ean13" +}; +static const l_int32 NumSupportedBarcodeFormats = 7; /*!< Number of formats */ + + + /* ----------------------------------------------------------------- * + * Code 2 of 5 symbology * + * ----------------------------------------------------------------- */ +static const char *Code2of5[] = { + "111121211", "211111112", "112111112", "212111111", /* 0 - 3 */ + "111121112", "211121111", "112121111", "111111212", /* 4 - 7 */ + "211111211", "112111211", /* 8 - 9 */ + "21211", "21112" /* Start, Stop */ +}; + +static const l_int32 C25_START = 10; +static const l_int32 C25_STOP = 11; + + + /* ----------------------------------------------------------------- * + * Code Interleaved 2 of 5 symbology * + * ----------------------------------------------------------------- */ +static const char *CodeI2of5[] = { + "11221", "21112", "12112", "22111", "11212", /* 0 - 4 */ + "21211", "12211", "11122", "21121", "12121", /* 5 - 9 */ + "1111", "211" /* start, stop */ +}; + +static const l_int32 CI25_START = 10; +static const l_int32 CI25_STOP = 11; + + + /* ----------------------------------------------------------------- * + * Code 93 symbology * + * ----------------------------------------------------------------- */ +static const char *Code93[] = { + "131112", "111213", "111312", "111411", "121113", /* 0: 0 - 4 */ + "121212", "121311", "111114", "131211", "141111", /* 5: 5 - 9 */ + "211113", "211212", "211311", "221112", "221211", /* 10: A - E */ + "231111", "112113", "112212", "112311", "122112", /* 15: F - J */ + "132111", "111123", "111222", "111321", "121122", /* 20: K - O */ + "131121", "212112", "212211", "211122", "211221", /* 25: P - T */ + "221121", "222111", "112122", "112221", "122121", /* 30: U - Y */ + "123111", "121131", "311112", "311211", "321111", /* 35: Z,-,.,SP,$ */ + "112131", "113121", "211131", "131221", "312111", /* 40: /,+,%,($),(%) */ + "311121", "122211", "111141" /* 45: (/),(+), Start */ +}; + + /* Use "[]{}#" to represent special codes 43-47 */ +static const char Code93Val[] = + "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ-. 
$/+%[]{}#"; + +static const l_int32 C93_START = 47; +static const l_int32 C93_STOP = 47; + + + /* ----------------------------------------------------------------- * + * Code 39 symbology * + * ----------------------------------------------------------------- */ +static const char *Code39[] = { + "111221211", "211211112", "112211112", "212211111", /* 0: 0 - 3 */ + "111221112", "211221111", "112221111", "111211212", /* 4: 4 - 7 */ + "211211211", "112211211", "211112112", "112112112", /* 8: 8 - B */ + "212112111", "111122112", "211122111", "112122111", /* 12: C - F */ + "111112212", "211112211", "112112211", "111122211", /* 16: G - J */ + "211111122", "112111122", "212111121", "111121122", /* 20: K - N */ + "211121121", "112121121", "111111222", "211111221", /* 24: O - R */ + "112111221", "111121221", "221111112", "122111112", /* 28: S - V */ + "222111111", "121121112", "221121111", "122121111", /* 32: W - Z */ + "121111212", "221111211", "122111211", "121212111", /* 36: -,.,SP,$ */ + "121211121", "121112121", "111212121", "121121211" /* 40: /,+,%,* */ +}; + + /* Use "*" to represent the Start and Stop codes (43) */ +static const char Code39Val[] = + "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ-. $/+%*"; + +static const l_int32 C39_START = 43; +static const l_int32 C39_STOP = 43; + + + /* ----------------------------------------------------------------- * + * Codabar symbology * + * ----------------------------------------------------------------- */ +static const char *Codabar[] = { + "1111122", "1111221", "1112112", "2211111", "1121121", /* 0: 0 - 4 */ + "2111121", "1211112", "1211211", "1221111", "2112111", /* 5: 5 - 9 */ + "1112211", "1122111", "2111212", "2121112", "2121211", /* 10: -,$,:,/,. */ + "1121212", "1122121", "1212112", "1112122", "1112221" /* 15: +,A,B,C,D */ +}; + + /* Ascii representations for codes 16-19: (A or T), (B or N), (C or *), + * (D or E). These are used in pairs for the Start and Stop codes. 
*/ +static const char CodabarVal[] = "0123456789-$:/.+ABCD"; + + + /* ----------------------------------------------------------------- * + * UPC-A symbology * + * ----------------------------------------------------------------- */ +static const char *Upca[] = { + "3211", "2221", "2122", "1411", "1132", /* 0: 0 - 4 */ + "1231", "1114", "1312", "1213", "3112", /* 5: 5 - 9 */ + "111", "111", "11111" /* 10: Start, Stop, Mid */ +}; + +static const l_int32 UPCA_START = 10; +static const l_int32 UPCA_STOP = 11; +static const l_int32 UPCA_MID = 12; + + + /* ----------------------------------------------------------------- * + * Code128 symbology * + * ----------------------------------------------------------------- */ +static const char *Code128[] = { + "212222", "222122", "222221", "121223", "121322", /* 0 - 4 */ + "131222", "122213", "122312", "132212", "221213", /* 5 - 9 */ + "221312", "231212", "112232", "122132", "122231", /* 10 - 14 */ + "113222", "123122", "123221", "223211", "221132", /* 15 - 19 */ + "221231", "213212", "223112", "312131", "311222", /* 20 - 24 */ + "321122", "321221", "312212", "322112", "322211", /* 25 - 29 */ + "212123", "212321", "232121", "111323", "131123", /* 30 - 34 */ + "131321", "112313", "132113", "132311", "211313", /* 35 - 39 */ + "231113", "231311", "112133", "112331", "132131", /* 40 - 44 */ + "113123", "113321", "133121", "313121", "211331", /* 45 - 49 */ + "231131", "213113", "213311", "213131", "311123", /* 50 - 54 */ + "311321", "331121", "312113", "312311", "332111", /* 55 - 59 */ + "314111", "221411", "431111", "111224", "111422", /* 60 - 64 */ + "121124", "121421", "141122", "141221", "112214", /* 65 - 69 */ + "112412", "122114", "122411", "142112", "142211", /* 70 - 74 */ + "241211", "221114", "413111", "241112", "134111", /* 75 - 79 */ + "111242", "121142", "121241", "114212", "124112", /* 80 - 84 */ + "124211", "411212", "421112", "421211", "212141", /* 85 - 89 */ + "214121", "412121", "111143", "111341", "131141", /* 90 - 94 */ + "114113", "114311", "411113", "411311", "113141", /* 95 - 99 */ + "114131", "311141", "411131", "211412", "211214", /* 100 - 104 */ + "211232", "2331112" /* 105 - 106 */ +}; + +static const l_int32 C128_FUN_3 = 96; /* in A or B only; in C it is 96 */ +static const l_int32 C128_FUNC_2 = 97; /* in A or B only; in C it is 97 */ +static const l_int32 C128_SHIFT = 98; /* in A or B only; in C it is 98 */ +static const l_int32 C128_GOTO_C = 99; /* in A or B only; in C it is 99 */ +static const l_int32 C128_GOTO_B = 100; +static const l_int32 C128_GOTO_A = 101; +static const l_int32 C128_FUNC_1 = 102; +static const l_int32 C128_START_A = 103; +static const l_int32 C128_START_B = 104; +static const l_int32 C128_START_C = 105; +static const l_int32 C128_STOP = 106; + /* code 128 symbols are 11 units */ +static const l_int32 C128_SYMBOL_WIDTH = 11; + + + +#endif /* LEPTONICA_READBARCODE_H */ diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/recog.h b/third_party/leptonica/uos/sw_64/include/leptonica/recog.h new file mode 100644 index 00000000..06177200 --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/recog.h @@ -0,0 +1,263 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. 
Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + +#ifndef LEPTONICA_RECOG_H +#define LEPTONICA_RECOG_H + +/*! + * \file recog.h + * + *
+ *     This is a simple utility for training and recognizing individual
+ *     machine-printed text characters.  It is designed to be adapted
+ *     to a particular set of character images; e.g., from a book.
+ *
+ *     There are two methods of training the recognizer.  In the most
+ *     simple, a set of bitmaps has been labeled by some means, such
+ *     a generic OCR program.  This is input either one template at a time
+ *     or as a pixa of templates, to a function that creates a recog.
+ *     If in a pixa, the text string label must be embedded in the
+ *     text field of each pix.
+ *
+ *     If labeled data is not available, we start with a bootstrap
+ *     recognizer (BSR) that has labeled data from a variety of sources.
+ *     These images are scaled, typically to a fixed height, and the BSR
+ *     is then fed similarly scaled unlabeled images from the source
+ *     (e.g., a book), which it attempts to identify.  All images that have
+ *     a high enough correlation score with one of the templates in the
+ *     BSR are emitted in a pixa, which now holds unscaled and labeled
+ *     templates from the source.  This is the generator for a book adapted
+ *     recognizer (BAR).
+ *
+ *     The pixa should always be thought of as the primary structure.
+ *     It is the generator for the recog, because a recog is built
+ *     from a pixa of unscaled images.
+ *
+ *     New image templates can be added to a recog as long as it is
+ *     in training mode.  Once training is finished, to add templates
+ *     it is necessary to extract the generating pixa, add templates
+ *     to that pixa, and make a new recog.  Similarly, we do not
+ *     join two recog; instead, we simply join their generating pixa,
+ *     and make a recog from that.
+ *
+ *     To remove outliers from a pixa of labeled pix, make a recog,
+ *     determine the outliers, and generate a new pixa with the
+ *     outliers removed.  The outliers are determined by building
+ *     special templates for each character set that are scaled averages
+ *     of the individual templates.  Then a correlation score is found
+ *     between each template and the averaged templates.  There are
+ *     two implementations; outliers are determined as either:
+ *      (1) a template having a correlation score with its class average
+ *          that is below a threshold, or
+ *      (2) a template having a correlation score with its class average
+ *          that is smaller than the correlation score with the average
+ *          of another class.
+ *     Outliers are removed from the generating pixa.  Scaled averaging
+ *     is only performed for determining outliers and for splitting
+ *     characters; it is never used in a trained recognizer for identifying
+ *     unlabeled samples.
+ *
+ *     Two methods using averaged templates are provided for splitting
+ *     touching characters:
+ *      (1) greedy matching
+ *      (2) document image decoding (DID)
+ *     The DID method is the default.  It is about 5x faster and
+ *     possibly more accurate.
+ *
+ *     Once a BAR has been made, unlabeled sample images are identified
+ *     by finding the individual template in the BAR with highest
+ *     correlation.  The input images and images in the BAR can be
+ *     represented in two ways:
+ *      (1) as scanned, binarized to 1 bpp
+ *      (2) as a width-normalized outline formed by thinning to a
+ *          skeleton and then dilating by a fixed amount.
+ *
+ *     The recog can be serialized to file and read back.  The serialized
+ *     version holds the templates used for correlation (which may have
+ *     been modified by scaling and turning into lines from the unscaled
+ *     templates), plus, for arbitrary character sets, the UTF8
+ *     representation and the lookup table mapping from the character
+ *     representation to index.
+ *
+ *     Why do we not use averaged templates for recognition?
+ *     Letterforms can take on significantly different shapes (e.g.,
+ *     the letters 'a' and 'g'), and it makes no sense to average these.
+ *     The previous version of this utility allowed multiple recognizers
+ *     to exist, but this is an unnecessary complication if recognition
+ *     is done on all samples instead of on averages.
+ * 
+ */ + +#define RECOG_VERSION_NUMBER 2 + +struct L_Recog { + l_int32 scalew; /*!< scale all examples to this width; */ + /*!< use 0 prevent horizontal scaling */ + l_int32 scaleh; /*!< scale all examples to this height; */ + /*!< use 0 prevent vertical scaling */ + l_int32 linew; /*!< use a value > 0 to convert the bitmap */ + /*!< to lines of fixed width; 0 to skip */ + l_int32 templ_use; /*!< template use: use either the average */ + /*!< or all temmplates (L_USE_AVERAGE or */ + /*!< L_USE_ALL) */ + l_int32 maxarraysize; /*!< initialize container arrays to this */ + l_int32 setsize; /*!< size of character set */ + l_int32 threshold; /*!< for binarizing if depth > 1 */ + l_int32 maxyshift; /*!< vertical jiggle on nominal centroid */ + /*!< alignment; typically 0 or 1 */ + l_int32 charset_type; /*!< one of L_ARABIC_NUMERALS, etc. */ + l_int32 charset_size; /*!< expected number of classes in charset */ + l_int32 min_nopad; /*!< min number of samples without padding */ + l_int32 num_samples; /*!< number of training samples */ + l_int32 minwidth_u; /*!< min width averaged unscaled templates */ + l_int32 maxwidth_u; /*!< max width averaged unscaled templates */ + l_int32 minheight_u; /*!< min height averaged unscaled templates */ + l_int32 maxheight_u; /*!< max height averaged unscaled templates */ + l_int32 minwidth; /*!< min width averaged scaled templates */ + l_int32 maxwidth; /*!< max width averaged scaled templates */ + l_int32 ave_done; /*!< set to 1 when averaged bitmaps are made */ + l_int32 train_done; /*!< set to 1 when training is complete or */ + /*!< identification has started */ + l_float32 max_wh_ratio; /*!< max width/height ratio to split */ + l_float32 max_ht_ratio; /*!< max of max/min template height ratio */ + l_int32 min_splitw; /*!< min component width kept in splitting */ + l_int32 max_splith; /*!< max component height kept in splitting */ + struct Sarray *sa_text; /*!< text array for arbitrary char set */ + struct L_Dna *dna_tochar; /*!< index-to-char lut for arbitrary charset */ + l_int32 *centtab; /*!< table for finding centroids */ + l_int32 *sumtab; /*!< table for finding pixel sums */ + struct Pixaa *pixaa_u; /*!< all unscaled templates for each class */ + struct Ptaa *ptaa_u; /*!< centroids of all unscaled templates */ + struct Numaa *naasum_u; /*!< area of all unscaled templates */ + struct Pixaa *pixaa; /*!< all (scaled) templates for each class */ + struct Ptaa *ptaa; /*!< centroids of all (scaledl) templates */ + struct Numaa *naasum; /*!< area of all (scaled) templates */ + struct Pixa *pixa_u; /*!< averaged unscaled templates per class */ + struct Pta *pta_u; /*!< centroids of unscaled ave. templates */ + struct Numa *nasum_u; /*!< area of unscaled averaged templates */ + struct Pixa *pixa; /*!< averaged (scaled) templates per class */ + struct Pta *pta; /*!< centroids of (scaled) ave. templates */ + struct Numa *nasum; /*!< area of (scaled) averaged templates */ + struct Pixa *pixa_tr; /*!< all input training images */ + struct Pixa *pixadb_ave; /*!< unscaled and scaled averaged bitmaps */ + struct Pixa *pixa_id; /*!< input images for identifying */ + struct Pix *pixdb_ave; /*!< debug: best match of input against ave. 
*/ + struct Pix *pixdb_range; /*!< debug: best matches within range */ + struct Pixa *pixadb_boot; /*!< debug: bootstrap training results */ + struct Pixa *pixadb_split; /*!< debug: splitting results */ + struct L_Bmf *bmf; /*!< bmf fonts */ + l_int32 bmf_size; /*!< font size of bmf; default is 6 pt */ + struct L_Rdid *did; /*!< temp data used for image decoding */ + struct L_Rch *rch; /*!< temp data used for holding best char */ + struct L_Rcha *rcha; /*!< temp data used for array of best chars */ +}; +typedef struct L_Recog L_RECOG; + +/*! + * Data returned from correlation matching on a single character + */ +struct L_Rch { + l_int32 index; /*!< index of best template */ + l_float32 score; /*!< correlation score of best template */ + char *text; /*!< character string of best template */ + l_int32 sample; /*!< index of best sample (within the best */ + /*!< template class, if all samples are used) */ + l_int32 xloc; /*!< x-location of template (delx + shiftx) */ + l_int32 yloc; /*!< y-location of template (dely + shifty) */ + l_int32 width; /*!< width of best template */ +}; +typedef struct L_Rch L_RCH; + +/*! + * Data returned from correlation matching on an array of characters + */ +struct L_Rcha { + struct Numa *naindex; /*!< indices of best templates */ + struct Numa *nascore; /*!< correlation scores of best templates */ + struct Sarray *satext; /*!< character strings of best templates */ + struct Numa *nasample; /*!< indices of best samples */ + struct Numa *naxloc; /*!< x-locations of templates (delx + shiftx) */ + struct Numa *nayloc; /*!< y-locations of templates (dely + shifty) */ + struct Numa *nawidth; /*!< widths of best templates */ +}; +typedef struct L_Rcha L_RCHA; + +/*! + * Data used for decoding a line of characters. + */ +struct L_Rdid { + struct Pix *pixs; /*!< clone of pix to be decoded */ + l_int32 **counta; /*!< count array for each averaged template */ + l_int32 **delya; /*!< best y-shift array per average template */ + l_int32 narray; /*!< number of averaged templates */ + l_int32 size; /*!< size of count array (width of pixs) */ + l_int32 *setwidth; /*!< setwidths for each template */ + struct Numa *nasum; /*!< pixel count in pixs by column */ + struct Numa *namoment; /*!< first moment of pixels in pixs by cols */ + l_int32 fullarrays; /*!< 1 if full arrays are made; 0 otherwise */ + l_float32 *beta; /*!< channel coeffs for template fg term */ + l_float32 *gamma; /*!< channel coeffs for bit-and term */ + l_float32 *trellisscore; /*!< score on trellis */ + l_int32 *trellistempl; /*!< template on trellis (for backtrack) */ + struct Numa *natempl; /*!< indices of best path templates */ + struct Numa *naxloc; /*!< x locations of best path templates */ + struct Numa *nadely; /*!< y locations of best path templates */ + struct Numa *nawidth; /*!< widths of best path templates */ + struct Boxa *boxa; /*!< Viterbi result for splitting input pixs */ + struct Numa *nascore; /*!< correlation scores: best path templates */ + struct Numa *natempl_r; /*!< indices of best rescored templates */ + struct Numa *nasample_r; /*!< samples of best scored templates */ + struct Numa *naxloc_r; /*!< x locations of best rescoredtemplates */ + struct Numa *nadely_r; /*!< y locations of best rescoredtemplates */ + struct Numa *nawidth_r; /*!< widths of best rescoredtemplates */ + struct Numa *nascore_r; /*!< correlation scores: rescored templates */ +}; +typedef struct L_Rdid L_RDID; + + +/*-------------------------------------------------------------------------* + * Flags for describing 
limited character sets * + *-------------------------------------------------------------------------*/ +/*! Flags for describing limited character sets */ +enum { + L_UNKNOWN = 0, /*!< character set type is not specified */ + L_ARABIC_NUMERALS = 1, /*!< 10 digits */ + L_LC_ROMAN_NUMERALS = 2, /*!< 7 lower-case letters (i,v,x,l,c,d,m) */ + L_UC_ROMAN_NUMERALS = 3, /*!< 7 upper-case letters (I,V,X,L,C,D,M) */ + L_LC_ALPHA = 4, /*!< 26 lower-case letters */ + L_UC_ALPHA = 5 /*!< 26 upper-case letters */ +}; + +/*-------------------------------------------------------------------------* + * Flags for selecting between using average and all templates * + *-------------------------------------------------------------------------*/ +/*! Flags for selecting average or all templates: recog->templ_use */ +enum { + L_USE_ALL_TEMPLATES = 0, /*!< use all templates; default */ + L_USE_AVERAGE_TEMPLATES = 1 /*!< use average templates; special cases */ +}; + +#endif /* LEPTONICA_RECOG_H */ diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/regutils.h b/third_party/leptonica/uos/sw_64/include/leptonica/regutils.h new file mode 100644 index 00000000..f3506a31 --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/regutils.h @@ -0,0 +1,140 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + +#ifndef LEPTONICA_REGUTILS_H +#define LEPTONICA_REGUTILS_H + +/*! + * \file regutils.h + * + *
+ *   Contains this regression test parameter packaging struct
+ *       struct L_RegParams
+ *
+ *   The regression test utility allows you to write regression tests
+ *   that compare results with existing "golden files" and with
+ *   compiled-in data.
+ *
+ *   Regression tests can be called in three ways.
+ *   For example, for distance_reg:
+ *
+ *       Case 1: distance_reg [compare]
+ *           This runs the test against the set of golden files.  It
+ *           appends to 'outfile.txt' either "SUCCESS" or "FAILURE",
+ *           as well as the details of any parts of the test that failed.
+ *           It writes to a temporary file stream (fp).
+ *           Using 'compare' on the command line is optional.
+ *
+ *       Case 2: distance_reg generate
+ *           This generates golden files in /tmp for the reg test.
+ *
+ *       Case 3: distance_reg display
+ *           This runs the test but makes no comparison of the output
+ *           against the set of golden files.  In addition, this displays
+ *           images and plots that are specified in the test under
+ *           control of the display variable.  Display is enabled only
+ *           for this case.
+ *
+ *   Regression tests follow the pattern given below.  Tests are
+ *   automatically numbered sequentially, and it is convenient to
+ *   comment each with a number to keep track (for comparison tests
+ *   and for debugging).  In an actual case, comparisons of pix and
+ *   of files can occur in any order.  We give a specific order here
+ *   for clarity.
+ *
+ *       L_REGPARAMS  *rp;  // holds data required by the test functions
+ *
+ *       // Setup variables; optionally open stream
+ *       if (regTestSetup(argc, argv, &rp))
+ *           return 1;
+ *
+ *       // Test pairs of generated pix for identity.  This compares
+ *       // two pix; no golden file is generated.
+ *       regTestComparePix(rp, pix1, pix2);  // 0
+ *
+ *       // Test pairs of generated pix for similarity.  This compares
+ *       // two pix; no golden file is generated.  The last arg determines
+ *       // if stats are to be written to stderr.
+ *       regTestCompareSimilarPix(rp, pix1, pix2, 15, 0.001, 0);  // 1
+ *
+ *       // Generation of output files and testing for identity
+ *       // These files can be anything, of course.
+ *       regTestCheckFile(rp, <filename1>);  // 2
+ *       regTestCheckFile(rp, <filename2>);  // 3
+ *
+ *       // Test pairs of output golden files for identity.  Here we
+ *       // are comparing golden files 2 and 3.
+ *       regTestCompareFiles(rp, 2, 3);  // 4
+ *
+ *       // "Write and check".  This writes a pix using a canonical
+ *       // formulation for the local filename and either:
+ *       //     case 1: generates a golden file
+ *       //     case 2: compares the local file with a golden file
+ *       //     case 3: generates local files and displays
+ *       // Here we write the pix compressed with png and jpeg, respectively;
+ *       // Then check against the golden file.  The internal %index
+ *       // is incremented; it is embedded in the local filename and,
+ *       // if generating, in the golden file as well.
+ *       regTestWritePixAndCheck(rp, pix1, IFF_PNG);  // 5
+ *       regTestWritePixAndCheck(rp, pix2, IFF_JFIF_JPEG);  // 6
+ *
+ *       // Display if reg test was called in 'display' mode
+ *       pixDisplayWithTitle(pix1, 100, 100, NULL, rp->display);
+ *
+ *       // Clean up and output result
+ *       regTestCleanup(rp);
+ * 
+ */ + +/*----------------------------------------------------------------------------* + * Regression test parameter packer * + *----------------------------------------------------------------------------*/ + +/*! Regression test parameter packer */ +struct L_RegParams +{ + FILE *fp; /*!< stream to temporary output file for compare mode */ + char *testname; /*!< name of test, without '_reg' */ + char *tempfile; /*!< name of temp file for compare mode output */ + l_int32 mode; /*!< generate, compare or display */ + l_int32 index; /*!< index into saved files for this test; 0-based */ + l_int32 success; /*!< overall result of the test */ + l_int32 display; /*!< 1 if in display mode; 0 otherwise */ + L_TIMER tstart; /*!< marks beginning of the reg test */ +}; +typedef struct L_RegParams L_REGPARAMS; + + + /*! Running modes for the test */ +enum { + L_REG_GENERATE = 0, + L_REG_COMPARE = 1, + L_REG_DISPLAY = 2 +}; + + +#endif /* LEPTONICA_REGUTILS_H */ + diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/stack.h b/third_party/leptonica/uos/sw_64/include/leptonica/stack.h new file mode 100644 index 00000000..4fa61141 --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/stack.h @@ -0,0 +1,70 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + +#ifndef LEPTONICA_STACK_H +#define LEPTONICA_STACK_H + +/*! + * \file stack.h + * + *
+ *       Expandable pointer stack for arbitrary void* data.
+ *
+ *       The L_Stack is an array of void * ptrs, onto which arbitrary
+ *       objects can be stored.  At any time, the number of
+ *       stored objects is stack->n.  The object at the bottom
+ *       of the stack is at array[0]; the object at the top of
+ *       the stack is at array[n-1].  New objects are added
+ *       to the top of the stack, at the first available location,
+ *       which is array[n].  Objects are removed from the top of the
+ *       stack.  When an attempt is made to remove an object from an
+ *       empty stack, the result is null.   When the stack becomes
+ *       filled, so that n = nalloc, the size is doubled.
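+ *
+ *       For illustration only, a minimal caller sketch using the stack
+ *       accessors declared in allheaders.h (lstackCreate, lstackAdd,
+ *       lstackRemove, lstackDestroy); this is an assumed usage pattern,
+ *       not part of this header:
+ *           L_STACK *ls = lstackCreate(0);   // 0: use default nalloc
+ *           lstackAdd(ls, (void *)item);     // push; item is any caller data
+ *           void *top = lstackRemove(ls);    // pop; returns NULL if empty
+ *           lstackDestroy(&ls, 0);           // 0: do not free stored items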
+ *
+ *       The auxiliary stack can be used to store and remove
+ *       objects for re-use.  It must be created by a separate
+ *       call to pstackCreate().  [Just imagine the chaos if
+ *       pstackCreate() created the auxiliary stack!]
+ *       pstackDestroy() checks for the auxiliary stack and removes it.
+ * 
+ */ + + + /*! Expandable pointer stack for arbitrary void* data. + * Note that array[n] is the first null ptr in the array + */ +struct L_Stack +{ + l_int32 nalloc; /*!< size of ptr array */ + l_int32 n; /*!< number of stored elements */ + void **array; /*!< ptr array */ + struct L_Stack *auxstack; /*!< auxiliary stack */ +}; +typedef struct L_Stack L_STACK; + + +#endif /* LEPTONICA_STACK_H */ + diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/stringcode.h b/third_party/leptonica/uos/sw_64/include/leptonica/stringcode.h new file mode 100644 index 00000000..db0faa20 --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/stringcode.h @@ -0,0 +1,60 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + +#ifndef LEPTONICA_STRINGCODE_H +#define LEPTONICA_STRINGCODE_H + +/*! + * \file stringcode.h + * + * Data structure to hold accumulating generated code for storing + * and extracing serializable leptonica objects (e.g., pixa, recog). + * + * Also a flag for selecting a string from the L_GenAssoc struct + * in stringcode. + */ + +struct L_StrCode +{ + l_int32 fileno; /*!< index for function and output file names */ + l_int32 ifunc; /*!< index into struct currently being stored */ + SARRAY *function; /*!< store case code for extraction */ + SARRAY *data; /*!< store base64 encoded data as strings */ + SARRAY *descr; /*!< store line in description table */ + l_int32 n; /*!< number of data strings */ +}; +typedef struct L_StrCode L_STRCODE; + + +/*! 
Select string in stringcode for a specific serializable data type */ +enum { + L_STR_TYPE = 0, /*!< typedef for the data type */ + L_STR_NAME = 1, /*!< name of the data type */ + L_STR_READER = 2, /*!< reader to get the data type from file */ + L_STR_MEMREADER = 3 /*!< reader to get the compressed string in memory */ +}; + +#endif /* LEPTONICA_STRINGCODE_H */ diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/sudoku.h b/third_party/leptonica/uos/sw_64/include/leptonica/sudoku.h new file mode 100644 index 00000000..ba4b21e9 --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/sudoku.h @@ -0,0 +1,76 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + +#ifndef SUDOKU_H_INCLUDED +#define SUDOKU_H_INCLUDED + +/*! + * \file sudoku.h + * + *
+ *    The L_Sudoku holds all the information of the current state.
+ *
+ *    The input to sudokuCreate() is a file with any number of lines
+ *    starting with '#', followed by 9 lines consisting of 9 numbers
+ *    in each line.  These have the known values and use 0 for the unknowns.
+ *    Blank lines are ignored.
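+ *
+ *    For illustration, a hypothetical input fragment in that format:
+ *        # easy puzzle (comment line)
+ *        3 0 0 0 0 6 0 0 4
+ *        0 0 0 0 0 0 2 0 0
+ *        ... seven more rows of 9 digits, with 0 marking each unknown ...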
+ *
+ *    The %locs array holds the indices of the unknowns, numbered
+ *    left-to-right and top-to-bottom from 0 to 80.  The array size
+ *    is initialized to %num.  %current is the index into the %locs
+ *    array of the current guess: locs[current].
+ *
+ *    The %state array is used to determine the validity of each guess.
+ *    It is of size 81, and is initialized by setting the unknowns to 0
+ *    and the knowns to their input values.
+ * 
+ */ + +struct L_Sudoku +{ + l_int32 num; /*!< number of unknowns */ + l_int32 *locs; /*!< location of unknowns */ + l_int32 current; /*!< index into %locs of current location */ + l_int32 *init; /*!< initial state, with 0 representing */ + /*!< the unknowns */ + l_int32 *state; /*!< present state, including inits and */ + /*!< guesses of unknowns up to %current */ + l_int32 nguess; /*!< shows current number of guesses */ + l_int32 finished; /*!< set to 1 when solved */ + l_int32 failure; /*!< set to 1 if no solution is possible */ +}; +typedef struct L_Sudoku L_SUDOKU; + + + /*! For printing out array data */ +enum { + L_SUDOKU_INIT = 0, + L_SUDOKU_STATE = 1 +}; + +#endif /* SUDOKU_H_INCLUDED */ + + diff --git a/third_party/leptonica/uos/sw_64/include/leptonica/watershed.h b/third_party/leptonica/uos/sw_64/include/leptonica/watershed.h new file mode 100644 index 00000000..d6b20775 --- /dev/null +++ b/third_party/leptonica/uos/sw_64/include/leptonica/watershed.h @@ -0,0 +1,64 @@ +/*====================================================================* + - Copyright (C) 2001 Leptonica. All rights reserved. + - + - Redistribution and use in source and binary forms, with or without + - modification, are permitted provided that the following conditions + - are met: + - 1. Redistributions of source code must retain the above copyright + - notice, this list of conditions and the following disclaimer. + - 2. Redistributions in binary form must reproduce the above + - copyright notice, this list of conditions and the following + - disclaimer in the documentation and/or other materials + - provided with the distribution. + - + - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY + - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *====================================================================*/ + +#ifndef LEPTONICA_WATERSHED_H +#define LEPTONICA_WATERSHED_H + +/*! + * \file watershed.h + * + * Simple data structure to hold watershed data. + * All data here is owned by the L_WShed and must be freed. + */ + +/*! Simple data structure to hold watershed data. 
*/ +struct L_WShed +{ + struct Pix *pixs; /*!< clone of input 8 bpp pixs */ + struct Pix *pixm; /*!< clone of input 1 bpp seed (marker) pixm */ + l_int32 mindepth; /*!< minimum depth allowed for a watershed */ + struct Pix *pixlab; /*!< 16 bpp label pix */ + struct Pix *pixt; /*!< scratch pix for computing wshed regions */ + void **lines8; /*!< line ptrs for pixs */ + void **linem1; /*!< line ptrs for pixm */ + void **linelab32; /*!< line ptrs for pixlab */ + void **linet1; /*!< line ptrs for pixt */ + struct Pixa *pixad; /*!< result: 1 bpp pixa of watersheds */ + struct Pta *ptas; /*!< pta of initial seed pixels */ + struct Numa *nasi; /*!< numa of seed indicators; 0 if completed */ + struct Numa *nash; /*!< numa of initial seed heights */ + struct Numa *namh; /*!< numa of initial minima heights */ + struct Numa *nalevels; /*!< result: numa of watershed levels */ + l_int32 nseeds; /*!< number of seeds (markers) */ + l_int32 nother; /*!< number of minima different from seeds */ + l_int32 *lut; /*!< lut for pixel indices */ + struct Numa **links; /*!< back-links into lut, for updates */ + l_int32 arraysize; /*!< size of links array */ + l_int32 debug; /*!< set to 1 for debug output */ +}; +typedef struct L_WShed L_WSHED; + +#endif /* LEPTONICA_WATERSHED_H */ diff --git a/third_party/leptonica/uos/sw_64/lib/libleptonica.a b/third_party/leptonica/uos/sw_64/lib/libleptonica.a new file mode 100644 index 00000000..4d163df7 Binary files /dev/null and b/third_party/leptonica/uos/sw_64/lib/libleptonica.a differ diff --git a/third_party/leptonica/uos/sw_64/lib/pkgconfig/lept.pc b/third_party/leptonica/uos/sw_64/lib/pkgconfig/lept.pc new file mode 100644 index 00000000..4d960848 --- /dev/null +++ b/third_party/leptonica/uos/sw_64/lib/pkgconfig/lept.pc @@ -0,0 +1,11 @@ +prefix=/home/SW2023/sane/code_app/third_party/leptonica/leptonica-1.74.4/leptonica-1.74.4/build/release +exec_prefix=${prefix}/bin +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: leptonica +Description: An open source C library for efficient image processing and image analysis operations +Version: 1.74.4 +Libs: -L${libdir} -lleptonica_OUTPUT_NAME-NOTFOUND +Libs.private: +Cflags: -I${includedir} -I${includedir}/leptonica diff --git a/third_party/mupdf/uos/sw_64/lib/libmupdf.so b/third_party/mupdf/uos/sw_64/lib/libmupdf.so new file mode 100755 index 00000000..c1570d20 Binary files /dev/null and b/third_party/mupdf/uos/sw_64/lib/libmupdf.so differ diff --git a/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/apitypes.h b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/apitypes.h new file mode 100644 index 00000000..2c0e85c9 --- /dev/null +++ b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/apitypes.h @@ -0,0 +1,33 @@ +/////////////////////////////////////////////////////////////////////// +// File: apitypes.h +// Description: Types used in both the API and internally +// Author: Ray Smith +// Created: Wed Mar 03 09:22:53 PST 2010 +// +// (C) Copyright 2010, Google Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +/////////////////////////////////////////////////////////////////////// + +#ifndef TESSERACT_API_APITYPES_H_ +#define TESSERACT_API_APITYPES_H_ + +#include "publictypes.h" + +// The types used by the API and Page/ResultIterator can be found in: +// ccstruct/publictypes.h +// ccmain/resultiterator.h +// ccmain/pageiterator.h +// API interfaces and API users should be sure to include this file, rather +// than the lower-level one, and lower-level code should be sure to include +// only the lower-level file. + +#endif // TESSERACT_API_APITYPES_H_ diff --git a/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/baseapi.h b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/baseapi.h new file mode 100644 index 00000000..fe12351b --- /dev/null +++ b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/baseapi.h @@ -0,0 +1,946 @@ +/////////////////////////////////////////////////////////////////////// +// File: baseapi.h +// Description: Simple API for calling tesseract. +// Author: Ray Smith +// +// (C) Copyright 2006, Google Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +/////////////////////////////////////////////////////////////////////// + +#ifndef TESSERACT_API_BASEAPI_H_ +#define TESSERACT_API_BASEAPI_H_ + +#include +// To avoid collision with other typenames include the ABSOLUTE MINIMUM +// complexity of includes here. Use forward declarations wherever possible +// and hide includes of complex types in baseapi.cpp. +#include "apitypes.h" +#include "pageiterator.h" +#include "platform.h" +#include "publictypes.h" +#include "resultiterator.h" +#include "serialis.h" +#include "tess_version.h" +#include "tesscallback.h" +#include "thresholder.h" +#include "unichar.h" + +template class GenericVector; +class PAGE_RES; +class PAGE_RES_IT; +class ParagraphModel; +struct BlamerBundle; +class BLOCK_LIST; +class DENORM; +class MATRIX; +class ROW; +class STRING; +class WERD; +struct Pix; +struct Box; +struct Pixa; +struct Boxa; +class ETEXT_DESC; +struct OSResults; +class TBOX; +class UNICHARSET; +class WERD_CHOICE_LIST; + +struct INT_FEATURE_STRUCT; +using INT_FEATURE = INT_FEATURE_STRUCT *; +struct TBLOB; + +namespace tesseract { + +class Dawg; +class Dict; +class EquationDetect; +class PageIterator; +class LTRResultIterator; +class ResultIterator; +class MutableIterator; +class TessResultRenderer; +class Tesseract; +class Trie; +class Wordrec; + +using DictFunc = int (Dict::*)(void *, const UNICHARSET &, UNICHAR_ID, bool) const; +using ProbabilityInContextFunc = double (Dict::*)(const char *, const char *, int, const char *, int); +using ParamsModelClassifyFunc = float (Dict::*)(const char *, void *); +using FillLatticeFunc = void (Wordrec::*)(const MATRIX &, const WERD_CHOICE_LIST &, const UNICHARSET &, BlamerBundle *); +typedef TessCallback4 + TruthCallback; + +/** + * Base class for all tesseract APIs. 
+ * Specific classes can add ability to work on different inputs or produce + * different outputs. + * This class is mostly an interface layer on top of the Tesseract instance + * class to hide the data types so that users of this class don't have to + * include any other Tesseract headers. + */ +class TESS_API TessBaseAPI { + public: + TessBaseAPI(); + virtual ~TessBaseAPI(); + + int MyOSD(); + + /** + * Returns the version identifier as a static string. Do not delete. + */ + static const char* Version(); + + /** + * If compiled with OpenCL AND an available OpenCL + * device is deemed faster than serial code, then + * "device" is populated with the cl_device_id + * and returns sizeof(cl_device_id) + * otherwise *device=nullptr and returns 0. + */ + static size_t getOpenCLDevice(void **device); + + /** + * Writes the thresholded image to stderr as a PBM file on receipt of a + * SIGSEGV, SIGFPE, or SIGBUS signal. (Linux/Unix only). + */ + static void CatchSignals(); + + /** + * Set the name of the input file. Needed for training and + * reading a UNLV zone file, and for searchable PDF output. + */ + void SetInputName(const char* name); + /** + * These functions are required for searchable PDF output. + * We need our hands on the input file so that we can include + * it in the PDF without transcoding. If that is not possible, + * we need the original image. Finally, resolution metadata + * is stored in the PDF so we need that as well. + */ + const char* GetInputName(); + // Takes ownership of the input pix. + void SetInputImage(Pix *pix); + Pix* GetInputImage(); + int GetSourceYResolution(); + const char* GetDatapath(); + + /** Set the name of the bonus output files. Needed only for debugging. */ + void SetOutputName(const char* name); + + /** + * Set the value of an internal "parameter." + * Supply the name of the parameter and the value as a string, just as + * you would in a config file. + * Returns false if the name lookup failed. + * Eg SetVariable("tessedit_char_blacklist", "xyz"); to ignore x, y and z. + * Or SetVariable("classify_bln_numeric_mode", "1"); to set numeric-only mode. + * SetVariable may be used before Init, but settings will revert to + * defaults on End(). + * + * Note: Must be called after Init(). Only works for non-init variables + * (init variables should be passed to Init()). + */ + bool SetVariable(const char* name, const char* value); + bool SetDebugVariable(const char* name, const char* value); + + /** + * Returns true if the parameter was found among Tesseract parameters. + * Fills in value with the value of the parameter. + */ + bool GetIntVariable(const char *name, int *value) const; + bool GetBoolVariable(const char *name, bool *value) const; + bool GetDoubleVariable(const char *name, double *value) const; + + /** + * Returns the pointer to the string that represents the value of the + * parameter if it was found among Tesseract parameters. + */ + const char *GetStringVariable(const char *name) const; + + /** + * Print Tesseract parameters to the given file. + */ + void PrintVariables(FILE *fp) const; + + /** + * Get value of named variable as a string, if it exists. + */ + bool GetVariableAsString(const char *name, STRING *val); + + /** + * Instances are now mostly thread-safe and totally independent, + * but some global parameters remain. Basically it is safe to use multiple + * TessBaseAPIs in different threads in parallel, UNLESS: + * you use SetVariable on some of the Params in classify and textord. 
+ * If you do, then the effect will be to change it for all your instances. + * + * Start tesseract. Returns zero on success and -1 on failure. + * NOTE that the only members that may be called before Init are those + * listed above here in the class definition. + * + * The datapath must be the name of the tessdata directory. + * The language is (usually) an ISO 639-3 string or nullptr will default to eng. + * It is entirely safe (and eventually will be efficient too) to call + * Init multiple times on the same instance to change language, or just + * to reset the classifier. + * The language may be a string of the form [~][+[~]]* indicating + * that multiple languages are to be loaded. Eg hin+eng will load Hindi and + * English. Languages may specify internally that they want to be loaded + * with one or more other languages, so the ~ sign is available to override + * that. Eg if hin were set to load eng by default, then hin+~eng would force + * loading only hin. The number of loaded languages is limited only by + * memory, with the caveat that loading additional languages will impact + * both speed and accuracy, as there is more work to do to decide on the + * applicable language, and there is more chance of hallucinating incorrect + * words. + * WARNING: On changing languages, all Tesseract parameters are reset + * back to their default values. (Which may vary between languages.) + * If you have a rare need to set a Variable that controls + * initialization for a second call to Init you should explicitly + * call End() and then use SetVariable before Init. This is only a very + * rare use case, since there are very few uses that require any parameters + * to be set before Init. + * + * If set_only_non_debug_params is true, only params that do not contain + * "debug" in the name will be set. + */ + int Init(const char* datapath, const char* language, OcrEngineMode mode, + char **configs, int configs_size, + const GenericVector *vars_vec, + const GenericVector *vars_values, + bool set_only_non_debug_params); + int Init(const char* datapath, const char* language, OcrEngineMode oem) { + return Init(datapath, language, oem, nullptr, 0, nullptr, nullptr, false); + } + int Init(const char* datapath, const char* language) { + return Init(datapath, language, OEM_DEFAULT, nullptr, 0, nullptr, nullptr, false); + } + // In-memory version reads the traineddata file directly from the given + // data[data_size] array, and/or reads data via a FileReader. + int Init(const char* data, int data_size, const char* language, + OcrEngineMode mode, char** configs, int configs_size, + const GenericVector* vars_vec, + const GenericVector* vars_values, + bool set_only_non_debug_params, FileReader reader); + + /** + * Returns the languages string used in the last valid initialization. + * If the last initialization specified "deu+hin" then that will be + * returned. If hin loaded eng automatically as well, then that will + * not be included in this list. To find the languages actually + * loaded use GetLoadedLanguagesAsVector. + * The returned string should NOT be deleted. + */ + const char* GetInitLanguagesAsString() const; + + /** + * Returns the loaded languages in the vector of STRINGs. + * Includes all languages loaded by the last Init, including those loaded + * as dependencies of other loaded languages. + */ + void GetLoadedLanguagesAsVector(GenericVector* langs) const; + + /** + * Returns the available languages in the sorted vector of STRINGs. 
+ */ + void GetAvailableLanguagesAsVector(GenericVector* langs) const; + + /** + * Init only the lang model component of Tesseract. The only functions + * that work after this init are SetVariable and IsValidWord. + * WARNING: temporary! This function will be removed from here and placed + * in a separate API at some future time. + */ + int InitLangMod(const char* datapath, const char* language); + + /** + * Init only for page layout analysis. Use only for calls to SetImage and + * AnalysePage. Calls that attempt recognition will generate an error. + */ + void InitForAnalysePage(); + + /** + * Read a "config" file containing a set of param, value pairs. + * Searches the standard places: tessdata/configs, tessdata/tessconfigs + * and also accepts a relative or absolute path name. + * Note: only non-init params will be set (init params are set by Init()). + */ + void ReadConfigFile(const char* filename); + /** Same as above, but only set debug params from the given config file. */ + void ReadDebugConfigFile(const char* filename); + + /** + * Set the current page segmentation mode. Defaults to PSM_SINGLE_BLOCK. + * The mode is stored as an IntParam so it can also be modified by + * ReadConfigFile or SetVariable("tessedit_pageseg_mode", mode as string). + */ + void SetPageSegMode(PageSegMode mode); + + /** Return the current page segmentation mode. */ + PageSegMode GetPageSegMode() const; + + /** + * Recognize a rectangle from an image and return the result as a string. + * May be called many times for a single Init. + * Currently has no error checking. + * Greyscale of 8 and color of 24 or 32 bits per pixel may be given. + * Palette color images will not work properly and must be converted to + * 24 bit. + * Binary images of 1 bit per pixel may also be given but they must be + * byte packed with the MSB of the first byte being the first pixel, and a + * 1 represents WHITE. For binary images set bytes_per_pixel=0. + * The recognized text is returned as a char* which is coded + * as UTF8 and must be freed with the delete [] operator. + * + * Note that TesseractRect is the simplified convenience interface. + * For advanced uses, use SetImage, (optionally) SetRectangle, Recognize, + * and one or more of the Get*Text functions below. + */ + char* TesseractRect(const unsigned char* imagedata, + int bytes_per_pixel, int bytes_per_line, + int left, int top, int width, int height); + + /** + * Call between pages or documents etc to free up memory and forget + * adaptive data. + */ + void ClearAdaptiveClassifier(); + + /** + * @defgroup AdvancedAPI Advanced API + * The following methods break TesseractRect into pieces, so you can + * get hold of the thresholded image, get the text in different formats, + * get bounding boxes, confidences etc. + */ + /* @{ */ + + /** + * Provide an image for Tesseract to recognize. Format is as + * TesseractRect above. Copies the image buffer and converts to Pix. + * SetImage clears all recognition results, and sets the rectangle to the + * full image, so it may be followed immediately by a GetUTF8Text, and it + * will automatically perform recognition. + */ + void SetImage(const unsigned char* imagedata, int width, int height, + int bytes_per_pixel, int bytes_per_line); + + /** + * Provide an image for Tesseract to recognize. As with SetImage above, + * Tesseract takes its own copy of the image, so it need not persist until + * after Recognize. + * Pix vs raw, which to use? + * Use Pix where possible. 
Tesseract uses Pix as its internal representation + * and it is therefore more efficient to provide a Pix directly. + */ + void SetImage(Pix* pix); + + /** + * Set the resolution of the source image in pixels per inch so font size + * information can be calculated in results. Call this after SetImage(). + */ + void SetSourceResolution(int ppi); + + /** + * Restrict recognition to a sub-rectangle of the image. Call after SetImage. + * Each SetRectangle clears the recogntion results so multiple rectangles + * can be recognized with the same image. + */ + void SetRectangle(int left, int top, int width, int height); + + /** + * In extreme cases only, usually with a subclass of Thresholder, it + * is possible to provide a different Thresholder. The Thresholder may + * be preloaded with an image, settings etc, or they may be set after. + * Note that Tesseract takes ownership of the Thresholder and will + * delete it when it it is replaced or the API is destructed. + */ + void SetThresholder(ImageThresholder* thresholder) { + delete thresholder_; + thresholder_ = thresholder; + ClearResults(); + } + + /** + * Get a copy of the internal thresholded image from Tesseract. + * Caller takes ownership of the Pix and must pixDestroy it. + * May be called any time after SetImage, or after TesseractRect. + */ + Pix* GetThresholdedImage(); + + /** + * Get the result of page layout analysis as a leptonica-style + * Boxa, Pixa pair, in reading order. + * Can be called before or after Recognize. + */ + Boxa* GetRegions(Pixa** pixa); + + /** + * Get the textlines as a leptonica-style + * Boxa, Pixa pair, in reading order. + * Can be called before or after Recognize. + * If raw_image is true, then extract from the original image instead of the + * thresholded image and pad by raw_padding pixels. + * If blockids is not nullptr, the block-id of each line is also returned as an + * array of one element per line. delete [] after use. + * If paraids is not nullptr, the paragraph-id of each line within its block is + * also returned as an array of one element per line. delete [] after use. + */ + Boxa* GetTextlines(bool raw_image, int raw_padding, + Pixa** pixa, int** blockids, int** paraids); + /* + Helper method to extract from the thresholded image. (most common usage) + */ + Boxa* GetTextlines(Pixa** pixa, int** blockids) { + return GetTextlines(false, 0, pixa, blockids, nullptr); + } + + /** + * Get textlines and strips of image regions as a leptonica-style Boxa, Pixa + * pair, in reading order. Enables downstream handling of non-rectangular + * regions. + * Can be called before or after Recognize. + * If blockids is not nullptr, the block-id of each line is also returned as an + * array of one element per line. delete [] after use. + */ + Boxa* GetStrips(Pixa** pixa, int** blockids); + + /** + * Get the words as a leptonica-style + * Boxa, Pixa pair, in reading order. + * Can be called before or after Recognize. + */ + Boxa* GetWords(Pixa** pixa); + + /** + * Gets the individual connected (text) components (created + * after pages segmentation step, but before recognition) + * as a leptonica-style Boxa, Pixa pair, in reading order. + * Can be called before or after Recognize. + * Note: the caller is responsible for calling boxaDestroy() + * on the returned Boxa array and pixaDestroy() on cc array. + */ + Boxa* GetConnectedComponents(Pixa** cc); + + /** + * Get the given level kind of components (block, textline, word etc.) as a + * leptonica-style Boxa, Pixa pair, in reading order. 
+ * Can be called before or after Recognize. + * If blockids is not nullptr, the block-id of each component is also returned + * as an array of one element per component. delete [] after use. + * If blockids is not nullptr, the paragraph-id of each component with its block + * is also returned as an array of one element per component. delete [] after + * use. + * If raw_image is true, then portions of the original image are extracted + * instead of the thresholded image and padded with raw_padding. + * If text_only is true, then only text components are returned. + */ + Boxa* GetComponentImages(PageIteratorLevel level, + bool text_only, bool raw_image, + int raw_padding, + Pixa** pixa, int** blockids, int** paraids); + // Helper function to get binary images with no padding (most common usage). + Boxa* GetComponentImages(const PageIteratorLevel level, + const bool text_only, + Pixa** pixa, int** blockids) { + return GetComponentImages(level, text_only, false, 0, pixa, blockids, nullptr); + } + + /** + * Returns the scale factor of the thresholded image that would be returned by + * GetThresholdedImage() and the various GetX() methods that call + * GetComponentImages(). + * Returns 0 if no thresholder has been set. + */ + int GetThresholdedImageScaleFactor() const; + + /** + * Runs page layout analysis in the mode set by SetPageSegMode. + * May optionally be called prior to Recognize to get access to just + * the page layout results. Returns an iterator to the results. + * If merge_similar_words is true, words are combined where suitable for use + * with a line recognizer. Use if you want to use AnalyseLayout to find the + * textlines, and then want to process textline fragments with an external + * line recognizer. + * Returns nullptr on error or an empty page. + * The returned iterator must be deleted after use. + * WARNING! This class points to data held within the TessBaseAPI class, and + * therefore can only be used while the TessBaseAPI class still exists and + * has not been subjected to a call of Init, SetImage, Recognize, Clear, End + * DetectOS, or anything else that changes the internal PAGE_RES. + */ + PageIterator* AnalyseLayout(); + PageIterator* AnalyseLayout(bool merge_similar_words); + + /** + * Recognize the image from SetAndThresholdImage, generating Tesseract + * internal structures. Returns 0 on success. + * Optional. The Get*Text functions below will call Recognize if needed. + * After Recognize, the output is kept internally until the next SetImage. + */ + int Recognize(ETEXT_DESC* monitor); + + /** + * Methods to retrieve information after SetAndThresholdImage(), + * Recognize() or TesseractRect(). (Recognize is called implicitly if needed.) + */ + + #ifndef DISABLED_LEGACY_ENGINE + /** Variant on Recognize used for testing chopper. */ + int RecognizeForChopTest(ETEXT_DESC* monitor); + #endif + + /** + * Turns images into symbolic text. + * + * filename can point to a single image, a multi-page TIFF, + * or a plain text list of image filenames. + * + * retry_config is useful for debugging. If not nullptr, you can fall + * back to an alternate configuration if a page fails for some + * reason. + * + * timeout_millisec terminates processing if any single page + * takes too long. Set to 0 for unlimited time. + * + * renderer is responible for creating the output. For example, + * use the TessTextRenderer if you want plaintext output, or + * the TessPDFRender to produce searchable PDF. 
+ * + * If tessedit_page_number is non-negative, will only process that + * single page. Works for multi-page tiff file, or filelist. + * + * Returns true if successful, false on error. + */ + bool ProcessPages(const char* filename, const char* retry_config, + int timeout_millisec, TessResultRenderer* renderer); + // Does the real work of ProcessPages. + bool ProcessPagesInternal(const char* filename, const char* retry_config, + int timeout_millisec, TessResultRenderer* renderer); + + /** + * Turn a single image into symbolic text. + * + * The pix is the image processed. filename and page_index are + * metadata used by side-effect processes, such as reading a box + * file or formatting as hOCR. + * + * See ProcessPages for desciptions of other parameters. + */ + bool ProcessPage(Pix* pix, int page_index, const char* filename, + const char* retry_config, int timeout_millisec, + TessResultRenderer* renderer); + + /** + * Get a reading-order iterator to the results of LayoutAnalysis and/or + * Recognize. The returned iterator must be deleted after use. + * WARNING! This class points to data held within the TessBaseAPI class, and + * therefore can only be used while the TessBaseAPI class still exists and + * has not been subjected to a call of Init, SetImage, Recognize, Clear, End + * DetectOS, or anything else that changes the internal PAGE_RES. + */ + ResultIterator* GetIterator(); + + /** + * Get a mutable iterator to the results of LayoutAnalysis and/or Recognize. + * The returned iterator must be deleted after use. + * WARNING! This class points to data held within the TessBaseAPI class, and + * therefore can only be used while the TessBaseAPI class still exists and + * has not been subjected to a call of Init, SetImage, Recognize, Clear, End + * DetectOS, or anything else that changes the internal PAGE_RES. + */ + MutableIterator* GetMutableIterator(); + + /** + * The recognized text is returned as a char* which is coded + * as UTF8 and must be freed with the delete [] operator. + */ + char* GetUTF8Text(); + + /** + * Make a HTML-formatted string with hOCR markup from the internal + * data structures. + * page_number is 0-based but will appear in the output as 1-based. + * monitor can be used to + * cancel the recognition + * receive progress callbacks + * Returned string must be freed with the delete [] operator. + */ + char* GetHOCRText(ETEXT_DESC* monitor, int page_number); + + /** + * Make a HTML-formatted string with hOCR markup from the internal + * data structures. + * page_number is 0-based but will appear in the output as 1-based. + * Returned string must be freed with the delete [] operator. + */ + char* GetHOCRText(int page_number); + + /** + * Make an XML-formatted string with Alto markup from the internal + * data structures. + */ + char* GetAltoText(ETEXT_DESC* monitor, int page_number); + + + /** + * Make an XML-formatted string with Alto markup from the internal + * data structures. + */ + char* GetAltoText(int page_number); + + /** + * Make a TSV-formatted string from the internal data structures. + * page_number is 0-based but will appear in the output as 1-based. + * Returned string must be freed with the delete [] operator. + */ + char* GetTSVText(int page_number); + + /** + * Make a box file for LSTM training from the internal data structures. + * Constructs coordinates in the original image - not just the rectangle. + * page_number is a 0-based page index that will appear in the box file. + * Returned string must be freed with the delete [] operator. 
+ */ + char* GetLSTMBoxText(int page_number); + + /** + * The recognized text is returned as a char* which is coded in the same + * format as a box file used in training. + * Constructs coordinates in the original image - not just the rectangle. + * page_number is a 0-based page index that will appear in the box file. + * Returned string must be freed with the delete [] operator. + */ + char* GetBoxText(int page_number); + + /** + * The recognized text is returned as a char* which is coded in the same + * format as a WordStr box file used in training. + * page_number is a 0-based page index that will appear in the box file. + * Returned string must be freed with the delete [] operator. + */ + char* GetWordStrBoxText(int page_number); + + /** + * The recognized text is returned as a char* which is coded + * as UNLV format Latin-1 with specific reject and suspect codes. + * Returned string must be freed with the delete [] operator. + */ + char* GetUNLVText(); + + /** + * Detect the orientation of the input image and apparent script (alphabet). + * orient_deg is the detected clockwise rotation of the input image in degrees + * (0, 90, 180, 270) + * orient_conf is the confidence (15.0 is reasonably confident) + * script_name is an ASCII string, the name of the script, e.g. "Latin" + * script_conf is confidence level in the script + * Returns true on success and writes values to each parameter as an output + */ + bool DetectOrientationScript(int* orient_deg, float* orient_conf, + const char** script_name, float* script_conf); + + /** + * The recognized text is returned as a char* which is coded + * as UTF8 and must be freed with the delete [] operator. + * page_number is a 0-based page index that will appear in the osd file. + */ + char* GetOsdText(int page_number); + + /** Returns the (average) confidence value between 0 and 100. */ + int MeanTextConf(); + /** + * Returns all word confidences (between 0 and 100) in an array, terminated + * by -1. The calling function must delete [] after use. + * The number of confidences should correspond to the number of space- + * delimited words in GetUTF8Text. + */ + int* AllWordConfidences(); + +#ifndef DISABLED_LEGACY_ENGINE + /** + * Applies the given word to the adaptive classifier if possible. + * The word must be SPACE-DELIMITED UTF-8 - l i k e t h i s , so it can + * tell the boundaries of the graphemes. + * Assumes that SetImage/SetRectangle have been used to set the image + * to the given word. The mode arg should be PSM_SINGLE_WORD or + * PSM_CIRCLE_WORD, as that will be used to control layout analysis. + * The currently set PageSegMode is preserved. + * Returns false if adaption was not possible for some reason. + */ + bool AdaptToWordStr(PageSegMode mode, const char* wordstr); +#endif // ndef DISABLED_LEGACY_ENGINE + + /** + * Free up recognition results and any stored image data, without actually + * freeing any recognition data that would be time-consuming to reload. + * Afterwards, you must call SetImage or TesseractRect before doing + * any Recognize or Get* operation. + */ + void Clear(); + + /** + * Close down tesseract and free up all memory. End() is equivalent to + * destructing and reconstructing your TessBaseAPI. + * Once End() has been used, none of the other API functions may be used + * other than Init and anything declared above it in the class definition. + */ + void End(); + + /** + * Clear any library-level memory caches. 
+ * There are a variety of expensive-to-load constant data structures (mostly + * language dictionaries) that are cached globally -- surviving the Init() + * and End() of individual TessBaseAPI's. This function allows the clearing + * of these caches. + **/ + static void ClearPersistentCache(); + + /** + * Check whether a word is valid according to Tesseract's language model + * @return 0 if the word is invalid, non-zero if valid. + * @warning temporary! This function will be removed from here and placed + * in a separate API at some future time. + */ + int IsValidWord(const char *word); + // Returns true if utf8_character is defined in the UniCharset. + bool IsValidCharacter(const char *utf8_character); + + + bool GetTextDirection(int* out_offset, float* out_slope); + + /** Sets Dict::letter_is_okay_ function to point to the given function. */ + void SetDictFunc(DictFunc f); + + /** Sets Dict::probability_in_context_ function to point to the given + * function. + */ + void SetProbabilityInContextFunc(ProbabilityInContextFunc f); + + /** + * Estimates the Orientation And Script of the image. + * @return true if the image was processed successfully. + */ + bool DetectOS(OSResults*); + + /** + * Return text orientation of each block as determined by an earlier run + * of layout analysis. + */ + void GetBlockTextOrientations(int** block_orientation, + bool** vertical_writing); + + + #ifndef DISABLED_LEGACY_ENGINE + + /** Sets Wordrec::fill_lattice_ function to point to the given function. */ + void SetFillLatticeFunc(FillLatticeFunc f); + + /** Find lines from the image making the BLOCK_LIST. */ + BLOCK_LIST* FindLinesCreateBlockList(); + + /** + * Delete a block list. + * This is to keep BLOCK_LIST pointer opaque + * and let go of including the other headers. + */ + static void DeleteBlockList(BLOCK_LIST* block_list); + + /** Returns a ROW object created from the input row specification. */ + static ROW *MakeTessOCRRow(float baseline, float xheight, + float descender, float ascender); + + /** Returns a TBLOB corresponding to the entire input image. */ + static TBLOB *MakeTBLOB(Pix *pix); + + /** + * This method baseline normalizes a TBLOB in-place. The input row is used + * for normalization. The denorm is an optional parameter in which the + * normalization-antidote is returned. + */ + static void NormalizeTBLOB(TBLOB *tblob, ROW *row, bool numeric_mode); + + /** This method returns the features associated with the input image. */ + void GetFeaturesForBlob(TBLOB* blob, INT_FEATURE_STRUCT* int_features, + int* num_features, int* feature_outline_index); + + /** + * This method returns the row to which a box of specified dimensions would + * belong. If no good match is found, it returns nullptr. + */ + static ROW* FindRowForBox(BLOCK_LIST* blocks, int left, int top, + int right, int bottom); + + /** + * Method to run adaptive classifier on a blob. + * It returns at max num_max_matches results. + */ + void RunAdaptiveClassifier(TBLOB* blob, + int num_max_matches, + int* unichar_ids, + float* ratings, + int* num_matches_returned); +#endif // ndef DISABLED_LEGACY_ENGINE + + /** This method returns the string form of the specified unichar. */ + const char* GetUnichar(int unichar_id); + + /** Return the pointer to the i-th dawg loaded into tesseract_ object. */ + const Dawg *GetDawg(int i) const; + + /** Return the number of dawgs loaded into tesseract_ object. 
*/ + int NumDawgs() const; + + Tesseract* tesseract() const { return tesseract_; } + + OcrEngineMode oem() const { return last_oem_requested_; } + + void InitTruthCallback(TruthCallback *cb) { truth_cb_ = cb; } + + void set_min_orientation_margin(double margin); + /* @} */ + + protected: + + /** Common code for setting the image. Returns true if Init has been called. */ + TESS_LOCAL bool InternalSetImage(); + + /** + * Run the thresholder to make the thresholded image. If pix is not nullptr, + * the source is thresholded to pix instead of the internal IMAGE. + */ + TESS_LOCAL virtual bool Threshold(Pix** pix); + + /** + * Find lines from the image making the BLOCK_LIST. + * @return 0 on success. + */ + TESS_LOCAL int FindLines(); + + /** Delete the pageres and block list ready for a new page. */ + void ClearResults(); + + /** + * Return an LTR Result Iterator -- used only for training, as we really want + * to ignore all BiDi smarts at that point. + * delete once you're done with it. + */ + TESS_LOCAL LTRResultIterator* GetLTRIterator(); + + /** + * Return the length of the output text string, as UTF8, assuming + * one newline per line and one per block, with a terminator, + * and assuming a single character reject marker for each rejected character. + * Also return the number of recognized blobs in blob_count. + */ + TESS_LOCAL int TextLength(int* blob_count); + + //// paragraphs.cpp //////////////////////////////////////////////////// + TESS_LOCAL void DetectParagraphs(bool after_text_recognition); + + #ifndef DISABLED_LEGACY_ENGINE + + /** @defgroup ocropusAddOns ocropus add-ons */ + /* @{ */ + + /** + * Adapt to recognize the current image as the given character. + * The image must be preloaded and be just an image of a single character. + */ + TESS_LOCAL void AdaptToCharacter(const char *unichar_repr, + int length, + float baseline, + float xheight, + float descender, + float ascender); + + /** Recognize text doing one pass only, using settings for a given pass. */ + TESS_LOCAL PAGE_RES* RecognitionPass1(BLOCK_LIST* block_list); + + TESS_LOCAL PAGE_RES* RecognitionPass2(BLOCK_LIST* block_list, + PAGE_RES* pass1_result); + + /** + * Extract the OCR results, costs (penalty points for uncertainty), + * and the bounding boxes of the characters. + */ + TESS_LOCAL static int TesseractExtractResult(char** text, + int** lengths, + float** costs, + int** x0, + int** y0, + int** x1, + int** y1, + PAGE_RES* page_res); + + TESS_LOCAL const PAGE_RES* GetPageRes() const { return page_res_; } + /* @} */ +#endif // ndef DISABLED_LEGACY_ENGINE + + protected: + Tesseract* tesseract_; ///< The underlying data object. + Tesseract* osd_tesseract_; ///< For orientation & script detection. + EquationDetect* equ_detect_; ///* paragraph_models_; + BLOCK_LIST* block_list_; ///< The page layout. + PAGE_RES* page_res_; ///< The page-level data. + STRING* input_file_; ///< Name used by training code. + STRING* output_file_; ///< Name used by debug code. + STRING* datapath_; ///< Current location of tessdata. + STRING* language_; ///< Last initialized language. + OcrEngineMode last_oem_requested_; ///< Last ocr language mode requested. + bool recognition_done_; ///< page_res_ contains recognition data. + TruthCallback *truth_cb_; /// fxn for setting truth_* in WERD_RES + + /** + * @defgroup ThresholderParams Thresholder Parameters + * Parameters saved from the Thresholder. Needed to rebuild coordinates. 
+ */ + /* @{ */ + int rect_left_; + int rect_top_; + int rect_width_; + int rect_height_; + int image_width_; + int image_height_; + /* @} */ + + private: + // A list of image filenames gets special consideration + bool ProcessPagesFileList(FILE *fp, + STRING *buf, + const char* retry_config, int timeout_millisec, + TessResultRenderer* renderer, + int tessedit_page_number); + // TIFF supports multipage so gets special consideration. + bool ProcessPagesMultipageTiff(const unsigned char *data, + size_t size, + const char* filename, + const char* retry_config, + int timeout_millisec, + TessResultRenderer* renderer, + int tessedit_page_number); + // There's currently no way to pass a document title from the + // Tesseract command line, and we have multiple places that choose + // to set the title to an empty string. Using a single named + // variable will hopefully reduce confusion if the situation changes + // in the future. + const char *unknown_title_ = ""; +}; // class TessBaseAPI. + +/** Escape a char string - remove &<>"' with HTML codes. */ +STRING HOcrEscape(const char* text); +} // namespace tesseract. + +#endif // TESSERACT_API_BASEAPI_H_ diff --git a/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/capi.h b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/capi.h new file mode 100644 index 00000000..8752816a --- /dev/null +++ b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/capi.h @@ -0,0 +1,630 @@ +/////////////////////////////////////////////////////////////////////// +// File: capi.h +// Description: C-API TessBaseAPI +// +// (C) Copyright 2012, Google Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +/////////////////////////////////////////////////////////////////////// + +#ifndef API_CAPI_H_ +#define API_CAPI_H_ + +#if defined(TESSERACT_API_BASEAPI_H_) && !defined(TESS_CAPI_INCLUDE_BASEAPI) +# define TESS_CAPI_INCLUDE_BASEAPI +#endif + +#ifdef TESS_CAPI_INCLUDE_BASEAPI +# include "baseapi.h" +# include "ocrclass.h" +# include "pageiterator.h" +# include "renderer.h" +# include "resultiterator.h" +#else +# include +# include +# include "platform.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef TESS_CALL +# if defined(WIN32) +# define TESS_CALL __cdecl +# else +# define TESS_CALL +# endif +#endif + +#ifndef BOOL +# define BOOL int +# define TRUE 1 +# define FALSE 0 +#endif + +#ifdef TESS_CAPI_INCLUDE_BASEAPI +typedef tesseract::TessResultRenderer TessResultRenderer; +typedef tesseract::TessTextRenderer TessTextRenderer; +typedef tesseract::TessHOcrRenderer TessHOcrRenderer; +typedef tesseract::TessAltoRenderer TessAltoRenderer; +typedef tesseract::TessTsvRenderer TessTsvRenderer; +typedef tesseract::TessPDFRenderer TessPDFRenderer; +typedef tesseract::TessUnlvRenderer TessUnlvRenderer; +typedef tesseract::TessBoxTextRenderer TessBoxTextRenderer; +typedef tesseract::TessWordStrBoxRenderer TessWordStrBoxRenderer; +typedef tesseract::TessLSTMBoxRenderer TessLSTMBoxRenderer; +typedef tesseract::TessBaseAPI TessBaseAPI; +typedef tesseract::PageIterator TessPageIterator; +typedef tesseract::ResultIterator TessResultIterator; +typedef tesseract::MutableIterator TessMutableIterator; +typedef tesseract::ChoiceIterator TessChoiceIterator; +typedef tesseract::OcrEngineMode TessOcrEngineMode; +typedef tesseract::PageSegMode TessPageSegMode; +typedef tesseract::ImageThresholder TessImageThresholder; +typedef tesseract::PageIteratorLevel TessPageIteratorLevel; +typedef tesseract::DictFunc TessDictFunc; +typedef tesseract::ProbabilityInContextFunc TessProbabilityInContextFunc; +// typedef tesseract::ParamsModelClassifyFunc TessParamsModelClassifyFunc; +typedef tesseract::FillLatticeFunc TessFillLatticeFunc; +typedef tesseract::Dawg TessDawg; +typedef tesseract::TruthCallback TessTruthCallback; +typedef tesseract::Orientation TessOrientation; +typedef tesseract::ParagraphJustification TessParagraphJustification; +typedef tesseract::WritingDirection TessWritingDirection; +typedef tesseract::TextlineOrder TessTextlineOrder; +typedef PolyBlockType TessPolyBlockType; +#else +typedef struct TessResultRenderer TessResultRenderer; +typedef struct TessTextRenderer TessTextRenderer; +typedef struct TessHOcrRenderer TessHOcrRenderer; +typedef struct TessPDFRenderer TessPDFRenderer; +typedef struct TessUnlvRenderer TessUnlvRenderer; +typedef struct TessBoxTextRenderer TessBoxTextRenderer; +typedef struct TessBaseAPI TessBaseAPI; +typedef struct TessPageIterator TessPageIterator; +typedef struct TessResultIterator TessResultIterator; +typedef struct TessMutableIterator TessMutableIterator; +typedef struct TessChoiceIterator TessChoiceIterator; +typedef enum TessOcrEngineMode { + OEM_TESSERACT_ONLY, + OEM_LSTM_ONLY, + OEM_TESSERACT_LSTM_COMBINED, + OEM_DEFAULT +} TessOcrEngineMode; +typedef enum TessPageSegMode { + PSM_OSD_ONLY, + PSM_AUTO_OSD, + PSM_AUTO_ONLY, + PSM_AUTO, + PSM_SINGLE_COLUMN, + PSM_SINGLE_BLOCK_VERT_TEXT, + PSM_SINGLE_BLOCK, + PSM_SINGLE_LINE, + PSM_SINGLE_WORD, + PSM_CIRCLE_WORD, + PSM_SINGLE_CHAR, + PSM_SPARSE_TEXT, + PSM_SPARSE_TEXT_OSD, + PSM_RAW_LINE, + PSM_COUNT +} TessPageSegMode; +typedef enum TessPageIteratorLevel { + RIL_BLOCK, + RIL_PARA, + RIL_TEXTLINE, + 
RIL_WORD, + RIL_SYMBOL +} TessPageIteratorLevel; +typedef enum TessPolyBlockType { + PT_UNKNOWN, + PT_FLOWING_TEXT, + PT_HEADING_TEXT, + PT_PULLOUT_TEXT, + PT_EQUATION, + PT_INLINE_EQUATION, + PT_TABLE, + PT_VERTICAL_TEXT, + PT_CAPTION_TEXT, + PT_FLOWING_IMAGE, + PT_HEADING_IMAGE, + PT_PULLOUT_IMAGE, + PT_HORZ_LINE, + PT_VERT_LINE, + PT_NOISE, + PT_COUNT +} TessPolyBlockType; +typedef enum TessOrientation { + ORIENTATION_PAGE_UP, + ORIENTATION_PAGE_RIGHT, + ORIENTATION_PAGE_DOWN, + ORIENTATION_PAGE_LEFT +} TessOrientation; +typedef enum TessParagraphJustification { + JUSTIFICATION_UNKNOWN, + JUSTIFICATION_LEFT, + JUSTIFICATION_CENTER, + JUSTIFICATION_RIGHT +} TessParagraphJustification; +typedef enum TessWritingDirection { + WRITING_DIRECTION_LEFT_TO_RIGHT, + WRITING_DIRECTION_RIGHT_TO_LEFT, + WRITING_DIRECTION_TOP_TO_BOTTOM +} TessWritingDirection; +typedef enum TessTextlineOrder { + TEXTLINE_ORDER_LEFT_TO_RIGHT, + TEXTLINE_ORDER_RIGHT_TO_LEFT, + TEXTLINE_ORDER_TOP_TO_BOTTOM +} TessTextlineOrder; +typedef struct ETEXT_DESC ETEXT_DESC; +#endif + +typedef bool (*TessCancelFunc)(void* cancel_this, int words); +typedef bool (*TessProgressFunc)(ETEXT_DESC* ths, int left, int right, int top, + int bottom); + +struct Pix; +struct Boxa; +struct Pixa; + +TESS_API int MyOSD(TessBaseAPI* api); + +/* General free functions */ + +TESS_API const char* TESS_CALL TessVersion(); +TESS_API void TESS_CALL TessDeleteText(const char* text); +TESS_API void TESS_CALL TessDeleteTextArray(char** arr); +TESS_API void TESS_CALL TessDeleteIntArray(const int* arr); + +/* Renderer API */ +TESS_API TessResultRenderer* TESS_CALL +TessTextRendererCreate(const char* outputbase); +TESS_API TessResultRenderer* TESS_CALL +TessHOcrRendererCreate(const char* outputbase); +TESS_API TessResultRenderer* TESS_CALL +TessHOcrRendererCreate2(const char* outputbase, BOOL font_info); +TESS_API TessResultRenderer* TESS_CALL +TessAltoRendererCreate(const char* outputbase); +TESS_API TessResultRenderer* TESS_CALL +TessTsvRendererCreate(const char* outputbase); +TESS_API TessResultRenderer* TESS_CALL TessPDFRendererCreate( + const char* outputbase, const char* datadir, BOOL textonly); +TESS_API TessResultRenderer* TESS_CALL +TessUnlvRendererCreate(const char* outputbase); +TESS_API TessResultRenderer* TESS_CALL +TessBoxTextRendererCreate(const char* outputbase); +TESS_API TessResultRenderer* TESS_CALL +TessLSTMBoxRendererCreate(const char* outputbase); +TESS_API TessResultRenderer* TESS_CALL +TessWordStrBoxRendererCreate(const char* outputbase); + +TESS_API void TESS_CALL TessDeleteResultRenderer(TessResultRenderer* renderer); +TESS_API void TESS_CALL TessResultRendererInsert(TessResultRenderer* renderer, + TessResultRenderer* next); +TESS_API TessResultRenderer* TESS_CALL +TessResultRendererNext(TessResultRenderer* renderer); +TESS_API BOOL TESS_CALL TessResultRendererBeginDocument( + TessResultRenderer* renderer, const char* title); +TESS_API BOOL TESS_CALL TessResultRendererAddImage(TessResultRenderer* renderer, + TessBaseAPI* api); +TESS_API BOOL TESS_CALL +TessResultRendererEndDocument(TessResultRenderer* renderer); + +TESS_API const char* TESS_CALL +TessResultRendererExtention(TessResultRenderer* renderer); +TESS_API const char* TESS_CALL +TessResultRendererTitle(TessResultRenderer* renderer); +TESS_API int TESS_CALL TessResultRendererImageNum(TessResultRenderer* renderer); + +/* Base API */ + +TESS_API TessBaseAPI* TESS_CALL TessBaseAPICreate(); +TESS_API void TESS_CALL TessBaseAPIDelete(TessBaseAPI* handle); + +TESS_API size_t 
TESS_CALL TessBaseAPIGetOpenCLDevice(TessBaseAPI* handle, + void** device); + +TESS_API void TESS_CALL TessBaseAPISetInputName(TessBaseAPI* handle, + const char* name); +TESS_API const char* TESS_CALL TessBaseAPIGetInputName(TessBaseAPI* handle); + +TESS_API void TESS_CALL TessBaseAPISetInputImage(TessBaseAPI* handle, + struct Pix* pix); +TESS_API struct Pix* TESS_CALL TessBaseAPIGetInputImage(TessBaseAPI* handle); + +TESS_API int TESS_CALL TessBaseAPIGetSourceYResolution(TessBaseAPI* handle); +TESS_API const char* TESS_CALL TessBaseAPIGetDatapath(TessBaseAPI* handle); + +TESS_API void TESS_CALL TessBaseAPISetOutputName(TessBaseAPI* handle, + const char* name); + +TESS_API BOOL TESS_CALL TessBaseAPISetVariable(TessBaseAPI* handle, + const char* name, + const char* value); +TESS_API BOOL TESS_CALL TessBaseAPISetDebugVariable(TessBaseAPI* handle, + const char* name, + const char* value); + +TESS_API BOOL TESS_CALL TessBaseAPIGetIntVariable(const TessBaseAPI* handle, + const char* name, int* value); +TESS_API BOOL TESS_CALL TessBaseAPIGetBoolVariable(const TessBaseAPI* handle, + const char* name, + BOOL* value); +TESS_API BOOL TESS_CALL TessBaseAPIGetDoubleVariable(const TessBaseAPI* handle, + const char* name, + double* value); +TESS_API const char* TESS_CALL +TessBaseAPIGetStringVariable(const TessBaseAPI* handle, const char* name); + +TESS_API void TESS_CALL TessBaseAPIPrintVariables(const TessBaseAPI* handle, + FILE* fp); +TESS_API BOOL TESS_CALL TessBaseAPIPrintVariablesToFile( + const TessBaseAPI* handle, const char* filename); + +#ifdef TESS_CAPI_INCLUDE_BASEAPI + +TESS_API BOOL TESS_CALL TessBaseAPIGetVariableAsString(TessBaseAPI* handle, + const char* name, + STRING* val); + +TESS_API int TESS_CALL TessBaseAPIInit( + TessBaseAPI* handle, const char* datapath, const char* language, + TessOcrEngineMode mode, char** configs, int configs_size, + const STRING* vars_vec, size_t vars_vec_size, const STRING* vars_values, + size_t vars_values_size, BOOL set_only_init_params); + +#endif // def TESS_CAPI_INCLUDE_BASEAPI + +TESS_API int TESS_CALL TessBaseAPIInit1(TessBaseAPI* handle, + const char* datapath, + const char* language, + TessOcrEngineMode oem, char** configs, + int configs_size); +TESS_API int TESS_CALL TessBaseAPIInit2(TessBaseAPI* handle, + const char* datapath, + const char* language, + TessOcrEngineMode oem); +TESS_API int TESS_CALL TessBaseAPIInit3(TessBaseAPI* handle, + const char* datapath, + const char* language); + +TESS_API int TESS_CALL TessBaseAPIInit4( + TessBaseAPI* handle, const char* datapath, const char* language, + TessOcrEngineMode mode, char** configs, int configs_size, char** vars_vec, + char** vars_values, size_t vars_vec_size, BOOL set_only_non_debug_params); + +TESS_API const char* TESS_CALL +TessBaseAPIGetInitLanguagesAsString(const TessBaseAPI* handle); +TESS_API char** TESS_CALL +TessBaseAPIGetLoadedLanguagesAsVector(const TessBaseAPI* handle); +TESS_API char** TESS_CALL +TessBaseAPIGetAvailableLanguagesAsVector(const TessBaseAPI* handle); + +TESS_API int TESS_CALL TessBaseAPIInitLangMod(TessBaseAPI* handle, + const char* datapath, + const char* language); +TESS_API void TESS_CALL TessBaseAPIInitForAnalysePage(TessBaseAPI* handle); + +TESS_API void TESS_CALL TessBaseAPIReadConfigFile(TessBaseAPI* handle, + const char* filename); +TESS_API void TESS_CALL TessBaseAPIReadDebugConfigFile(TessBaseAPI* handle, + const char* filename); + +TESS_API void TESS_CALL TessBaseAPISetPageSegMode(TessBaseAPI* handle, + TessPageSegMode mode); +TESS_API TessPageSegMode 
TESS_CALL +TessBaseAPIGetPageSegMode(const TessBaseAPI* handle); + +TESS_API char* TESS_CALL TessBaseAPIRect(TessBaseAPI* handle, + const unsigned char* imagedata, + int bytes_per_pixel, + int bytes_per_line, int left, int top, + int width, int height); + +TESS_API void TESS_CALL TessBaseAPIClearAdaptiveClassifier(TessBaseAPI* handle); + +TESS_API void TESS_CALL TessBaseAPISetImage(TessBaseAPI* handle, + const unsigned char* imagedata, + int width, int height, + int bytes_per_pixel, + int bytes_per_line); +TESS_API void TESS_CALL TessBaseAPISetImage2(TessBaseAPI* handle, + struct Pix* pix); + +TESS_API void TESS_CALL TessBaseAPISetSourceResolution(TessBaseAPI* handle, + int ppi); + +TESS_API void TESS_CALL TessBaseAPISetRectangle(TessBaseAPI* handle, int left, + int top, int width, int height); + +#ifdef TESS_CAPI_INCLUDE_BASEAPI +TESS_API void TESS_CALL TessBaseAPISetThresholder( + TessBaseAPI* handle, TessImageThresholder* thresholder); +#endif + +TESS_API struct Pix* TESS_CALL +TessBaseAPIGetThresholdedImage(TessBaseAPI* handle); +TESS_API struct Boxa* TESS_CALL TessBaseAPIGetRegions(TessBaseAPI* handle, + struct Pixa** pixa); +TESS_API struct Boxa* TESS_CALL TessBaseAPIGetTextlines(TessBaseAPI* handle, + struct Pixa** pixa, + int** blockids); +TESS_API struct Boxa* TESS_CALL +TessBaseAPIGetTextlines1(TessBaseAPI* handle, BOOL raw_image, int raw_padding, + struct Pixa** pixa, int** blockids, int** paraids); +TESS_API struct Boxa* TESS_CALL TessBaseAPIGetStrips(TessBaseAPI* handle, + struct Pixa** pixa, + int** blockids); +TESS_API struct Boxa* TESS_CALL TessBaseAPIGetWords(TessBaseAPI* handle, + struct Pixa** pixa); +TESS_API struct Boxa* TESS_CALL +TessBaseAPIGetConnectedComponents(TessBaseAPI* handle, struct Pixa** cc); +TESS_API struct Boxa* TESS_CALL TessBaseAPIGetComponentImages( + TessBaseAPI* handle, TessPageIteratorLevel level, BOOL text_only, + struct Pixa** pixa, int** blockids); +TESS_API struct Boxa* TESS_CALL TessBaseAPIGetComponentImages1( + TessBaseAPI* handle, TessPageIteratorLevel level, BOOL text_only, + BOOL raw_image, int raw_padding, struct Pixa** pixa, int** blockids, + int** paraids); + +TESS_API int TESS_CALL +TessBaseAPIGetThresholdedImageScaleFactor(const TessBaseAPI* handle); + +TESS_API TessPageIterator* TESS_CALL +TessBaseAPIAnalyseLayout(TessBaseAPI* handle); + +TESS_API int TESS_CALL TessBaseAPIRecognize(TessBaseAPI* handle, + ETEXT_DESC* monitor); + +#ifndef DISABLED_LEGACY_ENGINE +TESS_API int TESS_CALL TessBaseAPIRecognizeForChopTest(TessBaseAPI* handle, + ETEXT_DESC* monitor); +#endif + +TESS_API BOOL TESS_CALL TessBaseAPIProcessPages(TessBaseAPI* handle, + const char* filename, + const char* retry_config, + int timeout_millisec, + TessResultRenderer* renderer); +TESS_API BOOL TESS_CALL TessBaseAPIProcessPage(TessBaseAPI* handle, + struct Pix* pix, int page_index, + const char* filename, + const char* retry_config, + int timeout_millisec, + TessResultRenderer* renderer); + +TESS_API TessResultIterator* TESS_CALL +TessBaseAPIGetIterator(TessBaseAPI* handle); +TESS_API TessMutableIterator* TESS_CALL +TessBaseAPIGetMutableIterator(TessBaseAPI* handle); + +TESS_API char* TESS_CALL TessBaseAPIGetUTF8Text(TessBaseAPI* handle); +TESS_API char* TESS_CALL TessBaseAPIGetHOCRText(TessBaseAPI* handle, + int page_number); + +TESS_API char* TESS_CALL TessBaseAPIGetAltoText(TessBaseAPI* handle, + int page_number); +TESS_API char* TESS_CALL TessBaseAPIGetTsvText(TessBaseAPI* handle, + int page_number); + +TESS_API char* TESS_CALL TessBaseAPIGetBoxText(TessBaseAPI* 
handle, + int page_number); +TESS_API char* TESS_CALL TessBaseAPIGetLSTMBoxText(TessBaseAPI* handle, + int page_number); +TESS_API char* TESS_CALL TessBaseAPIGetWordStrBoxText(TessBaseAPI* handle, + int page_number); + +TESS_API char* TESS_CALL TessBaseAPIGetUNLVText(TessBaseAPI* handle); +TESS_API int TESS_CALL TessBaseAPIMeanTextConf(TessBaseAPI* handle); + +TESS_API int* TESS_CALL TessBaseAPIAllWordConfidences(TessBaseAPI* handle); + +#ifndef DISABLED_LEGACY_ENGINE +TESS_API BOOL TESS_CALL TessBaseAPIAdaptToWordStr(TessBaseAPI* handle, + TessPageSegMode mode, + const char* wordstr); +#endif // ndef DISABLED_LEGACY_ENGINE + +TESS_API void TESS_CALL TessBaseAPIClear(TessBaseAPI* handle); +TESS_API void TESS_CALL TessBaseAPIEnd(TessBaseAPI* handle); + +TESS_API int TESS_CALL TessBaseAPIIsValidWord(TessBaseAPI* handle, + const char* word); +TESS_API BOOL TESS_CALL TessBaseAPIGetTextDirection(TessBaseAPI* handle, + int* out_offset, + float* out_slope); + +#ifdef TESS_CAPI_INCLUDE_BASEAPI + +TESS_API void TESS_CALL TessBaseAPISetDictFunc(TessBaseAPI* handle, + TessDictFunc f); + +TESS_API void TESS_CALL TessBaseAPIClearPersistentCache(TessBaseAPI* handle); + +TESS_API void TESS_CALL TessBaseAPISetProbabilityInContextFunc( + TessBaseAPI* handle, TessProbabilityInContextFunc f); + +// Call TessDeleteText(*best_script_name) to free memory allocated by this +// function +TESS_API BOOL TESS_CALL TessBaseAPIDetectOrientationScript( + TessBaseAPI* handle, int* orient_deg, float* orient_conf, + const char** script_name, float* script_conf); + +#endif // def TESS_CAPI_INCLUDE_BASEAPI + +TESS_API const char* TESS_CALL TessBaseAPIGetUnichar(TessBaseAPI* handle, + int unichar_id); + +TESS_API void TESS_CALL TessBaseAPISetMinOrientationMargin(TessBaseAPI* handle, + double margin); + +#ifdef TESS_CAPI_INCLUDE_BASEAPI + +TESS_API const TessDawg* TESS_CALL TessBaseAPIGetDawg(const TessBaseAPI* handle, + int i); + +TESS_API int TESS_CALL TessBaseAPINumDawgs(const TessBaseAPI* handle); + +TESS_API TessOcrEngineMode TESS_CALL TessBaseAPIOem(const TessBaseAPI* handle); + +TESS_API void TESS_CALL TessBaseAPIInitTruthCallback(TessBaseAPI* handle, + TessTruthCallback* cb); + +TESS_API void TESS_CALL TessBaseGetBlockTextOrientations( + TessBaseAPI* handle, int** block_orientation, bool** vertical_writing); + +#endif + +/* Page iterator */ + +TESS_API void TESS_CALL TessPageIteratorDelete(TessPageIterator* handle); + +TESS_API TessPageIterator* TESS_CALL +TessPageIteratorCopy(const TessPageIterator* handle); + +TESS_API void TESS_CALL TessPageIteratorBegin(TessPageIterator* handle); + +TESS_API BOOL TESS_CALL TessPageIteratorNext(TessPageIterator* handle, + TessPageIteratorLevel level); + +TESS_API BOOL TESS_CALL TessPageIteratorIsAtBeginningOf( + const TessPageIterator* handle, TessPageIteratorLevel level); + +TESS_API BOOL TESS_CALL TessPageIteratorIsAtFinalElement( + const TessPageIterator* handle, TessPageIteratorLevel level, + TessPageIteratorLevel element); + +TESS_API BOOL TESS_CALL TessPageIteratorBoundingBox( + const TessPageIterator* handle, TessPageIteratorLevel level, int* left, + int* top, int* right, int* bottom); + +TESS_API TessPolyBlockType TESS_CALL +TessPageIteratorBlockType(const TessPageIterator* handle); + +TESS_API struct Pix* TESS_CALL TessPageIteratorGetBinaryImage( + const TessPageIterator* handle, TessPageIteratorLevel level); + +TESS_API struct Pix* TESS_CALL TessPageIteratorGetImage( + const TessPageIterator* handle, TessPageIteratorLevel level, int padding, + struct Pix* original_image, 
int* left, int* top); + +TESS_API BOOL TESS_CALL TessPageIteratorBaseline(const TessPageIterator* handle, + TessPageIteratorLevel level, + int* x1, int* y1, int* x2, + int* y2); + +TESS_API void TESS_CALL TessPageIteratorOrientation( + TessPageIterator* handle, TessOrientation* orientation, + TessWritingDirection* writing_direction, TessTextlineOrder* textline_order, + float* deskew_angle); + +TESS_API void TESS_CALL TessPageIteratorParagraphInfo( + TessPageIterator* handle, TessParagraphJustification* justification, + BOOL* is_list_item, BOOL* is_crown, int* first_line_indent); + +/* Result iterator */ + +TESS_API void TESS_CALL TessResultIteratorDelete(TessResultIterator* handle); +TESS_API TessResultIterator* TESS_CALL +TessResultIteratorCopy(const TessResultIterator* handle); +TESS_API TessPageIterator* TESS_CALL +TessResultIteratorGetPageIterator(TessResultIterator* handle); +TESS_API const TessPageIterator* TESS_CALL +TessResultIteratorGetPageIteratorConst(const TessResultIterator* handle); +TESS_API TessChoiceIterator* TESS_CALL +TessResultIteratorGetChoiceIterator(const TessResultIterator* handle); + +TESS_API BOOL TESS_CALL TessResultIteratorNext(TessResultIterator* handle, + TessPageIteratorLevel level); +TESS_API char* TESS_CALL TessResultIteratorGetUTF8Text( + const TessResultIterator* handle, TessPageIteratorLevel level); +TESS_API float TESS_CALL TessResultIteratorConfidence( + const TessResultIterator* handle, TessPageIteratorLevel level); +TESS_API const char* TESS_CALL +TessResultIteratorWordRecognitionLanguage(const TessResultIterator* handle); +TESS_API const char* TESS_CALL TessResultIteratorWordFontAttributes( + const TessResultIterator* handle, BOOL* is_bold, BOOL* is_italic, + BOOL* is_underlined, BOOL* is_monospace, BOOL* is_serif, BOOL* is_smallcaps, + int* pointsize, int* font_id); + +TESS_API BOOL TESS_CALL +TessResultIteratorWordIsFromDictionary(const TessResultIterator* handle); +TESS_API BOOL TESS_CALL +TessResultIteratorWordIsNumeric(const TessResultIterator* handle); +TESS_API BOOL TESS_CALL +TessResultIteratorSymbolIsSuperscript(const TessResultIterator* handle); +TESS_API BOOL TESS_CALL +TessResultIteratorSymbolIsSubscript(const TessResultIterator* handle); +TESS_API BOOL TESS_CALL +TessResultIteratorSymbolIsDropcap(const TessResultIterator* handle); + +TESS_API void TESS_CALL TessChoiceIteratorDelete(TessChoiceIterator* handle); +TESS_API BOOL TESS_CALL TessChoiceIteratorNext(TessChoiceIterator* handle); +TESS_API const char* TESS_CALL +TessChoiceIteratorGetUTF8Text(const TessChoiceIterator* handle); +TESS_API float TESS_CALL +TessChoiceIteratorConfidence(const TessChoiceIterator* handle); + +/* Progress monitor */ + +TESS_API ETEXT_DESC* TESS_CALL TessMonitorCreate(); +TESS_API void TESS_CALL TessMonitorDelete(ETEXT_DESC* monitor); +TESS_API void TESS_CALL TessMonitorSetCancelFunc(ETEXT_DESC* monitor, + TessCancelFunc cancelFunc); +TESS_API void TESS_CALL TessMonitorSetCancelThis(ETEXT_DESC* monitor, + void* cancelThis); +TESS_API void* TESS_CALL TessMonitorGetCancelThis(ETEXT_DESC* monitor); +TESS_API void TESS_CALL +TessMonitorSetProgressFunc(ETEXT_DESC* monitor, TessProgressFunc progressFunc); +TESS_API int TESS_CALL TessMonitorGetProgress(ETEXT_DESC* monitor); +TESS_API void TESS_CALL TessMonitorSetDeadlineMSecs(ETEXT_DESC* monitor, + int deadline); + +#ifndef DISABLED_LEGACY_ENGINE + +# ifdef TESS_CAPI_INCLUDE_BASEAPI +TESS_API void TESS_CALL TessBaseAPISetFillLatticeFunc(TessBaseAPI* handle, + TessFillLatticeFunc f); + +TESS_API void TESS_CALL 
TessBaseAPIGetFeaturesForBlob( + TessBaseAPI* handle, TBLOB* blob, INT_FEATURE_STRUCT* int_features, + int* num_features, int* FeatureOutlineIndex); + +TESS_API ROW* TESS_CALL TessFindRowForBox(BLOCK_LIST* blocks, int left, int top, + int right, int bottom); + +TESS_API void TESS_CALL TessBaseAPIRunAdaptiveClassifier( + TessBaseAPI* handle, TBLOB* blob, int num_max_matches, int* unichar_ids, + float* ratings, int* num_matches_returned); + +TESS_API ROW* TESS_CALL TessMakeTessOCRRow(float baseline, float xheight, + float descender, float ascender); + +TESS_API TBLOB* TESS_CALL TessMakeTBLOB(Pix* pix); + +TESS_API void TESS_CALL TessNormalizeTBLOB(TBLOB* tblob, ROW* row, + BOOL numeric_mode); + +TESS_API BLOCK_LIST* TESS_CALL +TessBaseAPIFindLinesCreateBlockList(TessBaseAPI* handle); + +TESS_API void TESS_CALL TessDeleteBlockList(BLOCK_LIST* block_list); + +# endif // def TESS_CAPI_INCLUDE_BASEAPI + +#endif // ndef DISABLED_LEGACY_ENGINE + +#ifdef __cplusplus +} +#endif + +#endif // API_CAPI_H_ diff --git a/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/genericvector.h b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/genericvector.h new file mode 100644 index 00000000..49c389b1 --- /dev/null +++ b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/genericvector.h @@ -0,0 +1,1211 @@ +/////////////////////////////////////////////////////////////////////// +// File: genericvector.h +// Description: Generic vector class +// Author: Daria Antonova +// +// (C) Copyright 2007, Google Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +/////////////////////////////////////////////////////////////////////// +// +#ifndef TESSERACT_CCUTIL_GENERICVECTOR_H_ +#define TESSERACT_CCUTIL_GENERICVECTOR_H_ + +#include +#include +#include // for LONG_MAX +#include // for uint32_t +#include +#include + +#include "helpers.h" +#include "serialis.h" +#include "strngs.h" +#include "tesscallback.h" + +// Use PointerVector below in preference to GenericVector, as that +// provides automatic deletion of pointers, [De]Serialize that works, and +// sort that works. +template +class GenericVector { + public: + GenericVector() { + init(kDefaultVectorSize); + } + GenericVector(int size, const T& init_val) { + init(size); + init_to_size(size, init_val); + } + + // Copy + GenericVector(const GenericVector& other) { + this->init(other.size()); + this->operator+=(other); + } + GenericVector& operator+=(const GenericVector& other); + GenericVector& operator=(const GenericVector& other); + + ~GenericVector(); + + // Reserve some memory. + void reserve(int size); + // Double the size of the internal array. + void double_the_size(); + + // Resizes to size and sets all values to t. + void init_to_size(int size, const T& t); + // Resizes to size without any initialization. + void resize_no_init(int size) { + reserve(size); + size_used_ = size; + } + + // Return the size used. + int size() const { + return size_used_; + } + // Workaround to avoid g++ -Wsign-compare warnings. 
+ size_t unsigned_size() const { + static_assert(sizeof(size_used_) <= sizeof(size_t), + "Wow! sizeof(size_t) < sizeof(int32_t)!!"); + assert(0 <= size_used_); + return static_cast(size_used_); + } + int size_reserved() const { + return size_reserved_; + } + + int length() const { + return size_used_; + } + + // Return true if empty. + bool empty() const { + return size_used_ == 0; + } + + // Return the object from an index. + T& get(int index) const; + T& back() const; + T& operator[](int index) const; + // Returns the last object and removes it. + T pop_back(); + + // Return the index of the T object. + // This method NEEDS a compare_callback to be passed to + // set_compare_callback. + int get_index(const T& object) const; + + // Return true if T is in the array + bool contains(const T& object) const; + + // Return true if the index is valid + T contains_index(int index) const; + + // Push an element in the end of the array + int push_back(T object); + void operator+=(const T& t); + + // Push an element in the end of the array if the same + // element is not already contained in the array. + int push_back_new(const T& object); + + // Push an element in the front of the array + // Note: This function is O(n) + int push_front(const T& object); + + // Set the value at the given index + void set(const T& t, int index); + + // Insert t at the given index, push other elements to the right. + void insert(const T& t, int index); + + // Removes an element at the given index and + // shifts the remaining elements to the left. + void remove(int index); + + // Truncates the array to the given size by removing the end. + // If the current size is less, the array is not expanded. + void truncate(int size) { + if (size < size_used_) { + size_used_ = size; + } + } + + // Add a callback to be called to delete the elements when the array took + // their ownership. + void set_clear_callback(TessCallback1* cb) { + clear_cb_ = cb; + } + + // Add a callback to be called to compare the elements when needed (contains, + // get_id, ...) + void set_compare_callback(TessResultCallback2* cb) { + compare_cb_ = cb; + } + + // Clear the array, calling the clear callback function if any. + // All the owned callbacks are also deleted. + // If you don't want the callbacks to be deleted, before calling clear, set + // the callback to nullptr. + void clear(); + + // Delete objects pointed to by data_[i] + void delete_data_pointers(); + + // This method clears the current object, then, does a shallow copy of + // its argument, and finally invalidates its argument. + // Callbacks are moved to the current object; + void move(GenericVector* from); + + // Read/Write the array to a file. This does _NOT_ read/write the callbacks. + // The callback given must be permanent since they will be called more than + // once. The given callback will be deleted at the end. + // If the callbacks are nullptr, then the data is simply read/written using + // fread (and swapping)/fwrite. + // Returns false on error or if the callback returns false. + // DEPRECATED. Use [De]Serialize[Classes] instead. + bool write(FILE* f, TessResultCallback2* cb) const; + bool read(tesseract::TFile* f, + TessResultCallback2* cb); + // Writes a vector of simple types to the given file. Assumes that bitwise + // read/write of T will work. Returns false in case of error. + // TODO(rays) Change all callers to use TFile and remove deprecated methods. 
+ bool Serialize(FILE* fp) const; + bool Serialize(tesseract::TFile* fp) const; + // Reads a vector of simple types from the given file. Assumes that bitwise + // read/write will work with ReverseN according to sizeof(T). + // Returns false in case of error. + // If swap is true, assumes a big/little-endian swap is needed. + // TFile is assumed to know about swapping. + bool DeSerialize(bool swap, FILE* fp); + bool DeSerialize(tesseract::TFile* fp); + // Skips the deserialization of the vector. + static bool SkipDeSerialize(tesseract::TFile* fp); + // Writes a vector of classes to the given file. Assumes the existence of + // bool T::Serialize(FILE* fp) const that returns false in case of error. + // Returns false in case of error. + bool SerializeClasses(FILE* fp) const; + bool SerializeClasses(tesseract::TFile* fp) const; + // Reads a vector of classes from the given file. Assumes the existence of + // bool T::Deserialize(bool swap, FILE* fp) that returns false in case of + // error. Also needs T::T() and T::T(constT&), as init_to_size is used in + // this function. Returns false in case of error. + // If swap is true, assumes a big/little-endian swap is needed. + bool DeSerializeClasses(bool swap, FILE* fp); + bool DeSerializeClasses(tesseract::TFile* fp); + // Calls SkipDeSerialize on the elements of the vector. + static bool SkipDeSerializeClasses(tesseract::TFile* fp); + + // Allocates a new array of double the current_size, copies over the + // information from data to the new location, deletes data and returns + // the pointed to the new larger array. + // This function uses memcpy to copy the data, instead of invoking + // operator=() for each element like double_the_size() does. + static T* double_the_size_memcpy(int current_size, T* data) { + T* data_new = new T[current_size * 2]; + memcpy(data_new, data, sizeof(T) * current_size); + delete[] data; + return data_new; + } + + // Reverses the elements of the vector. + void reverse() { + for (int i = 0; i < size_used_ / 2; ++i) { + Swap(&data_[i], &data_[size_used_ - 1 - i]); + } + } + + // Sorts the members of this vector using the less than comparator (cmp_lt), + // which compares the values. Useful for GenericVectors to primitive types. + // Will not work so great for pointers (unless you just want to sort some + // pointers). You need to provide a specialization to sort_cmp to use + // your type. + void sort(); + + // Sort the array into the order defined by the qsort function comparator. + // The comparator function is as defined by qsort, ie. it receives pointers + // to two Ts and returns negative if the first element is to appear earlier + // in the result and positive if it is to appear later, with 0 for equal. + void sort(int (*comparator)(const void*, const void*)) { + qsort(data_, size_used_, sizeof(*data_), comparator); + } + + // Searches the array (assuming sorted in ascending order, using sort()) for + // an element equal to target and returns true if it is present. + // Use binary_search to get the index of target, or its nearest candidate. + bool bool_binary_search(const T& target) const { + int index = binary_search(target); + if (index >= size_used_) { + return false; + } + return data_[index] == target; + } + // Searches the array (assuming sorted in ascending order, using sort()) for + // an element equal to target and returns the index of the best candidate. + // The return value is conceptually the largest index i such that + // data_[i] <= target or 0 if target < the whole vector. 
+ // NOTE that this function uses operator> so really the return value is + // the largest index i such that data_[i] > target is false. + int binary_search(const T& target) const { + int bottom = 0; + int top = size_used_; + while (top - bottom > 1) { + int middle = (bottom + top) / 2; + if (data_[middle] > target) { + top = middle; + } else { + bottom = middle; + } + } + return bottom; + } + + // Compact the vector by deleting elements using operator!= on basic types. + // The vector must be sorted. + void compact_sorted() { + if (size_used_ == 0) { + return; + } + + // First element is in no matter what, hence the i = 1. + int last_write = 0; + for (int i = 1; i < size_used_; ++i) { + // Finds next unique item and writes it. + if (data_[last_write] != data_[i]) { + data_[++last_write] = data_[i]; + } + } + // last_write is the index of a valid data cell, so add 1. + size_used_ = last_write + 1; + } + + // Compact the vector by deleting elements for which delete_cb returns + // true. delete_cb is a permanent callback and will be deleted. + void compact(TessResultCallback1* delete_cb) { + int new_size = 0; + int old_index = 0; + // Until the callback returns true, the elements stay the same. + while (old_index < size_used_ && !delete_cb->Run(old_index++)) { + ++new_size; + } + // Now just copy anything else that gets false from delete_cb. + for (; old_index < size_used_; ++old_index) { + if (!delete_cb->Run(old_index)) { + data_[new_size++] = data_[old_index]; + } + } + size_used_ = new_size; + delete delete_cb; + } + + T dot_product(const GenericVector& other) const { + T result = static_cast(0); + for (int i = std::min(size_used_, other.size_used_) - 1; i >= 0; --i) { + result += data_[i] * other.data_[i]; + } + return result; + } + + // Returns the index of what would be the target_index_th item in the array + // if the members were sorted, without actually sorting. Members are + // shuffled around, but it takes O(n) time. + // NOTE: uses operator< and operator== on the members. + int choose_nth_item(int target_index) { + // Make sure target_index is legal. + if (target_index < 0) { + target_index = 0; // ensure legal + } else if (target_index >= size_used_) { + target_index = size_used_ - 1; + } + unsigned int seed = 1; + return choose_nth_item(target_index, 0, size_used_, &seed); + } + + // Swaps the elements with the given indices. + void swap(int index1, int index2) { + if (index1 != index2) { + T tmp = data_[index1]; + data_[index1] = data_[index2]; + data_[index2] = tmp; + } + } + // Returns true if all elements of *this are within the given range. + // Only uses operator< + bool WithinBounds(const T& rangemin, const T& rangemax) const { + for (int i = 0; i < size_used_; ++i) { + if (data_[i] < rangemin || rangemax < data_[i]) { + return false; + } + } + return true; + } + + protected: + // Internal recursive version of choose_nth_item. + int choose_nth_item(int target_index, int start, int end, unsigned int* seed); + + // Init the object, allocating size memory. + void init(int size); + + // We are assuming that the object generally placed in the + // vector are small enough that for efficiency it makes sense + // to start with a larger initial size. 
+ static const int kDefaultVectorSize = 4; + int32_t size_used_{}; + int32_t size_reserved_{}; + T* data_; + TessCallback1* clear_cb_; + // Mutable because Run method is not const + mutable TessResultCallback2* compare_cb_; +}; + +namespace tesseract { + +// The default FileReader loads the whole file into the vector of char, +// returning false on error. +inline bool LoadDataFromFile(const char* filename, GenericVector* data) { + bool result = false; + FILE* fp = fopen(filename, "rb"); + if (fp != nullptr) { + fseek(fp, 0, SEEK_END); + auto size = std::ftell(fp); + fseek(fp, 0, SEEK_SET); + // Trying to open a directory on Linux sets size to LONG_MAX. Catch it here. + if (size > 0 && size < LONG_MAX) { + // reserve an extra byte in case caller wants to append a '\0' character + data->reserve(size + 1); + data->resize_no_init(size); + result = static_cast(fread(&(*data)[0], 1, size, fp)) == size; + } + fclose(fp); + } + return result; +} + +inline bool LoadDataFromFile(const STRING& filename, + GenericVector* data) { + return LoadDataFromFile(filename.string(), data); +} + +// The default FileWriter writes the vector of char to the filename file, +// returning false on error. +inline bool SaveDataToFile(const GenericVector& data, + const STRING& filename) { + FILE* fp = fopen(filename.string(), "wb"); + if (fp == nullptr) { + return false; + } + bool result = + static_cast(fwrite(&data[0], 1, data.size(), fp)) == data.size(); + fclose(fp); + return result; +} + +template +bool cmp_eq(T const& t1, T const& t2) { + return t1 == t2; +} + +// Used by sort() +// return < 0 if t1 < t2 +// return 0 if t1 == t2 +// return > 0 if t1 > t2 +template +int sort_cmp(const void* t1, const void* t2) { + const T* a = static_cast(t1); + const T* b = static_cast(t2); + if (*a < *b) { + return -1; + } + if (*b < *a) { + return 1; + } + return 0; +} + +// Used by PointerVector::sort() +// return < 0 if t1 < t2 +// return 0 if t1 == t2 +// return > 0 if t1 > t2 +template +int sort_ptr_cmp(const void* t1, const void* t2) { + const T* a = *static_cast(t1); + const T* b = *static_cast(t2); + if (*a < *b) { + return -1; + } + if (*b < *a) { + return 1; + } + return 0; +} + +// Subclass for a vector of pointers. Use in preference to GenericVector +// as it provides automatic deletion and correct serialization, with the +// corollary that all copy operations are deep copies of the pointed-to objects. +template +class PointerVector : public GenericVector { + public: + PointerVector() : GenericVector() {} + explicit PointerVector(int size) : GenericVector(size) {} + ~PointerVector() { + // Clear must be called here, even though it is called again by the base, + // as the base will call the wrong clear. + clear(); + } + // Copy must be deep, as the pointers will be automatically deleted on + // destruction. + PointerVector(const PointerVector& other) : GenericVector(other) { + this->init(other.size()); + this->operator+=(other); + } + PointerVector& operator+=(const PointerVector& other) { + this->reserve(this->size_used_ + other.size_used_); + for (int i = 0; i < other.size(); ++i) { + this->push_back(new T(*other.data_[i])); + } + return *this; + } + + PointerVector& operator=(const PointerVector& other) { + if (&other != this) { + this->truncate(0); + this->operator+=(other); + } + return *this; + } + + // Removes an element at the given index and + // shifts the remaining elements to the left. 
+ void remove(int index) { + delete GenericVector::data_[index]; + GenericVector::remove(index); + } + + // Truncates the array to the given size by removing the end. + // If the current size is less, the array is not expanded. + void truncate(int size) { + for (int i = size; i < GenericVector::size_used_; ++i) { + delete GenericVector::data_[i]; + } + GenericVector::truncate(size); + } + + // Compact the vector by deleting elements for which delete_cb returns + // true. delete_cb is a permanent callback and will be deleted. + void compact(TessResultCallback1* delete_cb) { + int new_size = 0; + int old_index = 0; + // Until the callback returns true, the elements stay the same. + while (old_index < GenericVector::size_used_ && + !delete_cb->Run(GenericVector::data_[old_index++])) { + ++new_size; + } + // Now just copy anything else that gets false from delete_cb. + for (; old_index < GenericVector::size_used_; ++old_index) { + if (!delete_cb->Run(GenericVector::data_[old_index])) { + GenericVector::data_[new_size++] = + GenericVector::data_[old_index]; + } else { + delete GenericVector::data_[old_index]; + } + } + GenericVector::size_used_ = new_size; + delete delete_cb; + } + + // Clear the array, calling the clear callback function if any. + // All the owned callbacks are also deleted. + // If you don't want the callbacks to be deleted, before calling clear, set + // the callback to nullptr. + void clear() { + GenericVector::delete_data_pointers(); + GenericVector::clear(); + } + + // Writes a vector of (pointers to) classes to the given file. Assumes the + // existence of bool T::Serialize(FILE*) const that returns false in case of + // error. There is no Serialize for simple types, as you would have a + // normal GenericVector of those. + // Returns false in case of error. + bool Serialize(FILE* fp) const { + int32_t used = GenericVector::size_used_; + if (fwrite(&used, sizeof(used), 1, fp) != 1) { + return false; + } + for (int i = 0; i < used; ++i) { + int8_t non_null = GenericVector::data_[i] != nullptr; + if (fwrite(&non_null, sizeof(non_null), 1, fp) != 1) { + return false; + } + if (non_null && !GenericVector::data_[i]->Serialize(fp)) { + return false; + } + } + return true; + } + bool Serialize(TFile* fp) const { + int32_t used = GenericVector::size_used_; + if (fp->FWrite(&used, sizeof(used), 1) != 1) { + return false; + } + for (int i = 0; i < used; ++i) { + int8_t non_null = GenericVector::data_[i] != nullptr; + if (fp->FWrite(&non_null, sizeof(non_null), 1) != 1) { + return false; + } + if (non_null && !GenericVector::data_[i]->Serialize(fp)) { + return false; + } + } + return true; + } + // Reads a vector of (pointers to) classes to the given file. Assumes the + // existence of bool T::DeSerialize(bool, Tfile*) const that returns false in + // case of error. There is no Serialize for simple types, as you would have a + // normal GenericVector of those. + // If swap is true, assumes a big/little-endian swap is needed. + // Also needs T::T(), as new T is used in this function. + // Returns false in case of error. + bool DeSerialize(bool swap, FILE* fp) { + uint32_t reserved; + if (fread(&reserved, sizeof(reserved), 1, fp) != 1) { + return false; + } + if (swap) { + Reverse32(&reserved); + } + // Arbitrarily limit the number of elements to protect against bad data. 
+ assert(reserved <= UINT16_MAX); + if (reserved > UINT16_MAX) { + return false; + } + GenericVector::reserve(reserved); + truncate(0); + for (uint32_t i = 0; i < reserved; ++i) { + int8_t non_null; + if (fread(&non_null, sizeof(non_null), 1, fp) != 1) { + return false; + } + T* item = nullptr; + if (non_null != 0) { + item = new T; + if (!item->DeSerialize(swap, fp)) { + delete item; + return false; + } + this->push_back(item); + } else { + // Null elements should keep their place in the vector. + this->push_back(nullptr); + } + } + return true; + } + bool DeSerialize(TFile* fp) { + int32_t reserved; + if (!DeSerializeSize(fp, &reserved)) { + return false; + } + GenericVector::reserve(reserved); + truncate(0); + for (int i = 0; i < reserved; ++i) { + if (!DeSerializeElement(fp)) { + return false; + } + } + return true; + } + // Enables deserialization of a selection of elements. Note that in order to + // retain the integrity of the stream, the caller must call some combination + // of DeSerializeElement and DeSerializeSkip of the exact number returned in + // *size, assuming a true return. + static bool DeSerializeSize(TFile* fp, int32_t* size) { + return fp->FReadEndian(size, sizeof(*size), 1) == 1; + } + // Reads and appends to the vector the next element of the serialization. + bool DeSerializeElement(TFile* fp) { + int8_t non_null; + if (fp->FRead(&non_null, sizeof(non_null), 1) != 1) { + return false; + } + T* item = nullptr; + if (non_null != 0) { + item = new T; + if (!item->DeSerialize(fp)) { + delete item; + return false; + } + this->push_back(item); + } else { + // Null elements should keep their place in the vector. + this->push_back(nullptr); + } + return true; + } + // Skips the next element of the serialization. + static bool DeSerializeSkip(TFile* fp) { + int8_t non_null; + if (fp->FRead(&non_null, sizeof(non_null), 1) != 1) { + return false; + } + if (non_null != 0) { + if (!T::SkipDeSerialize(fp)) { + return false; + } + } + return true; + } + + // Sorts the items pointed to by the members of this vector using + // t::operator<(). + void sort() { + this->GenericVector::sort(&sort_ptr_cmp); + } +}; + +} // namespace tesseract + +// A useful vector that uses operator== to do comparisons. +template +class GenericVectorEqEq : public GenericVector { + public: + GenericVectorEqEq() { + GenericVector::set_compare_callback( + NewPermanentTessCallback(tesseract::cmp_eq)); + } + explicit GenericVectorEqEq(int size) : GenericVector(size) { + GenericVector::set_compare_callback( + NewPermanentTessCallback(tesseract::cmp_eq)); + } +}; + +template +void GenericVector::init(int size) { + size_used_ = 0; + if (size <= 0) { + data_ = nullptr; + size_reserved_ = 0; + } else { + if (size < kDefaultVectorSize) { + size = kDefaultVectorSize; + } + data_ = new T[size]; + size_reserved_ = size; + } + clear_cb_ = nullptr; + compare_cb_ = nullptr; +} + +template +GenericVector::~GenericVector() { + clear(); +} + +// Reserve some memory. If the internal array contains elements, they are +// copied. 
+template +void GenericVector::reserve(int size) { + if (size_reserved_ >= size || size <= 0) { + return; + } + if (size < kDefaultVectorSize) { + size = kDefaultVectorSize; + } + T* new_array = new T[size]; + for (int i = 0; i < size_used_; ++i) { + new_array[i] = data_[i]; + } + delete[] data_; + data_ = new_array; + size_reserved_ = size; +} + +template +void GenericVector::double_the_size() { + if (size_reserved_ == 0) { + reserve(kDefaultVectorSize); + } else { + reserve(2 * size_reserved_); + } +} + +// Resizes to size and sets all values to t. +template +void GenericVector::init_to_size(int size, const T& t) { + reserve(size); + size_used_ = size; + for (int i = 0; i < size; ++i) { + data_[i] = t; + } +} + +// Return the object from an index. +template +T& GenericVector::get(int index) const { + assert(index >= 0 && index < size_used_); + return data_[index]; +} + +template +T& GenericVector::operator[](int index) const { + assert(index >= 0 && index < size_used_); + return data_[index]; +} + +template +T& GenericVector::back() const { + assert(size_used_ > 0); + return data_[size_used_ - 1]; +} +// Returns the last object and removes it. +template +T GenericVector::pop_back() { + assert(size_used_ > 0); + return data_[--size_used_]; +} + +// Return the object from an index. +template +void GenericVector::set(const T& t, int index) { + assert(index >= 0 && index < size_used_); + data_[index] = t; +} + +// Shifts the rest of the elements to the right to make +// space for the new elements and inserts the given element +// at the specified index. +template +void GenericVector::insert(const T& t, int index) { + assert(index >= 0 && index <= size_used_); + if (size_reserved_ == size_used_) { + double_the_size(); + } + for (int i = size_used_; i > index; --i) { + data_[i] = data_[i - 1]; + } + data_[index] = t; + size_used_++; +} + +// Removes an element at the given index and +// shifts the remaining elements to the left. +template +void GenericVector::remove(int index) { + assert(index >= 0 && index < size_used_); + for (int i = index; i < size_used_ - 1; ++i) { + data_[i] = data_[i + 1]; + } + size_used_--; +} + +// Return true if the index is valindex +template +T GenericVector::contains_index(int index) const { + return index >= 0 && index < size_used_; +} + +// Return the index of the T object. 
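+// Note: get_index() and contains() need a compare callback to be installed
+// first. A minimal sketch (illustrative, not upstream text), assuming an int
+// vector:
+//   GenericVector<int> v;
+//   v.set_compare_callback(NewPermanentTessCallback(tesseract::cmp_eq<int>));
+//   v.push_back(42);
+//   int where = v.get_index(42);   // 0 here; -1 when the value is absent
+// GenericVectorEqEq (above) installs exactly this callback in its constructor.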
+template +int GenericVector::get_index(const T& object) const { + for (int i = 0; i < size_used_; ++i) { + assert(compare_cb_ != nullptr); + if (compare_cb_->Run(object, data_[i])) { + return i; + } + } + return -1; +} + +// Return true if T is in the array +template +bool GenericVector::contains(const T& object) const { + return get_index(object) != -1; +} + +// Add an element in the array +template +int GenericVector::push_back(T object) { + int index = 0; + if (size_used_ == size_reserved_) { + double_the_size(); + } + index = size_used_++; + data_[index] = object; + return index; +} + +template +int GenericVector::push_back_new(const T& object) { + int index = get_index(object); + if (index >= 0) { + return index; + } + return push_back(object); +} + +// Add an element in the array (front) +template +int GenericVector::push_front(const T& object) { + if (size_used_ == size_reserved_) { + double_the_size(); + } + for (int i = size_used_; i > 0; --i) { + data_[i] = data_[i - 1]; + } + data_[0] = object; + ++size_used_; + return 0; +} + +template +void GenericVector::operator+=(const T& t) { + push_back(t); +} + +template +GenericVector& GenericVector::operator+=(const GenericVector& other) { + this->reserve(size_used_ + other.size_used_); + for (int i = 0; i < other.size(); ++i) { + this->operator+=(other.data_[i]); + } + return *this; +} + +template +GenericVector& GenericVector::operator=(const GenericVector& other) { + if (&other != this) { + this->truncate(0); + this->operator+=(other); + } + return *this; +} + +// Clear the array, calling the callback function if any. +template +void GenericVector::clear() { + if (size_reserved_ > 0 && clear_cb_ != nullptr) { + for (int i = 0; i < size_used_; ++i) { + clear_cb_->Run(data_[i]); + } + } + delete[] data_; + data_ = nullptr; + size_used_ = 0; + size_reserved_ = 0; + delete clear_cb_; + clear_cb_ = nullptr; + delete compare_cb_; + compare_cb_ = nullptr; +} + +template +void GenericVector::delete_data_pointers() { + for (int i = 0; i < size_used_; ++i) { + delete data_[i]; + } +} + +template +bool GenericVector::write( + FILE* f, TessResultCallback2* cb) const { + if (fwrite(&size_reserved_, sizeof(size_reserved_), 1, f) != 1) { + return false; + } + if (fwrite(&size_used_, sizeof(size_used_), 1, f) != 1) { + return false; + } + if (cb != nullptr) { + for (int i = 0; i < size_used_; ++i) { + if (!cb->Run(f, data_[i])) { + delete cb; + return false; + } + } + delete cb; + } else { + if (fwrite(data_, sizeof(T), size_used_, f) != unsigned_size()) { + return false; + } + } + return true; +} + +template +bool GenericVector::read( + tesseract::TFile* f, TessResultCallback2* cb) { + int32_t reserved; + if (f->FReadEndian(&reserved, sizeof(reserved), 1) != 1) { + return false; + } + reserve(reserved); + if (f->FReadEndian(&size_used_, sizeof(size_used_), 1) != 1) { + return false; + } + if (cb != nullptr) { + for (int i = 0; i < size_used_; ++i) { + if (!cb->Run(f, data_ + i)) { + delete cb; + return false; + } + } + delete cb; + } else { + if (f->FReadEndian(data_, sizeof(T), size_used_) != size_used_) { + return false; + } + } + return true; +} + +// Writes a vector of simple types to the given file. Assumes that bitwise +// read/write of T will work. Returns false in case of error. 
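+// A minimal round-trip sketch (illustrative; the file name is hypothetical):
+//   GenericVector<int> v;
+//   v.push_back(1);
+//   v.push_back(2);
+//   FILE* out = fopen("ints.bin", "wb");
+//   v.Serialize(out);
+//   fclose(out);
+//   GenericVector<int> w;
+//   FILE* in = fopen("ints.bin", "rb");
+//   w.DeSerialize(false, in);   // false: written on this host, no endian swap
+//   fclose(in);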
+template +bool GenericVector::Serialize(FILE* fp) const { + if (fwrite(&size_used_, sizeof(size_used_), 1, fp) != 1) { + return false; + } + if (fwrite(data_, sizeof(*data_), size_used_, fp) != unsigned_size()) { + return false; + } + return true; +} +template +bool GenericVector::Serialize(tesseract::TFile* fp) const { + if (fp->FWrite(&size_used_, sizeof(size_used_), 1) != 1) { + return false; + } + if (fp->FWrite(data_, sizeof(*data_), size_used_) != size_used_) { + return false; + } + return true; +} + +// Reads a vector of simple types from the given file. Assumes that bitwise +// read/write will work with ReverseN according to sizeof(T). +// Returns false in case of error. +// If swap is true, assumes a big/little-endian swap is needed. +template +bool GenericVector::DeSerialize(bool swap, FILE* fp) { + uint32_t reserved; + if (fread(&reserved, sizeof(reserved), 1, fp) != 1) { + return false; + } + if (swap) { + Reverse32(&reserved); + } + // Arbitrarily limit the number of elements to protect against bad data. + assert(reserved <= UINT16_MAX); + if (reserved > UINT16_MAX) { + return false; + } + reserve(reserved); + size_used_ = reserved; + if (fread(data_, sizeof(T), size_used_, fp) != unsigned_size()) { + return false; + } + if (swap) { + for (int i = 0; i < size_used_; ++i) { + ReverseN(&data_[i], sizeof(data_[i])); + } + } + return true; +} +template +bool GenericVector::DeSerialize(tesseract::TFile* fp) { + uint32_t reserved; + if (fp->FReadEndian(&reserved, sizeof(reserved), 1) != 1) { + return false; + } + // Arbitrarily limit the number of elements to protect against bad data. + const uint32_t limit = 50000000; + assert(reserved <= limit); + if (reserved > limit) { + return false; + } + reserve(reserved); + size_used_ = reserved; + return fp->FReadEndian(data_, sizeof(T), size_used_) == size_used_; +} +template +bool GenericVector::SkipDeSerialize(tesseract::TFile* fp) { + uint32_t reserved; + if (fp->FReadEndian(&reserved, sizeof(reserved), 1) != 1) { + return false; + } + return (uint32_t)fp->FRead(nullptr, sizeof(T), reserved) == reserved; +} + +// Writes a vector of classes to the given file. Assumes the existence of +// bool T::Serialize(FILE* fp) const that returns false in case of error. +// Returns false in case of error. +template +bool GenericVector::SerializeClasses(FILE* fp) const { + if (fwrite(&size_used_, sizeof(size_used_), 1, fp) != 1) { + return false; + } + for (int i = 0; i < size_used_; ++i) { + if (!data_[i].Serialize(fp)) { + return false; + } + } + return true; +} +template +bool GenericVector::SerializeClasses(tesseract::TFile* fp) const { + if (fp->FWrite(&size_used_, sizeof(size_used_), 1) != 1) { + return false; + } + for (int i = 0; i < size_used_; ++i) { + if (!data_[i].Serialize(fp)) { + return false; + } + } + return true; +} + +// Reads a vector of classes from the given file. Assumes the existence of +// bool T::Deserialize(bool swap, FILE* fp) that returns false in case of +// error. Also needs T::T() and T::T(constT&), as init_to_size is used in +// this function. Returns false in case of error. +// If swap is true, assumes a big/little-endian swap is needed. 
+template +bool GenericVector::DeSerializeClasses(bool swap, FILE* fp) { + int32_t reserved; + if (fread(&reserved, sizeof(reserved), 1, fp) != 1) { + return false; + } + if (swap) { + Reverse32(&reserved); + } + T empty; + init_to_size(reserved, empty); + for (int i = 0; i < reserved; ++i) { + if (!data_[i].DeSerialize(swap, fp)) { + return false; + } + } + return true; +} +template +bool GenericVector::DeSerializeClasses(tesseract::TFile* fp) { + int32_t reserved; + if (fp->FReadEndian(&reserved, sizeof(reserved), 1) != 1) { + return false; + } + T empty; + init_to_size(reserved, empty); + for (int i = 0; i < reserved; ++i) { + if (!data_[i].DeSerialize(fp)) { + return false; + } + } + return true; +} +template +bool GenericVector::SkipDeSerializeClasses(tesseract::TFile* fp) { + int32_t reserved; + if (fp->FReadEndian(&reserved, sizeof(reserved), 1) != 1) { + return false; + } + for (int i = 0; i < reserved; ++i) { + if (!T::SkipDeSerialize(fp)) { + return false; + } + } + return true; +} + +// This method clear the current object, then, does a shallow copy of +// its argument, and finally invalidates its argument. +template +void GenericVector::move(GenericVector* from) { + this->clear(); + this->data_ = from->data_; + this->size_reserved_ = from->size_reserved_; + this->size_used_ = from->size_used_; + this->compare_cb_ = from->compare_cb_; + this->clear_cb_ = from->clear_cb_; + from->data_ = nullptr; + from->clear_cb_ = nullptr; + from->compare_cb_ = nullptr; + from->size_used_ = 0; + from->size_reserved_ = 0; +} + +template +void GenericVector::sort() { + sort(&tesseract::sort_cmp); +} + +// Internal recursive version of choose_nth_item. +// The algorithm used comes from "Algorithms" by Sedgewick: +// http://books.google.com/books/about/Algorithms.html?id=idUdqdDXqnAC +// The principle is to choose a random pivot, and move everything less than +// the pivot to its left, and everything greater than the pivot to the end +// of the array, then recurse on the part that contains the desired index, or +// just return the answer if it is in the equal section in the middle. +// The random pivot guarantees average linear time for the same reason that +// n times vector::push_back takes linear time on average. +// target_index, start and and end are all indices into the full array. +// Seed is a seed for rand_r for thread safety purposes. Its value is +// unimportant as the random numbers do not affect the result except +// between equal answers. +template +int GenericVector::choose_nth_item(int target_index, int start, int end, + unsigned int* seed) { + // Number of elements to process. + int num_elements = end - start; + // Trivial cases. + if (num_elements <= 1) { + return start; + } + if (num_elements == 2) { + if (data_[start] < data_[start + 1]) { + return target_index > start ? start + 1 : start; + } + return target_index > start ? start : start + 1; + } +// Place the pivot at start. +#ifndef rand_r // _MSC_VER, ANDROID + srand(*seed); +# define rand_r(seed) rand() +#endif // _MSC_VER + int pivot = rand_r(seed) % num_elements + start; + swap(pivot, start); + // The invariant condition here is that items [start, next_lesser) are less + // than the pivot (which is at index next_lesser) and items + // [prev_greater, end) are greater than the pivot, with items + // [next_lesser, prev_greater) being equal to the pivot. 
+ int next_lesser = start; + int prev_greater = end; + for (int next_sample = start + 1; next_sample < prev_greater;) { + if (data_[next_sample] < data_[next_lesser]) { + swap(next_lesser++, next_sample++); + } else if (data_[next_sample] == data_[next_lesser]) { + ++next_sample; + } else { + swap(--prev_greater, next_sample); + } + } + // Now the invariant is set up, we recurse on just the section that contains + // the desired index. + if (target_index < next_lesser) { + return choose_nth_item(target_index, start, next_lesser, seed); + } + if (target_index < prev_greater) { + return next_lesser; // In equal bracket. + } + return choose_nth_item(target_index, prev_greater, end, seed); +} + +#endif // TESSERACT_CCUTIL_GENERICVECTOR_H_ diff --git a/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/helpers.h b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/helpers.h new file mode 100644 index 00000000..9ccde82d --- /dev/null +++ b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/helpers.h @@ -0,0 +1,211 @@ +/* -*-C-*- + ******************************************************************************** + * + * File: helpers.h + * Description: General utility functions + * Author: Daria Antonova + * + * (c) Copyright 2009, Google Inc. + ** Licensed under the Apache License, Version 2.0 (the "License"); + ** you may not use this file except in compliance with the License. + ** You may obtain a copy of the License at + ** http://www.apache.org/licenses/LICENSE-2.0 + ** Unless required by applicable law or agreed to in writing, software + ** distributed under the License is distributed on an "AS IS" BASIS, + ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + ** See the License for the specific language governing permissions and + ** limitations under the License. + * + ********************************************************************************/ + +#ifndef TESSERACT_CCUTIL_HELPERS_H_ +#define TESSERACT_CCUTIL_HELPERS_H_ + +#include +#include +#include +#include +#include + +// TODO(rays) Put the rest of the helpers in the namespace. +namespace tesseract { + +// A simple linear congruential random number generator, using Knuth's +// constants from: +// http://en.wikipedia.org/wiki/Linear_congruential_generator. +class TRand { + public: + TRand() = default; + // Sets the seed to the given value. + void set_seed(uint64_t seed) { + seed_ = seed; + } + // Sets the seed using a hash of a string. + void set_seed(const std::string& str) { + std::hash hasher; + set_seed(static_cast(hasher(str))); + } + + // Returns an integer in the range 0 to INT32_MAX. + int32_t IntRand() { + Iterate(); + return seed_ >> 33; + } + // Returns a floating point value in the range [-range, range]. + double SignedRand(double range) { + return range * 2.0 * IntRand() / INT32_MAX - range; + } + // Returns a floating point value in the range [0, range]. + double UnsignedRand(double range) { + return range * IntRand() / INT32_MAX; + } + + private: + // Steps the generator to the next value. + void Iterate() { + seed_ *= 6364136223846793005ULL; + seed_ += 1442695040888963407ULL; + } + + // The current value of the seed. + uint64_t seed_{1}; +}; + +} // namespace tesseract + +// Remove newline (if any) at the end of the string. 
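+// A short sketch (illustrative):
+//   char line[256];
+//   if (fgets(line, sizeof(line), stdin) != nullptr) {
+//     chomp_string(line);   // strips any trailing "\n" / "\r\n"
+//   }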
+inline void chomp_string(char* str) {
+  int last_index = static_cast<int>(strlen(str)) - 1;
+  while (last_index >= 0 &&
+         (str[last_index] == '\n' || str[last_index] == '\r')) {
+    str[last_index--] = '\0';
+  }
+}
+
+// Advance the current pointer of the file if it points to a newline character.
+inline void SkipNewline(FILE* file) {
+  if (fgetc(file) != '\n') {
+    fseek(file, -1, SEEK_CUR);
+  }
+}
+
+// Swaps the two args pointed to by the pointers.
+// Operator= and copy constructor must work on T.
+template <typename T>
+inline void Swap(T* p1, T* p2) {
+  T tmp(*p2);
+  *p2 = *p1;
+  *p1 = tmp;
+}
+
+// return the smallest multiple of block_size greater than or equal to n.
+inline int RoundUp(int n, int block_size) {
+  return block_size * ((n + block_size - 1) / block_size);
+}
+
+// Clip a numeric value to the interval [lower_bound, upper_bound].
+template <typename T>
+inline T ClipToRange(const T& x, const T& lower_bound, const T& upper_bound) {
+  if (x < lower_bound) {
+    return lower_bound;
+  }
+  if (x > upper_bound) {
+    return upper_bound;
+  }
+  return x;
+}
+
+// Extend the range [lower_bound, upper_bound] to include x.
+template <typename T1, typename T2>
+inline void UpdateRange(const T1& x, T2* lower_bound, T2* upper_bound) {
+  if (x < *lower_bound) {
+    *lower_bound = x;
+  }
+  if (x > *upper_bound) {
+    *upper_bound = x;
+  }
+}
+
+// Decrease lower_bound to be <= x_lo AND increase upper_bound to be >= x_hi.
+template <typename T1, typename T2>
+inline void UpdateRange(const T1& x_lo, const T1& x_hi, T2* lower_bound,
+                        T2* upper_bound) {
+  if (x_lo < *lower_bound) {
+    *lower_bound = x_lo;
+  }
+  if (x_hi > *upper_bound) {
+    *upper_bound = x_hi;
+  }
+}
+
+// Intersect the range [*lower2, *upper2] with the range [lower1, upper1],
+// putting the result back in [*lower2, *upper2].
+// If non-intersecting ranges are given, we end up with *lower2 > *upper2.
+template <typename T>
+inline void IntersectRange(const T& lower1, const T& upper1, T* lower2,
+                           T* upper2) {
+  if (lower1 > *lower2) {
+    *lower2 = lower1;
+  }
+  if (upper1 < *upper2) {
+    *upper2 = upper1;
+  }
+}
+
+// Proper modulo arithmetic operator. Returns a mod b that works for -ve a.
+// For any integer a and positive b, returns r : 0 <= r < b and a = n*b + r for
+// some integer n.
+inline int Modulo(int a, int b) {
+  return (a % b + b) % b;
+}
+
+// Integer division with rounding that works for negative input:
+// returns a divided by b, rounded to the nearest integer.
+inline int DivRounded(int a, int b) {
+  if (b < 0) {
+    return -DivRounded(a, -b);
+  }
+  return a >= 0 ? (a + b / 2) / b : (a - b / 2) / b;
+}
+
+// Return a double cast to int with rounding.
+inline int IntCastRounded(double x) {
+  return x >= 0.0 ? static_cast<int>(x + 0.5) : -static_cast<int>(-x + 0.5);
+}
+
+// Return a float cast to int with rounding.
+inline int IntCastRounded(float x) {
+  return x >= 0.0F ? static_cast<int>(x + 0.5F) : -static_cast<int>(-x + 0.5F);
+}
+
+// Reverse the order of bytes in a n byte quantity for big/little-endian switch.
+inline void ReverseN(void* ptr, int num_bytes) {
+  assert(num_bytes == 1 || num_bytes == 2 || num_bytes == 4 || num_bytes == 8);
+  char* cptr = static_cast<char*>(ptr);
+  int halfsize = num_bytes / 2;
+  for (int i = 0; i < halfsize; ++i) {
+    char tmp = cptr[i];
+    cptr[i] = cptr[num_bytes - 1 - i];
+    cptr[num_bytes - 1 - i] = tmp;
+  }
+}
+
+// Reverse the order of bytes in a 16 bit quantity for big/little-endian switch.
+inline void Reverse16(void* ptr) {
+  ReverseN(ptr, 2);
+}
+
+// Reverse the order of bytes in a 32 bit quantity for big/little-endian switch.
+inline void Reverse32(void* ptr) {
+  ReverseN(ptr, 4);
+}
+
+// Reverse the order of bytes in a 64 bit quantity for big/little-endian switch.
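+// A quick sketch (illustrative): this is the swap that the GenericVector
+// DeSerialize(bool swap, ...) overloads apply via ReverseN when swap is true.
+//   uint64_t v = 0x1122334455667788ULL;
+//   Reverse64(&v);   // v is now 0x8877665544332211, whatever the host endianness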
+inline void Reverse64(void* ptr) { + ReverseN(ptr, 8); +} + +#endif // TESSERACT_CCUTIL_HELPERS_H_ diff --git a/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/ltrresultiterator.h b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/ltrresultiterator.h new file mode 100644 index 00000000..3764b166 --- /dev/null +++ b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/ltrresultiterator.h @@ -0,0 +1,228 @@ +/////////////////////////////////////////////////////////////////////// +// File: ltrresultiterator.h +// Description: Iterator for tesseract results in strict left-to-right +// order that avoids using tesseract internal data structures. +// Author: Ray Smith +// Created: Fri Feb 26 11:01:06 PST 2010 +// +// (C) Copyright 2010, Google Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +/////////////////////////////////////////////////////////////////////// + +#ifndef TESSERACT_CCMAIN_LTR_RESULT_ITERATOR_H_ +#define TESSERACT_CCMAIN_LTR_RESULT_ITERATOR_H_ + +#include "pageiterator.h" // for PageIterator +#include "platform.h" // for TESS_API +#include "publictypes.h" // for PageIteratorLevel +#include "unichar.h" // for StrongScriptDirection + +class BLOB_CHOICE_IT; +class PAGE_RES; +class WERD_RES; + +namespace tesseract { + +class Tesseract; + +// Class to iterate over tesseract results, providing access to all levels +// of the page hierarchy, without including any tesseract headers or having +// to handle any tesseract structures. +// WARNING! This class points to data held within the TessBaseAPI class, and +// therefore can only be used while the TessBaseAPI class still exists and +// has not been subjected to a call of Init, SetImage, Recognize, Clear, End +// DetectOS, or anything else that changes the internal PAGE_RES. +// See apitypes.h for the definition of PageIteratorLevel. +// See also base class PageIterator, which contains the bulk of the interface. +// LTRResultIterator adds text-specific methods for access to OCR output. + +class TESS_API LTRResultIterator : public PageIterator { + friend class ChoiceIterator; + public: + // page_res and tesseract come directly from the BaseAPI. + // The rectangle parameters are copied indirectly from the Thresholder, + // via the BaseAPI. They represent the coordinates of some rectangle in an + // original image (in top-left-origin coordinates) and therefore the top-left + // needs to be added to any output boxes in order to specify coordinates + // in the original image. See TessBaseAPI::SetRectangle. + // The scale and scaled_yres are in case the Thresholder scaled the image + // rectangle prior to thresholding. Any coordinates in tesseract's image + // must be divided by scale before adding (rect_left, rect_top). + // The scaled_yres indicates the effective resolution of the binary image + // that tesseract has been given by the Thresholder. + // After the constructor, Begin has already been called. 
+ LTRResultIterator(PAGE_RES* page_res, Tesseract* tesseract, + int scale, int scaled_yres, + int rect_left, int rect_top, + int rect_width, int rect_height); + + ~LTRResultIterator() override; + + // LTRResultIterators may be copied! This makes it possible to iterate over + // all the objects at a lower level, while maintaining an iterator to + // objects at a higher level. These constructors DO NOT CALL Begin, so + // iterations will continue from the location of src. + // TODO: For now the copy constructor and operator= only need the base class + // versions, but if new data members are added, don't forget to add them! + + // ============= Moving around within the page ============. + + // See PageIterator. + + // ============= Accessing data ==============. + + // Returns the null terminated UTF-8 encoded text string for the current + // object at the given level. Use delete [] to free after use. + char* GetUTF8Text(PageIteratorLevel level) const; + + // Set the string inserted at the end of each text line. "\n" by default. + void SetLineSeparator(const char *new_line); + + // Set the string inserted at the end of each paragraph. "\n" by default. + void SetParagraphSeparator(const char *new_para); + + // Returns the mean confidence of the current object at the given level. + // The number should be interpreted as a percent probability. (0.0f-100.0f) + float Confidence(PageIteratorLevel level) const; + + // Returns the attributes of the current row. + void RowAttributes(float* row_height, float* descenders, + float* ascenders) const; + + // ============= Functions that refer to words only ============. + + // Returns the font attributes of the current word. If iterating at a higher + // level object than words, eg textlines, then this will return the + // attributes of the first word in that textline. + // The actual return value is a string representing a font name. It points + // to an internal table and SHOULD NOT BE DELETED. Lifespan is the same as + // the iterator itself, ie rendered invalid by various members of + // TessBaseAPI, including Init, SetImage, End or deleting the TessBaseAPI. + // Pointsize is returned in printers points (1/72 inch.) + const char* WordFontAttributes(bool* is_bold, + bool* is_italic, + bool* is_underlined, + bool* is_monospace, + bool* is_serif, + bool* is_smallcaps, + int* pointsize, + int* font_id) const; + + // Return the name of the language used to recognize this word. + // On error, nullptr. Do not delete this pointer. + const char* WordRecognitionLanguage() const; + + // Return the overall directionality of this word. + StrongScriptDirection WordDirection() const; + + // Returns true if the current word was found in a dictionary. + bool WordIsFromDictionary() const; + + // Returns the number of blanks before the current word. + int BlanksBeforeWord() const; + + // Returns true if the current word is numeric. + bool WordIsNumeric() const; + + // Returns true if the word contains blamer information. + bool HasBlamerInfo() const; + + // Returns the pointer to ParamsTrainingBundle stored in the BlamerBundle + // of the current word. + const void *GetParamsTrainingBundle() const; + + // Returns a pointer to the string with blamer information for this word. + // Assumes that the word's blamer_bundle is not nullptr. + const char *GetBlamerDebug() const; + + // Returns a pointer to the string with misadaption information for this word. + // Assumes that the word's blamer_bundle is not nullptr. 
+ const char *GetBlamerMisadaptionDebug() const; + + // Returns true if a truth string was recorded for the current word. + bool HasTruthString() const; + + // Returns true if the given string is equivalent to the truth string for + // the current word. + bool EquivalentToTruth(const char *str) const; + + // Returns a null terminated UTF-8 encoded truth string for the current word. + // Use delete [] to free after use. + char* WordTruthUTF8Text() const; + + // Returns a null terminated UTF-8 encoded normalized OCR string for the + // current word. Use delete [] to free after use. + char* WordNormedUTF8Text() const; + + // Returns a pointer to serialized choice lattice. + // Fills lattice_size with the number of bytes in lattice data. + const char *WordLattice(int *lattice_size) const; + + // ============= Functions that refer to symbols only ============. + + // Returns true if the current symbol is a superscript. + // If iterating at a higher level object than symbols, eg words, then + // this will return the attributes of the first symbol in that word. + bool SymbolIsSuperscript() const; + // Returns true if the current symbol is a subscript. + // If iterating at a higher level object than symbols, eg words, then + // this will return the attributes of the first symbol in that word. + bool SymbolIsSubscript() const; + // Returns true if the current symbol is a dropcap. + // If iterating at a higher level object than symbols, eg words, then + // this will return the attributes of the first symbol in that word. + bool SymbolIsDropcap() const; + + protected: + const char *line_separator_; + const char *paragraph_separator_; +}; + +// Class to iterate over the classifier choices for a single RIL_SYMBOL. +class ChoiceIterator { + public: + // Construction is from a LTRResultIterator that points to the symbol of + // interest. The ChoiceIterator allows a one-shot iteration over the + // choices for this symbol and after that is is useless. + explicit ChoiceIterator(const LTRResultIterator& result_it); + ~ChoiceIterator(); + + // Moves to the next choice for the symbol and returns false if there + // are none left. + bool Next(); + + // ============= Accessing data ==============. + + // Returns the null terminated UTF-8 encoded text string for the current + // choice. + // NOTE: Unlike LTRResultIterator::GetUTF8Text, the return points to an + // internal structure and should NOT be delete[]ed to free after use. + const char* GetUTF8Text() const; + + // Returns the confidence of the current choice depending on the used language + // data. If only LSTM traineddata is used the value range is 0.0f - 1.0f. All + // choices for one symbol should roughly add up to 1.0f. + // If only traineddata of the legacy engine is used, the number should be + // interpreted as a percent probability. (0.0f-100.0f) In this case + // probabilities won't add up to 100. Each one stands on its own. + float Confidence() const; + + private: + // Pointer to the WERD_RES object owned by the API. + WERD_RES* word_res_; + // Iterator over the blob choices. + BLOB_CHOICE_IT* choice_it_; +}; + +} // namespace tesseract. 
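+// Typical use (a sketch, not upstream text): these iterators are obtained from
+// TessBaseAPI rather than constructed directly; the image path and "eng"
+// language below are placeholders.
+//   tesseract::TessBaseAPI api;
+//   if (api.Init(nullptr, "eng") != 0) { /* init failed */ }
+//   Pix* pix = pixRead("page.png");
+//   api.SetImage(pix);
+//   api.Recognize(nullptr);
+//   tesseract::ResultIterator* ri = api.GetIterator();  // derives from LTRResultIterator
+//   if (ri != nullptr) {
+//     do {
+//       char* word = ri->GetUTF8Text(tesseract::RIL_WORD);
+//       printf("%s\t%.2f\n", word != nullptr ? word : "",
+//              ri->Confidence(tesseract::RIL_WORD));
+//       delete[] word;                                   // caller owns GetUTF8Text output
+//     } while (ri->Next(tesseract::RIL_WORD));
+//     delete ri;
+//   }
+//   pixDestroy(&pix);
+//   api.End();
+// With the iterator positioned on a RIL_SYMBOL, per-symbol alternatives come
+// from ChoiceIterator:
+//   tesseract::ChoiceIterator ci(*ri);
+//   do {
+//     printf("  %s %.2f\n", ci.GetUTF8Text(), ci.Confidence());
+//   } while (ci.Next());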
+ +#endif // TESSERACT_CCMAIN_LTR_RESULT_ITERATOR_H_ diff --git a/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/ocrclass.h b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/ocrclass.h new file mode 100644 index 00000000..d39a6dd6 --- /dev/null +++ b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/ocrclass.h @@ -0,0 +1,167 @@ +/********************************************************************** + * File: ocrclass.h + * Description: Class definitions and constants for the OCR API. + * Author: Hewlett-Packard Co + * + * (C) Copyright 1996, Hewlett-Packard Co. + ** Licensed under the Apache License, Version 2.0 (the "License"); + ** you may not use this file except in compliance with the License. + ** You may obtain a copy of the License at + ** http://www.apache.org/licenses/LICENSE-2.0 + ** Unless required by applicable law or agreed to in writing, software + ** distributed under the License is distributed on an "AS IS" BASIS, + ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + ** See the License for the specific language governing permissions and + ** limitations under the License. + * + **********************************************************************/ + +/********************************************************************** + * This file contains typedefs for all the structures used by + * the HP OCR interface. + * The structures are designed to allow them to be used with any + * structure alignment up to 8. + **********************************************************************/ + +#ifndef CCUTIL_OCRCLASS_H_ +#define CCUTIL_OCRCLASS_H_ + +#include +#include +#ifdef _WIN32 +#include // for timeval +#endif + +/********************************************************************** + * EANYCODE_CHAR + * Description of a single character. The character code is defined by + * the character set of the current font. + * Output text is sent as an array of these structures. + * Spaces and line endings in the output are represented in the + * structures of the surrounding characters. They are not directly + * represented as characters. + * The first character in a word has a positive value of blanks. + * Missing information should be set to the defaults in the comments. + * If word bounds are known, but not character bounds, then the top and + * bottom of each character should be those of the word. The left of the + * first and right of the last char in each word should be set. All other + * lefts and rights should be set to -1. + * If set, the values of right and bottom are left+width and top+height. + * Most of the members come directly from the parameters to ocr_append_char. + * The formatting member uses the enhancement parameter and combines the + * line direction stuff into the top 3 bits. + * The coding is 0=RL char, 1=LR char, 2=DR NL, 3=UL NL, 4=DR Para, + * 5=UL Para, 6=TB char, 7=BT char. API users do not need to know what + * the coding is, only that it is backwards compatible with the previous + * version. + **********************************************************************/ + +typedef struct { /*single character */ + // It should be noted that the format for char_code for version 2.0 and beyond + // is UTF8 which means that ASCII characters will come out as one structure + // but other characters will be returned in two or more instances of this + // structure with a single byte of the UTF8 code in each, but each will have + // the same bounding box. 
Programs which want to handle languagues with + // different characters sets will need to handle extended characters + // appropriately, but *all* code needs to be prepared to receive UTF8 coded + // characters for characters such as bullet and fancy quotes. + uint16_t char_code; /*character itself */ + int16_t left; /*of char (-1) */ + int16_t right; /*of char (-1) */ + int16_t top; /*of char (-1) */ + int16_t bottom; /*of char (-1) */ + int16_t font_index; /*what font (0) */ + uint8_t confidence; /*0=perfect, 100=reject (0/100) */ + uint8_t point_size; /*of char, 72=i inch, (10) */ + int8_t blanks; /*no of spaces before this char (1) */ + uint8_t formatting; /*char formatting (0) */ +} EANYCODE_CHAR; /*single character */ + +/********************************************************************** + * ETEXT_DESC + * Description of the output of the OCR engine. + * This structure is used as both a progress monitor and the final + * output header, since it needs to be a valid progress monitor while + * the OCR engine is storing its output to shared memory. + * During progress, all the buffer info is -1. + * Progress starts at 0 and increases to 100 during OCR. No other constraint. + * Additionally the progress callback contains the bounding box of the word that + * is currently being processed. + * Every progress callback, the OCR engine must set ocr_alive to 1. + * The HP side will set ocr_alive to 0. Repeated failure to reset + * to 1 indicates that the OCR engine is dead. + * If the cancel function is not null then it is called with the number of + * user words found. If it returns true then operation is cancelled. + **********************************************************************/ +class ETEXT_DESC; + +using CANCEL_FUNC = bool (*)(void*, int); +using PROGRESS_FUNC = bool (*)(int, int, int, int, int); +using PROGRESS_FUNC2 = bool (*)(ETEXT_DESC*, int, int, int, int); + +class ETEXT_DESC { // output header + public: + int16_t count{0}; /// chars in this buffer(0) + int16_t progress{0}; /// percent complete increasing (0-100) + /** Progress monitor covers word recognition and it does not cover layout + * analysis. + * See Ray comment in https://github.com/tesseract-ocr/tesseract/pull/27 */ + int8_t more_to_come{0}; /// true if not last + volatile int8_t ocr_alive{0}; /// ocr sets to 1, HP 0 + int8_t err_code{0}; /// for errcode use + CANCEL_FUNC cancel{nullptr}; /// returns true to cancel + PROGRESS_FUNC progress_callback{ + nullptr}; /// called whenever progress increases + PROGRESS_FUNC2 progress_callback2; /// monitor-aware progress callback + void* cancel_this{nullptr}; /// this or other data for cancel + struct timeval end_time; + /// Time to stop. Expected to be set only + /// by call to set_deadline_msecs(). + EANYCODE_CHAR text[1]{}; /// character data + + ETEXT_DESC() : progress_callback2(&default_progress_func) { + auto chrono_end_time = std::chrono::time_point(); + timePointToTimeval(chrono_end_time, &end_time); + } + + // Sets the end time to be deadline_msecs milliseconds from now. + void set_deadline_msecs(int32_t deadline_msecs) { + if (deadline_msecs > 0) { + auto chrono_end_time = std::chrono::steady_clock::now() + + std::chrono::milliseconds(deadline_msecs); + timePointToTimeval(chrono_end_time, &end_time); + } + } + + // Returns false if we've not passed the end_time, or have not set a deadline. 
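+  // A usage sketch (illustrative): callers typically set a deadline and hand
+  // the monitor to recognition, e.g.
+  //   ETEXT_DESC monitor;
+  //   monitor.set_deadline_msecs(5000);   // give up after roughly 5 seconds
+  //   api.Recognize(&monitor);            // api: an existing TessBaseAPI
+  // Recognition then checks deadline_exceeded() periodically and stops once it
+  // returns true.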
+ bool deadline_exceeded() const { + if (end_time.tv_sec == 0 && end_time.tv_usec == 0) + return false; + auto chrono_now = std::chrono::steady_clock::now(); + struct timeval now; + timePointToTimeval(chrono_now, &now); + return (now.tv_sec > end_time.tv_sec || + (now.tv_sec == end_time.tv_sec && now.tv_usec > end_time.tv_usec)); + } + + private: + static void timePointToTimeval( + std::chrono::steady_clock::time_point chrono_point, struct timeval* tv) { + auto millisecs = std::chrono::duration_cast( + chrono_point.time_since_epoch()); + tv->tv_sec = millisecs.count() / 1000; + tv->tv_usec = (millisecs.count() % 1000) * 1000; + } + + static bool default_progress_func(ETEXT_DESC* ths, int left, int right, + int top, int bottom) { + if (ths->progress_callback != nullptr) { + return (*(ths->progress_callback))(ths->progress, left, right, top, + bottom); + } + return true; + } +}; + +#endif // CCUTIL_OCRCLASS_H_ diff --git a/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/osdetect.h b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/osdetect.h new file mode 100644 index 00000000..c0864c40 --- /dev/null +++ b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/osdetect.h @@ -0,0 +1,140 @@ +/////////////////////////////////////////////////////////////////////// +// File: osdetect.h +// Description: Orientation and script detection. +// Author: Samuel Charron +// Ranjith Unnikrishnan +// +// (C) Copyright 2008, Google Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +/////////////////////////////////////////////////////////////////////// + +#ifndef TESSERACT_CCMAIN_OSDETECT_H_ +#define TESSERACT_CCMAIN_OSDETECT_H_ + +#include "platform.h" // for TESS_API + +class BLOBNBOX; +class BLOBNBOX_CLIST; +class BLOB_CHOICE_LIST; +class STRING; +class TO_BLOCK_LIST; +class UNICHARSET; +template class GenericVector; + +namespace tesseract { +class Tesseract; +} + +// Max number of scripts in ICU + "NULL" + Japanese and Korean + Fraktur +const int kMaxNumberOfScripts = 116 + 1 + 2 + 1; + +struct OSBestResult { + OSBestResult() : orientation_id(0), script_id(0), sconfidence(0.0), + oconfidence(0.0) {} + int orientation_id; + int script_id; + float sconfidence; + float oconfidence; +}; + +struct OSResults { + OSResults() : unicharset(nullptr) { + for (int i = 0; i < 4; ++i) { + for (int j = 0; j < kMaxNumberOfScripts; ++j) + scripts_na[i][j] = 0; + orientations[i] = 0; + } + } + void update_best_orientation(); + // Set the estimate of the orientation to the given id. + void set_best_orientation(int orientation_id); + // Update/Compute the best estimate of the script assuming the given + // orientation id. + void update_best_script(int orientation_id); + // Return the index of the script with the highest score for this orientation. + TESS_API int get_best_script(int orientation_id) const; + // Accumulate scores with given OSResults instance and update the best script. + void accumulate(const OSResults& osr); + + // Print statistics. 
+ void print_scores(void) const; + void print_scores(int orientation_id) const; + + // Array holding scores for each orientation id [0,3]. + // Orientation ids [0..3] map to [0, 270, 180, 90] degree orientations of the + // page respectively, where the values refer to the amount of clockwise + // rotation to be applied to the page for the text to be upright and readable. + float orientations[4]; + // Script confidence scores for each of 4 possible orientations. + float scripts_na[4][kMaxNumberOfScripts]; + + UNICHARSET* unicharset; + OSBestResult best_result; +}; + +class OrientationDetector { + public: + OrientationDetector(const GenericVector* allowed_scripts, + OSResults* results); + bool detect_blob(BLOB_CHOICE_LIST* scores); + int get_orientation(); + private: + OSResults* osr_; + const GenericVector* allowed_scripts_; +}; + +class ScriptDetector { + public: + ScriptDetector(const GenericVector* allowed_scripts, + OSResults* osr, tesseract::Tesseract* tess); + void detect_blob(BLOB_CHOICE_LIST* scores); + bool must_stop(int orientation); + private: + OSResults* osr_; + static const char* korean_script_; + static const char* japanese_script_; + static const char* fraktur_script_; + int korean_id_; + int japanese_id_; + int katakana_id_; + int hiragana_id_; + int han_id_; + int hangul_id_; + int latin_id_; + int fraktur_id_; + tesseract::Tesseract* tess_; + const GenericVector* allowed_scripts_; +}; + +int orientation_and_script_detection(STRING& filename, + OSResults*, + tesseract::Tesseract*); + +int os_detect(TO_BLOCK_LIST* port_blocks, + OSResults* osr, + tesseract::Tesseract* tess); + +int os_detect_blobs(const GenericVector* allowed_scripts, + BLOBNBOX_CLIST* blob_list, + OSResults* osr, + tesseract::Tesseract* tess); + +bool os_detect_blob(BLOBNBOX* bbox, OrientationDetector* o, + ScriptDetector* s, OSResults*, + tesseract::Tesseract* tess); + +// Helper method to convert an orientation index to its value in degrees. +// The value represents the amount of clockwise rotation in degrees that must be +// applied for the text to be upright (readable). +TESS_API int OrientationIdToValue(const int& id); + +#endif // TESSERACT_CCMAIN_OSDETECT_H_ diff --git a/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/pageiterator.h b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/pageiterator.h new file mode 100644 index 00000000..53581428 --- /dev/null +++ b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/pageiterator.h @@ -0,0 +1,364 @@ +/////////////////////////////////////////////////////////////////////// +// File: pageiterator.h +// Description: Iterator for tesseract page structure that avoids using +// tesseract internal data structures. +// Author: Ray Smith +// Created: Fri Feb 26 11:01:06 PST 2010 +// +// (C) Copyright 2010, Google Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +/////////////////////////////////////////////////////////////////////// + +#ifndef TESSERACT_CCMAIN_PAGEITERATOR_H_ +#define TESSERACT_CCMAIN_PAGEITERATOR_H_ + +#include "platform.h" +#include "publictypes.h" + +struct BlamerBundle; +class C_BLOB_IT; +class PAGE_RES; +class PAGE_RES_IT; +class WERD; +struct Pix; +struct Pta; + +namespace tesseract { + +class Tesseract; + +/** + * Class to iterate over tesseract page structure, providing access to all + * levels of the page hierarchy, without including any tesseract headers or + * having to handle any tesseract structures. + * WARNING! This class points to data held within the TessBaseAPI class, and + * therefore can only be used while the TessBaseAPI class still exists and + * has not been subjected to a call of Init, SetImage, Recognize, Clear, End + * DetectOS, or anything else that changes the internal PAGE_RES. + * See apitypes.h for the definition of PageIteratorLevel. + * See also ResultIterator, derived from PageIterator, which adds in the + * ability to access OCR output with text-specific methods. + */ + +class TESS_API PageIterator { + public: + /** + * page_res and tesseract come directly from the BaseAPI. + * The rectangle parameters are copied indirectly from the Thresholder, + * via the BaseAPI. They represent the coordinates of some rectangle in an + * original image (in top-left-origin coordinates) and therefore the top-left + * needs to be added to any output boxes in order to specify coordinates + * in the original image. See TessBaseAPI::SetRectangle. + * The scale and scaled_yres are in case the Thresholder scaled the image + * rectangle prior to thresholding. Any coordinates in tesseract's image + * must be divided by scale before adding (rect_left, rect_top). + * The scaled_yres indicates the effective resolution of the binary image + * that tesseract has been given by the Thresholder. + * After the constructor, Begin has already been called. + */ + PageIterator(PAGE_RES* page_res, Tesseract* tesseract, + int scale, int scaled_yres, + int rect_left, int rect_top, + int rect_width, int rect_height); + virtual ~PageIterator(); + + /** + * Page/ResultIterators may be copied! This makes it possible to iterate over + * all the objects at a lower level, while maintaining an iterator to + * objects at a higher level. These constructors DO NOT CALL Begin, so + * iterations will continue from the location of src. + */ + PageIterator(const PageIterator& src); + const PageIterator& operator=(const PageIterator& src); + + /** Are we positioned at the same location as other? */ + bool PositionedAtSameWord(const PAGE_RES_IT* other) const; + + // ============= Moving around within the page ============. + + /** + * Moves the iterator to point to the start of the page to begin an + * iteration. + */ + virtual void Begin(); + + /** + * Moves the iterator to the beginning of the paragraph. + * This class implements this functionality by moving it to the zero indexed + * blob of the first (leftmost) word on the first row of the paragraph. + */ + virtual void RestartParagraph(); + + /** + * Return whether this iterator points anywhere in the first textline of a + * paragraph. + */ + bool IsWithinFirstTextlineOfParagraph() const; + + /** + * Moves the iterator to the beginning of the text line. + * This class implements this functionality by moving it to the zero indexed + * blob of the first (leftmost) word of the row. 
+ */ + virtual void RestartRow(); + + /** + * Moves to the start of the next object at the given level in the + * page hierarchy, and returns false if the end of the page was reached. + * NOTE that RIL_SYMBOL will skip non-text blocks, but all other + * PageIteratorLevel level values will visit each non-text block once. + * Think of non text blocks as containing a single para, with a single line, + * with a single imaginary word. + * Calls to Next with different levels may be freely intermixed. + * This function iterates words in right-to-left scripts correctly, if + * the appropriate language has been loaded into Tesseract. + */ + virtual bool Next(PageIteratorLevel level); + + /** + * Returns true if the iterator is at the start of an object at the given + * level. + * + * For instance, suppose an iterator it is pointed to the first symbol of the + * first word of the third line of the second paragraph of the first block in + * a page, then: + * it.IsAtBeginningOf(RIL_BLOCK) = false + * it.IsAtBeginningOf(RIL_PARA) = false + * it.IsAtBeginningOf(RIL_TEXTLINE) = true + * it.IsAtBeginningOf(RIL_WORD) = true + * it.IsAtBeginningOf(RIL_SYMBOL) = true + */ + virtual bool IsAtBeginningOf(PageIteratorLevel level) const; + + /** + * Returns whether the iterator is positioned at the last element in a + * given level. (e.g. the last word in a line, the last line in a block) + * + * Here's some two-paragraph example + * text. It starts off innocuously + * enough but quickly turns bizarre. + * The author inserts a cornucopia + * of words to guard against confused + * references. + * + * Now take an iterator it pointed to the start of "bizarre." + * it.IsAtFinalElement(RIL_PARA, RIL_SYMBOL) = false + * it.IsAtFinalElement(RIL_PARA, RIL_WORD) = true + * it.IsAtFinalElement(RIL_BLOCK, RIL_WORD) = false + */ + virtual bool IsAtFinalElement(PageIteratorLevel level, + PageIteratorLevel element) const; + + /** + * Returns whether this iterator is positioned + * before other: -1 + * equal to other: 0 + * after other: 1 + */ + int Cmp(const PageIterator &other) const; + + // ============= Accessing data ==============. + // Coordinate system: + // Integer coordinates are at the cracks between the pixels. + // The top-left corner of the top-left pixel in the image is at (0,0). + // The bottom-right corner of the bottom-right pixel in the image is at + // (width, height). + // Every bounding box goes from the top-left of the top-left contained + // pixel to the bottom-right of the bottom-right contained pixel, so + // the bounding box of the single top-left pixel in the image is: + // (0,0)->(1,1). + // If an image rectangle has been set in the API, then returned coordinates + // relate to the original (full) image, rather than the rectangle. + + /** + * Controls what to include in a bounding box. Bounding boxes of all levels + * between RIL_WORD and RIL_BLOCK can include or exclude potential diacritics. + * Between layout analysis and recognition, it isn't known where all + * diacritics belong, so this control is used to include or exclude some + * diacritics that are above or below the main body of the word. In most cases + * where the placement is obvious, and after recognition, it doesn't make as + * much difference, as the diacritics will already be included in the word. 
+ */ + void SetBoundingBoxComponents(bool include_upper_dots, + bool include_lower_dots) { + include_upper_dots_ = include_upper_dots; + include_lower_dots_ = include_lower_dots; + } + + /** + * Returns the bounding rectangle of the current object at the given level. + * See comment on coordinate system above. + * Returns false if there is no such object at the current position. + * The returned bounding box is guaranteed to match the size and position + * of the image returned by GetBinaryImage, but may clip foreground pixels + * from a grey image. The padding argument to GetImage can be used to expand + * the image to include more foreground pixels. See GetImage below. + */ + bool BoundingBox(PageIteratorLevel level, + int* left, int* top, int* right, int* bottom) const; + bool BoundingBox(PageIteratorLevel level, int padding, + int* left, int* top, int* right, int* bottom) const; + /** + * Returns the bounding rectangle of the object in a coordinate system of the + * working image rectangle having its origin at (rect_left_, rect_top_) with + * respect to the original image and is scaled by a factor scale_. + */ + bool BoundingBoxInternal(PageIteratorLevel level, + int* left, int* top, int* right, int* bottom) const; + + /** Returns whether there is no object of a given level. */ + bool Empty(PageIteratorLevel level) const; + + /** + * Returns the type of the current block. See apitypes.h for + * PolyBlockType. + */ + PolyBlockType BlockType() const; + + /** + * Returns the polygon outline of the current block. The returned Pta must + * be ptaDestroy-ed after use. Note that the returned Pta lists the vertices + * of the polygon, and the last edge is the line segment between the last + * point and the first point. nullptr will be returned if the iterator is + * at the end of the document or layout analysis was not used. + */ + Pta* BlockPolygon() const; + + /** + * Returns a binary image of the current object at the given level. + * The position and size match the return from BoundingBoxInternal, and so + * this could be upscaled with respect to the original input image. + * Use pixDestroy to delete the image after use. + */ + Pix* GetBinaryImage(PageIteratorLevel level) const; + + /** + * Returns an image of the current object at the given level in greyscale + * if available in the input. To guarantee a binary image use BinaryImage. + * NOTE that in order to give the best possible image, the bounds are + * expanded slightly over the binary connected component, by the supplied + * padding, so the top-left position of the returned image is returned + * in (left,top). These will most likely not match the coordinates + * returned by BoundingBox. + * If you do not supply an original image, you will get a binary one. + * Use pixDestroy to delete the image after use. + */ + Pix* GetImage(PageIteratorLevel level, int padding, Pix* original_img, + int* left, int* top) const; + + /** + * Returns the baseline of the current object at the given level. + * The baseline is the line that passes through (x1, y1) and (x2, y2). + * WARNING: with vertical text, baselines may be vertical! + * Returns false if there is no baseline at the current position. + */ + bool Baseline(PageIteratorLevel level, + int* x1, int* y1, int* x2, int* y2) const; + + /** + * Returns orientation for the block the iterator points to. 
+ * orientation, writing_direction, textline_order: see publictypes.h + * deskew_angle: after rotating the block so the text orientation is + * upright, how many radians does one have to rotate the + * block anti-clockwise for it to be level? + * -Pi/4 <= deskew_angle <= Pi/4 + */ + void Orientation(tesseract::Orientation *orientation, + tesseract::WritingDirection *writing_direction, + tesseract::TextlineOrder *textline_order, + float *deskew_angle) const; + + /** + * Returns information about the current paragraph, if available. + * + * justification - + * LEFT if ragged right, or fully justified and script is left-to-right. + * RIGHT if ragged left, or fully justified and script is right-to-left. + * unknown if it looks like source code or we have very few lines. + * is_list_item - + * true if we believe this is a member of an ordered or unordered list. + * is_crown - + * true if the first line of the paragraph is aligned with the other + * lines of the paragraph even though subsequent paragraphs have first + * line indents. This typically indicates that this is the continuation + * of a previous paragraph or that it is the very first paragraph in + * the chapter. + * first_line_indent - + * For LEFT aligned paragraphs, the first text line of paragraphs of + * this kind are indented this many pixels from the left edge of the + * rest of the paragraph. + * for RIGHT aligned paragraphs, the first text line of paragraphs of + * this kind are indented this many pixels from the right edge of the + * rest of the paragraph. + * NOTE 1: This value may be negative. + * NOTE 2: if *is_crown == true, the first line of this paragraph is + * actually flush, and first_line_indent is set to the "common" + * first_line_indent for subsequent paragraphs in this block + * of text. + */ + void ParagraphInfo(tesseract::ParagraphJustification *justification, + bool *is_list_item, + bool *is_crown, + int *first_line_indent) const; + + // If the current WERD_RES (it_->word()) is not nullptr, sets the BlamerBundle + // of the current word to the given pointer (takes ownership of the pointer) + // and returns true. + // Can only be used when iterating on the word level. + bool SetWordBlamerBundle(BlamerBundle *blamer_bundle); + + protected: + /** + * Sets up the internal data for iterating the blobs of a new word, then + * moves the iterator to the given offset. + */ + TESS_LOCAL void BeginWord(int offset); + + /** Pointer to the page_res owned by the API. */ + PAGE_RES* page_res_; + /** Pointer to the Tesseract object owned by the API. */ + Tesseract* tesseract_; + /** + * The iterator to the page_res_. Owned by this ResultIterator. + * A pointer just to avoid dragging in Tesseract includes. + */ + PAGE_RES_IT* it_; + /** + * The current input WERD being iterated. If there is an output from OCR, + * then word_ is nullptr. Owned by the API + */ + WERD* word_; + /** The length of the current word_. */ + int word_length_; + /** The current blob index within the word. */ + int blob_index_; + /** + * Iterator to the blobs within the word. If nullptr, then we are iterating + * OCR results in the box_word. + * Owned by this ResultIterator. + */ + C_BLOB_IT* cblob_it_; + /** Control over what to include in bounding boxes. */ + bool include_upper_dots_; + bool include_lower_dots_; + /** Parameters saved from the Thresholder. Needed to rebuild coordinates.*/ + int scale_; + int scaled_yres_; + int rect_left_; + int rect_top_; + int rect_width_; + int rect_height_; +}; + +} // namespace tesseract. 
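// ---------------------------------------------------------------------------
// Editorial note: the sketch below is NOT part of the upstream Tesseract
// header; it only illustrates how the PageIterator declared above is
// typically used. It assumes a tesseract::TessBaseAPI instance that has been
// initialised and given an image (TessBaseAPI and AnalyseLayout() come from
// baseapi.h, not from this file), and it is wrapped in #if 0 so it cannot
// affect compilation of the vendored header.
// ---------------------------------------------------------------------------
#if 0  // illustration only
#include <cstdio>
#include "baseapi.h"  // assumed location of TessBaseAPI in this include layout

inline void DumpTextlineBoxes(tesseract::TessBaseAPI& api) {
  // AnalyseLayout() runs layout analysis only and hands ownership of the
  // returned iterator to the caller.
  tesseract::PageIterator* it = api.AnalyseLayout();
  if (it == nullptr) return;
  do {
    int left, top, right, bottom;
    // Boxes are reported in original-image coordinates (top-left origin),
    // as described in the "Accessing data" comment above.
    if (it->BoundingBox(tesseract::RIL_TEXTLINE, &left, &top, &right, &bottom)) {
      std::printf("textline (%d,%d)-(%d,%d), block type %d\n",
                  left, top, right, bottom, static_cast<int>(it->BlockType()));
    }
  } while (it->Next(tesseract::RIL_TEXTLINE));
  delete it;
}
#endif  // illustration only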
+ +#endif // TESSERACT_CCMAIN_PAGEITERATOR_H_ diff --git a/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/platform.h b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/platform.h new file mode 100644 index 00000000..99424bc8 --- /dev/null +++ b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/platform.h @@ -0,0 +1,59 @@ +/////////////////////////////////////////////////////////////////////// +// File: platform.h +// Description: Place holder +// +// (C) Copyright 2006, Google Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +/////////////////////////////////////////////////////////////////////// + +#ifndef TESSERACT_CCUTIL_PLATFORM_H_ +#define TESSERACT_CCUTIL_PLATFORM_H_ + +#define DLLSYM +#ifndef _WIN32 +# ifdef __cplusplus +# include +# else /* C compiler*/ +# include +# endif /* __cplusplus */ +# ifndef PATH_MAX +# define MAX_PATH 4096 +# else +# define MAX_PATH PATH_MAX +# endif +#endif + +#if defined(_WIN32) || defined(__CYGWIN__) +# if defined(TESS_EXPORTS) +# define TESS_API __declspec(dllexport) +# elif defined(TESS_IMPORTS) +# define TESS_API __declspec(dllimport) +# else +# define TESS_API +# endif +# define TESS_LOCAL +#else +# if __GNUC__ >= 4 +# if defined(TESS_EXPORTS) || defined(TESS_IMPORTS) +# define TESS_API __attribute__((visibility("default"))) +# define TESS_LOCAL __attribute__((visibility("hidden"))) +# else +# define TESS_API +# define TESS_LOCAL +# endif +# else +# define TESS_API +# define TESS_LOCAL +# endif +#endif + +#endif // TESSERACT_CCUTIL_PLATFORM_H_ diff --git a/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/publictypes.h b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/publictypes.h new file mode 100644 index 00000000..6013f4e6 --- /dev/null +++ b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/publictypes.h @@ -0,0 +1,286 @@ +/////////////////////////////////////////////////////////////////////// +// File: publictypes.h +// Description: Types used in both the API and internally +// Author: Ray Smith +// Created: Wed Mar 03 09:22:53 PST 2010 +// +// (C) Copyright 2010, Google Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +/////////////////////////////////////////////////////////////////////// + +#ifndef TESSERACT_CCSTRUCT_PUBLICTYPES_H_ +#define TESSERACT_CCSTRUCT_PUBLICTYPES_H_ + +// This file contains types that are used both by the API and internally +// to Tesseract. 
In order to decouple the API from Tesseract and prevent cyclic +// dependencies, THIS FILE SHOULD NOT DEPEND ON ANY OTHER PART OF TESSERACT. +// Restated: It is OK for low-level Tesseract files to include publictypes.h, +// but not for the low-level tesseract code to include top-level API code. +// This file should not use other Tesseract types, as that would drag +// their includes into the API-level. +// API-level code should include apitypes.h in preference to this file. + +/** Number of printers' points in an inch. The unit of the pointsize return. */ +constexpr int kPointsPerInch = 72; +/** + * Minimum believable resolution. Used as a default if there is no other + * information, as it is safer to under-estimate than over-estimate. + */ +constexpr int kMinCredibleResolution = 70; +/** Maximum believable resolution. */ +constexpr int kMaxCredibleResolution = 2400; +/** + * Ratio between median blob size and likely resolution. Used to estimate + * resolution when none is provided. This is basically 1/usual text size in + * inches. */ +constexpr int kResolutionEstimationFactor = 10; + +/** + * Possible types for a POLY_BLOCK or ColPartition. + * Must be kept in sync with kPBColors in polyblk.cpp and PTIs*Type functions + * below, as well as kPolyBlockNames in publictypes.cpp. + * Used extensively by ColPartition, and POLY_BLOCK. +*/ +enum PolyBlockType { + PT_UNKNOWN, // Type is not yet known. Keep as the first element. + PT_FLOWING_TEXT, // Text that lives inside a column. + PT_HEADING_TEXT, // Text that spans more than one column. + PT_PULLOUT_TEXT, // Text that is in a cross-column pull-out region. + PT_EQUATION, // Partition belonging to an equation region. + PT_INLINE_EQUATION, // Partition has inline equation. + PT_TABLE, // Partition belonging to a table region. + PT_VERTICAL_TEXT, // Text-line runs vertically. + PT_CAPTION_TEXT, // Text that belongs to an image. + PT_FLOWING_IMAGE, // Image that lives inside a column. + PT_HEADING_IMAGE, // Image that spans more than one column. + PT_PULLOUT_IMAGE, // Image that is in a cross-column pull-out region. + PT_HORZ_LINE, // Horizontal Line. + PT_VERT_LINE, // Vertical Line. + PT_NOISE, // Lies outside of any column. + PT_COUNT +}; + +/** Returns true if PolyBlockType is of horizontal line type */ +inline bool PTIsLineType(PolyBlockType type) { + return type == PT_HORZ_LINE || type == PT_VERT_LINE; +} +/** Returns true if PolyBlockType is of image type */ +inline bool PTIsImageType(PolyBlockType type) { + return type == PT_FLOWING_IMAGE || type == PT_HEADING_IMAGE || + type == PT_PULLOUT_IMAGE; +} +/** Returns true if PolyBlockType is of text type */ +inline bool PTIsTextType(PolyBlockType type) { + return type == PT_FLOWING_TEXT || type == PT_HEADING_TEXT || + type == PT_PULLOUT_TEXT || type == PT_TABLE || + type == PT_VERTICAL_TEXT || type == PT_CAPTION_TEXT || + type == PT_INLINE_EQUATION; +} +// Returns true if PolyBlockType is of pullout(inter-column) type +inline bool PTIsPulloutType(PolyBlockType type) { + return type == PT_PULLOUT_IMAGE || type == PT_PULLOUT_TEXT; +} + +/** String name for each block type. Keep in sync with PolyBlockType. */ +extern const char* kPolyBlockNames[]; + +namespace tesseract { +/** + * +------------------+ Orientation Example: + * | 1 Aaaa Aaaa Aaaa | ==================== + * | Aaa aa aaa aa | To left is a diagram of some (1) English and + * | aaaaaa A aa aaa. | (2) Chinese text and a (3) photo credit. + * | 2 | + * | ####### c c C | Upright Latin characters are represented as A and a. 
+ * | ####### c c c | '<' represents a latin character rotated + * | < ####### c c c | anti-clockwise 90 degrees. + * | < ####### c c | + * | < ####### . c | Upright Chinese characters are represented C and c. + * | 3 ####### c | + * +------------------+ NOTA BENE: enum values here should match goodoc.proto + + * If you orient your head so that "up" aligns with Orientation, + * then the characters will appear "right side up" and readable. + * + * In the example above, both the English and Chinese paragraphs are oriented + * so their "up" is the top of the page (page up). The photo credit is read + * with one's head turned leftward ("up" is to page left). + * + * The values of this enum match the convention of Tesseract's osdetect.h +*/ +enum Orientation { + ORIENTATION_PAGE_UP = 0, + ORIENTATION_PAGE_RIGHT = 1, + ORIENTATION_PAGE_DOWN = 2, + ORIENTATION_PAGE_LEFT = 3, +}; + +/** + * The grapheme clusters within a line of text are laid out logically + * in this direction, judged when looking at the text line rotated so that + * its Orientation is "page up". + * + * For English text, the writing direction is left-to-right. For the + * Chinese text in the above example, the writing direction is top-to-bottom. +*/ +enum WritingDirection { + WRITING_DIRECTION_LEFT_TO_RIGHT = 0, + WRITING_DIRECTION_RIGHT_TO_LEFT = 1, + WRITING_DIRECTION_TOP_TO_BOTTOM = 2, +}; + +/** + * The text lines are read in the given sequence. + * + * In English, the order is top-to-bottom. + * In Chinese, vertical text lines are read right-to-left. Mongolian is + * written in vertical columns top to bottom like Chinese, but the lines + * order left-to right. + * + * Note that only some combinations make sense. For example, + * WRITING_DIRECTION_LEFT_TO_RIGHT implies TEXTLINE_ORDER_TOP_TO_BOTTOM +*/ +enum TextlineOrder { + TEXTLINE_ORDER_LEFT_TO_RIGHT = 0, + TEXTLINE_ORDER_RIGHT_TO_LEFT = 1, + TEXTLINE_ORDER_TOP_TO_BOTTOM = 2, +}; + +/** + * Possible modes for page layout analysis. These *must* be kept in order + * of decreasing amount of layout analysis to be done, except for OSD_ONLY, + * so that the inequality test macros below work. +*/ +enum PageSegMode { + PSM_OSD_ONLY = 0, ///< Orientation and script detection only. + PSM_AUTO_OSD = 1, ///< Automatic page segmentation with orientation and + ///< script detection. (OSD) + PSM_AUTO_ONLY = 2, ///< Automatic page segmentation, but no OSD, or OCR. + PSM_AUTO = 3, ///< Fully automatic page segmentation, but no OSD. + PSM_SINGLE_COLUMN = 4, ///< Assume a single column of text of variable sizes. + PSM_SINGLE_BLOCK_VERT_TEXT = 5, ///< Assume a single uniform block of vertically + ///< aligned text. + PSM_SINGLE_BLOCK = 6, ///< Assume a single uniform block of text. (Default.) + PSM_SINGLE_LINE = 7, ///< Treat the image as a single text line. + PSM_SINGLE_WORD = 8, ///< Treat the image as a single word. + PSM_CIRCLE_WORD = 9, ///< Treat the image as a single word in a circle. + PSM_SINGLE_CHAR = 10, ///< Treat the image as a single character. + PSM_SPARSE_TEXT = 11, ///< Find as much text as possible in no particular order. + PSM_SPARSE_TEXT_OSD = 12, ///< Sparse text with orientation and script det. + PSM_RAW_LINE = 13, ///< Treat the image as a single text line, bypassing + ///< hacks that are Tesseract-specific. + + PSM_COUNT ///< Number of enum entries. +}; + +/** + * Inline functions that act on a PageSegMode to determine whether components of + * layout analysis are enabled. 
+ * *Depend critically on the order of elements of PageSegMode.* + * NOTE that arg is an int for compatibility with INT_PARAM. +*/ +inline bool PSM_OSD_ENABLED(int pageseg_mode) { + return pageseg_mode <= PSM_AUTO_OSD || pageseg_mode == PSM_SPARSE_TEXT_OSD; +} +inline bool PSM_ORIENTATION_ENABLED(int pageseg_mode) { + return pageseg_mode <= PSM_AUTO || pageseg_mode == PSM_SPARSE_TEXT_OSD; +} +inline bool PSM_COL_FIND_ENABLED(int pageseg_mode) { + return pageseg_mode >= PSM_AUTO_OSD && pageseg_mode <= PSM_AUTO; +} +inline bool PSM_SPARSE(int pageseg_mode) { + return pageseg_mode == PSM_SPARSE_TEXT || pageseg_mode == PSM_SPARSE_TEXT_OSD; +} +inline bool PSM_BLOCK_FIND_ENABLED(int pageseg_mode) { + return pageseg_mode >= PSM_AUTO_OSD && pageseg_mode <= PSM_SINGLE_COLUMN; +} +inline bool PSM_LINE_FIND_ENABLED(int pageseg_mode) { + return pageseg_mode >= PSM_AUTO_OSD && pageseg_mode <= PSM_SINGLE_BLOCK; +} +inline bool PSM_WORD_FIND_ENABLED(int pageseg_mode) { + return (pageseg_mode >= PSM_AUTO_OSD && pageseg_mode <= PSM_SINGLE_LINE) || + pageseg_mode == PSM_SPARSE_TEXT || pageseg_mode == PSM_SPARSE_TEXT_OSD; +} + +/** + * enum of the elements of the page hierarchy, used in ResultIterator + * to provide functions that operate on each level without having to + * have 5x as many functions. +*/ +enum PageIteratorLevel { + RIL_BLOCK, // Block of text/image/separator line. + RIL_PARA, // Paragraph within a block. + RIL_TEXTLINE, // Line within a paragraph. + RIL_WORD, // Word within a textline. + RIL_SYMBOL // Symbol/character within a word. +}; + +/** + * JUSTIFICATION_UNKNOWN + * The alignment is not clearly one of the other options. This could happen + * for example if there are only one or two lines of text or the text looks + * like source code or poetry. + * + * NOTA BENE: Fully justified paragraphs (text aligned to both left and right + * margins) are marked by Tesseract with JUSTIFICATION_LEFT if their text + * is written with a left-to-right script and with JUSTIFICATION_RIGHT if + * their text is written in a right-to-left script. + * + * Interpretation for text read in vertical lines: + * "Left" is wherever the starting reading position is. + * + * JUSTIFICATION_LEFT + * Each line, except possibly the first, is flush to the same left tab stop. + * + * JUSTIFICATION_CENTER + * The text lines of the paragraph are centered about a line going + * down through their middle of the text lines. + * + * JUSTIFICATION_RIGHT + * Each line, except possibly the first, is flush to the same right tab stop. + */ +enum ParagraphJustification { + JUSTIFICATION_UNKNOWN, + JUSTIFICATION_LEFT, + JUSTIFICATION_CENTER, + JUSTIFICATION_RIGHT, +}; + +/** + * When Tesseract/Cube is initialized we can choose to instantiate/load/run + * only the Tesseract part, only the Cube part or both along with the combiner. + * The preference of which engine to use is stored in tessedit_ocr_engine_mode. + * + * ATTENTION: When modifying this enum, please make sure to make the + * appropriate changes to all the enums mirroring it (e.g. OCREngine in + * cityblock/workflow/detection/detection_storage.proto). Such enums will + * mention the connection to OcrEngineMode in the comments. +*/ +enum OcrEngineMode { + OEM_TESSERACT_ONLY, // Run Tesseract only - fastest; deprecated + OEM_LSTM_ONLY, // Run just the LSTM line recognizer. + OEM_TESSERACT_LSTM_COMBINED, // Run the LSTM recognizer, but allow fallback + // to Tesseract when things get difficult. 
+ // deprecated + OEM_DEFAULT, // Specify this mode when calling init_*(), + // to indicate that any of the above modes + // should be automatically inferred from the + // variables in the language-specific config, + // command-line configs, or if not specified + // in any of the above should be set to the + // default OEM_TESSERACT_ONLY. + OEM_COUNT // Number of OEMs +}; + +} // namespace tesseract. + +#endif // TESSERACT_CCSTRUCT_PUBLICTYPES_H_ diff --git a/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/renderer.h b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/renderer.h new file mode 100644 index 00000000..177fd5ff --- /dev/null +++ b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/renderer.h @@ -0,0 +1,310 @@ +/////////////////////////////////////////////////////////////////////// +// File: renderer.h +// Description: Rendering interface to inject into TessBaseAPI +// +// (C) Copyright 2011, Google Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +/////////////////////////////////////////////////////////////////////// + +#ifndef TESSERACT_API_RENDERER_H_ +#define TESSERACT_API_RENDERER_H_ + +// To avoid collision with other typenames include the ABSOLUTE MINIMUM +// complexity of includes here. Use forward declarations wherever possible +// and hide includes of complex types in baseapi.cpp. +#include // for std::string +#include "genericvector.h" +#include "platform.h" +#include "strngs.h" // for STRING + +struct Pix; + +namespace tesseract { + +class TessBaseAPI; + +/** + * Interface for rendering tesseract results into a document, such as text, + * HOCR or pdf. This class is abstract. Specific classes handle individual + * formats. This interface is then used to inject the renderer class into + * tesseract when processing images. + * + * For simplicity implementing this with tesesract version 3.01, + * the renderer contains document state that is cleared from document + * to document just as the TessBaseAPI is. This way the base API can just + * delegate its rendering functionality to injected renderers, and the + * renderers can manage the associated state needed for the specific formats + * in addition to the heuristics for producing it. + */ +class TESS_API TessResultRenderer { + public: + virtual ~TessResultRenderer(); + + // Takes ownership of pointer so must be new'd instance. + // Renderers aren't ordered, but appends the sequences of next parameter + // and existing next(). The renderers should be unique across both lists. + void insert(TessResultRenderer* next); + + // Returns the next renderer or nullptr. + TessResultRenderer* next() { + return next_; + } + + /** + * Starts a new document with the given title. + * This clears the contents of the output data. + * Title should use UTF-8 encoding. + */ + bool BeginDocument(const char* title); + + /** + * Adds the recognized text from the source image to the current document. + * Invalid if BeginDocument not yet called. 
+ * + * Note that this API is a bit weird but is designed to fit into the + * current TessBaseAPI implementation where the api has lots of state + * information that we might want to add in. + */ + bool AddImage(TessBaseAPI* api); + + /** + * Finishes the document and finalizes the output data + * Invalid if BeginDocument not yet called. + */ + bool EndDocument(); + + const char* file_extension() const { + return file_extension_; + } + const char* title() const { + return title_.c_str(); + } + + // Is everything fine? Otherwise something went wrong. + bool happy() { + return happy_; + } + + /** + * Returns the index of the last image given to AddImage + * (i.e. images are incremented whether the image succeeded or not) + * + * This is always defined. It means either the number of the + * current image, the last image ended, or in the completed document + * depending on when in the document lifecycle you are looking at it. + * Will return -1 if a document was never started. + */ + int imagenum() const { + return imagenum_; + } + + protected: + /** + * Called by concrete classes. + * + * outputbase is the name of the output file excluding + * extension. For example, "/path/to/chocolate-chip-cookie-recipe" + * + * extension indicates the file extension to be used for output + * files. For example "pdf" will produce a .pdf file, and "hocr" + * will produce .hocr files. + */ + TessResultRenderer(const char* outputbase, const char* extension); + + // Hook for specialized handling in BeginDocument() + virtual bool BeginDocumentHandler(); + + // This must be overridden to render the OCR'd results + virtual bool AddImageHandler(TessBaseAPI* api) = 0; + + // Hook for specialized handling in EndDocument() + virtual bool EndDocumentHandler(); + + // Renderers can call this to append '\0' terminated strings into + // the output string returned by GetOutput. + // This method will grow the output buffer if needed. + void AppendString(const char* s); + + // Renderers can call this to append binary byte sequences into + // the output string returned by GetOutput. Note that s is not necessarily + // '\0' terminated (and can contain '\0' within it). + // This method will grow the output buffer if needed. + void AppendData(const char* s, int len); + + private: + const char* file_extension_; // standard extension for generated output + STRING title_; // title of document being renderered + int imagenum_; // index of last image added + + FILE* fout_; // output file pointer + TessResultRenderer* next_; // Can link multiple renderers together + bool happy_; // I get grumpy when the disk fills up, etc. 
+}; + +/** + * Renders tesseract output into a plain UTF-8 text string + */ +class TESS_API TessTextRenderer : public TessResultRenderer { + public: + explicit TessTextRenderer(const char* outputbase); + + protected: + bool AddImageHandler(TessBaseAPI* api) override; +}; + +/** + * Renders tesseract output into an hocr text string + */ +class TESS_API TessHOcrRenderer : public TessResultRenderer { + public: + explicit TessHOcrRenderer(const char* outputbase, bool font_info); + explicit TessHOcrRenderer(const char* outputbase); + + protected: + bool BeginDocumentHandler() override; + bool AddImageHandler(TessBaseAPI* api) override; + bool EndDocumentHandler() override; + + private: + bool font_info_; // whether to print font information +}; + +/** + * Renders tesseract output into an alto text string + */ +class TESS_API TessAltoRenderer : public TessResultRenderer { + public: + explicit TessAltoRenderer(const char* outputbase); + + protected: + bool BeginDocumentHandler() override; + bool AddImageHandler(TessBaseAPI* api) override; + bool EndDocumentHandler() override; +}; + +/** + * Renders Tesseract output into a TSV string + */ +class TESS_API TessTsvRenderer : public TessResultRenderer { + public: + explicit TessTsvRenderer(const char* outputbase, bool font_info); + explicit TessTsvRenderer(const char* outputbase); + + protected: + bool BeginDocumentHandler() override; + bool AddImageHandler(TessBaseAPI* api) override; + bool EndDocumentHandler() override; + + private: + bool font_info_; // whether to print font information +}; + +/** + * Renders tesseract output into searchable PDF + */ +class TESS_API TessPDFRenderer : public TessResultRenderer { + public: + // datadir is the location of the TESSDATA. We need it because + // we load a custom PDF font from this location. + TessPDFRenderer(const char* outputbase, const char* datadir, + bool textonly = false); + + protected: + bool BeginDocumentHandler() override; + bool AddImageHandler(TessBaseAPI* api) override; + bool EndDocumentHandler() override; + + private: + // We don't want to have every image in memory at once, + // so we store some metadata as we go along producing + // PDFs one page at a time. At the end, that metadata is + // used to make everything that isn't easily handled in a + // streaming fashion. + long int obj_; // counter for PDF objects + GenericVector offsets_; // offset of every PDF object in bytes + GenericVector pages_; // object number for every /Page object + std::string datadir_; // where to find the custom font + bool textonly_; // skip images if set + // Bookkeeping only. DIY = Do It Yourself. + void AppendPDFObjectDIY(size_t objectsize); + // Bookkeeping + emit data. + void AppendPDFObject(const char* data); + // Create the /Contents object for an entire page. + char* GetPDFTextObjects(TessBaseAPI* api, double width, double height); + // Turn an image into a PDF object. Only transcode if we have to. 
+ static bool imageToPDFObj(Pix* pix, const char* filename, long int objnum, + char** pdf_object, long int* pdf_object_size, + int jpg_quality); +}; + +/** + * Renders tesseract output into a plain UTF-8 text string + */ +class TESS_API TessUnlvRenderer : public TessResultRenderer { + public: + explicit TessUnlvRenderer(const char* outputbase); + + protected: + bool AddImageHandler(TessBaseAPI* api) override; +}; + +/** + * Renders tesseract output into a plain UTF-8 text string for LSTMBox + */ +class TESS_API TessLSTMBoxRenderer : public TessResultRenderer { + public: + explicit TessLSTMBoxRenderer(const char* outputbase); + + protected: + bool AddImageHandler(TessBaseAPI* api) override; +}; + +/** + * Renders tesseract output into a plain UTF-8 text string + */ +class TESS_API TessBoxTextRenderer : public TessResultRenderer { + public: + explicit TessBoxTextRenderer(const char* outputbase); + + protected: + bool AddImageHandler(TessBaseAPI* api) override; +}; + +/** + * Renders tesseract output into a plain UTF-8 text string in WordStr format + */ +class TESS_API TessWordStrBoxRenderer : public TessResultRenderer { + public: + explicit TessWordStrBoxRenderer(const char* outputbase); + + protected: + bool AddImageHandler(TessBaseAPI* api) override; +}; + +#ifndef DISABLED_LEGACY_ENGINE + +/** + * Renders tesseract output into an osd text string + */ +class TESS_API TessOsdRenderer : public TessResultRenderer { + public: + explicit TessOsdRenderer(const char* outputbase); + + protected: + bool AddImageHandler(TessBaseAPI* api) override; +}; + +#endif // ndef DISABLED_LEGACY_ENGINE + +} // namespace tesseract. + +#endif // TESSERACT_API_RENDERER_H_ diff --git a/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/resultiterator.h b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/resultiterator.h new file mode 100644 index 00000000..4333012b --- /dev/null +++ b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/resultiterator.h @@ -0,0 +1,257 @@ +/////////////////////////////////////////////////////////////////////// +// File: resultiterator.h +// Description: Iterator for tesseract results that is capable of +// iterating in proper reading order over Bi Directional +// (e.g. mixed Hebrew and English) text. +// Author: David Eger +// Created: Fri May 27 13:58:06 PST 2011 +// +// (C) Copyright 2011, Google Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +/////////////////////////////////////////////////////////////////////// + +#ifndef TESSERACT_CCMAIN_RESULT_ITERATOR_H_ +#define TESSERACT_CCMAIN_RESULT_ITERATOR_H_ + +#include // for std::pair +#include // for std::vector +#include "ltrresultiterator.h" // for LTRResultIterator +#include "platform.h" // for TESS_API, TESS_LOCAL +#include "publictypes.h" // for PageIteratorLevel +#include "unichar.h" // for StrongScriptDirection + +template class GenericVector; +template class GenericVectorEqEq; + +class STRING; + +namespace tesseract { + +class Tesseract; + +class TESS_API ResultIterator : public LTRResultIterator { + public: + static ResultIterator *StartOfParagraph(const LTRResultIterator &resit); + + /** + * ResultIterator is copy constructible! + * The default copy constructor works just fine for us. + */ + ~ResultIterator() override = default; + + // ============= Moving around within the page ============. + /** + * Moves the iterator to point to the start of the page to begin + * an iteration. + */ + void Begin() override; + + /** + * Moves to the start of the next object at the given level in the + * page hierarchy in the appropriate reading order and returns false if + * the end of the page was reached. + * NOTE that RIL_SYMBOL will skip non-text blocks, but all other + * PageIteratorLevel level values will visit each non-text block once. + * Think of non text blocks as containing a single para, with a single line, + * with a single imaginary word. + * Calls to Next with different levels may be freely intermixed. + * This function iterates words in right-to-left scripts correctly, if + * the appropriate language has been loaded into Tesseract. + */ + bool Next(PageIteratorLevel level) override; + + /** + * IsAtBeginningOf() returns whether we're at the logical beginning of the + * given level. (as opposed to ResultIterator's left-to-right top-to-bottom + * order). Otherwise, this acts the same as PageIterator::IsAtBeginningOf(). + * For a full description, see pageiterator.h + */ + bool IsAtBeginningOf(PageIteratorLevel level) const override; + + /** + * Implement PageIterator's IsAtFinalElement correctly in a BiDi context. + * For instance, IsAtFinalElement(RIL_PARA, RIL_WORD) returns whether we + * point at the last word in a paragraph. See PageIterator for full comment. + */ + bool IsAtFinalElement(PageIteratorLevel level, + PageIteratorLevel element) const override; + + // ============= Functions that refer to words only ============. + // Returns the number of blanks before the current word. + int BlanksBeforeWord() const; + + // ============= Accessing data ==============. + + /** + * Returns the null terminated UTF-8 encoded text string for the current + * object at the given level. Use delete [] to free after use. + */ + virtual char* GetUTF8Text(PageIteratorLevel level) const; + + /** + * Returns the LSTM choices for every LSTM timestep for the current word. + */ + virtual std::vector>>* + GetBestLSTMSymbolChoices() const; + + /** + * Return whether the current paragraph's dominant reading direction + * is left-to-right (as opposed to right-to-left). + */ + bool ParagraphIsLtr() const; + + // ============= Exposed only for testing =============. + + /** + * Yields the reading order as a sequence of indices and (optional) + * meta-marks for a set of words (given left-to-right). + * The meta marks are passed as negative values: + * kMinorRunStart Start of minor direction text. + * kMinorRunEnd End of minor direction text. 
+ * kComplexWord The next indexed word contains both left-to-right and + * right-to-left characters and was treated as neutral. + * + * For example, suppose we have five words in a text line, + * indexed [0,1,2,3,4] from the leftmost side of the text line. + * The following are all believable reading_orders: + * + * Left-to-Right (in ltr paragraph): + * { 0, 1, 2, 3, 4 } + * Left-to-Right (in rtl paragraph): + * { kMinorRunStart, 0, 1, 2, 3, 4, kMinorRunEnd } + * Right-to-Left (in rtl paragraph): + * { 4, 3, 2, 1, 0 } + * Left-to-Right except for an RTL phrase in words 2, 3 in an ltr paragraph: + * { 0, 1, kMinorRunStart, 3, 2, kMinorRunEnd, 4 } + */ + static void CalculateTextlineOrder( + bool paragraph_is_ltr, + const GenericVector &word_dirs, + GenericVectorEqEq *reading_order); + + static const int kMinorRunStart; + static const int kMinorRunEnd; + static const int kComplexWord; + + protected: + /** + * We presume the data associated with the given iterator will outlive us. + * NB: This is private because it does something that is non-obvious: + * it resets to the beginning of the paragraph instead of staying wherever + * resit might have pointed. + */ + TESS_LOCAL explicit ResultIterator(const LTRResultIterator &resit); + + private: + /** + * Calculates the current paragraph's dominant writing direction. + * Typically, members should use current_paragraph_ltr_ instead. + */ + bool CurrentParagraphIsLtr() const; + + /** + * Returns word indices as measured from resit->RestartRow() = index 0 + * for the reading order of words within a textline given an iterator + * into the middle of the text line. + * In addition to non-negative word indices, the following negative values + * may be inserted: + * kMinorRunStart Start of minor direction text. + * kMinorRunEnd End of minor direction text. + * kComplexWord The previous word contains both left-to-right and + * right-to-left characters and was treated as neutral. + */ + void CalculateTextlineOrder(bool paragraph_is_ltr, + const LTRResultIterator &resit, + GenericVectorEqEq *indices) const; + /** Same as above, but the caller's ssd gets filled in if ssd != nullptr. */ + void CalculateTextlineOrder(bool paragraph_is_ltr, + const LTRResultIterator &resit, + GenericVector *ssd, + GenericVectorEqEq *indices) const; + + /** + * What is the index of the current word in a strict left-to-right reading + * of the row? + */ + int LTRWordIndex() const; + + /** + * Given an iterator pointing at a word, returns the logical reading order + * of blob indices for the word. + */ + void CalculateBlobOrder(GenericVector *blob_indices) const; + + /** Precondition: current_paragraph_is_ltr_ is set. */ + void MoveToLogicalStartOfTextline(); + + /** + * Precondition: current_paragraph_is_ltr_ and in_minor_direction_ + * are set. + */ + void MoveToLogicalStartOfWord(); + + /** Are we pointing at the final (reading order) symbol of the word? */ + bool IsAtFinalSymbolOfWord() const; + + /** Are we pointing at the first (reading order) symbol of the word? */ + bool IsAtFirstSymbolOfWord() const; + + /** + * Append any extra marks that should be appended to this word when printed. + * Mostly, these are Unicode BiDi control characters. 
+ */ + void AppendSuffixMarks(STRING *text) const; + + /** Appends the current word in reading order to the given buffer.*/ + void AppendUTF8WordText(STRING *text) const; + + /** + * Appends the text of the current text line, *assuming this iterator is + * positioned at the beginning of the text line* This function + * updates the iterator to point to the first position past the text line. + * Each textline is terminated in a single newline character. + * If the textline ends a paragraph, it gets a second terminal newline. + */ + void IterateAndAppendUTF8TextlineText(STRING *text); + + /** + * Appends the text of the current paragraph in reading order + * to the given buffer. + * Each textline is terminated in a single newline character, and the + * paragraph gets an extra newline at the end. + */ + void AppendUTF8ParagraphText(STRING *text) const; + + /** Returns whether the bidi_debug flag is set to at least min_level. */ + bool BidiDebug(int min_level) const; + + bool current_paragraph_is_ltr_; + + /** + * Is the currently pointed-at character at the beginning of + * a minor-direction run? + */ + bool at_beginning_of_minor_run_; + + /** Is the currently pointed-at character in a minor-direction sequence? */ + bool in_minor_direction_; + + /** + * Should detected inter-word spaces be preserved, or "compressed" to a single + * space character (default behavior). + */ + bool preserve_interword_spaces_; +}; + +} // namespace tesseract. + +#endif // TESSERACT_CCMAIN_RESULT_ITERATOR_H_ diff --git a/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/serialis.h b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/serialis.h new file mode 100644 index 00000000..095b9227 --- /dev/null +++ b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/serialis.h @@ -0,0 +1,162 @@ +/********************************************************************** + * File: serialis.h (Formerly serialmac.h) + * Description: Inline routines and macros for serialisation functions + * Author: Phil Cheatle + * + * (C) Copyright 1990, Hewlett-Packard Ltd. + ** Licensed under the Apache License, Version 2.0 (the "License"); + ** you may not use this file except in compliance with the License. + ** You may obtain a copy of the License at + ** http://www.apache.org/licenses/LICENSE-2.0 + ** Unless required by applicable law or agreed to in writing, software + ** distributed under the License is distributed on an "AS IS" BASIS, + ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + ** See the License for the specific language governing permissions and + ** limitations under the License. + * + **********************************************************************/ + +#ifndef SERIALIS_H +#define SERIALIS_H + +#include // uint8_t +#include +#include +#include + +template +class GenericVector; +class STRING; + +/*********************************************************************** + QUOTE_IT MACRO DEFINITION + =========================== +Replace with "". may be an arbitrary number of tokens +***********************************************************************/ + +#define QUOTE_IT(parm) #parm + +namespace tesseract { + +// Return number of elements of an array. +template +constexpr size_t countof(T const (&)[N]) noexcept { + return N; +} + +// Function to read a GenericVector from a whole file. +// Returns false on failure. +using FileReader = bool (*)(const STRING&, GenericVector*); +// Function to write a GenericVector to a whole file. +// Returns false on failure. 
+using FileWriter = bool (*)(const GenericVector&, const STRING&); + +// Deserialize data from file. +bool DeSerialize(FILE* fp, char* data, size_t n = 1); +bool DeSerialize(FILE* fp, float* data, size_t n = 1); +bool DeSerialize(FILE* fp, int8_t* data, size_t n = 1); +bool DeSerialize(FILE* fp, int16_t* data, size_t n = 1); +bool DeSerialize(FILE* fp, int32_t* data, size_t n = 1); +bool DeSerialize(FILE* fp, uint8_t* data, size_t n = 1); +bool DeSerialize(FILE* fp, uint16_t* data, size_t n = 1); +bool DeSerialize(FILE* fp, uint32_t* data, size_t n = 1); + +// Serialize data to file. +bool Serialize(FILE* fp, const char* data, size_t n = 1); +bool Serialize(FILE* fp, const float* data, size_t n = 1); +bool Serialize(FILE* fp, const int8_t* data, size_t n = 1); +bool Serialize(FILE* fp, const int16_t* data, size_t n = 1); +bool Serialize(FILE* fp, const int32_t* data, size_t n = 1); +bool Serialize(FILE* fp, const uint8_t* data, size_t n = 1); +bool Serialize(FILE* fp, const uint16_t* data, size_t n = 1); +bool Serialize(FILE* fp, const uint32_t* data, size_t n = 1); + +// Simple file class. +// Allows for portable file input from memory and from foreign file systems. +class TFile { + public: + TFile(); + ~TFile(); + + // All the Open methods load the whole file into memory for reading. + // Opens a file with a supplied reader, or nullptr to use the default. + // Note that mixed read/write is not supported. + bool Open(const STRING& filename, FileReader reader); + // From an existing memory buffer. + bool Open(const char* data, int size); + // From an open file and an end offset. + bool Open(FILE* fp, int64_t end_offset); + // Sets the value of the swap flag, so that FReadEndian does the right thing. + void set_swap(bool value) { + swap_ = value; + } + + // Deserialize data. + bool DeSerialize(char* data, size_t count = 1); + bool DeSerialize(double* data, size_t count = 1); + bool DeSerialize(float* data, size_t count = 1); + bool DeSerialize(int8_t* data, size_t count = 1); + bool DeSerialize(int16_t* data, size_t count = 1); + bool DeSerialize(int32_t* data, size_t count = 1); + bool DeSerialize(int64_t* data, size_t count = 1); + bool DeSerialize(uint8_t* data, size_t count = 1); + bool DeSerialize(uint16_t* data, size_t count = 1); + bool DeSerialize(uint32_t* data, size_t count = 1); + bool DeSerialize(uint64_t* data, size_t count = 1); + + // Serialize data. + bool Serialize(const char* data, size_t count = 1); + bool Serialize(const double* data, size_t count = 1); + bool Serialize(const float* data, size_t count = 1); + bool Serialize(const int8_t* data, size_t count = 1); + bool Serialize(const int16_t* data, size_t count = 1); + bool Serialize(const int32_t* data, size_t count = 1); + bool Serialize(const int64_t* data, size_t count = 1); + bool Serialize(const uint8_t* data, size_t count = 1); + bool Serialize(const uint16_t* data, size_t count = 1); + bool Serialize(const uint32_t* data, size_t count = 1); + bool Serialize(const uint64_t* data, size_t count = 1); + + // Skip data. + bool Skip(size_t count); + + // Reads a line like fgets. Returns nullptr on EOF, otherwise buffer. + // Reads at most buffer_size bytes, including '\0' terminator, even if + // the line is longer. Does nothing if buffer_size <= 0. + char* FGets(char* buffer, int buffer_size); + // Replicates fread, followed by a swap of the bytes if needed, returning the + // number of items read. If swap_ is true then the count items will each have + // size bytes reversed. 
+ int FReadEndian(void* buffer, size_t size, int count); + // Replicates fread, returning the number of items read. + int FRead(void* buffer, size_t size, int count); + // Resets the TFile as if it has been Opened, but nothing read. + // Only allowed while reading! + void Rewind(); + + // Open for writing. Either supply a non-nullptr data with OpenWrite before + // calling FWrite, (no close required), or supply a nullptr data to OpenWrite + // and call CloseWrite to write to a file after the FWrites. + void OpenWrite(GenericVector* data); + bool CloseWrite(const STRING& filename, FileWriter writer); + + // Replicates fwrite, returning the number of items written. + // To use fprintf, use snprintf and FWrite. + int FWrite(const void* buffer, size_t size, int count); + + private: + // The number of bytes used so far. + int offset_; + // The buffered data from the file. + GenericVector* data_; + // True if the data_ pointer is owned by *this. + bool data_is_owned_; + // True if the TFile is open for writing. + bool is_writing_; + // True if bytes need to be swapped in FReadEndian. + bool swap_; +}; + +} // namespace tesseract. + +#endif diff --git a/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/strngs.h b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/strngs.h new file mode 100644 index 00000000..2f15064f --- /dev/null +++ b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/strngs.h @@ -0,0 +1,188 @@ +/********************************************************************** + * File: strngs.h (Formerly strings.h) + * Description: STRING class definition. + * Author: Ray Smith + * + * (C) Copyright 1991, Hewlett-Packard Ltd. + ** Licensed under the Apache License, Version 2.0 (the "License"); + ** you may not use this file except in compliance with the License. + ** You may obtain a copy of the License at + ** http://www.apache.org/licenses/LICENSE-2.0 + ** Unless required by applicable law or agreed to in writing, software + ** distributed under the License is distributed on an "AS IS" BASIS, + ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + ** See the License for the specific language governing permissions and + ** limitations under the License. + * + **********************************************************************/ + +#ifndef STRNGS_H +#define STRNGS_H + +#include // for assert +#include // for uint32_t +#include // for FILE +#include // for strncpy +#include "platform.h" // for TESS_API + +namespace tesseract { +class TFile; +} // namespace tesseract. + +// STRING_IS_PROTECTED means that string[index] = X is invalid +// because you have to go through strings interface to modify it. +// This allows the string to ensure internal integrity and maintain +// its own string length. Unfortunately this is not possible because +// STRINGS are used as direct-manipulation data buffers for things +// like length arrays and many places cast away the const on string() +// to mutate the string. Turning this off means that internally we +// cannot assume we know the strlen. +#define STRING_IS_PROTECTED 0 + +template +class GenericVector; + +class TESS_API STRING { + public: + STRING(); + STRING(const STRING& string); + STRING(const char* string); + STRING(const char* data, int length); + ~STRING(); + + // Writes to the given file. Returns false in case of error. + bool Serialize(FILE* fp) const; + // Reads from the given file. Returns false in case of error. + // If swap is true, assumes a big/little-endian swap is needed. 
+ bool DeSerialize(bool swap, FILE* fp); + // Writes to the given file. Returns false in case of error. + bool Serialize(tesseract::TFile* fp) const; + // Reads from the given file. Returns false in case of error. + // If swap is true, assumes a big/little-endian swap is needed. + bool DeSerialize(tesseract::TFile* fp); + // As DeSerialize, but only seeks past the data - hence a static method. + static bool SkipDeSerialize(tesseract::TFile* fp); + + bool contains(char c) const; + int32_t length() const; + int32_t size() const { + return length(); + } + // Workaround to avoid g++ -Wsign-compare warnings. + uint32_t unsigned_size() const { + const int32_t len = length(); + assert(0 <= len); + return static_cast(len); + } + const char* string() const; + const char* c_str() const; + + inline char* strdup() const { + int32_t len = length() + 1; + return strncpy(new char[len], GetCStr(), len); + } + +#if STRING_IS_PROTECTED + const char& operator[](int32_t index) const; + // len is number of chars in s to insert starting at index in this string + void insert_range(int32_t index, const char* s, int len); + void erase_range(int32_t index, int len); +#else + char& operator[](int32_t index) const; +#endif + void split(char c, GenericVector* splited); + void truncate_at(int32_t index); + + bool operator==(const STRING& string) const; + bool operator!=(const STRING& string) const; + bool operator!=(const char* string) const; + + STRING& operator=(const char* string); + STRING& operator=(const STRING& string); + + STRING operator+(const STRING& string) const; + STRING operator+(char ch) const; + + STRING& operator+=(const char* string); + STRING& operator+=(const STRING& string); + STRING& operator+=(char ch); + + // Assignment for strings which are not null-terminated. + void assign(const char* cstr, int len); + + // Appends the given string and int (as a %d) to this. + // += cannot be used for ints as there as a char += operator that would + // be ambiguous, and ints usually need a string before or between them + // anyway. + void add_str_int(const char* str, int number); + // Appends the given string and double (as a %.8g) to this. + void add_str_double(const char* str, double number); + + // ensure capacity but keep pointer encapsulated + inline void ensure(int32_t min_capacity) { + ensure_cstr(min_capacity); + } + + private: + typedef struct STRING_HEADER { + // How much space was allocated in the string buffer for char data. + int capacity_; + + // used_ is how much of the capacity is currently being used, + // including a '\0' terminator. + // + // If used_ is 0 then string is nullptr (not even the '\0') + // else if used_ > 0 then it is strlen() + 1 (because it includes '\0') + // else strlen is >= 0 (not nullptr) but needs to be computed. + // this condition is set when encapsulation is violated because + // an API returned a mutable string. + // + // capacity_ - used_ = excess capacity that the string can grow + // without reallocating + mutable int used_; + } STRING_HEADER; + + // To preserve the behavior of the old serialization, we only have space + // for one pointer in this structure. So we are embedding a data structure + // at the start of the storage that will hold additional state variables, + // then storing the actual string contents immediately after. 
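  // A short usage sketch (illustrative, not part of the original header);
  // all of the state below lives in that single allocation:
  //
  //   STRING s("scale");
  //   s.add_str_int(" = ", 42);                  // s is now "scale = 42"
  //   assert(s.length() == 10);
  //   assert(strcmp(s.c_str(), "scale = 42") == 0);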
+ STRING_HEADER* data_; + + // returns the header part of the storage + inline STRING_HEADER* GetHeader() { + return data_; + } + inline const STRING_HEADER* GetHeader() const { + return data_; + } + + // returns the string data part of storage + inline char* GetCStr() { + return (reinterpret_cast(data_)) + sizeof(STRING_HEADER); + } + + inline const char* GetCStr() const { + return (reinterpret_cast(data_)) + sizeof(STRING_HEADER); + } + inline bool InvariantOk() const { +#if STRING_IS_PROTECTED + return (GetHeader()->used_ == 0) + ? (string() == nullptr) + : (GetHeader()->used_ == (strlen(string()) + 1)); +#else + return true; +#endif + } + + // Ensure string has requested capacity as optimization + // to avoid unnecessary reallocations. + // The return value is a cstr buffer with at least requested capacity + char* ensure_cstr(int32_t min_capacity); + + void FixHeader() const; // make used_ non-negative, even if const + + char* AllocData(int used, int capacity); + void DiscardData(); +}; + +#endif diff --git a/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/tess_version.h b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/tess_version.h new file mode 100644 index 00000000..9620eb5d --- /dev/null +++ b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/tess_version.h @@ -0,0 +1,30 @@ +/////////////////////////////////////////////////////////////////////// +// File: version.h +// Description: Version information +// +// (C) Copyright 2018, Google Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +/////////////////////////////////////////////////////////////////////// + +#ifndef TESSERACT_API_VERSION_H_ +#define TESSERACT_API_VERSION_H_ + +#define TESSERACT_MAJOR_VERSION 4 +#define TESSERACT_MINOR_VERSION 1 +#define TESSERACT_MICRO_VERSION 1 +#define TESSERACT_VERSION \ + (TESSERACT_MAJOR_VERSION << 16 | \ + TESSERACT_MINOR_VERSION << 8 | \ + TESSERACT_MICRO_VERSION) +#define TESSERACT_VERSION_STR "4.1.1" + +#endif // TESSERACT_API_VERSION_H_ diff --git a/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/tesscallback.h b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/tesscallback.h new file mode 100644 index 00000000..4cf19ae1 --- /dev/null +++ b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/tesscallback.h @@ -0,0 +1,915 @@ +/////////////////////////////////////////////////////////////////////// +// File: tesscallback.h +// Description: classes and functions to replace pointer-to-functions +// Author: Samuel Charron +// +// (C) Copyright 2006, Google Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +/////////////////////////////////////////////////////////////////////// + +#ifndef TESS_CALLBACK_SPECIALIZATIONS_H_ +#define TESS_CALLBACK_SPECIALIZATIONS_H_ + +class TessClosure { + public: + virtual ~TessClosure(); + virtual void Run() = 0; +}; + +template +class TessResultCallback { + public: + virtual ~TessResultCallback() = default; + virtual R Run() = 0; +}; + +template +class _TessMemberResultCallback_0_0 : public TessResultCallback { + public: + using base = TessResultCallback; + using MemberSignature = R (T::*)(); + + private: + T* object_; + MemberSignature member_; + + public: + inline _TessMemberResultCallback_0_0(T* object, MemberSignature member) + : object_(object), member_(member) {} + + R Run() override { + if (!del) { + R result = (object_->*member_)(); + return result; + } + R result = (object_->*member_)(); + // zero out the pointer to ensure segfault if used again + member_ = nullptr; + delete this; + return result; + } +}; + +template +class _TessMemberResultCallback_0_0 : public TessClosure { + public: + using base = TessClosure; + using MemberSignature = void (T::*)(); + + private: + T* object_; + MemberSignature member_; + + public: + inline _TessMemberResultCallback_0_0(T* object, MemberSignature member) + : object_(object), member_(member) {} + + void Run() override { + if (!del) { + (object_->*member_)(); + } else { + (object_->*member_)(); + // zero out the pointer to ensure segfault if used again + member_ = nullptr; + delete this; + } + } +}; + +#ifndef SWIG +template +inline typename _TessMemberResultCallback_0_0::base* +NewTessCallback(T1* obj, R (T2::*member)()) { + return new _TessMemberResultCallback_0_0(obj, member); +} +#endif + +template +class _TessFunctionResultCallback_0_0 : public TessResultCallback { + public: + using base = TessResultCallback; + using FunctionSignature = R (*)(); + + private: + FunctionSignature function_; + + public: + inline explicit _TessFunctionResultCallback_0_0(FunctionSignature function) + : function_(function) {} + + virtual R Run() { + if (!del) { + R result = (*function_)(); + return result; + } + R result = (*function_)(); + // zero out the pointer to ensure segfault if used again + function_ = nullptr; + delete this; + return result; + } +}; + +template +class _TessFunctionResultCallback_0_0 : public TessClosure { + public: + using base = TessClosure; + using FunctionSignature = void (*)(); + + private: + FunctionSignature function_; + + public: + inline explicit _TessFunctionResultCallback_0_0(FunctionSignature function) + : function_(function) {} + + void Run() override { + if (!del) { + (*function_)(); + } else { + (*function_)(); + // zero out the pointer to ensure segfault if used again + function_ = nullptr; + delete this; + } + } +}; + +template +inline typename _TessFunctionResultCallback_0_0::base* NewTessCallback( + R (*function)()) { + return new _TessFunctionResultCallback_0_0(function); +} + +// Specified by TR1 [4.7.2] Reference modifications. +template +struct remove_reference; +template +struct remove_reference { + using type = T; +}; +template +struct remove_reference { + using type = T; +}; + +// Identity::type is a typedef of T. Useful for preventing the +// compiler from inferring the type of an argument in templates. 
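// An illustrative sketch of the callback factories in this header (not part of
// the original file). NewTessCallback builds a one-shot callback that deletes
// itself after Run(); NewPermanentTessCallback builds a caller-owned callback
// that can be run repeatedly:
//
//   struct Counter {
//     int n = 0;
//     void bump() { ++n; }
//     void add(int k) { n += k; }
//   };
//
//   Counter c;
//   TessClosure* once = NewTessCallback(&c, &Counter::bump);
//   once->Run();                          // c.n == 1; "once" has deleted itself
//
//   TessCallback1<int>* more = NewPermanentTessCallback(&c, &Counter::add);
//   more->Run(2);
//   more->Run(3);                         // c.n == 6
//   delete more;                          // permanent callbacks are caller-owned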
+template +struct Identity { + using type = T; +}; + +template +class _ConstTessMemberResultCallback_5_0 : public TessResultCallback { + public: + using base = TessResultCallback; + using MemberSignature = R (T::*)(P1, P2, P3, P4, P5) const; + + private: + const T* object_; + MemberSignature member_; + typename remove_reference::type p1_; + typename remove_reference::type p2_; + typename remove_reference::type p3_; + typename remove_reference::type p4_; + typename remove_reference::type p5_; + + public: + inline _ConstTessMemberResultCallback_5_0(const T* object, + MemberSignature member, P1 p1, + P2 p2, P3 p3, P4 p4, P5 p5) + : object_(object), + member_(member), + p1_(p1), + p2_(p2), + p3_(p3), + p4_(p4), + p5_(p5) {} + + R Run() override { + if (!del) { + R result = (object_->*member_)(p1_, p2_, p3_, p4_, p5_); + return result; + } + R result = (object_->*member_)(p1_, p2_, p3_, p4_, p5_); + // zero out the pointer to ensure segfault if used again + member_ = nullptr; + delete this; + return result; + } +}; + +template +class _ConstTessMemberResultCallback_5_0 + : public TessClosure { + public: + using base = TessClosure; + using MemberSignature = void (T::*)(P1, P2, P3, P4, P5) const; + + private: + const T* object_; + MemberSignature member_; + typename remove_reference::type p1_; + typename remove_reference::type p2_; + typename remove_reference::type p3_; + typename remove_reference::type p4_; + typename remove_reference::type p5_; + + public: + inline _ConstTessMemberResultCallback_5_0(const T* object, + MemberSignature member, P1 p1, + P2 p2, P3 p3, P4 p4, P5 p5) + : object_(object), + member_(member), + p1_(p1), + p2_(p2), + p3_(p3), + p4_(p4), + p5_(p5) {} + + void Run() override { + if (!del) { + (object_->*member_)(p1_, p2_, p3_, p4_, p5_); + } else { + (object_->*member_)(p1_, p2_, p3_, p4_, p5_); + // zero out the pointer to ensure segfault if used again + member_ = nullptr; + delete this; + } + } +}; + +#ifndef SWIG +template +inline typename _ConstTessMemberResultCallback_5_0::base* +NewPermanentTessCallback(const T1* obj, + R (T2::*member)(P1, P2, P3, P4, P5) const, + typename Identity::type p1, + typename Identity::type p2, + typename Identity::type p3, + typename Identity::type p4, + typename Identity::type p5) { + return new _ConstTessMemberResultCallback_5_0(obj, member, p1, p2, p3, p4, + p5); +} +#endif + +template +class _ConstTessMemberResultCallback_6_0 : public TessResultCallback { + public: + using base = TessResultCallback; + using MemberSignature = R (T::*)(P1, P2, P3, P4, P5, P6) const; + + private: + const T* object_; + MemberSignature member_; + typename remove_reference::type p1_; + typename remove_reference::type p2_; + typename remove_reference::type p3_; + typename remove_reference::type p4_; + typename remove_reference::type p5_; + typename remove_reference::type p6_; + + public: + inline _ConstTessMemberResultCallback_6_0(const T* object, + MemberSignature member, P1 p1, + P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) + : object_(object), + member_(member), + p1_(p1), + p2_(p2), + p3_(p3), + p4_(p4), + p5_(p5), + p6_(p6) {} + + R Run() override { + if (!del) { + R result = (object_->*member_)(p1_, p2_, p3_, p4_, p5_, p6_); + return result; + } + R result = (object_->*member_)(p1_, p2_, p3_, p4_, p5_, p6_); + // zero out the pointer to ensure segfault if used again + member_ = nullptr; + delete this; + return result; + } +}; + +template +class _ConstTessMemberResultCallback_6_0 + : public TessClosure { + public: + using base = TessClosure; + using MemberSignature 
= void (T::*)(P1, P2, P3, P4, P5, P6) const; + + private: + const T* object_; + MemberSignature member_; + typename remove_reference::type p1_; + typename remove_reference::type p2_; + typename remove_reference::type p3_; + typename remove_reference::type p4_; + typename remove_reference::type p5_; + typename remove_reference::type p6_; + + public: + inline _ConstTessMemberResultCallback_6_0(const T* object, + MemberSignature member, P1 p1, + P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) + : object_(object), + member_(member), + p1_(p1), + p2_(p2), + p3_(p3), + p4_(p4), + p5_(p5), + p6_(p6) {} + + void Run() override { + if (!del) { + (object_->*member_)(p1_, p2_, p3_, p4_, p5_, p6_); + } else { + (object_->*member_)(p1_, p2_, p3_, p4_, p5_, p6_); + // zero out the pointer to ensure segfault if used again + member_ = nullptr; + delete this; + } + } +}; + +#ifndef SWIG +template +inline typename _ConstTessMemberResultCallback_6_0::base* +NewPermanentTessCallback( + const T1* obj, R (T2::*member)(P1, P2, P3, P4, P5, P6) const, + typename Identity::type p1, typename Identity::type p2, + typename Identity::type p3, typename Identity::type p4, + typename Identity::type p5, typename Identity::type p6) { + return new _ConstTessMemberResultCallback_6_0(obj, member, p1, p2, p3, + p4, p5, p6); +} +#endif + +template +class TessCallback1 { + public: + virtual ~TessCallback1() = default; + virtual void Run(A1) = 0; +}; + +template +class TessResultCallback1 { + public: + virtual ~TessResultCallback1() = default; + virtual R Run(A1) = 0; +}; + +template +class TessCallback2 { + public: + virtual ~TessCallback2() = default; + virtual void Run(A1, A2) = 0; +}; + +template +class TessResultCallback2 { + public: + virtual ~TessResultCallback2() = default; + virtual R Run(A1, A2) = 0; +}; + +template +class TessCallback3 { + public: + virtual ~TessCallback3() = default; + virtual void Run(A1, A2, A3) = 0; +}; + +template +class TessResultCallback3 { + public: + virtual ~TessResultCallback3() = default; + virtual R Run(A1, A2, A3) = 0; +}; + +template +class TessCallback4 { + public: + virtual ~TessCallback4() = default; + virtual void Run(A1, A2, A3, A4) = 0; +}; + +template +class TessResultCallback4 { + public: + virtual ~TessResultCallback4() = default; + virtual R Run(A1, A2, A3, A4) = 0; +}; + +template +class _TessMemberResultCallback_0_1 : public TessResultCallback1 { + public: + typedef TessResultCallback1 base; + using MemberSignature = R (T::*)(A1); + + private: + T* object_; + MemberSignature member_; + + public: + inline _TessMemberResultCallback_0_1(T* object, MemberSignature member) + : object_(object), member_(member) {} + + R Run(A1 a1) override { + if (!del) { + R result = (object_->*member_)(a1); + return result; + } + R result = (object_->*member_)(a1); + // zero out the pointer to ensure segfault if used again + member_ = nullptr; + delete this; + return result; + } +}; + +template +class _TessMemberResultCallback_0_1 + : public TessCallback1 { + public: + using base = TessCallback1; + using MemberSignature = void (T::*)(A1); + + private: + T* object_; + MemberSignature member_; + + public: + inline _TessMemberResultCallback_0_1(T* object, MemberSignature member) + : object_(object), member_(member) {} + + void Run(A1 a1) override { + if (!del) { + (object_->*member_)(a1); + } else { + (object_->*member_)(a1); + // zero out the pointer to ensure segfault if used again + member_ = nullptr; + delete this; + } + } +}; + +#ifndef SWIG +template +inline typename _TessMemberResultCallback_0_1::base* 
+NewPermanentTessCallback(T1* obj, R (T2::*member)(A1)) { + return new _TessMemberResultCallback_0_1(obj, member); +} +#endif + +template +class _TessFunctionResultCallback_0_1 : public TessResultCallback1 { + public: + typedef TessResultCallback1 base; + using FunctionSignature = R (*)(A1); + + private: + FunctionSignature function_; + + public: + inline explicit _TessFunctionResultCallback_0_1(FunctionSignature function) + : function_(function) {} + + virtual R Run(A1 a1) { + if (!del) { + R result = (*function_)(a1); + return result; + } + R result = (*function_)(a1); + // zero out the pointer to ensure segfault if used again + function_ = nullptr; + delete this; + return result; + } +}; + +template +class _TessFunctionResultCallback_0_1 + : public TessCallback1 { + public: + using base = TessCallback1; + using FunctionSignature = void (*)(A1); + + private: + FunctionSignature function_; + + public: + inline explicit _TessFunctionResultCallback_0_1(FunctionSignature function) + : function_(function) {} + + void Run(A1 a1) override { + if (!del) { + (*function_)(a1); + } else { + (*function_)(a1); + // zero out the pointer to ensure segfault if used again + function_ = nullptr; + delete this; + } + } +}; + +template +inline typename _TessFunctionResultCallback_0_1::base* +NewPermanentTessCallback(R (*function)(A1)) { + return new _TessFunctionResultCallback_0_1(function); +} + +template +class _TessFunctionResultCallback_1_1 : public TessResultCallback1 { + public: + typedef TessResultCallback1 base; + using FunctionSignature = R (*)(P1, A1); + + private: + FunctionSignature function_; + typename remove_reference::type p1_; + + public: + inline _TessFunctionResultCallback_1_1(FunctionSignature function, P1 p1) + : function_(function), p1_(p1) {} + + virtual R Run(A1 a1) { + if (!del) { + R result = (*function_)(p1_, a1); + return result; + } + R result = (*function_)(p1_, a1); + // zero out the pointer to ensure segfault if used again + function_ = nullptr; + delete this; + return result; + } +}; + +template +class _TessFunctionResultCallback_1_1 + : public TessCallback1 { + public: + using base = TessCallback1; + using FunctionSignature = void (*)(P1, A1); + + private: + FunctionSignature function_; + typename remove_reference::type p1_; + + public: + inline _TessFunctionResultCallback_1_1(FunctionSignature function, P1 p1) + : function_(function), p1_(p1) {} + + void Run(A1 a1) override { + if (!del) { + (*function_)(p1_, a1); + } else { + (*function_)(p1_, a1); + // zero out the pointer to ensure segfault if used again + function_ = nullptr; + delete this; + } + } +}; + +template +inline typename _TessFunctionResultCallback_1_1::base* +NewPermanentTessCallback(R (*function)(P1, A1), + typename Identity::type p1) { + return new _TessFunctionResultCallback_1_1(function, p1); +} + +template +class _ConstTessMemberResultCallback_0_2 + : public TessResultCallback2 { + public: + typedef TessResultCallback2 base; + using MemberSignature = R (T::*)(A1, A2) const; + + private: + const T* object_; + MemberSignature member_; + + public: + inline _ConstTessMemberResultCallback_0_2(const T* object, + MemberSignature member) + : object_(object), member_(member) {} + + R Run(A1 a1, A2 a2) override { + if (!del) { + R result = (object_->*member_)(a1, a2); + return result; + } + R result = (object_->*member_)(a1, a2); + // zero out the pointer to ensure segfault if used again + member_ = nullptr; + delete this; + return result; + } +}; + +template +class _ConstTessMemberResultCallback_0_2 + : public 
TessCallback2 { + public: + typedef TessCallback2 base; + using MemberSignature = void (T::*)(A1, A2) const; + + private: + const T* object_; + MemberSignature member_; + + public: + inline _ConstTessMemberResultCallback_0_2(const T* object, + MemberSignature member) + : object_(object), member_(member) {} + + virtual void Run(A1 a1, A2 a2) { + if (!del) { + (object_->*member_)(a1, a2); + } else { + (object_->*member_)(a1, a2); + // zero out the pointer to ensure segfault if used again + member_ = nullptr; + delete this; + } + } +}; + +#ifndef SWIG +template +inline typename _ConstTessMemberResultCallback_0_2::base* +NewPermanentTessCallback(const T1* obj, R (T2::*member)(A1, A2) const) { + return new _ConstTessMemberResultCallback_0_2(obj, + member); +} +#endif + +template +class _TessMemberResultCallback_0_2 : public TessResultCallback2 { + public: + typedef TessResultCallback2 base; + using MemberSignature = R (T::*)(A1, A2); + + private: + T* object_; + MemberSignature member_; + + public: + inline _TessMemberResultCallback_0_2(T* object, MemberSignature member) + : object_(object), member_(member) {} + + R Run(A1 a1, A2 a2) override { + if (!del) { + R result = (object_->*member_)(a1, a2); + return result; + } + R result = (object_->*member_)(a1, a2); + // zero out the pointer to ensure segfault if used again + member_ = nullptr; + delete this; + return result; + } +}; + +template +class _TessMemberResultCallback_0_2 + : public TessCallback2 { + public: + typedef TessCallback2 base; + using MemberSignature = void (T::*)(A1, A2); + + private: + T* object_; + MemberSignature member_; + + public: + inline _TessMemberResultCallback_0_2(T* object, MemberSignature member) + : object_(object), member_(member) {} + + virtual void Run(A1 a1, A2 a2) { + if (!del) { + (object_->*member_)(a1, a2); + } else { + (object_->*member_)(a1, a2); + // zero out the pointer to ensure segfault if used again + member_ = nullptr; + delete this; + } + } +}; + +#ifndef SWIG +template +inline typename _TessMemberResultCallback_0_2::base* +NewPermanentTessCallback(T1* obj, R (T2::*member)(A1, A2)) { + return new _TessMemberResultCallback_0_2(obj, member); +} +#endif + +template +class _TessFunctionResultCallback_0_2 : public TessResultCallback2 { + public: + typedef TessResultCallback2 base; + using FunctionSignature = R (*)(A1, A2); + + private: + FunctionSignature function_; + + public: + inline explicit _TessFunctionResultCallback_0_2(FunctionSignature function) + : function_(function) {} + + R Run(A1 a1, A2 a2) override { + if (!del) { + R result = (*function_)(a1, a2); + return result; + } + R result = (*function_)(a1, a2); + // zero out the pointer to ensure segfault if used again + function_ = nullptr; + delete this; + return result; + } +}; + +template +class _TessFunctionResultCallback_0_2 + : public TessCallback2 { + public: + typedef TessCallback2 base; + using FunctionSignature = void (*)(A1, A2); + + private: + FunctionSignature function_; + + public: + inline explicit _TessFunctionResultCallback_0_2(FunctionSignature function) + : function_(function) {} + + virtual void Run(A1 a1, A2 a2) { + if (!del) { + (*function_)(a1, a2); + } else { + (*function_)(a1, a2); + // zero out the pointer to ensure segfault if used again + function_ = nullptr; + delete this; + } + } +}; + +template +inline typename _TessFunctionResultCallback_0_2::base* +NewPermanentTessCallback(R (*function)(A1, A2)) { + return new _TessFunctionResultCallback_0_2(function); +} + +template +class _TessMemberResultCallback_2_2 : public 
TessResultCallback2 { + public: + typedef TessResultCallback2 base; + using MemberSignature = R (T::*)(P1, P2, A1, A2); + + private: + T* object_; + MemberSignature member_; + typename remove_reference::type p1_; + typename remove_reference::type p2_; + + public: + inline _TessMemberResultCallback_2_2(T* object, MemberSignature member, P1 p1, + P2 p2) + : object_(object), member_(member), p1_(p1), p2_(p2) {} + + R Run(A1 a1, A2 a2) override { + if (!del) { + R result = (object_->*member_)(p1_, p2_, a1, a2); + return result; + } + R result = (object_->*member_)(p1_, p2_, a1, a2); + // zero out the pointer to ensure segfault if used again + member_ = nullptr; + delete this; + return result; + } +}; + +#ifndef SWIG +template +inline + typename _TessMemberResultCallback_2_2::base* + NewPermanentTessCallback(T1* obj, R (T2::*member)(P1, P2, A1, A2), + typename Identity::type p1, + typename Identity::type p2) { + return new _TessMemberResultCallback_2_2( + obj, member, p1, p2); +} +#endif + +template +class _ConstTessMemberResultCallback_0_3 + : public TessResultCallback3 { + public: + typedef TessResultCallback3 base; + using MemberSignature = R (T::*)(A1, A2, A3) const; + + private: + const T* object_; + MemberSignature member_; + + public: + inline _ConstTessMemberResultCallback_0_3(const T* object, + MemberSignature member) + : object_(object), member_(member) {} + + R Run(A1 a1, A2 a2, A3 a3) override { + if (!del) { + R result = (object_->*member_)(a1, a2, a3); + return result; + } + R result = (object_->*member_)(a1, a2, a3); + // zero out the pointer to ensure segfault if used again + member_ = nullptr; + delete this; + return result; + } +}; + +#ifndef SWIG +template +inline + typename _ConstTessMemberResultCallback_0_3::base* + NewPermanentTessCallback(const T1* obj, R (T2::*member)(A1, A2, A3) const) { + return new _ConstTessMemberResultCallback_0_3( + obj, member); +} +#endif + +template +class _TessMemberResultCallback_0_4 + : public TessResultCallback4 { + public: + typedef TessResultCallback4 base; + using MemberSignature = R (T::*)(A1, A2, A3, A4); + + private: + T* object_; + MemberSignature member_; + + public: + inline _TessMemberResultCallback_0_4(T* object, MemberSignature member) + : object_(object), member_(member) {} + + virtual R Run(A1 a1, A2 a2, A3 a3, A4 a4) { + if (!del) { + R result = (object_->*member_)(a1, a2, a3, a4); + return result; + } + R result = (object_->*member_)(a1, a2, a3, a4); + // zero out the pointer to ensure segfault if used again + member_ = nullptr; + delete this; + return result; + } +}; + +#ifndef SWIG +template +inline + typename _TessMemberResultCallback_0_4::base* + NewPermanentTessCallback(T1* obj, R (T2::*member)(A1, A2, A3, A4)) { + return new _TessMemberResultCallback_0_4( + obj, member); +} +#endif + +#endif // TESS_CALLBACK_SPECIALIZATIONS_H_ diff --git a/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/thresholder.h b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/thresholder.h new file mode 100644 index 00000000..b63f51a2 --- /dev/null +++ b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/thresholder.h @@ -0,0 +1,189 @@ +/////////////////////////////////////////////////////////////////////// +// File: thresholder.h +// Description: Base API for thresholding images in tesseract. +// Author: Ray Smith +// +// (C) Copyright 2008, Google Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +/////////////////////////////////////////////////////////////////////// + +#ifndef TESSERACT_CCMAIN_THRESHOLDER_H_ +#define TESSERACT_CCMAIN_THRESHOLDER_H_ + +#include "platform.h" +#include "publictypes.h" + +struct Pix; + +namespace tesseract { + +/// Base class for all tesseract image thresholding classes. +/// Specific classes can add new thresholding methods by +/// overriding ThresholdToPix. +/// Each instance deals with a single image, but the design is intended to +/// be useful for multiple calls to SetRectangle and ThresholdTo* if +/// desired. +class TESS_API ImageThresholder { + public: + ImageThresholder(); + virtual ~ImageThresholder(); + + /// Destroy the Pix if there is one, freeing memory. + virtual void Clear(); + + /// Return true if no image has been set. + bool IsEmpty() const; + + /// SetImage makes a copy of all the image data, so it may be deleted + /// immediately after this call. + /// Greyscale of 8 and color of 24 or 32 bits per pixel may be given. + /// Palette color images will not work properly and must be converted to + /// 24 bit. + /// Binary images of 1 bit per pixel may also be given but they must be + /// byte packed with the MSB of the first byte being the first pixel, and a + /// one pixel is WHITE. For binary images set bytes_per_pixel=0. + void SetImage(const unsigned char* imagedata, int width, int height, + int bytes_per_pixel, int bytes_per_line); + + /// Store the coordinates of the rectangle to process for later use. + /// Doesn't actually do any thresholding. + void SetRectangle(int left, int top, int width, int height); + + /// Get enough parameters to be able to rebuild bounding boxes in the + /// original image (not just within the rectangle). + /// Left and top are enough with top-down coordinates, but + /// the height of the rectangle and the image are needed for bottom-up. + virtual void GetImageSizes(int* left, int* top, int* width, int* height, + int* imagewidth, int* imageheight); + + /// Return true if the source image is color. + bool IsColor() const { + return pix_channels_ >= 3; + } + + /// Returns true if the source image is binary. + bool IsBinary() const { + return pix_channels_ == 0; + } + + int GetScaleFactor() const { + return scale_; + } + + // Set the resolution of the source image in pixels per inch. + // This should be called right after SetImage(), and will let us return + // appropriate font sizes for the text. + void SetSourceYResolution(int ppi) { + yres_ = ppi; + estimated_res_ = ppi; + } + int GetSourceYResolution() const { + return yres_; + } + int GetScaledYResolution() const { + return scale_ * yres_; + } + // Set the resolution of the source image in pixels per inch, as estimated + // by the thresholder from the text size found during thresholding. + // This value will be used to set internal size thresholds during recognition + // and will not influence the output "point size." The default value is + // the same as the source resolution. (yres_) + void SetEstimatedResolution(int ppi) { + estimated_res_ = ppi; + } + // Returns the estimated resolution, including any active scaling. 
+ // This value will be used to set internal size thresholds during recognition. + int GetScaledEstimatedResolution() const { + return scale_ * estimated_res_; + } + + /// Pix vs raw, which to use? Pix is the preferred input for efficiency, + /// since raw buffers are copied. + /// SetImage for Pix clones its input, so the source pix may be pixDestroyed + /// immediately after, but may not go away until after the Thresholder has + /// finished with it. + void SetImage(const Pix* pix); + + /// Threshold the source image as efficiently as possible to the output Pix. + /// Creates a Pix and sets pix to point to the resulting pointer. + /// Caller must use pixDestroy to free the created Pix. + /// Returns false on error. + virtual bool ThresholdToPix(PageSegMode pageseg_mode, Pix** pix); + + // Gets a pix that contains an 8 bit threshold value at each pixel. The + // returned pix may be an integer reduction of the binary image such that + // the scale factor may be inferred from the ratio of the sizes, even down + // to the extreme of a 1x1 pixel thresholds image. + // Ideally the 8 bit threshold should be the exact threshold used to generate + // the binary image in ThresholdToPix, but this is not a hard constraint. + // Returns nullptr if the input is binary. PixDestroy after use. + virtual Pix* GetPixRectThresholds(); + + /// Get a clone/copy of the source image rectangle. + /// The returned Pix must be pixDestroyed. + /// This function will be used in the future by the page layout analysis, and + /// the layout analysis that uses it will only be available with Leptonica, + /// so there is no raw equivalent. + Pix* GetPixRect(); + + // Get a clone/copy of the source image rectangle, reduced to greyscale, + // and at the same resolution as the output binary. + // The returned Pix must be pixDestroyed. + // Provided to the classifier to extract features from the greyscale image. + virtual Pix* GetPixRectGrey(); + + protected: + // ---------------------------------------------------------------------- + // Utility functions that may be useful components for other thresholders. + + /// Common initialization shared between SetImage methods. + virtual void Init(); + + /// Return true if we are processing the full image. + bool IsFullImage() const { + return rect_left_ == 0 && rect_top_ == 0 && + rect_width_ == image_width_ && rect_height_ == image_height_; + } + + // Otsu thresholds the rectangle, taking the rectangle from *this. + void OtsuThresholdRectToPix(Pix* src_pix, Pix** out_pix) const; + + /// Threshold the rectangle, taking everything except the src_pix + /// from the class, using thresholds/hi_values to the output pix. + /// NOTE that num_channels is the size of the thresholds and hi_values + // arrays and also the bytes per pixel in src_pix. + void ThresholdRectToPix(Pix* src_pix, int num_channels, + const int* thresholds, const int* hi_values, + Pix** pix) const; + + protected: + /// Clone or other copy of the source Pix. + /// The pix will always be PixDestroy()ed on destruction of the class. + Pix* pix_; + + int image_width_; ///< Width of source pix_. + int image_height_; ///< Height of source pix_. + int pix_channels_; ///< Number of 8-bit channels in pix_. + int pix_wpl_; ///< Words per line of pix_. + // Limits of image rectangle to be processed. + int scale_; ///< Scale factor from original image. + int yres_; ///< y pixels/inch in source image. + int estimated_res_; ///< Resolution estimate from text size. 
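  // A typical calling sequence (illustrative, not part of the original header),
  // assuming `grey` is an 8-bit greyscale buffer of width x height bytes:
  //
  //   tesseract::ImageThresholder th;
  //   th.SetImage(grey, width, height, /*bytes_per_pixel=*/1, /*bytes_per_line=*/width);
  //   th.SetSourceYResolution(300);             // scan resolution in ppi
  //   th.SetRectangle(0, 0, width, height);     // process the whole image
  //   Pix* binary = nullptr;
  //   if (th.ThresholdToPix(tesseract::PSM_AUTO, &binary)) {
  //     // ... use the binary Pix ...
  //     pixDestroy(&binary);
  //   }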
+ int rect_left_; + int rect_top_; + int rect_width_; + int rect_height_; +}; + +} // namespace tesseract. + +#endif // TESSERACT_CCMAIN_THRESHOLDER_H_ diff --git a/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/unichar.h b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/unichar.h new file mode 100644 index 00000000..5daca950 --- /dev/null +++ b/third_party/ocr/tesseract-ocr/uos/sw_64/include/tesseract/unichar.h @@ -0,0 +1,176 @@ +/////////////////////////////////////////////////////////////////////// +// File: unichar.h +// Description: Unicode character/ligature class. +// Author: Ray Smith +// +// (C) Copyright 2006, Google Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +/////////////////////////////////////////////////////////////////////// + +#ifndef TESSERACT_CCUTIL_UNICHAR_H_ +#define TESSERACT_CCUTIL_UNICHAR_H_ + +#include +#include +#include +#include +#include "platform.h" + +// Maximum number of characters that can be stored in a UNICHAR. Must be +// at least 4. Must not exceed 31 without changing the coding of length. +#define UNICHAR_LEN 30 + +// TODO(rays) Move these to the tesseract namespace. +// A UNICHAR_ID is the unique id of a unichar. +using UNICHAR_ID = int; + +// A variable to indicate an invalid or uninitialized unichar id. +static const int INVALID_UNICHAR_ID = -1; +// A special unichar that corresponds to INVALID_UNICHAR_ID. +static const char INVALID_UNICHAR[] = "__INVALID_UNICHAR__"; + +enum StrongScriptDirection { + DIR_NEUTRAL = 0, // Text contains only neutral characters. + DIR_LEFT_TO_RIGHT = 1, // Text contains no Right-to-Left characters. + DIR_RIGHT_TO_LEFT = 2, // Text contains no Left-to-Right characters. + DIR_MIX = 3, // Text contains a mixture of left-to-right + // and right-to-left characters. +}; + +namespace tesseract { + +using char32 = signed int; + +// The UNICHAR class holds a single classification result. This may be +// a single Unicode character (stored as between 1 and 4 utf8 bytes) or +// multiple Unicode characters representing the NFKC expansion of a ligature +// such as fi, ffl etc. These are also stored as utf8. +class UNICHAR { + public: + UNICHAR() { + memset(chars, 0, UNICHAR_LEN); + } + + // Construct from a utf8 string. If len<0 then the string is null terminated. + // If the string is too long to fit in the UNICHAR then it takes only what + // will fit. + UNICHAR(const char* utf8_str, int len); + + // Construct from a single UCS4 character. + explicit UNICHAR(int unicode); + + // Default copy constructor and operator= are OK. + + // Get the first character as UCS-4. + int first_uni() const; + + // Get the length of the UTF8 string. + int utf8_len() const { + int len = chars[UNICHAR_LEN - 1]; + return len >= 0 && len < UNICHAR_LEN ? len : UNICHAR_LEN; + } + + // Get a UTF8 string, but NOT nullptr terminated. + const char* utf8() const { + return chars; + } + + // Get a terminated UTF8 string: Must delete[] it after use. 
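  // For example (illustrative, not part of the original header):
  //
  //   UNICHAR uni(0x4E2D);                  // construct from a UCS-4 code point
  //   char* s = uni.utf8_str();             // heap-allocated, '\0'-terminated copy
  //   // ... use s ...
  //   delete[] s;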
+ char* utf8_str() const; + + // Get the number of bytes in the first character of the given utf8 string. + static int utf8_step(const char* utf8_str); + + // A class to simplify iterating over and accessing elements of a UTF8 + // string. Note that unlike the UNICHAR class, const_iterator does NOT COPY or + // take ownership of the underlying byte array. It also does not permit + // modification of the array (as the name suggests). + // + // Example: + // for (UNICHAR::const_iterator it = UNICHAR::begin(str, str_len); + // it != UNICHAR::end(str, len); + // ++it) { + // tprintf("UCS-4 symbol code = %d\n", *it); + // char buf[5]; + // int char_len = it.get_utf8(buf); buf[char_len] = '\0'; + // tprintf("Char = %s\n", buf); + // } + class const_iterator { + using CI = const_iterator; + + public: + // Step to the next UTF8 character. + // If the current position is at an illegal UTF8 character, then print an + // error message and step by one byte. If the current position is at a + // nullptr value, don't step past it. + const_iterator& operator++(); + + // Return the UCS-4 value at the current position. + // If the current position is at an illegal UTF8 value, return a single + // space character. + int operator*() const; + + // Store the UTF-8 encoding of the current codepoint into buf, which must be + // at least 4 bytes long. Return the number of bytes written. + // If the current position is at an illegal UTF8 value, writes a single + // space character and returns 1. + // Note that this method does not null-terminate the buffer. + int get_utf8(char* buf) const; + // Returns the number of bytes of the current codepoint. Returns 1 if the + // current position is at an illegal UTF8 value. + int utf8_len() const; + // Returns true if the UTF-8 encoding at the current position is legal. + bool is_legal() const; + + // Return the pointer into the string at the current position. + const char* utf8_data() const { + return it_; + } + + // Iterator equality operators. + friend bool operator==(const CI& lhs, const CI& rhs) { + return lhs.it_ == rhs.it_; + } + friend bool operator!=(const CI& lhs, const CI& rhs) { + return !(lhs == rhs); + } + + private: + friend class UNICHAR; + explicit const_iterator(const char* it) : it_(it) {} + + const char* it_; // Pointer into the string. + }; + + // Create a start/end iterator pointing to a string. Note that these methods + // are static and do NOT create a copy or take ownership of the underlying + // array. + static const_iterator begin(const char* utf8_str, int byte_length); + static const_iterator end(const char* utf8_str, int byte_length); + + // Converts a utf-8 string to a vector of unicodes. + // Returns an empty vector if the input contains invalid UTF-8. + static std::vector UTF8ToUTF32(const char* utf8_str); + // Converts a vector of unicodes to a utf8 string. + // Returns an empty string if the input contains an invalid unicode. + static std::string UTF32ToUTF8(const std::vector& str32); + + private: + // A UTF-8 representation of 1 or more Unicode characters. + // The last element (chars[UNICHAR_LEN - 1]) is a length if + // its value < UNICHAR_LEN, otherwise it is a genuine character. 
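  // A round-trip sketch with the static conversion helpers above (illustrative,
  // not part of the original header):
  //
  //   std::vector<char32> cps = UNICHAR::UTF8ToUTF32("fi");  // empty if invalid UTF-8
  //   std::string s = UNICHAR::UTF32ToUTF8(cps);             // back to "fi"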
+ char chars[UNICHAR_LEN]{}; +}; + +} // namespace tesseract + +#endif // TESSERACT_CCUTIL_UNICHAR_H_ diff --git a/third_party/ocr/tesseract-ocr/uos/sw_64/lib/libtesseract.a b/third_party/ocr/tesseract-ocr/uos/sw_64/lib/libtesseract.a new file mode 100644 index 00000000..1ba73f29 Binary files /dev/null and b/third_party/ocr/tesseract-ocr/uos/sw_64/lib/libtesseract.a differ diff --git a/third_party/ocr/tesseract-ocr/uos/sw_64/lib/pkgconfig/tesseract.pc b/third_party/ocr/tesseract-ocr/uos/sw_64/lib/pkgconfig/tesseract.pc new file mode 100644 index 00000000..bc40349b --- /dev/null +++ b/third_party/ocr/tesseract-ocr/uos/sw_64/lib/pkgconfig/tesseract.pc @@ -0,0 +1,12 @@ +prefix=/data/home/SW2023/sane/code_app/third_party/ocr/tesseract-ocr/tesseract-4.1.1/tesseract-4.1.1/build/release +exec_prefix=${prefix}/bin +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: libtesseract +Description: An OCR Engine that was developed at HP Labs between 1985 and 1995... and now at Google. +URL: https://github.com/tesseract-ocr/tesseract +Version: 4.1.1 +Libs: -L${libdir} -ltesseract +Libs.private: +Cflags: -I${includedir} diff --git a/third_party/opencv/uos/sw_64/include/opencv/cv.h b/third_party/opencv/uos/sw_64/include/opencv/cv.h new file mode 100644 index 00000000..19a74e29 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv/cv.h @@ -0,0 +1,73 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef OPENCV_OLD_CV_H +#define OPENCV_OLD_CV_H + +#if defined(_MSC_VER) + #define CV_DO_PRAGMA(x) __pragma(x) + #define __CVSTR2__(x) #x + #define __CVSTR1__(x) __CVSTR2__(x) + #define __CVMSVCLOC__ __FILE__ "("__CVSTR1__(__LINE__)") : " + #define CV_MSG_PRAGMA(_msg) CV_DO_PRAGMA(message (__CVMSVCLOC__ _msg)) +#elif defined(__GNUC__) + #define CV_DO_PRAGMA(x) _Pragma (#x) + #define CV_MSG_PRAGMA(_msg) CV_DO_PRAGMA(message (_msg)) +#else + #define CV_DO_PRAGMA(x) + #define CV_MSG_PRAGMA(_msg) +#endif +#define CV_WARNING(x) CV_MSG_PRAGMA("Warning: " #x) + +//CV_WARNING("This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module") + +#include "opencv2/core/core_c.h" +#include "opencv2/imgproc/imgproc_c.h" +#include "opencv2/photo/photo_c.h" +#include "opencv2/video/tracking_c.h" +#include "opencv2/objdetect/objdetect_c.h" + +#if !defined(CV_IMPL) +#define CV_IMPL extern "C" +#endif //CV_IMPL + +#endif // __OPENCV_OLD_CV_H_ diff --git a/third_party/opencv/uos/sw_64/include/opencv/cv.hpp b/third_party/opencv/uos/sw_64/include/opencv/cv.hpp new file mode 100644 index 00000000..86739564 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv/cv.hpp @@ -0,0 +1,60 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_OLD_CV_HPP +#define OPENCV_OLD_CV_HPP + +//#if defined(__GNUC__) +//#warning "This is a deprecated opencv header provided for compatibility. 
Please include a header from a corresponding opencv module" +//#endif + +#include "cv.h" +#include "opencv2/core.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/photo.hpp" +#include "opencv2/video.hpp" +#include "opencv2/highgui.hpp" +#include "opencv2/features2d.hpp" +#include "opencv2/calib3d.hpp" +#include "opencv2/objdetect.hpp" + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv/cvaux.h b/third_party/opencv/uos/sw_64/include/opencv/cvaux.h new file mode 100644 index 00000000..c0367cc2 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv/cvaux.h @@ -0,0 +1,57 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_OLD_AUX_H +#define OPENCV_OLD_AUX_H + +//#if defined(__GNUC__) +//#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module" +//#endif + +#include "opencv2/core/core_c.h" +#include "opencv2/imgproc/imgproc_c.h" +#include "opencv2/photo/photo_c.h" +#include "opencv2/video/tracking_c.h" +#include "opencv2/objdetect/objdetect_c.h" + +#endif + +/* End of file. */ diff --git a/third_party/opencv/uos/sw_64/include/opencv/cvaux.hpp b/third_party/opencv/uos/sw_64/include/opencv/cvaux.hpp new file mode 100644 index 00000000..4888eef2 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv/cvaux.hpp @@ -0,0 +1,52 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_OLD_AUX_HPP +#define OPENCV_OLD_AUX_HPP + +//#if defined(__GNUC__) +//#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module" +//#endif + +#include "cvaux.h" +#include "opencv2/core/utility.hpp" + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv/cvwimage.h b/third_party/opencv/uos/sw_64/include/opencv/cvwimage.h new file mode 100644 index 00000000..ec0ab141 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv/cvwimage.h @@ -0,0 +1,46 @@ +/////////////////////////////////////////////////////////////////////////////// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to +// this license. If you do not agree to this license, do not download, +// install, copy or use the software. +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2008, Google, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of Intel Corporation or contributors may not be used to endorse +// or promote products derived from this software without specific +// prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" +// and any express or implied warranties, including, but not limited to, the +// implied warranties of merchantability and fitness for a particular purpose +// are disclaimed. In no event shall the Intel Corporation or contributors be +// liable for any direct, indirect, incidental, special, exemplary, or +// consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. + + +#ifndef OPENCV_OLD_WIMAGE_HPP +#define OPENCV_OLD_WIMAGE_HPP + +#include "opencv2/core/wimage.hpp" + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv/cxcore.h b/third_party/opencv/uos/sw_64/include/opencv/cxcore.h new file mode 100644 index 00000000..dc070c77 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv/cxcore.h @@ -0,0 +1,52 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef OPENCV_OLD_CXCORE_H +#define OPENCV_OLD_CXCORE_H + +//#if defined(__GNUC__) +//#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module" +//#endif + +#include "opencv2/core/core_c.h" + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv/cxcore.hpp b/third_party/opencv/uos/sw_64/include/opencv/cxcore.hpp new file mode 100644 index 00000000..c371677c --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv/cxcore.hpp @@ -0,0 +1,53 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_OLD_CXCORE_HPP +#define OPENCV_OLD_CXCORE_HPP + +//#if defined(__GNUC__) +//#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module" +//#endif + +#include "cxcore.h" +#include "opencv2/core.hpp" + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv/cxeigen.hpp b/third_party/opencv/uos/sw_64/include/opencv/cxeigen.hpp new file mode 100644 index 00000000..1d3df914 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv/cxeigen.hpp @@ -0,0 +1,48 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_OLD_EIGEN_HPP +#define OPENCV_OLD_EIGEN_HPP + +#include "opencv2/core/eigen.hpp" + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv/cxmisc.h b/third_party/opencv/uos/sw_64/include/opencv/cxmisc.h new file mode 100644 index 00000000..9b9bc820 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv/cxmisc.h @@ -0,0 +1,8 @@ +#ifndef OPENCV_OLD_CXMISC_H +#define OPENCV_OLD_CXMISC_H + +#ifdef __cplusplus +# include "opencv2/core/utility.hpp" +#endif + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv/highgui.h b/third_party/opencv/uos/sw_64/include/opencv/highgui.h new file mode 100644 index 00000000..69b394e0 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv/highgui.h @@ -0,0 +1,48 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_OLD_HIGHGUI_H +#define OPENCV_OLD_HIGHGUI_H + +#include "opencv2/core/core_c.h" +#include "opencv2/highgui/highgui_c.h" + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv/ml.h b/third_party/opencv/uos/sw_64/include/opencv/ml.h new file mode 100644 index 00000000..0c376bac --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv/ml.h @@ -0,0 +1,47 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_OLD_ML_H +#define OPENCV_OLD_ML_H + +#include "opencv2/core/core_c.h" +#include "opencv2/ml.hpp" + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/calib3d.hpp b/third_party/opencv/uos/sw_64/include/opencv2/calib3d.hpp new file mode 100644 index 00000000..1d2e2a96 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/calib3d.hpp @@ -0,0 +1,3270 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CALIB3D_HPP +#define OPENCV_CALIB3D_HPP + +#include "opencv2/core.hpp" +#include "opencv2/features2d.hpp" +#include "opencv2/core/affine.hpp" + +/** + @defgroup calib3d Camera Calibration and 3D Reconstruction + +The functions in this section use a so-called pinhole camera model. 
The view of a scene +is obtained by projecting a scene's 3D point \f$P_w\f$ into the image plane using a perspective +transformation which forms the corresponding pixel \f$p\f$. Both \f$P_w\f$ and \f$p\f$ are +represented in homogeneous coordinates, i.e. as 3D and 2D homogeneous vector respectively. You will +find a brief introduction to projective geometry, homogeneous vectors and homogeneous +transformations at the end of this section's introduction. For more succinct notation, we often drop +the 'homogeneous' and say vector instead of homogeneous vector. + +The distortion-free projective transformation given by a pinhole camera model is shown below. + +\f[s \; p = A \begin{bmatrix} R|t \end{bmatrix} P_w,\f] + +where \f$P_w\f$ is a 3D point expressed with respect to the world coordinate system, +\f$p\f$ is a 2D pixel in the image plane, \f$A\f$ is the camera intrinsic matrix, +\f$R\f$ and \f$t\f$ are the rotation and translation that describe the change of coordinates from +world to camera coordinate systems (or camera frame) and \f$s\f$ is the projective transformation's +arbitrary scaling and not part of the camera model. + +The camera intrinsic matrix \f$A\f$ (notation used as in @cite Zhang2000 and also generally notated +as \f$K\f$) projects 3D points given in the camera coordinate system to 2D pixel coordinates, i.e. + +\f[p = A P_c.\f] + +The camera intrinsic matrix \f$A\f$ is composed of the focal lengths \f$f_x\f$ and \f$f_y\f$, which are +expressed in pixel units, and the principal point \f$(c_x, c_y)\f$, that is usually close to the +image center: + +\f[A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1},\f] + +and thus + +\f[s \vecthree{u}{v}{1} = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1} \vecthree{X_c}{Y_c}{Z_c}.\f] + +The matrix of intrinsic parameters does not depend on the scene viewed. So, once estimated, it can +be re-used as long as the focal length is fixed (in case of a zoom lens). Thus, if an image from the +camera is scaled by a factor, all of these parameters need to be scaled (multiplied/divided, +respectively) by the same factor. + +The joint rotation-translation matrix \f$[R|t]\f$ is the matrix product of a projective +transformation and a homogeneous transformation. The 3-by-4 projective transformation maps 3D points +represented in camera coordinates to 2D points in the image plane and represented in normalized +camera coordinates \f$x' = X_c / Z_c\f$ and \f$y' = Y_c / Z_c\f$: + +\f[Z_c \begin{bmatrix} +x' \\ +y' \\ +1 +\end{bmatrix} = \begin{bmatrix} +1 & 0 & 0 & 0 \\ +0 & 1 & 0 & 0 \\ +0 & 0 & 1 & 0 +\end{bmatrix} +\begin{bmatrix} +X_c \\ +Y_c \\ +Z_c \\ +1 +\end{bmatrix}.\f] + +The homogeneous transformation is encoded by the extrinsic parameters \f$R\f$ and \f$t\f$ and +represents the change of basis from world coordinate system \f$w\f$ to the camera coordinate sytem +\f$c\f$. 
Thus, given the representation of the point \f$P\f$ in world coordinates, \f$P_w\f$, we +obtain \f$P\f$'s representation in the camera coordinate system, \f$P_c\f$, by + +\f[P_c = \begin{bmatrix} +R & t \\ +0 & 1 +\end{bmatrix} P_w,\f] + +This homogeneous transformation is composed out of \f$R\f$, a 3-by-3 rotation matrix, and \f$t\f$, a +3-by-1 translation vector: + +\f[\begin{bmatrix} +R & t \\ +0 & 1 +\end{bmatrix} = \begin{bmatrix} +r_{11} & r_{12} & r_{13} & t_x \\ +r_{21} & r_{22} & r_{23} & t_y \\ +r_{31} & r_{32} & r_{33} & t_z \\ +0 & 0 & 0 & 1 +\end{bmatrix}, +\f] + +and therefore + +\f[\begin{bmatrix} +X_c \\ +Y_c \\ +Z_c \\ +1 +\end{bmatrix} = \begin{bmatrix} +r_{11} & r_{12} & r_{13} & t_x \\ +r_{21} & r_{22} & r_{23} & t_y \\ +r_{31} & r_{32} & r_{33} & t_z \\ +0 & 0 & 0 & 1 +\end{bmatrix} +\begin{bmatrix} +X_w \\ +Y_w \\ +Z_w \\ +1 +\end{bmatrix}.\f] + +Combining the projective transformation and the homogeneous transformation, we obtain the projective +transformation that maps 3D points in world coordinates into 2D points in the image plane and in +normalized camera coordinates: + +\f[Z_c \begin{bmatrix} +x' \\ +y' \\ +1 +\end{bmatrix} = \begin{bmatrix} R|t \end{bmatrix} \begin{bmatrix} +X_w \\ +Y_w \\ +Z_w \\ +1 +\end{bmatrix} = \begin{bmatrix} +r_{11} & r_{12} & r_{13} & t_x \\ +r_{21} & r_{22} & r_{23} & t_y \\ +r_{31} & r_{32} & r_{33} & t_z +\end{bmatrix} +\begin{bmatrix} +X_w \\ +Y_w \\ +Z_w \\ +1 +\end{bmatrix},\f] + +with \f$x' = X_c / Z_c\f$ and \f$y' = Y_c / Z_c\f$. Putting the equations for instrincs and extrinsics together, we can write out +\f$s \; p = A \begin{bmatrix} R|t \end{bmatrix} P_w\f$ as + +\f[s \vecthree{u}{v}{1} = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1} +\begin{bmatrix} +r_{11} & r_{12} & r_{13} & t_x \\ +r_{21} & r_{22} & r_{23} & t_y \\ +r_{31} & r_{32} & r_{33} & t_z +\end{bmatrix} +\begin{bmatrix} +X_w \\ +Y_w \\ +Z_w \\ +1 +\end{bmatrix}.\f] + +If \f$Z_c \ne 0\f$, the transformation above is equivalent to the following, + +\f[\begin{bmatrix} +u \\ +v +\end{bmatrix} = \begin{bmatrix} +f_x X_c/Z_c + c_x \\ +f_y Y_c/Z_c + c_y +\end{bmatrix}\f] + +with + +\f[\vecthree{X_c}{Y_c}{Z_c} = \begin{bmatrix} +R|t +\end{bmatrix} \begin{bmatrix} +X_w \\ +Y_w \\ +Z_w \\ +1 +\end{bmatrix}.\f] + +The following figure illustrates the pinhole camera model. + +![Pinhole camera model](pics/pinhole_camera_model.png) + +Real lenses usually have some distortion, mostly radial distortion, and slight tangential distortion. +So, the above model is extended as: + +\f[\begin{bmatrix} +u \\ +v +\end{bmatrix} = \begin{bmatrix} +f_x x'' + c_x \\ +f_y y'' + c_y +\end{bmatrix}\f] + +where + +\f[\begin{bmatrix} +x'' \\ +y'' +\end{bmatrix} = \begin{bmatrix} +x' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} + 2 p_1 x' y' + p_2(r^2 + 2 x'^2) + s_1 r^2 + s_2 r^4 \\ +y' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} + p_1 (r^2 + 2 y'^2) + 2 p_2 x' y' + s_3 r^2 + s_4 r^4 \\ +\end{bmatrix}\f] + +with + +\f[r^2 = x'^2 + y'^2\f] + +and + +\f[\begin{bmatrix} +x'\\ +y' +\end{bmatrix} = \begin{bmatrix} +X_c/Z_c \\ +Y_c/Z_c +\end{bmatrix},\f] + +if \f$Z_c \ne 0\f$. + +The distortion parameters are the radial coefficients \f$k_1\f$, \f$k_2\f$, \f$k_3\f$, \f$k_4\f$, \f$k_5\f$, and \f$k_6\f$ +,\f$p_1\f$ and \f$p_2\f$ are the tangential distortion coefficients, and \f$s_1\f$, \f$s_2\f$, \f$s_3\f$, and \f$s_4\f$, +are the thin prism distortion coefficients. Higher-order coefficients are not considered in OpenCV. 
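+
+As a concrete illustration (not part of the upstream documentation; the helper name and
+all numeric values are arbitrary), the radial and tangential terms above can be applied
+by hand to a normalized point \f$(x', y')\f$, keeping only \f$k_1, k_2, k_3, p_1, p_2\f$
+and taking the rational, thin prism and tilt coefficients as zero:
+
+@code
+    // Map a normalized point (xp, yp) = (Xc/Zc, Yc/Zc) to pixel coordinates (u, v),
+    // using only k1, k2, k3, p1, p2 (k4..k6, s1..s4 and the tilt terms are zero here).
+    void distortAndProject(double xp, double yp,
+                           double fx, double fy, double cx, double cy,
+                           double k1, double k2, double k3, double p1, double p2,
+                           double& u, double& v)
+    {
+        const double r2     = xp*xp + yp*yp;
+        const double radial = 1.0 + k1*r2 + k2*r2*r2 + k3*r2*r2*r2;
+        const double xpp    = xp*radial + 2.0*p1*xp*yp + p2*(r2 + 2.0*xp*xp);
+        const double ypp    = yp*radial + p1*(r2 + 2.0*yp*yp) + 2.0*p2*xp*yp;
+        u = fx*xpp + cx;
+        v = fy*ypp + cy;
+    }
+@endcode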
+ +The next figures show two common types of radial distortion: barrel distortion +(\f$ 1 + k_1 r^2 + k_2 r^4 + k_3 r^6 \f$ monotonically decreasing) +and pincushion distortion (\f$ 1 + k_1 r^2 + k_2 r^4 + k_3 r^6 \f$ monotonically increasing). +Radial distortion is always monotonic for real lenses, +and if the estimator produces a non-monotonic result, +this should be considered a calibration failure. +More generally, radial distortion must be monotonic and the distortion function must be bijective. +A failed estimation result may look deceptively good near the image center +but will work poorly in e.g. AR/SFM applications. +The optimization method used in OpenCV camera calibration does not include these constraints as +the framework does not support the required integer programming and polynomial inequalities. +See [issue #15992](https://github.com/opencv/opencv/issues/15992) for additional information. + +![](pics/distortion_examples.png) +![](pics/distortion_examples2.png) + +In some cases, the image sensor may be tilted in order to focus an oblique plane in front of the +camera (Scheimpflug principle). This can be useful for particle image velocimetry (PIV) or +triangulation with a laser fan. The tilt causes a perspective distortion of \f$x''\f$ and +\f$y''\f$. This distortion can be modeled in the following way, see e.g. @cite Louhichi07. + +\f[\begin{bmatrix} +u \\ +v +\end{bmatrix} = \begin{bmatrix} +f_x x''' + c_x \\ +f_y y''' + c_y +\end{bmatrix},\f] + +where + +\f[s\vecthree{x'''}{y'''}{1} = +\vecthreethree{R_{33}(\tau_x, \tau_y)}{0}{-R_{13}(\tau_x, \tau_y)} +{0}{R_{33}(\tau_x, \tau_y)}{-R_{23}(\tau_x, \tau_y)} +{0}{0}{1} R(\tau_x, \tau_y) \vecthree{x''}{y''}{1}\f] + +and the matrix \f$R(\tau_x, \tau_y)\f$ is defined by two rotations with angular parameter +\f$\tau_x\f$ and \f$\tau_y\f$, respectively, + +\f[ +R(\tau_x, \tau_y) = +\vecthreethree{\cos(\tau_y)}{0}{-\sin(\tau_y)}{0}{1}{0}{\sin(\tau_y)}{0}{\cos(\tau_y)} +\vecthreethree{1}{0}{0}{0}{\cos(\tau_x)}{\sin(\tau_x)}{0}{-\sin(\tau_x)}{\cos(\tau_x)} = +\vecthreethree{\cos(\tau_y)}{\sin(\tau_y)\sin(\tau_x)}{-\sin(\tau_y)\cos(\tau_x)} +{0}{\cos(\tau_x)}{\sin(\tau_x)} +{\sin(\tau_y)}{-\cos(\tau_y)\sin(\tau_x)}{\cos(\tau_y)\cos(\tau_x)}. +\f] + +In the functions below the coefficients are passed or returned as + +\f[(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f] + +vector. That is, if the vector contains four elements, it means that \f$k_3=0\f$ . The distortion +coefficients do not depend on the scene viewed. Thus, they also belong to the intrinsic camera +parameters. And they remain the same regardless of the captured image resolution. If, for example, a +camera has been calibrated on images of 320 x 240 resolution, absolutely the same distortion +coefficients can be used for 640 x 480 images from the same camera while \f$f_x\f$, \f$f_y\f$, +\f$c_x\f$, and \f$c_y\f$ need to be scaled appropriately. + +The functions below use the above model to do the following: + +- Project 3D points to the image plane given intrinsic and extrinsic parameters. +- Compute extrinsic parameters given intrinsic parameters, a few 3D points, and their +projections. +- Estimate intrinsic and extrinsic camera parameters from several views of a known calibration +pattern (every view is described by several 3D-2D point correspondences). +- Estimate the relative position and orientation of the stereo camera "heads" and compute the +*rectification* transformation that makes the camera optical axes parallel. 
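+
+As a minimal sketch of the model above expressed through the API (illustrative only;
+the numeric values are arbitrary), @ref projectPoints, declared later in this header,
+evaluates the projection chain described above, including the distortion terms:
+
+@code
+    #include <vector>
+    #include <opencv2/core.hpp>
+    #include <opencv2/calib3d.hpp>
+
+    int main()
+    {
+        // Camera intrinsic matrix A and distortion vector (k1, k2, p1, p2, k3),
+        // in the ordering described above; a 4-element vector would imply k3 = 0.
+        cv::Mat A    = (cv::Mat_<double>(3, 3) << 800, 0, 320, 0, 800, 240, 0, 0, 1);
+        cv::Mat dist = (cv::Mat_<double>(1, 5) << -0.25, 0.07, 0.001, -0.0005, 0.0);
+
+        // Extrinsics as a rotation vector (cv::Rodrigues convention) and a translation.
+        cv::Mat rvec = cv::Mat::zeros(3, 1, CV_64F);
+        cv::Mat tvec = (cv::Mat_<double>(3, 1) << 0.0, 0.0, 5.0);
+
+        // Project one world point into the image plane.
+        std::vector<cv::Point3d> objectPoints{{0.1, -0.2, 0.0}};
+        std::vector<cv::Point2d> imagePoints;
+        cv::projectPoints(objectPoints, rvec, tvec, A, dist, imagePoints);
+        // imagePoints[0] now holds the pixel coordinates (u, v).
+        return 0;
+    }
+@endcode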
+ + Homogeneous Coordinates
+Homogeneous Coordinates are a system of coordinates that are used in projective geometry. Their use +allows to represent points at infinity by finite coordinates and simplifies formulas when compared +to the cartesian counterparts, e.g. they have the advantage that affine transformations can be +expressed as linear homogeneous transformation. + +One obtains the homogeneous vector \f$P_h\f$ by appending a 1 along an n-dimensional cartesian +vector \f$P\f$ e.g. for a 3D cartesian vector the mapping \f$P \rightarrow P_h\f$ is: + +\f[\begin{bmatrix} +X \\ +Y \\ +Z +\end{bmatrix} \rightarrow \begin{bmatrix} +X \\ +Y \\ +Z \\ +1 +\end{bmatrix}.\f] + +For the inverse mapping \f$P_h \rightarrow P\f$, one divides all elements of the homogeneous vector +by its last element, e.g. for a 3D homogeneous vector one gets its 2D cartesian counterpart by: + +\f[\begin{bmatrix} +X \\ +Y \\ +W +\end{bmatrix} \rightarrow \begin{bmatrix} +X / W \\ +Y / W +\end{bmatrix},\f] + +if \f$W \ne 0\f$. + +Due to this mapping, all multiples \f$k P_h\f$, for \f$k \ne 0\f$, of a homogeneous point represent +the same point \f$P_h\f$. An intuitive understanding of this property is that under a projective +transformation, all multiples of \f$P_h\f$ are mapped to the same point. This is the physical +observation one does for pinhole cameras, as all points along a ray through the camera's pinhole are +projected to the same image point, e.g. all points along the red ray in the image of the pinhole +camera model above would be mapped to the same image coordinate. This property is also the source +for the scale ambiguity s in the equation of the pinhole camera model. + +As mentioned, by using homogeneous coordinates we can express any change of basis parameterized by +\f$R\f$ and \f$t\f$ as a linear transformation, e.g. for the change of basis from coordinate system +0 to coordinate system 1 becomes: + +\f[P_1 = R P_0 + t \rightarrow P_{h_1} = \begin{bmatrix} +R & t \\ +0 & 1 +\end{bmatrix} P_{h_0}.\f] + +@note + - Many functions in this module take a camera intrinsic matrix as an input parameter. Although all + functions assume the same structure of this parameter, they may name it differently. The + parameter's description, however, will be clear in that a camera intrinsic matrix with the structure + shown above is required. 
+ - A calibration sample for 3 cameras in a horizontal position can be found at + opencv_source_code/samples/cpp/3calibration.cpp + - A calibration sample based on a sequence of images can be found at + opencv_source_code/samples/cpp/calibration.cpp + - A calibration sample in order to do 3D reconstruction can be found at + opencv_source_code/samples/cpp/build3dmodel.cpp + - A calibration example on stereo calibration can be found at + opencv_source_code/samples/cpp/stereo_calib.cpp + - A calibration example on stereo matching can be found at + opencv_source_code/samples/cpp/stereo_match.cpp + - (Python) A camera calibration sample can be found at + opencv_source_code/samples/python/calibrate.py + + @{ + @defgroup calib3d_fisheye Fisheye camera model + + Definitions: Let P be a point in 3D of coordinates X in the world reference frame (stored in the + matrix X) The coordinate vector of P in the camera reference frame is: + + \f[Xc = R X + T\f] + + where R is the rotation matrix corresponding to the rotation vector om: R = rodrigues(om); call x, y + and z the 3 coordinates of Xc: + + \f[x = Xc_1 \\ y = Xc_2 \\ z = Xc_3\f] + + The pinhole projection coordinates of P is [a; b] where + + \f[a = x / z \ and \ b = y / z \\ r^2 = a^2 + b^2 \\ \theta = atan(r)\f] + + Fisheye distortion: + + \f[\theta_d = \theta (1 + k_1 \theta^2 + k_2 \theta^4 + k_3 \theta^6 + k_4 \theta^8)\f] + + The distorted point coordinates are [x'; y'] where + + \f[x' = (\theta_d / r) a \\ y' = (\theta_d / r) b \f] + + Finally, conversion into pixel coordinates: The final pixel coordinates vector [u; v] where: + + \f[u = f_x (x' + \alpha y') + c_x \\ + v = f_y y' + c_y\f] + + @defgroup calib3d_c C API + + @} + */ + +namespace cv +{ + +//! @addtogroup calib3d +//! @{ + +//! type of the robust estimation algorithm +enum { LMEDS = 4, //!< least-median of squares algorithm + RANSAC = 8, //!< RANSAC algorithm + RHO = 16 //!< RHO algorithm + }; + +enum SolvePnPMethod { + SOLVEPNP_ITERATIVE = 0, + SOLVEPNP_EPNP = 1, //!< EPnP: Efficient Perspective-n-Point Camera Pose Estimation @cite lepetit2009epnp + SOLVEPNP_P3P = 2, //!< Complete Solution Classification for the Perspective-Three-Point Problem @cite gao2003complete + SOLVEPNP_DLS = 3, //!< **Broken implementation. Using this flag will fallback to EPnP.** \n + //!< A Direct Least-Squares (DLS) Method for PnP @cite hesch2011direct + SOLVEPNP_UPNP = 4, //!< **Broken implementation. Using this flag will fallback to EPnP.** \n + //!< Exhaustive Linearization for Robust Camera Pose and Focal Length Estimation @cite penate2013exhaustive + SOLVEPNP_AP3P = 5, //!< An Efficient Algebraic Solution to the Perspective-Three-Point Problem @cite Ke17 + SOLVEPNP_IPPE = 6, //!< Infinitesimal Plane-Based Pose Estimation @cite Collins14 \n + //!< Object points must be coplanar. 
+ SOLVEPNP_IPPE_SQUARE = 7, //!< Infinitesimal Plane-Based Pose Estimation @cite Collins14 \n + //!< This is a special case suitable for marker pose estimation.\n + //!< 4 coplanar object points must be defined in the following order: + //!< - point 0: [-squareLength / 2, squareLength / 2, 0] + //!< - point 1: [ squareLength / 2, squareLength / 2, 0] + //!< - point 2: [ squareLength / 2, -squareLength / 2, 0] + //!< - point 3: [-squareLength / 2, -squareLength / 2, 0] + SOLVEPNP_SQPNP = 8, //!< SQPnP: A Consistently Fast and Globally OptimalSolution to the Perspective-n-Point Problem @cite Terzakis20 +#ifndef CV_DOXYGEN + SOLVEPNP_MAX_COUNT //!< Used for count +#endif +}; + +enum { CALIB_CB_ADAPTIVE_THRESH = 1, + CALIB_CB_NORMALIZE_IMAGE = 2, + CALIB_CB_FILTER_QUADS = 4, + CALIB_CB_FAST_CHECK = 8 + }; + +enum { CALIB_CB_SYMMETRIC_GRID = 1, + CALIB_CB_ASYMMETRIC_GRID = 2, + CALIB_CB_CLUSTERING = 4 + }; + +enum { CALIB_USE_INTRINSIC_GUESS = 0x00001, + CALIB_FIX_ASPECT_RATIO = 0x00002, + CALIB_FIX_PRINCIPAL_POINT = 0x00004, + CALIB_ZERO_TANGENT_DIST = 0x00008, + CALIB_FIX_FOCAL_LENGTH = 0x00010, + CALIB_FIX_K1 = 0x00020, + CALIB_FIX_K2 = 0x00040, + CALIB_FIX_K3 = 0x00080, + CALIB_FIX_K4 = 0x00800, + CALIB_FIX_K5 = 0x01000, + CALIB_FIX_K6 = 0x02000, + CALIB_RATIONAL_MODEL = 0x04000, + CALIB_THIN_PRISM_MODEL = 0x08000, + CALIB_FIX_S1_S2_S3_S4 = 0x10000, + CALIB_TILTED_MODEL = 0x40000, + CALIB_FIX_TAUX_TAUY = 0x80000, + CALIB_USE_QR = 0x100000, //!< use QR instead of SVD decomposition for solving. Faster but potentially less precise + CALIB_FIX_TANGENT_DIST = 0x200000, + // only for stereo + CALIB_FIX_INTRINSIC = 0x00100, + CALIB_SAME_FOCAL_LENGTH = 0x00200, + // for stereo rectification + CALIB_ZERO_DISPARITY = 0x00400, + CALIB_USE_LU = (1 << 17), //!< use LU instead of SVD decomposition for solving. much faster but potentially less precise + CALIB_USE_EXTRINSIC_GUESS = (1 << 22) //!< for stereoCalibrate + }; + +//! the algorithm for finding fundamental matrix +enum { FM_7POINT = 1, //!< 7-point algorithm + FM_8POINT = 2, //!< 8-point algorithm + FM_LMEDS = 4, //!< least-median algorithm. 7-point algorithm is used. + FM_RANSAC = 8 //!< RANSAC algorithm. It needs at least 15 points. 7-point algorithm is used. + }; + +enum HandEyeCalibrationMethod +{ + CALIB_HAND_EYE_TSAI = 0, //!< A New Technique for Fully Autonomous and Efficient 3D Robotics Hand/Eye Calibration @cite Tsai89 + CALIB_HAND_EYE_PARK = 1, //!< Robot Sensor Calibration: Solving AX = XB on the Euclidean Group @cite Park94 + CALIB_HAND_EYE_HORAUD = 2, //!< Hand-eye Calibration @cite Horaud95 + CALIB_HAND_EYE_ANDREFF = 3, //!< On-line Hand-Eye Calibration @cite Andreff99 + CALIB_HAND_EYE_DANIILIDIS = 4 //!< Hand-Eye Calibration Using Dual Quaternions @cite Daniilidis98 +}; + + +/** @brief Converts a rotation matrix to a rotation vector or vice versa. + +@param src Input rotation vector (3x1 or 1x3) or rotation matrix (3x3). +@param dst Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively. +@param jacobian Optional output Jacobian matrix, 3x9 or 9x3, which is a matrix of partial +derivatives of the output array components with respect to the input array components. 
+ +\f[\begin{array}{l} \theta \leftarrow norm(r) \\ r \leftarrow r/ \theta \\ R = \cos(\theta) I + (1- \cos{\theta} ) r r^T + \sin(\theta) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} \end{array}\f] + +Inverse transformation can be also done easily, since + +\f[\sin ( \theta ) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} = \frac{R - R^T}{2}\f] + +A rotation vector is a convenient and most compact representation of a rotation matrix (since any +rotation matrix has just 3 degrees of freedom). The representation is used in the global 3D geometry +optimization procedures like @ref calibrateCamera, @ref stereoCalibrate, or @ref solvePnP . + +@note More information about the computation of the derivative of a 3D rotation matrix with respect to its exponential coordinate +can be found in: + - A Compact Formula for the Derivative of a 3-D Rotation in Exponential Coordinates, Guillermo Gallego, Anthony J. Yezzi @cite Gallego2014ACF + +@note Useful information on SE(3) and Lie Groups can be found in: + - A tutorial on SE(3) transformation parameterizations and on-manifold optimization, Jose-Luis Blanco @cite blanco2010tutorial + - Lie Groups for 2D and 3D Transformation, Ethan Eade @cite Eade17 + - A micro Lie theory for state estimation in robotics, Joan Solà, Jérémie Deray, Dinesh Atchuthan @cite Sol2018AML + */ +CV_EXPORTS_W void Rodrigues( InputArray src, OutputArray dst, OutputArray jacobian = noArray() ); + +/** @example samples/cpp/tutorial_code/features2D/Homography/pose_from_homography.cpp +An example program about pose estimation from coplanar points + +Check @ref tutorial_homography "the corresponding tutorial" for more details +*/ + +/** @brief Finds a perspective transformation between two planes. + +@param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2 +or vector\ . +@param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or +a vector\ . +@param method Method used to compute a homography matrix. The following methods are possible: +- **0** - a regular method using all the points, i.e., the least squares method +- @ref RANSAC - RANSAC-based robust method +- @ref LMEDS - Least-Median robust method +- @ref RHO - PROSAC-based robust method +@param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier +(used in the RANSAC and RHO methods only). That is, if +\f[\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}\f] +then the point \f$i\f$ is considered as an outlier. If srcPoints and dstPoints are measured in pixels, +it usually makes sense to set this parameter somewhere in the range of 1 to 10. +@param mask Optional output mask set by a robust method ( RANSAC or LMeDS ). Note that the input +mask values are ignored. +@param maxIters The maximum number of RANSAC iterations. +@param confidence Confidence level, between 0 and 1. + +The function finds and returns the perspective transformation \f$H\f$ between the source and the +destination planes: + +\f[s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\f] + +so that the back-projection error + +\f[\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\f] + +is minimized. 
If the parameter method is set to the default value 0, the function uses all the point +pairs to compute an initial homography estimate with a simple least-squares scheme. + +However, if not all of the point pairs ( \f$srcPoints_i\f$, \f$dstPoints_i\f$ ) fit the rigid perspective +transformation (that is, there are some outliers), this initial estimate will be poor. In this case, +you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different +random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix +using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the +computed homography (which is the number of inliers for RANSAC or the least median re-projection error for +LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and +the mask of inliers/outliers. + +Regardless of the method, robust or not, the computed homography matrix is refined further (using +inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the +re-projection error even more. + +The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to +distinguish inliers from outliers. The method LMeDS does not need any threshold but it works +correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the +noise is rather small, use the default method (method=0). + +The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is +determined up to a scale. Thus, it is normalized so that \f$h_{33}=1\f$. Note that whenever an \f$H\f$ matrix +cannot be estimated, an empty one will be returned. + +@sa +getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective, +perspectiveTransform + */ +CV_EXPORTS_W Mat findHomography( InputArray srcPoints, InputArray dstPoints, + int method = 0, double ransacReprojThreshold = 3, + OutputArray mask=noArray(), const int maxIters = 2000, + const double confidence = 0.995); + +/** @overload */ +CV_EXPORTS Mat findHomography( InputArray srcPoints, InputArray dstPoints, + OutputArray mask, int method = 0, double ransacReprojThreshold = 3 ); + +/** @brief Computes an RQ decomposition of 3x3 matrices. + +@param src 3x3 input matrix. +@param mtxR Output 3x3 upper-triangular matrix. +@param mtxQ Output 3x3 orthogonal matrix. +@param Qx Optional output 3x3 rotation matrix around x-axis. +@param Qy Optional output 3x3 rotation matrix around y-axis. +@param Qz Optional output 3x3 rotation matrix around z-axis. + +The function computes a RQ decomposition using the given rotations. This function is used in +decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera +and a rotation matrix. + +It optionally returns three rotation matrices, one for each axis, and the three Euler angles in +degrees (as the return value) that could be used in OpenGL. Note, there is always more than one +sequence of rotations about the three principal axes that results in the same orientation of an +object, e.g. see @cite Slabaugh . Returned tree rotation matrices and corresponding three Euler angles +are only one of the possible solutions. 
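+
+A small illustrative sketch (not from the upstream documentation; the helper name and
+numeric values are arbitrary): factoring the left 3x3 block \f$M = A R\f$ of a
+projection matrix back into an upper-triangular part and a rotation.
+
+@code
+    #include <opencv2/core.hpp>
+    #include <opencv2/calib3d.hpp>
+
+    void decomposeLeftBlock()
+    {
+        cv::Mat A = (cv::Mat_<double>(3, 3) << 800, 0, 320, 0, 800, 240, 0, 0, 1);
+        cv::Mat R;
+        cv::Rodrigues(cv::Vec3d(0.0, 0.1, 0.0), R);   // small rotation about the y-axis
+        cv::Mat M = A * R;                             // left 3x3 block of A*[R|t]
+
+        cv::Mat mtxR, mtxQ;
+        cv::Vec3d eulerDeg = cv::RQDecomp3x3(M, mtxR, mtxQ);
+        // mtxR should recover A and mtxQ should recover R (up to the decomposition's
+        // sign conventions); eulerDeg holds the three Euler angles in degrees.
+        (void)eulerDeg;
+    }
+@endcode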
+ */ +CV_EXPORTS_W Vec3d RQDecomp3x3( InputArray src, OutputArray mtxR, OutputArray mtxQ, + OutputArray Qx = noArray(), + OutputArray Qy = noArray(), + OutputArray Qz = noArray()); + +/** @brief Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix. + +@param projMatrix 3x4 input projection matrix P. +@param cameraMatrix Output 3x3 camera intrinsic matrix \f$\cameramatrix{A}\f$. +@param rotMatrix Output 3x3 external rotation matrix R. +@param transVect Output 4x1 translation vector T. +@param rotMatrixX Optional 3x3 rotation matrix around x-axis. +@param rotMatrixY Optional 3x3 rotation matrix around y-axis. +@param rotMatrixZ Optional 3x3 rotation matrix around z-axis. +@param eulerAngles Optional three-element vector containing three Euler angles of rotation in +degrees. + +The function computes a decomposition of a projection matrix into a calibration and a rotation +matrix and the position of a camera. + +It optionally returns three rotation matrices, one for each axis, and three Euler angles that could +be used in OpenGL. Note, there is always more than one sequence of rotations about the three +principal axes that results in the same orientation of an object, e.g. see @cite Slabaugh . Returned +tree rotation matrices and corresponding three Euler angles are only one of the possible solutions. + +The function is based on RQDecomp3x3 . + */ +CV_EXPORTS_W void decomposeProjectionMatrix( InputArray projMatrix, OutputArray cameraMatrix, + OutputArray rotMatrix, OutputArray transVect, + OutputArray rotMatrixX = noArray(), + OutputArray rotMatrixY = noArray(), + OutputArray rotMatrixZ = noArray(), + OutputArray eulerAngles =noArray() ); + +/** @brief Computes partial derivatives of the matrix product for each multiplied matrix. + +@param A First multiplied matrix. +@param B Second multiplied matrix. +@param dABdA First output derivative matrix d(A\*B)/dA of size +\f$\texttt{A.rows*B.cols} \times {A.rows*A.cols}\f$ . +@param dABdB Second output derivative matrix d(A\*B)/dB of size +\f$\texttt{A.rows*B.cols} \times {B.rows*B.cols}\f$ . + +The function computes partial derivatives of the elements of the matrix product \f$A*B\f$ with regard to +the elements of each of the two input matrices. The function is used to compute the Jacobian +matrices in stereoCalibrate but can also be used in any other similar optimization function. + */ +CV_EXPORTS_W void matMulDeriv( InputArray A, InputArray B, OutputArray dABdA, OutputArray dABdB ); + +/** @brief Combines two rotation-and-shift transformations. + +@param rvec1 First rotation vector. +@param tvec1 First translation vector. +@param rvec2 Second rotation vector. +@param tvec2 Second translation vector. +@param rvec3 Output rotation vector of the superposition. +@param tvec3 Output translation vector of the superposition. 
+@param dr3dr1 Optional output derivative of rvec3 with regard to rvec1 +@param dr3dt1 Optional output derivative of rvec3 with regard to tvec1 +@param dr3dr2 Optional output derivative of rvec3 with regard to rvec2 +@param dr3dt2 Optional output derivative of rvec3 with regard to tvec2 +@param dt3dr1 Optional output derivative of tvec3 with regard to rvec1 +@param dt3dt1 Optional output derivative of tvec3 with regard to tvec1 +@param dt3dr2 Optional output derivative of tvec3 with regard to rvec2 +@param dt3dt2 Optional output derivative of tvec3 with regard to tvec2 + +The functions compute: + +\f[\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\f] + +where \f$\mathrm{rodrigues}\f$ denotes a rotation vector to a rotation matrix transformation, and +\f$\mathrm{rodrigues}^{-1}\f$ denotes the inverse transformation. See Rodrigues for details. + +Also, the functions can compute the derivatives of the output vectors with regards to the input +vectors (see matMulDeriv ). The functions are used inside stereoCalibrate but can also be used in +your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a +function that contains a matrix multiplication. + */ +CV_EXPORTS_W void composeRT( InputArray rvec1, InputArray tvec1, + InputArray rvec2, InputArray tvec2, + OutputArray rvec3, OutputArray tvec3, + OutputArray dr3dr1 = noArray(), OutputArray dr3dt1 = noArray(), + OutputArray dr3dr2 = noArray(), OutputArray dr3dt2 = noArray(), + OutputArray dt3dr1 = noArray(), OutputArray dt3dt1 = noArray(), + OutputArray dt3dr2 = noArray(), OutputArray dt3dt2 = noArray() ); + +/** @brief Projects 3D points to an image plane. + +@param objectPoints Array of object points expressed wrt. the world coordinate frame. A 3xN/Nx3 +1-channel or 1xN/Nx1 3-channel (or vector\ ), where N is the number of points in the view. +@param rvec The rotation vector (@ref Rodrigues) that, together with tvec, performs a change of +basis from world to camera coordinate system, see @ref calibrateCamera for details. +@param tvec The translation vector, see parameter description above. +@param cameraMatrix Camera intrinsic matrix \f$\cameramatrix{A}\f$ . +@param distCoeffs Input vector of distortion coefficients +\f$\distcoeffs\f$ . If the vector is empty, the zero distortion coefficients are assumed. +@param imagePoints Output array of image points, 1xN/Nx1 2-channel, or +vector\ . +@param jacobian Optional output 2Nx(10+\) jacobian matrix of derivatives of image +points with respect to components of the rotation vector, translation vector, focal lengths, +coordinates of the principal point and the distortion coefficients. In the old interface different +components of the jacobian are returned via different output parameters. +@param aspectRatio Optional "fixed aspect ratio" parameter. If the parameter is not 0, the +function assumes that the aspect ratio (\f$f_x / f_y\f$) is fixed and correspondingly adjusts the +jacobian matrix. + +The function computes the 2D projections of 3D points to the image plane, given intrinsic and +extrinsic camera parameters. Optionally, the function computes Jacobians -matrices of partial +derivatives of image points coordinates (as functions of all the input parameters) with respect to +the particular parameters, intrinsic and/or extrinsic. 
The Jacobians are used during the global +optimization in @ref calibrateCamera, @ref solvePnP, and @ref stereoCalibrate. The function itself +can also be used to compute a re-projection error, given the current intrinsic and extrinsic +parameters. + +@note By setting rvec = tvec = \f$[0, 0, 0]\f$, or by setting cameraMatrix to a 3x3 identity matrix, +or by passing zero distortion coefficients, one can get various useful partial cases of the +function. This means, one can compute the distorted coordinates for a sparse set of points or apply +a perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup. + */ +CV_EXPORTS_W void projectPoints( InputArray objectPoints, + InputArray rvec, InputArray tvec, + InputArray cameraMatrix, InputArray distCoeffs, + OutputArray imagePoints, + OutputArray jacobian = noArray(), + double aspectRatio = 0 ); + +/** @example samples/cpp/tutorial_code/features2D/Homography/homography_from_camera_displacement.cpp +An example program about homography from the camera displacement + +Check @ref tutorial_homography "the corresponding tutorial" for more details +*/ + +/** @brief Finds an object pose from 3D-2D point correspondences. +This function returns the rotation and the translation vectors that transform a 3D point expressed in the object +coordinate frame to the camera coordinate frame, using different methods: +- P3P methods (@ref SOLVEPNP_P3P, @ref SOLVEPNP_AP3P): need 4 input points to return a unique solution. +- @ref SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. +- @ref SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation. +Number of input points must be 4. Object points must be defined in the following order: + - point 0: [-squareLength / 2, squareLength / 2, 0] + - point 1: [ squareLength / 2, squareLength / 2, 0] + - point 2: [ squareLength / 2, -squareLength / 2, 0] + - point 3: [-squareLength / 2, -squareLength / 2, 0] +- for all the other flags, number of input points must be >= 4 and object points can be in any configuration. + +@param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or +1xN/Nx1 3-channel, where N is the number of points. vector\ can be also passed here. +@param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel, +where N is the number of points. vector\ can be also passed here. +@param cameraMatrix Input camera intrinsic matrix \f$\cameramatrix{A}\f$ . +@param distCoeffs Input vector of distortion coefficients +\f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are +assumed. +@param rvec Output rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from +the model coordinate system to the camera coordinate system. +@param tvec Output translation vector. +@param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses +the provided rvec and tvec values as initial approximations of the rotation and translation +vectors, respectively, and further optimizes them. +@param flags Method for solving a PnP problem: +- @ref SOLVEPNP_ITERATIVE Iterative method is based on a Levenberg-Marquardt optimization. In +this case the function finds such a pose that minimizes reprojection error, that is the sum +of squared distances between the observed projections imagePoints and the projected (using +@ref projectPoints ) objectPoints . +- @ref SOLVEPNP_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. 
Tang, H.-F. Chang +"Complete Solution Classification for the Perspective-Three-Point Problem" (@cite gao2003complete). +In this case the function requires exactly four object and image points. +- @ref SOLVEPNP_AP3P Method is based on the paper of T. Ke, S. Roumeliotis +"An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (@cite Ke17). +In this case the function requires exactly four object and image points. +- @ref SOLVEPNP_EPNP Method has been introduced by F. Moreno-Noguer, V. Lepetit and P. Fua in the +paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation" (@cite lepetit2009epnp). +- @ref SOLVEPNP_DLS **Broken implementation. Using this flag will fallback to EPnP.** \n +Method is based on the paper of J. Hesch and S. Roumeliotis. +"A Direct Least-Squares (DLS) Method for PnP" (@cite hesch2011direct). +- @ref SOLVEPNP_UPNP **Broken implementation. Using this flag will fallback to EPnP.** \n +Method is based on the paper of A. Penate-Sanchez, J. Andrade-Cetto, +F. Moreno-Noguer. "Exhaustive Linearization for Robust Camera Pose and Focal Length +Estimation" (@cite penate2013exhaustive). In this case the function also estimates the parameters \f$f_x\f$ and \f$f_y\f$ +assuming that both have the same value. Then the cameraMatrix is updated with the estimated +focal length. +- @ref SOLVEPNP_IPPE Method is based on the paper of T. Collins and A. Bartoli. +"Infinitesimal Plane-Based Pose Estimation" (@cite Collins14). This method requires coplanar object points. +- @ref SOLVEPNP_IPPE_SQUARE Method is based on the paper of Toby Collins and Adrien Bartoli. +"Infinitesimal Plane-Based Pose Estimation" (@cite Collins14). This method is suitable for marker pose estimation. +It requires 4 coplanar object points defined in the following order: + - point 0: [-squareLength / 2, squareLength / 2, 0] + - point 1: [ squareLength / 2, squareLength / 2, 0] + - point 2: [ squareLength / 2, -squareLength / 2, 0] + - point 3: [-squareLength / 2, -squareLength / 2, 0] +- @ref SOLVEPNP_SQPNP Method is based on the paper "A Consistently Fast and Globally Optimal Solution to the +Perspective-n-Point Problem" by G. Terzakis and M.Lourakis (@cite Terzakis20). It requires 3 or more points. + + +The function estimates the object pose given a set of object points, their corresponding image +projections, as well as the camera intrinsic matrix and the distortion coefficients, see the figure below +(more precisely, the X-axis of the camera frame is pointing to the right, the Y-axis downward +and the Z-axis forward). 
+ +![](pnp.jpg) + +Points expressed in the world frame \f$ \bf{X}_w \f$ are projected into the image plane \f$ \left[ u, v \right] \f$ +using the perspective projection model \f$ \Pi \f$ and the camera intrinsic parameters matrix \f$ \bf{A} \f$: + +\f[ + \begin{align*} + \begin{bmatrix} + u \\ + v \\ + 1 + \end{bmatrix} &= + \bf{A} \hspace{0.1em} \Pi \hspace{0.2em} ^{c}\bf{T}_w + \begin{bmatrix} + X_{w} \\ + Y_{w} \\ + Z_{w} \\ + 1 + \end{bmatrix} \\ + \begin{bmatrix} + u \\ + v \\ + 1 + \end{bmatrix} &= + \begin{bmatrix} + f_x & 0 & c_x \\ + 0 & f_y & c_y \\ + 0 & 0 & 1 + \end{bmatrix} + \begin{bmatrix} + 1 & 0 & 0 & 0 \\ + 0 & 1 & 0 & 0 \\ + 0 & 0 & 1 & 0 + \end{bmatrix} + \begin{bmatrix} + r_{11} & r_{12} & r_{13} & t_x \\ + r_{21} & r_{22} & r_{23} & t_y \\ + r_{31} & r_{32} & r_{33} & t_z \\ + 0 & 0 & 0 & 1 + \end{bmatrix} + \begin{bmatrix} + X_{w} \\ + Y_{w} \\ + Z_{w} \\ + 1 + \end{bmatrix} + \end{align*} +\f] + +The estimated pose is thus the rotation (`rvec`) and the translation (`tvec`) vectors that allow transforming +a 3D point expressed in the world frame into the camera frame: + +\f[ + \begin{align*} + \begin{bmatrix} + X_c \\ + Y_c \\ + Z_c \\ + 1 + \end{bmatrix} &= + \hspace{0.2em} ^{c}\bf{T}_w + \begin{bmatrix} + X_{w} \\ + Y_{w} \\ + Z_{w} \\ + 1 + \end{bmatrix} \\ + \begin{bmatrix} + X_c \\ + Y_c \\ + Z_c \\ + 1 + \end{bmatrix} &= + \begin{bmatrix} + r_{11} & r_{12} & r_{13} & t_x \\ + r_{21} & r_{22} & r_{23} & t_y \\ + r_{31} & r_{32} & r_{33} & t_z \\ + 0 & 0 & 0 & 1 + \end{bmatrix} + \begin{bmatrix} + X_{w} \\ + Y_{w} \\ + Z_{w} \\ + 1 + \end{bmatrix} + \end{align*} +\f] + +@note + - An example of how to use solvePnP for planar augmented reality can be found at + opencv_source_code/samples/python/plane_ar.py + - If you are using Python: + - Numpy array slices won't work as input because solvePnP requires contiguous + arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of + modules/calib3d/src/solvepnp.cpp version 2.4.9) + - The P3P algorithm requires image points to be in an array of shape (N,1,2) due + to its calling of cv::undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9) + which requires 2-channel information. + - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of + it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints = + np.ascontiguousarray(D[:,:2]).reshape((N,1,2)) + - The methods @ref SOLVEPNP_DLS and @ref SOLVEPNP_UPNP cannot be used as the current implementations are + unstable and sometimes give completely wrong results. If you pass one of these two + flags, @ref SOLVEPNP_EPNP method will be used instead. + - The minimum number of points is 4 in the general case. In the case of @ref SOLVEPNP_P3P and @ref SOLVEPNP_AP3P + methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions + of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error). + - With @ref SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points + are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the + global solution to converge. + - With @ref SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar. + - With @ref SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation. + Number of input points must be 4. 
Object points must be defined in the following order: + - point 0: [-squareLength / 2, squareLength / 2, 0] + - point 1: [ squareLength / 2, squareLength / 2, 0] + - point 2: [ squareLength / 2, -squareLength / 2, 0] + - point 3: [-squareLength / 2, -squareLength / 2, 0] + - With @ref SOLVEPNP_SQPNP input points must be >= 3 + */ +CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints, + InputArray cameraMatrix, InputArray distCoeffs, + OutputArray rvec, OutputArray tvec, + bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE ); + +/** @brief Finds an object pose from 3D-2D point correspondences using the RANSAC scheme. + +@param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or +1xN/Nx1 3-channel, where N is the number of points. vector\ can be also passed here. +@param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel, +where N is the number of points. vector\ can be also passed here. +@param cameraMatrix Input camera intrinsic matrix \f$\cameramatrix{A}\f$ . +@param distCoeffs Input vector of distortion coefficients +\f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are +assumed. +@param rvec Output rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from +the model coordinate system to the camera coordinate system. +@param tvec Output translation vector. +@param useExtrinsicGuess Parameter used for @ref SOLVEPNP_ITERATIVE. If true (1), the function uses +the provided rvec and tvec values as initial approximations of the rotation and translation +vectors, respectively, and further optimizes them. +@param iterationsCount Number of iterations. +@param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value +is the maximum allowed distance between the observed and computed point projections to consider it +an inlier. +@param confidence The probability that the algorithm produces a useful result. +@param inliers Output vector that contains indices of inliers in objectPoints and imagePoints . +@param flags Method for solving a PnP problem (see @ref solvePnP ). + +The function estimates an object pose given a set of object points, their corresponding image +projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such +a pose that minimizes reprojection error, that is, the sum of squared distances between the observed +projections imagePoints and the projected (using @ref projectPoints ) objectPoints. The use of RANSAC +makes the function resistant to outliers. + +@note + - An example of how to use solvePNPRansac for object detection can be found at + opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/ + - The default method used to estimate the camera pose for the Minimal Sample Sets step + is #SOLVEPNP_EPNP. Exceptions are: + - if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used. + - if the number of input points is equal to 4, #SOLVEPNP_P3P is used. + - The method used to estimate the camera pose using all the inliers is defined by the + flags parameters unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case, + the method #SOLVEPNP_EPNP will be used instead. 
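+
+A self-contained illustrative sketch (not part of the upstream documentation; the
+synthetic points, pose and thresholds are arbitrary): a known pose is used to generate
+image points with @ref projectPoints, and solvePnPRansac then recovers it.
+
+@code
+    #include <vector>
+    #include <opencv2/core.hpp>
+    #include <opencv2/calib3d.hpp>
+
+    int main()
+    {
+        cv::Mat K = (cv::Mat_<double>(3, 3) << 800, 0, 320, 0, 800, 240, 0, 0, 1);
+        cv::Mat dist;                       // empty => zero distortion coefficients
+
+        std::vector<cv::Point3f> objectPoints = {
+            {0.f, 0.f, 0.f}, {1.f, 0.f, 0.f}, {1.f, 1.f, 0.f},
+            {0.f, 1.f, 0.f}, {0.5f, 0.5f, 0.5f}, {1.f, 0.f, 0.5f}};
+
+        // Synthetic "ground truth" pose used only to generate the 2D observations.
+        cv::Mat rvecTrue = (cv::Mat_<double>(3, 1) << 0.1, -0.2, 0.05);
+        cv::Mat tvecTrue = (cv::Mat_<double>(3, 1) << 0.0, 0.0, 5.0);
+        std::vector<cv::Point2f> imagePoints;
+        cv::projectPoints(objectPoints, rvecTrue, tvecTrue, K, dist, imagePoints);
+
+        // In a real application imagePoints come from a detector and may contain
+        // outliers, which the RANSAC loop is meant to reject.
+        cv::Mat rvec, tvec;
+        std::vector<int> inliers;
+        bool ok = cv::solvePnPRansac(objectPoints, imagePoints, K, dist, rvec, tvec,
+                                     false,   // useExtrinsicGuess
+                                     200,     // iterationsCount
+                                     4.0f,    // reprojectionError threshold (pixels)
+                                     0.99,    // confidence
+                                     inliers, // indices of inlier correspondences
+                                     cv::SOLVEPNP_EPNP);
+        return ok ? 0 : 1;
+    }
+@endcode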
+ */ +CV_EXPORTS_W bool solvePnPRansac( InputArray objectPoints, InputArray imagePoints, + InputArray cameraMatrix, InputArray distCoeffs, + OutputArray rvec, OutputArray tvec, + bool useExtrinsicGuess = false, int iterationsCount = 100, + float reprojectionError = 8.0, double confidence = 0.99, + OutputArray inliers = noArray(), int flags = SOLVEPNP_ITERATIVE ); + +/** @brief Finds an object pose from 3 3D-2D point correspondences. + +@param objectPoints Array of object points in the object coordinate space, 3x3 1-channel or +1x3/3x1 3-channel. vector\ can be also passed here. +@param imagePoints Array of corresponding image points, 3x2 1-channel or 1x3/3x1 2-channel. + vector\ can be also passed here. +@param cameraMatrix Input camera intrinsic matrix \f$\cameramatrix{A}\f$ . +@param distCoeffs Input vector of distortion coefficients +\f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are +assumed. +@param rvecs Output rotation vectors (see @ref Rodrigues ) that, together with tvecs, brings points from +the model coordinate system to the camera coordinate system. A P3P problem has up to 4 solutions. +@param tvecs Output translation vectors. +@param flags Method for solving a P3P problem: +- @ref SOLVEPNP_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang +"Complete Solution Classification for the Perspective-Three-Point Problem" (@cite gao2003complete). +- @ref SOLVEPNP_AP3P Method is based on the paper of T. Ke and S. Roumeliotis. +"An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (@cite Ke17). + +The function estimates the object pose given 3 object points, their corresponding image +projections, as well as the camera intrinsic matrix and the distortion coefficients. + +@note +The solutions are sorted by reprojection errors (lowest to highest). + */ +CV_EXPORTS_W int solveP3P( InputArray objectPoints, InputArray imagePoints, + InputArray cameraMatrix, InputArray distCoeffs, + OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, + int flags ); + +/** @brief Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame +to the camera coordinate frame) from a 3D-2D point correspondences and starting from an initial solution. + +@param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel, +where N is the number of points. vector\ can also be passed here. +@param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel, +where N is the number of points. vector\ can also be passed here. +@param cameraMatrix Input camera intrinsic matrix \f$\cameramatrix{A}\f$ . +@param distCoeffs Input vector of distortion coefficients +\f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are +assumed. +@param rvec Input/Output rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from +the model coordinate system to the camera coordinate system. Input values are used as an initial solution. +@param tvec Input/Output translation vector. Input values are used as an initial solution. +@param criteria Criteria when to stop the Levenberg-Marquard iterative algorithm. + +The function refines the object pose given at least 3 object points, their corresponding image +projections, an initial solution for the rotation and translation vector, +as well as the camera intrinsic matrix and the distortion coefficients. 
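One possible way to combine the RANSAC solver above with the refinement function documented here is to estimate a robust initial pose, keep only the inlier correspondences, and then polish the result with solvePnPRefineLM. The following is only a sketch: the point lists and intrinsics are placeholders assumed to be filled elsewhere.
@code
    std::vector<cv::Point3f> points3d;   // model points, filled elsewhere
    std::vector<cv::Point2f> points2d;   // their detections, filled elsewhere
    cv::Mat cameraMatrix, distCoeffs;    // assumed known from a prior calibration

    cv::Mat rvec, tvec;
    std::vector<int> inlierIdx;
    bool found = cv::solvePnPRansac(points3d, points2d, cameraMatrix, distCoeffs,
                                    rvec, tvec, false, 100, 8.f, 0.99, inlierIdx,
                                    cv::SOLVEPNP_EPNP);
    if (found)
    {
        // Keep only the inliers and refine the pose with the Levenberg-Marquardt scheme.
        std::vector<cv::Point3f> obj;
        std::vector<cv::Point2f> img;
        for (int i : inlierIdx) { obj.push_back(points3d[i]); img.push_back(points2d[i]); }
        cv::solvePnPRefineLM(obj, img, cameraMatrix, distCoeffs, rvec, tvec);
    }
@endcode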
+The function minimizes the projection error with respect to the rotation and the translation vectors, according +to a Levenberg-Marquardt iterative minimization @cite Madsen04 @cite Eade13 process. + */ +CV_EXPORTS_W void solvePnPRefineLM( InputArray objectPoints, InputArray imagePoints, + InputArray cameraMatrix, InputArray distCoeffs, + InputOutputArray rvec, InputOutputArray tvec, + TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON)); + +/** @brief Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame +to the camera coordinate frame) from a 3D-2D point correspondences and starting from an initial solution. + +@param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel, +where N is the number of points. vector\ can also be passed here. +@param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel, +where N is the number of points. vector\ can also be passed here. +@param cameraMatrix Input camera intrinsic matrix \f$\cameramatrix{A}\f$ . +@param distCoeffs Input vector of distortion coefficients +\f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are +assumed. +@param rvec Input/Output rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from +the model coordinate system to the camera coordinate system. Input values are used as an initial solution. +@param tvec Input/Output translation vector. Input values are used as an initial solution. +@param criteria Criteria when to stop the Levenberg-Marquard iterative algorithm. +@param VVSlambda Gain for the virtual visual servoing control law, equivalent to the \f$\alpha\f$ +gain in the Damped Gauss-Newton formulation. + +The function refines the object pose given at least 3 object points, their corresponding image +projections, an initial solution for the rotation and translation vector, +as well as the camera intrinsic matrix and the distortion coefficients. +The function minimizes the projection error with respect to the rotation and the translation vectors, using a +virtual visual servoing (VVS) @cite Chaumette06 @cite Marchand16 scheme. + */ +CV_EXPORTS_W void solvePnPRefineVVS( InputArray objectPoints, InputArray imagePoints, + InputArray cameraMatrix, InputArray distCoeffs, + InputOutputArray rvec, InputOutputArray tvec, + TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON), + double VVSlambda = 1); + +/** @brief Finds an object pose from 3D-2D point correspondences. +This function returns a list of all the possible solutions (a solution is a +couple), depending on the number of input points and the chosen method: +- P3P methods (@ref SOLVEPNP_P3P, @ref SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points. +- @ref SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions. +- @ref SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation. +Number of input points must be 4 and 2 solutions are returned. 
Object points must be defined in the following order: + - point 0: [-squareLength / 2, squareLength / 2, 0] + - point 1: [ squareLength / 2, squareLength / 2, 0] + - point 2: [ squareLength / 2, -squareLength / 2, 0] + - point 3: [-squareLength / 2, -squareLength / 2, 0] +- for all the other flags, number of input points must be >= 4 and object points can be in any configuration. +Only 1 solution is returned. + +@param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or +1xN/Nx1 3-channel, where N is the number of points. vector\ can be also passed here. +@param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel, +where N is the number of points. vector\ can be also passed here. +@param cameraMatrix Input camera intrinsic matrix \f$\cameramatrix{A}\f$ . +@param distCoeffs Input vector of distortion coefficients +\f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are +assumed. +@param rvecs Vector of output rotation vectors (see @ref Rodrigues ) that, together with tvecs, brings points from +the model coordinate system to the camera coordinate system. +@param tvecs Vector of output translation vectors. +@param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses +the provided rvec and tvec values as initial approximations of the rotation and translation +vectors, respectively, and further optimizes them. +@param flags Method for solving a PnP problem: +- @ref SOLVEPNP_ITERATIVE Iterative method is based on a Levenberg-Marquardt optimization. In +this case the function finds such a pose that minimizes reprojection error, that is the sum +of squared distances between the observed projections imagePoints and the projected (using +projectPoints ) objectPoints . +- @ref SOLVEPNP_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang +"Complete Solution Classification for the Perspective-Three-Point Problem" (@cite gao2003complete). +In this case the function requires exactly four object and image points. +- @ref SOLVEPNP_AP3P Method is based on the paper of T. Ke, S. Roumeliotis +"An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (@cite Ke17). +In this case the function requires exactly four object and image points. +- @ref SOLVEPNP_EPNP Method has been introduced by F.Moreno-Noguer, V.Lepetit and P.Fua in the +paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation" (@cite lepetit2009epnp). +- @ref SOLVEPNP_DLS **Broken implementation. Using this flag will fallback to EPnP.** \n +Method is based on the paper of Joel A. Hesch and Stergios I. Roumeliotis. +"A Direct Least-Squares (DLS) Method for PnP" (@cite hesch2011direct). +- @ref SOLVEPNP_UPNP **Broken implementation. Using this flag will fallback to EPnP.** \n +Method is based on the paper of A.Penate-Sanchez, J.Andrade-Cetto, +F.Moreno-Noguer. "Exhaustive Linearization for Robust Camera Pose and Focal Length +Estimation" (@cite penate2013exhaustive). In this case the function also estimates the parameters \f$f_x\f$ and \f$f_y\f$ +assuming that both have the same value. Then the cameraMatrix is updated with the estimated +focal length. +- @ref SOLVEPNP_IPPE Method is based on the paper of T. Collins and A. Bartoli. +"Infinitesimal Plane-Based Pose Estimation" (@cite Collins14). This method requires coplanar object points. +- @ref SOLVEPNP_IPPE_SQUARE Method is based on the paper of Toby Collins and Adrien Bartoli. 
+"Infinitesimal Plane-Based Pose Estimation" (@cite Collins14). This method is suitable for marker pose estimation. +It requires 4 coplanar object points defined in the following order: + - point 0: [-squareLength / 2, squareLength / 2, 0] + - point 1: [ squareLength / 2, squareLength / 2, 0] + - point 2: [ squareLength / 2, -squareLength / 2, 0] + - point 3: [-squareLength / 2, -squareLength / 2, 0] +@param rvec Rotation vector used to initialize an iterative PnP refinement algorithm, when flag is @ref SOLVEPNP_ITERATIVE +and useExtrinsicGuess is set to true. +@param tvec Translation vector used to initialize an iterative PnP refinement algorithm, when flag is @ref SOLVEPNP_ITERATIVE +and useExtrinsicGuess is set to true. +@param reprojectionError Optional vector of reprojection error, that is the RMS error +(\f$ \text{RMSE} = \sqrt{\frac{\sum_{i}^{N} \left ( \hat{y_i} - y_i \right )^2}{N}} \f$) between the input image points +and the 3D object points projected with the estimated pose. + +The function estimates the object pose given a set of object points, their corresponding image +projections, as well as the camera intrinsic matrix and the distortion coefficients, see the figure below +(more precisely, the X-axis of the camera frame is pointing to the right, the Y-axis downward +and the Z-axis forward). + +![](pnp.jpg) + +Points expressed in the world frame \f$ \bf{X}_w \f$ are projected into the image plane \f$ \left[ u, v \right] \f$ +using the perspective projection model \f$ \Pi \f$ and the camera intrinsic parameters matrix \f$ \bf{A} \f$: + +\f[ + \begin{align*} + \begin{bmatrix} + u \\ + v \\ + 1 + \end{bmatrix} &= + \bf{A} \hspace{0.1em} \Pi \hspace{0.2em} ^{c}\bf{T}_w + \begin{bmatrix} + X_{w} \\ + Y_{w} \\ + Z_{w} \\ + 1 + \end{bmatrix} \\ + \begin{bmatrix} + u \\ + v \\ + 1 + \end{bmatrix} &= + \begin{bmatrix} + f_x & 0 & c_x \\ + 0 & f_y & c_y \\ + 0 & 0 & 1 + \end{bmatrix} + \begin{bmatrix} + 1 & 0 & 0 & 0 \\ + 0 & 1 & 0 & 0 \\ + 0 & 0 & 1 & 0 + \end{bmatrix} + \begin{bmatrix} + r_{11} & r_{12} & r_{13} & t_x \\ + r_{21} & r_{22} & r_{23} & t_y \\ + r_{31} & r_{32} & r_{33} & t_z \\ + 0 & 0 & 0 & 1 + \end{bmatrix} + \begin{bmatrix} + X_{w} \\ + Y_{w} \\ + Z_{w} \\ + 1 + \end{bmatrix} + \end{align*} +\f] + +The estimated pose is thus the rotation (`rvec`) and the translation (`tvec`) vectors that allow transforming +a 3D point expressed in the world frame into the camera frame: + +\f[ + \begin{align*} + \begin{bmatrix} + X_c \\ + Y_c \\ + Z_c \\ + 1 + \end{bmatrix} &= + \hspace{0.2em} ^{c}\bf{T}_w + \begin{bmatrix} + X_{w} \\ + Y_{w} \\ + Z_{w} \\ + 1 + \end{bmatrix} \\ + \begin{bmatrix} + X_c \\ + Y_c \\ + Z_c \\ + 1 + \end{bmatrix} &= + \begin{bmatrix} + r_{11} & r_{12} & r_{13} & t_x \\ + r_{21} & r_{22} & r_{23} & t_y \\ + r_{31} & r_{32} & r_{33} & t_z \\ + 0 & 0 & 0 & 1 + \end{bmatrix} + \begin{bmatrix} + X_{w} \\ + Y_{w} \\ + Z_{w} \\ + 1 + \end{bmatrix} + \end{align*} +\f] + +@note + - An example of how to use solvePnP for planar augmented reality can be found at + opencv_source_code/samples/python/plane_ar.py + - If you are using Python: + - Numpy array slices won't work as input because solvePnP requires contiguous + arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of + modules/calib3d/src/solvepnp.cpp version 2.4.9) + - The P3P algorithm requires image points to be in an array of shape (N,1,2) due + to its calling of cv::undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9) + which requires 2-channel 
information. + - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of + it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints = + np.ascontiguousarray(D[:,:2]).reshape((N,1,2)) + - The methods @ref SOLVEPNP_DLS and @ref SOLVEPNP_UPNP cannot be used as the current implementations are + unstable and sometimes give completely wrong results. If you pass one of these two + flags, @ref SOLVEPNP_EPNP method will be used instead. + - The minimum number of points is 4 in the general case. In the case of @ref SOLVEPNP_P3P and @ref SOLVEPNP_AP3P + methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions + of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error). + - With @ref SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points + are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the + global solution to converge. + - With @ref SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar. + - With @ref SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation. + Number of input points must be 4. Object points must be defined in the following order: + - point 0: [-squareLength / 2, squareLength / 2, 0] + - point 1: [ squareLength / 2, squareLength / 2, 0] + - point 2: [ squareLength / 2, -squareLength / 2, 0] + - point 3: [-squareLength / 2, -squareLength / 2, 0] + */ +CV_EXPORTS_W int solvePnPGeneric( InputArray objectPoints, InputArray imagePoints, + InputArray cameraMatrix, InputArray distCoeffs, + OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, + bool useExtrinsicGuess = false, SolvePnPMethod flags = SOLVEPNP_ITERATIVE, + InputArray rvec = noArray(), InputArray tvec = noArray(), + OutputArray reprojectionError = noArray() ); + +/** @brief Finds an initial camera intrinsic matrix from 3D-2D point correspondences. + +@param objectPoints Vector of vectors of the calibration pattern points in the calibration pattern +coordinate space. In the old interface all the per-view vectors are concatenated. See +calibrateCamera for details. +@param imagePoints Vector of vectors of the projections of the calibration pattern points. In the +old interface all the per-view vectors are concatenated. +@param imageSize Image size in pixels used to initialize the principal point. +@param aspectRatio If it is zero or negative, both \f$f_x\f$ and \f$f_y\f$ are estimated independently. +Otherwise, \f$f_x = f_y * \texttt{aspectRatio}\f$ . + +The function estimates and returns an initial camera intrinsic matrix for the camera calibration process. +Currently, the function only supports planar calibration patterns, which are patterns where each +object point has z-coordinate =0. + */ +CV_EXPORTS_W Mat initCameraMatrix2D( InputArrayOfArrays objectPoints, + InputArrayOfArrays imagePoints, + Size imageSize, double aspectRatio = 1.0 ); + +/** @brief Finds the positions of internal corners of the chessboard. + +@param image Source chessboard view. It must be an 8-bit grayscale or color image. +@param patternSize Number of inner corners per a chessboard row and column +( patternSize = cvSize(points_per_row,points_per_colum) = cvSize(columns,rows) ). +@param corners Output array of detected corners. 
+@param flags Various operation flags that can be zero or a combination of the following values: +- @ref CALIB_CB_ADAPTIVE_THRESH Use adaptive thresholding to convert the image to black +and white, rather than a fixed threshold level (computed from the average image brightness). +- @ref CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with equalizeHist before +applying fixed or adaptive thresholding. +- @ref CALIB_CB_FILTER_QUADS Use additional criteria (like contour area, perimeter, +square-like shape) to filter out false quads extracted at the contour retrieval stage. +- @ref CALIB_CB_FAST_CHECK Run a fast check on the image that looks for chessboard corners, +and shortcut the call if none is found. This can drastically speed up the call in the +degenerate condition when no chessboard is observed. + +The function attempts to determine whether the input image is a view of the chessboard pattern and +locate the internal chessboard corners. The function returns a non-zero value if all of the corners +are found and they are placed in a certain order (row by row, left to right in every row). +Otherwise, if the function fails to find all the corners or reorder them, it returns 0. For example, +a regular chessboard has 8 x 8 squares and 7 x 7 internal corners, that is, points where the black +squares touch each other. The detected coordinates are approximate, and to determine their positions +more accurately, the function calls cornerSubPix. You also may use the function cornerSubPix with +different parameters if returned coordinates are not accurate enough. + +Sample usage of detecting and drawing chessboard corners: : +@code + Size patternsize(8,6); //interior number of corners + Mat gray = ....; //source image + vector corners; //this will be filled by the detected corners + + //CALIB_CB_FAST_CHECK saves a lot of time on images + //that do not contain any chessboard corners + bool patternfound = findChessboardCorners(gray, patternsize, corners, + CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE + + CALIB_CB_FAST_CHECK); + + if(patternfound) + cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1), + TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1)); + + drawChessboardCorners(img, patternsize, Mat(corners), patternfound); +@endcode +@note The function requires white space (like a square-thick border, the wider the better) around +the board to make the detection more robust in various environments. Otherwise, if there is no +border and the background is dark, the outer black squares cannot be segmented properly and so the +square grouping and ordering algorithm fails. + */ +CV_EXPORTS_W bool findChessboardCorners( InputArray image, Size patternSize, OutputArray corners, + int flags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE ); + +//! finds subpixel-accurate positions of the chessboard corners +CV_EXPORTS_W bool find4QuadCornerSubpix( InputArray img, InputOutputArray corners, Size region_size ); + +/** @brief Renders the detected chessboard corners. + +@param image Destination image. It must be an 8-bit color image. +@param patternSize Number of inner corners per a chessboard row and column +(patternSize = cv::Size(points_per_row,points_per_column)). +@param corners Array of detected corners, the output of findChessboardCorners. +@param patternWasFound Parameter indicating whether the complete board was found or not. The +return value of findChessboardCorners should be passed here. 
+ +The function draws individual chessboard corners detected either as red circles if the board was not +found, or as colored corners connected with lines if the board was found. + */ +CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSize, + InputArray corners, bool patternWasFound ); + +/** @brief Draw axes of the world/object coordinate system from pose estimation. @sa solvePnP + +@param image Input/output image. It must have 1 or 3 channels. The number of channels is not altered. +@param cameraMatrix Input 3x3 floating-point matrix of camera intrinsic parameters. +\f$\cameramatrix{A}\f$ +@param distCoeffs Input vector of distortion coefficients +\f$\distcoeffs\f$. If the vector is empty, the zero distortion coefficients are assumed. +@param rvec Rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from +the model coordinate system to the camera coordinate system. +@param tvec Translation vector. +@param length Length of the painted axes in the same unit than tvec (usually in meters). +@param thickness Line thickness of the painted axes. + +This function draws the axes of the world/object coordinate system w.r.t. to the camera frame. +OX is drawn in red, OY in green and OZ in blue. + */ +CV_EXPORTS_W void drawFrameAxes(InputOutputArray image, InputArray cameraMatrix, InputArray distCoeffs, + InputArray rvec, InputArray tvec, float length, int thickness=3); + +struct CV_EXPORTS_W_SIMPLE CirclesGridFinderParameters +{ + CV_WRAP CirclesGridFinderParameters(); + CV_PROP_RW cv::Size2f densityNeighborhoodSize; + CV_PROP_RW float minDensity; + CV_PROP_RW int kmeansAttempts; + CV_PROP_RW int minDistanceToAddKeypoint; + CV_PROP_RW int keypointScale; + CV_PROP_RW float minGraphConfidence; + CV_PROP_RW float vertexGain; + CV_PROP_RW float vertexPenalty; + CV_PROP_RW float existingVertexGain; + CV_PROP_RW float edgeGain; + CV_PROP_RW float edgePenalty; + CV_PROP_RW float convexHullFactor; + CV_PROP_RW float minRNGEdgeSwitchDist; + + enum GridType + { + SYMMETRIC_GRID, ASYMMETRIC_GRID + }; + GridType gridType; +}; + +struct CV_EXPORTS_W_SIMPLE CirclesGridFinderParameters2 : public CirclesGridFinderParameters +{ + CV_WRAP CirclesGridFinderParameters2(); + + CV_PROP_RW float squareSize; //!< Distance between two adjacent points. Used by CALIB_CB_CLUSTERING. + CV_PROP_RW float maxRectifiedDistance; //!< Max deviation from prediction. Used by CALIB_CB_CLUSTERING. +}; + +/** @brief Finds centers in the grid of circles. + +@param image grid view of input circles; it must be an 8-bit grayscale or color image. +@param patternSize number of circles per row and column +( patternSize = Size(points_per_row, points_per_colum) ). +@param centers output array of detected centers. +@param flags various operation flags that can be one of the following values: +- @ref CALIB_CB_SYMMETRIC_GRID uses symmetric pattern of circles. +- @ref CALIB_CB_ASYMMETRIC_GRID uses asymmetric pattern of circles. +- @ref CALIB_CB_CLUSTERING uses a special algorithm for grid detection. It is more robust to +perspective distortions but much more sensitive to background clutter. +@param blobDetector feature detector that finds blobs like dark circles on light background. + If `blobDetector` is NULL then `image` represents Point2f array of candidates. +@param parameters struct for finding circles in a grid pattern. + +The function attempts to determine whether the input image contains a grid of circles. If it is, the +function locates centers of the circles. 
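As a quick sanity check of a pose with the drawFrameAxes helper documented above, one might do something like the following sketch, assuming `frame`, `cameraMatrix`, `distCoeffs` and the correspondence lists already exist:
@code
    cv::Mat rvec, tvec;
    if (cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec))
    {
        // Draw a 5 cm axis triad at the object origin: OX in red, OY in green, OZ in blue.
        cv::drawFrameAxes(frame, cameraMatrix, distCoeffs, rvec, tvec, 0.05f, 2);
    }
@endcode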
The function returns a non-zero value if all of the centers +have been found and they have been placed in a certain order (row by row, left to right in every +row). Otherwise, if the function fails to find all the corners or reorder them, it returns 0. + +Sample usage of detecting and drawing the centers of circles: : +@code + Size patternsize(7,7); //number of centers + Mat gray = ...; //source image + vector centers; //this will be filled by the detected centers + + bool patternfound = findCirclesGrid(gray, patternsize, centers); + + drawChessboardCorners(img, patternsize, Mat(centers), patternfound); +@endcode +@note The function requires white space (like a square-thick border, the wider the better) around +the board to make the detection more robust in various environments. + */ +CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize, + OutputArray centers, int flags, + const Ptr &blobDetector, + CirclesGridFinderParameters parameters); + +/** @overload */ +CV_EXPORTS_W bool findCirclesGrid2( InputArray image, Size patternSize, + OutputArray centers, int flags, + const Ptr &blobDetector, + CirclesGridFinderParameters2 parameters); + +/** @overload */ +CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize, + OutputArray centers, int flags = CALIB_CB_SYMMETRIC_GRID, + const Ptr &blobDetector = SimpleBlobDetector::create()); + +/** @brief Finds the camera intrinsic and extrinsic parameters from several views of a calibration +pattern. + +@param objectPoints In the new interface it is a vector of vectors of calibration pattern points in +the calibration pattern coordinate space (e.g. std::vector>). The outer +vector contains as many elements as the number of pattern views. If the same calibration pattern +is shown in each view and it is fully visible, all the vectors will be the same. Although, it is +possible to use partially occluded patterns or even different patterns in different views. Then, +the vectors will be different. Although the points are 3D, they all lie in the calibration pattern's +XY coordinate plane (thus 0 in the Z-coordinate), if the used calibration pattern is a planar rig. +In the old interface all the vectors of object points from different views are concatenated +together. +@param imagePoints In the new interface it is a vector of vectors of the projections of calibration +pattern points (e.g. std::vector>). imagePoints.size() and +objectPoints.size(), and imagePoints[i].size() and objectPoints[i].size() for each i, must be equal, +respectively. In the old interface all the vectors of object points from different views are +concatenated together. +@param imageSize Size of the image used only to initialize the camera intrinsic matrix. +@param cameraMatrix Input/output 3x3 floating-point camera intrinsic matrix +\f$\cameramatrix{A}\f$ . If @ref CALIB_USE_INTRINSIC_GUESS +and/or @ref CALIB_FIX_ASPECT_RATIO, @ref CALIB_FIX_PRINCIPAL_POINT or @ref CALIB_FIX_FOCAL_LENGTH +are specified, some or all of fx, fy, cx, cy must be initialized before calling the function. +@param distCoeffs Input/output vector of distortion coefficients +\f$\distcoeffs\f$. +@param rvecs Output vector of rotation vectors (@ref Rodrigues ) estimated for each pattern view +(e.g. std::vector>). That is, each i-th rotation vector together with the corresponding +i-th translation vector (see the next output parameter description) brings the calibration pattern +from the object coordinate space (in which object points are specified) to the camera coordinate +space. 
In more technical terms, the tuple of the i-th rotation and translation vector performs
+a change of basis from object coordinate space to camera coordinate space. Due to its duality, this
+tuple is equivalent to the position of the calibration pattern with respect to the camera coordinate
+space.
+@param tvecs Output vector of translation vectors estimated for each pattern view, see parameter
+description above.
+@param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic
+parameters. Order of deviations values:
+\f$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
+ s_4, \tau_x, \tau_y)\f$ If one of the parameters is not estimated, its deviation is equal to zero.
+@param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic
+parameters. Order of deviations values: \f$(R_0, T_0, \dotsc , R_{M - 1}, T_{M - 1})\f$ where M is
+the number of pattern views. \f$R_i, T_i\f$ are concatenated 1x3 vectors.
+ @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
+@param flags Different flags that may be zero or a combination of the following values:
+- @ref CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
+fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
+center ( imageSize is used), and focal distances are computed in a least-squares fashion.
+Note that if intrinsic parameters are known, there is no need to use this function just to
+estimate extrinsic parameters. Use @ref solvePnP instead.
+- @ref CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
+optimization. It stays at the center or at a different location specified when
+ @ref CALIB_USE_INTRINSIC_GUESS is set too.
+- @ref CALIB_FIX_ASPECT_RATIO The functions consider only fy as a free parameter. The
+ratio fx/fy stays the same as in the input cameraMatrix . When
+ @ref CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
+ignored, only their ratio is computed and used further.
+- @ref CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients \f$(p_1, p_2)\f$ are set
+to zeros and stay zero.
+- @ref CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global optimization if
+ @ref CALIB_USE_INTRINSIC_GUESS is set.
+- @ref CALIB_FIX_K1,..., @ref CALIB_FIX_K6 The corresponding radial distortion
+coefficient is not changed during the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is
+set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
+- @ref CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled. To provide the
+backward compatibility, this extra flag should be explicitly specified to make the
+calibration function use the rational model and return 8 coefficients or more.
+- @ref CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the
+backward compatibility, this extra flag should be explicitly specified to make the
+calibration function use the thin prism model and return 12 coefficients or more.
+- @ref CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
+the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
+supplied distCoeffs matrix is used. Otherwise, it is set to 0.
+- @ref CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled.
To provide the +backward compatibility, this extra flag should be explicitly specified to make the +calibration function use the tilted sensor model and return 14 coefficients. +- @ref CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during +the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the +supplied distCoeffs matrix is used. Otherwise, it is set to 0. +@param criteria Termination criteria for the iterative optimization algorithm. + +@return the overall RMS re-projection error. + +The function estimates the intrinsic camera parameters and extrinsic parameters for each of the +views. The algorithm is based on @cite Zhang2000 and @cite BouguetMCT . The coordinates of 3D object +points and their corresponding 2D projections in each view must be specified. That may be achieved +by using an object with known geometry and easily detectable feature points. Such an object is +called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as +a calibration rig (see @ref findChessboardCorners). Currently, initialization of intrinsic +parameters (when @ref CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration +patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also +be used as long as initial cameraMatrix is provided. + +The algorithm performs the following steps: + +- Compute the initial intrinsic parameters (the option only available for planar calibration + patterns) or read them from the input parameters. The distortion coefficients are all set to + zeros initially unless some of CALIB_FIX_K? are specified. + +- Estimate the initial camera pose as if the intrinsic parameters have been already known. This is + done using @ref solvePnP . + +- Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error, + that is, the total sum of squared distances between the observed feature points imagePoints and + the projected (using the current estimates for camera parameters and the poses) object points + objectPoints. See @ref projectPoints for details. + +@note + If you use a non-square (i.e. non-N-by-N) grid and @ref findChessboardCorners for calibration, + and @ref calibrateCamera returns bad values (zero distortion coefficients, \f$c_x\f$ and + \f$c_y\f$ very far from the image center, and/or large differences between \f$f_x\f$ and + \f$f_y\f$ (ratios of 10:1 or more)), then you are probably using patternSize=cvSize(rows,cols) + instead of using patternSize=cvSize(cols,rows) in @ref findChessboardCorners. 
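Putting the steps above together, a minimal single-camera calibration loop could look roughly like the sketch below. The board geometry, square size, image list and termination criteria are illustrative assumptions, not prescribed values, and error handling is omitted:
@code
    cv::Size boardSize(9, 6);          // inner corners per row and column (assumed)
    float squareSize = 0.025f;         // chessboard square size in meters (assumed)

    // One copy of the board's 3D corner grid, reused for every view (all points lie in Z = 0).
    std::vector<cv::Point3f> board;
    for (int y = 0; y < boardSize.height; y++)
        for (int x = 0; x < boardSize.width; x++)
            board.push_back(cv::Point3f(x * squareSize, y * squareSize, 0.f));

    std::vector<cv::Mat> views;        // grayscale calibration images, filled elsewhere
    std::vector<std::vector<cv::Point3f>> objectPoints;
    std::vector<std::vector<cv::Point2f>> imagePoints;
    for (const cv::Mat& gray : views)
    {
        std::vector<cv::Point2f> corners;
        if (cv::findChessboardCorners(gray, boardSize, corners))
        {
            cv::cornerSubPix(gray, corners, cv::Size(11, 11), cv::Size(-1, -1),
                             cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 30, 0.01));
            imagePoints.push_back(corners);
            objectPoints.push_back(board);
        }
    }

    cv::Mat cameraMatrix, distCoeffs;
    std::vector<cv::Mat> rvecs, tvecs;
    double rms = cv::calibrateCamera(objectPoints, imagePoints, views[0].size(),
                                     cameraMatrix, distCoeffs, rvecs, tvecs);
    // rms is the overall RMS re-projection error returned by calibrateCamera.
@endcode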
+ +@sa + findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort + */ +CV_EXPORTS_AS(calibrateCameraExtended) double calibrateCamera( InputArrayOfArrays objectPoints, + InputArrayOfArrays imagePoints, Size imageSize, + InputOutputArray cameraMatrix, InputOutputArray distCoeffs, + OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, + OutputArray stdDeviationsIntrinsics, + OutputArray stdDeviationsExtrinsics, + OutputArray perViewErrors, + int flags = 0, TermCriteria criteria = TermCriteria( + TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) ); + +/** @overload double calibrateCamera( InputArrayOfArrays objectPoints, + InputArrayOfArrays imagePoints, Size imageSize, + InputOutputArray cameraMatrix, InputOutputArray distCoeffs, + OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, + OutputArray stdDeviations, OutputArray perViewErrors, + int flags = 0, TermCriteria criteria = TermCriteria( + TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) ) + */ +CV_EXPORTS_W double calibrateCamera( InputArrayOfArrays objectPoints, + InputArrayOfArrays imagePoints, Size imageSize, + InputOutputArray cameraMatrix, InputOutputArray distCoeffs, + OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, + int flags = 0, TermCriteria criteria = TermCriteria( + TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) ); + +/** @brief Computes useful camera characteristics from the camera intrinsic matrix. + +@param cameraMatrix Input camera intrinsic matrix that can be estimated by calibrateCamera or +stereoCalibrate . +@param imageSize Input image size in pixels. +@param apertureWidth Physical width in mm of the sensor. +@param apertureHeight Physical height in mm of the sensor. +@param fovx Output field of view in degrees along the horizontal sensor axis. +@param fovy Output field of view in degrees along the vertical sensor axis. +@param focalLength Focal length of the lens in mm. +@param principalPoint Principal point in mm. +@param aspectRatio \f$f_y/f_x\f$ + +The function computes various useful camera characteristics from the previously estimated camera +matrix. + +@note + Do keep in mind that the unity measure 'mm' stands for whatever unit of measure one chooses for + the chessboard pitch (it can thus be any value). + */ +CV_EXPORTS_W void calibrationMatrixValues( InputArray cameraMatrix, Size imageSize, + double apertureWidth, double apertureHeight, + CV_OUT double& fovx, CV_OUT double& fovy, + CV_OUT double& focalLength, CV_OUT Point2d& principalPoint, + CV_OUT double& aspectRatio ); + +/** @brief Calibrates a stereo camera set up. This function finds the intrinsic parameters +for each of the two cameras and the extrinsic parameters between the two cameras. + +@param objectPoints Vector of vectors of the calibration pattern points. The same structure as +in @ref calibrateCamera. For each pattern view, both cameras need to see the same object +points. Therefore, objectPoints.size(), imagePoints1.size(), and imagePoints2.size() need to be +equal as well as objectPoints[i].size(), imagePoints1[i].size(), and imagePoints2[i].size() need to +be equal for each i. +@param imagePoints1 Vector of vectors of the projections of the calibration pattern points, +observed by the first camera. The same structure as in @ref calibrateCamera. +@param imagePoints2 Vector of vectors of the projections of the calibration pattern points, +observed by the second camera. The same structure as in @ref calibrateCamera. 
+@param cameraMatrix1 Input/output camera intrinsic matrix for the first camera, the same as in +@ref calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below. +@param distCoeffs1 Input/output vector of distortion coefficients, the same as in +@ref calibrateCamera. +@param cameraMatrix2 Input/output second camera intrinsic matrix for the second camera. See description for +cameraMatrix1. +@param distCoeffs2 Input/output lens distortion coefficients for the second camera. See +description for distCoeffs1. +@param imageSize Size of the image used only to initialize the camera intrinsic matrices. +@param R Output rotation matrix. Together with the translation vector T, this matrix brings +points given in the first camera's coordinate system to points in the second camera's +coordinate system. In more technical terms, the tuple of R and T performs a change of basis +from the first camera's coordinate system to the second camera's coordinate system. Due to its +duality, this tuple is equivalent to the position of the first camera with respect to the +second camera coordinate system. +@param T Output translation vector, see description above. +@param E Output essential matrix. +@param F Output fundamental matrix. +@param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view. +@param flags Different flags that may be zero or a combination of the following values: +- @ref CALIB_FIX_INTRINSIC Fix cameraMatrix? and distCoeffs? so that only R, T, E, and F +matrices are estimated. +- @ref CALIB_USE_INTRINSIC_GUESS Optimize some or all of the intrinsic parameters +according to the specified flags. Initial values are provided by the user. +- @ref CALIB_USE_EXTRINSIC_GUESS R and T contain valid initial values that are optimized further. +Otherwise R and T are initialized to the median value of the pattern views (each dimension separately). +- @ref CALIB_FIX_PRINCIPAL_POINT Fix the principal points during the optimization. +- @ref CALIB_FIX_FOCAL_LENGTH Fix \f$f^{(j)}_x\f$ and \f$f^{(j)}_y\f$ . +- @ref CALIB_FIX_ASPECT_RATIO Optimize \f$f^{(j)}_y\f$ . Fix the ratio \f$f^{(j)}_x/f^{(j)}_y\f$ +. +- @ref CALIB_SAME_FOCAL_LENGTH Enforce \f$f^{(0)}_x=f^{(1)}_x\f$ and \f$f^{(0)}_y=f^{(1)}_y\f$ . +- @ref CALIB_ZERO_TANGENT_DIST Set tangential distortion coefficients for each camera to +zeros and fix there. +- @ref CALIB_FIX_K1,..., @ref CALIB_FIX_K6 Do not change the corresponding radial +distortion coefficient during the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is set, +the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0. +- @ref CALIB_RATIONAL_MODEL Enable coefficients k4, k5, and k6. To provide the backward +compatibility, this extra flag should be explicitly specified to make the calibration +function use the rational model and return 8 coefficients. If the flag is not set, the +function computes and returns only 5 distortion coefficients. +- @ref CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the +backward compatibility, this extra flag should be explicitly specified to make the +calibration function use the thin prism model and return 12 coefficients. If the flag is not +set, the function computes and returns only 5 distortion coefficients. +- @ref CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during +the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the +supplied distCoeffs matrix is used. 
Otherwise, it is set to 0. +- @ref CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the +backward compatibility, this extra flag should be explicitly specified to make the +calibration function use the tilted sensor model and return 14 coefficients. If the flag is not +set, the function computes and returns only 5 distortion coefficients. +- @ref CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during +the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the +supplied distCoeffs matrix is used. Otherwise, it is set to 0. +@param criteria Termination criteria for the iterative optimization algorithm. + +The function estimates the transformation between two cameras making a stereo pair. If one computes +the poses of an object relative to the first camera and to the second camera, +( \f$R_1\f$,\f$T_1\f$ ) and (\f$R_2\f$,\f$T_2\f$), respectively, for a stereo camera where the +relative position and orientation between the two cameras are fixed, then those poses definitely +relate to each other. This means, if the relative position and orientation (\f$R\f$,\f$T\f$) of the +two cameras is known, it is possible to compute (\f$R_2\f$,\f$T_2\f$) when (\f$R_1\f$,\f$T_1\f$) is +given. This is what the described function does. It computes (\f$R\f$,\f$T\f$) such that: + +\f[R_2=R R_1\f] +\f[T_2=R T_1 + T.\f] + +Therefore, one can compute the coordinate representation of a 3D point for the second camera's +coordinate system when given the point's coordinate representation in the first camera's coordinate +system: + +\f[\begin{bmatrix} +X_2 \\ +Y_2 \\ +Z_2 \\ +1 +\end{bmatrix} = \begin{bmatrix} +R & T \\ +0 & 1 +\end{bmatrix} \begin{bmatrix} +X_1 \\ +Y_1 \\ +Z_1 \\ +1 +\end{bmatrix}.\f] + + +Optionally, it computes the essential matrix E: + +\f[E= \vecthreethree{0}{-T_2}{T_1}{T_2}{0}{-T_0}{-T_1}{T_0}{0} R\f] + +where \f$T_i\f$ are components of the translation vector \f$T\f$ : \f$T=[T_0, T_1, T_2]^T\f$ . +And the function can also compute the fundamental matrix F: + +\f[F = cameraMatrix2^{-T}\cdot E \cdot cameraMatrix1^{-1}\f] + +Besides the stereo-related information, the function can also perform a full calibration of each of +the two cameras. However, due to the high dimensionality of the parameter space and noise in the +input data, the function can diverge from the correct solution. If the intrinsic parameters can be +estimated with high accuracy for each of the cameras individually (for example, using +calibrateCamera ), you are recommended to do so and then pass @ref CALIB_FIX_INTRINSIC flag to the +function along with the computed intrinsic parameters. Otherwise, if all the parameters are +estimated at once, it makes sense to restrict some parameters, for example, pass + @ref CALIB_SAME_FOCAL_LENGTH and @ref CALIB_ZERO_TANGENT_DIST flags, which is usually a +reasonable assumption. + +Similarly to calibrateCamera, the function minimizes the total re-projection error for all the +points in all the available views from both cameras. The function returns the final value of the +re-projection error. 
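For reference, a typical invocation of the overload without per-view errors might look like this sketch. The intrinsics K1/D1/K2/D2 are assumed to come from separate calibrateCamera runs, the point lists are placeholders gathered as for calibrateCamera, and the image size is arbitrary:
@code
    std::vector<std::vector<cv::Point3f>> objectPoints;             // same board seen by both cameras
    std::vector<std::vector<cv::Point2f>> imagePoints1, imagePoints2;
    cv::Mat K1, D1, K2, D2;                                         // per-camera intrinsics (assumed)
    cv::Size imageSize(1280, 720);                                  // assumed sensor resolution

    cv::Mat R, T, E, F;
    double rms = cv::stereoCalibrate(objectPoints, imagePoints1, imagePoints2,
                                     K1, D1, K2, D2, imageSize, R, T, E, F,
                                     cv::CALIB_FIX_INTRINSIC);
    // R and T now map points from the first camera's frame into the second camera's frame.
@endcode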
+ */ +CV_EXPORTS_AS(stereoCalibrateExtended) double stereoCalibrate( InputArrayOfArrays objectPoints, + InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, + InputOutputArray cameraMatrix1, InputOutputArray distCoeffs1, + InputOutputArray cameraMatrix2, InputOutputArray distCoeffs2, + Size imageSize, InputOutputArray R,InputOutputArray T, OutputArray E, OutputArray F, + OutputArray perViewErrors, int flags = CALIB_FIX_INTRINSIC, + TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6) ); + +/// @overload +CV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints, + InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, + InputOutputArray cameraMatrix1, InputOutputArray distCoeffs1, + InputOutputArray cameraMatrix2, InputOutputArray distCoeffs2, + Size imageSize, OutputArray R,OutputArray T, OutputArray E, OutputArray F, + int flags = CALIB_FIX_INTRINSIC, + TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6) ); + +/** @brief Computes rectification transforms for each head of a calibrated stereo camera. + +@param cameraMatrix1 First camera intrinsic matrix. +@param distCoeffs1 First camera distortion parameters. +@param cameraMatrix2 Second camera intrinsic matrix. +@param distCoeffs2 Second camera distortion parameters. +@param imageSize Size of the image used for stereo calibration. +@param R Rotation matrix from the coordinate system of the first camera to the second camera, +see @ref stereoCalibrate. +@param T Translation vector from the coordinate system of the first camera to the second camera, +see @ref stereoCalibrate. +@param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix +brings points given in the unrectified first camera's coordinate system to points in the rectified +first camera's coordinate system. In more technical terms, it performs a change of basis from the +unrectified first camera's coordinate system to the rectified first camera's coordinate system. +@param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix +brings points given in the unrectified second camera's coordinate system to points in the rectified +second camera's coordinate system. In more technical terms, it performs a change of basis from the +unrectified second camera's coordinate system to the rectified second camera's coordinate system. +@param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first +camera, i.e. it projects points given in the rectified first camera coordinate system into the +rectified first camera's image. +@param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second +camera, i.e. it projects points given in the rectified first camera coordinate system into the +rectified second camera's image. +@param Q Output \f$4 \times 4\f$ disparity-to-depth mapping matrix (see @ref reprojectImageTo3D). +@param flags Operation flags that may be zero or @ref CALIB_ZERO_DISPARITY . If the flag is set, +the function makes the principal points of each camera have the same pixel coordinates in the +rectified views. And if the flag is not set, the function may still shift the images in the +horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the +useful image area. +@param alpha Free scaling parameter. If it is -1 or absent, the function performs the default +scaling. 
Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified +images are zoomed and shifted so that only valid pixels are visible (no black areas after +rectification). alpha=1 means that the rectified image is decimated and shifted so that all the +pixels from the original images from the cameras are retained in the rectified images (no source +image pixels are lost). Any intermediate value yields an intermediate result between +those two extreme cases. +@param newImageSize New image resolution after rectification. The same size should be passed to +initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0) +is passed (default), it is set to the original imageSize . Setting it to a larger value can help you +preserve details in the original image, especially when there is a big radial distortion. +@param validPixROI1 Optional output rectangles inside the rectified images where all the pixels +are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller +(see the picture below). +@param validPixROI2 Optional output rectangles inside the rectified images where all the pixels +are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller +(see the picture below). + +The function computes the rotation matrices for each camera that (virtually) make both camera image +planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies +the dense stereo correspondence problem. The function takes the matrices computed by stereoCalibrate +as input. As output, it provides two rotation matrices and also two projection matrices in the new +coordinates. The function distinguishes the following two cases: + +- **Horizontal stereo**: the first and the second camera views are shifted relative to each other + mainly along the x-axis (with possible small vertical shift). In the rectified images, the + corresponding epipolar lines in the left and right cameras are horizontal and have the same + y-coordinate. P1 and P2 look like: + + \f[\texttt{P1} = \begin{bmatrix} + f & 0 & cx_1 & 0 \\ + 0 & f & cy & 0 \\ + 0 & 0 & 1 & 0 + \end{bmatrix}\f] + + \f[\texttt{P2} = \begin{bmatrix} + f & 0 & cx_2 & T_x*f \\ + 0 & f & cy & 0 \\ + 0 & 0 & 1 & 0 + \end{bmatrix} ,\f] + + where \f$T_x\f$ is a horizontal shift between the cameras and \f$cx_1=cx_2\f$ if + @ref CALIB_ZERO_DISPARITY is set. + +- **Vertical stereo**: the first and the second camera views are shifted relative to each other + mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar + lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like: + + \f[\texttt{P1} = \begin{bmatrix} + f & 0 & cx & 0 \\ + 0 & f & cy_1 & 0 \\ + 0 & 0 & 1 & 0 + \end{bmatrix}\f] + + \f[\texttt{P2} = \begin{bmatrix} + f & 0 & cx & 0 \\ + 0 & f & cy_2 & T_y*f \\ + 0 & 0 & 1 & 0 + \end{bmatrix},\f] + + where \f$T_y\f$ is a vertical shift between the cameras and \f$cy_1=cy_2\f$ if + @ref CALIB_ZERO_DISPARITY is set. + +As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera +matrices. The matrices, together with R1 and R2 , can then be passed to initUndistortRectifyMap to +initialize the rectification map for each camera. + +See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through +the corresponding image regions. 
This means that the images are well rectified, which is what most +stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that +their interiors are all valid pixels. + +![image](pics/stereo_undistort.jpg) + */ +CV_EXPORTS_W void stereoRectify( InputArray cameraMatrix1, InputArray distCoeffs1, + InputArray cameraMatrix2, InputArray distCoeffs2, + Size imageSize, InputArray R, InputArray T, + OutputArray R1, OutputArray R2, + OutputArray P1, OutputArray P2, + OutputArray Q, int flags = CALIB_ZERO_DISPARITY, + double alpha = -1, Size newImageSize = Size(), + CV_OUT Rect* validPixROI1 = 0, CV_OUT Rect* validPixROI2 = 0 ); + +/** @brief Computes a rectification transform for an uncalibrated stereo camera. + +@param points1 Array of feature points in the first image. +@param points2 The corresponding points in the second image. The same formats as in +findFundamentalMat are supported. +@param F Input fundamental matrix. It can be computed from the same set of point pairs using +findFundamentalMat . +@param imgSize Size of the image. +@param H1 Output rectification homography matrix for the first image. +@param H2 Output rectification homography matrix for the second image. +@param threshold Optional threshold used to filter out the outliers. If the parameter is greater +than zero, all the point pairs that do not comply with the epipolar geometry (that is, the points +for which \f$|\texttt{points2[i]}^T*\texttt{F}*\texttt{points1[i]}|>\texttt{threshold}\f$ ) are +rejected prior to computing the homographies. Otherwise, all the points are considered inliers. + +The function computes the rectification transformations without knowing intrinsic parameters of the +cameras and their relative position in the space, which explains the suffix "uncalibrated". Another +related difference from stereoRectify is that the function outputs not the rectification +transformations in the object (3D) space, but the planar perspective transformations encoded by the +homography matrices H1 and H2 . The function implements the algorithm @cite Hartley99 . + +@note + While the algorithm does not need to know the intrinsic parameters of the cameras, it heavily + depends on the epipolar geometry. Therefore, if the camera lenses have a significant distortion, + it would be better to correct it before computing the fundamental matrix and calling this + function. For example, distortion coefficients can be estimated for each head of stereo camera + separately by using calibrateCamera . Then, the images can be corrected using undistort , or + just the point coordinates can be corrected with undistortPoints . + */ +CV_EXPORTS_W bool stereoRectifyUncalibrated( InputArray points1, InputArray points2, + InputArray F, Size imgSize, + OutputArray H1, OutputArray H2, + double threshold = 5 ); + +//! computes the rectification transformations for 3-head camera, where all the heads are on the same line. 
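A common follow-up to stereoRectify, sketched below under the assumption that K1/D1/K2/D2, R, T and imageSize come from a previous stereoCalibrate call, is to build per-camera remap tables with initUndistortRectifyMap once and then rectify every incoming frame pair with remap:
@code
    cv::Mat R1, R2, P1, P2, Q;
    cv::Rect roi1, roi2;
    cv::stereoRectify(K1, D1, K2, D2, imageSize, R, T, R1, R2, P1, P2, Q,
                      cv::CALIB_ZERO_DISPARITY, 0 /*alpha*/, imageSize, &roi1, &roi2);

    // Build the remap tables once, then reuse them for every frame pair.
    cv::Mat map1x, map1y, map2x, map2y;
    cv::initUndistortRectifyMap(K1, D1, R1, P1, imageSize, CV_16SC2, map1x, map1y);
    cv::initUndistortRectifyMap(K2, D2, R2, P2, imageSize, CV_16SC2, map2x, map2y);

    cv::Mat left, right;                 // incoming frame pair, filled elsewhere
    cv::Mat rectLeft, rectRight;
    cv::remap(left,  rectLeft,  map1x, map1y, cv::INTER_LINEAR);
    cv::remap(right, rectRight, map2x, map2y, cv::INTER_LINEAR);
@endcode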
+CV_EXPORTS_W float rectify3Collinear( InputArray cameraMatrix1, InputArray distCoeffs1, + InputArray cameraMatrix2, InputArray distCoeffs2, + InputArray cameraMatrix3, InputArray distCoeffs3, + InputArrayOfArrays imgpt1, InputArrayOfArrays imgpt3, + Size imageSize, InputArray R12, InputArray T12, + InputArray R13, InputArray T13, + OutputArray R1, OutputArray R2, OutputArray R3, + OutputArray P1, OutputArray P2, OutputArray P3, + OutputArray Q, double alpha, Size newImgSize, + CV_OUT Rect* roi1, CV_OUT Rect* roi2, int flags ); + +/** @brief Returns the new camera intrinsic matrix based on the free scaling parameter. + +@param cameraMatrix Input camera intrinsic matrix. +@param distCoeffs Input vector of distortion coefficients +\f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are +assumed. +@param imageSize Original image size. +@param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are +valid) and 1 (when all the source image pixels are retained in the undistorted image). See +stereoRectify for details. +@param newImgSize Image size after rectification. By default, it is set to imageSize . +@param validPixROI Optional output rectangle that outlines all-good-pixels region in the +undistorted image. See roi1, roi2 description in stereoRectify . +@param centerPrincipalPoint Optional flag that indicates whether in the new camera intrinsic matrix the +principal point should be at the image center or not. By default, the principal point is chosen to +best fit a subset of the source image (determined by alpha) to the corrected image. +@return new_camera_matrix Output new camera intrinsic matrix. + +The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter. +By varying this parameter, you may retrieve only sensible pixels alpha=0 , keep all the original +image pixels if there is valuable information in the corners alpha=1 , or get something in between. +When alpha\>0 , the undistorted result is likely to have some black pixels corresponding to +"virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion +coefficients, the computed new camera intrinsic matrix, and newImageSize should be passed to +initUndistortRectifyMap to produce the maps for remap . + */ +CV_EXPORTS_W Mat getOptimalNewCameraMatrix( InputArray cameraMatrix, InputArray distCoeffs, + Size imageSize, double alpha, Size newImgSize = Size(), + CV_OUT Rect* validPixROI = 0, + bool centerPrincipalPoint = false); + +/** @brief Computes Hand-Eye calibration: \f$_{}^{g}\textrm{T}_c\f$ + +@param[in] R_gripper2base Rotation part extracted from the homogeneous matrix that transforms a point +expressed in the gripper frame to the robot base frame (\f$_{}^{b}\textrm{T}_g\f$). +This is a vector (`vector`) that contains the rotation, `(3x3)` rotation matrices or `(3x1)` rotation vectors, +for all the transformations from gripper frame to robot base frame. +@param[in] t_gripper2base Translation part extracted from the homogeneous matrix that transforms a point +expressed in the gripper frame to the robot base frame (\f$_{}^{b}\textrm{T}_g\f$). +This is a vector (`vector`) that contains the `(3x1)` translation vectors for all the transformations +from gripper frame to robot base frame. +@param[in] R_target2cam Rotation part extracted from the homogeneous matrix that transforms a point +expressed in the target frame to the camera frame (\f$_{}^{c}\textrm{T}_t\f$). 
+This is a vector (`vector`) that contains the rotation, `(3x3)` rotation matrices or `(3x1)` rotation vectors,
+for all the transformations from calibration target frame to camera frame.
+@param[in] t_target2cam Translation part extracted from the homogeneous matrix that transforms a point
+expressed in the target frame to the camera frame (\f$_{}^{c}\textrm{T}_t\f$).
+This is a vector (`vector`) that contains the `(3x1)` translation vectors for all the transformations
+from calibration target frame to camera frame.
+@param[out] R_cam2gripper Estimated `(3x3)` rotation part extracted from the homogeneous matrix that transforms a point
+expressed in the camera frame to the gripper frame (\f$_{}^{g}\textrm{T}_c\f$).
+@param[out] t_cam2gripper Estimated `(3x1)` translation part extracted from the homogeneous matrix that transforms a point
+expressed in the camera frame to the gripper frame (\f$_{}^{g}\textrm{T}_c\f$).
+@param[in] method One of the implemented Hand-Eye calibration methods, see cv::HandEyeCalibrationMethod
+
+The function performs the Hand-Eye calibration using various methods. One approach consists in estimating the
+rotation and then the translation (separable solutions), and the following methods are implemented:
+ - R. Tsai, R. Lenz A New Technique for Fully Autonomous and Efficient 3D Robotics Hand/Eye Calibration \cite Tsai89
+ - F. Park, B. Martin Robot Sensor Calibration: Solving AX = XB on the Euclidean Group \cite Park94
+ - R. Horaud, F. Dornaika Hand-Eye Calibration \cite Horaud95
+
+Another approach consists in estimating the rotation and the translation simultaneously (simultaneous solutions),
+with the following implemented methods:
+ - N. Andreff, R. Horaud, B. Espiau On-line Hand-Eye Calibration \cite Andreff99
+ - K. Daniilidis Hand-Eye Calibration Using Dual Quaternions \cite Daniilidis98
+
+The following picture describes the Hand-Eye calibration problem where the transformation between a camera ("eye")
+mounted on a robot gripper ("hand") has to be estimated.
+ +![](pics/hand-eye_figure.png) + +The calibration procedure is the following: + - a static calibration pattern is used to estimate the transformation between the target frame + and the camera frame + - the robot gripper is moved in order to acquire several poses + - for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for + instance the robot kinematics +\f[ + \begin{bmatrix} + X_b\\ + Y_b\\ + Z_b\\ + 1 + \end{bmatrix} + = + \begin{bmatrix} + _{}^{b}\textrm{R}_g & _{}^{b}\textrm{t}_g \\ + 0_{1 \times 3} & 1 + \end{bmatrix} + \begin{bmatrix} + X_g\\ + Y_g\\ + Z_g\\ + 1 + \end{bmatrix} +\f] + - for each pose, the homogeneous transformation between the calibration target frame and the camera frame is recorded using + for instance a pose estimation method (PnP) from 2D-3D point correspondences +\f[ + \begin{bmatrix} + X_c\\ + Y_c\\ + Z_c\\ + 1 + \end{bmatrix} + = + \begin{bmatrix} + _{}^{c}\textrm{R}_t & _{}^{c}\textrm{t}_t \\ + 0_{1 \times 3} & 1 + \end{bmatrix} + \begin{bmatrix} + X_t\\ + Y_t\\ + Z_t\\ + 1 + \end{bmatrix} +\f] + +The Hand-Eye calibration procedure returns the following homogeneous transformation +\f[ + \begin{bmatrix} + X_g\\ + Y_g\\ + Z_g\\ + 1 + \end{bmatrix} + = + \begin{bmatrix} + _{}^{g}\textrm{R}_c & _{}^{g}\textrm{t}_c \\ + 0_{1 \times 3} & 1 + \end{bmatrix} + \begin{bmatrix} + X_c\\ + Y_c\\ + Z_c\\ + 1 + \end{bmatrix} +\f] + +This problem is also known as solving the \f$\mathbf{A}\mathbf{X}=\mathbf{X}\mathbf{B}\f$ equation: +\f[ + \begin{align*} + ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &= + \hspace{0.1em} ^{b}{\textrm{T}_g}^{(2)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\ + + (^{b}{\textrm{T}_g}^{(2)})^{-1} \hspace{0.2em} ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c &= + \hspace{0.1em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\ + + \textrm{A}_i \textrm{X} &= \textrm{X} \textrm{B}_i \\ + \end{align*} +\f] + +\note +Additional information can be found on this [website](http://campar.in.tum.de/Chair/HandEyeCalibration). +\note +A minimum of 2 motions with non parallel rotation axes are necessary to determine the hand-eye transformation. +So at least 3 different poses are required, but it is strongly recommended to use many more poses. + + */ +CV_EXPORTS_W void calibrateHandEye( InputArrayOfArrays R_gripper2base, InputArrayOfArrays t_gripper2base, + InputArrayOfArrays R_target2cam, InputArrayOfArrays t_target2cam, + OutputArray R_cam2gripper, OutputArray t_cam2gripper, + HandEyeCalibrationMethod method=CALIB_HAND_EYE_TSAI ); + +/** @brief Converts points from Euclidean to homogeneous space. + +@param src Input vector of N-dimensional points. +@param dst Output vector of N+1-dimensional points. + +The function converts points from Euclidean to homogeneous space by appending 1's to the tuple of +point coordinates. That is, each point (x1, x2, ..., xn) is converted to (x1, x2, ..., xn, 1). + */ +CV_EXPORTS_W void convertPointsToHomogeneous( InputArray src, OutputArray dst ); + +/** @brief Converts points from homogeneous to Euclidean space. + +@param src Input vector of N-dimensional points. +@param dst Output vector of N-1-dimensional points. + +The function converts points homogeneous to Euclidean space using perspective projection. That is, +each point (x1, x2, ... x(n-1), xn) is converted to (x1/xn, x2/xn, ..., x(n-1)/xn). 
When xn=0, the
+output point coordinates will be (0,0,0,...).
+ */
+CV_EXPORTS_W void convertPointsFromHomogeneous( InputArray src, OutputArray dst );
+
+/** @brief Converts points to/from homogeneous coordinates.
+
+@param src Input array or vector of 2D, 3D, or 4D points.
+@param dst Output vector of 2D, 3D, or 4D points.
+
+The function converts 2D or 3D points from/to homogeneous coordinates by calling either
+convertPointsToHomogeneous or convertPointsFromHomogeneous.
+
+@note The function is obsolete. Use one of the previous two functions instead.
+ */
+CV_EXPORTS void convertPointsHomogeneous( InputArray src, OutputArray dst );
+
+/** @brief Calculates a fundamental matrix from the corresponding points in two images.
+
+@param points1 Array of N points from the first image. The point coordinates should be
+floating-point (single or double precision).
+@param points2 Array of the second image points of the same size and format as points1 .
+@param method Method for computing a fundamental matrix.
+- @ref FM_7POINT for a 7-point algorithm. \f$N = 7\f$
+- @ref FM_8POINT for an 8-point algorithm. \f$N \ge 8\f$
+- @ref FM_RANSAC for the RANSAC algorithm. \f$N \ge 8\f$
+- @ref FM_LMEDS for the LMedS algorithm. \f$N \ge 8\f$
+@param ransacReprojThreshold Parameter used only for RANSAC. It is the maximum distance from a point to an epipolar
+line in pixels, beyond which the point is considered an outlier and is not used for computing the
+final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
+point localization, image resolution, and the image noise.
+@param confidence Parameter used for the RANSAC and LMedS methods only. It specifies a desirable level
+of confidence (probability) that the estimated matrix is correct.
+@param[out] mask Optional output mask.
+@param maxIters The maximum number of robust method iterations.
+
+The epipolar geometry is described by the following equation:
+
+\f[[p_2; 1]^T F [p_1; 1] = 0\f]
+
+where \f$F\f$ is a fundamental matrix, \f$p_1\f$ and \f$p_2\f$ are corresponding points in the first and the
+second images, respectively.
+
+The function calculates the fundamental matrix using one of four methods listed above and returns
+the found fundamental matrix. Normally just one matrix is found. But in case of the 7-point
+algorithm, the function may return up to 3 solutions ( \f$9 \times 3\f$ matrix that stores all 3
+matrices sequentially).
+
+The calculated fundamental matrix may be passed further to computeCorrespondEpilines that finds the
+epipolar lines corresponding to the specified points. It can also be passed to
+stereoRectifyUncalibrated to compute the rectification transformation. :
+@code
+    // Example. Estimation of fundamental matrix using the RANSAC algorithm
+    int point_count = 100;
+    vector<Point2f> points1(point_count);
+    vector<Point2f> points2(point_count);
+
+    // initialize the points here ...
+ for( int i = 0; i < point_count; i++ ) + { + points1[i] = ...; + points2[i] = ...; + } + + Mat fundamental_matrix = + findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99); +@endcode + */ +CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2, + int method, double ransacReprojThreshold, double confidence, + int maxIters, OutputArray mask = noArray() ); + +/** @overload */ +CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2, + int method = FM_RANSAC, + double ransacReprojThreshold = 3., double confidence = 0.99, + OutputArray mask = noArray() ); + +/** @overload */ +CV_EXPORTS Mat findFundamentalMat( InputArray points1, InputArray points2, + OutputArray mask, int method = FM_RANSAC, + double ransacReprojThreshold = 3., double confidence = 0.99 ); + +/** @brief Calculates an essential matrix from the corresponding points in two images. + +@param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should +be floating-point (single or double precision). +@param points2 Array of the second image points of the same size and format as points1 . +@param cameraMatrix Camera intrinsic matrix \f$\cameramatrix{A}\f$ . +Note that this function assumes that points1 and points2 are feature points from cameras with the +same camera intrinsic matrix. If this assumption does not hold for your use case, use +`undistortPoints()` with `P = cv::NoArray()` for both cameras to transform image points +to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When +passing these coordinates, pass the identity matrix for this parameter. +@param method Method for computing an essential matrix. +- @ref RANSAC for the RANSAC algorithm. +- @ref LMEDS for the LMedS algorithm. +@param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of +confidence (probability) that the estimated matrix is correct. +@param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar +line in pixels, beyond which the point is considered an outlier and is not used for computing the +final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the +point localization, image resolution, and the image noise. +@param mask Output array of N elements, every element of which is set to 0 for outliers and to 1 +for the other points. The array is computed only in the RANSAC and LMedS methods. +@param maxIters The maximum number of robust method iterations. + +This function estimates essential matrix based on the five-point algorithm solver in @cite Nister03 . +@cite SteweniusCFS is also a related. The epipolar geometry is described by the following equation: + +\f[[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\f] + +where \f$E\f$ is an essential matrix, \f$p_1\f$ and \f$p_2\f$ are corresponding points in the first and the +second images, respectively. The result of this function may be passed further to +decomposeEssentialMat or recoverPose to recover the relative pose between cameras. 
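+
+A minimal sketch of the normalization workflow described above (pts1, pts2 and the per-camera
+intrinsics and distortion coefficients are placeholders, not defined by this header):
+@code
+    // Different intrinsics per camera: move the points to normalized image coordinates first.
+    std::vector<cv::Point2f> pts1Norm, pts2Norm;
+    cv::undistortPoints(pts1, pts1Norm, cameraMatrix1, distCoeffs1);   // P = noArray()
+    cv::undistortPoints(pts2, pts2Norm, cameraMatrix2, distCoeffs2);   // P = noArray()
+
+    // The points are now valid for an identity intrinsic matrix; the RANSAC threshold should be
+    // expressed in normalized units as well (e.g. a pixel threshold divided by the focal length).
+    cv::Mat mask;
+    cv::Mat E = cv::findEssentialMat(pts1Norm, pts2Norm, cv::Mat::eye(3, 3, CV_64F),
+                                     cv::RANSAC, 0.999, 1.0 / cameraMatrix1.at<double>(0, 0), mask);
+@endcode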
+ */ +CV_EXPORTS_W Mat findEssentialMat( InputArray points1, InputArray points2, + InputArray cameraMatrix, int method, + double prob, double threshold, + int maxIters, OutputArray mask = noArray() ); + +/** @overload */ +CV_EXPORTS_W Mat findEssentialMat( InputArray points1, InputArray points2, + InputArray cameraMatrix, int method = RANSAC, + double prob = 0.999, double threshold = 1.0, + OutputArray mask = noArray() ); + +/** @overload +@param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should +be floating-point (single or double precision). +@param points2 Array of the second image points of the same size and format as points1 . +@param focal focal length of the camera. Note that this function assumes that points1 and points2 +are feature points from cameras with same focal length and principal point. +@param pp principal point of the camera. +@param method Method for computing a fundamental matrix. +- @ref RANSAC for the RANSAC algorithm. +- @ref LMEDS for the LMedS algorithm. +@param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar +line in pixels, beyond which the point is considered an outlier and is not used for computing the +final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the +point localization, image resolution, and the image noise. +@param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of +confidence (probability) that the estimated matrix is correct. +@param mask Output array of N elements, every element of which is set to 0 for outliers and to 1 +for the other points. The array is computed only in the RANSAC and LMedS methods. +@param maxIters The maximum number of robust method iterations. + +This function differs from the one above that it computes camera intrinsic matrix from focal length and +principal point: + +\f[A = +\begin{bmatrix} +f & 0 & x_{pp} \\ +0 & f & y_{pp} \\ +0 & 0 & 1 +\end{bmatrix}\f] + */ +CV_EXPORTS_W Mat findEssentialMat( InputArray points1, InputArray points2, + double focal, Point2d pp, + int method, double prob, + double threshold, int maxIters, + OutputArray mask = noArray() ); + +/** @overload */ +CV_EXPORTS_W Mat findEssentialMat( InputArray points1, InputArray points2, + double focal = 1.0, Point2d pp = Point2d(0, 0), + int method = RANSAC, double prob = 0.999, + double threshold = 1.0, OutputArray mask = noArray() ); + +/** @brief Decompose an essential matrix to possible rotations and translation. + +@param E The input essential matrix. +@param R1 One possible rotation matrix. +@param R2 Another possible rotation matrix. +@param t One possible translation. + +This function decomposes the essential matrix E using svd decomposition @cite HartleyZ00. In +general, four possible poses exist for the decomposition of E. They are \f$[R_1, t]\f$, +\f$[R_1, -t]\f$, \f$[R_2, t]\f$, \f$[R_2, -t]\f$. + +If E gives the epipolar constraint \f$[p_2; 1]^T A^{-T} E A^{-1} [p_1; 1] = 0\f$ between the image +points \f$p_1\f$ in the first image and \f$p_2\f$ in second image, then any of the tuples +\f$[R_1, t]\f$, \f$[R_1, -t]\f$, \f$[R_2, t]\f$, \f$[R_2, -t]\f$ is a change of basis from the first +camera's coordinate system to the second camera's coordinate system. However, by decomposing E, one +can only get the direction of the translation. For this reason, the translation t is returned with +unit length. 
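+
+A minimal sketch (E is assumed to come from findEssentialMat or another estimator):
+@code
+    cv::Mat R1, R2, t;
+    cv::decomposeEssentialMat(E, R1, R2, t);
+    // Four candidate poses: [R1, t], [R1, -t], [R2, t], [R2, -t].
+    // recoverPose() selects the physically valid one by checking that the
+    // triangulated points end up in front of both cameras.
+@endcode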
+ */
+CV_EXPORTS_W void decomposeEssentialMat( InputArray E, OutputArray R1, OutputArray R2, OutputArray t );
+
+/** @brief Recovers the relative camera rotation and the translation from an estimated essential
+matrix and the corresponding points in two images, using the cheirality check. Returns the number of
+inliers that pass the check.
+
+@param E The input essential matrix.
+@param points1 Array of N 2D points from the first image. The point coordinates should be
+floating-point (single or double precision).
+@param points2 Array of the second image points of the same size and format as points1 .
+@param cameraMatrix Camera intrinsic matrix \f$\cameramatrix{A}\f$ .
+Note that this function assumes that points1 and points2 are feature points from cameras with the
+same camera intrinsic matrix.
+@param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
+that performs a change of basis from the first camera's coordinate system to the second camera's
+coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
+described below.
+@param t Output translation vector. This vector is obtained by @ref decomposeEssentialMat and
+therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
+length.
+@param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
+inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
+recover pose. In the output mask, only inliers which pass the cheirality check are set.
+
+This function decomposes an essential matrix using @ref decomposeEssentialMat and then verifies
+possible pose hypotheses by doing a cheirality check. The cheirality check means that the
+triangulated 3D points should have positive depth. Some details can be found in @cite Nister03.
+
+This function can be used to process the output E and mask from @ref findEssentialMat. In this
+scenario, points1 and points2 are the same input for findEssentialMat.:
+@code
+    // Example. Estimation of fundamental matrix using the RANSAC algorithm
+    int point_count = 100;
+    vector<Point2f> points1(point_count);
+    vector<Point2f> points2(point_count);
+
+    // initialize the points here ...
+    for( int i = 0; i < point_count; i++ )
+    {
+        points1[i] = ...;
+        points2[i] = ...;
+    }
+
+    // camera matrix with both focal lengths = 1, and principal point = (0, 0)
+    Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
+
+    Mat E, R, t, mask;
+
+    E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask);
+    recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
+@endcode
+ */
+CV_EXPORTS_W int recoverPose( InputArray E, InputArray points1, InputArray points2,
+                            InputArray cameraMatrix, OutputArray R, OutputArray t,
+                            InputOutputArray mask = noArray() );
+
+/** @overload
+@param E The input essential matrix.
+@param points1 Array of N 2D points from the first image. The point coordinates should be
+floating-point (single or double precision).
+@param points2 Array of the second image points of the same size and format as points1 .
+@param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
+that performs a change of basis from the first camera's coordinate system to the second camera's
+coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
+description below.
+@param t Output translation vector.
This vector is obtained by @ref decomposeEssentialMat and +therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit +length. +@param focal Focal length of the camera. Note that this function assumes that points1 and points2 +are feature points from cameras with same focal length and principal point. +@param pp principal point of the camera. +@param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks +inliers in points1 and points2 for then given essential matrix E. Only these inliers will be used to +recover pose. In the output mask only inliers which pass the cheirality check. + +This function differs from the one above that it computes camera intrinsic matrix from focal length and +principal point: + +\f[A = +\begin{bmatrix} +f & 0 & x_{pp} \\ +0 & f & y_{pp} \\ +0 & 0 & 1 +\end{bmatrix}\f] + */ +CV_EXPORTS_W int recoverPose( InputArray E, InputArray points1, InputArray points2, + OutputArray R, OutputArray t, + double focal = 1.0, Point2d pp = Point2d(0, 0), + InputOutputArray mask = noArray() ); + +/** @overload +@param E The input essential matrix. +@param points1 Array of N 2D points from the first image. The point coordinates should be +floating-point (single or double precision). +@param points2 Array of the second image points of the same size and format as points1. +@param cameraMatrix Camera intrinsic matrix \f$\cameramatrix{A}\f$ . +Note that this function assumes that points1 and points2 are feature points from cameras with the +same camera intrinsic matrix. +@param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple +that performs a change of basis from the first camera's coordinate system to the second camera's +coordinate system. Note that, in general, t can not be used for this tuple, see the parameter +description below. +@param t Output translation vector. This vector is obtained by @ref decomposeEssentialMat and +therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit +length. +@param distanceThresh threshold distance which is used to filter out far away points (i.e. infinite +points). +@param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks +inliers in points1 and points2 for then given essential matrix E. Only these inliers will be used to +recover pose. In the output mask only inliers which pass the cheirality check. +@param triangulatedPoints 3D points which were reconstructed by triangulation. + +This function differs from the one above that it outputs the triangulated 3D point that are used for +the cheirality check. + */ +CV_EXPORTS_W int recoverPose( InputArray E, InputArray points1, InputArray points2, + InputArray cameraMatrix, OutputArray R, OutputArray t, double distanceThresh, InputOutputArray mask = noArray(), + OutputArray triangulatedPoints = noArray()); + +/** @brief For points in an image of a stereo pair, computes the corresponding epilines in the other image. + +@param points Input points. \f$N \times 1\f$ or \f$1 \times N\f$ matrix of type CV_32FC2 or +vector\ . +@param whichImage Index of the image (1 or 2) that contains the points . +@param F Fundamental matrix that can be estimated using findFundamentalMat or stereoRectify . +@param lines Output vector of the epipolar lines corresponding to the points in the other image. +Each line \f$ax + by + c=0\f$ is encoded by 3 numbers \f$(a, b, c)\f$ . 
+ +For every point in one of the two images of a stereo pair, the function finds the equation of the +corresponding epipolar line in the other image. + +From the fundamental matrix definition (see findFundamentalMat ), line \f$l^{(2)}_i\f$ in the second +image for the point \f$p^{(1)}_i\f$ in the first image (when whichImage=1 ) is computed as: + +\f[l^{(2)}_i = F p^{(1)}_i\f] + +And vice versa, when whichImage=2, \f$l^{(1)}_i\f$ is computed from \f$p^{(2)}_i\f$ as: + +\f[l^{(1)}_i = F^T p^{(2)}_i\f] + +Line coefficients are defined up to a scale. They are normalized so that \f$a_i^2+b_i^2=1\f$ . + */ +CV_EXPORTS_W void computeCorrespondEpilines( InputArray points, int whichImage, + InputArray F, OutputArray lines ); + +/** @brief This function reconstructs 3-dimensional points (in homogeneous coordinates) by using +their observations with a stereo camera. + +@param projMatr1 3x4 projection matrix of the first camera, i.e. this matrix projects 3D points +given in the world's coordinate system into the first image. +@param projMatr2 3x4 projection matrix of the second camera, i.e. this matrix projects 3D points +given in the world's coordinate system into the second image. +@param projPoints1 2xN array of feature points in the first image. In the case of the c++ version, +it can be also a vector of feature points or two-channel matrix of size 1xN or Nx1. +@param projPoints2 2xN array of corresponding points in the second image. In the case of the c++ +version, it can be also a vector of feature points or two-channel matrix of size 1xN or Nx1. +@param points4D 4xN array of reconstructed points in homogeneous coordinates. These points are +returned in the world's coordinate system. + +@note + Keep in mind that all input data should be of float type in order for this function to work. + +@note + If the projection matrices from @ref stereoRectify are used, then the returned points are + represented in the first camera's rectified coordinate system. + +@sa + reprojectImageTo3D + */ +CV_EXPORTS_W void triangulatePoints( InputArray projMatr1, InputArray projMatr2, + InputArray projPoints1, InputArray projPoints2, + OutputArray points4D ); + +/** @brief Refines coordinates of corresponding points. + +@param F 3x3 fundamental matrix. +@param points1 1xN array containing the first set of points. +@param points2 1xN array containing the second set of points. +@param newPoints1 The optimized points1. +@param newPoints2 The optimized points2. + +The function implements the Optimal Triangulation Method (see Multiple View Geometry for details). +For each given point correspondence points1[i] \<-\> points2[i], and a fundamental matrix F, it +computes the corrected correspondences newPoints1[i] \<-\> newPoints2[i] that minimize the geometric +error \f$d(points1[i], newPoints1[i])^2 + d(points2[i],newPoints2[i])^2\f$ (where \f$d(a,b)\f$ is the +geometric distance between points \f$a\f$ and \f$b\f$ ) subject to the epipolar constraint +\f$newPoints2^T * F * newPoints1 = 0\f$ . + */ +CV_EXPORTS_W void correctMatches( InputArray F, InputArray points1, InputArray points2, + OutputArray newPoints1, OutputArray newPoints2 ); + +/** @brief Filters off small noise blobs (speckles) in the disparity map + +@param img The input 16-bit signed disparity image +@param newVal The disparity value used to paint-off the speckles +@param maxSpeckleSize The maximum speckle size to consider it a speckle. 
Larger blobs are not +affected by the algorithm +@param maxDiff Maximum difference between neighbor disparity pixels to put them into the same +blob. Note that since StereoBM, StereoSGBM and may be other algorithms return a fixed-point +disparity map, where disparity values are multiplied by 16, this scale factor should be taken into +account when specifying this parameter value. +@param buf The optional temporary buffer to avoid memory allocation within the function. + */ +CV_EXPORTS_W void filterSpeckles( InputOutputArray img, double newVal, + int maxSpeckleSize, double maxDiff, + InputOutputArray buf = noArray() ); + +//! computes valid disparity ROI from the valid ROIs of the rectified images (that are returned by cv::stereoRectify()) +CV_EXPORTS_W Rect getValidDisparityROI( Rect roi1, Rect roi2, + int minDisparity, int numberOfDisparities, + int blockSize ); + +//! validates disparity using the left-right check. The matrix "cost" should be computed by the stereo correspondence algorithm +CV_EXPORTS_W void validateDisparity( InputOutputArray disparity, InputArray cost, + int minDisparity, int numberOfDisparities, + int disp12MaxDisp = 1 ); + +/** @brief Reprojects a disparity image to 3D space. + +@param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit +floating-point disparity image. The values of 8-bit / 16-bit signed formats are assumed to have no +fractional bits. If the disparity is 16-bit signed format, as computed by @ref StereoBM or +@ref StereoSGBM and maybe other algorithms, it should be divided by 16 (and scaled to float) before +being used here. +@param _3dImage Output 3-channel floating-point image of the same size as disparity. Each element of +_3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity map. If one +uses Q obtained by @ref stereoRectify, then the returned points are represented in the first +camera's rectified coordinate system. +@param Q \f$4 \times 4\f$ perspective transformation matrix that can be obtained with +@ref stereoRectify. +@param handleMissingValues Indicates, whether the function should handle missing values (i.e. +points where the disparity was not computed). If handleMissingValues=true, then pixels with the +minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed +to 3D points with a very large Z value (currently set to 10000). +@param ddepth The optional output array depth. If it is -1, the output image will have CV_32F +depth. ddepth can also be set to CV_16S, CV_32S or CV_32F. + +The function transforms a single-channel disparity map to a 3-channel image representing a 3D +surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it +computes: + +\f[\begin{bmatrix} +X \\ +Y \\ +Z \\ +W +\end{bmatrix} = Q \begin{bmatrix} +x \\ +y \\ +\texttt{disparity} (x,y) \\ +z +\end{bmatrix}.\f] + +@sa + To reproject a sparse set of points {(x,y,d),...} to 3D space, use perspectiveTransform. + */ +CV_EXPORTS_W void reprojectImageTo3D( InputArray disparity, + OutputArray _3dImage, InputArray Q, + bool handleMissingValues = false, + int ddepth = -1 ); + +/** @brief Calculates the Sampson Distance between two points. 
+ +The function cv::sampsonDistance calculates and returns the first order approximation of the geometric error as: +\f[ +sd( \texttt{pt1} , \texttt{pt2} )= +\frac{(\texttt{pt2}^t \cdot \texttt{F} \cdot \texttt{pt1})^2} +{((\texttt{F} \cdot \texttt{pt1})(0))^2 + +((\texttt{F} \cdot \texttt{pt1})(1))^2 + +((\texttt{F}^t \cdot \texttt{pt2})(0))^2 + +((\texttt{F}^t \cdot \texttt{pt2})(1))^2} +\f] +The fundamental matrix may be calculated using the cv::findFundamentalMat function. See @cite HartleyZ00 11.4.3 for details. +@param pt1 first homogeneous 2d point +@param pt2 second homogeneous 2d point +@param F fundamental matrix +@return The computed Sampson distance. +*/ +CV_EXPORTS_W double sampsonDistance(InputArray pt1, InputArray pt2, InputArray F); + +/** @brief Computes an optimal affine transformation between two 3D point sets. + +It computes +\f[ +\begin{bmatrix} +x\\ +y\\ +z\\ +\end{bmatrix} += +\begin{bmatrix} +a_{11} & a_{12} & a_{13}\\ +a_{21} & a_{22} & a_{23}\\ +a_{31} & a_{32} & a_{33}\\ +\end{bmatrix} +\begin{bmatrix} +X\\ +Y\\ +Z\\ +\end{bmatrix} ++ +\begin{bmatrix} +b_1\\ +b_2\\ +b_3\\ +\end{bmatrix} +\f] + +@param src First input 3D point set containing \f$(X,Y,Z)\f$. +@param dst Second input 3D point set containing \f$(x,y,z)\f$. +@param out Output 3D affine transformation matrix \f$3 \times 4\f$ of the form +\f[ +\begin{bmatrix} +a_{11} & a_{12} & a_{13} & b_1\\ +a_{21} & a_{22} & a_{23} & b_2\\ +a_{31} & a_{32} & a_{33} & b_3\\ +\end{bmatrix} +\f] +@param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier). +@param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as +an inlier. +@param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything +between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation +significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation. + +The function estimates an optimal 3D affine transformation between two 3D point sets using the +RANSAC algorithm. + */ +CV_EXPORTS_W int estimateAffine3D(InputArray src, InputArray dst, + OutputArray out, OutputArray inliers, + double ransacThreshold = 3, double confidence = 0.99); + +/** @brief Computes an optimal affine transformation between two 2D point sets. + +It computes +\f[ +\begin{bmatrix} +x\\ +y\\ +\end{bmatrix} += +\begin{bmatrix} +a_{11} & a_{12}\\ +a_{21} & a_{22}\\ +\end{bmatrix} +\begin{bmatrix} +X\\ +Y\\ +\end{bmatrix} ++ +\begin{bmatrix} +b_1\\ +b_2\\ +\end{bmatrix} +\f] + +@param from First input 2D point set containing \f$(X,Y)\f$. +@param to Second input 2D point set containing \f$(x,y)\f$. +@param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier). +@param method Robust method used to compute transformation. The following methods are possible: +- @ref RANSAC - RANSAC-based robust method +- @ref LMEDS - Least-Median robust method +RANSAC is the default method. +@param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider +a point as an inlier. Applies only to RANSAC. +@param maxIters The maximum number of robust method iterations. +@param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything +between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation +significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation. 
+@param refineIters Maximum number of iterations of refining algorithm (Levenberg-Marquardt). +Passing 0 will disable refining, so the output matrix will be output of robust method. + +@return Output 2D affine transformation matrix \f$2 \times 3\f$ or empty matrix if transformation +could not be estimated. The returned matrix has the following form: +\f[ +\begin{bmatrix} +a_{11} & a_{12} & b_1\\ +a_{21} & a_{22} & b_2\\ +\end{bmatrix} +\f] + +The function estimates an optimal 2D affine transformation between two 2D point sets using the +selected robust algorithm. + +The computed transformation is then refined further (using only inliers) with the +Levenberg-Marquardt method to reduce the re-projection error even more. + +@note +The RANSAC method can handle practically any ratio of outliers but needs a threshold to +distinguish inliers from outliers. The method LMeDS does not need any threshold but it works +correctly only when there are more than 50% of inliers. + +@sa estimateAffinePartial2D, getAffineTransform +*/ +CV_EXPORTS_W cv::Mat estimateAffine2D(InputArray from, InputArray to, OutputArray inliers = noArray(), + int method = RANSAC, double ransacReprojThreshold = 3, + size_t maxIters = 2000, double confidence = 0.99, + size_t refineIters = 10); + +/** @brief Computes an optimal limited affine transformation with 4 degrees of freedom between +two 2D point sets. + +@param from First input 2D point set. +@param to Second input 2D point set. +@param inliers Output vector indicating which points are inliers. +@param method Robust method used to compute transformation. The following methods are possible: +- @ref RANSAC - RANSAC-based robust method +- @ref LMEDS - Least-Median robust method +RANSAC is the default method. +@param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider +a point as an inlier. Applies only to RANSAC. +@param maxIters The maximum number of robust method iterations. +@param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything +between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation +significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation. +@param refineIters Maximum number of iterations of refining algorithm (Levenberg-Marquardt). +Passing 0 will disable refining, so the output matrix will be output of robust method. + +@return Output 2D affine transformation (4 degrees of freedom) matrix \f$2 \times 3\f$ or +empty matrix if transformation could not be estimated. + +The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to +combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust +estimation. + +The computed transformation is then refined further (using only inliers) with the +Levenberg-Marquardt method to reduce the re-projection error even more. + +Estimated transformation matrix is: +\f[ \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\ + \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y +\end{bmatrix} \f] +Where \f$ \theta \f$ is the rotation angle, \f$ s \f$ the scaling factor and \f$ t_x, t_y \f$ are +translations in \f$ x, y \f$ axes respectively. + +@note +The RANSAC method can handle practically any ratio of outliers but need a threshold to +distinguish inliers from outliers. The method LMeDS does not need any threshold but it works +correctly only when there are more than 50% of inliers. 
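+
+A minimal usage sketch (the point sets src and dst are placeholders):
+@code
+    std::vector<cv::Point2f> src, dst;   // filled with corresponding points
+    std::vector<uchar> inliers;
+    cv::Mat M = cv::estimateAffinePartial2D(src, dst, inliers, cv::RANSAC, 3.0);
+    if (!M.empty())
+    {
+        // Recover the scale and the rotation angle from the 2x3 matrix.
+        double scale = std::sqrt(M.at<double>(0,0)*M.at<double>(0,0) + M.at<double>(1,0)*M.at<double>(1,0));
+        double angle = std::atan2(M.at<double>(1,0), M.at<double>(0,0));
+    }
+@endcode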
+ +@sa estimateAffine2D, getAffineTransform +*/ +CV_EXPORTS_W cv::Mat estimateAffinePartial2D(InputArray from, InputArray to, OutputArray inliers = noArray(), + int method = RANSAC, double ransacReprojThreshold = 3, + size_t maxIters = 2000, double confidence = 0.99, + size_t refineIters = 10); + +/** @example samples/cpp/tutorial_code/features2D/Homography/decompose_homography.cpp +An example program with homography decomposition. + +Check @ref tutorial_homography "the corresponding tutorial" for more details. +*/ + +/** @brief Decompose a homography matrix to rotation(s), translation(s) and plane normal(s). + +@param H The input homography matrix between two images. +@param K The input camera intrinsic matrix. +@param rotations Array of rotation matrices. +@param translations Array of translation matrices. +@param normals Array of plane normal matrices. + +This function extracts relative camera motion between two views of a planar object and returns up to +four mathematical solution tuples of rotation, translation, and plane normal. The decomposition of +the homography matrix H is described in detail in @cite Malis. + +If the homography H, induced by the plane, gives the constraint +\f[s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\f] on the source image points +\f$p_i\f$ and the destination image points \f$p'_i\f$, then the tuple of rotations[k] and +translations[k] is a change of basis from the source camera's coordinate system to the destination +camera's coordinate system. However, by decomposing H, one can only get the translation normalized +by the (typically unknown) depth of the scene, i.e. its direction but with normalized length. + +If point correspondences are available, at least two solutions may further be invalidated, by +applying positive depth constraint, i.e. all points must be in front of the camera. + */ +CV_EXPORTS_W int decomposeHomographyMat(InputArray H, + InputArray K, + OutputArrayOfArrays rotations, + OutputArrayOfArrays translations, + OutputArrayOfArrays normals); + +/** @brief Filters homography decompositions based on additional information. + +@param rotations Vector of rotation matrices. +@param normals Vector of plane normal matrices. +@param beforePoints Vector of (rectified) visible reference points before the homography is applied +@param afterPoints Vector of (rectified) visible reference points after the homography is applied +@param possibleSolutions Vector of int indices representing the viable solution set after filtering +@param pointsMask optional Mat/Vector of 8u type representing the mask for the inliers as given by the findHomography function + +This function is intended to filter the output of the decomposeHomographyMat based on additional +information as described in @cite Malis . The summary of the method: the decomposeHomographyMat function +returns 2 unique solutions and their "opposites" for a total of 4 solutions. If we have access to the +sets of points visible in the camera frame before and after the homography transformation is applied, +we can determine which are the true potential solutions and which are the opposites by verifying which +homographies are consistent with all visible reference points being in front of the camera. The inputs +are left unchanged; the filtered solution set is returned as indices into the existing one. 
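+
+A minimal sketch of the filtering step (H, K, the rectified point sets and the inlier mask from
+findHomography are assumed to be available):
+@code
+    std::vector<cv::Mat> rotations, translations, normals;
+    cv::decomposeHomographyMat(H, K, rotations, translations, normals);
+
+    std::vector<int> feasible;
+    cv::filterHomographyDecompByVisibleRefpoints(rotations, normals,
+                                                 beforePoints, afterPoints,
+                                                 feasible, inlierMask);
+    // 'feasible' now holds the indices of the decompositions for which all
+    // visible reference points lie in front of the camera.
+@endcode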
+ +*/ +CV_EXPORTS_W void filterHomographyDecompByVisibleRefpoints(InputArrayOfArrays rotations, + InputArrayOfArrays normals, + InputArray beforePoints, + InputArray afterPoints, + OutputArray possibleSolutions, + InputArray pointsMask = noArray()); + +/** @brief The base class for stereo correspondence algorithms. + */ +class CV_EXPORTS_W StereoMatcher : public Algorithm +{ +public: + enum { DISP_SHIFT = 4, + DISP_SCALE = (1 << DISP_SHIFT) + }; + + /** @brief Computes disparity map for the specified stereo pair + + @param left Left 8-bit single-channel image. + @param right Right image of the same size and the same type as the left one. + @param disparity Output disparity map. It has the same size as the input images. Some algorithms, + like StereoBM or StereoSGBM compute 16-bit fixed-point disparity map (where each disparity value + has 4 fractional bits), whereas other algorithms output 32-bit floating-point disparity map. + */ + CV_WRAP virtual void compute( InputArray left, InputArray right, + OutputArray disparity ) = 0; + + CV_WRAP virtual int getMinDisparity() const = 0; + CV_WRAP virtual void setMinDisparity(int minDisparity) = 0; + + CV_WRAP virtual int getNumDisparities() const = 0; + CV_WRAP virtual void setNumDisparities(int numDisparities) = 0; + + CV_WRAP virtual int getBlockSize() const = 0; + CV_WRAP virtual void setBlockSize(int blockSize) = 0; + + CV_WRAP virtual int getSpeckleWindowSize() const = 0; + CV_WRAP virtual void setSpeckleWindowSize(int speckleWindowSize) = 0; + + CV_WRAP virtual int getSpeckleRange() const = 0; + CV_WRAP virtual void setSpeckleRange(int speckleRange) = 0; + + CV_WRAP virtual int getDisp12MaxDiff() const = 0; + CV_WRAP virtual void setDisp12MaxDiff(int disp12MaxDiff) = 0; +}; + + +/** @brief Class for computing stereo correspondence using the block matching algorithm, introduced and +contributed to OpenCV by K. Konolige. + */ +class CV_EXPORTS_W StereoBM : public StereoMatcher +{ +public: + enum { PREFILTER_NORMALIZED_RESPONSE = 0, + PREFILTER_XSOBEL = 1 + }; + + CV_WRAP virtual int getPreFilterType() const = 0; + CV_WRAP virtual void setPreFilterType(int preFilterType) = 0; + + CV_WRAP virtual int getPreFilterSize() const = 0; + CV_WRAP virtual void setPreFilterSize(int preFilterSize) = 0; + + CV_WRAP virtual int getPreFilterCap() const = 0; + CV_WRAP virtual void setPreFilterCap(int preFilterCap) = 0; + + CV_WRAP virtual int getTextureThreshold() const = 0; + CV_WRAP virtual void setTextureThreshold(int textureThreshold) = 0; + + CV_WRAP virtual int getUniquenessRatio() const = 0; + CV_WRAP virtual void setUniquenessRatio(int uniquenessRatio) = 0; + + CV_WRAP virtual int getSmallerBlockSize() const = 0; + CV_WRAP virtual void setSmallerBlockSize(int blockSize) = 0; + + CV_WRAP virtual Rect getROI1() const = 0; + CV_WRAP virtual void setROI1(Rect roi1) = 0; + + CV_WRAP virtual Rect getROI2() const = 0; + CV_WRAP virtual void setROI2(Rect roi2) = 0; + + /** @brief Creates StereoBM object + + @param numDisparities the disparity search range. For each pixel algorithm will find the best + disparity from 0 (default minimum disparity) to numDisparities. The search range can then be + shifted by changing the minimum disparity. + @param blockSize the linear size of the blocks compared by the algorithm. The size should be odd + (as the block is centered at the current pixel). Larger block size implies smoother, though less + accurate disparity map. 
Smaller block size gives more detailed disparity map, but there is higher + chance for algorithm to find a wrong correspondence. + + The function create StereoBM object. You can then call StereoBM::compute() to compute disparity for + a specific stereo pair. + */ + CV_WRAP static Ptr create(int numDisparities = 0, int blockSize = 21); +}; + +/** @brief The class implements the modified H. Hirschmuller algorithm @cite HH08 that differs from the original +one as follows: + +- By default, the algorithm is single-pass, which means that you consider only 5 directions +instead of 8. Set mode=StereoSGBM::MODE_HH in createStereoSGBM to run the full variant of the +algorithm but beware that it may consume a lot of memory. +- The algorithm matches blocks, not individual pixels. Though, setting blockSize=1 reduces the +blocks to single pixels. +- Mutual information cost function is not implemented. Instead, a simpler Birchfield-Tomasi +sub-pixel metric from @cite BT98 is used. Though, the color images are supported as well. +- Some pre- and post- processing steps from K. Konolige algorithm StereoBM are included, for +example: pre-filtering (StereoBM::PREFILTER_XSOBEL type) and post-filtering (uniqueness +check, quadratic interpolation and speckle filtering). + +@note + - (Python) An example illustrating the use of the StereoSGBM matching algorithm can be found + at opencv_source_code/samples/python/stereo_match.py + */ +class CV_EXPORTS_W StereoSGBM : public StereoMatcher +{ +public: + enum + { + MODE_SGBM = 0, + MODE_HH = 1, + MODE_SGBM_3WAY = 2, + MODE_HH4 = 3 + }; + + CV_WRAP virtual int getPreFilterCap() const = 0; + CV_WRAP virtual void setPreFilterCap(int preFilterCap) = 0; + + CV_WRAP virtual int getUniquenessRatio() const = 0; + CV_WRAP virtual void setUniquenessRatio(int uniquenessRatio) = 0; + + CV_WRAP virtual int getP1() const = 0; + CV_WRAP virtual void setP1(int P1) = 0; + + CV_WRAP virtual int getP2() const = 0; + CV_WRAP virtual void setP2(int P2) = 0; + + CV_WRAP virtual int getMode() const = 0; + CV_WRAP virtual void setMode(int mode) = 0; + + /** @brief Creates StereoSGBM object + + @param minDisparity Minimum possible disparity value. Normally, it is zero but sometimes + rectification algorithms can shift images, so this parameter needs to be adjusted accordingly. + @param numDisparities Maximum disparity minus minimum disparity. The value is always greater than + zero. In the current implementation, this parameter must be divisible by 16. + @param blockSize Matched block size. It must be an odd number \>=1 . Normally, it should be + somewhere in the 3..11 range. + @param P1 The first parameter controlling the disparity smoothness. See below. + @param P2 The second parameter controlling the disparity smoothness. The larger the values are, + the smoother the disparity is. P1 is the penalty on the disparity change by plus or minus 1 + between neighbor pixels. P2 is the penalty on the disparity change by more than 1 between neighbor + pixels. The algorithm requires P2 \> P1 . See stereo_match.cpp sample where some reasonably good + P1 and P2 values are shown (like 8\*number_of_image_channels\*blockSize\*blockSize and + 32\*number_of_image_channels\*blockSize\*blockSize , respectively). + @param disp12MaxDiff Maximum allowed difference (in integer pixel units) in the left-right + disparity check. Set it to a non-positive value to disable the check. + @param preFilterCap Truncation value for the prefiltered image pixels. 
The algorithm first + computes x-derivative at each pixel and clips its value by [-preFilterCap, preFilterCap] interval. + The result values are passed to the Birchfield-Tomasi pixel cost function. + @param uniquenessRatio Margin in percentage by which the best (minimum) computed cost function + value should "win" the second best value to consider the found match correct. Normally, a value + within the 5-15 range is good enough. + @param speckleWindowSize Maximum size of smooth disparity regions to consider their noise speckles + and invalidate. Set it to 0 to disable speckle filtering. Otherwise, set it somewhere in the + 50-200 range. + @param speckleRange Maximum disparity variation within each connected component. If you do speckle + filtering, set the parameter to a positive value, it will be implicitly multiplied by 16. + Normally, 1 or 2 is good enough. + @param mode Set it to StereoSGBM::MODE_HH to run the full-scale two-pass dynamic programming + algorithm. It will consume O(W\*H\*numDisparities) bytes, which is large for 640x480 stereo and + huge for HD-size pictures. By default, it is set to false . + + The first constructor initializes StereoSGBM with all the default parameters. So, you only have to + set StereoSGBM::numDisparities at minimum. The second constructor enables you to set each parameter + to a custom value. + */ + CV_WRAP static Ptr create(int minDisparity = 0, int numDisparities = 16, int blockSize = 3, + int P1 = 0, int P2 = 0, int disp12MaxDiff = 0, + int preFilterCap = 0, int uniquenessRatio = 0, + int speckleWindowSize = 0, int speckleRange = 0, + int mode = StereoSGBM::MODE_SGBM); +}; + +//! @} calib3d + +/** @brief The methods in this namespace use a so-called fisheye camera model. + @ingroup calib3d_fisheye +*/ +namespace fisheye +{ +//! @addtogroup calib3d_fisheye +//! @{ + + enum{ + CALIB_USE_INTRINSIC_GUESS = 1 << 0, + CALIB_RECOMPUTE_EXTRINSIC = 1 << 1, + CALIB_CHECK_COND = 1 << 2, + CALIB_FIX_SKEW = 1 << 3, + CALIB_FIX_K1 = 1 << 4, + CALIB_FIX_K2 = 1 << 5, + CALIB_FIX_K3 = 1 << 6, + CALIB_FIX_K4 = 1 << 7, + CALIB_FIX_INTRINSIC = 1 << 8, + CALIB_FIX_PRINCIPAL_POINT = 1 << 9, + CALIB_ZERO_DISPARITY = 1 << 10 + }; + + /** @brief Projects points using fisheye model + + @param objectPoints Array of object points, 1xN/Nx1 3-channel (or vector\ ), where N is + the number of points in the view. + @param imagePoints Output array of image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel, or + vector\. + @param affine + @param K Camera intrinsic matrix \f$cameramatrix{K}\f$. + @param D Input vector of distortion coefficients \f$\distcoeffsfisheye\f$. + @param alpha The skew coefficient. + @param jacobian Optional output 2Nx15 jacobian matrix of derivatives of image points with respect + to components of the focal lengths, coordinates of the principal point, distortion coefficients, + rotation vector, translation vector, and the skew. In the old interface different components of + the jacobian are returned via different output parameters. + + The function computes projections of 3D points to the image plane given intrinsic and extrinsic + camera parameters. Optionally, the function computes Jacobians - matrices of partial derivatives of + image points coordinates (as functions of all the input parameters) with respect to the particular + parameters, intrinsic and/or extrinsic. 
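+
+    A minimal sketch using the rvec/tvec overload (the pose and the calibration K, D are assumed
+    to come from a previous fisheye calibration and are left empty here as placeholders):
+    @code
+        cv::Mat rvec, tvec;                      // pose of the object frame in the camera frame
+        cv::Mat K, D;                            // intrinsics and distortion from cv::fisheye::calibrate
+        std::vector<cv::Point3f> objectPoints;   // 3D points in the object coordinate frame
+        std::vector<cv::Point2f> imagePoints;
+        cv::fisheye::projectPoints(objectPoints, imagePoints, rvec, tvec, K, D);
+    @endcode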
+ */ + CV_EXPORTS void projectPoints(InputArray objectPoints, OutputArray imagePoints, const Affine3d& affine, + InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray()); + + /** @overload */ + CV_EXPORTS_W void projectPoints(InputArray objectPoints, OutputArray imagePoints, InputArray rvec, InputArray tvec, + InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray()); + + /** @brief Distorts 2D points using fisheye model. + + @param undistorted Array of object points, 1xN/Nx1 2-channel (or vector\ ), where N is + the number of points in the view. + @param K Camera intrinsic matrix \f$cameramatrix{K}\f$. + @param D Input vector of distortion coefficients \f$\distcoeffsfisheye\f$. + @param alpha The skew coefficient. + @param distorted Output array of image points, 1xN/Nx1 2-channel, or vector\ . + + Note that the function assumes the camera intrinsic matrix of the undistorted points to be identity. + This means if you want to transform back points undistorted with undistortPoints() you have to + multiply them with \f$P^{-1}\f$. + */ + CV_EXPORTS_W void distortPoints(InputArray undistorted, OutputArray distorted, InputArray K, InputArray D, double alpha = 0); + + /** @brief Undistorts 2D points using fisheye model + + @param distorted Array of object points, 1xN/Nx1 2-channel (or vector\ ), where N is the + number of points in the view. + @param K Camera intrinsic matrix \f$cameramatrix{K}\f$. + @param D Input vector of distortion coefficients \f$\distcoeffsfisheye\f$. + @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3 + 1-channel or 1x1 3-channel + @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4) + @param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector\ . + */ + CV_EXPORTS_W void undistortPoints(InputArray distorted, OutputArray undistorted, + InputArray K, InputArray D, InputArray R = noArray(), InputArray P = noArray()); + + /** @brief Computes undistortion and rectification maps for image transform by cv::remap(). If D is empty zero + distortion is used, if R or P is empty identity matrixes are used. + + @param K Camera intrinsic matrix \f$cameramatrix{K}\f$. + @param D Input vector of distortion coefficients \f$\distcoeffsfisheye\f$. + @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3 + 1-channel or 1x1 3-channel + @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4) + @param size Undistorted image size. + @param m1type Type of the first output map that can be CV_32FC1 or CV_16SC2 . See convertMaps() + for details. + @param map1 The first output map. + @param map2 The second output map. + */ + CV_EXPORTS_W void initUndistortRectifyMap(InputArray K, InputArray D, InputArray R, InputArray P, + const cv::Size& size, int m1type, OutputArray map1, OutputArray map2); + + /** @brief Transforms an image to compensate for fisheye lens distortion. + + @param distorted image with fisheye lens distortion. + @param undistorted Output image with compensated fisheye lens distortion. + @param K Camera intrinsic matrix \f$cameramatrix{K}\f$. + @param D Input vector of distortion coefficients \f$\distcoeffsfisheye\f$. + @param Knew Camera intrinsic matrix of the distorted image. By default, it is the identity matrix but you + may additionally scale and shift the result by using a different matrix. 
+ @param new_size the new size + + The function transforms an image to compensate radial and tangential lens distortion. + + The function is simply a combination of fisheye::initUndistortRectifyMap (with unity R ) and remap + (with bilinear interpolation). See the former function for details of the transformation being + performed. + + See below the results of undistortImage. + - a\) result of undistort of perspective camera model (all possible coefficients (k_1, k_2, k_3, + k_4, k_5, k_6) of distortion were optimized under calibration) + - b\) result of fisheye::undistortImage of fisheye camera model (all possible coefficients (k_1, k_2, + k_3, k_4) of fisheye distortion were optimized under calibration) + - c\) original image was captured with fisheye lens + + Pictures a) and b) almost the same. But if we consider points of image located far from the center + of image, we can notice that on image a) these points are distorted. + + ![image](pics/fisheye_undistorted.jpg) + */ + CV_EXPORTS_W void undistortImage(InputArray distorted, OutputArray undistorted, + InputArray K, InputArray D, InputArray Knew = cv::noArray(), const Size& new_size = Size()); + + /** @brief Estimates new camera intrinsic matrix for undistortion or rectification. + + @param K Camera intrinsic matrix \f$cameramatrix{K}\f$. + @param image_size Size of the image + @param D Input vector of distortion coefficients \f$\distcoeffsfisheye\f$. + @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3 + 1-channel or 1x1 3-channel + @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4) + @param balance Sets the new focal length in range between the min focal length and the max focal + length. Balance is in range of [0, 1]. + @param new_size the new size + @param fov_scale Divisor for new focal length. + */ + CV_EXPORTS_W void estimateNewCameraMatrixForUndistortRectify(InputArray K, InputArray D, const Size &image_size, InputArray R, + OutputArray P, double balance = 0.0, const Size& new_size = Size(), double fov_scale = 1.0); + + /** @brief Performs camera calibaration + + @param objectPoints vector of vectors of calibration pattern points in the calibration pattern + coordinate space. + @param imagePoints vector of vectors of the projections of calibration pattern points. + imagePoints.size() and objectPoints.size() and imagePoints[i].size() must be equal to + objectPoints[i].size() for each i. + @param image_size Size of the image used only to initialize the camera intrinsic matrix. + @param K Output 3x3 floating-point camera intrinsic matrix + \f$\cameramatrix{A}\f$ . If + @ref fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be + initialized before calling the function. + @param D Output vector of distortion coefficients \f$\distcoeffsfisheye\f$. + @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view. + That is, each k-th rotation vector together with the corresponding k-th translation vector (see + the next output parameter description) brings the calibration pattern from the model coordinate + space (in which object points are specified) to the world coordinate space, that is, a real + position of the calibration pattern in the k-th pattern view (k=0.. *M* -1). + @param tvecs Output vector of translation vectors estimated for each pattern view. 
+ @param flags Different flags that may be zero or a combination of the following values: + - @ref fisheye::CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of + fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image + center ( imageSize is used), and focal distances are computed in a least-squares fashion. + - @ref fisheye::CALIB_RECOMPUTE_EXTRINSIC Extrinsic will be recomputed after each iteration + of intrinsic optimization. + - @ref fisheye::CALIB_CHECK_COND The functions will check validity of condition number. + - @ref fisheye::CALIB_FIX_SKEW Skew coefficient (alpha) is set to zero and stay zero. + - @ref fisheye::CALIB_FIX_K1,..., @ref fisheye::CALIB_FIX_K4 Selected distortion coefficients + are set to zeros and stay zero. + - @ref fisheye::CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global +optimization. It stays at the center or at a different location specified when @ref fisheye::CALIB_USE_INTRINSIC_GUESS is set too. + @param criteria Termination criteria for the iterative optimization algorithm. + */ + CV_EXPORTS_W double calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, const Size& image_size, + InputOutputArray K, InputOutputArray D, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags = 0, + TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON)); + + /** @brief Stereo rectification for fisheye camera model + + @param K1 First camera intrinsic matrix. + @param D1 First camera distortion parameters. + @param K2 Second camera intrinsic matrix. + @param D2 Second camera distortion parameters. + @param imageSize Size of the image used for stereo calibration. + @param R Rotation matrix between the coordinate systems of the first and the second + cameras. + @param tvec Translation vector between coordinate systems of the cameras. + @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. + @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. + @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first + camera. + @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second + camera. + @param Q Output \f$4 \times 4\f$ disparity-to-depth mapping matrix (see reprojectImageTo3D ). + @param flags Operation flags that may be zero or @ref fisheye::CALIB_ZERO_DISPARITY . If the flag is set, + the function makes the principal points of each camera have the same pixel coordinates in the + rectified views. And if the flag is not set, the function may still shift the images in the + horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the + useful image area. + @param newImageSize New image resolution after rectification. The same size should be passed to + initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0) + is passed (default), it is set to the original imageSize . Setting it to larger value can help you + preserve details in the original image, especially when there is a big radial distortion. + @param balance Sets the new focal length in range between the min focal length and the max focal + length. Balance is in range of [0, 1]. + @param fov_scale Divisor for new focal length. 
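+
+    A minimal sketch of a typical rectification pipeline (K1, D1, K2, D2, R and tvec are assumed
+    to come from fisheye::stereoCalibrate; left and right are the input images):
+    @code
+        cv::Mat R1, R2, P1, P2, Q;
+        cv::fisheye::stereoRectify(K1, D1, K2, D2, imageSize, R, tvec,
+                                   R1, R2, P1, P2, Q,
+                                   cv::fisheye::CALIB_ZERO_DISPARITY, imageSize, 0.0, 1.0);
+
+        cv::Mat map1x, map1y, map2x, map2y, rectLeft, rectRight;
+        cv::fisheye::initUndistortRectifyMap(K1, D1, R1, P1, imageSize, CV_16SC2, map1x, map1y);
+        cv::fisheye::initUndistortRectifyMap(K2, D2, R2, P2, imageSize, CV_16SC2, map2x, map2y);
+        cv::remap(left,  rectLeft,  map1x, map1y, cv::INTER_LINEAR);
+        cv::remap(right, rectRight, map2x, map2y, cv::INTER_LINEAR);
+    @endcode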
+ */ + CV_EXPORTS_W void stereoRectify(InputArray K1, InputArray D1, InputArray K2, InputArray D2, const Size &imageSize, InputArray R, InputArray tvec, + OutputArray R1, OutputArray R2, OutputArray P1, OutputArray P2, OutputArray Q, int flags, const Size &newImageSize = Size(), + double balance = 0.0, double fov_scale = 1.0); + + /** @brief Performs stereo calibration + + @param objectPoints Vector of vectors of the calibration pattern points. + @param imagePoints1 Vector of vectors of the projections of the calibration pattern points, + observed by the first camera. + @param imagePoints2 Vector of vectors of the projections of the calibration pattern points, + observed by the second camera. + @param K1 Input/output first camera intrinsic matrix: + \f$\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\f$ , \f$j = 0,\, 1\f$ . If + any of @ref fisheye::CALIB_USE_INTRINSIC_GUESS , @ref fisheye::CALIB_FIX_INTRINSIC are specified, + some or all of the matrix components must be initialized. + @param D1 Input/output vector of distortion coefficients \f$\distcoeffsfisheye\f$ of 4 elements. + @param K2 Input/output second camera intrinsic matrix. The parameter is similar to K1 . + @param D2 Input/output lens distortion coefficients for the second camera. The parameter is + similar to D1 . + @param imageSize Size of the image used only to initialize camera intrinsic matrix. + @param R Output rotation matrix between the 1st and the 2nd camera coordinate systems. + @param T Output translation vector between the coordinate systems of the cameras. + @param flags Different flags that may be zero or a combination of the following values: + - @ref fisheye::CALIB_FIX_INTRINSIC Fix K1, K2? and D1, D2? so that only R, T matrices + are estimated. + - @ref fisheye::CALIB_USE_INTRINSIC_GUESS K1, K2 contains valid initial values of + fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image + center (imageSize is used), and focal distances are computed in a least-squares fashion. + - @ref fisheye::CALIB_RECOMPUTE_EXTRINSIC Extrinsic will be recomputed after each iteration + of intrinsic optimization. + - @ref fisheye::CALIB_CHECK_COND The functions will check validity of condition number. + - @ref fisheye::CALIB_FIX_SKEW Skew coefficient (alpha) is set to zero and stay zero. + - @ref fisheye::CALIB_FIX_K1,..., @ref fisheye::CALIB_FIX_K4 Selected distortion coefficients are set to zeros and stay + zero. + @param criteria Termination criteria for the iterative optimization algorithm. + */ + CV_EXPORTS_W double stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, + InputOutputArray K1, InputOutputArray D1, InputOutputArray K2, InputOutputArray D2, Size imageSize, + OutputArray R, OutputArray T, int flags = fisheye::CALIB_FIX_INTRINSIC, + TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON)); + +//! 
@} calib3d_fisheye +} // end namespace fisheye + +} //end namespace cv + +#ifndef DISABLE_OPENCV_24_COMPATIBILITY +#include "opencv2/calib3d/calib3d_c.h" +#endif + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/calib3d/calib3d.hpp b/third_party/opencv/uos/sw_64/include/opencv2/calib3d/calib3d.hpp new file mode 100644 index 00000000..b3da45ed --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/calib3d/calib3d.hpp @@ -0,0 +1,48 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifdef __OPENCV_BUILD +#error this is a compatibility header which should not be used inside the OpenCV library +#endif + +#include "opencv2/calib3d.hpp" diff --git a/third_party/opencv/uos/sw_64/include/opencv2/calib3d/calib3d_c.h b/third_party/opencv/uos/sw_64/include/opencv2/calib3d/calib3d_c.h new file mode 100644 index 00000000..8ec6390d --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/calib3d/calib3d_c.h @@ -0,0 +1,427 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef OPENCV_CALIB3D_C_H +#define OPENCV_CALIB3D_C_H + +#include "opencv2/core/core_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @addtogroup calib3d_c + @{ + */ + +/****************************************************************************************\ +* Camera Calibration, Pose Estimation and Stereo * +\****************************************************************************************/ + +typedef struct CvPOSITObject CvPOSITObject; + +/* Allocates and initializes CvPOSITObject structure before doing cvPOSIT */ +CVAPI(CvPOSITObject*) cvCreatePOSITObject( CvPoint3D32f* points, int point_count ); + + +/* Runs POSIT (POSe from ITeration) algorithm for determining 3d position of + an object given its model and projection in a weak-perspective case */ +CVAPI(void) cvPOSIT( CvPOSITObject* posit_object, CvPoint2D32f* image_points, + double focal_length, CvTermCriteria criteria, + float* rotation_matrix, float* translation_vector); + +/* Releases CvPOSITObject structure */ +CVAPI(void) cvReleasePOSITObject( CvPOSITObject** posit_object ); + +/* updates the number of RANSAC iterations */ +CVAPI(int) cvRANSACUpdateNumIters( double p, double err_prob, + int model_points, int max_iters ); + +CVAPI(void) cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst ); + +/* Calculates fundamental matrix given a set of corresponding points */ +#define CV_FM_7POINT 1 +#define CV_FM_8POINT 2 + +#define CV_LMEDS 4 +#define CV_RANSAC 8 + +#define CV_FM_LMEDS_ONLY CV_LMEDS +#define CV_FM_RANSAC_ONLY CV_RANSAC +#define CV_FM_LMEDS CV_LMEDS +#define CV_FM_RANSAC CV_RANSAC + +enum +{ + CV_ITERATIVE = 0, + CV_EPNP = 1, // F.Moreno-Noguer, V.Lepetit and P.Fua "EPnP: Efficient Perspective-n-Point Camera Pose Estimation" + CV_P3P = 2, // X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang; "Complete Solution Classification for the Perspective-Three-Point Problem" + CV_DLS = 3 // Joel A. Hesch and Stergios I. Roumeliotis. 
"A Direct Least-Squares (DLS) Method for PnP" +}; + +CVAPI(int) cvFindFundamentalMat( const CvMat* points1, const CvMat* points2, + CvMat* fundamental_matrix, + int method CV_DEFAULT(CV_FM_RANSAC), + double param1 CV_DEFAULT(3.), double param2 CV_DEFAULT(0.99), + CvMat* status CV_DEFAULT(NULL) ); + +/* For each input point on one of images + computes parameters of the corresponding + epipolar line on the other image */ +CVAPI(void) cvComputeCorrespondEpilines( const CvMat* points, + int which_image, + const CvMat* fundamental_matrix, + CvMat* correspondent_lines ); + +/* Triangulation functions */ + +CVAPI(void) cvTriangulatePoints(CvMat* projMatr1, CvMat* projMatr2, + CvMat* projPoints1, CvMat* projPoints2, + CvMat* points4D); + +CVAPI(void) cvCorrectMatches(CvMat* F, CvMat* points1, CvMat* points2, + CvMat* new_points1, CvMat* new_points2); + + +/* Computes the optimal new camera matrix according to the free scaling parameter alpha: + alpha=0 - only valid pixels will be retained in the undistorted image + alpha=1 - all the source image pixels will be retained in the undistorted image +*/ +CVAPI(void) cvGetOptimalNewCameraMatrix( const CvMat* camera_matrix, + const CvMat* dist_coeffs, + CvSize image_size, double alpha, + CvMat* new_camera_matrix, + CvSize new_imag_size CV_DEFAULT(cvSize(0,0)), + CvRect* valid_pixel_ROI CV_DEFAULT(0), + int center_principal_point CV_DEFAULT(0)); + +/* Converts rotation vector to rotation matrix or vice versa */ +CVAPI(int) cvRodrigues2( const CvMat* src, CvMat* dst, + CvMat* jacobian CV_DEFAULT(0) ); + +/* Finds perspective transformation between the object plane and image (view) plane */ +CVAPI(int) cvFindHomography( const CvMat* src_points, + const CvMat* dst_points, + CvMat* homography, + int method CV_DEFAULT(0), + double ransacReprojThreshold CV_DEFAULT(3), + CvMat* mask CV_DEFAULT(0), + int maxIters CV_DEFAULT(2000), + double confidence CV_DEFAULT(0.995)); + +/* Computes RQ decomposition for 3x3 matrices */ +CVAPI(void) cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ, + CvMat *matrixQx CV_DEFAULT(NULL), + CvMat *matrixQy CV_DEFAULT(NULL), + CvMat *matrixQz CV_DEFAULT(NULL), + CvPoint3D64f *eulerAngles CV_DEFAULT(NULL)); + +/* Computes projection matrix decomposition */ +CVAPI(void) cvDecomposeProjectionMatrix( const CvMat *projMatr, CvMat *calibMatr, + CvMat *rotMatr, CvMat *posVect, + CvMat *rotMatrX CV_DEFAULT(NULL), + CvMat *rotMatrY CV_DEFAULT(NULL), + CvMat *rotMatrZ CV_DEFAULT(NULL), + CvPoint3D64f *eulerAngles CV_DEFAULT(NULL)); + +/* Computes d(AB)/dA and d(AB)/dB */ +CVAPI(void) cvCalcMatMulDeriv( const CvMat* A, const CvMat* B, CvMat* dABdA, CvMat* dABdB ); + +/* Computes r3 = rodrigues(rodrigues(r2)*rodrigues(r1)), + t3 = rodrigues(r2)*t1 + t2 and the respective derivatives */ +CVAPI(void) cvComposeRT( const CvMat* _rvec1, const CvMat* _tvec1, + const CvMat* _rvec2, const CvMat* _tvec2, + CvMat* _rvec3, CvMat* _tvec3, + CvMat* dr3dr1 CV_DEFAULT(0), CvMat* dr3dt1 CV_DEFAULT(0), + CvMat* dr3dr2 CV_DEFAULT(0), CvMat* dr3dt2 CV_DEFAULT(0), + CvMat* dt3dr1 CV_DEFAULT(0), CvMat* dt3dt1 CV_DEFAULT(0), + CvMat* dt3dr2 CV_DEFAULT(0), CvMat* dt3dt2 CV_DEFAULT(0) ); + +/* Projects object points to the view plane using + the specified extrinsic and intrinsic camera parameters */ +CVAPI(void) cvProjectPoints2( const CvMat* object_points, const CvMat* rotation_vector, + const CvMat* translation_vector, const CvMat* camera_matrix, + const CvMat* distortion_coeffs, CvMat* image_points, + CvMat* dpdrot CV_DEFAULT(NULL), CvMat* dpdt 
CV_DEFAULT(NULL), + CvMat* dpdf CV_DEFAULT(NULL), CvMat* dpdc CV_DEFAULT(NULL), + CvMat* dpddist CV_DEFAULT(NULL), + double aspect_ratio CV_DEFAULT(0)); + +/* Finds extrinsic camera parameters from + a few known corresponding point pairs and intrinsic parameters */ +CVAPI(void) cvFindExtrinsicCameraParams2( const CvMat* object_points, + const CvMat* image_points, + const CvMat* camera_matrix, + const CvMat* distortion_coeffs, + CvMat* rotation_vector, + CvMat* translation_vector, + int use_extrinsic_guess CV_DEFAULT(0) ); + +/* Computes initial estimate of the intrinsic camera parameters + in case of planar calibration target (e.g. chessboard) */ +CVAPI(void) cvInitIntrinsicParams2D( const CvMat* object_points, + const CvMat* image_points, + const CvMat* npoints, CvSize image_size, + CvMat* camera_matrix, + double aspect_ratio CV_DEFAULT(1.) ); + +#define CV_CALIB_CB_ADAPTIVE_THRESH 1 +#define CV_CALIB_CB_NORMALIZE_IMAGE 2 +#define CV_CALIB_CB_FILTER_QUADS 4 +#define CV_CALIB_CB_FAST_CHECK 8 + +// Performs a fast check if a chessboard is in the input image. This is a workaround to +// a problem of cvFindChessboardCorners being slow on images with no chessboard +// - src: input image +// - size: chessboard size +// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called, +// 0 if there is no chessboard, -1 in case of error +CVAPI(int) cvCheckChessboard(IplImage* src, CvSize size); + + /* Detects corners on a chessboard calibration pattern */ +CVAPI(int) cvFindChessboardCorners( const void* image, CvSize pattern_size, + CvPoint2D32f* corners, + int* corner_count CV_DEFAULT(NULL), + int flags CV_DEFAULT(CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE) ); + +/* Draws individual chessboard corners or the whole chessboard detected */ +CVAPI(void) cvDrawChessboardCorners( CvArr* image, CvSize pattern_size, + CvPoint2D32f* corners, + int count, int pattern_was_found ); + +#define CV_CALIB_USE_INTRINSIC_GUESS 1 +#define CV_CALIB_FIX_ASPECT_RATIO 2 +#define CV_CALIB_FIX_PRINCIPAL_POINT 4 +#define CV_CALIB_ZERO_TANGENT_DIST 8 +#define CV_CALIB_FIX_FOCAL_LENGTH 16 +#define CV_CALIB_FIX_K1 32 +#define CV_CALIB_FIX_K2 64 +#define CV_CALIB_FIX_K3 128 +#define CV_CALIB_FIX_K4 2048 +#define CV_CALIB_FIX_K5 4096 +#define CV_CALIB_FIX_K6 8192 +#define CV_CALIB_RATIONAL_MODEL 16384 +#define CV_CALIB_THIN_PRISM_MODEL 32768 +#define CV_CALIB_FIX_S1_S2_S3_S4 65536 +#define CV_CALIB_TILTED_MODEL 262144 +#define CV_CALIB_FIX_TAUX_TAUY 524288 +#define CV_CALIB_FIX_TANGENT_DIST 2097152 + +#define CV_CALIB_NINTRINSIC 18 + +/* Finds intrinsic and extrinsic camera parameters + from a few views of known calibration pattern */ +CVAPI(double) cvCalibrateCamera2( const CvMat* object_points, + const CvMat* image_points, + const CvMat* point_counts, + CvSize image_size, + CvMat* camera_matrix, + CvMat* distortion_coeffs, + CvMat* rotation_vectors CV_DEFAULT(NULL), + CvMat* translation_vectors CV_DEFAULT(NULL), + int flags CV_DEFAULT(0), + CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria( + CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON)) ); + +/* Computes various useful characteristics of the camera from the data computed by + cvCalibrateCamera2 */ +CVAPI(void) cvCalibrationMatrixValues( const CvMat *camera_matrix, + CvSize image_size, + double aperture_width CV_DEFAULT(0), + double aperture_height CV_DEFAULT(0), + double *fovx CV_DEFAULT(NULL), + double *fovy CV_DEFAULT(NULL), + double *focal_length CV_DEFAULT(NULL), + CvPoint2D64f *principal_point CV_DEFAULT(NULL), + double 
*pixel_aspect_ratio CV_DEFAULT(NULL)); + +#define CV_CALIB_FIX_INTRINSIC 256 +#define CV_CALIB_SAME_FOCAL_LENGTH 512 + +/* Computes the transformation from one camera coordinate system to another one + from a few correspondent views of the same calibration target. Optionally, calibrates + both cameras */ +CVAPI(double) cvStereoCalibrate( const CvMat* object_points, const CvMat* image_points1, + const CvMat* image_points2, const CvMat* npoints, + CvMat* camera_matrix1, CvMat* dist_coeffs1, + CvMat* camera_matrix2, CvMat* dist_coeffs2, + CvSize image_size, CvMat* R, CvMat* T, + CvMat* E CV_DEFAULT(0), CvMat* F CV_DEFAULT(0), + int flags CV_DEFAULT(CV_CALIB_FIX_INTRINSIC), + CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria( + CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,1e-6)) ); + +#define CV_CALIB_ZERO_DISPARITY 1024 + +/* Computes 3D rotations (+ optional shift) for each camera coordinate system to make both + views parallel (=> to make all the epipolar lines horizontal or vertical) */ +CVAPI(void) cvStereoRectify( const CvMat* camera_matrix1, const CvMat* camera_matrix2, + const CvMat* dist_coeffs1, const CvMat* dist_coeffs2, + CvSize image_size, const CvMat* R, const CvMat* T, + CvMat* R1, CvMat* R2, CvMat* P1, CvMat* P2, + CvMat* Q CV_DEFAULT(0), + int flags CV_DEFAULT(CV_CALIB_ZERO_DISPARITY), + double alpha CV_DEFAULT(-1), + CvSize new_image_size CV_DEFAULT(cvSize(0,0)), + CvRect* valid_pix_ROI1 CV_DEFAULT(0), + CvRect* valid_pix_ROI2 CV_DEFAULT(0)); + +/* Computes rectification transformations for uncalibrated pair of images using a set + of point correspondences */ +CVAPI(int) cvStereoRectifyUncalibrated( const CvMat* points1, const CvMat* points2, + const CvMat* F, CvSize img_size, + CvMat* H1, CvMat* H2, + double threshold CV_DEFAULT(5)); + + + +/* stereo correspondence parameters and functions */ + +#define CV_STEREO_BM_NORMALIZED_RESPONSE 0 +#define CV_STEREO_BM_XSOBEL 1 + +/* Block matching algorithm structure */ +typedef struct CvStereoBMState +{ + // pre-filtering (normalization of input images) + int preFilterType; // =CV_STEREO_BM_NORMALIZED_RESPONSE now + int preFilterSize; // averaging window size: ~5x5..21x21 + int preFilterCap; // the output of pre-filtering is clipped by [-preFilterCap,preFilterCap] + + // correspondence using Sum of Absolute Difference (SAD) + int SADWindowSize; // ~5x5..21x21 + int minDisparity; // minimum disparity (can be negative) + int numberOfDisparities; // maximum disparity - minimum disparity (> 0) + + // post-filtering + int textureThreshold; // the disparity is only computed for pixels + // with textured enough neighborhood + int uniquenessRatio; // accept the computed disparity d* only if + // SAD(d) >= SAD(d*)*(1 + uniquenessRatio/100.) + // for any d != d*+/-1 within the search range. 
+ int speckleWindowSize; // disparity variation window
+ int speckleRange; // acceptable range of variation in window
+
+ int trySmallerWindows; // if 1, the results may be more accurate,
+ // at the expense of slower processing
+ CvRect roi1, roi2;
+ int disp12MaxDiff;
+
+ // temporary buffers
+ CvMat* preFilteredImg0;
+ CvMat* preFilteredImg1;
+ CvMat* slidingSumBuf;
+ CvMat* cost;
+ CvMat* disp;
+} CvStereoBMState;
+
+#define CV_STEREO_BM_BASIC 0
+#define CV_STEREO_BM_FISH_EYE 1
+#define CV_STEREO_BM_NARROW 2
+
+CVAPI(CvStereoBMState*) cvCreateStereoBMState(int preset CV_DEFAULT(CV_STEREO_BM_BASIC),
+ int numberOfDisparities CV_DEFAULT(0));
+
+CVAPI(void) cvReleaseStereoBMState( CvStereoBMState** state );
+
+CVAPI(void) cvFindStereoCorrespondenceBM( const CvArr* left, const CvArr* right,
+ CvArr* disparity, CvStereoBMState* state );
+
+CVAPI(CvRect) cvGetValidDisparityROI( CvRect roi1, CvRect roi2, int minDisparity,
+ int numberOfDisparities, int SADWindowSize );
+
+CVAPI(void) cvValidateDisparity( CvArr* disparity, const CvArr* cost,
+ int minDisparity, int numberOfDisparities,
+ int disp12MaxDiff CV_DEFAULT(1) );
+
+/* Reprojects the computed disparity image to the 3D space using the specified 4x4 matrix */
+CVAPI(void) cvReprojectImageTo3D( const CvArr* disparityImage,
+ CvArr* _3dImage, const CvMat* Q,
+ int handleMissingValues CV_DEFAULT(0) );
+
+/** @} calib3d_c */
+
+#ifdef __cplusplus
+} // extern "C"
+
+//////////////////////////////////////////////////////////////////////////////////////////
+class CV_EXPORTS CvLevMarq
+{
+public:
+ CvLevMarq();
+ CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria=
+ cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
+ bool completeSymmFlag=false );
+ ~CvLevMarq();
+ void init( int nparams, int nerrs, CvTermCriteria criteria=
+ cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
+ bool completeSymmFlag=false );
+ bool update( const CvMat*& param, CvMat*& J, CvMat*& err );
+ bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm );
+
+ void clear();
+ void step();
+ enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 };
+
+ cv::Ptr<CvMat> mask;
+ cv::Ptr<CvMat> prevParam;
+ cv::Ptr<CvMat> param;
+ cv::Ptr<CvMat> J;
+ cv::Ptr<CvMat> err;
+ cv::Ptr<CvMat> JtJ;
+ cv::Ptr<CvMat> JtJN;
+ cv::Ptr<CvMat> JtErr;
+ cv::Ptr<CvMat> JtJV;
+ cv::Ptr<CvMat> JtJW;
+ double prevErrNorm, errNorm;
+ int lambdaLg10;
+ CvTermCriteria criteria;
+ int state;
+ int iters;
+ bool completeSymmFlag;
+ int solveMethod;
+};
+
+#endif
+
+#endif /* OPENCV_CALIB3D_C_H */
diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core.hpp
new file mode 100644
index 00000000..be0a3a0d
--- /dev/null
+++ b/third_party/opencv/uos/sw_64/include/opencv2/core.hpp
@@ -0,0 +1,3298 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2015, Intel Corporation, all rights reserved.
+// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
+// Copyright (C) 2015, OpenCV Foundation, all rights reserved.
+// Copyright (C) 2015, Itseez Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_HPP +#define OPENCV_CORE_HPP + +#ifndef __cplusplus +# error core.hpp header must be compiled as C++ +#endif + +#include "opencv2/core/cvdef.h" +#include "opencv2/core/base.hpp" +#include "opencv2/core/cvstd.hpp" +#include "opencv2/core/traits.hpp" +#include "opencv2/core/matx.hpp" +#include "opencv2/core/types.hpp" +#include "opencv2/core/mat.hpp" +#include "opencv2/core/persistence.hpp" + +/** +@defgroup core Core functionality +@{ + @defgroup core_basic Basic structures + @defgroup core_c C structures and operations + @{ + @defgroup core_c_glue Connections with C++ + @} + @defgroup core_array Operations on arrays + @defgroup core_async Asynchronous API + @defgroup core_xml XML/YAML Persistence + @defgroup core_cluster Clustering + @defgroup core_utils Utility and system functions and macros + @{ + @defgroup core_logging Logging facilities + @defgroup core_utils_sse SSE utilities + @defgroup core_utils_neon NEON utilities + @defgroup core_utils_vsx VSX utilities + @defgroup core_utils_softfloat Softfloat support + @defgroup core_utils_samples Utility functions for OpenCV samples + @} + @defgroup core_opengl OpenGL interoperability + @defgroup core_ipp Intel IPP Asynchronous C/C++ Converters + @defgroup core_optim Optimization Algorithms + @defgroup core_directx DirectX interoperability + @defgroup core_eigen Eigen support + @defgroup core_opencl OpenCL support + @defgroup core_va_intel Intel VA-API/OpenCL (CL-VA) interoperability + @defgroup core_hal Hardware Acceleration Layer + @{ + @defgroup core_hal_functions Functions + @defgroup core_hal_interface Interface + @defgroup core_hal_intrin Universal intrinsics + @{ + @defgroup core_hal_intrin_impl Private implementation helpers + @} + @} +@} + */ + +namespace cv { + +//! @addtogroup core_utils +//! @{ + +/*! @brief Class passed to an error. + +This class encapsulates all or almost all necessary +information about the error happened in the program. The exception is +usually constructed and thrown implicitly via CV_Error and CV_Error_ macros. 
+@see error + */ +class CV_EXPORTS Exception : public std::exception +{ +public: + /*! + Default constructor + */ + Exception(); + /*! + Full constructor. Normally the constructor is not called explicitly. + Instead, the macros CV_Error(), CV_Error_() and CV_Assert() are used. + */ + Exception(int _code, const String& _err, const String& _func, const String& _file, int _line); + virtual ~Exception() throw(); + + /*! + \return the error description and the context as a text string. + */ + virtual const char *what() const throw() CV_OVERRIDE; + void formatMessage(); + + String msg; ///< the formatted error message + + int code; ///< error code @see CVStatus + String err; ///< error description + String func; ///< function name. Available only when the compiler supports getting it + String file; ///< source file name where the error has occurred + int line; ///< line number in the source file where the error has occurred +}; + +/*! @brief Signals an error and raises the exception. + +By default the function prints information about the error to stderr, +then it either stops if cv::setBreakOnError() had been called before or raises the exception. +It is possible to alternate error processing by using #redirectError(). +@param exc the exception raisen. +@deprecated drop this version + */ +CV_EXPORTS void error( const Exception& exc ); + +enum SortFlags { SORT_EVERY_ROW = 0, //!< each matrix row is sorted independently + SORT_EVERY_COLUMN = 1, //!< each matrix column is sorted + //!< independently; this flag and the previous one are + //!< mutually exclusive. + SORT_ASCENDING = 0, //!< each matrix row is sorted in the ascending + //!< order. + SORT_DESCENDING = 16 //!< each matrix row is sorted in the + //!< descending order; this flag and the previous one are also + //!< mutually exclusive. + }; + +//! @} core_utils + +//! @addtogroup core +//! @{ + +//! Covariation flags +enum CovarFlags { + /** The output covariance matrix is calculated as: + \f[\texttt{scale} \cdot [ \texttt{vects} [0]- \texttt{mean} , \texttt{vects} [1]- \texttt{mean} ,...]^T \cdot [ \texttt{vects} [0]- \texttt{mean} , \texttt{vects} [1]- \texttt{mean} ,...],\f] + The covariance matrix will be nsamples x nsamples. Such an unusual covariance matrix is used + for fast PCA of a set of very large vectors (see, for example, the EigenFaces technique for + face recognition). Eigenvalues of this "scrambled" matrix match the eigenvalues of the true + covariance matrix. The "true" eigenvectors can be easily calculated from the eigenvectors of + the "scrambled" covariance matrix. */ + COVAR_SCRAMBLED = 0, + /**The output covariance matrix is calculated as: + \f[\texttt{scale} \cdot [ \texttt{vects} [0]- \texttt{mean} , \texttt{vects} [1]- \texttt{mean} ,...] \cdot [ \texttt{vects} [0]- \texttt{mean} , \texttt{vects} [1]- \texttt{mean} ,...]^T,\f] + covar will be a square matrix of the same size as the total number of elements in each input + vector. One and only one of #COVAR_SCRAMBLED and #COVAR_NORMAL must be specified.*/ + COVAR_NORMAL = 1, + /** If the flag is specified, the function does not calculate mean from + the input vectors but, instead, uses the passed mean vector. This is useful if mean has been + pre-calculated or known in advance, or if the covariance matrix is calculated by parts. In + this case, mean is not a mean vector of the input sub-set of vectors but rather the mean + vector of the whole set.*/ + COVAR_USE_AVG = 2, + /** If the flag is specified, the covariance matrix is scaled. 
In the + "normal" mode, scale is 1./nsamples . In the "scrambled" mode, scale is the reciprocal of the + total number of elements in each input vector. By default (if the flag is not specified), the + covariance matrix is not scaled ( scale=1 ).*/ + COVAR_SCALE = 4, + /** If the flag is + specified, all the input vectors are stored as rows of the samples matrix. mean should be a + single-row vector in this case.*/ + COVAR_ROWS = 8, + /** If the flag is + specified, all the input vectors are stored as columns of the samples matrix. mean should be a + single-column vector in this case.*/ + COVAR_COLS = 16 +}; + +//! @addtogroup core_cluster +//! @{ + +//! k-Means flags +enum KmeansFlags { + /** Select random initial centers in each attempt.*/ + KMEANS_RANDOM_CENTERS = 0, + /** Use kmeans++ center initialization by Arthur and Vassilvitskii [Arthur2007].*/ + KMEANS_PP_CENTERS = 2, + /** During the first (and possibly the only) attempt, use the + user-supplied labels instead of computing them from the initial centers. For the second and + further attempts, use the random or semi-random centers. Use one of KMEANS_\*_CENTERS flag + to specify the exact method.*/ + KMEANS_USE_INITIAL_LABELS = 1 +}; + +//! @} core_cluster + +//! type of line +enum LineTypes { + FILLED = -1, + LINE_4 = 4, //!< 4-connected line + LINE_8 = 8, //!< 8-connected line + LINE_AA = 16 //!< antialiased line +}; + +//! Only a subset of Hershey fonts are supported +enum HersheyFonts { + FONT_HERSHEY_SIMPLEX = 0, //!< normal size sans-serif font + FONT_HERSHEY_PLAIN = 1, //!< small size sans-serif font + FONT_HERSHEY_DUPLEX = 2, //!< normal size sans-serif font (more complex than FONT_HERSHEY_SIMPLEX) + FONT_HERSHEY_COMPLEX = 3, //!< normal size serif font + FONT_HERSHEY_TRIPLEX = 4, //!< normal size serif font (more complex than FONT_HERSHEY_COMPLEX) + FONT_HERSHEY_COMPLEX_SMALL = 5, //!< smaller version of FONT_HERSHEY_COMPLEX + FONT_HERSHEY_SCRIPT_SIMPLEX = 6, //!< hand-writing style font + FONT_HERSHEY_SCRIPT_COMPLEX = 7, //!< more complex variant of FONT_HERSHEY_SCRIPT_SIMPLEX + FONT_ITALIC = 16 //!< flag for italic font +}; + +//! @addtogroup core_array +//! @{ + +enum ReduceTypes { REDUCE_SUM = 0, //!< the output is the sum of all rows/columns of the matrix. + REDUCE_AVG = 1, //!< the output is the mean vector of all rows/columns of the matrix. + REDUCE_MAX = 2, //!< the output is the maximum (column/row-wise) of all rows/columns of the matrix. + REDUCE_MIN = 3 //!< the output is the minimum (column/row-wise) of all rows/columns of the matrix. + }; + +//! @} core_array + +/** @brief Swaps two matrices +*/ +CV_EXPORTS void swap(Mat& a, Mat& b); +/** @overload */ +CV_EXPORTS void swap( UMat& a, UMat& b ); + +//! @} core + +//! @addtogroup core_array +//! @{ + +/** @brief Computes the source location of an extrapolated pixel. + +The function computes and returns the coordinate of a donor pixel corresponding to the specified +extrapolated pixel when using the specified extrapolation border mode. For example, if you use +cv::BORDER_WRAP mode in the horizontal direction, cv::BORDER_REFLECT_101 in the vertical direction and +want to compute value of the "virtual" pixel Point(-5, 100) in a floating-point image img , it +looks like: +@code{.cpp} + float val = img.at(borderInterpolate(100, img.rows, cv::BORDER_REFLECT_101), + borderInterpolate(-5, img.cols, cv::BORDER_WRAP)); +@endcode +Normally, the function is not called directly. It is used inside filtering functions and also in +copyMakeBorder. 
+@param p 0-based coordinate of the extrapolated pixel along one of the axes, likely \<0 or \>= len +@param len Length of the array along the corresponding axis. +@param borderType Border type, one of the #BorderTypes, except for #BORDER_TRANSPARENT and +#BORDER_ISOLATED . When borderType==#BORDER_CONSTANT , the function always returns -1, regardless +of p and len. + +@sa copyMakeBorder +*/ +CV_EXPORTS_W int borderInterpolate(int p, int len, int borderType); + +/** @example samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp +An example using copyMakeBorder function. +Check @ref tutorial_copyMakeBorder "the corresponding tutorial" for more details +*/ + +/** @brief Forms a border around an image. + +The function copies the source image into the middle of the destination image. The areas to the +left, to the right, above and below the copied source image will be filled with extrapolated +pixels. This is not what filtering functions based on it do (they extrapolate pixels on-fly), but +what other more complex functions, including your own, may do to simplify image boundary handling. + +The function supports the mode when src is already in the middle of dst . In this case, the +function does not copy src itself but simply constructs the border, for example: + +@code{.cpp} + // let border be the same in all directions + int border=2; + // constructs a larger image to fit both the image and the border + Mat gray_buf(rgb.rows + border*2, rgb.cols + border*2, rgb.depth()); + // select the middle part of it w/o copying data + Mat gray(gray_canvas, Rect(border, border, rgb.cols, rgb.rows)); + // convert image from RGB to grayscale + cvtColor(rgb, gray, COLOR_RGB2GRAY); + // form a border in-place + copyMakeBorder(gray, gray_buf, border, border, + border, border, BORDER_REPLICATE); + // now do some custom filtering ... + ... +@endcode +@note When the source image is a part (ROI) of a bigger image, the function will try to use the +pixels outside of the ROI to form a border. To disable this feature and always do extrapolation, as +if src was not a ROI, use borderType | #BORDER_ISOLATED. + +@param src Source image. +@param dst Destination image of the same type as src and the size Size(src.cols+left+right, +src.rows+top+bottom) . +@param top the top pixels +@param bottom the bottom pixels +@param left the left pixels +@param right Parameter specifying how many pixels in each direction from the source image rectangle +to extrapolate. For example, top=1, bottom=1, left=1, right=1 mean that 1 pixel-wide border needs +to be built. +@param borderType Border type. See borderInterpolate for details. +@param value Border value if borderType==BORDER_CONSTANT . + +@sa borderInterpolate +*/ +CV_EXPORTS_W void copyMakeBorder(InputArray src, OutputArray dst, + int top, int bottom, int left, int right, + int borderType, const Scalar& value = Scalar() ); + +/** @brief Calculates the per-element sum of two arrays or an array and a scalar. 
+ +The function add calculates: +- Sum of two arrays when both input arrays have the same size and the same number of channels: +\f[\texttt{dst}(I) = \texttt{saturate} ( \texttt{src1}(I) + \texttt{src2}(I)) \quad \texttt{if mask}(I) \ne0\f] +- Sum of an array and a scalar when src2 is constructed from Scalar or has the same number of +elements as `src1.channels()`: +\f[\texttt{dst}(I) = \texttt{saturate} ( \texttt{src1}(I) + \texttt{src2} ) \quad \texttt{if mask}(I) \ne0\f] +- Sum of a scalar and an array when src1 is constructed from Scalar or has the same number of +elements as `src2.channels()`: +\f[\texttt{dst}(I) = \texttt{saturate} ( \texttt{src1} + \texttt{src2}(I) ) \quad \texttt{if mask}(I) \ne0\f] +where `I` is a multi-dimensional index of array elements. In case of multi-channel arrays, each +channel is processed independently. + +The first function in the list above can be replaced with matrix expressions: +@code{.cpp} + dst = src1 + src2; + dst += src1; // equivalent to add(dst, src1, dst); +@endcode +The input arrays and the output array can all have the same or different depths. For example, you +can add a 16-bit unsigned array to a 8-bit signed array and store the sum as a 32-bit +floating-point array. Depth of the output array is determined by the dtype parameter. In the second +and third cases above, as well as in the first case, when src1.depth() == src2.depth(), dtype can +be set to the default -1. In this case, the output array will have the same depth as the input +array, be it src1, src2 or both. +@note Saturation is not applied when the output array has the depth CV_32S. You may even get +result of an incorrect sign in the case of overflow. +@param src1 first input array or a scalar. +@param src2 second input array or a scalar. +@param dst output array that has the same size and number of channels as the input array(s); the +depth is defined by dtype or src1/src2. +@param mask optional operation mask - 8-bit single channel array, that specifies elements of the +output array to be changed. +@param dtype optional depth of the output array (see the discussion below). +@sa subtract, addWeighted, scaleAdd, Mat::convertTo +*/ +CV_EXPORTS_W void add(InputArray src1, InputArray src2, OutputArray dst, + InputArray mask = noArray(), int dtype = -1); + +/** @brief Calculates the per-element difference between two arrays or array and a scalar. + +The function subtract calculates: +- Difference between two arrays, when both input arrays have the same size and the same number of +channels: + \f[\texttt{dst}(I) = \texttt{saturate} ( \texttt{src1}(I) - \texttt{src2}(I)) \quad \texttt{if mask}(I) \ne0\f] +- Difference between an array and a scalar, when src2 is constructed from Scalar or has the same +number of elements as `src1.channels()`: + \f[\texttt{dst}(I) = \texttt{saturate} ( \texttt{src1}(I) - \texttt{src2} ) \quad \texttt{if mask}(I) \ne0\f] +- Difference between a scalar and an array, when src1 is constructed from Scalar or has the same +number of elements as `src2.channels()`: + \f[\texttt{dst}(I) = \texttt{saturate} ( \texttt{src1} - \texttt{src2}(I) ) \quad \texttt{if mask}(I) \ne0\f] +- The reverse difference between a scalar and an array in the case of `SubRS`: + \f[\texttt{dst}(I) = \texttt{saturate} ( \texttt{src2} - \texttt{src1}(I) ) \quad \texttt{if mask}(I) \ne0\f] +where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each +channel is processed independently. 
+ +The first function in the list above can be replaced with matrix expressions: +@code{.cpp} + dst = src1 - src2; + dst -= src1; // equivalent to subtract(dst, src1, dst); +@endcode +The input arrays and the output array can all have the same or different depths. For example, you +can subtract to 8-bit unsigned arrays and store the difference in a 16-bit signed array. Depth of +the output array is determined by dtype parameter. In the second and third cases above, as well as +in the first case, when src1.depth() == src2.depth(), dtype can be set to the default -1. In this +case the output array will have the same depth as the input array, be it src1, src2 or both. +@note Saturation is not applied when the output array has the depth CV_32S. You may even get +result of an incorrect sign in the case of overflow. +@param src1 first input array or a scalar. +@param src2 second input array or a scalar. +@param dst output array of the same size and the same number of channels as the input array. +@param mask optional operation mask; this is an 8-bit single channel array that specifies elements +of the output array to be changed. +@param dtype optional depth of the output array +@sa add, addWeighted, scaleAdd, Mat::convertTo + */ +CV_EXPORTS_W void subtract(InputArray src1, InputArray src2, OutputArray dst, + InputArray mask = noArray(), int dtype = -1); + + +/** @brief Calculates the per-element scaled product of two arrays. + +The function multiply calculates the per-element product of two arrays: + +\f[\texttt{dst} (I)= \texttt{saturate} ( \texttt{scale} \cdot \texttt{src1} (I) \cdot \texttt{src2} (I))\f] + +There is also a @ref MatrixExpressions -friendly variant of the first function. See Mat::mul . + +For a not-per-element matrix product, see gemm . + +@note Saturation is not applied when the output array has the depth +CV_32S. You may even get result of an incorrect sign in the case of +overflow. +@param src1 first input array. +@param src2 second input array of the same size and the same type as src1. +@param dst output array of the same size and type as src1. +@param scale optional scale factor. +@param dtype optional depth of the output array +@sa add, subtract, divide, scaleAdd, addWeighted, accumulate, accumulateProduct, accumulateSquare, +Mat::convertTo +*/ +CV_EXPORTS_W void multiply(InputArray src1, InputArray src2, + OutputArray dst, double scale = 1, int dtype = -1); + +/** @brief Performs per-element division of two arrays or a scalar by an array. + +The function cv::divide divides one array by another: +\f[\texttt{dst(I) = saturate(src1(I)*scale/src2(I))}\f] +or a scalar by an array when there is no src1 : +\f[\texttt{dst(I) = saturate(scale/src2(I))}\f] + +When src2(I) is zero, dst(I) will also be zero. Different channels of +multi-channel arrays are processed independently. + +@note Saturation is not applied when the output array has the depth CV_32S. You may even get +result of an incorrect sign in the case of overflow. +@param src1 first input array. +@param src2 second input array of the same size and type as src1. +@param scale scalar factor. +@param dst output array of the same size and type as src2. +@param dtype optional depth of the output array; if -1, dst will have depth src2.depth(), but in +case of an array-by-array division, you can only pass -1 when src1.depth()==src2.depth(). 
+@sa multiply, add, subtract +*/ +CV_EXPORTS_W void divide(InputArray src1, InputArray src2, OutputArray dst, + double scale = 1, int dtype = -1); + +/** @overload */ +CV_EXPORTS_W void divide(double scale, InputArray src2, + OutputArray dst, int dtype = -1); + +/** @brief Calculates the sum of a scaled array and another array. + +The function scaleAdd is one of the classical primitive linear algebra operations, known as DAXPY +or SAXPY in [BLAS](http://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms). It calculates +the sum of a scaled array and another array: +\f[\texttt{dst} (I)= \texttt{scale} \cdot \texttt{src1} (I) + \texttt{src2} (I)\f] +The function can also be emulated with a matrix expression, for example: +@code{.cpp} + Mat A(3, 3, CV_64F); + ... + A.row(0) = A.row(1)*2 + A.row(2); +@endcode +@param src1 first input array. +@param alpha scale factor for the first array. +@param src2 second input array of the same size and type as src1. +@param dst output array of the same size and type as src1. +@sa add, addWeighted, subtract, Mat::dot, Mat::convertTo +*/ +CV_EXPORTS_W void scaleAdd(InputArray src1, double alpha, InputArray src2, OutputArray dst); + +/** @example samples/cpp/tutorial_code/HighGUI/AddingImagesTrackbar.cpp +Check @ref tutorial_trackbar "the corresponding tutorial" for more details +*/ + +/** @brief Calculates the weighted sum of two arrays. + +The function addWeighted calculates the weighted sum of two arrays as follows: +\f[\texttt{dst} (I)= \texttt{saturate} ( \texttt{src1} (I)* \texttt{alpha} + \texttt{src2} (I)* \texttt{beta} + \texttt{gamma} )\f] +where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each +channel is processed independently. +The function can be replaced with a matrix expression: +@code{.cpp} + dst = src1*alpha + src2*beta + gamma; +@endcode +@note Saturation is not applied when the output array has the depth CV_32S. You may even get +result of an incorrect sign in the case of overflow. +@param src1 first input array. +@param alpha weight of the first array elements. +@param src2 second input array of the same size and channel number as src1. +@param beta weight of the second array elements. +@param gamma scalar added to each sum. +@param dst output array that has the same size and number of channels as the input arrays. +@param dtype optional depth of the output array; when both input arrays have the same depth, dtype +can be set to -1, which will be equivalent to src1.depth(). +@sa add, subtract, scaleAdd, Mat::convertTo +*/ +CV_EXPORTS_W void addWeighted(InputArray src1, double alpha, InputArray src2, + double beta, double gamma, OutputArray dst, int dtype = -1); + +/** @brief Scales, calculates absolute values, and converts the result to 8-bit. + +On each element of the input array, the function convertScaleAbs +performs three operations sequentially: scaling, taking an absolute +value, conversion to an unsigned 8-bit type: +\f[\texttt{dst} (I)= \texttt{saturate\_cast} (| \texttt{src} (I)* \texttt{alpha} + \texttt{beta} |)\f] +In case of multi-channel arrays, the function processes each channel +independently. When the output is not 8-bit, the operation can be +emulated by calling the Mat::convertTo method (or by using matrix +expressions) and then by calculating an absolute value of the result. 
+For example:
+@code{.cpp}
+ Mat_<float> A(30,30);
+ randu(A, Scalar(-100), Scalar(100));
+ Mat_<float> B = A*5 + 3;
+ B = abs(B);
+ // Mat_<float> B = abs(A*5+3) will also do the job,
+ // but it will allocate a temporary matrix
+@endcode
+@param src input array.
+@param dst output array.
+@param alpha optional scale factor.
+@param beta optional delta added to the scaled values.
+@sa Mat::convertTo, cv::abs(const Mat&)
+*/
+CV_EXPORTS_W void convertScaleAbs(InputArray src, OutputArray dst,
+ double alpha = 1, double beta = 0);
+
+/** @brief Converts an array to half precision floating number.
+
+This function converts FP32 (single precision floating point) from/to FP16 (half precision floating point). CV_16S format is used to represent FP16 data.
+There are two use modes (src -> dst): CV_32F -> CV_16S and CV_16S -> CV_32F. The input array has to have type of CV_32F or
+CV_16S to represent the bit depth. If the input array is neither of them, the function will raise an error.
+The format of half precision floating point is defined in IEEE 754-2008.
+
+@param src input array.
+@param dst output array.
+*/
+CV_EXPORTS_W void convertFp16(InputArray src, OutputArray dst);
+
+/** @brief Performs a look-up table transform of an array.
+
+The function LUT fills the output array with values from the look-up table. Indices of the entries
+are taken from the input array. That is, the function processes each element of src as follows:
+\f[\texttt{dst} (I) \leftarrow \texttt{lut(src(I) + d)}\f]
+where
+\f[d = \fork{0}{if \(\texttt{src}\) has depth \(\texttt{CV_8U}\)}{128}{if \(\texttt{src}\) has depth \(\texttt{CV_8S}\)}\f]
+@param src input array of 8-bit elements.
+@param lut look-up table of 256 elements; in case of multi-channel input array, the table should
+either have a single channel (in this case the same table is used for all channels) or the same
+number of channels as in the input array.
+@param dst output array of the same size and number of channels as src, and the same depth as lut.
+@sa convertScaleAbs, Mat::convertTo
+*/
+CV_EXPORTS_W void LUT(InputArray src, InputArray lut, OutputArray dst);
+
+/** @brief Calculates the sum of array elements.
+
+The function cv::sum calculates and returns the sum of array elements,
+independently for each channel.
+@param src input array that must have from 1 to 4 channels.
+@sa countNonZero, mean, meanStdDev, norm, minMaxLoc, reduce
+*/
+CV_EXPORTS_AS(sumElems) Scalar sum(InputArray src);
+
+/** @brief Counts non-zero array elements.
+
+The function returns the number of non-zero elements in src :
+\f[\sum _{I: \; \texttt{src} (I) \ne0 } 1\f]
+@param src single-channel array.
+@sa mean, meanStdDev, norm, minMaxLoc, calcCovarMatrix
+*/
+CV_EXPORTS_W int countNonZero( InputArray src );
+
+/** @brief Returns the list of locations of non-zero pixels
+
+Given a binary matrix (likely returned from an operation such
+as threshold(), compare(), >, ==, etc.), return all of
+the non-zero indices as a cv::Mat or std::vector<cv::Point> of (x,y) coordinates.
+For example:
+@code{.cpp}
+ cv::Mat binaryImage; // input, binary image
+ cv::Mat locations; // output, locations of non-zero pixels
+ cv::findNonZero(binaryImage, locations);
+
+ // access pixel coordinates
+ Point pnt = locations.at<Point>(i);
+@endcode
+or
+@code{.cpp}
+ cv::Mat binaryImage; // input, binary image
+ vector<Point> locations; // output, locations of non-zero pixels
+ cv::findNonZero(binaryImage, locations);
+
+ // access pixel coordinates
+ Point pnt = locations[i];
+@endcode
+@param src single-channel array (type CV_8UC1)
+@param idx the output array, type of cv::Mat or std::vector<Point>, corresponding to non-zero indices in the input
+*/
+CV_EXPORTS_W void findNonZero( InputArray src, OutputArray idx );
+
+/** @brief Calculates an average (mean) of array elements.
+
+The function cv::mean calculates the mean value M of array elements,
+independently for each channel, and returns it:
+\f[\begin{array}{l} N = \sum _{I: \; \texttt{mask} (I) \ne 0} 1 \\ M_c = \left ( \sum _{I: \; \texttt{mask} (I) \ne 0}{ \texttt{mtx} (I)_c} \right )/N \end{array}\f]
+When all the mask elements are 0's, the function returns Scalar::all(0).
+@param src input array that should have from 1 to 4 channels so that the result can be stored in
+Scalar_ .
+@param mask optional operation mask.
+@sa countNonZero, meanStdDev, norm, minMaxLoc
+*/
+CV_EXPORTS_W Scalar mean(InputArray src, InputArray mask = noArray());
+
+/** Calculates a mean and standard deviation of array elements.
+
+The function cv::meanStdDev calculates the mean and the standard deviation M
+of array elements independently for each channel and returns it via the
+output parameters:
+\f[\begin{array}{l} N = \sum _{I, \texttt{mask} (I) \ne 0} 1 \\ \texttt{mean} _c = \frac{\sum_{ I: \; \texttt{mask}(I) \ne 0} \texttt{src} (I)_c}{N} \\ \texttt{stddev} _c = \sqrt{\frac{\sum_{ I: \; \texttt{mask}(I) \ne 0} \left ( \texttt{src} (I)_c - \texttt{mean} _c \right )^2}{N}} \end{array}\f]
+When all the mask elements are 0's, the function returns
+mean=stddev=Scalar::all(0).
+@note The calculated standard deviation is only the diagonal of the
+complete normalized covariance matrix. If the full matrix is needed, you
+can reshape the multi-channel array M x N to the single-channel array
+M\*N x mtx.channels() (only possible when the matrix is continuous) and
+then pass the matrix to calcCovarMatrix .
+@param src input array that should have from 1 to 4 channels so that the results can be stored in
+Scalar_ 's.
+@param mean output parameter: calculated mean value.
+@param stddev output parameter: calculated standard deviation.
+@param mask optional operation mask.
+@sa countNonZero, mean, norm, minMaxLoc, calcCovarMatrix
+*/
+CV_EXPORTS_W void meanStdDev(InputArray src, OutputArray mean, OutputArray stddev,
+ InputArray mask=noArray());
+
+/** @brief Calculates the absolute norm of an array.
+
+This version of #norm calculates the absolute norm of src1. The type of norm to calculate is specified using #NormTypes.
+
+As an example for one array, consider the function \f$r(x)= \begin{pmatrix} x \\ 1-x \end{pmatrix}, x \in [-1;1]\f$.
+The \f$ L_{1}, L_{2} \f$ and \f$ L_{\infty} \f$ norm for the sample value \f$r(-1) = \begin{pmatrix} -1 \\ 2 \end{pmatrix}\f$
+is calculated as follows
+\f{align*}
+ \| r(-1) \|_{L_1} &= |-1| + |2| = 3 \\
+ \| r(-1) \|_{L_2} &= \sqrt{(-1)^{2} + (2)^{2}} = \sqrt{5} \\
+ \| r(-1) \|_{L_\infty} &= \max(|-1|,|2|) = 2
+\f}
+and for \f$r(0.5) = \begin{pmatrix} 0.5 \\ 0.5 \end{pmatrix}\f$ the calculation is
+\f{align*}
+ \| r(0.5) \|_{L_1} &= |0.5| + |0.5| = 1 \\
+ \| r(0.5) \|_{L_2} &= \sqrt{(0.5)^{2} + (0.5)^{2}} = \sqrt{0.5} \\
+ \| r(0.5) \|_{L_\infty} &= \max(|0.5|,|0.5|) = 0.5.
+\f}
+The following graphic shows all values for the three norm functions \f$\| r(x) \|_{L_1}, \| r(x) \|_{L_2}\f$ and \f$\| r(x) \|_{L_\infty}\f$.
+It is notable that the \f$ L_{1} \f$ norm forms the upper and the \f$ L_{\infty} \f$ norm forms the lower border for the example function \f$ r(x) \f$.
+![Graphs for the different norm functions from the above example](pics/NormTypes_OneArray_1-2-INF.png)
+
+When the mask parameter is specified and it is not empty, the norm is
+calculated only over the region specified by the mask.
+
+If normType is not specified, #NORM_L2 is used.
+
+Multi-channel input arrays are treated as single-channel arrays, that is,
+the results for all channels are combined.
+
+Hamming norms can only be calculated with CV_8U depth arrays.
+
+@param src1 first input array.
+@param normType type of the norm (see #NormTypes).
+@param mask optional operation mask; it must have the same size as src1 and CV_8UC1 type.
+*/
+CV_EXPORTS_W double norm(InputArray src1, int normType = NORM_L2, InputArray mask = noArray());
+
+/** @brief Calculates an absolute difference norm or a relative difference norm.
+
+This version of cv::norm calculates the absolute difference norm
+or the relative difference norm of arrays src1 and src2.
+The type of norm to calculate is specified using #NormTypes.
+
+@param src1 first input array.
+@param src2 second input array of the same size and the same type as src1.
+@param normType type of the norm (see #NormTypes).
+@param mask optional operation mask; it must have the same size as src1 and CV_8UC1 type.
+*/
+CV_EXPORTS_W double norm(InputArray src1, InputArray src2,
+ int normType = NORM_L2, InputArray mask = noArray());
+/** @overload
+@param src first input array.
+@param normType type of the norm (see #NormTypes).
+*/
+CV_EXPORTS double norm( const SparseMat& src, int normType );
+
+/** @brief Computes the Peak Signal-to-Noise Ratio (PSNR) image quality metric.
+
+This function calculates the Peak Signal-to-Noise Ratio (PSNR) image quality metric in decibels (dB), between two input arrays src1 and src2. Arrays must have depth CV_8U.
+
+The PSNR is calculated as follows:
+
+\f[
+\texttt{PSNR} = 10 \cdot \log_{10}{\left( \frac{R^2}{MSE} \right) }
+\f]
+
+where R is the maximum integer value of depth CV_8U (255) and MSE is the mean squared error between the two arrays.
+
+@param src1 first input array.
+@param src2 second input array of the same size as src1.
+
+ */
+CV_EXPORTS_W double PSNR(InputArray src1, InputArray src2);
+
+/** @brief naive nearest neighbor finder
+
+see http://en.wikipedia.org/wiki/Nearest_neighbor_search
+@todo document
+ */
+CV_EXPORTS_W void batchDistance(InputArray src1, InputArray src2,
+ OutputArray dist, int dtype, OutputArray nidx,
+ int normType = NORM_L2, int K = 0,
+ InputArray mask = noArray(), int update = 0,
+ bool crosscheck = false);
+
+/** @brief Normalizes the norm or value range of an array.
+
+The function cv::normalize scales and shifts the input array elements so that
+\f[\| \texttt{dst} \| _{L_p}= \texttt{alpha}\f]
+(where p=Inf, 1 or 2) when normType=NORM_INF, NORM_L1, or NORM_L2, respectively; or so that
+\f[\min _I \texttt{dst} (I)= \texttt{alpha} , \, \, \max _I \texttt{dst} (I)= \texttt{beta}\f]
+
+when normType=NORM_MINMAX (for dense arrays only). The optional mask specifies a sub-array to be
+normalized. This means that the norm or min-n-max are calculated over the sub-array, and then this
+sub-array is modified to be normalized. If you want to only use the mask to calculate the norm or
+min-max but modify the whole array, you can use norm and Mat::convertTo.
+
+In case of sparse matrices, only the non-zero values are analyzed and transformed. Because of this,
+the range transformation for sparse matrices is not allowed since it can shift the zero level.
+
+Possible usage with some positive example data:
+@code{.cpp}
+    vector<double> positiveData = { 2.0, 8.0, 10.0 };
+    vector<double> normalizedData_l1, normalizedData_l2, normalizedData_inf, normalizedData_minmax;
+
+    // Norm to probability (total count)
+    // sum(numbers) = 20.0
+    // 2.0      0.1     (2.0/20.0)
+    // 8.0      0.4     (8.0/20.0)
+    // 10.0     0.5     (10.0/20.0)
+    normalize(positiveData, normalizedData_l1, 1.0, 0.0, NORM_L1);
+
+    // Norm to unit vector: ||positiveData|| = 1.0
+    // 2.0      0.15
+    // 8.0      0.62
+    // 10.0     0.77
+    normalize(positiveData, normalizedData_l2, 1.0, 0.0, NORM_L2);
+
+    // Norm to max element
+    // 2.0      0.2     (2.0/10.0)
+    // 8.0      0.8     (8.0/10.0)
+    // 10.0     1.0     (10.0/10.0)
+    normalize(positiveData, normalizedData_inf, 1.0, 0.0, NORM_INF);
+
+    // Norm to range [0.0;1.0]
+    // 2.0      0.0     (shift to left border)
+    // 8.0      0.75    (6.0/8.0)
+    // 10.0     1.0     (shift to right border)
+    normalize(positiveData, normalizedData_minmax, 1.0, 0.0, NORM_MINMAX);
+@endcode
+
+@param src input array.
+@param dst output array of the same size as src .
+@param alpha norm value to normalize to or the lower range boundary in case of the range
+normalization.
+@param beta upper range boundary in case of the range normalization; it is not used for the norm
+normalization.
+@param norm_type normalization type (see cv::NormTypes).
+@param dtype when negative, the output array has the same type as src; otherwise, it has the same
+number of channels as src and the depth =CV_MAT_DEPTH(dtype).
+@param mask optional operation mask.
+@sa norm, Mat::convertTo, SparseMat::convertTo
+*/
+CV_EXPORTS_W void normalize( InputArray src, InputOutputArray dst, double alpha = 1, double beta = 0,
+                             int norm_type = NORM_L2, int dtype = -1, InputArray mask = noArray());
+
+/** @overload
+@param src input array.
+@param dst output array of the same size as src .
+@param alpha norm value to normalize to or the lower range boundary in case of the range
+normalization.
+@param normType normalization type (see cv::NormTypes).
+*/
+CV_EXPORTS void normalize( const SparseMat& src, SparseMat& dst, double alpha, int normType );
+
+/** @brief Finds the global minimum and maximum in an array.
+
+The function cv::minMaxLoc finds the minimum and maximum element values and their positions. The
+extremums are searched across the whole array or, if mask is not an empty array, in the specified
+array region.
+
+The function does not work with multi-channel arrays. If you need to find minimum or maximum
+elements across all the channels, use Mat::reshape first to reinterpret the array as
+single-channel.
Or you may extract the particular channel using either extractImageCOI , or +mixChannels , or split . +@param src input single-channel array. +@param minVal pointer to the returned minimum value; NULL is used if not required. +@param maxVal pointer to the returned maximum value; NULL is used if not required. +@param minLoc pointer to the returned minimum location (in 2D case); NULL is used if not required. +@param maxLoc pointer to the returned maximum location (in 2D case); NULL is used if not required. +@param mask optional mask used to select a sub-array. +@sa max, min, compare, inRange, extractImageCOI, mixChannels, split, Mat::reshape +*/ +CV_EXPORTS_W void minMaxLoc(InputArray src, CV_OUT double* minVal, + CV_OUT double* maxVal = 0, CV_OUT Point* minLoc = 0, + CV_OUT Point* maxLoc = 0, InputArray mask = noArray()); + + +/** @brief Finds the global minimum and maximum in an array + +The function cv::minMaxIdx finds the minimum and maximum element values and their positions. The +extremums are searched across the whole array or, if mask is not an empty array, in the specified +array region. The function does not work with multi-channel arrays. If you need to find minimum or +maximum elements across all the channels, use Mat::reshape first to reinterpret the array as +single-channel. Or you may extract the particular channel using either extractImageCOI , or +mixChannels , or split . In case of a sparse matrix, the minimum is found among non-zero elements +only. +@note When minIdx is not NULL, it must have at least 2 elements (as well as maxIdx), even if src is +a single-row or single-column matrix. In OpenCV (following MATLAB) each array has at least 2 +dimensions, i.e. single-column matrix is Mx1 matrix (and therefore minIdx/maxIdx will be +(i1,0)/(i2,0)) and single-row matrix is 1xN matrix (and therefore minIdx/maxIdx will be +(0,j1)/(0,j2)). +@param src input single-channel array. +@param minVal pointer to the returned minimum value; NULL is used if not required. +@param maxVal pointer to the returned maximum value; NULL is used if not required. +@param minIdx pointer to the returned minimum location (in nD case); NULL is used if not required; +Otherwise, it must point to an array of src.dims elements, the coordinates of the minimum element +in each dimension are stored there sequentially. +@param maxIdx pointer to the returned maximum location (in nD case). NULL is used if not required. +@param mask specified array region +*/ +CV_EXPORTS void minMaxIdx(InputArray src, double* minVal, double* maxVal = 0, + int* minIdx = 0, int* maxIdx = 0, InputArray mask = noArray()); + +/** @overload +@param a input single-channel array. +@param minVal pointer to the returned minimum value; NULL is used if not required. +@param maxVal pointer to the returned maximum value; NULL is used if not required. +@param minIdx pointer to the returned minimum location (in nD case); NULL is used if not required; +Otherwise, it must point to an array of src.dims elements, the coordinates of the minimum element +in each dimension are stored there sequentially. +@param maxIdx pointer to the returned maximum location (in nD case). NULL is used if not required. +*/ +CV_EXPORTS void minMaxLoc(const SparseMat& a, double* minVal, + double* maxVal, int* minIdx = 0, int* maxIdx = 0); + +/** @brief Reduces a matrix to a vector. 
+ +The function #reduce reduces the matrix to a vector by treating the matrix rows/columns as a set of +1D vectors and performing the specified operation on the vectors until a single row/column is +obtained. For example, the function can be used to compute horizontal and vertical projections of a +raster image. In case of #REDUCE_MAX and #REDUCE_MIN , the output image should have the same type as the source one. +In case of #REDUCE_SUM and #REDUCE_AVG , the output may have a larger element bit-depth to preserve accuracy. +And multi-channel arrays are also supported in these two reduction modes. + +The following code demonstrates its usage for a single channel matrix. +@snippet snippets/core_reduce.cpp example + +And the following code demonstrates its usage for a two-channel matrix. +@snippet snippets/core_reduce.cpp example2 + +@param src input 2D matrix. +@param dst output vector. Its size and type is defined by dim and dtype parameters. +@param dim dimension index along which the matrix is reduced. 0 means that the matrix is reduced to +a single row. 1 means that the matrix is reduced to a single column. +@param rtype reduction operation that could be one of #ReduceTypes +@param dtype when negative, the output vector will have the same type as the input matrix, +otherwise, its type will be CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), src.channels()). +@sa repeat +*/ +CV_EXPORTS_W void reduce(InputArray src, OutputArray dst, int dim, int rtype, int dtype = -1); + +/** @brief Creates one multi-channel array out of several single-channel ones. + +The function cv::merge merges several arrays to make a single multi-channel array. That is, each +element of the output array will be a concatenation of the elements of the input arrays, where +elements of i-th input array are treated as mv[i].channels()-element vectors. + +The function cv::split does the reverse operation. If you need to shuffle channels in some other +advanced way, use cv::mixChannels. + +The following example shows how to merge 3 single channel matrices into a single 3-channel matrix. +@snippet snippets/core_merge.cpp example + +@param mv input array of matrices to be merged; all the matrices in mv must have the same +size and the same depth. +@param count number of input matrices when mv is a plain C array; it must be greater than zero. +@param dst output array of the same size and the same depth as mv[0]; The number of channels will +be equal to the parameter count. +@sa mixChannels, split, Mat::reshape +*/ +CV_EXPORTS void merge(const Mat* mv, size_t count, OutputArray dst); + +/** @overload +@param mv input vector of matrices to be merged; all the matrices in mv must have the same +size and the same depth. +@param dst output array of the same size and the same depth as mv[0]; The number of channels will +be the total number of channels in the matrix array. + */ +CV_EXPORTS_W void merge(InputArrayOfArrays mv, OutputArray dst); + +/** @brief Divides a multi-channel array into several single-channel arrays. + +The function cv::split splits a multi-channel array into separate single-channel arrays: +\f[\texttt{mv} [c](I) = \texttt{src} (I)_c\f] +If you need to extract a single channel or do some other sophisticated channel permutation, use +mixChannels . + +The following example demonstrates how to split a 3-channel matrix into 3 single channel matrices. +@snippet snippets/core_split.cpp example + +@param src input multi-channel array. 
+@param mvbegin output array; the number of arrays must match src.channels(); the arrays themselves are +reallocated, if needed. +@sa merge, mixChannels, cvtColor +*/ +CV_EXPORTS void split(const Mat& src, Mat* mvbegin); + +/** @overload +@param m input multi-channel array. +@param mv output vector of arrays; the arrays themselves are reallocated, if needed. +*/ +CV_EXPORTS_W void split(InputArray m, OutputArrayOfArrays mv); + +/** @brief Copies specified channels from input arrays to the specified channels of +output arrays. + +The function cv::mixChannels provides an advanced mechanism for shuffling image channels. + +cv::split,cv::merge,cv::extractChannel,cv::insertChannel and some forms of cv::cvtColor are partial cases of cv::mixChannels. + +In the example below, the code splits a 4-channel BGRA image into a 3-channel BGR (with B and R +channels swapped) and a separate alpha-channel image: +@code{.cpp} + Mat bgra( 100, 100, CV_8UC4, Scalar(255,0,0,255) ); + Mat bgr( bgra.rows, bgra.cols, CV_8UC3 ); + Mat alpha( bgra.rows, bgra.cols, CV_8UC1 ); + + // forming an array of matrices is a quite efficient operation, + // because the matrix data is not copied, only the headers + Mat out[] = { bgr, alpha }; + // bgra[0] -> bgr[2], bgra[1] -> bgr[1], + // bgra[2] -> bgr[0], bgra[3] -> alpha[0] + int from_to[] = { 0,2, 1,1, 2,0, 3,3 }; + mixChannels( &bgra, 1, out, 2, from_to, 4 ); +@endcode +@note Unlike many other new-style C++ functions in OpenCV (see the introduction section and +Mat::create ), cv::mixChannels requires the output arrays to be pre-allocated before calling the +function. +@param src input array or vector of matrices; all of the matrices must have the same size and the +same depth. +@param nsrcs number of matrices in `src`. +@param dst output array or vector of matrices; all the matrices **must be allocated**; their size and +depth must be the same as in `src[0]`. +@param ndsts number of matrices in `dst`. +@param fromTo array of index pairs specifying which channels are copied and where; fromTo[k\*2] is +a 0-based index of the input channel in src, fromTo[k\*2+1] is an index of the output channel in +dst; the continuous channel numbering is used: the first input image channels are indexed from 0 to +src[0].channels()-1, the second input image channels are indexed from src[0].channels() to +src[0].channels() + src[1].channels()-1, and so on, the same scheme is used for the output image +channels; as a special case, when fromTo[k\*2] is negative, the corresponding output channel is +filled with zero . +@param npairs number of index pairs in `fromTo`. +@sa split, merge, extractChannel, insertChannel, cvtColor +*/ +CV_EXPORTS void mixChannels(const Mat* src, size_t nsrcs, Mat* dst, size_t ndsts, + const int* fromTo, size_t npairs); + +/** @overload +@param src input array or vector of matrices; all of the matrices must have the same size and the +same depth. +@param dst output array or vector of matrices; all the matrices **must be allocated**; their size and +depth must be the same as in src[0]. 
+@param fromTo array of index pairs specifying which channels are copied and where; fromTo[k\*2] is
+a 0-based index of the input channel in src, fromTo[k\*2+1] is an index of the output channel in
+dst; the continuous channel numbering is used: the first input image channels are indexed from 0 to
+src[0].channels()-1, the second input image channels are indexed from src[0].channels() to
+src[0].channels() + src[1].channels()-1, and so on, the same scheme is used for the output image
+channels; as a special case, when fromTo[k\*2] is negative, the corresponding output channel is
+filled with zero .
+@param npairs number of index pairs in fromTo.
+*/
+CV_EXPORTS void mixChannels(InputArrayOfArrays src, InputOutputArrayOfArrays dst,
+                            const int* fromTo, size_t npairs);
+
+/** @overload
+@param src input array or vector of matrices; all of the matrices must have the same size and the
+same depth.
+@param dst output array or vector of matrices; all the matrices **must be allocated**; their size and
+depth must be the same as in src[0].
+@param fromTo array of index pairs specifying which channels are copied and where; fromTo[k\*2] is
+a 0-based index of the input channel in src, fromTo[k\*2+1] is an index of the output channel in
+dst; the continuous channel numbering is used: the first input image channels are indexed from 0 to
+src[0].channels()-1, the second input image channels are indexed from src[0].channels() to
+src[0].channels() + src[1].channels()-1, and so on, the same scheme is used for the output image
+channels; as a special case, when fromTo[k\*2] is negative, the corresponding output channel is
+filled with zero .
+*/
+CV_EXPORTS_W void mixChannels(InputArrayOfArrays src, InputOutputArrayOfArrays dst,
+                              const std::vector<int>& fromTo);
+
+/** @brief Extracts a single channel from src (coi is 0-based index)
+@param src input array
+@param dst output array
+@param coi index of channel to extract
+@sa mixChannels, split
+*/
+CV_EXPORTS_W void extractChannel(InputArray src, OutputArray dst, int coi);
+
+/** @brief Inserts a single channel to dst (coi is 0-based index)
+@param src input array
+@param dst output array
+@param coi index of channel for insertion
+@sa mixChannels, merge
+*/
+CV_EXPORTS_W void insertChannel(InputArray src, InputOutputArray dst, int coi);
+
+/** @brief Flips a 2D array around vertical, horizontal, or both axes.
+
+The function cv::flip flips the array in one of three different ways (row
+and column indices are 0-based):
+\f[\texttt{dst} _{ij} =
+\left\{
+\begin{array}{l l}
+\texttt{src} _{\texttt{src.rows}-i-1,j} & if\; \texttt{flipCode} = 0 \\
+\texttt{src} _{i, \texttt{src.cols} -j-1} & if\; \texttt{flipCode} > 0 \\
+\texttt{src} _{ \texttt{src.rows} -i-1, \texttt{src.cols} -j-1} & if\; \texttt{flipCode} < 0 \\
+\end{array}
+\right.\f]
+The example scenarios of using the function are the following:
+*   Vertical flipping of the image (flipCode == 0) to switch between
+    top-left and bottom-left image origin. This is a typical operation
+    in video processing on Microsoft Windows\* OS.
+*   Horizontal flipping of the image with the subsequent horizontal
+    shift and absolute difference calculation to check for a
+    vertical-axis symmetry (flipCode \> 0).
+*   Simultaneous horizontal and vertical flipping of the image with
+    the subsequent shift and absolute difference calculation to check
+    for a central symmetry (flipCode \< 0).
+*   Reversing the order of point arrays (flipCode \> 0 or
+    flipCode == 0).
+@param src input array.
+@param dst output array of the same size and type as src.
+@param flipCode a flag to specify how to flip the array; 0 means
+flipping around the x-axis and positive value (for example, 1) means
+flipping around y-axis. Negative value (for example, -1) means flipping
+around both axes.
+@sa transpose , repeat , completeSymm
+*/
+CV_EXPORTS_W void flip(InputArray src, OutputArray dst, int flipCode);
+
+enum RotateFlags {
+    ROTATE_90_CLOCKWISE = 0, //!< Rotate 90 degrees clockwise
+    ROTATE_180 = 1, //!< Rotate 180 degrees clockwise
+    ROTATE_90_COUNTERCLOCKWISE = 2, //!< Rotate 270 degrees clockwise
+};
+/** @brief Rotates a 2D array in multiples of 90 degrees.
+The function cv::rotate rotates the array in one of three different ways:
+*   Rotate by 90 degrees clockwise (rotateCode = ROTATE_90_CLOCKWISE).
+*   Rotate by 180 degrees clockwise (rotateCode = ROTATE_180).
+*   Rotate by 270 degrees clockwise (rotateCode = ROTATE_90_COUNTERCLOCKWISE).
+@param src input array.
+@param dst output array of the same type as src. The size is the same with ROTATE_180,
+and the rows and cols are switched for ROTATE_90_CLOCKWISE and ROTATE_90_COUNTERCLOCKWISE.
+@param rotateCode an enum to specify how to rotate the array; see the enum #RotateFlags
+@sa transpose , repeat , completeSymm, flip, RotateFlags
+*/
+CV_EXPORTS_W void rotate(InputArray src, OutputArray dst, int rotateCode);
+
+/** @brief Applies horizontal concatenation to given matrices.
+
+The function horizontally concatenates two or more cv::Mat matrices (with the same number of rows).
+@code{.cpp}
+    cv::Mat matArray[] = { cv::Mat(4, 1, CV_8UC1, cv::Scalar(1)),
+                           cv::Mat(4, 1, CV_8UC1, cv::Scalar(2)),
+                           cv::Mat(4, 1, CV_8UC1, cv::Scalar(3)),};
+
+    cv::Mat out;
+    cv::hconcat( matArray, 3, out );
+    //out:
+    //[1, 2, 3;
+    // 1, 2, 3;
+    // 1, 2, 3;
+    // 1, 2, 3]
+@endcode
+@param src input array or vector of matrices. all of the matrices must have the same number of rows and the same depth.
+@param nsrc number of matrices in src.
+@param dst output array. It has the same number of rows and depth as the src, and the sum of cols of the src.
+@sa cv::vconcat(const Mat*, size_t, OutputArray), @sa cv::vconcat(InputArrayOfArrays, OutputArray) and @sa cv::vconcat(InputArray, InputArray, OutputArray)
+*/
+CV_EXPORTS void hconcat(const Mat* src, size_t nsrc, OutputArray dst);
+/** @overload
+ @code{.cpp}
+    cv::Mat_<float> A = (cv::Mat_<float>(3, 2) << 1, 4,
+                                                  2, 5,
+                                                  3, 6);
+    cv::Mat_<float> B = (cv::Mat_<float>(3, 2) << 7, 10,
+                                                  8, 11,
+                                                  9, 12);
+
+    cv::Mat C;
+    cv::hconcat(A, B, C);
+    //C:
+    //[1, 4, 7, 10;
+    // 2, 5, 8, 11;
+    // 3, 6, 9, 12]
+ @endcode
+ @param src1 first input array to be considered for horizontal concatenation.
+ @param src2 second input array to be considered for horizontal concatenation.
+ @param dst output array. It has the same number of rows and depth as the src1 and src2, and the sum of cols of the src1 and src2.
+ */
+CV_EXPORTS void hconcat(InputArray src1, InputArray src2, OutputArray dst);
+/** @overload
+ @code{.cpp}
+    std::vector<cv::Mat> matrices = { cv::Mat(4, 1, CV_8UC1, cv::Scalar(1)),
+                                      cv::Mat(4, 1, CV_8UC1, cv::Scalar(2)),
+                                      cv::Mat(4, 1, CV_8UC1, cv::Scalar(3)),};
+
+    cv::Mat out;
+    cv::hconcat( matrices, out );
+    //out:
+    //[1, 2, 3;
+    // 1, 2, 3;
+    // 1, 2, 3;
+    // 1, 2, 3]
+ @endcode
+ @param src input array or vector of matrices. all of the matrices must have the same number of rows and the same depth.
+ @param dst output array. It has the same number of rows and depth as the src, and the sum of cols of the src.
+ */
+CV_EXPORTS_W void hconcat(InputArrayOfArrays src, OutputArray dst);
+
+/** @brief Applies vertical concatenation to given matrices.
+
+The function vertically concatenates two or more cv::Mat matrices (with the same number of cols).
+@code{.cpp}
+    cv::Mat matArray[] = { cv::Mat(1, 4, CV_8UC1, cv::Scalar(1)),
+                           cv::Mat(1, 4, CV_8UC1, cv::Scalar(2)),
+                           cv::Mat(1, 4, CV_8UC1, cv::Scalar(3)),};
+
+    cv::Mat out;
+    cv::vconcat( matArray, 3, out );
+    //out:
+    //[1, 1, 1, 1;
+    // 2, 2, 2, 2;
+    // 3, 3, 3, 3]
+@endcode
+@param src input array or vector of matrices. all of the matrices must have the same number of cols and the same depth.
+@param nsrc number of matrices in src.
+@param dst output array. It has the same number of cols and depth as the src, and the sum of rows of the src.
+@sa cv::hconcat(const Mat*, size_t, OutputArray), @sa cv::hconcat(InputArrayOfArrays, OutputArray) and @sa cv::hconcat(InputArray, InputArray, OutputArray)
+*/
+CV_EXPORTS void vconcat(const Mat* src, size_t nsrc, OutputArray dst);
+/** @overload
+ @code{.cpp}
+    cv::Mat_<float> A = (cv::Mat_<float>(3, 2) << 1, 7,
+                                                  2, 8,
+                                                  3, 9);
+    cv::Mat_<float> B = (cv::Mat_<float>(3, 2) << 4, 10,
+                                                  5, 11,
+                                                  6, 12);
+
+    cv::Mat C;
+    cv::vconcat(A, B, C);
+    //C:
+    //[1, 7;
+    // 2, 8;
+    // 3, 9;
+    // 4, 10;
+    // 5, 11;
+    // 6, 12]
+ @endcode
+ @param src1 first input array to be considered for vertical concatenation.
+ @param src2 second input array to be considered for vertical concatenation.
+ @param dst output array. It has the same number of cols and depth as the src1 and src2, and the sum of rows of the src1 and src2.
+ */
+CV_EXPORTS void vconcat(InputArray src1, InputArray src2, OutputArray dst);
+/** @overload
+ @code{.cpp}
+    std::vector<cv::Mat> matrices = { cv::Mat(1, 4, CV_8UC1, cv::Scalar(1)),
+                                      cv::Mat(1, 4, CV_8UC1, cv::Scalar(2)),
+                                      cv::Mat(1, 4, CV_8UC1, cv::Scalar(3)),};
+
+    cv::Mat out;
+    cv::vconcat( matrices, out );
+    //out:
+    //[1, 1, 1, 1;
+    // 2, 2, 2, 2;
+    // 3, 3, 3, 3]
+ @endcode
+ @param src input array or vector of matrices. all of the matrices must have the same number of cols and the same depth.
+ @param dst output array. It has the same number of cols and depth as the src, and the sum of rows of the src.
+ */
+CV_EXPORTS_W void vconcat(InputArrayOfArrays src, OutputArray dst);
+
+/** @brief computes bitwise conjunction of the two arrays (dst = src1 & src2)
+Calculates the per-element bit-wise conjunction of two arrays or an
+array and a scalar.
+
+The function cv::bitwise_and calculates the per-element bit-wise logical conjunction for:
+*   Two arrays when src1 and src2 have the same size:
+    \f[\texttt{dst} (I) = \texttt{src1} (I) \wedge \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f]
+*   An array and a scalar when src2 is constructed from Scalar or has
+    the same number of elements as `src1.channels()`:
+    \f[\texttt{dst} (I) = \texttt{src1} (I) \wedge \texttt{src2} \quad \texttt{if mask} (I) \ne0\f]
+*   A scalar and an array when src1 is constructed from Scalar or has
+    the same number of elements as `src2.channels()`:
+    \f[\texttt{dst} (I) = \texttt{src1} \wedge \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f]
+In case of floating-point arrays, their machine-specific bit
+representations (usually IEEE754-compliant) are used for the operation.
+In case of multi-channel arrays, each channel is processed
+independently. In the second and third cases above, the scalar is first
+converted to the array type.
+@param src1 first input array or a scalar.
+@param src2 second input array or a scalar.
+@param dst output array that has the same size and type as the input
+arrays.
+@param mask optional operation mask, 8-bit single channel array, that
+specifies elements of the output array to be changed.
+*/
+CV_EXPORTS_W void bitwise_and(InputArray src1, InputArray src2,
+                              OutputArray dst, InputArray mask = noArray());
+
+/** @brief Calculates the per-element bit-wise disjunction of two arrays or an
+array and a scalar.
+
+The function cv::bitwise_or calculates the per-element bit-wise logical disjunction for:
+*   Two arrays when src1 and src2 have the same size:
+    \f[\texttt{dst} (I) = \texttt{src1} (I) \vee \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f]
+*   An array and a scalar when src2 is constructed from Scalar or has
+    the same number of elements as `src1.channels()`:
+    \f[\texttt{dst} (I) = \texttt{src1} (I) \vee \texttt{src2} \quad \texttt{if mask} (I) \ne0\f]
+*   A scalar and an array when src1 is constructed from Scalar or has
+    the same number of elements as `src2.channels()`:
+    \f[\texttt{dst} (I) = \texttt{src1} \vee \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f]
+In case of floating-point arrays, their machine-specific bit
+representations (usually IEEE754-compliant) are used for the operation.
+In case of multi-channel arrays, each channel is processed
+independently. In the second and third cases above, the scalar is first
+converted to the array type.
+@param src1 first input array or a scalar.
+@param src2 second input array or a scalar.
+@param dst output array that has the same size and type as the input
+arrays.
+@param mask optional operation mask, 8-bit single channel array, that +specifies elements of the output array to be changed. +*/ +CV_EXPORTS_W void bitwise_or(InputArray src1, InputArray src2, + OutputArray dst, InputArray mask = noArray()); + +/** @brief Calculates the per-element bit-wise "exclusive or" operation on two +arrays or an array and a scalar. + +The function cv::bitwise_xor calculates the per-element bit-wise logical "exclusive-or" +operation for: +* Two arrays when src1 and src2 have the same size: + \f[\texttt{dst} (I) = \texttt{src1} (I) \oplus \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f] +* An array and a scalar when src2 is constructed from Scalar or has + the same number of elements as `src1.channels()`: + \f[\texttt{dst} (I) = \texttt{src1} (I) \oplus \texttt{src2} \quad \texttt{if mask} (I) \ne0\f] +* A scalar and an array when src1 is constructed from Scalar or has + the same number of elements as `src2.channels()`: + \f[\texttt{dst} (I) = \texttt{src1} \oplus \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0\f] +In case of floating-point arrays, their machine-specific bit +representations (usually IEEE754-compliant) are used for the operation. +In case of multi-channel arrays, each channel is processed +independently. In the 2nd and 3rd cases above, the scalar is first +converted to the array type. +@param src1 first input array or a scalar. +@param src2 second input array or a scalar. +@param dst output array that has the same size and type as the input +arrays. +@param mask optional operation mask, 8-bit single channel array, that +specifies elements of the output array to be changed. +*/ +CV_EXPORTS_W void bitwise_xor(InputArray src1, InputArray src2, + OutputArray dst, InputArray mask = noArray()); + +/** @brief Inverts every bit of an array. + +The function cv::bitwise_not calculates per-element bit-wise inversion of the input +array: +\f[\texttt{dst} (I) = \neg \texttt{src} (I)\f] +In case of a floating-point input array, its machine-specific bit +representation (usually IEEE754-compliant) is used for the operation. In +case of multi-channel arrays, each channel is processed independently. +@param src input array. +@param dst output array that has the same size and type as the input +array. +@param mask optional operation mask, 8-bit single channel array, that +specifies elements of the output array to be changed. +*/ +CV_EXPORTS_W void bitwise_not(InputArray src, OutputArray dst, + InputArray mask = noArray()); + +/** @brief Calculates the per-element absolute difference between two arrays or between an array and a scalar. + +The function cv::absdiff calculates: +* Absolute difference between two arrays when they have the same + size and type: + \f[\texttt{dst}(I) = \texttt{saturate} (| \texttt{src1}(I) - \texttt{src2}(I)|)\f] +* Absolute difference between an array and a scalar when the second + array is constructed from Scalar or has as many elements as the + number of channels in `src1`: + \f[\texttt{dst}(I) = \texttt{saturate} (| \texttt{src1}(I) - \texttt{src2} |)\f] +* Absolute difference between a scalar and an array when the first + array is constructed from Scalar or has as many elements as the + number of channels in `src2`: + \f[\texttt{dst}(I) = \texttt{saturate} (| \texttt{src1} - \texttt{src2}(I) |)\f] + where I is a multi-dimensional index of array elements. In case of + multi-channel arrays, each channel is processed independently. +@note Saturation is not applied when the arrays have the depth CV_32S. 
+You may even get a negative value in the case of overflow. +@param src1 first input array or a scalar. +@param src2 second input array or a scalar. +@param dst output array that has the same size and type as input arrays. +@sa cv::abs(const Mat&) +*/ +CV_EXPORTS_W void absdiff(InputArray src1, InputArray src2, OutputArray dst); + +/** @brief Checks if array elements lie between the elements of two other arrays. + +The function checks the range as follows: +- For every element of a single-channel input array: + \f[\texttt{dst} (I)= \texttt{lowerb} (I)_0 \leq \texttt{src} (I)_0 \leq \texttt{upperb} (I)_0\f] +- For two-channel arrays: + \f[\texttt{dst} (I)= \texttt{lowerb} (I)_0 \leq \texttt{src} (I)_0 \leq \texttt{upperb} (I)_0 \land \texttt{lowerb} (I)_1 \leq \texttt{src} (I)_1 \leq \texttt{upperb} (I)_1\f] +- and so forth. + +That is, dst (I) is set to 255 (all 1 -bits) if src (I) is within the +specified 1D, 2D, 3D, ... box and 0 otherwise. + +When the lower and/or upper boundary parameters are scalars, the indexes +(I) at lowerb and upperb in the above formulas should be omitted. +@param src first input array. +@param lowerb inclusive lower boundary array or a scalar. +@param upperb inclusive upper boundary array or a scalar. +@param dst output array of the same size as src and CV_8U type. +*/ +CV_EXPORTS_W void inRange(InputArray src, InputArray lowerb, + InputArray upperb, OutputArray dst); + +/** @brief Performs the per-element comparison of two arrays or an array and scalar value. + +The function compares: +* Elements of two arrays when src1 and src2 have the same size: + \f[\texttt{dst} (I) = \texttt{src1} (I) \,\texttt{cmpop}\, \texttt{src2} (I)\f] +* Elements of src1 with a scalar src2 when src2 is constructed from + Scalar or has a single element: + \f[\texttt{dst} (I) = \texttt{src1}(I) \,\texttt{cmpop}\, \texttt{src2}\f] +* src1 with elements of src2 when src1 is constructed from Scalar or + has a single element: + \f[\texttt{dst} (I) = \texttt{src1} \,\texttt{cmpop}\, \texttt{src2} (I)\f] +When the comparison result is true, the corresponding element of output +array is set to 255. The comparison operations can be replaced with the +equivalent matrix expressions: +@code{.cpp} + Mat dst1 = src1 >= src2; + Mat dst2 = src1 < 8; + ... +@endcode +@param src1 first input array or a scalar; when it is an array, it must have a single channel. +@param src2 second input array or a scalar; when it is an array, it must have a single channel. +@param dst output array of type ref CV_8U that has the same size and the same number of channels as + the input arrays. +@param cmpop a flag, that specifies correspondence between the arrays (cv::CmpTypes) +@sa checkRange, min, max, threshold +*/ +CV_EXPORTS_W void compare(InputArray src1, InputArray src2, OutputArray dst, int cmpop); + +/** @brief Calculates per-element minimum of two arrays or an array and a scalar. + +The function cv::min calculates the per-element minimum of two arrays: +\f[\texttt{dst} (I)= \min ( \texttt{src1} (I), \texttt{src2} (I))\f] +or array and a scalar: +\f[\texttt{dst} (I)= \min ( \texttt{src1} (I), \texttt{value} )\f] +@param src1 first input array. +@param src2 second input array of the same size and type as src1. +@param dst output array of the same size and type as src1. 
+@sa max, compare, inRange, minMaxLoc +*/ +CV_EXPORTS_W void min(InputArray src1, InputArray src2, OutputArray dst); +/** @overload +needed to avoid conflicts with const _Tp& std::min(const _Tp&, const _Tp&, _Compare) +*/ +CV_EXPORTS void min(const Mat& src1, const Mat& src2, Mat& dst); +/** @overload +needed to avoid conflicts with const _Tp& std::min(const _Tp&, const _Tp&, _Compare) +*/ +CV_EXPORTS void min(const UMat& src1, const UMat& src2, UMat& dst); + +/** @brief Calculates per-element maximum of two arrays or an array and a scalar. + +The function cv::max calculates the per-element maximum of two arrays: +\f[\texttt{dst} (I)= \max ( \texttt{src1} (I), \texttt{src2} (I))\f] +or array and a scalar: +\f[\texttt{dst} (I)= \max ( \texttt{src1} (I), \texttt{value} )\f] +@param src1 first input array. +@param src2 second input array of the same size and type as src1 . +@param dst output array of the same size and type as src1. +@sa min, compare, inRange, minMaxLoc, @ref MatrixExpressions +*/ +CV_EXPORTS_W void max(InputArray src1, InputArray src2, OutputArray dst); +/** @overload +needed to avoid conflicts with const _Tp& std::min(const _Tp&, const _Tp&, _Compare) +*/ +CV_EXPORTS void max(const Mat& src1, const Mat& src2, Mat& dst); +/** @overload +needed to avoid conflicts with const _Tp& std::min(const _Tp&, const _Tp&, _Compare) +*/ +CV_EXPORTS void max(const UMat& src1, const UMat& src2, UMat& dst); + +/** @brief Calculates a square root of array elements. + +The function cv::sqrt calculates a square root of each input array element. +In case of multi-channel arrays, each channel is processed +independently. The accuracy is approximately the same as of the built-in +std::sqrt . +@param src input floating-point array. +@param dst output array of the same size and type as src. +*/ +CV_EXPORTS_W void sqrt(InputArray src, OutputArray dst); + +/** @brief Raises every array element to a power. + +The function cv::pow raises every element of the input array to power : +\f[\texttt{dst} (I) = \fork{\texttt{src}(I)^{power}}{if \(\texttt{power}\) is integer}{|\texttt{src}(I)|^{power}}{otherwise}\f] + +So, for a non-integer power exponent, the absolute values of input array +elements are used. However, it is possible to get true values for +negative values using some extra operations. In the example below, +computing the 5th root of array src shows: +@code{.cpp} + Mat mask = src < 0; + pow(src, 1./5, dst); + subtract(Scalar::all(0), dst, dst, mask); +@endcode +For some values of power, such as integer values, 0.5 and -0.5, +specialized faster algorithms are used. + +Special values (NaN, Inf) are not handled. +@param src input array. +@param power exponent of power. +@param dst output array of the same size and type as src. +@sa sqrt, exp, log, cartToPolar, polarToCart +*/ +CV_EXPORTS_W void pow(InputArray src, double power, OutputArray dst); + +/** @brief Calculates the exponent of every array element. + +The function cv::exp calculates the exponent of every element of the input +array: +\f[\texttt{dst} [I] = e^{ src(I) }\f] + +The maximum relative error is about 7e-6 for single-precision input and +less than 1e-10 for double-precision input. Currently, the function +converts denormalized values to zeros on output. Special values (NaN, +Inf) are not handled. +@param src input array. +@param dst output array of the same size and type as src. 
+@sa log , cartToPolar , polarToCart , phase , pow , sqrt , magnitude +*/ +CV_EXPORTS_W void exp(InputArray src, OutputArray dst); + +/** @brief Calculates the natural logarithm of every array element. + +The function cv::log calculates the natural logarithm of every element of the input array: +\f[\texttt{dst} (I) = \log (\texttt{src}(I)) \f] + +Output on zero, negative and special (NaN, Inf) values is undefined. + +@param src input array. +@param dst output array of the same size and type as src . +@sa exp, cartToPolar, polarToCart, phase, pow, sqrt, magnitude +*/ +CV_EXPORTS_W void log(InputArray src, OutputArray dst); + +/** @brief Calculates x and y coordinates of 2D vectors from their magnitude and angle. + +The function cv::polarToCart calculates the Cartesian coordinates of each 2D +vector represented by the corresponding elements of magnitude and angle: +\f[\begin{array}{l} \texttt{x} (I) = \texttt{magnitude} (I) \cos ( \texttt{angle} (I)) \\ \texttt{y} (I) = \texttt{magnitude} (I) \sin ( \texttt{angle} (I)) \\ \end{array}\f] + +The relative accuracy of the estimated coordinates is about 1e-6. +@param magnitude input floating-point array of magnitudes of 2D vectors; +it can be an empty matrix (=Mat()), in this case, the function assumes +that all the magnitudes are =1; if it is not empty, it must have the +same size and type as angle. +@param angle input floating-point array of angles of 2D vectors. +@param x output array of x-coordinates of 2D vectors; it has the same +size and type as angle. +@param y output array of y-coordinates of 2D vectors; it has the same +size and type as angle. +@param angleInDegrees when true, the input angles are measured in +degrees, otherwise, they are measured in radians. +@sa cartToPolar, magnitude, phase, exp, log, pow, sqrt +*/ +CV_EXPORTS_W void polarToCart(InputArray magnitude, InputArray angle, + OutputArray x, OutputArray y, bool angleInDegrees = false); + +/** @brief Calculates the magnitude and angle of 2D vectors. + +The function cv::cartToPolar calculates either the magnitude, angle, or both +for every 2D vector (x(I),y(I)): +\f[\begin{array}{l} \texttt{magnitude} (I)= \sqrt{\texttt{x}(I)^2+\texttt{y}(I)^2} , \\ \texttt{angle} (I)= \texttt{atan2} ( \texttt{y} (I), \texttt{x} (I))[ \cdot180 / \pi ] \end{array}\f] + +The angles are calculated with accuracy about 0.3 degrees. For the point +(0,0), the angle is set to 0. +@param x array of x-coordinates; this must be a single-precision or +double-precision floating-point array. +@param y array of y-coordinates, that must have the same size and same type as x. +@param magnitude output array of magnitudes of the same size and type as x. +@param angle output array of angles that has the same size and type as +x; the angles are measured in radians (from 0 to 2\*Pi) or in degrees (0 to 360 degrees). +@param angleInDegrees a flag, indicating whether the angles are measured +in radians (which is by default), or in degrees. +@sa Sobel, Scharr +*/ +CV_EXPORTS_W void cartToPolar(InputArray x, InputArray y, + OutputArray magnitude, OutputArray angle, + bool angleInDegrees = false); + +/** @brief Calculates the rotation angle of 2D vectors. + +The function cv::phase calculates the rotation angle of each 2D vector that +is formed from the corresponding elements of x and y : +\f[\texttt{angle} (I) = \texttt{atan2} ( \texttt{y} (I), \texttt{x} (I))\f] + +The angle estimation accuracy is about 0.3 degrees. When x(I)=y(I)=0 , +the corresponding angle(I) is set to 0. 
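+
+As a rough illustration (the sample matrices below are only an example, not part of the
+interface), four unit vectors pointing along +x, +y, -x and -y give angles of 0, 90, 180
+and 270 degrees:
+@code{.cpp}
+    // x and y components of four 2D vectors
+    Mat x = (Mat_<float>(1, 4) <<  1.f,  0.f, -1.f,  0.f);
+    Mat y = (Mat_<float>(1, 4) <<  0.f,  1.f,  0.f, -1.f);
+
+    Mat angle;
+    phase(x, y, angle, true);          // request angles in degrees
+    // angle now holds approximately [0, 90, 180, 270]
+@endcode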
+@param x input floating-point array of x-coordinates of 2D vectors. +@param y input array of y-coordinates of 2D vectors; it must have the +same size and the same type as x. +@param angle output array of vector angles; it has the same size and +same type as x . +@param angleInDegrees when true, the function calculates the angle in +degrees, otherwise, they are measured in radians. +*/ +CV_EXPORTS_W void phase(InputArray x, InputArray y, OutputArray angle, + bool angleInDegrees = false); + +/** @brief Calculates the magnitude of 2D vectors. + +The function cv::magnitude calculates the magnitude of 2D vectors formed +from the corresponding elements of x and y arrays: +\f[\texttt{dst} (I) = \sqrt{\texttt{x}(I)^2 + \texttt{y}(I)^2}\f] +@param x floating-point array of x-coordinates of the vectors. +@param y floating-point array of y-coordinates of the vectors; it must +have the same size as x. +@param magnitude output array of the same size and type as x. +@sa cartToPolar, polarToCart, phase, sqrt +*/ +CV_EXPORTS_W void magnitude(InputArray x, InputArray y, OutputArray magnitude); + +/** @brief Checks every element of an input array for invalid values. + +The function cv::checkRange checks that every array element is neither NaN nor infinite. When minVal \> +-DBL_MAX and maxVal \< DBL_MAX, the function also checks that each value is between minVal and +maxVal. In case of multi-channel arrays, each channel is processed independently. If some values +are out of range, position of the first outlier is stored in pos (when pos != NULL). Then, the +function either returns false (when quiet=true) or throws an exception. +@param a input array. +@param quiet a flag, indicating whether the functions quietly return false when the array elements +are out of range or they throw an exception. +@param pos optional output parameter, when not NULL, must be a pointer to array of src.dims +elements. +@param minVal inclusive lower boundary of valid values range. +@param maxVal exclusive upper boundary of valid values range. +*/ +CV_EXPORTS_W bool checkRange(InputArray a, bool quiet = true, CV_OUT Point* pos = 0, + double minVal = -DBL_MAX, double maxVal = DBL_MAX); + +/** @brief converts NaNs to the given number +@param a input/output matrix (CV_32F type). +@param val value to convert the NaNs +*/ +CV_EXPORTS_W void patchNaNs(InputOutputArray a, double val = 0); + +/** @brief Performs generalized matrix multiplication. + +The function cv::gemm performs generalized matrix multiplication similar to the +gemm functions in BLAS level 3. For example, +`gemm(src1, src2, alpha, src3, beta, dst, GEMM_1_T + GEMM_3_T)` +corresponds to +\f[\texttt{dst} = \texttt{alpha} \cdot \texttt{src1} ^T \cdot \texttt{src2} + \texttt{beta} \cdot \texttt{src3} ^T\f] + +In case of complex (two-channel) data, performed a complex matrix +multiplication. + +The function can be replaced with a matrix expression. For example, the +above call can be replaced with: +@code{.cpp} + dst = alpha*src1.t()*src2 + beta*src3.t(); +@endcode +@param src1 first multiplied input matrix that could be real(CV_32FC1, +CV_64FC1) or complex(CV_32FC2, CV_64FC2). +@param src2 second multiplied input matrix of the same type as src1. +@param alpha weight of the matrix product. +@param src3 third optional delta matrix added to the matrix product; it +should have the same type as src1 and src2. +@param beta weight of src3. +@param dst output matrix; it has the proper size and the same type as +input matrices. 
+@param flags operation flags (cv::GemmFlags) +@sa mulTransposed , transform +*/ +CV_EXPORTS_W void gemm(InputArray src1, InputArray src2, double alpha, + InputArray src3, double beta, OutputArray dst, int flags = 0); + +/** @brief Calculates the product of a matrix and its transposition. + +The function cv::mulTransposed calculates the product of src and its +transposition: +\f[\texttt{dst} = \texttt{scale} ( \texttt{src} - \texttt{delta} )^T ( \texttt{src} - \texttt{delta} )\f] +if aTa=true , and +\f[\texttt{dst} = \texttt{scale} ( \texttt{src} - \texttt{delta} ) ( \texttt{src} - \texttt{delta} )^T\f] +otherwise. The function is used to calculate the covariance matrix. With +zero delta, it can be used as a faster substitute for general matrix +product A\*B when B=A' +@param src input single-channel matrix. Note that unlike gemm, the +function can multiply not only floating-point matrices. +@param dst output square matrix. +@param aTa Flag specifying the multiplication ordering. See the +description below. +@param delta Optional delta matrix subtracted from src before the +multiplication. When the matrix is empty ( delta=noArray() ), it is +assumed to be zero, that is, nothing is subtracted. If it has the same +size as src , it is simply subtracted. Otherwise, it is "repeated" (see +repeat ) to cover the full src and then subtracted. Type of the delta +matrix, when it is not empty, must be the same as the type of created +output matrix. See the dtype parameter description below. +@param scale Optional scale factor for the matrix product. +@param dtype Optional type of the output matrix. When it is negative, +the output matrix will have the same type as src . Otherwise, it will be +type=CV_MAT_DEPTH(dtype) that should be either CV_32F or CV_64F . +@sa calcCovarMatrix, gemm, repeat, reduce +*/ +CV_EXPORTS_W void mulTransposed( InputArray src, OutputArray dst, bool aTa, + InputArray delta = noArray(), + double scale = 1, int dtype = -1 ); + +/** @brief Transposes a matrix. + +The function cv::transpose transposes the matrix src : +\f[\texttt{dst} (i,j) = \texttt{src} (j,i)\f] +@note No complex conjugation is done in case of a complex matrix. It +should be done separately if needed. +@param src input array. +@param dst output array of the same type as src. +*/ +CV_EXPORTS_W void transpose(InputArray src, OutputArray dst); + +/** @brief Performs the matrix transformation of every array element. + +The function cv::transform performs the matrix transformation of every +element of the array src and stores the results in dst : +\f[\texttt{dst} (I) = \texttt{m} \cdot \texttt{src} (I)\f] +(when m.cols=src.channels() ), or +\f[\texttt{dst} (I) = \texttt{m} \cdot [ \texttt{src} (I); 1]\f] +(when m.cols=src.channels()+1 ) + +Every element of the N -channel array src is interpreted as N -element +vector that is transformed using the M x N or M x (N+1) matrix m to +M-element vector - the corresponding element of the output array dst . + +The function may be used for geometrical transformation of +N -dimensional points, arbitrary linear color space transformation (such +as various kinds of RGB to YUV transforms), shuffling the image +channels, and so forth. +@param src input array that must have as many channels (1 to 4) as +m.cols or m.cols-1. +@param dst output array of the same size and depth as src; it has as +many channels as m.rows. +@param m transformation 2x2 or 2x3 floating-point matrix. 
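+@note A minimal sketch of the point-transformation use case (the data below is illustrative only):
+@code{.cpp}
+    // three 2D points stored as a 2-channel array
+    std::vector<Point2f> pts = { Point2f(0,0), Point2f(1,0), Point2f(0,1) };
+    // 2x3 matrix: scale by 2 and translate by (10, 20)
+    Matx23f m(2, 0, 10,
+              0, 2, 20);
+    std::vector<Point2f> dst;
+    transform(pts, dst, m);
+    // dst contains (10,20), (12,20), (10,22)
+@endcode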
+@sa perspectiveTransform, getAffineTransform, estimateAffine2D, warpAffine, warpPerspective +*/ +CV_EXPORTS_W void transform(InputArray src, OutputArray dst, InputArray m ); + +/** @brief Performs the perspective matrix transformation of vectors. + +The function cv::perspectiveTransform transforms every element of src by +treating it as a 2D or 3D vector, in the following way: +\f[(x, y, z) \rightarrow (x'/w, y'/w, z'/w)\f] +where +\f[(x', y', z', w') = \texttt{mat} \cdot \begin{bmatrix} x & y & z & 1 \end{bmatrix}\f] +and +\f[w = \fork{w'}{if \(w' \ne 0\)}{\infty}{otherwise}\f] + +Here a 3D vector transformation is shown. In case of a 2D vector +transformation, the z component is omitted. + +@note The function transforms a sparse set of 2D or 3D vectors. If you +want to transform an image using perspective transformation, use +warpPerspective . If you have an inverse problem, that is, you want to +compute the most probable perspective transformation out of several +pairs of corresponding points, you can use getPerspectiveTransform or +findHomography . +@param src input two-channel or three-channel floating-point array; each +element is a 2D/3D vector to be transformed. +@param dst output array of the same size and type as src. +@param m 3x3 or 4x4 floating-point transformation matrix. +@sa transform, warpPerspective, getPerspectiveTransform, findHomography +*/ +CV_EXPORTS_W void perspectiveTransform(InputArray src, OutputArray dst, InputArray m ); + +/** @brief Copies the lower or the upper half of a square matrix to its another half. + +The function cv::completeSymm copies the lower or the upper half of a square matrix to +its another half. The matrix diagonal remains unchanged: + - \f$\texttt{m}_{ij}=\texttt{m}_{ji}\f$ for \f$i > j\f$ if + lowerToUpper=false + - \f$\texttt{m}_{ij}=\texttt{m}_{ji}\f$ for \f$i < j\f$ if + lowerToUpper=true + +@param m input-output floating-point square matrix. +@param lowerToUpper operation flag; if true, the lower half is copied to +the upper half. Otherwise, the upper half is copied to the lower half. +@sa flip, transpose +*/ +CV_EXPORTS_W void completeSymm(InputOutputArray m, bool lowerToUpper = false); + +/** @brief Initializes a scaled identity matrix. + +The function cv::setIdentity initializes a scaled identity matrix: +\f[\texttt{mtx} (i,j)= \fork{\texttt{value}}{ if \(i=j\)}{0}{otherwise}\f] + +The function can also be emulated using the matrix initializers and the +matrix expressions: +@code + Mat A = Mat::eye(4, 3, CV_32F)*5; + // A will be set to [[5, 0, 0], [0, 5, 0], [0, 0, 5], [0, 0, 0]] +@endcode +@param mtx matrix to initialize (not necessarily square). +@param s value to assign to diagonal elements. +@sa Mat::zeros, Mat::ones, Mat::setTo, Mat::operator= +*/ +CV_EXPORTS_W void setIdentity(InputOutputArray mtx, const Scalar& s = Scalar(1)); + +/** @brief Returns the determinant of a square floating-point matrix. + +The function cv::determinant calculates and returns the determinant of the +specified matrix. For small matrices ( mtx.cols=mtx.rows\<=3 ), the +direct method is used. For larger matrices, the function uses LU +factorization with partial pivoting. + +For symmetric positively-determined matrices, it is also possible to use +eigen decomposition to calculate the determinant. +@param mtx input matrix that must have CV_32FC1 or CV_64FC1 type and +square size. +@sa trace, invert, solve, eigen, @ref MatrixExpressions +*/ +CV_EXPORTS_W double determinant(InputArray mtx); + +/** @brief Returns the trace of a matrix. 
+ +The function cv::trace returns the sum of the diagonal elements of the +matrix mtx . +\f[\mathrm{tr} ( \texttt{mtx} ) = \sum _i \texttt{mtx} (i,i)\f] +@param mtx input matrix. +*/ +CV_EXPORTS_W Scalar trace(InputArray mtx); + +/** @brief Finds the inverse or pseudo-inverse of a matrix. + +The function cv::invert inverts the matrix src and stores the result in dst +. When the matrix src is singular or non-square, the function calculates +the pseudo-inverse matrix (the dst matrix) so that norm(src\*dst - I) is +minimal, where I is an identity matrix. + +In case of the #DECOMP_LU method, the function returns non-zero value if +the inverse has been successfully calculated and 0 if src is singular. + +In case of the #DECOMP_SVD method, the function returns the inverse +condition number of src (the ratio of the smallest singular value to the +largest singular value) and 0 if src is singular. The SVD method +calculates a pseudo-inverse matrix if src is singular. + +Similarly to #DECOMP_LU, the method #DECOMP_CHOLESKY works only with +non-singular square matrices that should also be symmetrical and +positively defined. In this case, the function stores the inverted +matrix in dst and returns non-zero. Otherwise, it returns 0. + +@param src input floating-point M x N matrix. +@param dst output matrix of N x M size and the same type as src. +@param flags inversion method (cv::DecompTypes) +@sa solve, SVD +*/ +CV_EXPORTS_W double invert(InputArray src, OutputArray dst, int flags = DECOMP_LU); + +/** @brief Solves one or more linear systems or least-squares problems. + +The function cv::solve solves a linear system or least-squares problem (the +latter is possible with SVD or QR methods, or by specifying the flag +#DECOMP_NORMAL ): +\f[\texttt{dst} = \arg \min _X \| \texttt{src1} \cdot \texttt{X} - \texttt{src2} \|\f] + +If #DECOMP_LU or #DECOMP_CHOLESKY method is used, the function returns 1 +if src1 (or \f$\texttt{src1}^T\texttt{src1}\f$ ) is non-singular. Otherwise, +it returns 0. In the latter case, dst is not valid. Other methods find a +pseudo-solution in case of a singular left-hand side part. + +@note If you want to find a unity-norm solution of an under-defined +singular system \f$\texttt{src1}\cdot\texttt{dst}=0\f$ , the function solve +will not do the work. Use SVD::solveZ instead. + +@param src1 input matrix on the left-hand side of the system. +@param src2 input matrix on the right-hand side of the system. +@param dst output solution. +@param flags solution (matrix inversion) method (#DecompTypes) +@sa invert, SVD, eigen +*/ +CV_EXPORTS_W bool solve(InputArray src1, InputArray src2, + OutputArray dst, int flags = DECOMP_LU); + +/** @brief Sorts each row or each column of a matrix. + +The function cv::sort sorts each matrix row or each matrix column in +ascending or descending order. So you should pass two operation flags to +get desired behaviour. If you want to sort matrix rows or columns +lexicographically, you can use STL std::sort generic function with the +proper comparison predicate. + +@param src input single-channel array. +@param dst output array of the same size and type as src. +@param flags operation flags, a combination of #SortFlags +@sa sortIdx, randShuffle +*/ +CV_EXPORTS_W void sort(InputArray src, OutputArray dst, int flags); + +/** @brief Sorts each row or each column of a matrix. + +The function cv::sortIdx sorts each matrix row or each matrix column in the +ascending or descending order. So you should pass two operation flags to +get desired behaviour. 
Instead of reordering the elements themselves, it
+stores the indices of sorted elements in the output array. For example:
+@code
+    Mat A = Mat::eye(3,3,CV_32F), B;
+    sortIdx(A, B, SORT_EVERY_ROW + SORT_ASCENDING);
+    // B will probably contain
+    // (because of equal elements in A some permutations are possible):
+    // [[1, 2, 0], [0, 2, 1], [0, 1, 2]]
+@endcode
+@param src input single-channel array.
+@param dst output integer array of the same size as src.
+@param flags operation flags that could be a combination of cv::SortFlags
+@sa sort, randShuffle
+*/
+CV_EXPORTS_W void sortIdx(InputArray src, OutputArray dst, int flags);
+
+/** @brief Finds the real roots of a cubic equation.
+
+The function solveCubic finds the real roots of a cubic equation:
+- if coeffs is a 4-element vector:
+\f[\texttt{coeffs} [0] x^3 + \texttt{coeffs} [1] x^2 + \texttt{coeffs} [2] x + \texttt{coeffs} [3] = 0\f]
+- if coeffs is a 3-element vector:
+\f[x^3 + \texttt{coeffs} [0] x^2 + \texttt{coeffs} [1] x + \texttt{coeffs} [2] = 0\f]
+
+The roots are stored in the roots array.
+@param coeffs equation coefficients, an array of 3 or 4 elements.
+@param roots output array of real roots that has 1 or 3 elements.
+@return number of real roots. It can be 0, 1 or 2.
+*/
+CV_EXPORTS_W int solveCubic(InputArray coeffs, OutputArray roots);
+
+/** @brief Finds the real or complex roots of a polynomial equation.
+
+The function cv::solvePoly finds real and complex roots of a polynomial equation:
+\f[\texttt{coeffs} [n] x^{n} + \texttt{coeffs} [n-1] x^{n-1} + ... + \texttt{coeffs} [1] x + \texttt{coeffs} [0] = 0\f]
+@param coeffs array of polynomial coefficients.
+@param roots output (complex) array of roots.
+@param maxIters maximum number of iterations the algorithm does.
+*/
+CV_EXPORTS_W double solvePoly(InputArray coeffs, OutputArray roots, int maxIters = 300);
+
+/** @brief Calculates eigenvalues and eigenvectors of a symmetric matrix.
+
+The function cv::eigen calculates just eigenvalues, or eigenvalues and eigenvectors of the symmetric
+matrix src:
+@code
+    src*eigenvectors.row(i).t() = eigenvalues.at<srcType>(i)*eigenvectors.row(i).t()
+@endcode
+
+@note Use cv::eigenNonSymmetric for calculation of real eigenvalues and eigenvectors of non-symmetric matrix.
+
+@param src input matrix that must have CV_32FC1 or CV_64FC1 type, square size and be symmetrical
+(src ^T^ == src).
+@param eigenvalues output vector of eigenvalues of the same type as src; the eigenvalues are stored
+in the descending order.
+@param eigenvectors output matrix of eigenvectors; it has the same size and type as src; the
+eigenvectors are stored as subsequent matrix rows, in the same order as the corresponding
+eigenvalues.
+@sa eigenNonSymmetric, completeSymm , PCA
+*/
+CV_EXPORTS_W bool eigen(InputArray src, OutputArray eigenvalues,
+                        OutputArray eigenvectors = noArray());
+
+/** @brief Calculates eigenvalues and eigenvectors of a non-symmetric matrix (real eigenvalues only).
+
+@note Assumes real eigenvalues.
+
+The function calculates eigenvalues and eigenvectors (optional) of the square matrix src:
+@code
+    src*eigenvectors.row(i).t() = eigenvalues.at<srcType>(i)*eigenvectors.row(i).t()
+@endcode
+
+@param src input matrix (CV_32FC1 or CV_64FC1 type).
+@param eigenvalues output vector of eigenvalues (type is the same type as src).
+@param eigenvectors output matrix of eigenvectors (type is the same type as src). The eigenvectors are stored as subsequent matrix rows, in the same order as the corresponding eigenvalues.
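+@note A small sketch (the matrix values are only an example): for an upper-triangular input the
+eigenvalues are its diagonal elements.
+@code{.cpp}
+    Mat A = (Mat_<double>(2, 2) << 3, 1,
+                                   0, 2);
+    Mat evals, evecs;
+    eigenNonSymmetric(A, evals, evecs);
+    // evals holds 3 and 2 (the ordering may differ); evecs stores the
+    // corresponding eigenvectors as its rows
+@endcode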
+@sa eigen +*/ +CV_EXPORTS_W void eigenNonSymmetric(InputArray src, OutputArray eigenvalues, + OutputArray eigenvectors); + +/** @brief Calculates the covariance matrix of a set of vectors. + +The function cv::calcCovarMatrix calculates the covariance matrix and, optionally, the mean vector of +the set of input vectors. +@param samples samples stored as separate matrices +@param nsamples number of samples +@param covar output covariance matrix of the type ctype and square size. +@param mean input or output (depending on the flags) array as the average value of the input vectors. +@param flags operation flags as a combination of #CovarFlags +@param ctype type of the matrixl; it equals 'CV_64F' by default. +@sa PCA, mulTransposed, Mahalanobis +@todo InputArrayOfArrays +*/ +CV_EXPORTS void calcCovarMatrix( const Mat* samples, int nsamples, Mat& covar, Mat& mean, + int flags, int ctype = CV_64F); + +/** @overload +@note use #COVAR_ROWS or #COVAR_COLS flag +@param samples samples stored as rows/columns of a single matrix. +@param covar output covariance matrix of the type ctype and square size. +@param mean input or output (depending on the flags) array as the average value of the input vectors. +@param flags operation flags as a combination of #CovarFlags +@param ctype type of the matrixl; it equals 'CV_64F' by default. +*/ +CV_EXPORTS_W void calcCovarMatrix( InputArray samples, OutputArray covar, + InputOutputArray mean, int flags, int ctype = CV_64F); + +/** wrap PCA::operator() */ +CV_EXPORTS_W void PCACompute(InputArray data, InputOutputArray mean, + OutputArray eigenvectors, int maxComponents = 0); + +/** wrap PCA::operator() and add eigenvalues output parameter */ +CV_EXPORTS_AS(PCACompute2) void PCACompute(InputArray data, InputOutputArray mean, + OutputArray eigenvectors, OutputArray eigenvalues, + int maxComponents = 0); + +/** wrap PCA::operator() */ +CV_EXPORTS_W void PCACompute(InputArray data, InputOutputArray mean, + OutputArray eigenvectors, double retainedVariance); + +/** wrap PCA::operator() and add eigenvalues output parameter */ +CV_EXPORTS_AS(PCACompute2) void PCACompute(InputArray data, InputOutputArray mean, + OutputArray eigenvectors, OutputArray eigenvalues, + double retainedVariance); + +/** wrap PCA::project */ +CV_EXPORTS_W void PCAProject(InputArray data, InputArray mean, + InputArray eigenvectors, OutputArray result); + +/** wrap PCA::backProject */ +CV_EXPORTS_W void PCABackProject(InputArray data, InputArray mean, + InputArray eigenvectors, OutputArray result); + +/** wrap SVD::compute */ +CV_EXPORTS_W void SVDecomp( InputArray src, OutputArray w, OutputArray u, OutputArray vt, int flags = 0 ); + +/** wrap SVD::backSubst */ +CV_EXPORTS_W void SVBackSubst( InputArray w, InputArray u, InputArray vt, + InputArray rhs, OutputArray dst ); + +/** @brief Calculates the Mahalanobis distance between two vectors. + +The function cv::Mahalanobis calculates and returns the weighted distance between two vectors: +\f[d( \texttt{vec1} , \texttt{vec2} )= \sqrt{\sum_{i,j}{\texttt{icovar(i,j)}\cdot(\texttt{vec1}(I)-\texttt{vec2}(I))\cdot(\texttt{vec1(j)}-\texttt{vec2(j)})} }\f] +The covariance matrix may be calculated using the #calcCovarMatrix function and then inverted using +the invert function (preferably using the #DECOMP_SVD method, as the most accurate). +@param v1 first 1D input vector. +@param v2 second 1D input vector. +@param icovar inverse covariance matrix. 
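+@note A minimal sketch of the pipeline suggested above (the sample data is illustrative only):
+@code{.cpp}
+    // four 2D samples stored as rows
+    Mat samples = (Mat_<float>(4, 2) << 1, 2,
+                                        2, 3,
+                                        3, 5,
+                                        4, 7);
+    Mat covar, mu;
+    calcCovarMatrix(samples, covar, mu, COVAR_NORMAL | COVAR_ROWS | COVAR_SCALE, CV_32F);
+
+    Mat icovar;
+    invert(covar, icovar, DECOMP_SVD);      // pseudo-inverse; robust even for a singular covariance
+
+    Mat v1 = (Mat_<float>(1, 2) << 1, 2);
+    Mat v2 = (Mat_<float>(1, 2) << 4, 7);
+    double d = Mahalanobis(v1, v2, icovar); // weighted distance between the two vectors
+@endcode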
+*/ +CV_EXPORTS_W double Mahalanobis(InputArray v1, InputArray v2, InputArray icovar); + +/** @brief Performs a forward or inverse Discrete Fourier transform of a 1D or 2D floating-point array. + +The function cv::dft performs one of the following: +- Forward the Fourier transform of a 1D vector of N elements: + \f[Y = F^{(N)} \cdot X,\f] + where \f$F^{(N)}_{jk}=\exp(-2\pi i j k/N)\f$ and \f$i=\sqrt{-1}\f$ +- Inverse the Fourier transform of a 1D vector of N elements: + \f[\begin{array}{l} X'= \left (F^{(N)} \right )^{-1} \cdot Y = \left (F^{(N)} \right )^* \cdot y \\ X = (1/N) \cdot X, \end{array}\f] + where \f$F^*=\left(\textrm{Re}(F^{(N)})-\textrm{Im}(F^{(N)})\right)^T\f$ +- Forward the 2D Fourier transform of a M x N matrix: + \f[Y = F^{(M)} \cdot X \cdot F^{(N)}\f] +- Inverse the 2D Fourier transform of a M x N matrix: + \f[\begin{array}{l} X'= \left (F^{(M)} \right )^* \cdot Y \cdot \left (F^{(N)} \right )^* \\ X = \frac{1}{M \cdot N} \cdot X' \end{array}\f] + +In case of real (single-channel) data, the output spectrum of the forward Fourier transform or input +spectrum of the inverse Fourier transform can be represented in a packed format called *CCS* +(complex-conjugate-symmetrical). It was borrowed from IPL (Intel\* Image Processing Library). Here +is how 2D *CCS* spectrum looks: +\f[\begin{bmatrix} Re Y_{0,0} & Re Y_{0,1} & Im Y_{0,1} & Re Y_{0,2} & Im Y_{0,2} & \cdots & Re Y_{0,N/2-1} & Im Y_{0,N/2-1} & Re Y_{0,N/2} \\ Re Y_{1,0} & Re Y_{1,1} & Im Y_{1,1} & Re Y_{1,2} & Im Y_{1,2} & \cdots & Re Y_{1,N/2-1} & Im Y_{1,N/2-1} & Re Y_{1,N/2} \\ Im Y_{1,0} & Re Y_{2,1} & Im Y_{2,1} & Re Y_{2,2} & Im Y_{2,2} & \cdots & Re Y_{2,N/2-1} & Im Y_{2,N/2-1} & Im Y_{1,N/2} \\ \hdotsfor{9} \\ Re Y_{M/2-1,0} & Re Y_{M-3,1} & Im Y_{M-3,1} & \hdotsfor{3} & Re Y_{M-3,N/2-1} & Im Y_{M-3,N/2-1}& Re Y_{M/2-1,N/2} \\ Im Y_{M/2-1,0} & Re Y_{M-2,1} & Im Y_{M-2,1} & \hdotsfor{3} & Re Y_{M-2,N/2-1} & Im Y_{M-2,N/2-1}& Im Y_{M/2-1,N/2} \\ Re Y_{M/2,0} & Re Y_{M-1,1} & Im Y_{M-1,1} & \hdotsfor{3} & Re Y_{M-1,N/2-1} & Im Y_{M-1,N/2-1}& Re Y_{M/2,N/2} \end{bmatrix}\f] + +In case of 1D transform of a real vector, the output looks like the first row of the matrix above. + +So, the function chooses an operation mode depending on the flags and size of the input array: +- If #DFT_ROWS is set or the input array has a single row or single column, the function + performs a 1D forward or inverse transform of each row of a matrix when #DFT_ROWS is set. + Otherwise, it performs a 2D transform. +- If the input array is real and #DFT_INVERSE is not set, the function performs a forward 1D or + 2D transform: + - When #DFT_COMPLEX_OUTPUT is set, the output is a complex matrix of the same size as + input. + - When #DFT_COMPLEX_OUTPUT is not set, the output is a real matrix of the same size as + input. In case of 2D transform, it uses the packed format as shown above. In case of a + single 1D transform, it looks like the first row of the matrix above. In case of + multiple 1D transforms (when using the #DFT_ROWS flag), each row of the output matrix + looks like the first row of the matrix above. +- If the input array is complex and either #DFT_INVERSE or #DFT_REAL_OUTPUT are not set, the + output is a complex array of the same size as input. The function performs a forward or + inverse 1D or 2D transform of the whole input array or each row of the input array + independently, depending on the flags DFT_INVERSE and DFT_ROWS. 
+- When #DFT_INVERSE is set and the input array is real, or it is complex but #DFT_REAL_OUTPUT + is set, the output is a real array of the same size as input. The function performs a 1D or 2D + inverse transformation of the whole input array or each individual row, depending on the flags + #DFT_INVERSE and #DFT_ROWS. + +If #DFT_SCALE is set, the scaling is done after the transformation. + +Unlike dct , the function supports arrays of arbitrary size. But only those arrays are processed +efficiently, whose sizes can be factorized in a product of small prime numbers (2, 3, and 5 in the +current implementation). Such an efficient DFT size can be calculated using the getOptimalDFTSize +method. + +The sample below illustrates how to calculate a DFT-based convolution of two 2D real arrays: +@code + void convolveDFT(InputArray A, InputArray B, OutputArray C) + { + // reallocate the output array if needed + C.create(abs(A.rows - B.rows)+1, abs(A.cols - B.cols)+1, A.type()); + Size dftSize; + // calculate the size of DFT transform + dftSize.width = getOptimalDFTSize(A.cols + B.cols - 1); + dftSize.height = getOptimalDFTSize(A.rows + B.rows - 1); + + // allocate temporary buffers and initialize them with 0's + Mat tempA(dftSize, A.type(), Scalar::all(0)); + Mat tempB(dftSize, B.type(), Scalar::all(0)); + + // copy A and B to the top-left corners of tempA and tempB, respectively + Mat roiA(tempA, Rect(0,0,A.cols,A.rows)); + A.copyTo(roiA); + Mat roiB(tempB, Rect(0,0,B.cols,B.rows)); + B.copyTo(roiB); + + // now transform the padded A & B in-place; + // use "nonzeroRows" hint for faster processing + dft(tempA, tempA, 0, A.rows); + dft(tempB, tempB, 0, B.rows); + + // multiply the spectrums; + // the function handles packed spectrum representations well + mulSpectrums(tempA, tempB, tempA); + + // transform the product back from the frequency domain. + // Even though all the result rows will be non-zero, + // you need only the first C.rows of them, and thus you + // pass nonzeroRows == C.rows + dft(tempA, tempA, DFT_INVERSE + DFT_SCALE, C.rows); + + // now copy the result back to C. + tempA(Rect(0, 0, C.cols, C.rows)).copyTo(C); + + // all the temporary buffers will be deallocated automatically + } +@endcode +To optimize this sample, consider the following approaches: +- Since nonzeroRows != 0 is passed to the forward transform calls and since A and B are copied to + the top-left corners of tempA and tempB, respectively, it is not necessary to clear the whole + tempA and tempB. It is only necessary to clear the tempA.cols - A.cols ( tempB.cols - B.cols) + rightmost columns of the matrices. +- This DFT-based convolution does not have to be applied to the whole big arrays, especially if B + is significantly smaller than A or vice versa. Instead, you can calculate convolution by parts. + To do this, you need to split the output array C into multiple tiles. For each tile, estimate + which parts of A and B are required to calculate convolution in this tile. If the tiles in C are + too small, the speed will decrease a lot because of repeated work. In the ultimate case, when + each tile in C is a single pixel, the algorithm becomes equivalent to the naive convolution + algorithm. If the tiles are too big, the temporary arrays tempA and tempB become too big and + there is also a slowdown because of bad cache locality. So, there is an optimal tile size + somewhere in the middle. +- If different tiles in C can be calculated in parallel and, thus, the convolution is done by + parts, the loop can be threaded. 
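As one hedged sketch of the last point, the per-tile loop could be dispatched with cv::parallel_for_;
here tiles, A, B, C and convolveDFTTile are hypothetical names (convolveDFTTile would apply the
procedure above to a single tile):
@code{.cpp}
    parallel_for_(Range(0, (int)tiles.size()), [&](const Range& range) {
        for (int i = range.start; i < range.end; i++)
            convolveDFTTile(A, B, C, tiles[i]);   // hypothetical per-tile worker
    });
@endcode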
+ +All of the above improvements have been implemented in #matchTemplate and #filter2D . Therefore, by +using them, you can get the performance even better than with the above theoretically optimal +implementation. Though, those two functions actually calculate cross-correlation, not convolution, +so you need to "flip" the second convolution operand B vertically and horizontally using flip . +@note +- An example using the discrete fourier transform can be found at + opencv_source_code/samples/cpp/dft.cpp +- (Python) An example using the dft functionality to perform Wiener deconvolution can be found + at opencv_source/samples/python/deconvolution.py +- (Python) An example rearranging the quadrants of a Fourier image can be found at + opencv_source/samples/python/dft.py +@param src input array that could be real or complex. +@param dst output array whose size and type depends on the flags . +@param flags transformation flags, representing a combination of the #DftFlags +@param nonzeroRows when the parameter is not zero, the function assumes that only the first +nonzeroRows rows of the input array (#DFT_INVERSE is not set) or only the first nonzeroRows of the +output array (#DFT_INVERSE is set) contain non-zeros, thus, the function can handle the rest of the +rows more efficiently and save some time; this technique is very useful for calculating array +cross-correlation or convolution using DFT. +@sa dct , getOptimalDFTSize , mulSpectrums, filter2D , matchTemplate , flip , cartToPolar , +magnitude , phase +*/ +CV_EXPORTS_W void dft(InputArray src, OutputArray dst, int flags = 0, int nonzeroRows = 0); + +/** @brief Calculates the inverse Discrete Fourier Transform of a 1D or 2D array. + +idft(src, dst, flags) is equivalent to dft(src, dst, flags | #DFT_INVERSE) . +@note None of dft and idft scales the result by default. So, you should pass #DFT_SCALE to one of +dft or idft explicitly to make these transforms mutually inverse. +@sa dft, dct, idct, mulSpectrums, getOptimalDFTSize +@param src input floating-point real or complex array. +@param dst output array whose size and type depend on the flags. +@param flags operation flags (see dft and #DftFlags). +@param nonzeroRows number of dst rows to process; the rest of the rows have undefined content (see +the convolution sample in dft description. +*/ +CV_EXPORTS_W void idft(InputArray src, OutputArray dst, int flags = 0, int nonzeroRows = 0); + +/** @brief Performs a forward or inverse discrete Cosine transform of 1D or 2D array. + +The function cv::dct performs a forward or inverse discrete Cosine transform (DCT) of a 1D or 2D +floating-point array: +- Forward Cosine transform of a 1D vector of N elements: + \f[Y = C^{(N)} \cdot X\f] + where + \f[C^{(N)}_{jk}= \sqrt{\alpha_j/N} \cos \left ( \frac{\pi(2k+1)j}{2N} \right )\f] + and + \f$\alpha_0=1\f$, \f$\alpha_j=2\f$ for *j \> 0*. +- Inverse Cosine transform of a 1D vector of N elements: + \f[X = \left (C^{(N)} \right )^{-1} \cdot Y = \left (C^{(N)} \right )^T \cdot Y\f] + (since \f$C^{(N)}\f$ is an orthogonal matrix, \f$C^{(N)} \cdot \left(C^{(N)}\right)^T = I\f$ ) +- Forward 2D Cosine transform of M x N matrix: + \f[Y = C^{(N)} \cdot X \cdot \left (C^{(N)} \right )^T\f] +- Inverse 2D Cosine transform of M x N matrix: + \f[X = \left (C^{(N)} \right )^T \cdot X \cdot C^{(N)}\f] + +The function chooses the mode of operation by looking at the flags and size of the input array: +- If (flags & #DCT_INVERSE) == 0 , the function does a forward 1D or 2D transform. 
Otherwise, it + is an inverse 1D or 2D transform. +- If (flags & #DCT_ROWS) != 0 , the function performs a 1D transform of each row. +- If the array is a single column or a single row, the function performs a 1D transform. +- If none of the above is true, the function performs a 2D transform. + +@note Currently dct supports even-size arrays (2, 4, 6 ...). For data analysis and approximation, you +can pad the array when necessary. +Also, the function performance depends very much, and not monotonically, on the array size (see +getOptimalDFTSize ). In the current implementation DCT of a vector of size N is calculated via DFT +of a vector of size N/2 . Thus, the optimal DCT size N1 \>= N can be calculated as: +@code + size_t getOptimalDCTSize(size_t N) { return 2*getOptimalDFTSize((N+1)/2); } + N1 = getOptimalDCTSize(N); +@endcode +@param src input floating-point array. +@param dst output array of the same size and type as src . +@param flags transformation flags as a combination of cv::DftFlags (DCT_*) +@sa dft , getOptimalDFTSize , idct +*/ +CV_EXPORTS_W void dct(InputArray src, OutputArray dst, int flags = 0); + +/** @brief Calculates the inverse Discrete Cosine Transform of a 1D or 2D array. + +idct(src, dst, flags) is equivalent to dct(src, dst, flags | DCT_INVERSE). +@param src input floating-point single-channel array. +@param dst output array of the same size and type as src. +@param flags operation flags. +@sa dct, dft, idft, getOptimalDFTSize +*/ +CV_EXPORTS_W void idct(InputArray src, OutputArray dst, int flags = 0); + +/** @brief Performs the per-element multiplication of two Fourier spectrums. + +The function cv::mulSpectrums performs the per-element multiplication of the two CCS-packed or complex +matrices that are results of a real or complex Fourier transform. + +The function, together with dft and idft , may be used to calculate convolution (pass conjB=false ) +or correlation (pass conjB=true ) of two arrays rapidly. When the arrays are complex, they are +simply multiplied (per element) with an optional conjugation of the second-array elements. When the +arrays are real, they are assumed to be CCS-packed (see dft for details). +@param a first input array. +@param b second input array of the same size and type as src1 . +@param c output array of the same size and type as src1 . +@param flags operation flags; currently, the only supported flag is cv::DFT_ROWS, which indicates that +each row of src1 and src2 is an independent 1D Fourier spectrum. If you do not want to use this flag, then simply add a `0` as value. +@param conjB optional flag that conjugates the second input array before the multiplication (true) +or not (false). +*/ +CV_EXPORTS_W void mulSpectrums(InputArray a, InputArray b, OutputArray c, + int flags, bool conjB = false); + +/** @brief Returns the optimal DFT size for a given vector size. + +DFT performance is not a monotonic function of a vector size. Therefore, when you calculate +convolution of two arrays or perform the spectral analysis of an array, it usually makes sense to +pad the input data with zeros to get a bit larger array that can be transformed much faster than the +original one. Arrays whose size is a power-of-two (2, 4, 8, 16, 32, ...) are the fastest to process. +Though, the arrays whose size is a product of 2's, 3's, and 5's (for example, 300 = 5\*5\*3\*2\*2) +are also processed quite efficiently. 
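In practice, the input is usually padded up to such a size before the transform; a sketch with a
hypothetical 480x640 input:
@code{.cpp}
    Mat img = Mat::zeros(480, 640, CV_32F);                     // hypothetical real input
    int m = getOptimalDFTSize(img.rows);
    int n = getOptimalDFTSize(img.cols);
    Mat padded;
    copyMakeBorder(img, padded, 0, m - img.rows, 0, n - img.cols,
                   BORDER_CONSTANT, Scalar::all(0));
    Mat spectrum;
    dft(padded, spectrum, DFT_COMPLEX_OUTPUT);
@endcode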
+ +The function cv::getOptimalDFTSize returns the minimum number N that is greater than or equal to vecsize +so that the DFT of a vector of size N can be processed efficiently. In the current implementation N += 2 ^p^ \* 3 ^q^ \* 5 ^r^ for some integer p, q, r. + +The function returns a negative number if vecsize is too large (very close to INT_MAX ). + +While the function cannot be used directly to estimate the optimal vector size for DCT transform +(since the current DCT implementation supports only even-size vectors), it can be easily processed +as getOptimalDFTSize((vecsize+1)/2)\*2. +@param vecsize vector size. +@sa dft , dct , idft , idct , mulSpectrums +*/ +CV_EXPORTS_W int getOptimalDFTSize(int vecsize); + +/** @brief Returns the default random number generator. + +The function cv::theRNG returns the default random number generator. For each thread, there is a +separate random number generator, so you can use the function safely in multi-thread environments. +If you just need to get a single random number using this generator or initialize an array, you can +use randu or randn instead. But if you are going to generate many random numbers inside a loop, it +is much faster to use this function to retrieve the generator and then use RNG::operator _Tp() . +@sa RNG, randu, randn +*/ +CV_EXPORTS RNG& theRNG(); + +/** @brief Sets state of default random number generator. + +The function cv::setRNGSeed sets state of default random number generator to custom value. +@param seed new state for default random number generator +@sa RNG, randu, randn +*/ +CV_EXPORTS_W void setRNGSeed(int seed); + +/** @brief Generates a single uniformly-distributed random number or an array of random numbers. + +Non-template variant of the function fills the matrix dst with uniformly-distributed +random numbers from the specified range: +\f[\texttt{low} _c \leq \texttt{dst} (I)_c < \texttt{high} _c\f] +@param dst output array of random numbers; the array must be pre-allocated. +@param low inclusive lower boundary of the generated random numbers. +@param high exclusive upper boundary of the generated random numbers. +@sa RNG, randn, theRNG +*/ +CV_EXPORTS_W void randu(InputOutputArray dst, InputArray low, InputArray high); + +/** @brief Fills the array with normally distributed random numbers. + +The function cv::randn fills the matrix dst with normally distributed random numbers with the specified +mean vector and the standard deviation matrix. The generated random numbers are clipped to fit the +value range of the output array data type. +@param dst output array of random numbers; the array must be pre-allocated and have 1 to 4 channels. +@param mean mean value (expectation) of the generated random numbers. +@param stddev standard deviation of the generated random numbers; it can be either a vector (in +which case a diagonal standard deviation matrix is assumed) or a square matrix. +@sa RNG, randu +*/ +CV_EXPORTS_W void randn(InputOutputArray dst, InputArray mean, InputArray stddev); + +/** @brief Shuffles the array elements randomly. + +The function cv::randShuffle shuffles the specified 1D array by randomly choosing pairs of elements and +swapping them. The number of such swap operations will be dst.rows\*dst.cols\*iterFactor . +@param dst input/output numerical 1D array. +@param iterFactor scale factor that determines the number of random swap operations (see the details +below). +@param rng optional random number generator used for shuffling; if it is zero, theRNG () is used +instead. 
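A minimal sketch:
@code{.cpp}
    std::vector<int> idx = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
    randShuffle(idx);                        // in-place shuffle using theRNG()
@endcode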
+@sa RNG, sort +*/ +CV_EXPORTS_W void randShuffle(InputOutputArray dst, double iterFactor = 1., RNG* rng = 0); + +/** @brief Principal Component Analysis + +The class is used to calculate a special basis for a set of vectors. The +basis will consist of eigenvectors of the covariance matrix calculated +from the input set of vectors. The class %PCA can also transform +vectors to/from the new coordinate space defined by the basis. Usually, +in this new coordinate system, each vector from the original set (and +any linear combination of such vectors) can be quite accurately +approximated by taking its first few components, corresponding to the +eigenvectors of the largest eigenvalues of the covariance matrix. +Geometrically it means that you calculate a projection of the vector to +a subspace formed by a few eigenvectors corresponding to the dominant +eigenvalues of the covariance matrix. And usually such a projection is +very close to the original vector. So, you can represent the original +vector from a high-dimensional space with a much shorter vector +consisting of the projected vector's coordinates in the subspace. Such a +transformation is also known as Karhunen-Loeve Transform, or KLT. +See http://en.wikipedia.org/wiki/Principal_component_analysis + +The sample below is the function that takes two matrices. The first +function stores a set of vectors (a row per vector) that is used to +calculate PCA. The second function stores another "test" set of vectors +(a row per vector). First, these vectors are compressed with PCA, then +reconstructed back, and then the reconstruction error norm is computed +and printed for each vector. : + +@code{.cpp} +using namespace cv; + +PCA compressPCA(const Mat& pcaset, int maxComponents, + const Mat& testset, Mat& compressed) +{ + PCA pca(pcaset, // pass the data + Mat(), // we do not have a pre-computed mean vector, + // so let the PCA engine to compute it + PCA::DATA_AS_ROW, // indicate that the vectors + // are stored as matrix rows + // (use PCA::DATA_AS_COL if the vectors are + // the matrix columns) + maxComponents // specify, how many principal components to retain + ); + // if there is no test data, just return the computed basis, ready-to-use + if( !testset.data ) + return pca; + CV_Assert( testset.cols == pcaset.cols ); + + compressed.create(testset.rows, maxComponents, testset.type()); + + Mat reconstructed; + for( int i = 0; i < testset.rows; i++ ) + { + Mat vec = testset.row(i), coeffs = compressed.row(i), reconstructed; + // compress the vector, the result will be stored + // in the i-th row of the output matrix + pca.project(vec, coeffs); + // and then reconstruct it + pca.backProject(coeffs, reconstructed); + // and measure the error + printf("%d. diff = %g\n", i, norm(vec, reconstructed, NORM_L2)); + } + return pca; +} +@endcode +@sa calcCovarMatrix, mulTransposed, SVD, dft, dct +*/ +class CV_EXPORTS PCA +{ +public: + enum Flags { DATA_AS_ROW = 0, //!< indicates that the input samples are stored as matrix rows + DATA_AS_COL = 1, //!< indicates that the input samples are stored as matrix columns + USE_AVG = 2 //! + }; + + /** @brief default constructor + + The default constructor initializes an empty %PCA structure. The other + constructors initialize the structure and call PCA::operator()(). + */ + PCA(); + + /** @overload + @param data input samples stored as matrix rows or matrix columns. + @param mean optional mean value; if the matrix is empty (@c noArray()), + the mean is computed from the data. 
+ @param flags operation flags; currently the parameter is only used to + specify the data layout (PCA::Flags) + @param maxComponents maximum number of components that %PCA should + retain; by default, all the components are retained. + */ + PCA(InputArray data, InputArray mean, int flags, int maxComponents = 0); + + /** @overload + @param data input samples stored as matrix rows or matrix columns. + @param mean optional mean value; if the matrix is empty (noArray()), + the mean is computed from the data. + @param flags operation flags; currently the parameter is only used to + specify the data layout (PCA::Flags) + @param retainedVariance Percentage of variance that PCA should retain. + Using this parameter will let the PCA decided how many components to + retain but it will always keep at least 2. + */ + PCA(InputArray data, InputArray mean, int flags, double retainedVariance); + + /** @brief performs %PCA + + The operator performs %PCA of the supplied dataset. It is safe to reuse + the same PCA structure for multiple datasets. That is, if the structure + has been previously used with another dataset, the existing internal + data is reclaimed and the new @ref eigenvalues, @ref eigenvectors and @ref + mean are allocated and computed. + + The computed @ref eigenvalues are sorted from the largest to the smallest and + the corresponding @ref eigenvectors are stored as eigenvectors rows. + + @param data input samples stored as the matrix rows or as the matrix + columns. + @param mean optional mean value; if the matrix is empty (noArray()), + the mean is computed from the data. + @param flags operation flags; currently the parameter is only used to + specify the data layout. (Flags) + @param maxComponents maximum number of components that PCA should + retain; by default, all the components are retained. + */ + PCA& operator()(InputArray data, InputArray mean, int flags, int maxComponents = 0); + + /** @overload + @param data input samples stored as the matrix rows or as the matrix + columns. + @param mean optional mean value; if the matrix is empty (noArray()), + the mean is computed from the data. + @param flags operation flags; currently the parameter is only used to + specify the data layout. (PCA::Flags) + @param retainedVariance Percentage of variance that %PCA should retain. + Using this parameter will let the %PCA decided how many components to + retain but it will always keep at least 2. + */ + PCA& operator()(InputArray data, InputArray mean, int flags, double retainedVariance); + + /** @brief Projects vector(s) to the principal component subspace. + + The methods project one or more vectors to the principal component + subspace, where each vector projection is represented by coefficients in + the principal component basis. The first form of the method returns the + matrix that the second form writes to the result. So the first form can + be used as a part of expression while the second form can be more + efficient in a processing loop. + @param vec input vector(s); must have the same dimensionality and the + same layout as the input data used at %PCA phase, that is, if + DATA_AS_ROW are specified, then `vec.cols==data.cols` + (vector dimensionality) and `vec.rows` is the number of vectors to + project, and the same is true for the PCA::DATA_AS_COL case. 
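A minimal sketch of project/backProject (random data is used purely for illustration):
@code{.cpp}
    Mat data(100, 20, CV_32F);
    randu(data, Scalar::all(0), Scalar::all(1));    // hypothetical training set, one sample per row
    PCA pca(data, noArray(), PCA::DATA_AS_ROW, 5);  // keep 5 components
    Mat coeffs = pca.project(data.row(0));          // 1x5 projection coefficients
    Mat approx = pca.backProject(coeffs);           // 1x20 reconstruction of the sample
@endcode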
+ */ + Mat project(InputArray vec) const; + + /** @overload + @param vec input vector(s); must have the same dimensionality and the + same layout as the input data used at PCA phase, that is, if + DATA_AS_ROW are specified, then `vec.cols==data.cols` + (vector dimensionality) and `vec.rows` is the number of vectors to + project, and the same is true for the PCA::DATA_AS_COL case. + @param result output vectors; in case of PCA::DATA_AS_COL, the + output matrix has as many columns as the number of input vectors, this + means that `result.cols==vec.cols` and the number of rows match the + number of principal components (for example, `maxComponents` parameter + passed to the constructor). + */ + void project(InputArray vec, OutputArray result) const; + + /** @brief Reconstructs vectors from their PC projections. + + The methods are inverse operations to PCA::project. They take PC + coordinates of projected vectors and reconstruct the original vectors. + Unless all the principal components have been retained, the + reconstructed vectors are different from the originals. But typically, + the difference is small if the number of components is large enough (but + still much smaller than the original vector dimensionality). As a + result, PCA is used. + @param vec coordinates of the vectors in the principal component + subspace, the layout and size are the same as of PCA::project output + vectors. + */ + Mat backProject(InputArray vec) const; + + /** @overload + @param vec coordinates of the vectors in the principal component + subspace, the layout and size are the same as of PCA::project output + vectors. + @param result reconstructed vectors; the layout and size are the same as + of PCA::project input vectors. + */ + void backProject(InputArray vec, OutputArray result) const; + + /** @brief write PCA objects + + Writes @ref eigenvalues @ref eigenvectors and @ref mean to specified FileStorage + */ + void write(FileStorage& fs) const; + + /** @brief load PCA objects + + Loads @ref eigenvalues @ref eigenvectors and @ref mean from specified FileNode + */ + void read(const FileNode& fn); + + Mat eigenvectors; //!< eigenvectors of the covariation matrix + Mat eigenvalues; //!< eigenvalues of the covariation matrix + Mat mean; //!< mean value subtracted before the projection and added after the back projection +}; + +/** @example samples/cpp/pca.cpp +An example using %PCA for dimensionality reduction while maintaining an amount of variance +*/ + +/** @example samples/cpp/tutorial_code/ml/introduction_to_pca/introduction_to_pca.cpp +Check @ref tutorial_introduction_to_pca "the corresponding tutorial" for more details +*/ + +/** +@brief Linear Discriminant Analysis +@todo document this class +*/ +class CV_EXPORTS LDA +{ +public: + /** @brief constructor + Initializes a LDA with num_components (default 0). + */ + explicit LDA(int num_components = 0); + + /** Initializes and performs a Discriminant Analysis with Fisher's + Optimization Criterion on given data in src and corresponding labels + in labels. If 0 (or less) number of components are given, they are + automatically determined for given data in computation. + */ + LDA(InputArrayOfArrays src, InputArray labels, int num_components = 0); + + /** Serializes this object to a given filename. + */ + void save(const String& filename) const; + + /** Deserializes this object from a given filename. + */ + void load(const String& filename); + + /** Serializes this object to a given cv::FileStorage. 
+ */ + void save(FileStorage& fs) const; + + /** Deserializes this object from a given cv::FileStorage. + */ + void load(const FileStorage& node); + + /** destructor + */ + ~LDA(); + + /** Compute the discriminants for data in src (row aligned) and labels. + */ + void compute(InputArrayOfArrays src, InputArray labels); + + /** Projects samples into the LDA subspace. + src may be one or more row aligned samples. + */ + Mat project(InputArray src); + + /** Reconstructs projections from the LDA subspace. + src may be one or more row aligned projections. + */ + Mat reconstruct(InputArray src); + + /** Returns the eigenvectors of this LDA. + */ + Mat eigenvectors() const { return _eigenvectors; } + + /** Returns the eigenvalues of this LDA. + */ + Mat eigenvalues() const { return _eigenvalues; } + + static Mat subspaceProject(InputArray W, InputArray mean, InputArray src); + static Mat subspaceReconstruct(InputArray W, InputArray mean, InputArray src); + +protected: + bool _dataAsRow; // unused, but needed for 3.0 ABI compatibility. + int _num_components; + Mat _eigenvectors; + Mat _eigenvalues; + void lda(InputArrayOfArrays src, InputArray labels); +}; + +/** @brief Singular Value Decomposition + +Class for computing Singular Value Decomposition of a floating-point +matrix. The Singular Value Decomposition is used to solve least-square +problems, under-determined linear systems, invert matrices, compute +condition numbers, and so on. + +If you want to compute a condition number of a matrix or an absolute value of +its determinant, you do not need `u` and `vt`. You can pass +flags=SVD::NO_UV|... . Another flag SVD::FULL_UV indicates that full-size u +and vt must be computed, which is not necessary most of the time. + +@sa invert, solve, eigen, determinant +*/ +class CV_EXPORTS SVD +{ +public: + enum Flags { + /** allow the algorithm to modify the decomposed matrix; it can save space and speed up + processing. currently ignored. */ + MODIFY_A = 1, + /** indicates that only a vector of singular values `w` is to be processed, while u and vt + will be set to empty matrices */ + NO_UV = 2, + /** when the matrix is not square, by default the algorithm produces u and vt matrices of + sufficiently large size for the further A reconstruction; if, however, FULL_UV flag is + specified, u and vt will be full-size square orthogonal matrices.*/ + FULL_UV = 4 + }; + + /** @brief the default constructor + + initializes an empty SVD structure + */ + SVD(); + + /** @overload + initializes an empty SVD structure and then calls SVD::operator() + @param src decomposed matrix. The depth has to be CV_32F or CV_64F. + @param flags operation flags (SVD::Flags) + */ + SVD( InputArray src, int flags = 0 ); + + /** @brief the operator that performs SVD. The previously allocated u, w and vt are released. + + The operator performs the singular value decomposition of the supplied + matrix. The u,`vt` , and the vector of singular values w are stored in + the structure. The same SVD structure can be reused many times with + different matrices. Each time, if needed, the previous u,`vt` , and w + are reclaimed and the new matrices are created, which is all handled by + Mat::create. + @param src decomposed matrix. The depth has to be CV_32F or CV_64F. + @param flags operation flags (SVD::Flags) + */ + SVD& operator ()( InputArray src, int flags = 0 ); + + /** @brief decomposes matrix and stores the results to user-provided matrices + + The methods/functions perform SVD of matrix. 
Unlike SVD::SVD constructor + and SVD::operator(), they store the results to the user-provided + matrices: + + @code{.cpp} + Mat A, w, u, vt; + SVD::compute(A, w, u, vt); + @endcode + + @param src decomposed matrix. The depth has to be CV_32F or CV_64F. + @param w calculated singular values + @param u calculated left singular vectors + @param vt transposed matrix of right singular vectors + @param flags operation flags - see SVD::Flags. + */ + static void compute( InputArray src, OutputArray w, + OutputArray u, OutputArray vt, int flags = 0 ); + + /** @overload + computes singular values of a matrix + @param src decomposed matrix. The depth has to be CV_32F or CV_64F. + @param w calculated singular values + @param flags operation flags - see SVD::Flags. + */ + static void compute( InputArray src, OutputArray w, int flags = 0 ); + + /** @brief performs back substitution + */ + static void backSubst( InputArray w, InputArray u, + InputArray vt, InputArray rhs, + OutputArray dst ); + + /** @brief solves an under-determined singular linear system + + The method finds a unit-length solution x of a singular linear system + A\*x = 0. Depending on the rank of A, there can be no solutions, a + single solution or an infinite number of solutions. In general, the + algorithm solves the following problem: + \f[dst = \arg \min _{x: \| x \| =1} \| src \cdot x \|\f] + @param src left-hand-side matrix. + @param dst found solution. + */ + static void solveZ( InputArray src, OutputArray dst ); + + /** @brief performs a singular value back substitution. + + The method calculates a back substitution for the specified right-hand + side: + + \f[\texttt{x} = \texttt{vt} ^T \cdot diag( \texttt{w} )^{-1} \cdot \texttt{u} ^T \cdot \texttt{rhs} \sim \texttt{A} ^{-1} \cdot \texttt{rhs}\f] + + Using this technique you can either get a very accurate solution of the + convenient linear system, or the best (in the least-squares terms) + pseudo-solution of an overdetermined linear system. + + @param rhs right-hand side of a linear system (u\*w\*v')\*dst = rhs to + be solved, where A has been previously decomposed. + + @param dst found solution of the system. + + @note Explicit SVD with the further back substitution only makes sense + if you need to solve many linear systems with the same left-hand side + (for example, src ). If all you need is to solve a single system + (possibly with multiple rhs immediately available), simply call solve + add pass #DECOMP_SVD there. It does absolutely the same thing. + */ + void backSubst( InputArray rhs, OutputArray dst ) const; + + /** @todo document */ + template static + void compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w, Matx<_Tp, m, nm>& u, Matx<_Tp, n, nm>& vt ); + + /** @todo document */ + template static + void compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w ); + + /** @todo document */ + template static + void backSubst( const Matx<_Tp, nm, 1>& w, const Matx<_Tp, m, nm>& u, const Matx<_Tp, n, nm>& vt, const Matx<_Tp, m, nb>& rhs, Matx<_Tp, n, nb>& dst ); + + Mat u, w, vt; +}; + +/** @brief Random Number Generator + +Random number generator. It encapsulates the state (currently, a 64-bit +integer) and has methods to return scalar random values and to fill +arrays with random values. Currently it supports uniform and Gaussian +(normal) distributions. The generator uses Multiply-With-Carry +algorithm, introduced by G. Marsaglia ( + ). +Gaussian-distribution random numbers are generated using the Ziggurat +algorithm ( ), +introduced by G. Marsaglia and W. W. Tsang. 
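A short usage sketch:
@code{.cpp}
    RNG& rng = theRNG();
    int i = rng.uniform(0, 10);                          // integer from [0, 10)
    double g = rng.gaussian(2.0);                        // zero-mean Gaussian, sigma = 2.0
    Mat noise(4, 4, CV_32F);
    rng.fill(noise, RNG::NORMAL, Scalar::all(0), Scalar::all(1));   // N(0,1) samples
@endcode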
+*/ +class CV_EXPORTS RNG +{ +public: + enum { UNIFORM = 0, + NORMAL = 1 + }; + + /** @brief constructor + + These are the RNG constructors. The first form sets the state to some + pre-defined value, equal to 2\*\*32-1 in the current implementation. The + second form sets the state to the specified value. If you passed state=0 + , the constructor uses the above default value instead to avoid the + singular random number sequence, consisting of all zeros. + */ + RNG(); + /** @overload + @param state 64-bit value used to initialize the RNG. + */ + RNG(uint64 state); + /**The method updates the state using the MWC algorithm and returns the + next 32-bit random number.*/ + unsigned next(); + + /**Each of the methods updates the state using the MWC algorithm and + returns the next random number of the specified type. In case of integer + types, the returned number is from the available value range for the + specified type. In case of floating-point types, the returned value is + from [0,1) range. + */ + operator uchar(); + /** @overload */ + operator schar(); + /** @overload */ + operator ushort(); + /** @overload */ + operator short(); + /** @overload */ + operator unsigned(); + /** @overload */ + operator int(); + /** @overload */ + operator float(); + /** @overload */ + operator double(); + + /** @brief returns a random integer sampled uniformly from [0, N). + + The methods transform the state using the MWC algorithm and return the + next random number. The first form is equivalent to RNG::next . The + second form returns the random number modulo N , which means that the + result is in the range [0, N) . + */ + unsigned operator ()(); + /** @overload + @param N upper non-inclusive boundary of the returned random number. + */ + unsigned operator ()(unsigned N); + + /** @brief returns uniformly distributed integer random number from [a,b) range + + The methods transform the state using the MWC algorithm and return the + next uniformly-distributed random number of the specified type, deduced + from the input parameter type, from the range [a, b) . There is a nuance + illustrated by the following sample: + + @code{.cpp} + RNG rng; + + // always produces 0 + double a = rng.uniform(0, 1); + + // produces double from [0, 1) + double a1 = rng.uniform((double)0, (double)1); + + // produces float from [0, 1) + float b = rng.uniform(0.f, 1.f); + + // produces double from [0, 1) + double c = rng.uniform(0., 1.); + + // may cause compiler error because of ambiguity: + // RNG::uniform(0, (int)0.999999)? or RNG::uniform((double)0, 0.99999)? + double d = rng.uniform(0, 0.999999); + @endcode + + The compiler does not take into account the type of the variable to + which you assign the result of RNG::uniform . The only thing that + matters to the compiler is the type of a and b parameters. So, if you + want a floating-point random number, but the range boundaries are + integer numbers, either put dots in the end, if they are constants, or + use explicit type cast operators, as in the a1 initialization above. + @param a lower inclusive boundary of the returned random number. + @param b upper non-inclusive boundary of the returned random number. + */ + int uniform(int a, int b); + /** @overload */ + float uniform(float a, float b); + /** @overload */ + double uniform(double a, double b); + + /** @brief Fills arrays with random numbers. + + @param mat 2D or N-dimensional matrix; currently matrices with more than + 4 channels are not supported by the methods, use Mat::reshape as a + possible workaround. 
+ @param distType distribution type, RNG::UNIFORM or RNG::NORMAL. + @param a first distribution parameter; in case of the uniform + distribution, this is an inclusive lower boundary, in case of the normal + distribution, this is a mean value. + @param b second distribution parameter; in case of the uniform + distribution, this is a non-inclusive upper boundary, in case of the + normal distribution, this is a standard deviation (diagonal of the + standard deviation matrix or the full standard deviation matrix). + @param saturateRange pre-saturation flag; for uniform distribution only; + if true, the method will first convert a and b to the acceptable value + range (according to the mat datatype) and then will generate uniformly + distributed random numbers within the range [saturate(a), saturate(b)), + if saturateRange=false, the method will generate uniformly distributed + random numbers in the original range [a, b) and then will saturate them, + it means, for example, that + theRNG().fill(mat_8u, RNG::UNIFORM, -DBL_MAX, DBL_MAX) will likely + produce array mostly filled with 0's and 255's, since the range (0, 255) + is significantly smaller than [-DBL_MAX, DBL_MAX). + + Each of the methods fills the matrix with the random values from the + specified distribution. As the new numbers are generated, the RNG state + is updated accordingly. In case of multiple-channel images, every + channel is filled independently, which means that RNG cannot generate + samples from the multi-dimensional Gaussian distribution with + non-diagonal covariance matrix directly. To do that, the method + generates samples from multi-dimensional standard Gaussian distribution + with zero mean and identity covariation matrix, and then transforms them + using transform to get samples from the specified Gaussian distribution. + */ + void fill( InputOutputArray mat, int distType, InputArray a, InputArray b, bool saturateRange = false ); + + /** @brief Returns the next random number sampled from the Gaussian distribution + @param sigma standard deviation of the distribution. + + The method transforms the state using the MWC algorithm and returns the + next random number from the Gaussian distribution N(0,sigma) . That is, + the mean value of the returned random numbers is zero and the standard + deviation is the specified sigma . + */ + double gaussian(double sigma); + + uint64 state; + + bool operator ==(const RNG& other) const; +}; + +/** @brief Mersenne Twister random number generator + +Inspired by http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/CODES/mt19937ar.c +@todo document +*/ +class CV_EXPORTS RNG_MT19937 +{ +public: + RNG_MT19937(); + RNG_MT19937(unsigned s); + void seed(unsigned s); + + unsigned next(); + + operator int(); + operator unsigned(); + operator float(); + operator double(); + + unsigned operator ()(unsigned N); + unsigned operator ()(); + + /** @brief returns uniformly distributed integer random number from [a,b) range*/ + int uniform(int a, int b); + /** @brief returns uniformly distributed floating-point random number from [a,b) range*/ + float uniform(float a, float b); + /** @brief returns uniformly distributed double-precision floating-point random number from [a,b) range*/ + double uniform(double a, double b); + +private: + enum PeriodParameters {N = 624, M = 397}; + unsigned state[N]; + int mti; +}; + +//! @} core_array + +//! @addtogroup core_cluster +//! 
@{ + +/** @example samples/cpp/kmeans.cpp +An example on K-means clustering +*/ + +/** @brief Finds centers of clusters and groups input samples around the clusters. + +The function kmeans implements a k-means algorithm that finds the centers of cluster_count clusters +and groups the input samples around the clusters. As an output, \f$\texttt{bestLabels}_i\f$ contains a +0-based cluster index for the sample stored in the \f$i^{th}\f$ row of the samples matrix. + +@note +- (Python) An example on K-means clustering can be found at + opencv_source_code/samples/python/kmeans.py +@param data Data for clustering. An array of N-Dimensional points with float coordinates is needed. +Examples of this array can be: +- Mat points(count, 2, CV_32F); +- Mat points(count, 1, CV_32FC2); +- Mat points(1, count, CV_32FC2); +- std::vector\ points(sampleCount); +@param K Number of clusters to split the set by. +@param bestLabels Input/output integer array that stores the cluster indices for every sample. +@param criteria The algorithm termination criteria, that is, the maximum number of iterations and/or +the desired accuracy. The accuracy is specified as criteria.epsilon. As soon as each of the cluster +centers moves by less than criteria.epsilon on some iteration, the algorithm stops. +@param attempts Flag to specify the number of times the algorithm is executed using different +initial labellings. The algorithm returns the labels that yield the best compactness (see the last +function parameter). +@param flags Flag that can take values of cv::KmeansFlags +@param centers Output matrix of the cluster centers, one row per each cluster center. +@return The function returns the compactness measure that is computed as +\f[\sum _i \| \texttt{samples} _i - \texttt{centers} _{ \texttt{labels} _i} \| ^2\f] +after every attempt. The best (minimum) value is chosen and the corresponding labels and the +compactness value are returned by the function. Basically, you can use only the core of the +function, set the number of attempts to 1, initialize labels each time using a custom algorithm, +pass them with the ( flags = #KMEANS_USE_INITIAL_LABELS ) flag, and then choose the best +(most-compact) clustering. +*/ +CV_EXPORTS_W double kmeans( InputArray data, int K, InputOutputArray bestLabels, + TermCriteria criteria, int attempts, + int flags, OutputArray centers = noArray() ); + +//! @} core_cluster + +//! @addtogroup core_basic +//! 
@{ + +/////////////////////////////// Formatted output of cv::Mat /////////////////////////// + +/** @todo document */ +class CV_EXPORTS Formatted +{ +public: + virtual const char* next() = 0; + virtual void reset() = 0; + virtual ~Formatted(); +}; + +/** @todo document */ +class CV_EXPORTS Formatter +{ +public: + enum { FMT_DEFAULT = 0, + FMT_MATLAB = 1, + FMT_CSV = 2, + FMT_PYTHON = 3, + FMT_NUMPY = 4, + FMT_C = 5 + }; + + virtual ~Formatter(); + + virtual Ptr format(const Mat& mtx) const = 0; + + virtual void set32fPrecision(int p = 8) = 0; + virtual void set64fPrecision(int p = 16) = 0; + virtual void setMultiline(bool ml = true) = 0; + + static Ptr get(int fmt = FMT_DEFAULT); + +}; + +static inline +String& operator << (String& out, Ptr fmtd) +{ + fmtd->reset(); + for(const char* str = fmtd->next(); str; str = fmtd->next()) + out += cv::String(str); + return out; +} + +static inline +String& operator << (String& out, const Mat& mtx) +{ + return out << Formatter::get()->format(mtx); +} + +//////////////////////////////////////// Algorithm //////////////////////////////////// + +class CV_EXPORTS Algorithm; + +template struct ParamType {}; + + +/** @brief This is a base class for all more or less complex algorithms in OpenCV + +especially for classes of algorithms, for which there can be multiple implementations. The examples +are stereo correspondence (for which there are algorithms like block matching, semi-global block +matching, graph-cut etc.), background subtraction (which can be done using mixture-of-gaussians +models, codebook-based algorithm etc.), optical flow (block matching, Lucas-Kanade, Horn-Schunck +etc.). + +Here is example of SimpleBlobDetector use in your application via Algorithm interface: +@snippet snippets/core_various.cpp Algorithm +*/ +class CV_EXPORTS_W Algorithm +{ +public: + Algorithm(); + virtual ~Algorithm(); + + /** @brief Clears the algorithm state + */ + CV_WRAP virtual void clear() {} + + /** @brief Stores algorithm parameters in a file storage + */ + virtual void write(FileStorage& fs) const { CV_UNUSED(fs); } + + /** @brief simplified API for language bindings + * @overload + */ + CV_WRAP void write(const Ptr& fs, const String& name = String()) const; + + /** @brief Reads algorithm parameters from a file storage + */ + CV_WRAP virtual void read(const FileNode& fn) { CV_UNUSED(fn); } + + /** @brief Returns true if the Algorithm is empty (e.g. in the very beginning or after unsuccessful read + */ + CV_WRAP virtual bool empty() const { return false; } + + /** @brief Reads algorithm from the file node + + This is static template method of Algorithm. It's usage is following (in the case of SVM): + @code + cv::FileStorage fsRead("example.xml", FileStorage::READ); + Ptr svm = Algorithm::read(fsRead.root()); + @endcode + In order to make this method work, the derived class must overwrite Algorithm::read(const + FileNode& fn) and also have static create() method without parameters + (or with all the optional parameters) + */ + template static Ptr<_Tp> read(const FileNode& fn) + { + Ptr<_Tp> obj = _Tp::create(); + obj->read(fn); + return !obj->empty() ? obj : Ptr<_Tp>(); + } + + /** @brief Loads algorithm from the file + + @param filename Name of the file to read. + @param objname The optional name of the node to read (if empty, the first top-level node will be used) + + This is static template method of Algorithm. 
It's usage is following (in the case of SVM): + @code + Ptr svm = Algorithm::load("my_svm_model.xml"); + @endcode + In order to make this method work, the derived class must overwrite Algorithm::read(const + FileNode& fn). + */ + template static Ptr<_Tp> load(const String& filename, const String& objname=String()) + { + FileStorage fs(filename, FileStorage::READ); + CV_Assert(fs.isOpened()); + FileNode fn = objname.empty() ? fs.getFirstTopLevelNode() : fs[objname]; + if (fn.empty()) return Ptr<_Tp>(); + Ptr<_Tp> obj = _Tp::create(); + obj->read(fn); + return !obj->empty() ? obj : Ptr<_Tp>(); + } + + /** @brief Loads algorithm from a String + + @param strModel The string variable containing the model you want to load. + @param objname The optional name of the node to read (if empty, the first top-level node will be used) + + This is static template method of Algorithm. It's usage is following (in the case of SVM): + @code + Ptr svm = Algorithm::loadFromString(myStringModel); + @endcode + */ + template static Ptr<_Tp> loadFromString(const String& strModel, const String& objname=String()) + { + FileStorage fs(strModel, FileStorage::READ + FileStorage::MEMORY); + FileNode fn = objname.empty() ? fs.getFirstTopLevelNode() : fs[objname]; + Ptr<_Tp> obj = _Tp::create(); + obj->read(fn); + return !obj->empty() ? obj : Ptr<_Tp>(); + } + + /** Saves the algorithm to a file. + In order to make this method work, the derived class must implement Algorithm::write(FileStorage& fs). */ + CV_WRAP virtual void save(const String& filename) const; + + /** Returns the algorithm string identifier. + This string is used as top level xml/yml node tag when the object is saved to a file or string. */ + CV_WRAP virtual String getDefaultName() const; + +protected: + void writeFormat(FileStorage& fs) const; +}; + +struct Param { + enum { INT=0, BOOLEAN=1, REAL=2, STRING=3, MAT=4, MAT_VECTOR=5, ALGORITHM=6, FLOAT=7, + UNSIGNED_INT=8, UINT64=9, UCHAR=11, SCALAR=12 }; +}; + + + +template<> struct ParamType +{ + typedef bool const_param_type; + typedef bool member_type; + + enum { type = Param::BOOLEAN }; +}; + +template<> struct ParamType +{ + typedef int const_param_type; + typedef int member_type; + + enum { type = Param::INT }; +}; + +template<> struct ParamType +{ + typedef double const_param_type; + typedef double member_type; + + enum { type = Param::REAL }; +}; + +template<> struct ParamType +{ + typedef const String& const_param_type; + typedef String member_type; + + enum { type = Param::STRING }; +}; + +template<> struct ParamType +{ + typedef const Mat& const_param_type; + typedef Mat member_type; + + enum { type = Param::MAT }; +}; + +template<> struct ParamType > +{ + typedef const std::vector& const_param_type; + typedef std::vector member_type; + + enum { type = Param::MAT_VECTOR }; +}; + +template<> struct ParamType +{ + typedef const Ptr& const_param_type; + typedef Ptr member_type; + + enum { type = Param::ALGORITHM }; +}; + +template<> struct ParamType +{ + typedef float const_param_type; + typedef float member_type; + + enum { type = Param::FLOAT }; +}; + +template<> struct ParamType +{ + typedef unsigned const_param_type; + typedef unsigned member_type; + + enum { type = Param::UNSIGNED_INT }; +}; + +template<> struct ParamType +{ + typedef uint64 const_param_type; + typedef uint64 member_type; + + enum { type = Param::UINT64 }; +}; + +template<> struct ParamType +{ + typedef uchar const_param_type; + typedef uchar member_type; + + enum { type = Param::UCHAR }; +}; + +template<> struct ParamType +{ + 
typedef const Scalar& const_param_type; + typedef Scalar member_type; + + enum { type = Param::SCALAR }; +}; + +//! @} core_basic + +} //namespace cv + +#include "opencv2/core/operations.hpp" +#include "opencv2/core/cvstd.inl.hpp" +#include "opencv2/core/utility.hpp" +#include "opencv2/core/optim.hpp" +#include "opencv2/core/ovx.hpp" + +#endif /*OPENCV_CORE_HPP*/ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/affine.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/affine.hpp new file mode 100644 index 00000000..7e2ed307 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/affine.hpp @@ -0,0 +1,678 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_AFFINE3_HPP +#define OPENCV_CORE_AFFINE3_HPP + +#ifdef __cplusplus + +#include + +namespace cv +{ + +//! @addtogroup core +//! @{ + + /** @brief Affine transform + * + * It represents a 4x4 homogeneous transformation matrix \f$T\f$ + * + * \f[T = + * \begin{bmatrix} + * R & t\\ + * 0 & 1\\ + * \end{bmatrix} + * \f] + * + * where \f$R\f$ is a 3x3 rotation matrix and \f$t\f$ is a 3x1 translation vector. + * + * You can specify \f$R\f$ either by a 3x3 rotation matrix or by a 3x1 rotation vector, + * which is converted to a 3x3 rotation matrix by the Rodrigues formula. 
+ * + * To construct a matrix \f$T\f$ representing first rotation around the axis \f$r\f$ with rotation + * angle \f$|r|\f$ in radian (right hand rule) and then translation by the vector \f$t\f$, you can use + * + * @code + * cv::Vec3f r, t; + * cv::Affine3f T(r, t); + * @endcode + * + * If you already have the rotation matrix \f$R\f$, then you can use + * + * @code + * cv::Matx33f R; + * cv::Affine3f T(R, t); + * @endcode + * + * To extract the rotation matrix \f$R\f$ from \f$T\f$, use + * + * @code + * cv::Matx33f R = T.rotation(); + * @endcode + * + * To extract the translation vector \f$t\f$ from \f$T\f$, use + * + * @code + * cv::Vec3f t = T.translation(); + * @endcode + * + * To extract the rotation vector \f$r\f$ from \f$T\f$, use + * + * @code + * cv::Vec3f r = T.rvec(); + * @endcode + * + * Note that since the mapping from rotation vectors to rotation matrices + * is many to one. The returned rotation vector is not necessarily the one + * you used before to set the matrix. + * + * If you have two transformations \f$T = T_1 * T_2\f$, use + * + * @code + * cv::Affine3f T, T1, T2; + * T = T2.concatenate(T1); + * @endcode + * + * To get the inverse transform of \f$T\f$, use + * + * @code + * cv::Affine3f T, T_inv; + * T_inv = T.inv(); + * @endcode + * + */ + template + class Affine3 + { + public: + typedef T float_type; + typedef Matx Mat3; + typedef Matx Mat4; + typedef Vec Vec3; + + //! Default constructor. It represents a 4x4 identity matrix. + Affine3(); + + //! Augmented affine matrix + Affine3(const Mat4& affine); + + /** + * The resulting 4x4 matrix is + * + * \f[ + * \begin{bmatrix} + * R & t\\ + * 0 & 1\\ + * \end{bmatrix} + * \f] + * + * @param R 3x3 rotation matrix. + * @param t 3x1 translation vector. + */ + Affine3(const Mat3& R, const Vec3& t = Vec3::all(0)); + + /** + * Rodrigues vector. + * + * The last row of the current matrix is set to [0,0,0,1]. + * + * @param rvec 3x1 rotation vector. Its direction indicates the rotation axis and its length + * indicates the rotation angle in radian (using right hand rule). + * @param t 3x1 translation vector. + */ + Affine3(const Vec3& rvec, const Vec3& t = Vec3::all(0)); + + /** + * Combines all constructors above. Supports 4x4, 3x4, 3x3, 1x3, 3x1 sizes of data matrix. + * + * The last row of the current matrix is set to [0,0,0,1] when data is not 4x4. + * + * @param data 1-channel matrix. + * when it is 4x4, it is copied to the current matrix and t is not used. + * When it is 3x4, it is copied to the upper part 3x4 of the current matrix and t is not used. + * When it is 3x3, it is copied to the upper left 3x3 part of the current matrix. + * When it is 3x1 or 1x3, it is treated as a rotation vector and the Rodrigues formula is used + * to compute a 3x3 rotation matrix. + * @param t 3x1 translation vector. It is used only when data is neither 4x4 nor 3x4. + */ + explicit Affine3(const Mat& data, const Vec3& t = Vec3::all(0)); + + //! From 16-element array + explicit Affine3(const float_type* vals); + + //! Create an 4x4 identity transform + static Affine3 Identity(); + + /** + * Rotation matrix. + * + * Copy the rotation matrix to the upper left 3x3 part of the current matrix. + * The remaining elements of the current matrix are not changed. + * + * @param R 3x3 rotation matrix. + * + */ + void rotation(const Mat3& R); + + /** + * Rodrigues vector. + * + * It sets the upper left 3x3 part of the matrix. The remaining part is unaffected. + * + * @param rvec 3x1 rotation vector. 
The direction indicates the rotation axis and + * its length indicates the rotation angle in radian (using the right thumb convention). + */ + void rotation(const Vec3& rvec); + + /** + * Combines rotation methods above. Supports 3x3, 1x3, 3x1 sizes of data matrix. + * + * It sets the upper left 3x3 part of the matrix. The remaining part is unaffected. + * + * @param data 1-channel matrix. + * When it is a 3x3 matrix, it sets the upper left 3x3 part of the current matrix. + * When it is a 1x3 or 3x1 matrix, it is used as a rotation vector. The Rodrigues formula + * is used to compute the rotation matrix and sets the upper left 3x3 part of the current matrix. + */ + void rotation(const Mat& data); + + /** + * Copy the 3x3 matrix L to the upper left part of the current matrix + * + * It sets the upper left 3x3 part of the matrix. The remaining part is unaffected. + * + * @param L 3x3 matrix. + */ + void linear(const Mat3& L); + + /** + * Copy t to the first three elements of the last column of the current matrix + * + * It sets the upper right 3x1 part of the matrix. The remaining part is unaffected. + * + * @param t 3x1 translation vector. + */ + void translation(const Vec3& t); + + //! @return the upper left 3x3 part + Mat3 rotation() const; + + //! @return the upper left 3x3 part + Mat3 linear() const; + + //! @return the upper right 3x1 part + Vec3 translation() const; + + //! Rodrigues vector. + //! @return a vector representing the upper left 3x3 rotation matrix of the current matrix. + //! @warning Since the mapping between rotation vectors and rotation matrices is many to one, + //! this function returns only one rotation vector that represents the current rotation matrix, + //! which is not necessarily the same one set by `rotation(const Vec3& rvec)`. + Vec3 rvec() const; + + //! @return the inverse of the current matrix. + Affine3 inv(int method = cv::DECOMP_SVD) const; + + //! a.rotate(R) is equivalent to Affine(R, 0) * a; + Affine3 rotate(const Mat3& R) const; + + //! a.rotate(rvec) is equivalent to Affine(rvec, 0) * a; + Affine3 rotate(const Vec3& rvec) const; + + //! a.translate(t) is equivalent to Affine(E, t) * a, where E is an identity matrix + Affine3 translate(const Vec3& t) const; + + //! a.concatenate(affine) is equivalent to affine * a; + Affine3 concatenate(const Affine3& affine) const; + + template operator Affine3() const; + + template Affine3 cast() const; + + Mat4 matrix; + +#if defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H + Affine3(const Eigen::Transform& affine); + Affine3(const Eigen::Transform& affine); + operator Eigen::Transform() const; + operator Eigen::Transform() const; +#endif + }; + + template static + Affine3 operator*(const Affine3& affine1, const Affine3& affine2); + + //! 
V is a 3-element vector with member fields x, y and z + template static + V operator*(const Affine3& affine, const V& vector); + + typedef Affine3 Affine3f; + typedef Affine3 Affine3d; + + static Vec3f operator*(const Affine3f& affine, const Vec3f& vector); + static Vec3d operator*(const Affine3d& affine, const Vec3d& vector); + + template class DataType< Affine3<_Tp> > + { + public: + typedef Affine3<_Tp> value_type; + typedef Affine3::work_type> work_type; + typedef _Tp channel_type; + + enum { generic_type = 0, + channels = 16, + fmt = traits::SafeFmt::fmt + ((channels - 1) << 8) +#ifdef OPENCV_TRAITS_ENABLE_DEPRECATED + ,depth = DataType::depth + ,type = CV_MAKETYPE(depth, channels) +#endif + }; + + typedef Vec vec_type; + }; + + namespace traits { + template + struct Depth< Affine3<_Tp> > { enum { value = Depth<_Tp>::value }; }; + template + struct Type< Affine3<_Tp> > { enum { value = CV_MAKETYPE(Depth<_Tp>::value, 16) }; }; + } // namespace + +//! @} core + +} + +//! @cond IGNORED + +/////////////////////////////////////////////////////////////////////////////////// +// Implementation + +template inline +cv::Affine3::Affine3() + : matrix(Mat4::eye()) +{} + +template inline +cv::Affine3::Affine3(const Mat4& affine) + : matrix(affine) +{} + +template inline +cv::Affine3::Affine3(const Mat3& R, const Vec3& t) +{ + rotation(R); + translation(t); + matrix.val[12] = matrix.val[13] = matrix.val[14] = 0; + matrix.val[15] = 1; +} + +template inline +cv::Affine3::Affine3(const Vec3& _rvec, const Vec3& t) +{ + rotation(_rvec); + translation(t); + matrix.val[12] = matrix.val[13] = matrix.val[14] = 0; + matrix.val[15] = 1; +} + +template inline +cv::Affine3::Affine3(const cv::Mat& data, const Vec3& t) +{ + CV_Assert(data.type() == cv::traits::Type::value); + CV_Assert(data.channels() == 1); + + if (data.cols == 4 && data.rows == 4) + { + data.copyTo(matrix); + return; + } + else if (data.cols == 4 && data.rows == 3) + { + rotation(data(Rect(0, 0, 3, 3))); + translation(data(Rect(3, 0, 1, 3))); + } + else + { + rotation(data); + translation(t); + } + + matrix.val[12] = matrix.val[13] = matrix.val[14] = 0; + matrix.val[15] = 1; +} + +template inline +cv::Affine3::Affine3(const float_type* vals) : matrix(vals) +{} + +template inline +cv::Affine3 cv::Affine3::Identity() +{ + return Affine3(cv::Affine3::Mat4::eye()); +} + +template inline +void cv::Affine3::rotation(const Mat3& R) +{ + linear(R); +} + +template inline +void cv::Affine3::rotation(const Vec3& _rvec) +{ + double theta = norm(_rvec); + + if (theta < DBL_EPSILON) + rotation(Mat3::eye()); + else + { + double c = std::cos(theta); + double s = std::sin(theta); + double c1 = 1. - c; + double itheta = (theta != 0) ? 1./theta : 0.; + + Point3_ r = _rvec*itheta; + + Mat3 rrt( r.x*r.x, r.x*r.y, r.x*r.z, r.x*r.y, r.y*r.y, r.y*r.z, r.x*r.z, r.y*r.z, r.z*r.z ); + Mat3 r_x( 0, -r.z, r.y, r.z, 0, -r.x, -r.y, r.x, 0 ); + + // R = cos(theta)*I + (1 - cos(theta))*r*rT + sin(theta)*[r_x] + // where [r_x] is [0 -rz ry; rz 0 -rx; -ry rx 0] + Mat3 R = c*Mat3::eye() + c1*rrt + s*r_x; + + rotation(R); + } +} + +//Combines rotation methods above. 
Supports 3x3, 1x3, 3x1 sizes of data matrix; +template inline +void cv::Affine3::rotation(const cv::Mat& data) +{ + CV_Assert(data.type() == cv::traits::Type::value); + CV_Assert(data.channels() == 1); + + if (data.cols == 3 && data.rows == 3) + { + Mat3 R; + data.copyTo(R); + rotation(R); + } + else if ((data.cols == 3 && data.rows == 1) || (data.cols == 1 && data.rows == 3)) + { + Vec3 _rvec; + data.reshape(1, 3).copyTo(_rvec); + rotation(_rvec); + } + else + CV_Error(Error::StsError, "Input matrix can only be 3x3, 1x3 or 3x1"); +} + +template inline +void cv::Affine3::linear(const Mat3& L) +{ + matrix.val[0] = L.val[0]; matrix.val[1] = L.val[1]; matrix.val[ 2] = L.val[2]; + matrix.val[4] = L.val[3]; matrix.val[5] = L.val[4]; matrix.val[ 6] = L.val[5]; + matrix.val[8] = L.val[6]; matrix.val[9] = L.val[7]; matrix.val[10] = L.val[8]; +} + +template inline +void cv::Affine3::translation(const Vec3& t) +{ + matrix.val[3] = t[0]; matrix.val[7] = t[1]; matrix.val[11] = t[2]; +} + +template inline +typename cv::Affine3::Mat3 cv::Affine3::rotation() const +{ + return linear(); +} + +template inline +typename cv::Affine3::Mat3 cv::Affine3::linear() const +{ + typename cv::Affine3::Mat3 R; + R.val[0] = matrix.val[0]; R.val[1] = matrix.val[1]; R.val[2] = matrix.val[ 2]; + R.val[3] = matrix.val[4]; R.val[4] = matrix.val[5]; R.val[5] = matrix.val[ 6]; + R.val[6] = matrix.val[8]; R.val[7] = matrix.val[9]; R.val[8] = matrix.val[10]; + return R; +} + +template inline +typename cv::Affine3::Vec3 cv::Affine3::translation() const +{ + return Vec3(matrix.val[3], matrix.val[7], matrix.val[11]); +} + +template inline +typename cv::Affine3::Vec3 cv::Affine3::rvec() const +{ + cv::Vec3d w; + cv::Matx33d u, vt, R = rotation(); + cv::SVD::compute(R, w, u, vt, cv::SVD::FULL_UV + cv::SVD::MODIFY_A); + R = u * vt; + + double rx = R.val[7] - R.val[5]; + double ry = R.val[2] - R.val[6]; + double rz = R.val[3] - R.val[1]; + + double s = std::sqrt((rx*rx + ry*ry + rz*rz)*0.25); + double c = (R.val[0] + R.val[4] + R.val[8] - 1) * 0.5; + c = c > 1.0 ? 1.0 : c < -1.0 ? -1.0 : c; + double theta = acos(c); + + if( s < 1e-5 ) + { + if( c > 0 ) + rx = ry = rz = 0; + else + { + double t; + t = (R.val[0] + 1) * 0.5; + rx = std::sqrt(std::max(t, 0.0)); + t = (R.val[4] + 1) * 0.5; + ry = std::sqrt(std::max(t, 0.0)) * (R.val[1] < 0 ? -1.0 : 1.0); + t = (R.val[8] + 1) * 0.5; + rz = std::sqrt(std::max(t, 0.0)) * (R.val[2] < 0 ? 
-1.0 : 1.0); + + if( fabs(rx) < fabs(ry) && fabs(rx) < fabs(rz) && (R.val[5] > 0) != (ry*rz > 0) ) + rz = -rz; + theta /= std::sqrt(rx*rx + ry*ry + rz*rz); + rx *= theta; + ry *= theta; + rz *= theta; + } + } + else + { + double vth = 1/(2*s); + vth *= theta; + rx *= vth; ry *= vth; rz *= vth; + } + + return cv::Vec3d(rx, ry, rz); +} + +template inline +cv::Affine3 cv::Affine3::inv(int method) const +{ + return matrix.inv(method); +} + +template inline +cv::Affine3 cv::Affine3::rotate(const Mat3& R) const +{ + Mat3 Lc = linear(); + Vec3 tc = translation(); + Mat4 result; + result.val[12] = result.val[13] = result.val[14] = 0; + result.val[15] = 1; + + for(int j = 0; j < 3; ++j) + { + for(int i = 0; i < 3; ++i) + { + float_type value = 0; + for(int k = 0; k < 3; ++k) + value += R(j, k) * Lc(k, i); + result(j, i) = value; + } + + result(j, 3) = R.row(j).dot(tc.t()); + } + return result; +} + +template inline +cv::Affine3 cv::Affine3::rotate(const Vec3& _rvec) const +{ + return rotate(Affine3f(_rvec).rotation()); +} + +template inline +cv::Affine3 cv::Affine3::translate(const Vec3& t) const +{ + Mat4 m = matrix; + m.val[ 3] += t[0]; + m.val[ 7] += t[1]; + m.val[11] += t[2]; + return m; +} + +template inline +cv::Affine3 cv::Affine3::concatenate(const Affine3& affine) const +{ + return (*this).rotate(affine.rotation()).translate(affine.translation()); +} + +template template inline +cv::Affine3::operator Affine3() const +{ + return Affine3(matrix); +} + +template template inline +cv::Affine3 cv::Affine3::cast() const +{ + return Affine3(matrix); +} + +template inline +cv::Affine3 cv::operator*(const cv::Affine3& affine1, const cv::Affine3& affine2) +{ + return affine2.concatenate(affine1); +} + +template inline +V cv::operator*(const cv::Affine3& affine, const V& v) +{ + const typename Affine3::Mat4& m = affine.matrix; + + V r; + r.x = m.val[0] * v.x + m.val[1] * v.y + m.val[ 2] * v.z + m.val[ 3]; + r.y = m.val[4] * v.x + m.val[5] * v.y + m.val[ 6] * v.z + m.val[ 7]; + r.z = m.val[8] * v.x + m.val[9] * v.y + m.val[10] * v.z + m.val[11]; + return r; +} + +static inline +cv::Vec3f cv::operator*(const cv::Affine3f& affine, const cv::Vec3f& v) +{ + const cv::Matx44f& m = affine.matrix; + cv::Vec3f r; + r.val[0] = m.val[0] * v[0] + m.val[1] * v[1] + m.val[ 2] * v[2] + m.val[ 3]; + r.val[1] = m.val[4] * v[0] + m.val[5] * v[1] + m.val[ 6] * v[2] + m.val[ 7]; + r.val[2] = m.val[8] * v[0] + m.val[9] * v[1] + m.val[10] * v[2] + m.val[11]; + return r; +} + +static inline +cv::Vec3d cv::operator*(const cv::Affine3d& affine, const cv::Vec3d& v) +{ + const cv::Matx44d& m = affine.matrix; + cv::Vec3d r; + r.val[0] = m.val[0] * v[0] + m.val[1] * v[1] + m.val[ 2] * v[2] + m.val[ 3]; + r.val[1] = m.val[4] * v[0] + m.val[5] * v[1] + m.val[ 6] * v[2] + m.val[ 7]; + r.val[2] = m.val[8] * v[0] + m.val[9] * v[1] + m.val[10] * v[2] + m.val[11]; + return r; +} + + + +#if defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H + +template inline +cv::Affine3::Affine3(const Eigen::Transform& affine) +{ + cv::Mat(4, 4, cv::traits::Type::value, affine.matrix().data()).copyTo(matrix); +} + +template inline +cv::Affine3::Affine3(const Eigen::Transform& affine) +{ + Eigen::Transform a = affine; + cv::Mat(4, 4, cv::traits::Type::value, a.matrix().data()).copyTo(matrix); +} + +template inline +cv::Affine3::operator Eigen::Transform() const +{ + Eigen::Transform r; + cv::Mat hdr(4, 4, cv::traits::Type::value, r.matrix().data()); + cv::Mat(matrix, false).copyTo(hdr); + return r; +} + +template inline 
+cv::Affine3::operator Eigen::Transform() const +{ + return this->operator Eigen::Transform(); +} + +#endif /* defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H */ + +//! @endcond + +#endif /* __cplusplus */ + +#endif /* OPENCV_CORE_AFFINE3_HPP */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/async.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/async.hpp new file mode 100644 index 00000000..54560c7d --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/async.hpp @@ -0,0 +1,105 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#ifndef OPENCV_CORE_ASYNC_HPP +#define OPENCV_CORE_ASYNC_HPP + +#include + +#ifdef CV_CXX11 +//#include +#include +#endif + +namespace cv { + +/** @addtogroup core_async + +@{ +*/ + + +/** @brief Returns result of asynchronous operations + +Object has attached asynchronous state. +Assignment operator doesn't clone asynchronous state (it is shared between all instances). + +Result can be fetched via get() method only once. + +*/ +class CV_EXPORTS_W AsyncArray +{ +public: + ~AsyncArray() CV_NOEXCEPT; + CV_WRAP AsyncArray() CV_NOEXCEPT; + AsyncArray(const AsyncArray& o) CV_NOEXCEPT; + AsyncArray& operator=(const AsyncArray& o) CV_NOEXCEPT; + CV_WRAP void release() CV_NOEXCEPT; + + /** Fetch the result. + @param[out] dst destination array + + Waits for result until container has valid result. + Throws exception if exception was stored as a result. + + Throws exception on invalid container state. + + @note Result or stored exception can be fetched only once. + */ + CV_WRAP void get(OutputArray dst) const; + + /** Retrieving the result with timeout + @param[out] dst destination array + @param[in] timeoutNs timeout in nanoseconds, -1 for infinite wait + + @returns true if result is ready, false if the timeout has expired + + @note Result or stored exception can be fetched only once. + */ + bool get(OutputArray dst, int64 timeoutNs) const; + + CV_WRAP inline + bool get(OutputArray dst, double timeoutNs) const { return get(dst, (int64)timeoutNs); } + + bool wait_for(int64 timeoutNs) const; + + CV_WRAP inline + bool wait_for(double timeoutNs) const { return wait_for((int64)timeoutNs); } + + CV_WRAP bool valid() const CV_NOEXCEPT; + +#ifdef CV_CXX11 + inline AsyncArray(AsyncArray&& o) { p = o.p; o.p = NULL; } + inline AsyncArray& operator=(AsyncArray&& o) CV_NOEXCEPT { std::swap(p, o.p); return *this; } + + template + inline bool get(OutputArray dst, const std::chrono::duration<_Rep, _Period>& timeout) + { + return get(dst, (int64)(std::chrono::nanoseconds(timeout).count())); + } + + template + inline bool wait_for(const std::chrono::duration<_Rep, _Period>& timeout) + { + return wait_for((int64)(std::chrono::nanoseconds(timeout).count())); + } + +#if 0 + std::future getFutureMat() const; + std::future getFutureUMat() const; +#endif +#endif + + + // PImpl + struct Impl; friend struct Impl; + inline void* _getImpl() const CV_NOEXCEPT { return p; } +protected: + Impl* p; +}; + + +//! 
@} +} // namespace +#endif // OPENCV_CORE_ASYNC_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/base.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/base.hpp new file mode 100644 index 00000000..12504974 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/base.hpp @@ -0,0 +1,722 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Copyright (C) 2014, Itseez Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_BASE_HPP +#define OPENCV_CORE_BASE_HPP + +#ifndef __cplusplus +# error base.hpp header must be compiled as C++ +#endif + +#include "opencv2/opencv_modules.hpp" + +#include +#include + +#include "opencv2/core/cvdef.h" +#include "opencv2/core/cvstd.hpp" + +namespace cv +{ + +//! @addtogroup core_utils +//! @{ + +namespace Error { +//! 
error codes +enum Code { + StsOk= 0, //!< everything is ok + StsBackTrace= -1, //!< pseudo error for back trace + StsError= -2, //!< unknown /unspecified error + StsInternal= -3, //!< internal error (bad state) + StsNoMem= -4, //!< insufficient memory + StsBadArg= -5, //!< function arg/param is bad + StsBadFunc= -6, //!< unsupported function + StsNoConv= -7, //!< iteration didn't converge + StsAutoTrace= -8, //!< tracing + HeaderIsNull= -9, //!< image header is NULL + BadImageSize= -10, //!< image size is invalid + BadOffset= -11, //!< offset is invalid + BadDataPtr= -12, //!< + BadStep= -13, //!< image step is wrong, this may happen for a non-continuous matrix. + BadModelOrChSeq= -14, //!< + BadNumChannels= -15, //!< bad number of channels, for example, some functions accept only single channel matrices. + BadNumChannel1U= -16, //!< + BadDepth= -17, //!< input image depth is not supported by the function + BadAlphaChannel= -18, //!< + BadOrder= -19, //!< number of dimensions is out of range + BadOrigin= -20, //!< incorrect input origin + BadAlign= -21, //!< incorrect input align + BadCallBack= -22, //!< + BadTileSize= -23, //!< + BadCOI= -24, //!< input COI is not supported + BadROISize= -25, //!< incorrect input roi + MaskIsTiled= -26, //!< + StsNullPtr= -27, //!< null pointer + StsVecLengthErr= -28, //!< incorrect vector length + StsFilterStructContentErr= -29, //!< incorrect filter structure content + StsKernelStructContentErr= -30, //!< incorrect transform kernel content + StsFilterOffsetErr= -31, //!< incorrect filter offset value + StsBadSize= -201, //!< the input/output structure size is incorrect + StsDivByZero= -202, //!< division by zero + StsInplaceNotSupported= -203, //!< in-place operation is not supported + StsObjectNotFound= -204, //!< request can't be completed + StsUnmatchedFormats= -205, //!< formats of input/output arrays differ + StsBadFlag= -206, //!< flag is wrong or not supported + StsBadPoint= -207, //!< bad CvPoint + StsBadMask= -208, //!< bad format of mask (neither 8uC1 nor 8sC1) + StsUnmatchedSizes= -209, //!< sizes of input/output structures do not match + StsUnsupportedFormat= -210, //!< the data format/type is not supported by the function + StsOutOfRange= -211, //!< some of parameters are out of range + StsParseError= -212, //!< invalid syntax/structure of the parsed file + StsNotImplemented= -213, //!< the requested function/feature is not implemented + StsBadMemBlock= -214, //!< an allocated block has been corrupted + StsAssert= -215, //!< assertion failed + GpuNotSupported= -216, //!< no CUDA support + GpuApiCallError= -217, //!< GPU API call error + OpenGlNotSupported= -218, //!< no OpenGL support + OpenGlApiCallError= -219, //!< OpenGL API call error + OpenCLApiCallError= -220, //!< OpenCL API call error + OpenCLDoubleNotSupported= -221, + OpenCLInitError= -222, //!< OpenCL initialization error + OpenCLNoAMDBlasFft= -223 +}; +} //Error + +//! @} core_utils + +//! @addtogroup core_array +//! @{ + +//! matrix decomposition types +enum DecompTypes { + /** Gaussian elimination with the optimal pivot element chosen. 
*/ + DECOMP_LU = 0, + /** singular value decomposition (SVD) method; the system can be over-defined and/or the matrix + src1 can be singular */ + DECOMP_SVD = 1, + /** eigenvalue decomposition; the matrix src1 must be symmetrical */ + DECOMP_EIG = 2, + /** Cholesky \f$LL^T\f$ factorization; the matrix src1 must be symmetrical and positively + defined */ + DECOMP_CHOLESKY = 3, + /** QR factorization; the system can be over-defined and/or the matrix src1 can be singular */ + DECOMP_QR = 4, + /** while all the previous flags are mutually exclusive, this flag can be used together with + any of the previous; it means that the normal equations + \f$\texttt{src1}^T\cdot\texttt{src1}\cdot\texttt{dst}=\texttt{src1}^T\texttt{src2}\f$ are + solved instead of the original system + \f$\texttt{src1}\cdot\texttt{dst}=\texttt{src2}\f$ */ + DECOMP_NORMAL = 16 +}; + +/** norm types + +src1 and src2 denote input arrays. +*/ + +enum NormTypes { + /** + \f[ + norm = \forkthree + {\|\texttt{src1}\|_{L_{\infty}} = \max _I | \texttt{src1} (I)|}{if \(\texttt{normType} = \texttt{NORM_INF}\) } + {\|\texttt{src1}-\texttt{src2}\|_{L_{\infty}} = \max _I | \texttt{src1} (I) - \texttt{src2} (I)|}{if \(\texttt{normType} = \texttt{NORM_INF}\) } + {\frac{\|\texttt{src1}-\texttt{src2}\|_{L_{\infty}} }{\|\texttt{src2}\|_{L_{\infty}} }}{if \(\texttt{normType} = \texttt{NORM_RELATIVE | NORM_INF}\) } + \f] + */ + NORM_INF = 1, + /** + \f[ + norm = \forkthree + {\| \texttt{src1} \| _{L_1} = \sum _I | \texttt{src1} (I)|}{if \(\texttt{normType} = \texttt{NORM_L1}\)} + { \| \texttt{src1} - \texttt{src2} \| _{L_1} = \sum _I | \texttt{src1} (I) - \texttt{src2} (I)|}{if \(\texttt{normType} = \texttt{NORM_L1}\) } + { \frac{\|\texttt{src1}-\texttt{src2}\|_{L_1} }{\|\texttt{src2}\|_{L_1}} }{if \(\texttt{normType} = \texttt{NORM_RELATIVE | NORM_L1}\) } + \f]*/ + NORM_L1 = 2, + /** + \f[ + norm = \forkthree + { \| \texttt{src1} \| _{L_2} = \sqrt{\sum_I \texttt{src1}(I)^2} }{if \(\texttt{normType} = \texttt{NORM_L2}\) } + { \| \texttt{src1} - \texttt{src2} \| _{L_2} = \sqrt{\sum_I (\texttt{src1}(I) - \texttt{src2}(I))^2} }{if \(\texttt{normType} = \texttt{NORM_L2}\) } + { \frac{\|\texttt{src1}-\texttt{src2}\|_{L_2} }{\|\texttt{src2}\|_{L_2}} }{if \(\texttt{normType} = \texttt{NORM_RELATIVE | NORM_L2}\) } + \f] + */ + NORM_L2 = 4, + /** + \f[ + norm = \forkthree + { \| \texttt{src1} \| _{L_2} ^{2} = \sum_I \texttt{src1}(I)^2} {if \(\texttt{normType} = \texttt{NORM_L2SQR}\)} + { \| \texttt{src1} - \texttt{src2} \| _{L_2} ^{2} = \sum_I (\texttt{src1}(I) - \texttt{src2}(I))^2 }{if \(\texttt{normType} = \texttt{NORM_L2SQR}\) } + { \left(\frac{\|\texttt{src1}-\texttt{src2}\|_{L_2} }{\|\texttt{src2}\|_{L_2}}\right)^2 }{if \(\texttt{normType} = \texttt{NORM_RELATIVE | NORM_L2SQR}\) } + \f] + */ + NORM_L2SQR = 5, + /** + In the case of one input array, calculates the Hamming distance of the array from zero, + In the case of two input arrays, calculates the Hamming distance between the arrays. + */ + NORM_HAMMING = 6, + /** + Similar to NORM_HAMMING, but in the calculation, each two bits of the input sequence will + be added and treated as a single bit to be used in the same calculation as NORM_HAMMING. + */ + NORM_HAMMING2 = 7, + NORM_TYPE_MASK = 7, //!< bit-mask which can be used to separate norm type from norm flags + NORM_RELATIVE = 8, //!< flag + NORM_MINMAX = 32 //!< flag + }; + +//! comparison types +enum CmpTypes { CMP_EQ = 0, //!< src1 is equal to src2. + CMP_GT = 1, //!< src1 is greater than src2. 
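The norm definitions above map one-to-one onto cv::norm flags; a small sketch (assuming <opencv2/core.hpp>) with src = (3, -4), where the expected values are L1 = 7, L2 = 5, INF = 4 and L2SQR = 25.

#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
    // 1x2 single-channel matrix: src = (3, -4), ref = (3, 0)
    cv::Mat src = (cv::Mat_<float>(1, 2) << 3.f, -4.f);
    cv::Mat ref = (cv::Mat_<float>(1, 2) << 3.f,  0.f);

    std::printf("L1    = %.1f\n", cv::norm(src, cv::NORM_L1));     // |3| + |-4| = 7
    std::printf("L2    = %.1f\n", cv::norm(src, cv::NORM_L2));     // sqrt(9 + 16) = 5
    std::printf("INF   = %.1f\n", cv::norm(src, cv::NORM_INF));    // max(|3|, |-4|) = 4
    std::printf("L2SQR = %.1f\n", cv::norm(src, cv::NORM_L2SQR));  // 9 + 16 = 25

    // NORM_RELATIVE is a flag combined with a base norm: ||src - ref|| / ||ref||
    double rel = cv::norm(src, ref, cv::NORM_RELATIVE | cv::NORM_L2);
    std::printf("relative L2 = %.2f\n", rel);                      // 4 / 3 = 1.33
    return 0;
}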
+ CMP_GE = 2, //!< src1 is greater than or equal to src2. + CMP_LT = 3, //!< src1 is less than src2. + CMP_LE = 4, //!< src1 is less than or equal to src2. + CMP_NE = 5 //!< src1 is unequal to src2. + }; + +//! generalized matrix multiplication flags +enum GemmFlags { GEMM_1_T = 1, //!< transposes src1 + GEMM_2_T = 2, //!< transposes src2 + GEMM_3_T = 4 //!< transposes src3 + }; + +enum DftFlags { + /** performs an inverse 1D or 2D transform instead of the default forward + transform. */ + DFT_INVERSE = 1, + /** scales the result: divide it by the number of array elements. Normally, it is + combined with DFT_INVERSE. */ + DFT_SCALE = 2, + /** performs a forward or inverse transform of every individual row of the input + matrix; this flag enables you to transform multiple vectors simultaneously and can be used to + decrease the overhead (which is sometimes several times larger than the processing itself) to + perform 3D and higher-dimensional transformations and so forth.*/ + DFT_ROWS = 4, + /** performs a forward transformation of 1D or 2D real array; the result, + though being a complex array, has complex-conjugate symmetry (*CCS*, see the function + description below for details), and such an array can be packed into a real array of the same + size as input, which is the fastest option and which is what the function does by default; + however, you may wish to get a full complex array (for simpler spectrum analysis, and so on) - + pass the flag to enable the function to produce a full-size complex output array. */ + DFT_COMPLEX_OUTPUT = 16, + /** performs an inverse transformation of a 1D or 2D complex array; the + result is normally a complex array of the same size, however, if the input array has + conjugate-complex symmetry (for example, it is a result of forward transformation with + DFT_COMPLEX_OUTPUT flag), the output is a real array; while the function itself does not + check whether the input is symmetrical or not, you can pass the flag and then the function + will assume the symmetry and produce the real output array (note that when the input is packed + into a real array and inverse transformation is executed, the function treats the input as a + packed complex-conjugate symmetrical array, and the output will also be a real array). */ + DFT_REAL_OUTPUT = 32, + /** specifies that input is complex input. If this flag is set, the input must have 2 channels. + On the other hand, for backwards compatibility reason, if input has 2 channels, input is + already considered complex. */ + DFT_COMPLEX_INPUT = 64, + /** performs an inverse 1D or 2D transform instead of the default forward transform. */ + DCT_INVERSE = DFT_INVERSE, + /** performs a forward or inverse transform of every individual row of the input + matrix. This flag enables you to transform multiple vectors simultaneously and can be used to + decrease the overhead (which is sometimes several times larger than the processing itself) to + perform 3D and higher-dimensional transforms and so forth.*/ + DCT_ROWS = DFT_ROWS +}; + +//! Various border types, image boundaries are denoted with `|` +//! 
@see borderInterpolate, copyMakeBorder +enum BorderTypes { + BORDER_CONSTANT = 0, //!< `iiiiii|abcdefgh|iiiiiii` with some specified `i` + BORDER_REPLICATE = 1, //!< `aaaaaa|abcdefgh|hhhhhhh` + BORDER_REFLECT = 2, //!< `fedcba|abcdefgh|hgfedcb` + BORDER_WRAP = 3, //!< `cdefgh|abcdefgh|abcdefg` + BORDER_REFLECT_101 = 4, //!< `gfedcb|abcdefgh|gfedcba` + BORDER_TRANSPARENT = 5, //!< `uvwxyz|abcdefgh|ijklmno` + + BORDER_REFLECT101 = BORDER_REFLECT_101, //!< same as BORDER_REFLECT_101 + BORDER_DEFAULT = BORDER_REFLECT_101, //!< same as BORDER_REFLECT_101 + BORDER_ISOLATED = 16 //!< do not look outside of ROI +}; + +//! @} core_array + +//! @addtogroup core_utils +//! @{ + +/*! @brief Signals an error and raises the exception. + +By default the function prints information about the error to stderr, +then it either stops if setBreakOnError() had been called before or raises the exception. +It is possible to alternate error processing by using redirectError(). +@param _code - error code (Error::Code) +@param _err - error description +@param _func - function name. Available only when the compiler supports getting it +@param _file - source file name where the error has occurred +@param _line - line number in the source file where the error has occurred +@see CV_Error, CV_Error_, CV_Assert, CV_DbgAssert + */ +CV_EXPORTS void error(int _code, const String& _err, const char* _func, const char* _file, int _line); + +#ifdef __GNUC__ +# if defined __clang__ || defined __APPLE__ +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Winvalid-noreturn" +# endif +#endif + +/** same as cv::error, but does not return */ +CV_INLINE CV_NORETURN void errorNoReturn(int _code, const String& _err, const char* _func, const char* _file, int _line) +{ + error(_code, _err, _func, _file, _line); +#ifdef __GNUC__ +# if !defined __clang__ && !defined __APPLE__ + // this suppresses this warning: "noreturn" function does return [enabled by default] + __builtin_trap(); + // or use infinite loop: for (;;) {} +# endif +#endif +} +#ifdef __GNUC__ +# if defined __clang__ || defined __APPLE__ +# pragma GCC diagnostic pop +# endif +#endif + +#ifdef CV_STATIC_ANALYSIS + +// In practice, some macro are not processed correctly (noreturn is not detected). +// We need to use simplified definition for them. +#define CV_Error(code, msg) do { (void)(code); (void)(msg); abort(); } while (0) +#define CV_Error_(code, args) do { (void)(code); (void)(cv::format args); abort(); } while (0) +#define CV_Assert( expr ) do { if (!(expr)) abort(); } while (0) +#define CV_ErrorNoReturn CV_Error +#define CV_ErrorNoReturn_ CV_Error_ + +#else // CV_STATIC_ANALYSIS + +/** @brief Call the error handler. + +Currently, the error handler prints the error code and the error message to the standard +error stream `stderr`. In the Debug configuration, it then provokes memory access violation, so that +the execution stack and all the parameters can be analyzed by the debugger. In the Release +configuration, the exception is thrown. + +@param code one of Error::Code +@param msg error message +*/ +#define CV_Error( code, msg ) cv::error( code, msg, CV_Func, __FILE__, __LINE__ ) + +/** @brief Call the error handler. 
+ +This macro can be used to construct an error message on-fly to include some dynamic information, +for example: +@code + // note the extra parentheses around the formatted text message + CV_Error_(Error::StsOutOfRange, + ("the value at (%d, %d)=%g is out of range", badPt.x, badPt.y, badValue)); +@endcode +@param code one of Error::Code +@param args printf-like formatted error message in parentheses +*/ +#define CV_Error_( code, args ) cv::error( code, cv::format args, CV_Func, __FILE__, __LINE__ ) + +/** @brief Checks a condition at runtime and throws exception if it fails + +The macros CV_Assert (and CV_DbgAssert(expr)) evaluate the specified expression. If it is 0, the macros +raise an error (see cv::error). The macro CV_Assert checks the condition in both Debug and Release +configurations while CV_DbgAssert is only retained in the Debug configuration. +*/ +#define CV_Assert( expr ) do { if(!!(expr)) ; else cv::error( cv::Error::StsAssert, #expr, CV_Func, __FILE__, __LINE__ ); } while(0) + +//! @cond IGNORED +#define CV__ErrorNoReturn( code, msg ) cv::errorNoReturn( code, msg, CV_Func, __FILE__, __LINE__ ) +#define CV__ErrorNoReturn_( code, args ) cv::errorNoReturn( code, cv::format args, CV_Func, __FILE__, __LINE__ ) +#ifdef __OPENCV_BUILD +#undef CV_Error +#define CV_Error CV__ErrorNoReturn +#undef CV_Error_ +#define CV_Error_ CV__ErrorNoReturn_ +#undef CV_Assert +#define CV_Assert( expr ) do { if(!!(expr)) ; else cv::errorNoReturn( cv::Error::StsAssert, #expr, CV_Func, __FILE__, __LINE__ ); } while(0) +#else +// backward compatibility +#define CV_ErrorNoReturn CV__ErrorNoReturn +#define CV_ErrorNoReturn_ CV__ErrorNoReturn_ +#endif +//! @endcond + +#endif // CV_STATIC_ANALYSIS + +//! @cond IGNORED + +#if defined OPENCV_FORCE_MULTIARG_ASSERT_CHECK && defined CV_STATIC_ANALYSIS +#warning "OPENCV_FORCE_MULTIARG_ASSERT_CHECK can't be used with CV_STATIC_ANALYSIS" +#undef OPENCV_FORCE_MULTIARG_ASSERT_CHECK +#endif + +#ifdef OPENCV_FORCE_MULTIARG_ASSERT_CHECK +#define CV_Assert_1( expr ) do { if(!!(expr)) ; else cv::error( cv::Error::StsAssert, #expr, CV_Func, __FILE__, __LINE__ ); } while(0) +#else +#define CV_Assert_1 CV_Assert +#endif +#define CV_Assert_2( expr1, expr2 ) CV_Assert_1(expr1); CV_Assert_1(expr2) +#define CV_Assert_3( expr1, expr2, expr3 ) CV_Assert_2(expr1, expr2); CV_Assert_1(expr3) +#define CV_Assert_4( expr1, expr2, expr3, expr4 ) CV_Assert_3(expr1, expr2, expr3); CV_Assert_1(expr4) +#define CV_Assert_5( expr1, expr2, expr3, expr4, expr5 ) CV_Assert_4(expr1, expr2, expr3, expr4); CV_Assert_1(expr5) +#define CV_Assert_6( expr1, expr2, expr3, expr4, expr5, expr6 ) CV_Assert_5(expr1, expr2, expr3, expr4, expr5); CV_Assert_1(expr6) +#define CV_Assert_7( expr1, expr2, expr3, expr4, expr5, expr6, expr7 ) CV_Assert_6(expr1, expr2, expr3, expr4, expr5, expr6 ); CV_Assert_1(expr7) +#define CV_Assert_8( expr1, expr2, expr3, expr4, expr5, expr6, expr7, expr8 ) CV_Assert_7(expr1, expr2, expr3, expr4, expr5, expr6, expr7 ); CV_Assert_1(expr8) +#define CV_Assert_9( expr1, expr2, expr3, expr4, expr5, expr6, expr7, expr8, expr9 ) CV_Assert_8(expr1, expr2, expr3, expr4, expr5, expr6, expr7, expr8 ); CV_Assert_1(expr9) +#define CV_Assert_10( expr1, expr2, expr3, expr4, expr5, expr6, expr7, expr8, expr9, expr10 ) CV_Assert_9(expr1, expr2, expr3, expr4, expr5, expr6, expr7, expr8, expr9 ); CV_Assert_1(expr10) + +#define CV_Assert_N(...) 
do { __CV_CAT(CV_Assert_, __CV_VA_NUM_ARGS(__VA_ARGS__)) (__VA_ARGS__); } while(0) + +#ifdef OPENCV_FORCE_MULTIARG_ASSERT_CHECK +#undef CV_Assert +#define CV_Assert CV_Assert_N +#endif +//! @endcond + +#if defined _DEBUG || defined CV_STATIC_ANALYSIS +# define CV_DbgAssert(expr) CV_Assert(expr) +#else +/** replaced with CV_Assert(expr) in Debug configuration */ +# define CV_DbgAssert(expr) +#endif + +/* + * Hamming distance functor - counts the bit differences between two strings - useful for the Brief descriptor + * bit count of A exclusive XOR'ed with B + */ +struct CV_EXPORTS Hamming +{ + enum { normType = NORM_HAMMING }; + typedef unsigned char ValueType; + typedef int ResultType; + + /** this will count the bits in a ^ b + */ + ResultType operator()( const unsigned char* a, const unsigned char* b, int size ) const; +}; + +typedef Hamming HammingLUT; + +/////////////////////////////////// inline norms //////////////////////////////////// + +template inline _Tp cv_abs(_Tp x) { return std::abs(x); } +inline int cv_abs(uchar x) { return x; } +inline int cv_abs(schar x) { return std::abs(x); } +inline int cv_abs(ushort x) { return x; } +inline int cv_abs(short x) { return std::abs(x); } + +template static inline +_AccTp normL2Sqr(const _Tp* a, int n) +{ + _AccTp s = 0; + int i=0; +#if CV_ENABLE_UNROLLED + for( ; i <= n - 4; i += 4 ) + { + _AccTp v0 = a[i], v1 = a[i+1], v2 = a[i+2], v3 = a[i+3]; + s += v0*v0 + v1*v1 + v2*v2 + v3*v3; + } +#endif + for( ; i < n; i++ ) + { + _AccTp v = a[i]; + s += v*v; + } + return s; +} + +template static inline +_AccTp normL1(const _Tp* a, int n) +{ + _AccTp s = 0; + int i = 0; +#if CV_ENABLE_UNROLLED + for(; i <= n - 4; i += 4 ) + { + s += (_AccTp)cv_abs(a[i]) + (_AccTp)cv_abs(a[i+1]) + + (_AccTp)cv_abs(a[i+2]) + (_AccTp)cv_abs(a[i+3]); + } +#endif + for( ; i < n; i++ ) + s += cv_abs(a[i]); + return s; +} + +template static inline +_AccTp normInf(const _Tp* a, int n) +{ + _AccTp s = 0; + for( int i = 0; i < n; i++ ) + s = std::max(s, (_AccTp)cv_abs(a[i])); + return s; +} + +template static inline +_AccTp normL2Sqr(const _Tp* a, const _Tp* b, int n) +{ + _AccTp s = 0; + int i= 0; +#if CV_ENABLE_UNROLLED + for(; i <= n - 4; i += 4 ) + { + _AccTp v0 = _AccTp(a[i] - b[i]), v1 = _AccTp(a[i+1] - b[i+1]), v2 = _AccTp(a[i+2] - b[i+2]), v3 = _AccTp(a[i+3] - b[i+3]); + s += v0*v0 + v1*v1 + v2*v2 + v3*v3; + } +#endif + for( ; i < n; i++ ) + { + _AccTp v = _AccTp(a[i] - b[i]); + s += v*v; + } + return s; +} + +static inline float normL2Sqr(const float* a, const float* b, int n) +{ + float s = 0.f; + for( int i = 0; i < n; i++ ) + { + float v = a[i] - b[i]; + s += v*v; + } + return s; +} + +template static inline +_AccTp normL1(const _Tp* a, const _Tp* b, int n) +{ + _AccTp s = 0; + int i= 0; +#if CV_ENABLE_UNROLLED + for(; i <= n - 4; i += 4 ) + { + _AccTp v0 = _AccTp(a[i] - b[i]), v1 = _AccTp(a[i+1] - b[i+1]), v2 = _AccTp(a[i+2] - b[i+2]), v3 = _AccTp(a[i+3] - b[i+3]); + s += std::abs(v0) + std::abs(v1) + std::abs(v2) + std::abs(v3); + } +#endif + for( ; i < n; i++ ) + { + _AccTp v = _AccTp(a[i] - b[i]); + s += std::abs(v); + } + return s; +} + +inline float normL1(const float* a, const float* b, int n) +{ + float s = 0.f; + for( int i = 0; i < n; i++ ) + { + s += std::abs(a[i] - b[i]); + } + return s; +} + +inline int normL1(const uchar* a, const uchar* b, int n) +{ + int s = 0; + for( int i = 0; i < n; i++ ) + { + s += std::abs(a[i] - b[i]); + } + return s; +} + +template static inline +_AccTp normInf(const _Tp* a, const _Tp* b, int n) +{ + _AccTp s = 0; + for( int 
i = 0; i < n; i++ ) + { + _AccTp v0 = a[i] - b[i]; + s = std::max(s, std::abs(v0)); + } + return s; +} + +/** @brief Computes the cube root of an argument. + + The function cubeRoot computes \f$\sqrt[3]{\texttt{val}}\f$. Negative arguments are handled correctly. + NaN and Inf are not handled. The accuracy approaches the maximum possible accuracy for + single-precision data. + @param val A function argument. + */ +CV_EXPORTS_W float cubeRoot(float val); + +/** @overload + +cubeRoot with argument of `double` type calls `std::cbrt(double)` (C++11) or falls back on `pow()` for C++98 compilation mode. +*/ +static inline +double cubeRoot(double val) +{ +#ifdef CV_CXX11 + return std::cbrt(val); +#else + double v = pow(abs(val), 1/3.); // pow doesn't support negative inputs with fractional exponents + return val >= 0 ? v : -v; +#endif +} + +/** @brief Calculates the angle of a 2D vector in degrees. + + The function fastAtan2 calculates the full-range angle of an input 2D vector. The angle is measured + in degrees and varies from 0 to 360 degrees. The accuracy is about 0.3 degrees. + @param x x-coordinate of the vector. + @param y y-coordinate of the vector. + */ +CV_EXPORTS_W float fastAtan2(float y, float x); + +/** proxy for hal::LU */ +CV_EXPORTS int LU(float* A, size_t astep, int m, float* b, size_t bstep, int n); +/** proxy for hal::LU */ +CV_EXPORTS int LU(double* A, size_t astep, int m, double* b, size_t bstep, int n); +/** proxy for hal::Cholesky */ +CV_EXPORTS bool Cholesky(float* A, size_t astep, int m, float* b, size_t bstep, int n); +/** proxy for hal::Cholesky */ +CV_EXPORTS bool Cholesky(double* A, size_t astep, int m, double* b, size_t bstep, int n); + +////////////////// forward declarations for important OpenCV types ////////////////// + +//! @cond IGNORED + +template class Vec; +template class Matx; + +template class Complex; +template class Point_; +template class Point3_; +template class Size_; +template class Rect_; +template class Scalar_; + +class CV_EXPORTS RotatedRect; +class CV_EXPORTS Range; +class CV_EXPORTS TermCriteria; +class CV_EXPORTS KeyPoint; +class CV_EXPORTS DMatch; +class CV_EXPORTS RNG; + +class CV_EXPORTS Mat; +class CV_EXPORTS MatExpr; + +class CV_EXPORTS UMat; + +class CV_EXPORTS SparseMat; +typedef Mat MatND; + +template class Mat_; +template class SparseMat_; + +class CV_EXPORTS MatConstIterator; +class CV_EXPORTS SparseMatIterator; +class CV_EXPORTS SparseMatConstIterator; +template class MatIterator_; +template class MatConstIterator_; +template class SparseMatIterator_; +template class SparseMatConstIterator_; + +namespace ogl +{ + class CV_EXPORTS Buffer; + class CV_EXPORTS Texture2D; + class CV_EXPORTS Arrays; +} + +namespace cuda +{ + class CV_EXPORTS GpuMat; + class CV_EXPORTS HostMem; + class CV_EXPORTS Stream; + class CV_EXPORTS Event; +} + +namespace cudev +{ + template class GpuMat_; +} + +namespace ipp +{ +#if OPENCV_ABI_COMPATIBILITY > 300 +CV_EXPORTS unsigned long long getIppFeatures(); +#else +CV_EXPORTS int getIppFeatures(); +#endif +CV_EXPORTS void setIppStatus(int status, const char * const funcname = NULL, const char * const filename = NULL, + int line = 0); +CV_EXPORTS int getIppStatus(); +CV_EXPORTS String getIppErrorLocation(); +CV_EXPORTS_W bool useIPP(); +CV_EXPORTS_W void setUseIPP(bool flag); +CV_EXPORTS_W String getIppVersion(); + +// IPP Not-Exact mode. 
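A quick sketch of the two scalar helpers documented above (assuming this header is reachable through <opencv2/core.hpp>): cubeRoot handles negative input and fastAtan2 returns full-range angles in degrees.

#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
    std::printf("cubeRoot(-8)      = %.1f\n", cv::cubeRoot(-8.0f));       // -2.0, negative input is fine
    std::printf("cubeRoot(27.0)    = %.1f\n", cv::cubeRoot(27.0));        // double overload shown above
    std::printf("fastAtan2(1, 1)   = %.1f\n", cv::fastAtan2(1.f, 1.f));   // ~45 degrees
    std::printf("fastAtan2(-1, -1) = %.1f\n", cv::fastAtan2(-1.f, -1.f)); // ~225 degrees (0..360 range)
    return 0;
}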
This function may force use of IPP then both IPP and OpenCV provide proper results +// but have internal accuracy differences which have too much direct or indirect impact on accuracy tests. +CV_EXPORTS_W bool useIPP_NotExact(); +CV_EXPORTS_W void setUseIPP_NotExact(bool flag); +#if OPENCV_ABI_COMPATIBILITY < 400 +CV_EXPORTS_W bool useIPP_NE(); +CV_EXPORTS_W void setUseIPP_NE(bool flag); +#endif + +} // ipp + +//! @endcond + +//! @} core_utils + + + + +} // cv + +#include "opencv2/core/neon_utils.hpp" +#include "opencv2/core/vsx_utils.hpp" +#include "opencv2/core/check.hpp" + +#endif //OPENCV_CORE_BASE_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/bindings_utils.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/bindings_utils.hpp new file mode 100644 index 00000000..c53511f8 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/bindings_utils.hpp @@ -0,0 +1,223 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#ifndef OPENCV_CORE_BINDINGS_UTILS_HPP +#define OPENCV_CORE_BINDINGS_UTILS_HPP + +#include +#include +#include + +#include + +namespace cv { namespace utils { +//! @addtogroup core_utils +//! @{ + +CV_EXPORTS_W String dumpInputArray(InputArray argument); + +CV_EXPORTS_W String dumpInputArrayOfArrays(InputArrayOfArrays argument); + +CV_EXPORTS_W String dumpInputOutputArray(InputOutputArray argument); + +CV_EXPORTS_W String dumpInputOutputArrayOfArrays(InputOutputArrayOfArrays argument); + +CV_WRAP static inline +String dumpBool(bool argument) +{ + return (argument) ? String("Bool: True") : String("Bool: False"); +} + +CV_WRAP static inline +String dumpInt(int argument) +{ + return cv::format("Int: %d", argument); +} + +CV_WRAP static inline +String dumpSizeT(size_t argument) +{ + std::ostringstream oss("size_t: ", std::ios::ate); + oss << argument; + return oss.str(); +} + +CV_WRAP static inline +String dumpFloat(float argument) +{ + return cv::format("Float: %.2f", argument); +} + +CV_WRAP static inline +String dumpDouble(double argument) +{ + return cv::format("Double: %.2f", argument); +} + +CV_WRAP static inline +String dumpCString(const char* argument) +{ + return cv::format("String: %s", argument); +} + +CV_WRAP static inline +String dumpString(const String& argument) +{ + return cv::format("String: %s", argument.c_str()); +} + +CV_WRAP static inline +String testOverloadResolution(int value, const Point& point = Point(42, 24)) +{ + return format("overload (int=%d, point=(x=%d, y=%d))", value, point.x, + point.y); +} + +CV_WRAP static inline +String testOverloadResolution(const Rect& rect) +{ + return format("overload (rect=(x=%d, y=%d, w=%d, h=%d))", rect.x, rect.y, + rect.width, rect.height); +} + +CV_WRAP static inline +String dumpRect(const Rect& argument) +{ + return format("rect: (x=%d, y=%d, w=%d, h=%d)", argument.x, argument.y, + argument.width, argument.height); +} + +CV_WRAP static inline +String dumpTermCriteria(const TermCriteria& argument) +{ + return format("term_criteria: (type=%d, max_count=%d, epsilon=%lf", + argument.type, argument.maxCount, argument.epsilon); +} + +CV_WRAP static inline +String dumpRotatedRect(const RotatedRect& argument) +{ + return format("rotated_rect: (c_x=%f, c_y=%f, w=%f, h=%f, a=%f)", + argument.center.x, argument.center.y, argument.size.width, + argument.size.height, argument.angle); +} + +CV_WRAP static inline +String dumpRange(const Range& 
argument) +{ + if (argument == Range::all()) + { + return "range: all"; + } + else + { + return format("range: (s=%d, e=%d)", argument.start, argument.end); + } +} + +CV_WRAP static inline +String testReservedKeywordConversion(int positional_argument, int lambda = 2, int from = 3) +{ + return format("arg=%d, lambda=%d, from=%d", positional_argument, lambda, from); +} + +CV_EXPORTS_W String dumpVectorOfInt(const std::vector& vec); + +CV_EXPORTS_W String dumpVectorOfDouble(const std::vector& vec); + +CV_EXPORTS_W String dumpVectorOfRect(const std::vector& vec); + +CV_WRAP static inline +void generateVectorOfRect(size_t len, CV_OUT std::vector& vec) +{ + vec.resize(len); + if (len > 0) + { + RNG rng(12345); + Mat tmp(static_cast(len), 1, CV_32SC4); + rng.fill(tmp, RNG::UNIFORM, 10, 20); + tmp.copyTo(vec); + } +} + +CV_WRAP static inline +void generateVectorOfInt(size_t len, CV_OUT std::vector& vec) +{ + vec.resize(len); + if (len > 0) + { + RNG rng(554433); + Mat tmp(static_cast(len), 1, CV_32SC1); + rng.fill(tmp, RNG::UNIFORM, -10, 10); + tmp.copyTo(vec); + } +} + +CV_WRAP static inline +void generateVectorOfMat(size_t len, int rows, int cols, int dtype, CV_OUT std::vector& vec) +{ + vec.resize(len); + if (len > 0) + { + RNG rng(65431); + for (size_t i = 0; i < len; ++i) + { + vec[i].create(rows, cols, dtype); + rng.fill(vec[i], RNG::UNIFORM, 0, 10); + } + } +} + +CV_WRAP static inline +void testRaiseGeneralException() +{ + throw std::runtime_error("exception text"); +} + +CV_WRAP static inline +AsyncArray testAsyncArray(InputArray argument) +{ + AsyncPromise p; + p.setValue(argument); + return p.getArrayResult(); +} + +CV_WRAP static inline +AsyncArray testAsyncException() +{ + AsyncPromise p; + try + { + CV_Error(Error::StsOk, "Test: Generated async error"); + } + catch (const cv::Exception& e) + { + p.setException(e); + } + return p.getArrayResult(); +} + +//! @} // core_utils +} // namespace cv::utils + +//! @cond IGNORED + +CV_WRAP static inline +int setLogLevel(int level) +{ + // NB: Binding generators doesn't work with enums properly yet, so we define separate overload here + return cv::utils::logging::setLogLevel((cv::utils::logging::LogLevel)level); +} + +CV_WRAP static inline +int getLogLevel() +{ + return cv::utils::logging::getLogLevel(); +} + +//! @endcond IGNORED + +} // namespaces cv / utils + +#endif // OPENCV_CORE_BINDINGS_UTILS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/bufferpool.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/bufferpool.hpp new file mode 100644 index 00000000..4698e5da --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/bufferpool.hpp @@ -0,0 +1,40 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2014, Advanced Micro Devices, Inc., all rights reserved. + +#ifndef OPENCV_CORE_BUFFER_POOL_HPP +#define OPENCV_CORE_BUFFER_POOL_HPP + +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable: 4265) +#endif + +namespace cv +{ + +//! @addtogroup core +//! @{ + +class BufferPoolController +{ +protected: + ~BufferPoolController() { } +public: + virtual size_t getReservedSize() const = 0; + virtual size_t getMaxReservedSize() const = 0; + virtual void setMaxReservedSize(size_t size) = 0; + virtual void freeAllReservedBuffers() = 0; +}; + +//! 
@} + +} + +#ifdef _MSC_VER +#pragma warning(pop) +#endif + +#endif // OPENCV_CORE_BUFFER_POOL_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/check.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/check.hpp new file mode 100644 index 00000000..d975223c --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/check.hpp @@ -0,0 +1,160 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#ifndef OPENCV_CORE_CHECK_HPP +#define OPENCV_CORE_CHECK_HPP + +#include + +namespace cv { + +/** Returns string of cv::Mat depth value: CV_8U -> "CV_8U" or "" */ +CV_EXPORTS const char* depthToString(int depth); + +/** Returns string of cv::Mat depth value: CV_8UC3 -> "CV_8UC3" or "" */ +CV_EXPORTS const String typeToString(int type); + + +//! @cond IGNORED +namespace detail { + +/** Returns string of cv::Mat depth value: CV_8U -> "CV_8U" or NULL */ +CV_EXPORTS const char* depthToString_(int depth); + +/** Returns string of cv::Mat depth value: CV_8UC3 -> "CV_8UC3" or cv::String() */ +CV_EXPORTS const cv::String typeToString_(int type); + +enum TestOp { + TEST_CUSTOM = 0, + TEST_EQ = 1, + TEST_NE = 2, + TEST_LE = 3, + TEST_LT = 4, + TEST_GE = 5, + TEST_GT = 6, + CV__LAST_TEST_OP +}; + +struct CheckContext { + const char* func; + const char* file; + int line; + enum TestOp testOp; + const char* message; + const char* p1_str; + const char* p2_str; +}; + +#ifndef CV__CHECK_FILENAME +# define CV__CHECK_FILENAME __FILE__ +#endif + +#ifndef CV__CHECK_FUNCTION +# if defined _MSC_VER +# define CV__CHECK_FUNCTION __FUNCSIG__ +# elif defined __GNUC__ +# define CV__CHECK_FUNCTION __PRETTY_FUNCTION__ +# else +# define CV__CHECK_FUNCTION "" +# endif +#endif + +#define CV__CHECK_LOCATION_VARNAME(id) CVAUX_CONCAT(CVAUX_CONCAT(__cv_check_, id), __LINE__) +#define CV__DEFINE_CHECK_CONTEXT(id, message, testOp, p1_str, p2_str) \ + static const cv::detail::CheckContext CV__CHECK_LOCATION_VARNAME(id) = \ + { CV__CHECK_FUNCTION, CV__CHECK_FILENAME, __LINE__, testOp, "" message, "" p1_str, "" p2_str } + +CV_EXPORTS void CV_NORETURN check_failed_auto(const int v1, const int v2, const CheckContext& ctx); +CV_EXPORTS void CV_NORETURN check_failed_auto(const size_t v1, const size_t v2, const CheckContext& ctx); +CV_EXPORTS void CV_NORETURN check_failed_auto(const float v1, const float v2, const CheckContext& ctx); +CV_EXPORTS void CV_NORETURN check_failed_auto(const double v1, const double v2, const CheckContext& ctx); +CV_EXPORTS void CV_NORETURN check_failed_auto(const Size_ v1, const Size_ v2, const CheckContext& ctx); +CV_EXPORTS void CV_NORETURN check_failed_MatDepth(const int v1, const int v2, const CheckContext& ctx); +CV_EXPORTS void CV_NORETURN check_failed_MatType(const int v1, const int v2, const CheckContext& ctx); +CV_EXPORTS void CV_NORETURN check_failed_MatChannels(const int v1, const int v2, const CheckContext& ctx); + +CV_EXPORTS void CV_NORETURN check_failed_auto(const int v, const CheckContext& ctx); +CV_EXPORTS void CV_NORETURN check_failed_auto(const size_t v, const CheckContext& ctx); +CV_EXPORTS void CV_NORETURN check_failed_auto(const float v, const CheckContext& ctx); +CV_EXPORTS void CV_NORETURN check_failed_auto(const double v, const CheckContext& ctx); +CV_EXPORTS void CV_NORETURN check_failed_auto(const Size_ v, const CheckContext& ctx); +CV_EXPORTS void CV_NORETURN check_failed_auto(const std::string& v1, const 
CheckContext& ctx); +CV_EXPORTS void CV_NORETURN check_failed_MatDepth(const int v, const CheckContext& ctx); +CV_EXPORTS void CV_NORETURN check_failed_MatType(const int v, const CheckContext& ctx); +CV_EXPORTS void CV_NORETURN check_failed_MatChannels(const int v, const CheckContext& ctx); + + +#define CV__TEST_EQ(v1, v2) ((v1) == (v2)) +#define CV__TEST_NE(v1, v2) ((v1) != (v2)) +#define CV__TEST_LE(v1, v2) ((v1) <= (v2)) +#define CV__TEST_LT(v1, v2) ((v1) < (v2)) +#define CV__TEST_GE(v1, v2) ((v1) >= (v2)) +#define CV__TEST_GT(v1, v2) ((v1) > (v2)) + +#define CV__CHECK(id, op, type, v1, v2, v1_str, v2_str, msg_str) do { \ + if(CV__TEST_##op((v1), (v2))) ; else { \ + CV__DEFINE_CHECK_CONTEXT(id, msg_str, cv::detail::TEST_ ## op, v1_str, v2_str); \ + cv::detail::check_failed_ ## type((v1), (v2), CV__CHECK_LOCATION_VARNAME(id)); \ + } \ +} while (0) + +#define CV__CHECK_CUSTOM_TEST(id, type, v, test_expr, v_str, test_expr_str, msg_str) do { \ + if(!!(test_expr)) ; else { \ + CV__DEFINE_CHECK_CONTEXT(id, msg_str, cv::detail::TEST_CUSTOM, v_str, test_expr_str); \ + cv::detail::check_failed_ ## type((v), CV__CHECK_LOCATION_VARNAME(id)); \ + } \ +} while (0) + +} // namespace +//! @endcond + + +/// Supported values of these types: int, float, double +#define CV_CheckEQ(v1, v2, msg) CV__CHECK(_, EQ, auto, v1, v2, #v1, #v2, msg) +#define CV_CheckNE(v1, v2, msg) CV__CHECK(_, NE, auto, v1, v2, #v1, #v2, msg) +#define CV_CheckLE(v1, v2, msg) CV__CHECK(_, LE, auto, v1, v2, #v1, #v2, msg) +#define CV_CheckLT(v1, v2, msg) CV__CHECK(_, LT, auto, v1, v2, #v1, #v2, msg) +#define CV_CheckGE(v1, v2, msg) CV__CHECK(_, GE, auto, v1, v2, #v1, #v2, msg) +#define CV_CheckGT(v1, v2, msg) CV__CHECK(_, GT, auto, v1, v2, #v1, #v2, msg) + +/// Check with additional "decoding" of type values in error message +#define CV_CheckTypeEQ(t1, t2, msg) CV__CHECK(_, EQ, MatType, t1, t2, #t1, #t2, msg) +/// Check with additional "decoding" of depth values in error message +#define CV_CheckDepthEQ(d1, d2, msg) CV__CHECK(_, EQ, MatDepth, d1, d2, #d1, #d2, msg) + +#define CV_CheckChannelsEQ(c1, c2, msg) CV__CHECK(_, EQ, MatChannels, c1, c2, #c1, #c2, msg) + +/// Example: type == CV_8UC1 || type == CV_8UC3 +#define CV_CheckType(t, test_expr, msg) CV__CHECK_CUSTOM_TEST(_, MatType, t, (test_expr), #t, #test_expr, msg) + +/// Example: depth == CV_32F || depth == CV_64F +#define CV_CheckDepth(t, test_expr, msg) CV__CHECK_CUSTOM_TEST(_, MatDepth, t, (test_expr), #t, #test_expr, msg) + +/// Example: v == A || v == B +#define CV_Check(v, test_expr, msg) CV__CHECK_CUSTOM_TEST(_, auto, v, (test_expr), #v, #test_expr, msg) + +/// Some complex conditions: CV_Check(src2, src2.empty() || (src2.type() == src1.type() && src2.size() == src1.size()), "src2 should have same size/type as src1") +// TODO define pretty-printers + +#ifndef NDEBUG +#define CV_DbgCheck(v, test_expr, msg) CV__CHECK_CUSTOM_TEST(_, auto, v, (test_expr), #v, #test_expr, msg) +#define CV_DbgCheckEQ(v1, v2, msg) CV__CHECK(_, EQ, auto, v1, v2, #v1, #v2, msg) +#define CV_DbgCheckNE(v1, v2, msg) CV__CHECK(_, NE, auto, v1, v2, #v1, #v2, msg) +#define CV_DbgCheckLE(v1, v2, msg) CV__CHECK(_, LE, auto, v1, v2, #v1, #v2, msg) +#define CV_DbgCheckLT(v1, v2, msg) CV__CHECK(_, LT, auto, v1, v2, #v1, #v2, msg) +#define CV_DbgCheckGE(v1, v2, msg) CV__CHECK(_, GE, auto, v1, v2, #v1, #v2, msg) +#define CV_DbgCheckGT(v1, v2, msg) CV__CHECK(_, GT, auto, v1, v2, #v1, #v2, msg) +#else +#define CV_DbgCheck(v, test_expr, msg) do { } while (0) +#define CV_DbgCheckEQ(v1, v2, msg) do { } while (0) 
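A usage sketch for the CV_Check* macros above (assuming <opencv2/core.hpp>); on a failed check they raise a cv::Exception whose message includes the decoded type names.

#include <opencv2/core.hpp>
#include <cstdio>

static void requireFloatMat(const cv::Mat& m)
{
    // Throws cv::Exception with "decoded" type names (e.g. CV_8UC1 vs CV_32FC1) on mismatch.
    CV_CheckTypeEQ(m.type(), CV_32FC1, "input must be single-channel float");
    CV_CheckGT(m.rows, 0, "input must not be empty");
}

int main()
{
    cv::Mat bad(4, 4, CV_8UC1);
    try
    {
        requireFloatMat(bad);
    }
    catch (const cv::Exception& e)
    {
        std::printf("check failed: %s\n", e.what());
    }
    return 0;
}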
+#define CV_DbgCheckNE(v1, v2, msg) do { } while (0) +#define CV_DbgCheckLE(v1, v2, msg) do { } while (0) +#define CV_DbgCheckLT(v1, v2, msg) do { } while (0) +#define CV_DbgCheckGE(v1, v2, msg) do { } while (0) +#define CV_DbgCheckGT(v1, v2, msg) do { } while (0) +#endif + +} // namespace + +#endif // OPENCV_CORE_CHECK_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/core.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/core.hpp new file mode 100644 index 00000000..43891835 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/core.hpp @@ -0,0 +1,48 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifdef __OPENCV_BUILD +#error this is a compatibility header which should not be used inside the OpenCV library +#endif + +#include "opencv2/core.hpp" diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/core_c.h b/third_party/opencv/uos/sw_64/include/opencv2/core/core_c.h new file mode 100644 index 00000000..95a98cfc --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/core_c.h @@ -0,0 +1,3175 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + + +#ifndef OPENCV_CORE_C_H +#define OPENCV_CORE_C_H + +#include "opencv2/core/types_c.h" + +#ifdef __cplusplus +# ifdef _MSC_VER +/* disable warning C4190: 'function' has C-linkage specified, but returns UDT 'typename' + which is incompatible with C + + It is OK to disable it because we only extend few plain structures with + C++ constructors for simpler interoperability with C++ API of the library +*/ +# pragma warning(disable:4190) +# elif defined __clang__ && __clang_major__ >= 3 +# pragma GCC diagnostic ignored "-Wreturn-type-c-linkage" +# endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** @addtogroup core_c + @{ +*/ + +/****************************************************************************************\ +* Array allocation, deallocation, initialization and access to elements * +\****************************************************************************************/ + +/** `malloc` wrapper. + If there is no enough memory, the function + (as well as other OpenCV functions that call cvAlloc) + raises an error. */ +CVAPI(void*) cvAlloc( size_t size ); + +/** `free` wrapper. + Here and further all the memory releasing functions + (that all call cvFree) take double pointer in order to + to clear pointer to the data after releasing it. + Passing pointer to NULL pointer is Ok: nothing happens in this case +*/ +CVAPI(void) cvFree_( void* ptr ); +#define cvFree(ptr) (cvFree_(*(ptr)), *(ptr)=0) + +/** @brief Creates an image header but does not allocate the image data. 
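(Illustrative sketch added by the editor, not part of the upstream OpenCV header.) A header created this way is typically paired with cvCreateData, which the cvCreateImage documentation below describes as the equivalent two-step form; cvSize and IPL_DEPTH_8U come from the types header included above. For example:
@code
    IplImage* hdr = cvCreateImageHeader(cvSize(320, 240), IPL_DEPTH_8U, 1);
    cvCreateData(hdr);        // allocate the pixel data for the header
    cvReleaseImage(&hdr);     // releases both the data and the header
@endcode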
+ +@param size Image width and height +@param depth Image depth (see cvCreateImage ) +@param channels Number of channels (see cvCreateImage ) + */ +CVAPI(IplImage*) cvCreateImageHeader( CvSize size, int depth, int channels ); + +/** @brief Initializes an image header that was previously allocated. + +The returned IplImage\* points to the initialized header. +@param image Image header to initialize +@param size Image width and height +@param depth Image depth (see cvCreateImage ) +@param channels Number of channels (see cvCreateImage ) +@param origin Top-left IPL_ORIGIN_TL or bottom-left IPL_ORIGIN_BL +@param align Alignment for image rows, typically 4 or 8 bytes + */ +CVAPI(IplImage*) cvInitImageHeader( IplImage* image, CvSize size, int depth, + int channels, int origin CV_DEFAULT(0), + int align CV_DEFAULT(4)); + +/** @brief Creates an image header and allocates the image data. + +This function call is equivalent to the following code: +@code + header = cvCreateImageHeader(size, depth, channels); + cvCreateData(header); +@endcode +@param size Image width and height +@param depth Bit depth of image elements. See IplImage for valid depths. +@param channels Number of channels per pixel. See IplImage for details. This function only creates +images with interleaved channels. + */ +CVAPI(IplImage*) cvCreateImage( CvSize size, int depth, int channels ); + +/** @brief Deallocates an image header. + +This call is an analogue of : +@code + if(image ) + { + iplDeallocate(*image, IPL_IMAGE_HEADER | IPL_IMAGE_ROI); + *image = 0; + } +@endcode +but it does not use IPL functions by default (see the CV_TURN_ON_IPL_COMPATIBILITY macro). +@param image Double pointer to the image header + */ +CVAPI(void) cvReleaseImageHeader( IplImage** image ); + +/** @brief Deallocates the image header and the image data. + +This call is a shortened form of : +@code + if(*image ) + { + cvReleaseData(*image); + cvReleaseImageHeader(image); + } +@endcode +@param image Double pointer to the image header +*/ +CVAPI(void) cvReleaseImage( IplImage** image ); + +/** Creates a copy of IPL image (widthStep may differ) */ +CVAPI(IplImage*) cvCloneImage( const IplImage* image ); + +/** @brief Sets the channel of interest in an IplImage. + +If the ROI is set to NULL and the coi is *not* 0, the ROI is allocated. Most OpenCV functions do +*not* support the COI setting, so to process an individual image/matrix channel one may copy (via +cvCopy or cvSplit) the channel to a separate image/matrix, process it and then copy the result +back (via cvCopy or cvMerge) if needed. +@param image A pointer to the image header +@param coi The channel of interest. 0 - all channels are selected, 1 - first channel is selected, +etc. Note that the channel indices become 1-based. + */ +CVAPI(void) cvSetImageCOI( IplImage* image, int coi ); + +/** @brief Returns the index of the channel of interest. + +Returns the channel of interest of in an IplImage. Returned values correspond to the coi in +cvSetImageCOI. +@param image A pointer to the image header + */ +CVAPI(int) cvGetImageCOI( const IplImage* image ); + +/** @brief Sets an image Region Of Interest (ROI) for a given rectangle. + +If the original image ROI was NULL and the rect is not the whole image, the ROI structure is +allocated. + +Most OpenCV functions support the use of ROI and treat the image rectangle as a separate image. For +example, all of the pixel coordinates are counted from the top-left (or bottom-left) corner of the +ROI, not the original image. 
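(Illustrative sketch added by the editor, not part of the upstream OpenCV header.) A minimal ROI round-trip, assuming an 8-bit three-channel image; cvRect and cvScalar are constructors from the types header:
@code
    IplImage* img = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3);
    cvSetImageROI(img, cvRect(100, 100, 200, 150));   // later calls see only this rectangle
    cvSet(img, cvScalar(0, 0, 255, 0), NULL);         // fills just the ROI (BGR red)
    cvResetImageROI(img);
    cvReleaseImage(&img);
@endcode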
+@param image A pointer to the image header
+@param rect The ROI rectangle
+ */
+CVAPI(void) cvSetImageROI( IplImage* image, CvRect rect );
+
+/** @brief Resets the image ROI to include the entire image and releases the ROI structure.
+
+This produces a similar result to the following, but in addition it releases the ROI structure. :
+@code
+    cvSetImageROI(image, cvRect(0, 0, image->width, image->height ));
+    cvSetImageCOI(image, 0);
+@endcode
+@param image A pointer to the image header
+ */
+CVAPI(void) cvResetImageROI( IplImage* image );
+
+/** @brief Returns the image ROI.
+
+If there is no ROI set, cvRect(0,0,image-\>width,image-\>height) is returned.
+@param image A pointer to the image header
+ */
+CVAPI(CvRect) cvGetImageROI( const IplImage* image );
+
+/** @brief Creates a matrix header but does not allocate the matrix data.
+
+The function allocates a new matrix header and returns a pointer to it. The matrix data can then be
+allocated using cvCreateData or set explicitly to user-allocated data via cvSetData.
+@param rows Number of rows in the matrix
+@param cols Number of columns in the matrix
+@param type Type of the matrix elements, see cvCreateMat
+ */
+CVAPI(CvMat*) cvCreateMatHeader( int rows, int cols, int type );
+
+#define CV_AUTOSTEP 0x7fffffff
+
+/** @brief Initializes a pre-allocated matrix header.
+
+This function is often used to process raw data with OpenCV matrix functions. For example, the
+following code computes the matrix product of two matrices, stored as ordinary arrays:
+@code
+    double a[] = { 1, 2, 3, 4,
+                   5, 6, 7, 8,
+                   9, 10, 11, 12 };
+
+    double b[] = { 1, 5, 9,
+                   2, 6, 10,
+                   3, 7, 11,
+                   4, 8, 12 };
+
+    double c[9];
+    CvMat Ma, Mb, Mc ;
+
+    cvInitMatHeader(&Ma, 3, 4, CV_64FC1, a);
+    cvInitMatHeader(&Mb, 4, 3, CV_64FC1, b);
+    cvInitMatHeader(&Mc, 3, 3, CV_64FC1, c);
+
+    cvMatMulAdd(&Ma, &Mb, 0, &Mc);
+    // the c array now contains the product of a (3x4) and b (4x3)
+@endcode
+@param mat A pointer to the matrix header to be initialized
+@param rows Number of rows in the matrix
+@param cols Number of columns in the matrix
+@param type Type of the matrix elements, see cvCreateMat .
+@param data Optional: data pointer assigned to the matrix header
+@param step Optional: full row width in bytes of the assigned data. By default, the minimal
+possible step is used which assumes there are no gaps between subsequent rows of the matrix.
+ */
+CVAPI(CvMat*) cvInitMatHeader( CvMat* mat, int rows, int cols,
+                               int type, void* data CV_DEFAULT(NULL),
+                               int step CV_DEFAULT(CV_AUTOSTEP) );
+
+/** @brief Creates a matrix header and allocates the matrix data.
+
+The function call is equivalent to the following code:
+@code
+    CvMat* mat = cvCreateMatHeader(rows, cols, type);
+    cvCreateData(mat);
+@endcode
+@param rows Number of rows in the matrix
+@param cols Number of columns in the matrix
+@param type The type of the matrix elements in the form
+CV_\<bit depth\>\<S|U|F\>C\<number of channels\> , where S=signed, U=unsigned, F=float. For
+example, CV _ 8UC1 means the elements are 8-bit unsigned and the there is 1 channel, and CV _
+32SC2 means the elements are 32-bit signed and there are 2 channels.
+ */
+CVAPI(CvMat*) cvCreateMat( int rows, int cols, int type );
+
+/** @brief Deallocates a matrix.
+
+The function decrements the matrix data reference counter and deallocates matrix header. If the data
+reference counter is 0, it also deallocates the data.
: +@code + if(*mat ) + cvDecRefData(*mat); + cvFree((void**)mat); +@endcode +@param mat Double pointer to the matrix + */ +CVAPI(void) cvReleaseMat( CvMat** mat ); + +/** @brief Decrements an array data reference counter. + +The function decrements the data reference counter in a CvMat or CvMatND if the reference counter + +pointer is not NULL. If the counter reaches zero, the data is deallocated. In the current +implementation the reference counter is not NULL only if the data was allocated using the +cvCreateData function. The counter will be NULL in other cases such as: external data was assigned +to the header using cvSetData, header is part of a larger matrix or image, or the header was +converted from an image or n-dimensional matrix header. +@param arr Pointer to an array header + */ +CV_INLINE void cvDecRefData( CvArr* arr ) +{ + if( CV_IS_MAT( arr )) + { + CvMat* mat = (CvMat*)arr; + mat->data.ptr = NULL; + if( mat->refcount != NULL && --*mat->refcount == 0 ) + cvFree( &mat->refcount ); + mat->refcount = NULL; + } + else if( CV_IS_MATND( arr )) + { + CvMatND* mat = (CvMatND*)arr; + mat->data.ptr = NULL; + if( mat->refcount != NULL && --*mat->refcount == 0 ) + cvFree( &mat->refcount ); + mat->refcount = NULL; + } +} + +/** @brief Increments array data reference counter. + +The function increments CvMat or CvMatND data reference counter and returns the new counter value if +the reference counter pointer is not NULL, otherwise it returns zero. +@param arr Array header + */ +CV_INLINE int cvIncRefData( CvArr* arr ) +{ + int refcount = 0; + if( CV_IS_MAT( arr )) + { + CvMat* mat = (CvMat*)arr; + if( mat->refcount != NULL ) + refcount = ++*mat->refcount; + } + else if( CV_IS_MATND( arr )) + { + CvMatND* mat = (CvMatND*)arr; + if( mat->refcount != NULL ) + refcount = ++*mat->refcount; + } + return refcount; +} + + +/** Creates an exact copy of the input matrix (except, may be, step value) */ +CVAPI(CvMat*) cvCloneMat( const CvMat* mat ); + + +/** @brief Returns matrix header corresponding to the rectangular sub-array of input image or matrix. + +The function returns header, corresponding to a specified rectangle of the input array. In other + +words, it allows the user to treat a rectangular part of input array as a stand-alone array. ROI is +taken into account by the function so the sub-array of ROI is actually extracted. +@param arr Input array +@param submat Pointer to the resultant sub-array header +@param rect Zero-based coordinates of the rectangle of interest + */ +CVAPI(CvMat*) cvGetSubRect( const CvArr* arr, CvMat* submat, CvRect rect ); +#define cvGetSubArr cvGetSubRect + +/** @brief Returns array row or row span. + +The function returns the header, corresponding to a specified row/row span of the input array. +cvGetRow(arr, submat, row) is a shortcut for cvGetRows(arr, submat, row, row+1). +@param arr Input array +@param submat Pointer to the resulting sub-array header +@param start_row Zero-based index of the starting row (inclusive) of the span +@param end_row Zero-based index of the ending row (exclusive) of the span +@param delta_row Index step in the row span. That is, the function extracts every delta_row -th +row from start_row and up to (but not including) end_row . 
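(Illustrative sketch added by the editor, not part of the upstream OpenCV header.) Since only a header is returned, writes through the sub-array touch the original data:
@code
    CvMat* big = cvCreateMat(100, 100, CV_32FC1);
    CvMat sub;
    cvGetRows(big, &sub, 10, 20, 1);   // header for rows 10..19, no data copied
    cvSetZero(&sub);                   // clears those rows of 'big'
    cvReleaseMat(&big);
@endcode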
+ */ +CVAPI(CvMat*) cvGetRows( const CvArr* arr, CvMat* submat, + int start_row, int end_row, + int delta_row CV_DEFAULT(1)); + +/** @overload +@param arr Input array +@param submat Pointer to the resulting sub-array header +@param row Zero-based index of the selected row +*/ +CV_INLINE CvMat* cvGetRow( const CvArr* arr, CvMat* submat, int row ) +{ + return cvGetRows( arr, submat, row, row + 1, 1 ); +} + + +/** @brief Returns one of more array columns. + +The function returns the header, corresponding to a specified column span of the input array. That + +is, no data is copied. Therefore, any modifications of the submatrix will affect the original array. +If you need to copy the columns, use cvCloneMat. cvGetCol(arr, submat, col) is a shortcut for +cvGetCols(arr, submat, col, col+1). +@param arr Input array +@param submat Pointer to the resulting sub-array header +@param start_col Zero-based index of the starting column (inclusive) of the span +@param end_col Zero-based index of the ending column (exclusive) of the span + */ +CVAPI(CvMat*) cvGetCols( const CvArr* arr, CvMat* submat, + int start_col, int end_col ); + +/** @overload +@param arr Input array +@param submat Pointer to the resulting sub-array header +@param col Zero-based index of the selected column +*/ +CV_INLINE CvMat* cvGetCol( const CvArr* arr, CvMat* submat, int col ) +{ + return cvGetCols( arr, submat, col, col + 1 ); +} + +/** @brief Returns one of array diagonals. + +The function returns the header, corresponding to a specified diagonal of the input array. +@param arr Input array +@param submat Pointer to the resulting sub-array header +@param diag Index of the array diagonal. Zero value corresponds to the main diagonal, -1 +corresponds to the diagonal above the main, 1 corresponds to the diagonal below the main, and so +forth. + */ +CVAPI(CvMat*) cvGetDiag( const CvArr* arr, CvMat* submat, + int diag CV_DEFAULT(0)); + +/** low-level scalar <-> raw data conversion functions */ +CVAPI(void) cvScalarToRawData( const CvScalar* scalar, void* data, int type, + int extend_to_12 CV_DEFAULT(0) ); + +CVAPI(void) cvRawDataToScalar( const void* data, int type, CvScalar* scalar ); + +/** @brief Creates a new matrix header but does not allocate the matrix data. + +The function allocates a header for a multi-dimensional dense array. The array data can further be +allocated using cvCreateData or set explicitly to user-allocated data via cvSetData. +@param dims Number of array dimensions +@param sizes Array of dimension sizes +@param type Type of array elements, see cvCreateMat + */ +CVAPI(CvMatND*) cvCreateMatNDHeader( int dims, const int* sizes, int type ); + +/** @brief Creates the header and allocates the data for a multi-dimensional dense array. + +This function call is equivalent to the following code: +@code + CvMatND* mat = cvCreateMatNDHeader(dims, sizes, type); + cvCreateData(mat); +@endcode +@param dims Number of array dimensions. This must not exceed CV_MAX_DIM (32 by default, but can be +changed at build time). +@param sizes Array of dimension sizes. +@param type Type of array elements, see cvCreateMat . + */ +CVAPI(CvMatND*) cvCreateMatND( int dims, const int* sizes, int type ); + +/** @brief Initializes a pre-allocated multi-dimensional array header. 
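(Illustrative sketch added by the editor, not part of the upstream OpenCV header.) Typical use is wrapping an existing user buffer without any allocation, for example:
@code
    float data[2*3*4];
    int sizes[] = { 2, 3, 4 };
    CvMatND hdr;
    cvInitMatNDHeader(&hdr, 3, sizes, CV_32FC1, data);   // header over user memory, nothing allocated
@endcode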
+ +@param mat A pointer to the array header to be initialized +@param dims The number of array dimensions +@param sizes An array of dimension sizes +@param type Type of array elements, see cvCreateMat +@param data Optional data pointer assigned to the matrix header + */ +CVAPI(CvMatND*) cvInitMatNDHeader( CvMatND* mat, int dims, const int* sizes, + int type, void* data CV_DEFAULT(NULL) ); + +/** @brief Deallocates a multi-dimensional array. + +The function decrements the array data reference counter and releases the array header. If the +reference counter reaches 0, it also deallocates the data. : +@code + if(*mat ) + cvDecRefData(*mat); + cvFree((void**)mat); +@endcode +@param mat Double pointer to the array + */ +CV_INLINE void cvReleaseMatND( CvMatND** mat ) +{ + cvReleaseMat( (CvMat**)mat ); +} + +/** Creates a copy of CvMatND (except, may be, steps) */ +CVAPI(CvMatND*) cvCloneMatND( const CvMatND* mat ); + +/** @brief Creates sparse array. + +The function allocates a multi-dimensional sparse array. Initially the array contain no elements, +that is PtrND and other related functions will return 0 for every index. +@param dims Number of array dimensions. In contrast to the dense matrix, the number of dimensions is +practically unlimited (up to \f$2^{16}\f$ ). +@param sizes Array of dimension sizes +@param type Type of array elements. The same as for CvMat + */ +CVAPI(CvSparseMat*) cvCreateSparseMat( int dims, const int* sizes, int type ); + +/** @brief Deallocates sparse array. + +The function releases the sparse array and clears the array pointer upon exit. +@param mat Double pointer to the array + */ +CVAPI(void) cvReleaseSparseMat( CvSparseMat** mat ); + +/** Creates a copy of CvSparseMat (except, may be, zero items) */ +CVAPI(CvSparseMat*) cvCloneSparseMat( const CvSparseMat* mat ); + +/** @brief Initializes sparse array elements iterator. + +The function initializes iterator of sparse array elements and returns pointer to the first element, +or NULL if the array is empty. +@param mat Input array +@param mat_iterator Initialized iterator + */ +CVAPI(CvSparseNode*) cvInitSparseMatIterator( const CvSparseMat* mat, + CvSparseMatIterator* mat_iterator ); + +/** @brief Returns the next sparse matrix element + +The function moves iterator to the next sparse matrix element and returns pointer to it. In the +current version there is no any particular order of the elements, because they are stored in the +hash table. 
The sample below demonstrates how to iterate through the sparse matrix: +@code + // print all the non-zero sparse matrix elements and compute their sum + double sum = 0; + int i, dims = cvGetDims(sparsemat); + CvSparseMatIterator it; + CvSparseNode* node = cvInitSparseMatIterator(sparsemat, &it); + + for(; node != 0; node = cvGetNextSparseNode(&it)) + { + int* idx = CV_NODE_IDX(array, node); + float val = *(float*)CV_NODE_VAL(array, node); + printf("M"); + for(i = 0; i < dims; i++ ) + printf("[%d]", idx[i]); + printf("=%g\n", val); + + sum += val; + } + + printf("nTotal sum = %g\n", sum); +@endcode +@param mat_iterator Sparse array iterator + */ +CV_INLINE CvSparseNode* cvGetNextSparseNode( CvSparseMatIterator* mat_iterator ) +{ + if( mat_iterator->node->next ) + return mat_iterator->node = mat_iterator->node->next; + else + { + int idx; + for( idx = ++mat_iterator->curidx; idx < mat_iterator->mat->hashsize; idx++ ) + { + CvSparseNode* node = (CvSparseNode*)mat_iterator->mat->hashtable[idx]; + if( node ) + { + mat_iterator->curidx = idx; + return mat_iterator->node = node; + } + } + return NULL; + } +} + + +#define CV_MAX_ARR 10 + +/** matrix iterator: used for n-ary operations on dense arrays */ +typedef struct CvNArrayIterator +{ + int count; /**< number of arrays */ + int dims; /**< number of dimensions to iterate */ + CvSize size; /**< maximal common linear size: { width = size, height = 1 } */ + uchar* ptr[CV_MAX_ARR]; /**< pointers to the array slices */ + int stack[CV_MAX_DIM]; /**< for internal use */ + CvMatND* hdr[CV_MAX_ARR]; /**< pointers to the headers of the + matrices that are processed */ +} +CvNArrayIterator; + +#define CV_NO_DEPTH_CHECK 1 +#define CV_NO_CN_CHECK 2 +#define CV_NO_SIZE_CHECK 4 + +/** initializes iterator that traverses through several arrays simultaneously + (the function together with cvNextArraySlice is used for + N-ari element-wise operations) */ +CVAPI(int) cvInitNArrayIterator( int count, CvArr** arrs, + const CvArr* mask, CvMatND* stubs, + CvNArrayIterator* array_iterator, + int flags CV_DEFAULT(0) ); + +/** returns zero value if iteration is finished, non-zero (slice length) otherwise */ +CVAPI(int) cvNextNArraySlice( CvNArrayIterator* array_iterator ); + + +/** @brief Returns type of array elements. + +The function returns type of the array elements. In the case of IplImage the type is converted to +CvMat-like representation. For example, if the image has been created as: +@code + IplImage* img = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3); +@endcode +The code cvGetElemType(img) will return CV_8UC3. +@param arr Input array + */ +CVAPI(int) cvGetElemType( const CvArr* arr ); + +/** @brief Return number of array dimensions + +The function returns the array dimensionality and the array of dimension sizes. In the case of +IplImage or CvMat it always returns 2 regardless of number of image/matrix rows. For example, the +following code calculates total number of array elements: +@code + int sizes[CV_MAX_DIM]; + int i, total = 1; + int dims = cvGetDims(arr, size); + for(i = 0; i < dims; i++ ) + total *= sizes[i]; +@endcode +@param arr Input array +@param sizes Optional output vector of the array dimension sizes. For 2d arrays the number of rows +(height) goes first, number of columns (width) next. + */ +CVAPI(int) cvGetDims( const CvArr* arr, int* sizes CV_DEFAULT(NULL) ); + + +/** @brief Returns array size along the specified dimension. 
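(Illustrative sketch added by the editor, not part of the upstream OpenCV header.) For a plain matrix the two dimensions are rows and columns:
@code
    CvMat* m = cvCreateMat(240, 320, CV_8UC1);
    int rows = cvGetDimSize(m, 0);   // 240
    int cols = cvGetDimSize(m, 1);   // 320
    cvReleaseMat(&m);
@endcode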
+ +@param arr Input array +@param index Zero-based dimension index (for matrices 0 means number of rows, 1 means number of +columns; for images 0 means height, 1 means width) + */ +CVAPI(int) cvGetDimSize( const CvArr* arr, int index ); + + +/** @brief Return pointer to a particular array element. + +The functions return a pointer to a specific array element. Number of array dimension should match +to the number of indices passed to the function except for cvPtr1D function that can be used for +sequential access to 1D, 2D or nD dense arrays. + +The functions can be used for sparse arrays as well - if the requested node does not exist they +create it and set it to zero. + +All these as well as other functions accessing array elements ( cvGetND , cvGetRealND , cvSet +, cvSetND , cvSetRealND ) raise an error in case if the element index is out of range. +@param arr Input array +@param idx0 The first zero-based component of the element index +@param type Optional output parameter: type of matrix elements + */ +CVAPI(uchar*) cvPtr1D( const CvArr* arr, int idx0, int* type CV_DEFAULT(NULL)); +/** @overload */ +CVAPI(uchar*) cvPtr2D( const CvArr* arr, int idx0, int idx1, int* type CV_DEFAULT(NULL) ); +/** @overload */ +CVAPI(uchar*) cvPtr3D( const CvArr* arr, int idx0, int idx1, int idx2, + int* type CV_DEFAULT(NULL)); +/** @overload +@param arr Input array +@param idx Array of the element indices +@param type Optional output parameter: type of matrix elements +@param create_node Optional input parameter for sparse matrices. Non-zero value of the parameter +means that the requested element is created if it does not exist already. +@param precalc_hashval Optional input parameter for sparse matrices. If the pointer is not NULL, +the function does not recalculate the node hash value, but takes it from the specified location. +It is useful for speeding up pair-wise operations (TODO: provide an example) +*/ +CVAPI(uchar*) cvPtrND( const CvArr* arr, const int* idx, int* type CV_DEFAULT(NULL), + int create_node CV_DEFAULT(1), + unsigned* precalc_hashval CV_DEFAULT(NULL)); + +/** @brief Return a specific array element. + +The functions return a specific array element. In the case of a sparse array the functions return 0 +if the requested node does not exist (no new node is created by the functions). +@param arr Input array +@param idx0 The first zero-based component of the element index + */ +CVAPI(CvScalar) cvGet1D( const CvArr* arr, int idx0 ); +/** @overload */ +CVAPI(CvScalar) cvGet2D( const CvArr* arr, int idx0, int idx1 ); +/** @overload */ +CVAPI(CvScalar) cvGet3D( const CvArr* arr, int idx0, int idx1, int idx2 ); +/** @overload +@param arr Input array +@param idx Array of the element indices +*/ +CVAPI(CvScalar) cvGetND( const CvArr* arr, const int* idx ); + +/** @brief Return a specific element of single-channel 1D, 2D, 3D or nD array. + +Returns a specific element of a single-channel array. If the array has multiple channels, a runtime +error is raised. Note that Get?D functions can be used safely for both single-channel and +multiple-channel arrays though they are a bit slower. + +In the case of a sparse array the functions return 0 if the requested node does not exist (no new +node is created by the functions). +@param arr Input array. Must have a single channel. 
+@param idx0 The first zero-based component of the element index + */ +CVAPI(double) cvGetReal1D( const CvArr* arr, int idx0 ); +/** @overload */ +CVAPI(double) cvGetReal2D( const CvArr* arr, int idx0, int idx1 ); +/** @overload */ +CVAPI(double) cvGetReal3D( const CvArr* arr, int idx0, int idx1, int idx2 ); +/** @overload +@param arr Input array. Must have a single channel. +@param idx Array of the element indices +*/ +CVAPI(double) cvGetRealND( const CvArr* arr, const int* idx ); + +/** @brief Change the particular array element. + +The functions assign the new value to a particular array element. In the case of a sparse array the +functions create the node if it does not exist yet. +@param arr Input array +@param idx0 The first zero-based component of the element index +@param value The assigned value + */ +CVAPI(void) cvSet1D( CvArr* arr, int idx0, CvScalar value ); +/** @overload */ +CVAPI(void) cvSet2D( CvArr* arr, int idx0, int idx1, CvScalar value ); +/** @overload */ +CVAPI(void) cvSet3D( CvArr* arr, int idx0, int idx1, int idx2, CvScalar value ); +/** @overload +@param arr Input array +@param idx Array of the element indices +@param value The assigned value +*/ +CVAPI(void) cvSetND( CvArr* arr, const int* idx, CvScalar value ); + +/** @brief Change a specific array element. + +The functions assign a new value to a specific element of a single-channel array. If the array has +multiple channels, a runtime error is raised. Note that the Set\*D function can be used safely for +both single-channel and multiple-channel arrays, though they are a bit slower. + +In the case of a sparse array the functions create the node if it does not yet exist. +@param arr Input array +@param idx0 The first zero-based component of the element index +@param value The assigned value + */ +CVAPI(void) cvSetReal1D( CvArr* arr, int idx0, double value ); +/** @overload */ +CVAPI(void) cvSetReal2D( CvArr* arr, int idx0, int idx1, double value ); +/** @overload */ +CVAPI(void) cvSetReal3D( CvArr* arr, int idx0, + int idx1, int idx2, double value ); +/** @overload +@param arr Input array +@param idx Array of the element indices +@param value The assigned value +*/ +CVAPI(void) cvSetRealND( CvArr* arr, const int* idx, double value ); + +/** clears element of ND dense array, + in case of sparse arrays it deletes the specified node */ +CVAPI(void) cvClearND( CvArr* arr, const int* idx ); + +/** @brief Returns matrix header for arbitrary array. + +The function returns a matrix header for the input array that can be a matrix - CvMat, an image - +IplImage, or a multi-dimensional dense array - CvMatND (the third option is allowed only if +allowND != 0) . In the case of matrix the function simply returns the input pointer. In the case of +IplImage\* or CvMatND it initializes the header structure with parameters of the current image ROI +and returns &header. Because COI is not supported by CvMat, it is returned separately. + +The function provides an easy way to handle both types of arrays - IplImage and CvMat using the same +code. Input array must have non-zero data pointer, otherwise the function will report an error. + +@note If the input array is IplImage with planar data layout and COI set, the function returns the +pointer to the selected plane and COI == 0. This feature allows user to process IplImage structures +with planar data layout, even though OpenCV does not support such images. 
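(Illustrative sketch added by the editor, not part of the upstream OpenCV header.) Viewing an image as a matrix without copying, assuming a single-channel 8-bit image:
@code
    IplImage* img = cvCreateImage(cvSize(320, 240), IPL_DEPTH_8U, 1);
    CvMat hdr;
    CvMat* m = cvGetMat(img, &hdr, NULL, 0);   // matrix header over the image data
    cvSetZero(m);                              // clears the image pixels
    cvReleaseImage(&img);
@endcode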
+@param arr Input array +@param header Pointer to CvMat structure used as a temporary buffer +@param coi Optional output parameter for storing COI +@param allowND If non-zero, the function accepts multi-dimensional dense arrays (CvMatND\*) and +returns 2D matrix (if CvMatND has two dimensions) or 1D matrix (when CvMatND has 1 dimension or +more than 2 dimensions). The CvMatND array must be continuous. +@sa cvGetImage, cvarrToMat. + */ +CVAPI(CvMat*) cvGetMat( const CvArr* arr, CvMat* header, + int* coi CV_DEFAULT(NULL), + int allowND CV_DEFAULT(0)); + +/** @brief Returns image header for arbitrary array. + +The function returns the image header for the input array that can be a matrix (CvMat) or image +(IplImage). In the case of an image the function simply returns the input pointer. In the case of +CvMat it initializes an image_header structure with the parameters of the input matrix. Note that +if we transform IplImage to CvMat using cvGetMat and then transform CvMat back to IplImage using +this function, we will get different headers if the ROI is set in the original image. +@param arr Input array +@param image_header Pointer to IplImage structure used as a temporary buffer + */ +CVAPI(IplImage*) cvGetImage( const CvArr* arr, IplImage* image_header ); + + +/** @brief Changes the shape of a multi-dimensional array without copying the data. + +The function is an advanced version of cvReshape that can work with multi-dimensional arrays as +well (though it can work with ordinary images and matrices) and change the number of dimensions. + +Below are the two samples from the cvReshape description rewritten using cvReshapeMatND: +@code + IplImage* color_img = cvCreateImage(cvSize(320,240), IPL_DEPTH_8U, 3); + IplImage gray_img_hdr, *gray_img; + gray_img = (IplImage*)cvReshapeMatND(color_img, sizeof(gray_img_hdr), &gray_img_hdr, 1, 0, 0); + ... + int size[] = { 2, 2, 2 }; + CvMatND* mat = cvCreateMatND(3, size, CV_32F); + CvMat row_header, *row; + row = (CvMat*)cvReshapeMatND(mat, sizeof(row_header), &row_header, 0, 1, 0); +@endcode +In C, the header file for this function includes a convenient macro cvReshapeND that does away with +the sizeof_header parameter. So, the lines containing the call to cvReshapeMatND in the examples +may be replaced as follow: +@code + gray_img = (IplImage*)cvReshapeND(color_img, &gray_img_hdr, 1, 0, 0); + ... + row = (CvMat*)cvReshapeND(mat, &row_header, 0, 1, 0); +@endcode +@param arr Input array +@param sizeof_header Size of output header to distinguish between IplImage, CvMat and CvMatND +output headers +@param header Output header to be filled +@param new_cn New number of channels. new_cn = 0 means that the number of channels remains +unchanged. +@param new_dims New number of dimensions. new_dims = 0 means that the number of dimensions +remains the same. +@param new_sizes Array of new dimension sizes. Only new_dims-1 values are used, because the +total number of elements must remain the same. Thus, if new_dims = 1, new_sizes array is not +used. + */ +CVAPI(CvArr*) cvReshapeMatND( const CvArr* arr, + int sizeof_header, CvArr* header, + int new_cn, int new_dims, int* new_sizes ); + +#define cvReshapeND( arr, header, new_cn, new_dims, new_sizes ) \ + cvReshapeMatND( (arr), sizeof(*(header)), (header), \ + (new_cn), (new_dims), (new_sizes)) + +/** @brief Changes shape of matrix/image without copying data. 
+ +The function initializes the CvMat header so that it points to the same data as the original array +but has a different shape - different number of channels, different number of rows, or both. + +The following example code creates one image buffer and two image headers, the first is for a +320x240x3 image and the second is for a 960x240x1 image: +@code + IplImage* color_img = cvCreateImage(cvSize(320,240), IPL_DEPTH_8U, 3); + CvMat gray_mat_hdr; + IplImage gray_img_hdr, *gray_img; + cvReshape(color_img, &gray_mat_hdr, 1); + gray_img = cvGetImage(&gray_mat_hdr, &gray_img_hdr); +@endcode +And the next example converts a 3x3 matrix to a single 1x9 vector: +@code + CvMat* mat = cvCreateMat(3, 3, CV_32F); + CvMat row_header, *row; + row = cvReshape(mat, &row_header, 0, 1); +@endcode +@param arr Input array +@param header Output header to be filled +@param new_cn New number of channels. 'new_cn = 0' means that the number of channels remains +unchanged. +@param new_rows New number of rows. 'new_rows = 0' means that the number of rows remains +unchanged unless it needs to be changed according to new_cn value. +*/ +CVAPI(CvMat*) cvReshape( const CvArr* arr, CvMat* header, + int new_cn, int new_rows CV_DEFAULT(0) ); + +/** Repeats source 2d array several times in both horizontal and + vertical direction to fill destination array */ +CVAPI(void) cvRepeat( const CvArr* src, CvArr* dst ); + +/** @brief Allocates array data + +The function allocates image, matrix or multi-dimensional dense array data. Note that in the case of +matrix types OpenCV allocation functions are used. In the case of IplImage they are used unless +CV_TURN_ON_IPL_COMPATIBILITY() has been called before. In the latter case IPL functions are used +to allocate the data. +@param arr Array header + */ +CVAPI(void) cvCreateData( CvArr* arr ); + +/** @brief Releases array data. + +The function releases the array data. In the case of CvMat or CvMatND it simply calls +cvDecRefData(), that is the function can not deallocate external data. See also the note to +cvCreateData . +@param arr Array header + */ +CVAPI(void) cvReleaseData( CvArr* arr ); + +/** @brief Assigns user data to the array header. + +The function assigns user data to the array header. Header should be initialized before using +cvCreateMatHeader, cvCreateImageHeader, cvCreateMatNDHeader, cvInitMatHeader, +cvInitImageHeader or cvInitMatNDHeader. +@param arr Array header +@param data User data +@param step Full row length in bytes + */ +CVAPI(void) cvSetData( CvArr* arr, void* data, int step ); + +/** @brief Retrieves low-level information about the array. + +The function fills output variables with low-level information about the array data. All output + +parameters are optional, so some of the pointers may be set to NULL. If the array is IplImage with +ROI set, the parameters of ROI are returned. + +The following example shows how to get access to array elements. 
It computes absolute values of the +array elements : +@code + float* data; + int step; + CvSize size; + + cvGetRawData(array, (uchar**)&data, &step, &size); + step /= sizeof(data[0]); + + for(int y = 0; y < size.height; y++, data += step ) + for(int x = 0; x < size.width; x++ ) + data[x] = (float)fabs(data[x]); +@endcode +@param arr Array header +@param data Output pointer to the whole image origin or ROI origin if ROI is set +@param step Output full row length in bytes +@param roi_size Output ROI size + */ +CVAPI(void) cvGetRawData( const CvArr* arr, uchar** data, + int* step CV_DEFAULT(NULL), + CvSize* roi_size CV_DEFAULT(NULL)); + +/** @brief Returns size of matrix or image ROI. + +The function returns number of rows (CvSize::height) and number of columns (CvSize::width) of the +input matrix or image. In the case of image the size of ROI is returned. +@param arr array header + */ +CVAPI(CvSize) cvGetSize( const CvArr* arr ); + +/** @brief Copies one array to another. + +The function copies selected elements from an input array to an output array: + +\f[\texttt{dst} (I)= \texttt{src} (I) \quad \text{if} \quad \texttt{mask} (I) \ne 0.\f] + +If any of the passed arrays is of IplImage type, then its ROI and COI fields are used. Both arrays +must have the same type, the same number of dimensions, and the same size. The function can also +copy sparse arrays (mask is not supported in this case). +@param src The source array +@param dst The destination array +@param mask Operation mask, 8-bit single channel array; specifies elements of the destination array +to be changed + */ +CVAPI(void) cvCopy( const CvArr* src, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL) ); + +/** @brief Sets every element of an array to a given value. + +The function copies the scalar value to every selected element of the destination array: +\f[\texttt{arr} (I)= \texttt{value} \quad \text{if} \quad \texttt{mask} (I) \ne 0\f] +If array arr is of IplImage type, then is ROI used, but COI must not be set. +@param arr The destination array +@param value Fill value +@param mask Operation mask, 8-bit single channel array; specifies elements of the destination +array to be changed + */ +CVAPI(void) cvSet( CvArr* arr, CvScalar value, + const CvArr* mask CV_DEFAULT(NULL) ); + +/** @brief Clears the array. + +The function clears the array. In the case of dense arrays (CvMat, CvMatND or IplImage), +cvZero(array) is equivalent to cvSet(array,cvScalarAll(0),0). In the case of sparse arrays all the +elements are removed. +@param arr Array to be cleared + */ +CVAPI(void) cvSetZero( CvArr* arr ); +#define cvZero cvSetZero + + +/** Splits a multi-channel array into the set of single-channel arrays or + extracts particular [color] plane */ +CVAPI(void) cvSplit( const CvArr* src, CvArr* dst0, CvArr* dst1, + CvArr* dst2, CvArr* dst3 ); + +/** Merges a set of single-channel arrays into the single multi-channel array + or inserts one particular [color] plane to the array */ +CVAPI(void) cvMerge( const CvArr* src0, const CvArr* src1, + const CvArr* src2, const CvArr* src3, + CvArr* dst ); + +/** Copies several channels from input arrays to + certain channels of output arrays */ +CVAPI(void) cvMixChannels( const CvArr** src, int src_count, + CvArr** dst, int dst_count, + const int* from_to, int pair_count ); + +/** @brief Converts one array to another with optional linear transformation. + +The function has several different purposes, and thus has several different names. 
It copies one +array to another with optional scaling, which is performed first, and/or optional type conversion, +performed after: + +\f[\texttt{dst} (I) = \texttt{scale} \texttt{src} (I) + ( \texttt{shift} _0, \texttt{shift} _1,...)\f] + +All the channels of multi-channel arrays are processed independently. + +The type of conversion is done with rounding and saturation, that is if the result of scaling + +conversion can not be represented exactly by a value of the destination array element type, it is +set to the nearest representable value on the real axis. +@param src Source array +@param dst Destination array +@param scale Scale factor +@param shift Value added to the scaled source array elements + */ +CVAPI(void) cvConvertScale( const CvArr* src, CvArr* dst, + double scale CV_DEFAULT(1), + double shift CV_DEFAULT(0) ); +#define cvCvtScale cvConvertScale +#define cvScale cvConvertScale +#define cvConvert( src, dst ) cvConvertScale( (src), (dst), 1, 0 ) + + +/** Performs linear transformation on every source array element, + stores absolute value of the result: + dst(x,y,c) = abs(scale*src(x,y,c)+shift). + destination array must have 8u type. + In other cases one may use cvConvertScale + cvAbsDiffS */ +CVAPI(void) cvConvertScaleAbs( const CvArr* src, CvArr* dst, + double scale CV_DEFAULT(1), + double shift CV_DEFAULT(0) ); +#define cvCvtScaleAbs cvConvertScaleAbs + + +/** checks termination criteria validity and + sets eps to default_eps (if it is not set), + max_iter to default_max_iters (if it is not set) +*/ +CVAPI(CvTermCriteria) cvCheckTermCriteria( CvTermCriteria criteria, + double default_eps, + int default_max_iters ); + +/****************************************************************************************\ +* Arithmetic, logic and comparison operations * +\****************************************************************************************/ + +/** dst(mask) = src1(mask) + src2(mask) */ +CVAPI(void) cvAdd( const CvArr* src1, const CvArr* src2, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/** dst(mask) = src(mask) + value */ +CVAPI(void) cvAddS( const CvArr* src, CvScalar value, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/** dst(mask) = src1(mask) - src2(mask) */ +CVAPI(void) cvSub( const CvArr* src1, const CvArr* src2, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/** dst(mask) = src(mask) - value = src(mask) + (-value) */ +CV_INLINE void cvSubS( const CvArr* src, CvScalar value, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)) +{ + cvAddS( src, cvScalar( -value.val[0], -value.val[1], -value.val[2], -value.val[3]), + dst, mask ); +} + +/** dst(mask) = value - src(mask) */ +CVAPI(void) cvSubRS( const CvArr* src, CvScalar value, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/** dst(idx) = src1(idx) * src2(idx) * scale + (scaled element-wise multiplication of 2 arrays) */ +CVAPI(void) cvMul( const CvArr* src1, const CvArr* src2, + CvArr* dst, double scale CV_DEFAULT(1) ); + +/** element-wise division/inversion with scaling: + dst(idx) = src1(idx) * scale / src2(idx) + or dst(idx) = scale / src2(idx) if src1 == 0 */ +CVAPI(void) cvDiv( const CvArr* src1, const CvArr* src2, + CvArr* dst, double scale CV_DEFAULT(1)); + +/** dst = src1 * scale + src2 */ +CVAPI(void) cvScaleAdd( const CvArr* src1, CvScalar scale, + const CvArr* src2, CvArr* dst ); +#define cvAXPY( A, real_scalar, B, C ) cvScaleAdd(A, cvRealScalar(real_scalar), B, C) + +/** dst = src1 * alpha + src2 * beta + gamma */ +CVAPI(void) cvAddWeighted( const CvArr* src1, double 
alpha, + const CvArr* src2, double beta, + double gamma, CvArr* dst ); + +/** @brief Calculates the dot product of two arrays in Euclidean metrics. + +The function calculates and returns the Euclidean dot product of two arrays. + +\f[src1 \bullet src2 = \sum _I ( \texttt{src1} (I) \texttt{src2} (I))\f] + +In the case of multiple channel arrays, the results for all channels are accumulated. In particular, +cvDotProduct(a,a) where a is a complex vector, will return \f$||\texttt{a}||^2\f$. The function can +process multi-dimensional arrays, row by row, layer by layer, and so on. +@param src1 The first source array +@param src2 The second source array + */ +CVAPI(double) cvDotProduct( const CvArr* src1, const CvArr* src2 ); + +/** dst(idx) = src1(idx) & src2(idx) */ +CVAPI(void) cvAnd( const CvArr* src1, const CvArr* src2, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/** dst(idx) = src(idx) & value */ +CVAPI(void) cvAndS( const CvArr* src, CvScalar value, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/** dst(idx) = src1(idx) | src2(idx) */ +CVAPI(void) cvOr( const CvArr* src1, const CvArr* src2, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/** dst(idx) = src(idx) | value */ +CVAPI(void) cvOrS( const CvArr* src, CvScalar value, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/** dst(idx) = src1(idx) ^ src2(idx) */ +CVAPI(void) cvXor( const CvArr* src1, const CvArr* src2, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/** dst(idx) = src(idx) ^ value */ +CVAPI(void) cvXorS( const CvArr* src, CvScalar value, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/** dst(idx) = ~src(idx) */ +CVAPI(void) cvNot( const CvArr* src, CvArr* dst ); + +/** dst(idx) = lower(idx) <= src(idx) < upper(idx) */ +CVAPI(void) cvInRange( const CvArr* src, const CvArr* lower, + const CvArr* upper, CvArr* dst ); + +/** dst(idx) = lower <= src(idx) < upper */ +CVAPI(void) cvInRangeS( const CvArr* src, CvScalar lower, + CvScalar upper, CvArr* dst ); + +#define CV_CMP_EQ 0 +#define CV_CMP_GT 1 +#define CV_CMP_GE 2 +#define CV_CMP_LT 3 +#define CV_CMP_LE 4 +#define CV_CMP_NE 5 + +/** The comparison operation support single-channel arrays only. + Destination image should be 8uC1 or 8sC1 */ + +/** dst(idx) = src1(idx) _cmp_op_ src2(idx) */ +CVAPI(void) cvCmp( const CvArr* src1, const CvArr* src2, CvArr* dst, int cmp_op ); + +/** dst(idx) = src1(idx) _cmp_op_ value */ +CVAPI(void) cvCmpS( const CvArr* src, double value, CvArr* dst, int cmp_op ); + +/** dst(idx) = min(src1(idx),src2(idx)) */ +CVAPI(void) cvMin( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/** dst(idx) = max(src1(idx),src2(idx)) */ +CVAPI(void) cvMax( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/** dst(idx) = min(src(idx),value) */ +CVAPI(void) cvMinS( const CvArr* src, double value, CvArr* dst ); + +/** dst(idx) = max(src(idx),value) */ +CVAPI(void) cvMaxS( const CvArr* src, double value, CvArr* dst ); + +/** dst(x,y,c) = abs(src1(x,y,c) - src2(x,y,c)) */ +CVAPI(void) cvAbsDiff( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/** dst(x,y,c) = abs(src(x,y,c) - value(c)) */ +CVAPI(void) cvAbsDiffS( const CvArr* src, CvArr* dst, CvScalar value ); +#define cvAbs( src, dst ) cvAbsDiffS( (src), (dst), cvScalarAll(0)) + +/****************************************************************************************\ +* Math operations * +\****************************************************************************************/ + +/** Does cartesian->polar coordinates conversion. 
+ Either of output components (magnitude or angle) is optional */ +CVAPI(void) cvCartToPolar( const CvArr* x, const CvArr* y, + CvArr* magnitude, CvArr* angle CV_DEFAULT(NULL), + int angle_in_degrees CV_DEFAULT(0)); + +/** Does polar->cartesian coordinates conversion. + Either of output components (magnitude or angle) is optional. + If magnitude is missing it is assumed to be all 1's */ +CVAPI(void) cvPolarToCart( const CvArr* magnitude, const CvArr* angle, + CvArr* x, CvArr* y, + int angle_in_degrees CV_DEFAULT(0)); + +/** Does powering: dst(idx) = src(idx)^power */ +CVAPI(void) cvPow( const CvArr* src, CvArr* dst, double power ); + +/** Does exponention: dst(idx) = exp(src(idx)). + Overflow is not handled yet. Underflow is handled. + Maximal relative error is ~7e-6 for single-precision input */ +CVAPI(void) cvExp( const CvArr* src, CvArr* dst ); + +/** Calculates natural logarithms: dst(idx) = log(abs(src(idx))). + Logarithm of 0 gives large negative number(~-700) + Maximal relative error is ~3e-7 for single-precision output +*/ +CVAPI(void) cvLog( const CvArr* src, CvArr* dst ); + +/** Fast arctangent calculation */ +CVAPI(float) cvFastArctan( float y, float x ); + +/** Fast cubic root calculation */ +CVAPI(float) cvCbrt( float value ); + +#define CV_CHECK_RANGE 1 +#define CV_CHECK_QUIET 2 +/** Checks array values for NaNs, Infs or simply for too large numbers + (if CV_CHECK_RANGE is set). If CV_CHECK_QUIET is set, + no runtime errors is raised (function returns zero value in case of "bad" values). + Otherwise cvError is called */ +CVAPI(int) cvCheckArr( const CvArr* arr, int flags CV_DEFAULT(0), + double min_val CV_DEFAULT(0), double max_val CV_DEFAULT(0)); +#define cvCheckArray cvCheckArr + +#define CV_RAND_UNI 0 +#define CV_RAND_NORMAL 1 + +/** @brief Fills an array with random numbers and updates the RNG state. + +The function fills the destination array with uniformly or normally distributed random numbers. +@param rng CvRNG state initialized by cvRNG +@param arr The destination array +@param dist_type Distribution type +> - **CV_RAND_UNI** uniform distribution +> - **CV_RAND_NORMAL** normal or Gaussian distribution +@param param1 The first parameter of the distribution. In the case of a uniform distribution it is +the inclusive lower boundary of the random numbers range. In the case of a normal distribution it +is the mean value of the random numbers. +@param param2 The second parameter of the distribution. In the case of a uniform distribution it +is the exclusive upper boundary of the random numbers range. In the case of a normal distribution +it is the standard deviation of the random numbers. +@sa randu, randn, RNG::fill. 
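(Illustrative sketch added by the editor, not part of the upstream OpenCV header.) Filling a matrix with uniformly distributed random numbers; CvRNG and cvRNG come from the types header:
@code
    CvRNG rng = cvRNG(-1);
    CvMat* m = cvCreateMat(4, 4, CV_32FC1);
    cvRandArr(&rng, m, CV_RAND_UNI, cvRealScalar(0), cvRealScalar(1));   // uniform on [0, 1)
    cvReleaseMat(&m);
@endcode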
+ */ +CVAPI(void) cvRandArr( CvRNG* rng, CvArr* arr, int dist_type, + CvScalar param1, CvScalar param2 ); + +CVAPI(void) cvRandShuffle( CvArr* mat, CvRNG* rng, + double iter_factor CV_DEFAULT(1.)); + +#define CV_SORT_EVERY_ROW 0 +#define CV_SORT_EVERY_COLUMN 1 +#define CV_SORT_ASCENDING 0 +#define CV_SORT_DESCENDING 16 + +CVAPI(void) cvSort( const CvArr* src, CvArr* dst CV_DEFAULT(NULL), + CvArr* idxmat CV_DEFAULT(NULL), + int flags CV_DEFAULT(0)); + +/** Finds real roots of a cubic equation */ +CVAPI(int) cvSolveCubic( const CvMat* coeffs, CvMat* roots ); + +/** Finds all real and complex roots of a polynomial equation */ +CVAPI(void) cvSolvePoly(const CvMat* coeffs, CvMat *roots2, + int maxiter CV_DEFAULT(20), int fig CV_DEFAULT(100)); + +/****************************************************************************************\ +* Matrix operations * +\****************************************************************************************/ + +/** @brief Calculates the cross product of two 3D vectors. + +The function calculates the cross product of two 3D vectors: +\f[\texttt{dst} = \texttt{src1} \times \texttt{src2}\f] +or: +\f[\begin{array}{l} \texttt{dst} _1 = \texttt{src1} _2 \texttt{src2} _3 - \texttt{src1} _3 \texttt{src2} _2 \\ \texttt{dst} _2 = \texttt{src1} _3 \texttt{src2} _1 - \texttt{src1} _1 \texttt{src2} _3 \\ \texttt{dst} _3 = \texttt{src1} _1 \texttt{src2} _2 - \texttt{src1} _2 \texttt{src2} _1 \end{array}\f] +@param src1 The first source vector +@param src2 The second source vector +@param dst The destination vector + */ +CVAPI(void) cvCrossProduct( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/** Matrix transform: dst = A*B + C, C is optional */ +#define cvMatMulAdd( src1, src2, src3, dst ) cvGEMM( (src1), (src2), 1., (src3), 1., (dst), 0 ) +#define cvMatMul( src1, src2, dst ) cvMatMulAdd( (src1), (src2), NULL, (dst)) + +#define CV_GEMM_A_T 1 +#define CV_GEMM_B_T 2 +#define CV_GEMM_C_T 4 +/** Extended matrix transform: + dst = alpha*op(A)*op(B) + beta*op(C), where op(X) is X or X^T */ +CVAPI(void) cvGEMM( const CvArr* src1, const CvArr* src2, double alpha, + const CvArr* src3, double beta, CvArr* dst, + int tABC CV_DEFAULT(0)); +#define cvMatMulAddEx cvGEMM + +/** Transforms each element of source array and stores + resultant vectors in destination array */ +CVAPI(void) cvTransform( const CvArr* src, CvArr* dst, + const CvMat* transmat, + const CvMat* shiftvec CV_DEFAULT(NULL)); +#define cvMatMulAddS cvTransform + +/** Does perspective transform on every element of input array */ +CVAPI(void) cvPerspectiveTransform( const CvArr* src, CvArr* dst, + const CvMat* mat ); + +/** Calculates (A-delta)*(A-delta)^T (order=0) or (A-delta)^T*(A-delta) (order=1) */ +CVAPI(void) cvMulTransposed( const CvArr* src, CvArr* dst, int order, + const CvArr* delta CV_DEFAULT(NULL), + double scale CV_DEFAULT(1.) ); + +/** Transposes matrix. 
Square matrices can be transposed in-place */ +CVAPI(void) cvTranspose( const CvArr* src, CvArr* dst ); +#define cvT cvTranspose + +/** Completes the symmetric matrix from the lower (LtoR=0) or from the upper (LtoR!=0) part */ +CVAPI(void) cvCompleteSymm( CvMat* matrix, int LtoR CV_DEFAULT(0) ); + +/** Mirror array data around horizontal (flip=0), + vertical (flip=1) or both(flip=-1) axises: + cvFlip(src) flips images vertically and sequences horizontally (inplace) */ +CVAPI(void) cvFlip( const CvArr* src, CvArr* dst CV_DEFAULT(NULL), + int flip_mode CV_DEFAULT(0)); +#define cvMirror cvFlip + + +#define CV_SVD_MODIFY_A 1 +#define CV_SVD_U_T 2 +#define CV_SVD_V_T 4 + +/** Performs Singular Value Decomposition of a matrix */ +CVAPI(void) cvSVD( CvArr* A, CvArr* W, CvArr* U CV_DEFAULT(NULL), + CvArr* V CV_DEFAULT(NULL), int flags CV_DEFAULT(0)); + +/** Performs Singular Value Back Substitution (solves A*X = B): + flags must be the same as in cvSVD */ +CVAPI(void) cvSVBkSb( const CvArr* W, const CvArr* U, + const CvArr* V, const CvArr* B, + CvArr* X, int flags ); + +#define CV_LU 0 +#define CV_SVD 1 +#define CV_SVD_SYM 2 +#define CV_CHOLESKY 3 +#define CV_QR 4 +#define CV_NORMAL 16 + +/** Inverts matrix */ +CVAPI(double) cvInvert( const CvArr* src, CvArr* dst, + int method CV_DEFAULT(CV_LU)); +#define cvInv cvInvert + +/** Solves linear system (src1)*(dst) = (src2) + (returns 0 if src1 is a singular and CV_LU method is used) */ +CVAPI(int) cvSolve( const CvArr* src1, const CvArr* src2, CvArr* dst, + int method CV_DEFAULT(CV_LU)); + +/** Calculates determinant of input matrix */ +CVAPI(double) cvDet( const CvArr* mat ); + +/** Calculates trace of the matrix (sum of elements on the main diagonal) */ +CVAPI(CvScalar) cvTrace( const CvArr* mat ); + +/** Finds eigen values and vectors of a symmetric matrix */ +CVAPI(void) cvEigenVV( CvArr* mat, CvArr* evects, CvArr* evals, + double eps CV_DEFAULT(0), + int lowindex CV_DEFAULT(-1), + int highindex CV_DEFAULT(-1)); + +///* Finds selected eigen values and vectors of a symmetric matrix */ +//CVAPI(void) cvSelectedEigenVV( CvArr* mat, CvArr* evects, CvArr* evals, +// int lowindex, int highindex ); + +/** Makes an identity matrix (mat_ij = i == j) */ +CVAPI(void) cvSetIdentity( CvArr* mat, CvScalar value CV_DEFAULT(cvRealScalar(1)) ); + +/** Fills matrix with given range of numbers */ +CVAPI(CvArr*) cvRange( CvArr* mat, double start, double end ); + +/** @anchor core_c_CovarFlags +@name Flags for cvCalcCovarMatrix +@see cvCalcCovarMatrix + @{ +*/ + +/** flag for cvCalcCovarMatrix, transpose([v1-avg, v2-avg,...]) * [v1-avg,v2-avg,...] */ +#define CV_COVAR_SCRAMBLED 0 + +/** flag for cvCalcCovarMatrix, [v1-avg, v2-avg,...] * transpose([v1-avg,v2-avg,...]) */ +#define CV_COVAR_NORMAL 1 + +/** flag for cvCalcCovarMatrix, do not calc average (i.e. 
mean vector) - use the input vector instead + (useful for calculating covariance matrix by parts) */ +#define CV_COVAR_USE_AVG 2 + +/** flag for cvCalcCovarMatrix, scale the covariance matrix coefficients by number of the vectors */ +#define CV_COVAR_SCALE 4 + +/** flag for cvCalcCovarMatrix, all the input vectors are stored in a single matrix, as its rows */ +#define CV_COVAR_ROWS 8 + +/** flag for cvCalcCovarMatrix, all the input vectors are stored in a single matrix, as its columns */ +#define CV_COVAR_COLS 16 + +/** @} */ + +/** Calculates covariation matrix for a set of vectors +@see @ref core_c_CovarFlags "flags" +*/ +CVAPI(void) cvCalcCovarMatrix( const CvArr** vects, int count, + CvArr* cov_mat, CvArr* avg, int flags ); + +#define CV_PCA_DATA_AS_ROW 0 +#define CV_PCA_DATA_AS_COL 1 +#define CV_PCA_USE_AVG 2 +CVAPI(void) cvCalcPCA( const CvArr* data, CvArr* mean, + CvArr* eigenvals, CvArr* eigenvects, int flags ); + +CVAPI(void) cvProjectPCA( const CvArr* data, const CvArr* mean, + const CvArr* eigenvects, CvArr* result ); + +CVAPI(void) cvBackProjectPCA( const CvArr* proj, const CvArr* mean, + const CvArr* eigenvects, CvArr* result ); + +/** Calculates Mahalanobis(weighted) distance */ +CVAPI(double) cvMahalanobis( const CvArr* vec1, const CvArr* vec2, const CvArr* mat ); +#define cvMahalonobis cvMahalanobis + +/****************************************************************************************\ +* Array Statistics * +\****************************************************************************************/ + +/** Finds sum of array elements */ +CVAPI(CvScalar) cvSum( const CvArr* arr ); + +/** Calculates number of non-zero pixels */ +CVAPI(int) cvCountNonZero( const CvArr* arr ); + +/** Calculates mean value of array elements */ +CVAPI(CvScalar) cvAvg( const CvArr* arr, const CvArr* mask CV_DEFAULT(NULL) ); + +/** Calculates mean and standard deviation of pixel values */ +CVAPI(void) cvAvgSdv( const CvArr* arr, CvScalar* mean, CvScalar* std_dev, + const CvArr* mask CV_DEFAULT(NULL) ); + +/** Finds global minimum, maximum and their positions */ +CVAPI(void) cvMinMaxLoc( const CvArr* arr, double* min_val, double* max_val, + CvPoint* min_loc CV_DEFAULT(NULL), + CvPoint* max_loc CV_DEFAULT(NULL), + const CvArr* mask CV_DEFAULT(NULL) ); + +/** @anchor core_c_NormFlags + @name Flags for cvNorm and cvNormalize + @{ +*/ +#define CV_C 1 +#define CV_L1 2 +#define CV_L2 4 +#define CV_NORM_MASK 7 +#define CV_RELATIVE 8 +#define CV_DIFF 16 +#define CV_MINMAX 32 + +#define CV_DIFF_C (CV_DIFF | CV_C) +#define CV_DIFF_L1 (CV_DIFF | CV_L1) +#define CV_DIFF_L2 (CV_DIFF | CV_L2) +#define CV_RELATIVE_C (CV_RELATIVE | CV_C) +#define CV_RELATIVE_L1 (CV_RELATIVE | CV_L1) +#define CV_RELATIVE_L2 (CV_RELATIVE | CV_L2) +/** @} */ + +/** Finds norm, difference norm or relative difference norm for an array (or two arrays) +@see ref core_c_NormFlags "flags" +*/ +CVAPI(double) cvNorm( const CvArr* arr1, const CvArr* arr2 CV_DEFAULT(NULL), + int norm_type CV_DEFAULT(CV_L2), + const CvArr* mask CV_DEFAULT(NULL) ); + +/** @see ref core_c_NormFlags "flags" */ +CVAPI(void) cvNormalize( const CvArr* src, CvArr* dst, + double a CV_DEFAULT(1.), double b CV_DEFAULT(0.), + int norm_type CV_DEFAULT(CV_L2), + const CvArr* mask CV_DEFAULT(NULL) ); + +/** @anchor core_c_ReduceFlags + @name Flags for cvReduce + @{ +*/ +#define CV_REDUCE_SUM 0 +#define CV_REDUCE_AVG 1 +#define CV_REDUCE_MAX 2 +#define CV_REDUCE_MIN 3 +/** @} */ + +/** @see @ref core_c_ReduceFlags "flags" */ +CVAPI(void) cvReduce( const CvArr* src, CvArr* 
dst, int dim CV_DEFAULT(-1), + int op CV_DEFAULT(CV_REDUCE_SUM) ); + +/****************************************************************************************\ +* Discrete Linear Transforms and Related Functions * +\****************************************************************************************/ + +/** @anchor core_c_DftFlags + @name Flags for cvDFT, cvDCT and cvMulSpectrums + @{ + */ +#define CV_DXT_FORWARD 0 +#define CV_DXT_INVERSE 1 +#define CV_DXT_SCALE 2 /**< divide result by size of array */ +#define CV_DXT_INV_SCALE (CV_DXT_INVERSE + CV_DXT_SCALE) +#define CV_DXT_INVERSE_SCALE CV_DXT_INV_SCALE +#define CV_DXT_ROWS 4 /**< transform each row individually */ +#define CV_DXT_MUL_CONJ 8 /**< conjugate the second argument of cvMulSpectrums */ +/** @} */ + +/** Discrete Fourier Transform: + complex->complex, + real->ccs (forward), + ccs->real (inverse) +@see core_c_DftFlags "flags" +*/ +CVAPI(void) cvDFT( const CvArr* src, CvArr* dst, int flags, + int nonzero_rows CV_DEFAULT(0) ); +#define cvFFT cvDFT + +/** Multiply results of DFTs: DFT(X)*DFT(Y) or DFT(X)*conj(DFT(Y)) +@see core_c_DftFlags "flags" +*/ +CVAPI(void) cvMulSpectrums( const CvArr* src1, const CvArr* src2, + CvArr* dst, int flags ); + +/** Finds optimal DFT vector size >= size0 */ +CVAPI(int) cvGetOptimalDFTSize( int size0 ); + +/** Discrete Cosine Transform +@see core_c_DftFlags "flags" +*/ +CVAPI(void) cvDCT( const CvArr* src, CvArr* dst, int flags ); + +/****************************************************************************************\ +* Dynamic data structures * +\****************************************************************************************/ + +/** Calculates length of sequence slice (with support of negative indices). */ +CVAPI(int) cvSliceLength( CvSlice slice, const CvSeq* seq ); + + +/** Creates new memory storage. + block_size == 0 means that default, + somewhat optimal size, is used (currently, it is 64K) */ +CVAPI(CvMemStorage*) cvCreateMemStorage( int block_size CV_DEFAULT(0)); + + +/** Creates a memory storage that will borrow memory blocks from parent storage */ +CVAPI(CvMemStorage*) cvCreateChildMemStorage( CvMemStorage* parent ); + + +/** Releases memory storage. All the children of a parent must be released before + the parent. A child storage returns all the blocks to parent when it is released */ +CVAPI(void) cvReleaseMemStorage( CvMemStorage** storage ); + + +/** Clears memory storage. This is the only way(!!!) (besides cvRestoreMemStoragePos) + to reuse memory allocated for the storage - cvClearSeq,cvClearSet ... + do not free any memory. + A child storage returns all the blocks to the parent when it is cleared */ +CVAPI(void) cvClearMemStorage( CvMemStorage* storage ); + +/** Remember a storage "free memory" position */ +CVAPI(void) cvSaveMemStoragePos( const CvMemStorage* storage, CvMemStoragePos* pos ); + +/** Restore a storage "free memory" position */ +CVAPI(void) cvRestoreMemStoragePos( CvMemStorage* storage, CvMemStoragePos* pos ); + +/** Allocates continuous buffer of the specified size in the storage */ +CVAPI(void*) cvMemStorageAlloc( CvMemStorage* storage, size_t size ); + +/** Allocates string in memory storage */ +CVAPI(CvString) cvMemStorageAllocString( CvMemStorage* storage, const char* ptr, + int len CV_DEFAULT(-1) ); + +/** Creates new empty sequence that will reside in the specified storage */ +CVAPI(CvSeq*) cvCreateSeq( int seq_flags, size_t header_size, + size_t elem_size, CvMemStorage* storage ); + +/** Changes default size (granularity) of sequence blocks. 
+ The default size is ~1Kbyte */ +CVAPI(void) cvSetSeqBlockSize( CvSeq* seq, int delta_elems ); + + +/** Adds new element to the end of sequence. Returns pointer to the element */ +CVAPI(schar*) cvSeqPush( CvSeq* seq, const void* element CV_DEFAULT(NULL)); + + +/** Adds new element to the beginning of sequence. Returns pointer to it */ +CVAPI(schar*) cvSeqPushFront( CvSeq* seq, const void* element CV_DEFAULT(NULL)); + + +/** Removes the last element from sequence and optionally saves it */ +CVAPI(void) cvSeqPop( CvSeq* seq, void* element CV_DEFAULT(NULL)); + + +/** Removes the first element from sequence and optioanally saves it */ +CVAPI(void) cvSeqPopFront( CvSeq* seq, void* element CV_DEFAULT(NULL)); + + +#define CV_FRONT 1 +#define CV_BACK 0 +/** Adds several new elements to the end of sequence */ +CVAPI(void) cvSeqPushMulti( CvSeq* seq, const void* elements, + int count, int in_front CV_DEFAULT(0) ); + +/** Removes several elements from the end of sequence and optionally saves them */ +CVAPI(void) cvSeqPopMulti( CvSeq* seq, void* elements, + int count, int in_front CV_DEFAULT(0) ); + +/** Inserts a new element in the middle of sequence. + cvSeqInsert(seq,0,elem) == cvSeqPushFront(seq,elem) */ +CVAPI(schar*) cvSeqInsert( CvSeq* seq, int before_index, + const void* element CV_DEFAULT(NULL)); + +/** Removes specified sequence element */ +CVAPI(void) cvSeqRemove( CvSeq* seq, int index ); + + +/** Removes all the elements from the sequence. The freed memory + can be reused later only by the same sequence unless cvClearMemStorage + or cvRestoreMemStoragePos is called */ +CVAPI(void) cvClearSeq( CvSeq* seq ); + + +/** Retrieves pointer to specified sequence element. + Negative indices are supported and mean counting from the end + (e.g -1 means the last sequence element) */ +CVAPI(schar*) cvGetSeqElem( const CvSeq* seq, int index ); + +/** Calculates index of the specified sequence element. + Returns -1 if element does not belong to the sequence */ +CVAPI(int) cvSeqElemIdx( const CvSeq* seq, const void* element, + CvSeqBlock** block CV_DEFAULT(NULL) ); + +/** Initializes sequence writer. The new elements will be added to the end of sequence */ +CVAPI(void) cvStartAppendToSeq( CvSeq* seq, CvSeqWriter* writer ); + + +/** Combination of cvCreateSeq and cvStartAppendToSeq */ +CVAPI(void) cvStartWriteSeq( int seq_flags, int header_size, + int elem_size, CvMemStorage* storage, + CvSeqWriter* writer ); + +/** Closes sequence writer, updates sequence header and returns pointer + to the resultant sequence + (which may be useful if the sequence was created using cvStartWriteSeq)) +*/ +CVAPI(CvSeq*) cvEndWriteSeq( CvSeqWriter* writer ); + + +/** Updates sequence header. May be useful to get access to some of previously + written elements via cvGetSeqElem or sequence reader */ +CVAPI(void) cvFlushSeqWriter( CvSeqWriter* writer ); + + +/** Initializes sequence reader. + The sequence can be read in forward or backward direction */ +CVAPI(void) cvStartReadSeq( const CvSeq* seq, CvSeqReader* reader, + int reverse CV_DEFAULT(0) ); + + +/** Returns current sequence reader position (currently observed sequence element) */ +CVAPI(int) cvGetSeqReaderPos( CvSeqReader* reader ); + + +/** Changes sequence reader position. 
It may seek to an absolute or + to relative to the current position */ +CVAPI(void) cvSetSeqReaderPos( CvSeqReader* reader, int index, + int is_relative CV_DEFAULT(0)); + +/** Copies sequence content to a continuous piece of memory */ +CVAPI(void*) cvCvtSeqToArray( const CvSeq* seq, void* elements, + CvSlice slice CV_DEFAULT(CV_WHOLE_SEQ) ); + +/** Creates sequence header for array. + After that all the operations on sequences that do not alter the content + can be applied to the resultant sequence */ +CVAPI(CvSeq*) cvMakeSeqHeaderForArray( int seq_type, int header_size, + int elem_size, void* elements, int total, + CvSeq* seq, CvSeqBlock* block ); + +/** Extracts sequence slice (with or without copying sequence elements) */ +CVAPI(CvSeq*) cvSeqSlice( const CvSeq* seq, CvSlice slice, + CvMemStorage* storage CV_DEFAULT(NULL), + int copy_data CV_DEFAULT(0)); + +CV_INLINE CvSeq* cvCloneSeq( const CvSeq* seq, CvMemStorage* storage CV_DEFAULT(NULL)) +{ + return cvSeqSlice( seq, CV_WHOLE_SEQ, storage, 1 ); +} + +/** Removes sequence slice */ +CVAPI(void) cvSeqRemoveSlice( CvSeq* seq, CvSlice slice ); + +/** Inserts a sequence or array into another sequence */ +CVAPI(void) cvSeqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr ); + +/** a < b ? -1 : a > b ? 1 : 0 */ +typedef int (CV_CDECL* CvCmpFunc)(const void* a, const void* b, void* userdata ); + +/** Sorts sequence in-place given element comparison function */ +CVAPI(void) cvSeqSort( CvSeq* seq, CvCmpFunc func, void* userdata CV_DEFAULT(NULL) ); + +/** Finds element in a [sorted] sequence */ +CVAPI(schar*) cvSeqSearch( CvSeq* seq, const void* elem, CvCmpFunc func, + int is_sorted, int* elem_idx, + void* userdata CV_DEFAULT(NULL) ); + +/** Reverses order of sequence elements in-place */ +CVAPI(void) cvSeqInvert( CvSeq* seq ); + +/** Splits sequence into one or more equivalence classes using the specified criteria */ +CVAPI(int) cvSeqPartition( const CvSeq* seq, CvMemStorage* storage, + CvSeq** labels, CvCmpFunc is_equal, void* userdata ); + +/************ Internal sequence functions ************/ +CVAPI(void) cvChangeSeqBlock( void* reader, int direction ); +CVAPI(void) cvCreateSeqBlock( CvSeqWriter* writer ); + + +/** Creates a new set */ +CVAPI(CvSet*) cvCreateSet( int set_flags, int header_size, + int elem_size, CvMemStorage* storage ); + +/** Adds new element to the set and returns pointer to it */ +CVAPI(int) cvSetAdd( CvSet* set_header, CvSetElem* elem CV_DEFAULT(NULL), + CvSetElem** inserted_elem CV_DEFAULT(NULL) ); + +/** Fast variant of cvSetAdd */ +CV_INLINE CvSetElem* cvSetNew( CvSet* set_header ) +{ + CvSetElem* elem = set_header->free_elems; + if( elem ) + { + set_header->free_elems = elem->next_free; + elem->flags = elem->flags & CV_SET_ELEM_IDX_MASK; + set_header->active_count++; + } + else + cvSetAdd( set_header, NULL, &elem ); + return elem; +} + +/** Removes set element given its pointer */ +CV_INLINE void cvSetRemoveByPtr( CvSet* set_header, void* elem ) +{ + CvSetElem* _elem = (CvSetElem*)elem; + assert( _elem->flags >= 0 /*&& (elem->flags & CV_SET_ELEM_IDX_MASK) < set_header->total*/ ); + _elem->next_free = set_header->free_elems; + _elem->flags = (_elem->flags & CV_SET_ELEM_IDX_MASK) | CV_SET_ELEM_FREE_FLAG; + set_header->free_elems = _elem; + set_header->active_count--; +} + +/** Removes element from the set by its index */ +CVAPI(void) cvSetRemove( CvSet* set_header, int index ); + +/** Returns a set element by index. 
If the element doesn't belong to the set, + NULL is returned */ +CV_INLINE CvSetElem* cvGetSetElem( const CvSet* set_header, int idx ) +{ + CvSetElem* elem = (CvSetElem*)(void *)cvGetSeqElem( (CvSeq*)set_header, idx ); + return elem && CV_IS_SET_ELEM( elem ) ? elem : 0; +} + +/** Removes all the elements from the set */ +CVAPI(void) cvClearSet( CvSet* set_header ); + +/** Creates new graph */ +CVAPI(CvGraph*) cvCreateGraph( int graph_flags, int header_size, + int vtx_size, int edge_size, + CvMemStorage* storage ); + +/** Adds new vertex to the graph */ +CVAPI(int) cvGraphAddVtx( CvGraph* graph, const CvGraphVtx* vtx CV_DEFAULT(NULL), + CvGraphVtx** inserted_vtx CV_DEFAULT(NULL) ); + + +/** Removes vertex from the graph together with all incident edges */ +CVAPI(int) cvGraphRemoveVtx( CvGraph* graph, int index ); +CVAPI(int) cvGraphRemoveVtxByPtr( CvGraph* graph, CvGraphVtx* vtx ); + + +/** Link two vertices specified by indices or pointers if they + are not connected or return pointer to already existing edge + connecting the vertices. + Functions return 1 if a new edge was created, 0 otherwise */ +CVAPI(int) cvGraphAddEdge( CvGraph* graph, + int start_idx, int end_idx, + const CvGraphEdge* edge CV_DEFAULT(NULL), + CvGraphEdge** inserted_edge CV_DEFAULT(NULL) ); + +CVAPI(int) cvGraphAddEdgeByPtr( CvGraph* graph, + CvGraphVtx* start_vtx, CvGraphVtx* end_vtx, + const CvGraphEdge* edge CV_DEFAULT(NULL), + CvGraphEdge** inserted_edge CV_DEFAULT(NULL) ); + +/** Remove edge connecting two vertices */ +CVAPI(void) cvGraphRemoveEdge( CvGraph* graph, int start_idx, int end_idx ); +CVAPI(void) cvGraphRemoveEdgeByPtr( CvGraph* graph, CvGraphVtx* start_vtx, + CvGraphVtx* end_vtx ); + +/** Find edge connecting two vertices */ +CVAPI(CvGraphEdge*) cvFindGraphEdge( const CvGraph* graph, int start_idx, int end_idx ); +CVAPI(CvGraphEdge*) cvFindGraphEdgeByPtr( const CvGraph* graph, + const CvGraphVtx* start_vtx, + const CvGraphVtx* end_vtx ); +#define cvGraphFindEdge cvFindGraphEdge +#define cvGraphFindEdgeByPtr cvFindGraphEdgeByPtr + +/** Remove all vertices and edges from the graph */ +CVAPI(void) cvClearGraph( CvGraph* graph ); + + +/** Count number of edges incident to the vertex */ +CVAPI(int) cvGraphVtxDegree( const CvGraph* graph, int vtx_idx ); +CVAPI(int) cvGraphVtxDegreeByPtr( const CvGraph* graph, const CvGraphVtx* vtx ); + + +/** Retrieves graph vertex by given index */ +#define cvGetGraphVtx( graph, idx ) (CvGraphVtx*)cvGetSetElem((CvSet*)(graph), (idx)) + +/** Retrieves index of a graph vertex given its pointer */ +#define cvGraphVtxIdx( graph, vtx ) ((vtx)->flags & CV_SET_ELEM_IDX_MASK) + +/** Retrieves index of a graph edge given its pointer */ +#define cvGraphEdgeIdx( graph, edge ) ((edge)->flags & CV_SET_ELEM_IDX_MASK) + +#define cvGraphGetVtxCount( graph ) ((graph)->active_count) +#define cvGraphGetEdgeCount( graph ) ((graph)->edges->active_count) + +#define CV_GRAPH_VERTEX 1 +#define CV_GRAPH_TREE_EDGE 2 +#define CV_GRAPH_BACK_EDGE 4 +#define CV_GRAPH_FORWARD_EDGE 8 +#define CV_GRAPH_CROSS_EDGE 16 +#define CV_GRAPH_ANY_EDGE 30 +#define CV_GRAPH_NEW_TREE 32 +#define CV_GRAPH_BACKTRACKING 64 +#define CV_GRAPH_OVER -1 + +#define CV_GRAPH_ALL_ITEMS -1 + +/** flags for graph vertices and edges */ +#define CV_GRAPH_ITEM_VISITED_FLAG (1 << 30) +#define CV_IS_GRAPH_VERTEX_VISITED(vtx) \ + (((CvGraphVtx*)(vtx))->flags & CV_GRAPH_ITEM_VISITED_FLAG) +#define CV_IS_GRAPH_EDGE_VISITED(edge) \ + (((CvGraphEdge*)(edge))->flags & CV_GRAPH_ITEM_VISITED_FLAG) +#define CV_GRAPH_SEARCH_TREE_NODE_FLAG (1 << 
29) +#define CV_GRAPH_FORWARD_EDGE_FLAG (1 << 28) + +typedef struct CvGraphScanner +{ + CvGraphVtx* vtx; /* current graph vertex (or current edge origin) */ + CvGraphVtx* dst; /* current graph edge destination vertex */ + CvGraphEdge* edge; /* current edge */ + + CvGraph* graph; /* the graph */ + CvSeq* stack; /* the graph vertex stack */ + int index; /* the lower bound of certainly visited vertices */ + int mask; /* event mask */ +} +CvGraphScanner; + +/** Creates new graph scanner. */ +CVAPI(CvGraphScanner*) cvCreateGraphScanner( CvGraph* graph, + CvGraphVtx* vtx CV_DEFAULT(NULL), + int mask CV_DEFAULT(CV_GRAPH_ALL_ITEMS)); + +/** Releases graph scanner. */ +CVAPI(void) cvReleaseGraphScanner( CvGraphScanner** scanner ); + +/** Get next graph element */ +CVAPI(int) cvNextGraphItem( CvGraphScanner* scanner ); + +/** Creates a copy of graph */ +CVAPI(CvGraph*) cvCloneGraph( const CvGraph* graph, CvMemStorage* storage ); + + +/** Does look-up transformation. Elements of the source array + (that should be 8uC1 or 8sC1) are used as indexes in lutarr 256-element table */ +CVAPI(void) cvLUT( const CvArr* src, CvArr* dst, const CvArr* lut ); + + +/******************* Iteration through the sequence tree *****************/ +typedef struct CvTreeNodeIterator +{ + const void* node; + int level; + int max_level; +} +CvTreeNodeIterator; + +CVAPI(void) cvInitTreeNodeIterator( CvTreeNodeIterator* tree_iterator, + const void* first, int max_level ); +CVAPI(void*) cvNextTreeNode( CvTreeNodeIterator* tree_iterator ); +CVAPI(void*) cvPrevTreeNode( CvTreeNodeIterator* tree_iterator ); + +/** Inserts sequence into tree with specified "parent" sequence. + If parent is equal to frame (e.g. the most external contour), + then added contour will have null pointer to parent. */ +CVAPI(void) cvInsertNodeIntoTree( void* node, void* parent, void* frame ); + +/** Removes contour from tree (together with the contour children). */ +CVAPI(void) cvRemoveNodeFromTree( void* node, void* frame ); + +/** Gathers pointers to all the sequences, + accessible from the `first`, to the single sequence */ +CVAPI(CvSeq*) cvTreeToNodeSeq( const void* first, int header_size, + CvMemStorage* storage ); + +/** The function implements the K-means algorithm for clustering an array of sample + vectors in a specified number of classes */ +#define CV_KMEANS_USE_INITIAL_LABELS 1 +CVAPI(int) cvKMeans2( const CvArr* samples, int cluster_count, CvArr* labels, + CvTermCriteria termcrit, int attempts CV_DEFAULT(1), + CvRNG* rng CV_DEFAULT(0), int flags CV_DEFAULT(0), + CvArr* _centers CV_DEFAULT(0), double* compactness CV_DEFAULT(0) ); + +/****************************************************************************************\ +* System functions * +\****************************************************************************************/ + +/** Loads optimized functions from IPP, MKL etc. or switches back to pure C code */ +CVAPI(int) cvUseOptimized( int on_off ); + +typedef IplImage* (CV_STDCALL* Cv_iplCreateImageHeader) + (int,int,int,char*,char*,int,int,int,int,int, + IplROI*,IplImage*,void*,IplTileInfo*); +typedef void (CV_STDCALL* Cv_iplAllocateImageData)(IplImage*,int,int); +typedef void (CV_STDCALL* Cv_iplDeallocate)(IplImage*,int); +typedef IplROI* (CV_STDCALL* Cv_iplCreateROI)(int,int,int,int,int); +typedef IplImage* (CV_STDCALL* Cv_iplCloneImage)(const IplImage*); + +/** @brief Makes OpenCV use IPL functions for allocating IplImage and IplROI structures. + +Normally, the function is not called directly. 
Instead, a simple macro +CV_TURN_ON_IPL_COMPATIBILITY() is used that calls cvSetIPLAllocators and passes there pointers +to IPL allocation functions. : +@code + ... + CV_TURN_ON_IPL_COMPATIBILITY() + ... +@endcode +@param create_header pointer to a function, creating IPL image header. +@param allocate_data pointer to a function, allocating IPL image data. +@param deallocate pointer to a function, deallocating IPL image. +@param create_roi pointer to a function, creating IPL image ROI (i.e. Region of Interest). +@param clone_image pointer to a function, cloning an IPL image. + */ +CVAPI(void) cvSetIPLAllocators( Cv_iplCreateImageHeader create_header, + Cv_iplAllocateImageData allocate_data, + Cv_iplDeallocate deallocate, + Cv_iplCreateROI create_roi, + Cv_iplCloneImage clone_image ); + +#define CV_TURN_ON_IPL_COMPATIBILITY() \ + cvSetIPLAllocators( iplCreateImageHeader, iplAllocateImage, \ + iplDeallocate, iplCreateROI, iplCloneImage ) + +/****************************************************************************************\ +* Data Persistence * +\****************************************************************************************/ + +/********************************** High-level functions ********************************/ + +/** @brief Opens file storage for reading or writing data. + +The function opens file storage for reading or writing data. In the latter case, a new file is +created or an existing file is rewritten. The type of the read or written file is determined by the +filename extension: .xml for XML, .yml or .yaml for YAML and .json for JSON. + +At the same time, it also supports adding parameters like "example.xml?base64". + +The function returns a pointer to the CvFileStorage structure. +If the file cannot be opened then the function returns NULL. +@param filename Name of the file associated with the storage +@param memstorage Memory storage used for temporary data and for +: storing dynamic structures, such as CvSeq or CvGraph . If it is NULL, a temporary memory + storage is created and used. +@param flags Can be one of the following: +> - **CV_STORAGE_READ** the storage is open for reading +> - **CV_STORAGE_WRITE** the storage is open for writing + (use **CV_STORAGE_WRITE | CV_STORAGE_WRITE_BASE64** to write rawdata in Base64) +@param encoding + */ +CVAPI(CvFileStorage*) cvOpenFileStorage( const char* filename, CvMemStorage* memstorage, + int flags, const char* encoding CV_DEFAULT(NULL) ); + +/** @brief Releases file storage. + +The function closes the file associated with the storage and releases all the temporary structures. +It must be called after all I/O operations with the storage are finished. +@param fs Double pointer to the released file storage + */ +CVAPI(void) cvReleaseFileStorage( CvFileStorage** fs ); + +/** returns attribute value or 0 (NULL) if there is no such attribute */ +CVAPI(const char*) cvAttrValue( const CvAttrList* attr, const char* attr_name ); + +/** @brief Starts writing a new structure. + +The function starts writing a compound structure (collection) that can be a sequence or a map. After +all the structure fields, which can be scalars or structures, are written, cvEndWriteStruct should +be called. The function can be used to group some objects or to implement the write function for a +some user object (see CvTypeInfo). +@param fs File storage +@param name Name of the written structure. The structure can be accessed by this name when the +storage is read. 
+@param struct_flags A combination one of the following values: +- **CV_NODE_SEQ** the written structure is a sequence (see discussion of CvFileStorage ), + that is, its elements do not have a name. +- **CV_NODE_MAP** the written structure is a map (see discussion of CvFileStorage ), that + is, all its elements have names. +One and only one of the two above flags must be specified +- **CV_NODE_FLOW** the optional flag that makes sense only for YAML streams. It means that + the structure is written as a flow (not as a block), which is more compact. It is + recommended to use this flag for structures or arrays whose elements are all scalars. +@param type_name Optional parameter - the object type name. In + case of XML it is written as a type_id attribute of the structure opening tag. In the case of + YAML it is written after a colon following the structure name (see the example in + CvFileStorage description). In case of JSON it is written as a name/value pair. + Mainly it is used with user objects. When the storage is read, the + encoded type name is used to determine the object type (see CvTypeInfo and cvFindType ). +@param attributes This parameter is not used in the current implementation + */ +CVAPI(void) cvStartWriteStruct( CvFileStorage* fs, const char* name, + int struct_flags, const char* type_name CV_DEFAULT(NULL), + CvAttrList attributes CV_DEFAULT(cvAttrList())); + +/** @brief Finishes writing to a file node collection. +@param fs File storage +@sa cvStartWriteStruct. + */ +CVAPI(void) cvEndWriteStruct( CvFileStorage* fs ); + +/** @brief Writes an integer value. + +The function writes a single integer value (with or without a name) to the file storage. +@param fs File storage +@param name Name of the written value. Should be NULL if and only if the parent structure is a +sequence. +@param value The written value + */ +CVAPI(void) cvWriteInt( CvFileStorage* fs, const char* name, int value ); + +/** @brief Writes a floating-point value. + +The function writes a single floating-point value (with or without a name) to file storage. Special +values are encoded as follows: NaN (Not A Number) as .NaN, infinity as +.Inf or -.Inf. + +The following example shows how to use the low-level writing functions to store custom structures, +such as termination criteria, without registering a new type. : +@code + void write_termcriteria( CvFileStorage* fs, const char* struct_name, + CvTermCriteria* termcrit ) + { + cvStartWriteStruct( fs, struct_name, CV_NODE_MAP, NULL, cvAttrList(0,0)); + cvWriteComment( fs, "termination criteria", 1 ); // just a description + if( termcrit->type & CV_TERMCRIT_ITER ) + cvWriteInteger( fs, "max_iterations", termcrit->max_iter ); + if( termcrit->type & CV_TERMCRIT_EPS ) + cvWriteReal( fs, "accuracy", termcrit->epsilon ); + cvEndWriteStruct( fs ); + } +@endcode +@param fs File storage +@param name Name of the written value. Should be NULL if and only if the parent structure is a +sequence. +@param value The written value +*/ +CVAPI(void) cvWriteReal( CvFileStorage* fs, const char* name, double value ); + +/** @brief Writes a text string. + +The function writes a text string to file storage. +@param fs File storage +@param name Name of the written string . Should be NULL if and only if the parent structure is a +sequence. +@param str The written text string +@param quote If non-zero, the written string is put in quotes, regardless of whether they are +required. Otherwise, if the flag is zero, quotes are used only when they are required (e.g. 
when +the string starts with a digit or contains spaces). + */ +CVAPI(void) cvWriteString( CvFileStorage* fs, const char* name, + const char* str, int quote CV_DEFAULT(0) ); + +/** @brief Writes a comment. + +The function writes a comment into file storage. The comments are skipped when the storage is read. +@param fs File storage +@param comment The written comment, single-line or multi-line +@param eol_comment If non-zero, the function tries to put the comment at the end of current line. +If the flag is zero, if the comment is multi-line, or if it does not fit at the end of the current +line, the comment starts a new line. + */ +CVAPI(void) cvWriteComment( CvFileStorage* fs, const char* comment, + int eol_comment ); + +/** @brief Writes an object to file storage. + +The function writes an object to file storage. First, the appropriate type info is found using +cvTypeOf. Then, the write method associated with the type info is called. + +Attributes are used to customize the writing procedure. The standard types support the following +attributes (all the dt attributes have the same format as in cvWriteRawData): + +-# CvSeq + - **header_dt** description of user fields of the sequence header that follow CvSeq, or + CvChain (if the sequence is a Freeman chain) or CvContour (if the sequence is a contour or + point sequence) + - **dt** description of the sequence elements. + - **recursive** if the attribute is present and is not equal to "0" or "false", the whole + tree of sequences (contours) is stored. +-# CvGraph + - **header_dt** description of user fields of the graph header that follows CvGraph; + - **vertex_dt** description of user fields of graph vertices + - **edge_dt** description of user fields of graph edges (note that the edge weight is + always written, so there is no need to specify it explicitly) + +Below is the code that creates the YAML file shown in the CvFileStorage description: +@code + #include "cxcore.h" + + int main( int argc, char** argv ) + { + CvMat* mat = cvCreateMat( 3, 3, CV_32F ); + CvFileStorage* fs = cvOpenFileStorage( "example.yml", 0, CV_STORAGE_WRITE ); + + cvSetIdentity( mat ); + cvWrite( fs, "A", mat, cvAttrList(0,0) ); + + cvReleaseFileStorage( &fs ); + cvReleaseMat( &mat ); + return 0; + } +@endcode +@param fs File storage +@param name Name of the written object. Should be NULL if and only if the parent structure is a +sequence. +@param ptr Pointer to the object +@param attributes The attributes of the object. They are specific for each particular type (see +the discussion below). + */ +CVAPI(void) cvWrite( CvFileStorage* fs, const char* name, const void* ptr, + CvAttrList attributes CV_DEFAULT(cvAttrList())); + +/** @brief Starts the next stream. + +The function finishes the currently written stream and starts the next stream. In the case of XML +the file with multiple streams looks like this: +@code{.xml} + + + + + + + ... +@endcode +The YAML file will look like this: +@code{.yaml} + %YAML 1.0 + # stream #1 data + ... + --- + # stream #2 data +@endcode +This is useful for concatenating files or for resuming the writing process. +@param fs File storage + */ +CVAPI(void) cvStartNextStream( CvFileStorage* fs ); + +/** @brief Writes multiple numbers. + +The function writes an array, whose elements consist of single or multiple numbers. The function +call can be replaced with a loop containing a few cvWriteInt and cvWriteReal calls, but a single +call is more efficient. 
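+For illustration, a minimal sketch of the single-call form (fs and samples are
+placeholder names, not part of this header):
+@code
+    float samples[10];
+    /* ... fill samples ... */
+    cvStartWriteStruct( fs, "samples", CV_NODE_SEQ | CV_NODE_FLOW, NULL, cvAttrList(0,0) );
+    cvWriteRawData( fs, samples, 10, "f" );   /* dt = "f": each element is one float */
+    cvEndWriteStruct( fs );
+@endcode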
Note that because none of the elements have a name, they should be written +to a sequence rather than a map. +@param fs File storage +@param src Pointer to the written array +@param len Number of the array elements to write +@param dt Specification of each array element, see @ref format_spec "format specification" + */ +CVAPI(void) cvWriteRawData( CvFileStorage* fs, const void* src, + int len, const char* dt ); + +/** @brief Writes multiple numbers in Base64. + +If either CV_STORAGE_WRITE_BASE64 or cv::FileStorage::WRITE_BASE64 is used, +this function will be the same as cvWriteRawData. If neither, the main +difference is that it outputs a sequence in Base64 encoding rather than +in plain text. + +This function can only be used to write a sequence with a type "binary". + +@param fs File storage +@param src Pointer to the written array +@param len Number of the array elements to write +@param dt Specification of each array element, see @ref format_spec "format specification" +*/ +CVAPI(void) cvWriteRawDataBase64( CvFileStorage* fs, const void* src, + int len, const char* dt ); + +/** @brief Returns a unique pointer for a given name. + +The function returns a unique pointer for each particular file node name. This pointer can be then +passed to the cvGetFileNode function that is faster than cvGetFileNodeByName because it compares +text strings by comparing pointers rather than the strings' content. + +Consider the following example where an array of points is encoded as a sequence of 2-entry maps: +@code + points: + - { x: 10, y: 10 } + - { x: 20, y: 20 } + - { x: 30, y: 30 } + # ... +@endcode +Then, it is possible to get hashed "x" and "y" pointers to speed up decoding of the points. : +@code + #include "cxcore.h" + + int main( int argc, char** argv ) + { + CvFileStorage* fs = cvOpenFileStorage( "points.yml", 0, CV_STORAGE_READ ); + CvStringHashNode* x_key = cvGetHashedNode( fs, "x", -1, 1 ); + CvStringHashNode* y_key = cvGetHashedNode( fs, "y", -1, 1 ); + CvFileNode* points = cvGetFileNodeByName( fs, 0, "points" ); + + if( CV_NODE_IS_SEQ(points->tag) ) + { + CvSeq* seq = points->data.seq; + int i, total = seq->total; + CvSeqReader reader; + cvStartReadSeq( seq, &reader, 0 ); + for( i = 0; i < total; i++ ) + { + CvFileNode* pt = (CvFileNode*)reader.ptr; + #if 1 // faster variant + CvFileNode* xnode = cvGetFileNode( fs, pt, x_key, 0 ); + CvFileNode* ynode = cvGetFileNode( fs, pt, y_key, 0 ); + assert( xnode && CV_NODE_IS_INT(xnode->tag) && + ynode && CV_NODE_IS_INT(ynode->tag)); + int x = xnode->data.i; // or x = cvReadInt( xnode, 0 ); + int y = ynode->data.i; // or y = cvReadInt( ynode, 0 ); + #elif 1 // slower variant; does not use x_key & y_key + CvFileNode* xnode = cvGetFileNodeByName( fs, pt, "x" ); + CvFileNode* ynode = cvGetFileNodeByName( fs, pt, "y" ); + assert( xnode && CV_NODE_IS_INT(xnode->tag) && + ynode && CV_NODE_IS_INT(ynode->tag)); + int x = xnode->data.i; // or x = cvReadInt( xnode, 0 ); + int y = ynode->data.i; // or y = cvReadInt( ynode, 0 ); + #else // the slowest yet the easiest to use variant + int x = cvReadIntByName( fs, pt, "x", 0 ); + int y = cvReadIntByName( fs, pt, "y", 0 ); + #endif + CV_NEXT_SEQ_ELEM( seq->elem_size, reader ); + printf(" + } + } + cvReleaseFileStorage( &fs ); + return 0; + } +@endcode +Please note that whatever method of accessing a map you are using, it is still much slower than +using plain sequences; for example, in the above example, it is more efficient to encode the points +as pairs of integers in a single numeric sequence. 
+@param fs File storage +@param name Literal node name +@param len Length of the name (if it is known apriori), or -1 if it needs to be calculated +@param create_missing Flag that specifies, whether an absent key should be added into the hash table +*/ +CVAPI(CvStringHashNode*) cvGetHashedKey( CvFileStorage* fs, const char* name, + int len CV_DEFAULT(-1), + int create_missing CV_DEFAULT(0)); + +/** @brief Retrieves one of the top-level nodes of the file storage. + +The function returns one of the top-level file nodes. The top-level nodes do not have a name, they +correspond to the streams that are stored one after another in the file storage. If the index is out +of range, the function returns a NULL pointer, so all the top-level nodes can be iterated by +subsequent calls to the function with stream_index=0,1,..., until the NULL pointer is returned. +This function can be used as a base for recursive traversal of the file storage. +@param fs File storage +@param stream_index Zero-based index of the stream. See cvStartNextStream . In most cases, +there is only one stream in the file; however, there can be several. + */ +CVAPI(CvFileNode*) cvGetRootFileNode( const CvFileStorage* fs, + int stream_index CV_DEFAULT(0) ); + +/** @brief Finds a node in a map or file storage. + +The function finds a file node. It is a faster version of cvGetFileNodeByName (see +cvGetHashedKey discussion). Also, the function can insert a new node, if it is not in the map yet. +@param fs File storage +@param map The parent map. If it is NULL, the function searches a top-level node. If both map and +key are NULLs, the function returns the root file node - a map that contains top-level nodes. +@param key Unique pointer to the node name, retrieved with cvGetHashedKey +@param create_missing Flag that specifies whether an absent node should be added to the map + */ +CVAPI(CvFileNode*) cvGetFileNode( CvFileStorage* fs, CvFileNode* map, + const CvStringHashNode* key, + int create_missing CV_DEFAULT(0) ); + +/** @brief Finds a node in a map or file storage. + +The function finds a file node by name. The node is searched either in map or, if the pointer is +NULL, among the top-level file storage nodes. Using this function for maps and cvGetSeqElem (or +sequence reader) for sequences, it is possible to navigate through the file storage. To speed up +multiple queries for a certain key (e.g., in the case of an array of structures) one may use a +combination of cvGetHashedKey and cvGetFileNode. +@param fs File storage +@param map The parent map. If it is NULL, the function searches in all the top-level nodes +(streams), starting with the first one. +@param name The file node name + */ +CVAPI(CvFileNode*) cvGetFileNodeByName( const CvFileStorage* fs, + const CvFileNode* map, + const char* name ); + +/** @brief Retrieves an integer value from a file node. + +The function returns an integer that is represented by the file node. If the file node is NULL, the +default_value is returned (thus, it is convenient to call the function right after cvGetFileNode +without checking for a NULL pointer). If the file node has type CV_NODE_INT, then node-\>data.i is +returned. If the file node has type CV_NODE_REAL, then node-\>data.f is converted to an integer +and returned. Otherwise the error is reported. +@param node File node +@param default_value The value that is returned if node is NULL + */ +CV_INLINE int cvReadInt( const CvFileNode* node, int default_value CV_DEFAULT(0) ) +{ + return !node ? default_value : + CV_NODE_IS_INT(node->tag) ? 
node->data.i : + CV_NODE_IS_REAL(node->tag) ? cvRound(node->data.f) : 0x7fffffff; +} + +/** @brief Finds a file node and returns its value. + +The function is a simple superposition of cvGetFileNodeByName and cvReadInt. +@param fs File storage +@param map The parent map. If it is NULL, the function searches a top-level node. +@param name The node name +@param default_value The value that is returned if the file node is not found + */ +CV_INLINE int cvReadIntByName( const CvFileStorage* fs, const CvFileNode* map, + const char* name, int default_value CV_DEFAULT(0) ) +{ + return cvReadInt( cvGetFileNodeByName( fs, map, name ), default_value ); +} + +/** @brief Retrieves a floating-point value from a file node. + +The function returns a floating-point value that is represented by the file node. If the file node +is NULL, the default_value is returned (thus, it is convenient to call the function right after +cvGetFileNode without checking for a NULL pointer). If the file node has type CV_NODE_REAL , +then node-\>data.f is returned. If the file node has type CV_NODE_INT , then node-:math:\>data.f +is converted to floating-point and returned. Otherwise the result is not determined. +@param node File node +@param default_value The value that is returned if node is NULL + */ +CV_INLINE double cvReadReal( const CvFileNode* node, double default_value CV_DEFAULT(0.) ) +{ + return !node ? default_value : + CV_NODE_IS_INT(node->tag) ? (double)node->data.i : + CV_NODE_IS_REAL(node->tag) ? node->data.f : 1e300; +} + +/** @brief Finds a file node and returns its value. + +The function is a simple superposition of cvGetFileNodeByName and cvReadReal . +@param fs File storage +@param map The parent map. If it is NULL, the function searches a top-level node. +@param name The node name +@param default_value The value that is returned if the file node is not found + */ +CV_INLINE double cvReadRealByName( const CvFileStorage* fs, const CvFileNode* map, + const char* name, double default_value CV_DEFAULT(0.) ) +{ + return cvReadReal( cvGetFileNodeByName( fs, map, name ), default_value ); +} + +/** @brief Retrieves a text string from a file node. + +The function returns a text string that is represented by the file node. If the file node is NULL, +the default_value is returned (thus, it is convenient to call the function right after +cvGetFileNode without checking for a NULL pointer). If the file node has type CV_NODE_STR , then +node-:math:\>data.str.ptr is returned. Otherwise the result is not determined. +@param node File node +@param default_value The value that is returned if node is NULL + */ +CV_INLINE const char* cvReadString( const CvFileNode* node, + const char* default_value CV_DEFAULT(NULL) ) +{ + return !node ? default_value : CV_NODE_IS_STRING(node->tag) ? node->data.str.ptr : 0; +} + +/** @brief Finds a file node by its name and returns its value. + +The function is a simple superposition of cvGetFileNodeByName and cvReadString . +@param fs File storage +@param map The parent map. If it is NULL, the function searches a top-level node. +@param name The node name +@param default_value The value that is returned if the file node is not found + */ +CV_INLINE const char* cvReadStringByName( const CvFileStorage* fs, const CvFileNode* map, + const char* name, const char* default_value CV_DEFAULT(NULL) ) +{ + return cvReadString( cvGetFileNodeByName( fs, map, name ), default_value ); +} + + +/** @brief Decodes an object and returns a pointer to it. 
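+A minimal sketch, reading back the matrix "A" produced by the cvWrite example above
+(error checks omitted):
+@code
+    CvFileStorage* fs = cvOpenFileStorage( "example.yml", 0, CV_STORAGE_READ );
+    CvMat* A = (CvMat*)cvRead( fs, cvGetFileNodeByName( fs, NULL, "A" ), NULL );
+    cvReleaseFileStorage( &fs );
+    /* use A, then cvReleaseMat( &A ); */
+@endcode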
+ +The function decodes a user object (creates an object in a native representation from the file +storage subtree) and returns it. The object to be decoded must be an instance of a registered type +that supports the read method (see CvTypeInfo). The type of the object is determined by the type +name that is encoded in the file. If the object is a dynamic structure, it is created either in +memory storage and passed to cvOpenFileStorage or, if a NULL pointer was passed, in temporary +memory storage, which is released when cvReleaseFileStorage is called. Otherwise, if the object is +not a dynamic structure, it is created in a heap and should be released with a specialized function +or by using the generic cvRelease. +@param fs File storage +@param node The root object node +@param attributes Unused parameter + */ +CVAPI(void*) cvRead( CvFileStorage* fs, CvFileNode* node, + CvAttrList* attributes CV_DEFAULT(NULL)); + +/** @brief Finds an object by name and decodes it. + +The function is a simple superposition of cvGetFileNodeByName and cvRead. +@param fs File storage +@param map The parent map. If it is NULL, the function searches a top-level node. +@param name The node name +@param attributes Unused parameter + */ +CV_INLINE void* cvReadByName( CvFileStorage* fs, const CvFileNode* map, + const char* name, CvAttrList* attributes CV_DEFAULT(NULL) ) +{ + return cvRead( fs, cvGetFileNodeByName( fs, map, name ), attributes ); +} + + +/** @brief Initializes the file node sequence reader. + +The function initializes the sequence reader to read data from a file node. The initialized reader +can be then passed to cvReadRawDataSlice. +@param fs File storage +@param src The file node (a sequence) to read numbers from +@param reader Pointer to the sequence reader + */ +CVAPI(void) cvStartReadRawData( const CvFileStorage* fs, const CvFileNode* src, + CvSeqReader* reader ); + +/** @brief Initializes file node sequence reader. + +The function reads one or more elements from the file node, representing a sequence, to a +user-specified array. The total number of read sequence elements is a product of total and the +number of components in each array element. For example, if dt=2if, the function will read total\*3 +sequence elements. As with any sequence, some parts of the file node sequence can be skipped or read +repeatedly by repositioning the reader using cvSetSeqReaderPos. +@param fs File storage +@param reader The sequence reader. Initialize it with cvStartReadRawData . +@param count The number of elements to read +@param dst Pointer to the destination array +@param dt Specification of each array element. It has the same format as in cvWriteRawData . + */ +CVAPI(void) cvReadRawDataSlice( const CvFileStorage* fs, CvSeqReader* reader, + int count, void* dst, const char* dt ); + +/** @brief Reads multiple numbers. + +The function reads elements from a file node that represents a sequence of scalars. +@param fs File storage +@param src The file node (a sequence) to read numbers from +@param dst Pointer to the destination array +@param dt Specification of each array element. It has the same format as in cvWriteRawData . + */ +CVAPI(void) cvReadRawData( const CvFileStorage* fs, const CvFileNode* src, + void* dst, const char* dt ); + +/** @brief Writes a file node to another file storage. + +The function writes a copy of a file node to file storage. Possible applications of the function are +merging several file storages into one and conversion between XML, YAML and JSON formats. 
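+For instance, a single named node can be copied between two open storages
+(fs_src, fs_dst and the node name "calibration" are placeholders):
+@code
+    CvFileNode* node = cvGetFileNodeByName( fs_src, NULL, "calibration" );
+    if( node )
+        cvWriteFileNode( fs_dst, "calibration", node, 0 );
+@endcode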
+@param fs Destination file storage +@param new_node_name New name of the file node in the destination file storage. To keep the +existing name, use cvcvGetFileNodeName +@param node The written node +@param embed If the written node is a collection and this parameter is not zero, no extra level of +hierarchy is created. Instead, all the elements of node are written into the currently written +structure. Of course, map elements can only be embedded into another map, and sequence elements +can only be embedded into another sequence. + */ +CVAPI(void) cvWriteFileNode( CvFileStorage* fs, const char* new_node_name, + const CvFileNode* node, int embed ); + +/** @brief Returns the name of a file node. + +The function returns the name of a file node or NULL, if the file node does not have a name or if +node is NULL. +@param node File node + */ +CVAPI(const char*) cvGetFileNodeName( const CvFileNode* node ); + +/*********************************** Adding own types ***********************************/ + +/** @brief Registers a new type. + +The function registers a new type, which is described by info . The function creates a copy of the +structure, so the user should delete it after calling the function. +@param info Type info structure + */ +CVAPI(void) cvRegisterType( const CvTypeInfo* info ); + +/** @brief Unregisters the type. + +The function unregisters a type with a specified name. If the name is unknown, it is possible to +locate the type info by an instance of the type using cvTypeOf or by iterating the type list, +starting from cvFirstType, and then calling cvUnregisterType(info-\>typeName). +@param type_name Name of an unregistered type + */ +CVAPI(void) cvUnregisterType( const char* type_name ); + +/** @brief Returns the beginning of a type list. + +The function returns the first type in the list of registered types. Navigation through the list can +be done via the prev and next fields of the CvTypeInfo structure. + */ +CVAPI(CvTypeInfo*) cvFirstType(void); + +/** @brief Finds a type by its name. + +The function finds a registered type by its name. It returns NULL if there is no type with the +specified name. +@param type_name Type name + */ +CVAPI(CvTypeInfo*) cvFindType( const char* type_name ); + +/** @brief Returns the type of an object. + +The function finds the type of a given object. It iterates through the list of registered types and +calls the is_instance function/method for every type info structure with that object until one of +them returns non-zero or until the whole list has been traversed. In the latter case, the function +returns NULL. +@param struct_ptr The object pointer + */ +CVAPI(CvTypeInfo*) cvTypeOf( const void* struct_ptr ); + +/** @brief Releases an object. + +The function finds the type of a given object and calls release with the double pointer. +@param struct_ptr Double pointer to the object + */ +CVAPI(void) cvRelease( void** struct_ptr ); + +/** @brief Makes a clone of an object. + +The function finds the type of a given object and calls clone with the passed object. Of course, if +you know the object type, for example, struct_ptr is CvMat\*, it is faster to call the specific +function, like cvCloneMat. +@param struct_ptr The object to clone + */ +CVAPI(void*) cvClone( const void* struct_ptr ); + +/** @brief Saves an object to a file. + +The function saves an object to a file. It provides a simple interface to cvWrite . +@param filename File name +@param struct_ptr Object to save +@param name Optional object name. 
If it is NULL, the name will be formed from filename . +@param comment Optional comment to put in the beginning of the file +@param attributes Optional attributes passed to cvWrite + */ +CVAPI(void) cvSave( const char* filename, const void* struct_ptr, + const char* name CV_DEFAULT(NULL), + const char* comment CV_DEFAULT(NULL), + CvAttrList attributes CV_DEFAULT(cvAttrList())); + +/** @brief Loads an object from a file. + +The function loads an object from a file. It basically reads the specified file, find the first +top-level node and calls cvRead for that node. If the file node does not have type information or +the type information can not be found by the type name, the function returns NULL. After the object +is loaded, the file storage is closed and all the temporary buffers are deleted. Thus, to load a +dynamic structure, such as a sequence, contour, or graph, one should pass a valid memory storage +destination to the function. +@param filename File name +@param memstorage Memory storage for dynamic structures, such as CvSeq or CvGraph . It is not used +for matrices or images. +@param name Optional object name. If it is NULL, the first top-level object in the storage will be +loaded. +@param real_name Optional output parameter that will contain the name of the loaded object +(useful if name=NULL ) + */ +CVAPI(void*) cvLoad( const char* filename, + CvMemStorage* memstorage CV_DEFAULT(NULL), + const char* name CV_DEFAULT(NULL), + const char** real_name CV_DEFAULT(NULL) ); + +/*********************************** Measuring Execution Time ***************************/ + +/** helper functions for RNG initialization and accurate time measurement: + uses internal clock counter on x86 */ +CVAPI(int64) cvGetTickCount( void ); +CVAPI(double) cvGetTickFrequency( void ); + +/*********************************** CPU capabilities ***********************************/ + +CVAPI(int) cvCheckHardwareSupport(int feature); + +/*********************************** Multi-Threading ************************************/ + +/** retrieve/set the number of threads used in OpenMP implementations */ +CVAPI(int) cvGetNumThreads( void ); +CVAPI(void) cvSetNumThreads( int threads CV_DEFAULT(0) ); +/** get index of the thread being executed */ +CVAPI(int) cvGetThreadNum( void ); + + +/********************************** Error Handling **************************************/ + +/** Get current OpenCV error status */ +CVAPI(int) cvGetErrStatus( void ); + +/** Sets error status silently */ +CVAPI(void) cvSetErrStatus( int status ); + +#define CV_ErrModeLeaf 0 /* Print error and exit program */ +#define CV_ErrModeParent 1 /* Print error and continue */ +#define CV_ErrModeSilent 2 /* Don't print and continue */ + +/** Retrieves current error processing mode */ +CVAPI(int) cvGetErrMode( void ); + +/** Sets error processing mode, returns previously used mode */ +CVAPI(int) cvSetErrMode( int mode ); + +/** Sets error status and performs some additional actions (displaying message box, + writing message to stderr, terminating application etc.) 
+ depending on the current error mode */ +CVAPI(void) cvError( int status, const char* func_name, + const char* err_msg, const char* file_name, int line ); + +/** Retrieves textual description of the error given its code */ +CVAPI(const char*) cvErrorStr( int status ); + +/** Retrieves detailed information about the last error occurred */ +CVAPI(int) cvGetErrInfo( const char** errcode_desc, const char** description, + const char** filename, int* line ); + +/** Maps IPP error codes to the counterparts from OpenCV */ +CVAPI(int) cvErrorFromIppStatus( int ipp_status ); + +typedef int (CV_CDECL *CvErrorCallback)( int status, const char* func_name, + const char* err_msg, const char* file_name, int line, void* userdata ); + +/** Assigns a new error-handling function */ +CVAPI(CvErrorCallback) cvRedirectError( CvErrorCallback error_handler, + void* userdata CV_DEFAULT(NULL), + void** prev_userdata CV_DEFAULT(NULL) ); + +/** Output nothing */ +CVAPI(int) cvNulDevReport( int status, const char* func_name, const char* err_msg, + const char* file_name, int line, void* userdata ); + +/** Output to console(fprintf(stderr,...)) */ +CVAPI(int) cvStdErrReport( int status, const char* func_name, const char* err_msg, + const char* file_name, int line, void* userdata ); + +/** Output to MessageBox(WIN32) */ +CVAPI(int) cvGuiBoxReport( int status, const char* func_name, const char* err_msg, + const char* file_name, int line, void* userdata ); + +#define OPENCV_ERROR(status,func,context) \ +cvError((status),(func),(context),__FILE__,__LINE__) + +#define OPENCV_ASSERT(expr,func,context) \ +{if (! (expr)) \ +{OPENCV_ERROR(CV_StsInternal,(func),(context));}} + +#define OPENCV_CALL( Func ) \ +{ \ +Func; \ +} + + +/** CV_FUNCNAME macro defines icvFuncName constant which is used by CV_ERROR macro */ +#ifdef CV_NO_FUNC_NAMES +#define CV_FUNCNAME( Name ) +#define cvFuncName "" +#else +#define CV_FUNCNAME( Name ) \ +static char cvFuncName[] = Name +#endif + + +/** + CV_ERROR macro unconditionally raises error with passed code and message. + After raising error, control will be transferred to the exit label. + */ +#define CV_ERROR( Code, Msg ) \ +{ \ + cvError( (Code), cvFuncName, Msg, __FILE__, __LINE__ ); \ + __CV_EXIT__; \ +} + +/** + CV_CHECK macro checks error status after CV (or IPL) + function call. If error detected, control will be transferred to the exit + label. + */ +#define CV_CHECK() \ +{ \ + if( cvGetErrStatus() < 0 ) \ + CV_ERROR( CV_StsBackTrace, "Inner function failed." ); \ +} + + +/** + CV_CALL macro calls CV (or IPL) function, checks error status and + signals a error if the function failed. Useful in "parent node" + error processing mode + */ +#define CV_CALL( Func ) \ +{ \ + Func; \ + CV_CHECK(); \ +} + + +/** Runtime assertion macro */ +#define CV_ASSERT( Condition ) \ +{ \ + if( !(Condition) ) \ + CV_ERROR( CV_StsInternal, "Assertion: " #Condition " failed" ); \ +} + +#define __CV_BEGIN__ { +#define __CV_END__ goto exit; exit: ; } +#define __CV_EXIT__ goto exit + +/** @} core_c */ + +#ifdef __cplusplus +} // extern "C" +#endif + +#ifdef __cplusplus + +//! @addtogroup core_c_glue +//! @{ + +//! class for automatic module/RTTI data registration/unregistration +struct CV_EXPORTS CvType +{ + CvType( const char* type_name, + CvIsInstanceFunc is_instance, CvReleaseFunc release=0, + CvReadFunc read=0, CvWriteFunc write=0, CvCloneFunc clone=0 ); + ~CvType(); + CvTypeInfo* info; + + static CvTypeInfo* first; + static CvTypeInfo* last; +}; + +//! 
@} + +#include "opencv2/core/utility.hpp" + +namespace cv +{ + +//! @addtogroup core_c_glue +//! @{ + +/////////////////////////////////////////// glue /////////////////////////////////////////// + +//! converts array (CvMat or IplImage) to cv::Mat +CV_EXPORTS Mat cvarrToMat(const CvArr* arr, bool copyData=false, + bool allowND=true, int coiMode=0, + AutoBuffer* buf=0); + +static inline Mat cvarrToMatND(const CvArr* arr, bool copyData=false, int coiMode=0) +{ + return cvarrToMat(arr, copyData, true, coiMode); +} + + +//! extracts Channel of Interest from CvMat or IplImage and makes cv::Mat out of it. +CV_EXPORTS void extractImageCOI(const CvArr* arr, OutputArray coiimg, int coi=-1); +//! inserts single-channel cv::Mat into a multi-channel CvMat or IplImage +CV_EXPORTS void insertImageCOI(InputArray coiimg, CvArr* arr, int coi=-1); + + + +////// specialized implementations of DefaultDeleter::operator() for classic OpenCV types ////// + +template<> CV_EXPORTS void DefaultDeleter::operator ()(CvMat* obj) const; +template<> CV_EXPORTS void DefaultDeleter::operator ()(IplImage* obj) const; +template<> CV_EXPORTS void DefaultDeleter::operator ()(CvMatND* obj) const; +template<> CV_EXPORTS void DefaultDeleter::operator ()(CvSparseMat* obj) const; +template<> CV_EXPORTS void DefaultDeleter::operator ()(CvMemStorage* obj) const; + +////////////// convenient wrappers for operating old-style dynamic structures ////////////// + +template class SeqIterator; + +typedef Ptr MemStorage; + +/*! + Template Sequence Class derived from CvSeq + + The class provides more convenient access to sequence elements, + STL-style operations and iterators. + + \note The class is targeted for simple data types, + i.e. no constructors or destructors + are called for the sequence elements. +*/ +template class Seq +{ +public: + typedef SeqIterator<_Tp> iterator; + typedef SeqIterator<_Tp> const_iterator; + + //! the default constructor + Seq(); + //! the constructor for wrapping CvSeq structure. The real element type in CvSeq should match _Tp. + Seq(const CvSeq* seq); + //! creates the empty sequence that resides in the specified storage + Seq(MemStorage& storage, int headerSize = sizeof(CvSeq)); + //! returns read-write reference to the specified element + _Tp& operator [](int idx); + //! returns read-only reference to the specified element + const _Tp& operator[](int idx) const; + //! returns iterator pointing to the beginning of the sequence + SeqIterator<_Tp> begin() const; + //! returns iterator pointing to the element following the last sequence element + SeqIterator<_Tp> end() const; + //! returns the number of elements in the sequence + size_t size() const; + //! returns the type of sequence elements (CV_8UC1 ... CV_64FC(CV_CN_MAX) ...) + int type() const; + //! returns the depth of sequence elements (CV_8U ... CV_64F) + int depth() const; + //! returns the number of channels in each sequence element + int channels() const; + //! returns the size of each sequence element + size_t elemSize() const; + //! returns index of the specified sequence element + size_t index(const _Tp& elem) const; + //! appends the specified element to the end of the sequence + void push_back(const _Tp& elem); + //! appends the specified element to the front of the sequence + void push_front(const _Tp& elem); + //! appends zero or more elements to the end of the sequence + void push_back(const _Tp* elems, size_t count); + //! appends zero or more elements to the front of the sequence + void push_front(const _Tp* elems, size_t count); + //! 
inserts the specified element to the specified position + void insert(int idx, const _Tp& elem); + //! inserts zero or more elements to the specified position + void insert(int idx, const _Tp* elems, size_t count); + //! removes element at the specified position + void remove(int idx); + //! removes the specified subsequence + void remove(const Range& r); + + //! returns reference to the first sequence element + _Tp& front(); + //! returns read-only reference to the first sequence element + const _Tp& front() const; + //! returns reference to the last sequence element + _Tp& back(); + //! returns read-only reference to the last sequence element + const _Tp& back() const; + //! returns true iff the sequence contains no elements + bool empty() const; + + //! removes all the elements from the sequence + void clear(); + //! removes the first element from the sequence + void pop_front(); + //! removes the last element from the sequence + void pop_back(); + //! removes zero or more elements from the beginning of the sequence + void pop_front(_Tp* elems, size_t count); + //! removes zero or more elements from the end of the sequence + void pop_back(_Tp* elems, size_t count); + + //! copies the whole sequence or the sequence slice to the specified vector + void copyTo(std::vector<_Tp>& vec, const Range& range=Range::all()) const; + //! returns the vector containing all the sequence elements + operator std::vector<_Tp>() const; + + CvSeq* seq; +}; + + +/*! + STL-style Sequence Iterator inherited from the CvSeqReader structure +*/ +template class SeqIterator : public CvSeqReader +{ +public: + //! the default constructor + SeqIterator(); + //! the constructor setting the iterator to the beginning or to the end of the sequence + SeqIterator(const Seq<_Tp>& seq, bool seekEnd=false); + //! positions the iterator within the sequence + void seek(size_t pos); + //! reports the current iterator position + size_t tell() const; + //! returns reference to the current sequence element + _Tp& operator *(); + //! returns read-only reference to the current sequence element + const _Tp& operator *() const; + //! moves iterator to the next sequence element + SeqIterator& operator ++(); + //! moves iterator to the next sequence element + SeqIterator operator ++(int) const; + //! moves iterator to the previous sequence element + SeqIterator& operator --(); + //! moves iterator to the previous sequence element + SeqIterator operator --(int) const; + + //! moves iterator forward by the specified offset (possibly negative) + SeqIterator& operator +=(int); + //! 
moves iterator backward by the specified offset (possibly negative) + SeqIterator& operator -=(int); + + // this is index of the current element module seq->total*2 + // (to distinguish between 0 and seq->total) + int index; +}; + + + +// bridge C++ => C Seq API +CV_EXPORTS schar* seqPush( CvSeq* seq, const void* element=0); +CV_EXPORTS schar* seqPushFront( CvSeq* seq, const void* element=0); +CV_EXPORTS void seqPop( CvSeq* seq, void* element=0); +CV_EXPORTS void seqPopFront( CvSeq* seq, void* element=0); +CV_EXPORTS void seqPopMulti( CvSeq* seq, void* elements, + int count, int in_front=0 ); +CV_EXPORTS void seqRemove( CvSeq* seq, int index ); +CV_EXPORTS void clearSeq( CvSeq* seq ); +CV_EXPORTS schar* getSeqElem( const CvSeq* seq, int index ); +CV_EXPORTS void seqRemoveSlice( CvSeq* seq, CvSlice slice ); +CV_EXPORTS void seqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr ); + +template inline Seq<_Tp>::Seq() : seq(0) {} +template inline Seq<_Tp>::Seq( const CvSeq* _seq ) : seq((CvSeq*)_seq) +{ + CV_Assert(!_seq || _seq->elem_size == sizeof(_Tp)); +} + +template inline Seq<_Tp>::Seq( MemStorage& storage, + int headerSize ) +{ + CV_Assert(headerSize >= (int)sizeof(CvSeq)); + seq = cvCreateSeq(DataType<_Tp>::type, headerSize, sizeof(_Tp), storage); +} + +template inline _Tp& Seq<_Tp>::operator [](int idx) +{ return *(_Tp*)getSeqElem(seq, idx); } + +template inline const _Tp& Seq<_Tp>::operator [](int idx) const +{ return *(_Tp*)getSeqElem(seq, idx); } + +template inline SeqIterator<_Tp> Seq<_Tp>::begin() const +{ return SeqIterator<_Tp>(*this); } + +template inline SeqIterator<_Tp> Seq<_Tp>::end() const +{ return SeqIterator<_Tp>(*this, true); } + +template inline size_t Seq<_Tp>::size() const +{ return seq ? seq->total : 0; } + +template inline int Seq<_Tp>::type() const +{ return seq ? CV_MAT_TYPE(seq->flags) : 0; } + +template inline int Seq<_Tp>::depth() const +{ return seq ? CV_MAT_DEPTH(seq->flags) : 0; } + +template inline int Seq<_Tp>::channels() const +{ return seq ? CV_MAT_CN(seq->flags) : 0; } + +template inline size_t Seq<_Tp>::elemSize() const +{ return seq ? 
seq->elem_size : 0; } + +template inline size_t Seq<_Tp>::index(const _Tp& elem) const +{ return cvSeqElemIdx(seq, &elem); } + +template inline void Seq<_Tp>::push_back(const _Tp& elem) +{ cvSeqPush(seq, &elem); } + +template inline void Seq<_Tp>::push_front(const _Tp& elem) +{ cvSeqPushFront(seq, &elem); } + +template inline void Seq<_Tp>::push_back(const _Tp* elem, size_t count) +{ cvSeqPushMulti(seq, elem, (int)count, 0); } + +template inline void Seq<_Tp>::push_front(const _Tp* elem, size_t count) +{ cvSeqPushMulti(seq, elem, (int)count, 1); } + +template inline _Tp& Seq<_Tp>::back() +{ return *(_Tp*)getSeqElem(seq, -1); } + +template inline const _Tp& Seq<_Tp>::back() const +{ return *(const _Tp*)getSeqElem(seq, -1); } + +template inline _Tp& Seq<_Tp>::front() +{ return *(_Tp*)getSeqElem(seq, 0); } + +template inline const _Tp& Seq<_Tp>::front() const +{ return *(const _Tp*)getSeqElem(seq, 0); } + +template inline bool Seq<_Tp>::empty() const +{ return !seq || seq->total == 0; } + +template inline void Seq<_Tp>::clear() +{ if(seq) clearSeq(seq); } + +template inline void Seq<_Tp>::pop_back() +{ seqPop(seq); } + +template inline void Seq<_Tp>::pop_front() +{ seqPopFront(seq); } + +template inline void Seq<_Tp>::pop_back(_Tp* elem, size_t count) +{ seqPopMulti(seq, elem, (int)count, 0); } + +template inline void Seq<_Tp>::pop_front(_Tp* elem, size_t count) +{ seqPopMulti(seq, elem, (int)count, 1); } + +template inline void Seq<_Tp>::insert(int idx, const _Tp& elem) +{ seqInsert(seq, idx, &elem); } + +template inline void Seq<_Tp>::insert(int idx, const _Tp* elems, size_t count) +{ + CvMat m = cvMat(1, count, DataType<_Tp>::type, elems); + seqInsertSlice(seq, idx, &m); +} + +template inline void Seq<_Tp>::remove(int idx) +{ seqRemove(seq, idx); } + +template inline void Seq<_Tp>::remove(const Range& r) +{ seqRemoveSlice(seq, cvSlice(r.start, r.end)); } + +template inline void Seq<_Tp>::copyTo(std::vector<_Tp>& vec, const Range& range) const +{ + size_t len = !seq ? 0 : range == Range::all() ? seq->total : range.end - range.start; + vec.resize(len); + if( seq && len ) + cvCvtSeqToArray(seq, &vec[0], cvSlice(range)); +} + +template inline Seq<_Tp>::operator std::vector<_Tp>() const +{ + std::vector<_Tp> vec; + copyTo(vec); + return vec; +} + +template inline SeqIterator<_Tp>::SeqIterator() +{ memset(this, 0, sizeof(*this)); } + +template inline SeqIterator<_Tp>::SeqIterator(const Seq<_Tp>& _seq, bool seekEnd) +{ + cvStartReadSeq(_seq.seq, this); + index = seekEnd ? 
_seq.seq->total : 0; +} + +template inline void SeqIterator<_Tp>::seek(size_t pos) +{ + cvSetSeqReaderPos(this, (int)pos, false); + index = pos; +} + +template inline size_t SeqIterator<_Tp>::tell() const +{ return index; } + +template inline _Tp& SeqIterator<_Tp>::operator *() +{ return *(_Tp*)ptr; } + +template inline const _Tp& SeqIterator<_Tp>::operator *() const +{ return *(const _Tp*)ptr; } + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator ++() +{ + CV_NEXT_SEQ_ELEM(sizeof(_Tp), *this); + if( ++index >= seq->total*2 ) + index = 0; + return *this; +} + +template inline SeqIterator<_Tp> SeqIterator<_Tp>::operator ++(int) const +{ + SeqIterator<_Tp> it = *this; + ++*this; + return it; +} + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator --() +{ + CV_PREV_SEQ_ELEM(sizeof(_Tp), *this); + if( --index < 0 ) + index = seq->total*2-1; + return *this; +} + +template inline SeqIterator<_Tp> SeqIterator<_Tp>::operator --(int) const +{ + SeqIterator<_Tp> it = *this; + --*this; + return it; +} + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator +=(int delta) +{ + cvSetSeqReaderPos(this, delta, 1); + index += delta; + int n = seq->total*2; + if( index < 0 ) + index += n; + if( index >= n ) + index -= n; + return *this; +} + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator -=(int delta) +{ + return (*this += -delta); +} + +template inline ptrdiff_t operator - (const SeqIterator<_Tp>& a, + const SeqIterator<_Tp>& b) +{ + ptrdiff_t delta = a.index - b.index, n = a.seq->total; + if( delta > n || delta < -n ) + delta += delta < 0 ? n : -n; + return delta; +} + +template inline bool operator == (const SeqIterator<_Tp>& a, + const SeqIterator<_Tp>& b) +{ + return a.seq == b.seq && a.index == b.index; +} + +template inline bool operator != (const SeqIterator<_Tp>& a, + const SeqIterator<_Tp>& b) +{ + return !(a == b); +} + +//! @} + +} // cv + +#endif + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda.hpp new file mode 100644 index 00000000..7d7bb62e --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda.hpp @@ -0,0 +1,1049 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_CUDA_HPP +#define OPENCV_CORE_CUDA_HPP + +#ifndef __cplusplus +# error cuda.hpp header must be compiled as C++ +#endif + +#include "opencv2/core.hpp" +#include "opencv2/core/cuda_types.hpp" + +/** + @defgroup cuda CUDA-accelerated Computer Vision + @{ + @defgroup cudacore Core part + @{ + @defgroup cudacore_init Initialization and Information + @defgroup cudacore_struct Data Structures + @} + @} + */ + +namespace cv { namespace cuda { + +//! @addtogroup cudacore_struct +//! @{ + +//=================================================================================== +// GpuMat +//=================================================================================== + +/** @brief Base storage class for GPU memory with reference counting. + +Its interface matches the Mat interface with the following limitations: + +- no arbitrary dimensions support (only 2D) +- no functions that return references to their data (because references on GPU are not valid for + CPU) +- no expression templates technique support + +Beware that the latter limitation may lead to overloaded matrix operators that cause memory +allocations. The GpuMat class is convertible to cuda::PtrStepSz and cuda::PtrStep so it can be +passed directly to the kernel. + +@note In contrast with Mat, in most cases GpuMat::isContinuous() == false . This means that rows are +aligned to a size depending on the hardware. Single-row GpuMat is always a continuous matrix. + +@note You are not recommended to leave static or global GpuMat variables allocated, that is, to rely +on its destructor. The destruction order of such variables and CUDA context is undefined. GPU memory +release function returns error if the CUDA context has been destroyed before. + +Some member functions are described as a "Blocking Call" while some are described as a +"Non-Blocking Call". Blocking functions are synchronous to host. It is guaranteed that the GPU +operation is finished when the function returns. However, non-blocking functions are asynchronous to +host. Those functions may return even if the GPU operation is not finished. + +Compared to their blocking counterpart, non-blocking functions accept Stream as an additional +argument. If a non-default stream is passed, the GPU operation may overlap with operations in other +streams. 
+ +@sa Mat + */ +class CV_EXPORTS GpuMat +{ +public: + class CV_EXPORTS Allocator + { + public: + virtual ~Allocator() {} + + // allocator must fill data, step and refcount fields + virtual bool allocate(GpuMat* mat, int rows, int cols, size_t elemSize) = 0; + virtual void free(GpuMat* mat) = 0; + }; + + //! default allocator + static Allocator* defaultAllocator(); + static void setDefaultAllocator(Allocator* allocator); + + //! default constructor + explicit GpuMat(Allocator* allocator = defaultAllocator()); + + //! constructs GpuMat of the specified size and type + GpuMat(int rows, int cols, int type, Allocator* allocator = defaultAllocator()); + GpuMat(Size size, int type, Allocator* allocator = defaultAllocator()); + + //! constructs GpuMat and fills it with the specified value _s + GpuMat(int rows, int cols, int type, Scalar s, Allocator* allocator = defaultAllocator()); + GpuMat(Size size, int type, Scalar s, Allocator* allocator = defaultAllocator()); + + //! copy constructor + GpuMat(const GpuMat& m); + + //! constructor for GpuMat headers pointing to user-allocated data + GpuMat(int rows, int cols, int type, void* data, size_t step = Mat::AUTO_STEP); + GpuMat(Size size, int type, void* data, size_t step = Mat::AUTO_STEP); + + //! creates a GpuMat header for a part of the bigger matrix + GpuMat(const GpuMat& m, Range rowRange, Range colRange); + GpuMat(const GpuMat& m, Rect roi); + + //! builds GpuMat from host memory (Blocking call) + explicit GpuMat(InputArray arr, Allocator* allocator = defaultAllocator()); + + //! destructor - calls release() + ~GpuMat(); + + //! assignment operators + GpuMat& operator =(const GpuMat& m); + + //! allocates new GpuMat data unless the GpuMat already has specified size and type + void create(int rows, int cols, int type); + void create(Size size, int type); + + //! decreases reference counter, deallocate the data when reference counter reaches 0 + void release(); + + //! swaps with other smart pointer + void swap(GpuMat& mat); + + /** @brief Performs data upload to GpuMat (Blocking call) + + This function copies data from host memory to device memory. As being a blocking call, it is + guaranteed that the copy operation is finished when this function returns. + */ + void upload(InputArray arr); + + /** @brief Performs data upload to GpuMat (Non-Blocking call) + + This function copies data from host memory to device memory. As being a non-blocking call, this + function may return even if the copy operation is not finished. + + The copy operation may be overlapped with operations in other non-default streams if \p stream is + not the default stream and \p dst is HostMem allocated with HostMem::PAGE_LOCKED option. + */ + void upload(InputArray arr, Stream& stream); + + /** @brief Performs data download from GpuMat (Blocking call) + + This function copies data from device memory to host memory. As being a blocking call, it is + guaranteed that the copy operation is finished when this function returns. + */ + void download(OutputArray dst) const; + + /** @brief Performs data download from GpuMat (Non-Blocking call) + + This function copies data from device memory to host memory. As being a non-blocking call, this + function may return even if the copy operation is not finished. + + The copy operation may be overlapped with operations in other non-default streams if \p stream is + not the default stream and \p dst is HostMem allocated with HostMem::PAGE_LOCKED option. + */ + void download(OutputArray dst, Stream& stream) const; + + //! 
returns deep copy of the GpuMat, i.e. the data is copied + GpuMat clone() const; + + //! copies the GpuMat content to device memory (Blocking call) + void copyTo(OutputArray dst) const; + + //! copies the GpuMat content to device memory (Non-Blocking call) + void copyTo(OutputArray dst, Stream& stream) const; + + //! copies those GpuMat elements to "m" that are marked with non-zero mask elements (Blocking call) + void copyTo(OutputArray dst, InputArray mask) const; + + //! copies those GpuMat elements to "m" that are marked with non-zero mask elements (Non-Blocking call) + void copyTo(OutputArray dst, InputArray mask, Stream& stream) const; + + //! sets some of the GpuMat elements to s (Blocking call) + GpuMat& setTo(Scalar s); + + //! sets some of the GpuMat elements to s (Non-Blocking call) + GpuMat& setTo(Scalar s, Stream& stream); + + //! sets some of the GpuMat elements to s, according to the mask (Blocking call) + GpuMat& setTo(Scalar s, InputArray mask); + + //! sets some of the GpuMat elements to s, according to the mask (Non-Blocking call) + GpuMat& setTo(Scalar s, InputArray mask, Stream& stream); + + //! converts GpuMat to another datatype (Blocking call) + void convertTo(OutputArray dst, int rtype) const; + + //! converts GpuMat to another datatype (Non-Blocking call) + void convertTo(OutputArray dst, int rtype, Stream& stream) const; + + //! converts GpuMat to another datatype with scaling (Blocking call) + void convertTo(OutputArray dst, int rtype, double alpha, double beta = 0.0) const; + + //! converts GpuMat to another datatype with scaling (Non-Blocking call) + void convertTo(OutputArray dst, int rtype, double alpha, Stream& stream) const; + + //! converts GpuMat to another datatype with scaling (Non-Blocking call) + void convertTo(OutputArray dst, int rtype, double alpha, double beta, Stream& stream) const; + + void assignTo(GpuMat& m, int type=-1) const; + + //! returns pointer to y-th row + uchar* ptr(int y = 0); + const uchar* ptr(int y = 0) const; + + //! template version of the above method + template _Tp* ptr(int y = 0); + template const _Tp* ptr(int y = 0) const; + + template operator PtrStepSz<_Tp>() const; + template operator PtrStep<_Tp>() const; + + //! returns a new GpuMat header for the specified row + GpuMat row(int y) const; + + //! returns a new GpuMat header for the specified column + GpuMat col(int x) const; + + //! ... for the specified row span + GpuMat rowRange(int startrow, int endrow) const; + GpuMat rowRange(Range r) const; + + //! ... for the specified column span + GpuMat colRange(int startcol, int endcol) const; + GpuMat colRange(Range r) const; + + //! extracts a rectangular sub-GpuMat (this is a generalized form of row, rowRange etc.) + GpuMat operator ()(Range rowRange, Range colRange) const; + GpuMat operator ()(Rect roi) const; + + //! creates alternative GpuMat header for the same data, with different + //! number of channels and/or different number of rows + GpuMat reshape(int cn, int rows = 0) const; + + //! locates GpuMat header within a parent GpuMat + void locateROI(Size& wholeSize, Point& ofs) const; + + //! moves/resizes the current GpuMat ROI inside the parent GpuMat + GpuMat& adjustROI(int dtop, int dbottom, int dleft, int dright); + + //! returns true iff the GpuMat data is continuous + //! (i.e. when there are no gaps between successive rows) + bool isContinuous() const; + + //! returns element size in bytes + size_t elemSize() const; + + //! returns the size of element channel in bytes + size_t elemSize1() const; + + //! 
returns element type + int type() const; + + //! returns element type + int depth() const; + + //! returns number of channels + int channels() const; + + //! returns step/elemSize1() + size_t step1() const; + + //! returns GpuMat size : width == number of columns, height == number of rows + Size size() const; + + //! returns true if GpuMat data is NULL + bool empty() const; + + //! internal use method: updates the continuity flag + void updateContinuityFlag(); + + /*! includes several bit-fields: + - the magic signature + - continuity flag + - depth + - number of channels + */ + int flags; + + //! the number of rows and columns + int rows, cols; + + //! a distance between successive rows in bytes; includes the gap if any + size_t step; + + //! pointer to the data + uchar* data; + + //! pointer to the reference counter; + //! when GpuMat points to user-allocated data, the pointer is NULL + int* refcount; + + //! helper fields used in locateROI and adjustROI + uchar* datastart; + const uchar* dataend; + + //! allocator + Allocator* allocator; +}; + +/** @brief Creates a continuous matrix. + +@param rows Row count. +@param cols Column count. +@param type Type of the matrix. +@param arr Destination matrix. This parameter changes only if it has a proper type and area ( +\f$\texttt{rows} \times \texttt{cols}\f$ ). + +Matrix is called continuous if its elements are stored continuously, that is, without gaps at the +end of each row. + */ +CV_EXPORTS void createContinuous(int rows, int cols, int type, OutputArray arr); + +/** @brief Ensures that the size of a matrix is big enough and the matrix has a proper type. + +@param rows Minimum desired number of rows. +@param cols Minimum desired number of columns. +@param type Desired matrix type. +@param arr Destination matrix. + +The function does not reallocate memory if the matrix has proper attributes already. + */ +CV_EXPORTS void ensureSizeIsEnough(int rows, int cols, int type, OutputArray arr); + +/** @brief BufferPool for use with CUDA streams + +BufferPool utilizes Stream's allocator to create new buffers for GpuMat's. It is +only useful when enabled with #setBufferPoolUsage. + +@code + setBufferPoolUsage(true); +@endcode + +@note #setBufferPoolUsage must be called \em before any Stream declaration. + +Users may specify custom allocator for Stream and may implement their own stream based +functions utilizing the same underlying GPU memory management. + +If custom allocator is not specified, BufferPool utilizes StackAllocator by +default. StackAllocator allocates a chunk of GPU device memory beforehand, +and when GpuMat is declared later on, it is given the pre-allocated memory. +This kind of strategy reduces the number of calls for memory allocating APIs +such as cudaMalloc or cudaMallocPitch. 
+
+Below is an example that utilizes BufferPool with StackAllocator:
+
+@code
+    #include <opencv2/opencv.hpp>
+
+    using namespace cv;
+    using namespace cv::cuda;
+
+    int main()
+    {
+        setBufferPoolUsage(true);                               // Tell OpenCV that we are going to utilize BufferPool
+        setBufferPoolConfig(getDevice(), 1024 * 1024 * 64, 2);  // Allocate 64 MB, 2 stacks (default is 10 MB, 5 stacks)
+
+        Stream stream1, stream2;                                // Each stream uses 1 stack
+        BufferPool pool1(stream1), pool2(stream2);
+
+        GpuMat d_src1 = pool1.getBuffer(4096, 4096, CV_8UC1);   // 16MB
+        GpuMat d_dst1 = pool1.getBuffer(4096, 4096, CV_8UC3);   // 48MB, pool1 is now full
+
+        GpuMat d_src2 = pool2.getBuffer(1024, 1024, CV_8UC1);   // 1MB
+        GpuMat d_dst2 = pool2.getBuffer(1024, 1024, CV_8UC3);   // 3MB
+
+        cvtColor(d_src1, d_dst1, CV_GRAY2BGR, 0, stream1);
+        cvtColor(d_src2, d_dst2, CV_GRAY2BGR, 0, stream2);
+    }
+@endcode
+
+If we allocate another GpuMat on pool1 in the above example, it will be carried out by
+the DefaultAllocator since the stack for pool1 is full.
+
+@code
+    GpuMat d_add1 = pool1.getBuffer(1024, 1024, CV_8UC1);   // Stack for pool1 is full, memory is allocated with DefaultAllocator
+@endcode
+
+If a third stream is declared in the above example, allocating with #getBuffer
+within that stream will also be carried out by the DefaultAllocator because we've run out of
+stacks.
+
+@code
+    Stream stream3;                                          // Only 2 stacks were allocated, we've run out of stacks
+    BufferPool pool3(stream3);
+    GpuMat d_src3 = pool3.getBuffer(1024, 1024, CV_8UC1);    // Memory is allocated with DefaultAllocator
+@endcode
+
+@warning When utilizing StackAllocator, deallocation order is important.
+
+Just like a stack, deallocation must be done in LIFO order. Below is an example of
+erroneous usage that violates LIFO rule. If OpenCV is compiled in Debug mode, this
+sample code will emit CV_Assert error.
+
+@code
+    int main()
+    {
+        setBufferPoolUsage(true);                               // Tell OpenCV that we are going to utilize BufferPool
+        Stream stream;                                          // A default size (10 MB) stack is allocated to this stream
+        BufferPool pool(stream);
+
+        GpuMat mat1 = pool.getBuffer(1024, 1024, CV_8UC1);      // Allocate mat1 (1MB)
+        GpuMat mat2 = pool.getBuffer(1024, 1024, CV_8UC1);      // Allocate mat2 (1MB)
+
+        mat1.release();                                         // erroneous usage : mat2 must be deallocated before mat1
+    }
+@endcode
+
+Since C++ local variables are destroyed in the reverse order of construction,
+the code sample below satisfies the LIFO rule. Local GpuMat's are deallocated
+and the corresponding memory is automatically returned to the pool for later usage.
+ +@code + int main() + { + setBufferPoolUsage(true); // Tell OpenCV that we are going to utilize BufferPool + setBufferPoolConfig(getDevice(), 1024 * 1024 * 64, 2); // Allocate 64 MB, 2 stacks (default is 10 MB, 5 stacks) + + Stream stream1, stream2; // Each stream uses 1 stack + BufferPool pool1(stream1), pool2(stream2); + + for (int i = 0; i < 10; i++) + { + GpuMat d_src1 = pool1.getBuffer(4096, 4096, CV_8UC1); // 16MB + GpuMat d_dst1 = pool1.getBuffer(4096, 4096, CV_8UC3); // 48MB, pool1 is now full + + GpuMat d_src2 = pool2.getBuffer(1024, 1024, CV_8UC1); // 1MB + GpuMat d_dst2 = pool2.getBuffer(1024, 1024, CV_8UC3); // 3MB + + d_src1.setTo(Scalar(i), stream1); + d_src2.setTo(Scalar(i), stream2); + + cvtColor(d_src1, d_dst1, CV_GRAY2BGR, 0, stream1); + cvtColor(d_src2, d_dst2, CV_GRAY2BGR, 0, stream2); + // The order of destruction of the local variables is: + // d_dst2 => d_src2 => d_dst1 => d_src1 + // LIFO rule is satisfied, this code runs without error + } + } +@endcode + */ +class CV_EXPORTS BufferPool +{ +public: + + //! Gets the BufferPool for the given stream. + explicit BufferPool(Stream& stream); + + //! Allocates a new GpuMat of given size and type. + GpuMat getBuffer(int rows, int cols, int type); + + //! Allocates a new GpuMat of given size and type. + GpuMat getBuffer(Size size, int type) { return getBuffer(size.height, size.width, type); } + + //! Returns the allocator associated with the stream. + Ptr getAllocator() const { return allocator_; } + +private: + Ptr allocator_; +}; + +//! BufferPool management (must be called before Stream creation) +CV_EXPORTS void setBufferPoolUsage(bool on); +CV_EXPORTS void setBufferPoolConfig(int deviceId, size_t stackSize, int stackCount); + +//=================================================================================== +// HostMem +//=================================================================================== + +/** @brief Class with reference counting wrapping special memory type allocation functions from CUDA. + +Its interface is also Mat-like but with additional memory type parameters. + +- **PAGE_LOCKED** sets a page locked memory type used commonly for fast and asynchronous + uploading/downloading data from/to GPU. +- **SHARED** specifies a zero copy memory allocation that enables mapping the host memory to GPU + address space, if supported. +- **WRITE_COMBINED** sets the write combined buffer that is not cached by CPU. Such buffers are + used to supply GPU with data when GPU only reads it. The advantage is a better CPU cache + utilization. + +@note Allocation size of such memory types is usually limited. For more details, see *CUDA 2.2 +Pinned Memory APIs* document or *CUDA C Programming Guide*. + */ +class CV_EXPORTS HostMem +{ +public: + enum AllocType { PAGE_LOCKED = 1, SHARED = 2, WRITE_COMBINED = 4 }; + + static MatAllocator* getAllocator(AllocType alloc_type = PAGE_LOCKED); + + explicit HostMem(AllocType alloc_type = PAGE_LOCKED); + + HostMem(const HostMem& m); + + HostMem(int rows, int cols, int type, AllocType alloc_type = PAGE_LOCKED); + HostMem(Size size, int type, AllocType alloc_type = PAGE_LOCKED); + + //! creates from host memory with coping data + explicit HostMem(InputArray arr, AllocType alloc_type = PAGE_LOCKED); + + ~HostMem(); + + HostMem& operator =(const HostMem& m); + + //! swaps with other smart pointer + void swap(HostMem& b); + + //! returns deep copy of the matrix, i.e. the data is copied + HostMem clone() const; + + //! 
allocates new matrix data unless the matrix already has specified size and type. + void create(int rows, int cols, int type); + void create(Size size, int type); + + //! creates alternative HostMem header for the same data, with different + //! number of channels and/or different number of rows + HostMem reshape(int cn, int rows = 0) const; + + //! decrements reference counter and released memory if needed. + void release(); + + //! returns matrix header with disabled reference counting for HostMem data. + Mat createMatHeader() const; + + /** @brief Maps CPU memory to GPU address space and creates the cuda::GpuMat header without reference counting + for it. + + This can be done only if memory was allocated with the SHARED flag and if it is supported by the + hardware. Laptops often share video and CPU memory, so address spaces can be mapped, which + eliminates an extra copy. + */ + GpuMat createGpuMatHeader() const; + + // Please see cv::Mat for descriptions + bool isContinuous() const; + size_t elemSize() const; + size_t elemSize1() const; + int type() const; + int depth() const; + int channels() const; + size_t step1() const; + Size size() const; + bool empty() const; + + // Please see cv::Mat for descriptions + int flags; + int rows, cols; + size_t step; + + uchar* data; + int* refcount; + + uchar* datastart; + const uchar* dataend; + + AllocType alloc_type; +}; + +/** @brief Page-locks the memory of matrix and maps it for the device(s). + +@param m Input matrix. + */ +CV_EXPORTS void registerPageLocked(Mat& m); + +/** @brief Unmaps the memory of matrix and makes it pageable again. + +@param m Input matrix. + */ +CV_EXPORTS void unregisterPageLocked(Mat& m); + +//=================================================================================== +// Stream +//=================================================================================== + +/** @brief This class encapsulates a queue of asynchronous calls. + +@note Currently, you may face problems if an operation is enqueued twice with different data. Some +functions use the constant GPU memory, and next call may update the memory before the previous one +has been finished. But calling different operations asynchronously is safe because each operation +has its own constant buffer. Memory copy/upload/download/set operations to the buffers you hold are +also safe. + +@note The Stream class is not thread-safe. Please use different Stream objects for different CPU threads. + +@code +void thread1() +{ + cv::cuda::Stream stream1; + cv::cuda::func1(..., stream1); +} + +void thread2() +{ + cv::cuda::Stream stream2; + cv::cuda::func2(..., stream2); +} +@endcode + +@note By default all CUDA routines are launched in Stream::Null() object, if the stream is not specified by user. +In multi-threading environment the stream objects must be passed explicitly (see previous note). + */ +class CV_EXPORTS Stream +{ + typedef void (Stream::*bool_type)() const; + void this_type_does_not_support_comparisons() const {} + +public: + typedef void (*StreamCallback)(int status, void* userData); + + //! creates a new asynchronous stream + Stream(); + + //! creates a new asynchronous stream with custom allocator + Stream(const Ptr& allocator); + + /** @brief Returns true if the current stream queue is finished. Otherwise, it returns false. + */ + bool queryIfComplete() const; + + /** @brief Blocks the current CPU thread until all operations in the stream are complete. + */ + void waitForCompletion(); + + /** @brief Makes a compute stream wait on an event. 
+ */ + void waitEvent(const Event& event); + + /** @brief Adds a callback to be called on the host after all currently enqueued items in the stream have + completed. + + @note Callbacks must not make any CUDA API calls. Callbacks must not perform any synchronization + that may depend on outstanding device work or other callbacks that are not mandated to run earlier. + Callbacks without a mandated order (in independent streams) execute in undefined order and may be + serialized. + */ + void enqueueHostCallback(StreamCallback callback, void* userData); + + //! return Stream object for default CUDA stream + static Stream& Null(); + + //! returns true if stream object is not default (!= 0) + operator bool_type() const; + + class Impl; + +private: + Ptr impl_; + Stream(const Ptr& impl); + + friend struct StreamAccessor; + friend class BufferPool; + friend class DefaultDeviceInitializer; +}; + +class CV_EXPORTS Event +{ +public: + enum CreateFlags + { + DEFAULT = 0x00, /**< Default event flag */ + BLOCKING_SYNC = 0x01, /**< Event uses blocking synchronization */ + DISABLE_TIMING = 0x02, /**< Event will not record timing data */ + INTERPROCESS = 0x04 /**< Event is suitable for interprocess use. DisableTiming must be set */ + }; + + explicit Event(CreateFlags flags = DEFAULT); + + //! records an event + void record(Stream& stream = Stream::Null()); + + //! queries an event's status + bool queryIfComplete() const; + + //! waits for an event to complete + void waitForCompletion(); + + //! computes the elapsed time between events + static float elapsedTime(const Event& start, const Event& end); + + class Impl; + +private: + Ptr impl_; + Event(const Ptr& impl); + + friend struct EventAccessor; +}; + +//! @} cudacore_struct + +//=================================================================================== +// Initialization & Info +//=================================================================================== + +//! @addtogroup cudacore_init +//! @{ + +/** @brief Returns the number of installed CUDA-enabled devices. + +Use this function before any other CUDA functions calls. If OpenCV is compiled without CUDA support, +this function returns 0. If the CUDA driver is not installed, or is incompatible, this function +returns -1. + */ +CV_EXPORTS int getCudaEnabledDeviceCount(); + +/** @brief Sets a device and initializes it for the current thread. + +@param device System index of a CUDA device starting with 0. + +If the call of this function is omitted, a default device is initialized at the fist CUDA usage. + */ +CV_EXPORTS void setDevice(int device); + +/** @brief Returns the current device index set by cuda::setDevice or initialized by default. + */ +CV_EXPORTS int getDevice(); + +/** @brief Explicitly destroys and cleans up all resources associated with the current device in the current +process. + +Any subsequent API call to this device will reinitialize the device. + */ +CV_EXPORTS void resetDevice(); + +/** @brief Enumeration providing CUDA computing features. 
+ */ +enum FeatureSet +{ + FEATURE_SET_COMPUTE_10 = 10, + FEATURE_SET_COMPUTE_11 = 11, + FEATURE_SET_COMPUTE_12 = 12, + FEATURE_SET_COMPUTE_13 = 13, + FEATURE_SET_COMPUTE_20 = 20, + FEATURE_SET_COMPUTE_21 = 21, + FEATURE_SET_COMPUTE_30 = 30, + FEATURE_SET_COMPUTE_32 = 32, + FEATURE_SET_COMPUTE_35 = 35, + FEATURE_SET_COMPUTE_50 = 50, + + GLOBAL_ATOMICS = FEATURE_SET_COMPUTE_11, + SHARED_ATOMICS = FEATURE_SET_COMPUTE_12, + NATIVE_DOUBLE = FEATURE_SET_COMPUTE_13, + WARP_SHUFFLE_FUNCTIONS = FEATURE_SET_COMPUTE_30, + DYNAMIC_PARALLELISM = FEATURE_SET_COMPUTE_35 +}; + +//! checks whether current device supports the given feature +CV_EXPORTS bool deviceSupports(FeatureSet feature_set); + +/** @brief Class providing a set of static methods to check what NVIDIA\* card architecture the CUDA module was +built for. + +According to the CUDA C Programming Guide Version 3.2: "PTX code produced for some specific compute +capability can always be compiled to binary code of greater or equal compute capability". + */ +class CV_EXPORTS TargetArchs +{ +public: + /** @brief The following method checks whether the module was built with the support of the given feature: + + @param feature_set Features to be checked. See :ocvcuda::FeatureSet. + */ + static bool builtWith(FeatureSet feature_set); + + /** @brief There is a set of methods to check whether the module contains intermediate (PTX) or binary CUDA + code for the given architecture(s): + + @param major Major compute capability version. + @param minor Minor compute capability version. + */ + static bool has(int major, int minor); + static bool hasPtx(int major, int minor); + static bool hasBin(int major, int minor); + + static bool hasEqualOrLessPtx(int major, int minor); + static bool hasEqualOrGreater(int major, int minor); + static bool hasEqualOrGreaterPtx(int major, int minor); + static bool hasEqualOrGreaterBin(int major, int minor); +}; + +/** @brief Class providing functionality for querying the specified GPU properties. + */ +class CV_EXPORTS DeviceInfo +{ +public: + //! creates DeviceInfo object for the current GPU + DeviceInfo(); + + /** @brief The constructors. + + @param device_id System index of the CUDA device starting with 0. + + Constructs the DeviceInfo object for the specified device. If device_id parameter is missed, it + constructs an object for the current device. + */ + DeviceInfo(int device_id); + + /** @brief Returns system index of the CUDA device starting with 0. + */ + int deviceID() const; + + //! ASCII string identifying device + const char* name() const; + + //! global memory available on device in bytes + size_t totalGlobalMem() const; + + //! shared memory available per block in bytes + size_t sharedMemPerBlock() const; + + //! 32-bit registers available per block + int regsPerBlock() const; + + //! warp size in threads + int warpSize() const; + + //! maximum pitch in bytes allowed by memory copies + size_t memPitch() const; + + //! maximum number of threads per block + int maxThreadsPerBlock() const; + + //! maximum size of each dimension of a block + Vec3i maxThreadsDim() const; + + //! maximum size of each dimension of a grid + Vec3i maxGridSize() const; + + //! clock frequency in kilohertz + int clockRate() const; + + //! constant memory available on device in bytes + size_t totalConstMem() const; + + //! major compute capability + int majorVersion() const; + + //! minor compute capability + int minorVersion() const; + + //! alignment requirement for textures + size_t textureAlignment() const; + + //! 
pitch alignment requirement for texture references bound to pitched memory + size_t texturePitchAlignment() const; + + //! number of multiprocessors on device + int multiProcessorCount() const; + + //! specified whether there is a run time limit on kernels + bool kernelExecTimeoutEnabled() const; + + //! device is integrated as opposed to discrete + bool integrated() const; + + //! device can map host memory with cudaHostAlloc/cudaHostGetDevicePointer + bool canMapHostMemory() const; + + enum ComputeMode + { + ComputeModeDefault, /**< default compute mode (Multiple threads can use cudaSetDevice with this device) */ + ComputeModeExclusive, /**< compute-exclusive-thread mode (Only one thread in one process will be able to use cudaSetDevice with this device) */ + ComputeModeProhibited, /**< compute-prohibited mode (No threads can use cudaSetDevice with this device) */ + ComputeModeExclusiveProcess /**< compute-exclusive-process mode (Many threads in one process will be able to use cudaSetDevice with this device) */ + }; + + //! compute mode + ComputeMode computeMode() const; + + //! maximum 1D texture size + int maxTexture1D() const; + + //! maximum 1D mipmapped texture size + int maxTexture1DMipmap() const; + + //! maximum size for 1D textures bound to linear memory + int maxTexture1DLinear() const; + + //! maximum 2D texture dimensions + Vec2i maxTexture2D() const; + + //! maximum 2D mipmapped texture dimensions + Vec2i maxTexture2DMipmap() const; + + //! maximum dimensions (width, height, pitch) for 2D textures bound to pitched memory + Vec3i maxTexture2DLinear() const; + + //! maximum 2D texture dimensions if texture gather operations have to be performed + Vec2i maxTexture2DGather() const; + + //! maximum 3D texture dimensions + Vec3i maxTexture3D() const; + + //! maximum Cubemap texture dimensions + int maxTextureCubemap() const; + + //! maximum 1D layered texture dimensions + Vec2i maxTexture1DLayered() const; + + //! maximum 2D layered texture dimensions + Vec3i maxTexture2DLayered() const; + + //! maximum Cubemap layered texture dimensions + Vec2i maxTextureCubemapLayered() const; + + //! maximum 1D surface size + int maxSurface1D() const; + + //! maximum 2D surface dimensions + Vec2i maxSurface2D() const; + + //! maximum 3D surface dimensions + Vec3i maxSurface3D() const; + + //! maximum 1D layered surface dimensions + Vec2i maxSurface1DLayered() const; + + //! maximum 2D layered surface dimensions + Vec3i maxSurface2DLayered() const; + + //! maximum Cubemap surface dimensions + int maxSurfaceCubemap() const; + + //! maximum Cubemap layered surface dimensions + Vec2i maxSurfaceCubemapLayered() const; + + //! alignment requirements for surfaces + size_t surfaceAlignment() const; + + //! device can possibly execute multiple kernels concurrently + bool concurrentKernels() const; + + //! device has ECC support enabled + bool ECCEnabled() const; + + //! PCI bus ID of the device + int pciBusID() const; + + //! PCI device ID of the device + int pciDeviceID() const; + + //! PCI domain ID of the device + int pciDomainID() const; + + //! true if device is a Tesla device using TCC driver, false otherwise + bool tccDriver() const; + + //! number of asynchronous engines + int asyncEngineCount() const; + + //! device shares a unified address space with the host + bool unifiedAddressing() const; + + //! peak memory clock frequency in kilohertz + int memoryClockRate() const; + + //! global memory bus width in bits + int memoryBusWidth() const; + + //! 
size of L2 cache in bytes + int l2CacheSize() const; + + //! maximum resident threads per multiprocessor + int maxThreadsPerMultiProcessor() const; + + //! gets free and total device memory + void queryMemory(size_t& totalMemory, size_t& freeMemory) const; + size_t freeMemory() const; + size_t totalMemory() const; + + /** @brief Provides information on CUDA feature support. + + @param feature_set Features to be checked. See cuda::FeatureSet. + + This function returns true if the device has the specified CUDA feature. Otherwise, it returns false + */ + bool supports(FeatureSet feature_set) const; + + /** @brief Checks the CUDA module and device compatibility. + + This function returns true if the CUDA module can be run on the specified device. Otherwise, it + returns false . + */ + bool isCompatible() const; + +private: + int device_id_; +}; + +CV_EXPORTS void printCudaDeviceInfo(int device); +CV_EXPORTS void printShortCudaDeviceInfo(int device); + +/** @brief Converts an array to half precision floating number. + +@param _src input array. +@param _dst output array. +@param stream Stream for the asynchronous version. +@sa convertFp16 +*/ +CV_EXPORTS void convertFp16(InputArray _src, OutputArray _dst, Stream& stream = Stream::Null()); + +//! @} cudacore_init + +}} // namespace cv { namespace cuda { + + +#include "opencv2/core/cuda.inl.hpp" + +#endif /* OPENCV_CORE_CUDA_HPP */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda.inl.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda.inl.hpp new file mode 100644 index 00000000..35ae2e49 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda.inl.hpp @@ -0,0 +1,631 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_CUDAINL_HPP +#define OPENCV_CORE_CUDAINL_HPP + +#include "opencv2/core/cuda.hpp" + +//! @cond IGNORED + +namespace cv { namespace cuda { + +//=================================================================================== +// GpuMat +//=================================================================================== + +inline +GpuMat::GpuMat(Allocator* allocator_) + : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_) +{} + +inline +GpuMat::GpuMat(int rows_, int cols_, int type_, Allocator* allocator_) + : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_) +{ + if (rows_ > 0 && cols_ > 0) + create(rows_, cols_, type_); +} + +inline +GpuMat::GpuMat(Size size_, int type_, Allocator* allocator_) + : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_) +{ + if (size_.height > 0 && size_.width > 0) + create(size_.height, size_.width, type_); +} + +inline +GpuMat::GpuMat(int rows_, int cols_, int type_, Scalar s_, Allocator* allocator_) + : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_) +{ + if (rows_ > 0 && cols_ > 0) + { + create(rows_, cols_, type_); + setTo(s_); + } +} + +inline +GpuMat::GpuMat(Size size_, int type_, Scalar s_, Allocator* allocator_) + : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_) +{ + if (size_.height > 0 && size_.width > 0) + { + create(size_.height, size_.width, type_); + setTo(s_); + } +} + +inline +GpuMat::GpuMat(const GpuMat& m) + : flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data), refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), allocator(m.allocator) +{ + if (refcount) + CV_XADD(refcount, 1); +} + +inline +GpuMat::GpuMat(InputArray arr, Allocator* allocator_) : + flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), allocator(allocator_) +{ + upload(arr); +} + +inline +GpuMat::~GpuMat() +{ + release(); +} + +inline +GpuMat& GpuMat::operator =(const GpuMat& m) +{ + if (this != &m) + { + GpuMat temp(m); + swap(temp); + } + + return *this; +} + +inline +void GpuMat::create(Size size_, int type_) +{ + create(size_.height, size_.width, type_); +} + +inline +void GpuMat::swap(GpuMat& b) +{ + std::swap(flags, b.flags); + std::swap(rows, b.rows); + std::swap(cols, b.cols); + std::swap(step, b.step); + std::swap(data, b.data); + std::swap(datastart, b.datastart); + std::swap(dataend, b.dataend); + std::swap(refcount, b.refcount); + std::swap(allocator, b.allocator); +} + +inline +GpuMat GpuMat::clone() const +{ + GpuMat m; + copyTo(m); + return m; +} + +inline +void GpuMat::copyTo(OutputArray dst, InputArray mask) const +{ + copyTo(dst, mask, Stream::Null()); +} + +inline +GpuMat& GpuMat::setTo(Scalar s) +{ + return setTo(s, Stream::Null()); +} + 
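// Illustrative sketch (editorial addition, not part of the upstream OpenCV header):
// a minimal example of the blocking vs. non-blocking transfer paths documented for
// GpuMat in cuda.hpp above. The helper name demoTransfer and the 640x480 CV_8UC1
// buffer are assumptions made purely for illustration; only APIs declared in these
// headers are used.
static inline void demoTransfer()
{
    Mat host(480, 640, CV_8UC1, Scalar::all(0));   // host-side source data

    // Blocking path: upload()/download() return only after the copy has completed.
    GpuMat d_img;
    d_img.upload(host);
    Mat back;
    d_img.download(back);

    // Non-blocking path: pair a Stream with page-locked HostMem so the copies may
    // overlap with work queued on other, non-default streams.
    Stream stream;
    HostMem pinned(host.size(), host.type(), HostMem::PAGE_LOCKED);
    Mat pinnedHeader = pinned.createMatHeader();
    host.copyTo(pinnedHeader);
    d_img.upload(pinned, stream);

    HostMem result(host.size(), host.type(), HostMem::PAGE_LOCKED);
    d_img.download(result, stream);
    stream.waitForCompletion();                    // data in 'result' is valid only after this
}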
+inline +GpuMat& GpuMat::setTo(Scalar s, InputArray mask) +{ + return setTo(s, mask, Stream::Null()); +} + +inline +void GpuMat::convertTo(OutputArray dst, int rtype) const +{ + convertTo(dst, rtype, Stream::Null()); +} + +inline +void GpuMat::convertTo(OutputArray dst, int rtype, double alpha, double beta) const +{ + convertTo(dst, rtype, alpha, beta, Stream::Null()); +} + +inline +void GpuMat::convertTo(OutputArray dst, int rtype, double alpha, Stream& stream) const +{ + convertTo(dst, rtype, alpha, 0.0, stream); +} + +inline +void GpuMat::assignTo(GpuMat& m, int _type) const +{ + if (_type < 0) + m = *this; + else + convertTo(m, _type); +} + +inline +uchar* GpuMat::ptr(int y) +{ + CV_DbgAssert( (unsigned)y < (unsigned)rows ); + return data + step * y; +} + +inline +const uchar* GpuMat::ptr(int y) const +{ + CV_DbgAssert( (unsigned)y < (unsigned)rows ); + return data + step * y; +} + +template inline +_Tp* GpuMat::ptr(int y) +{ + return (_Tp*)ptr(y); +} + +template inline +const _Tp* GpuMat::ptr(int y) const +{ + return (const _Tp*)ptr(y); +} + +template inline +GpuMat::operator PtrStepSz() const +{ + return PtrStepSz(rows, cols, (T*)data, step); +} + +template inline +GpuMat::operator PtrStep() const +{ + return PtrStep((T*)data, step); +} + +inline +GpuMat GpuMat::row(int y) const +{ + return GpuMat(*this, Range(y, y+1), Range::all()); +} + +inline +GpuMat GpuMat::col(int x) const +{ + return GpuMat(*this, Range::all(), Range(x, x+1)); +} + +inline +GpuMat GpuMat::rowRange(int startrow, int endrow) const +{ + return GpuMat(*this, Range(startrow, endrow), Range::all()); +} + +inline +GpuMat GpuMat::rowRange(Range r) const +{ + return GpuMat(*this, r, Range::all()); +} + +inline +GpuMat GpuMat::colRange(int startcol, int endcol) const +{ + return GpuMat(*this, Range::all(), Range(startcol, endcol)); +} + +inline +GpuMat GpuMat::colRange(Range r) const +{ + return GpuMat(*this, Range::all(), r); +} + +inline +GpuMat GpuMat::operator ()(Range rowRange_, Range colRange_) const +{ + return GpuMat(*this, rowRange_, colRange_); +} + +inline +GpuMat GpuMat::operator ()(Rect roi) const +{ + return GpuMat(*this, roi); +} + +inline +bool GpuMat::isContinuous() const +{ + return (flags & Mat::CONTINUOUS_FLAG) != 0; +} + +inline +size_t GpuMat::elemSize() const +{ + return CV_ELEM_SIZE(flags); +} + +inline +size_t GpuMat::elemSize1() const +{ + return CV_ELEM_SIZE1(flags); +} + +inline +int GpuMat::type() const +{ + return CV_MAT_TYPE(flags); +} + +inline +int GpuMat::depth() const +{ + return CV_MAT_DEPTH(flags); +} + +inline +int GpuMat::channels() const +{ + return CV_MAT_CN(flags); +} + +inline +size_t GpuMat::step1() const +{ + return step / elemSize1(); +} + +inline +Size GpuMat::size() const +{ + return Size(cols, rows); +} + +inline +bool GpuMat::empty() const +{ + return data == 0; +} + +static inline +GpuMat createContinuous(int rows, int cols, int type) +{ + GpuMat m; + createContinuous(rows, cols, type, m); + return m; +} + +static inline +void createContinuous(Size size, int type, OutputArray arr) +{ + createContinuous(size.height, size.width, type, arr); +} + +static inline +GpuMat createContinuous(Size size, int type) +{ + GpuMat m; + createContinuous(size, type, m); + return m; +} + +static inline +void ensureSizeIsEnough(Size size, int type, OutputArray arr) +{ + ensureSizeIsEnough(size.height, size.width, type, arr); +} + +static inline +void swap(GpuMat& a, GpuMat& b) +{ + a.swap(b); +} + +//=================================================================================== +// 
HostMem +//=================================================================================== + +inline +HostMem::HostMem(AllocType alloc_type_) + : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_) +{ +} + +inline +HostMem::HostMem(const HostMem& m) + : flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data), refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), alloc_type(m.alloc_type) +{ + if( refcount ) + CV_XADD(refcount, 1); +} + +inline +HostMem::HostMem(int rows_, int cols_, int type_, AllocType alloc_type_) + : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_) +{ + if (rows_ > 0 && cols_ > 0) + create(rows_, cols_, type_); +} + +inline +HostMem::HostMem(Size size_, int type_, AllocType alloc_type_) + : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_) +{ + if (size_.height > 0 && size_.width > 0) + create(size_.height, size_.width, type_); +} + +inline +HostMem::HostMem(InputArray arr, AllocType alloc_type_) + : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), alloc_type(alloc_type_) +{ + arr.getMat().copyTo(*this); +} + +inline +HostMem::~HostMem() +{ + release(); +} + +inline +HostMem& HostMem::operator =(const HostMem& m) +{ + if (this != &m) + { + HostMem temp(m); + swap(temp); + } + + return *this; +} + +inline +void HostMem::swap(HostMem& b) +{ + std::swap(flags, b.flags); + std::swap(rows, b.rows); + std::swap(cols, b.cols); + std::swap(step, b.step); + std::swap(data, b.data); + std::swap(datastart, b.datastart); + std::swap(dataend, b.dataend); + std::swap(refcount, b.refcount); + std::swap(alloc_type, b.alloc_type); +} + +inline +HostMem HostMem::clone() const +{ + HostMem m(size(), type(), alloc_type); + createMatHeader().copyTo(m); + return m; +} + +inline +void HostMem::create(Size size_, int type_) +{ + create(size_.height, size_.width, type_); +} + +inline +Mat HostMem::createMatHeader() const +{ + return Mat(size(), type(), data, step); +} + +inline +bool HostMem::isContinuous() const +{ + return (flags & Mat::CONTINUOUS_FLAG) != 0; +} + +inline +size_t HostMem::elemSize() const +{ + return CV_ELEM_SIZE(flags); +} + +inline +size_t HostMem::elemSize1() const +{ + return CV_ELEM_SIZE1(flags); +} + +inline +int HostMem::type() const +{ + return CV_MAT_TYPE(flags); +} + +inline +int HostMem::depth() const +{ + return CV_MAT_DEPTH(flags); +} + +inline +int HostMem::channels() const +{ + return CV_MAT_CN(flags); +} + +inline +size_t HostMem::step1() const +{ + return step / elemSize1(); +} + +inline +Size HostMem::size() const +{ + return Size(cols, rows); +} + +inline +bool HostMem::empty() const +{ + return data == 0; +} + +static inline +void swap(HostMem& a, HostMem& b) +{ + a.swap(b); +} + +//=================================================================================== +// Stream +//=================================================================================== + +inline +Stream::Stream(const Ptr& impl) + : impl_(impl) +{ +} + +//=================================================================================== +// Event +//=================================================================================== + +inline +Event::Event(const Ptr& impl) + : impl_(impl) +{ +} + +//=================================================================================== +// Initialization & Info 
+//=================================================================================== + +inline +bool TargetArchs::has(int major, int minor) +{ + return hasPtx(major, minor) || hasBin(major, minor); +} + +inline +bool TargetArchs::hasEqualOrGreater(int major, int minor) +{ + return hasEqualOrGreaterPtx(major, minor) || hasEqualOrGreaterBin(major, minor); +} + +inline +DeviceInfo::DeviceInfo() +{ + device_id_ = getDevice(); +} + +inline +DeviceInfo::DeviceInfo(int device_id) +{ + CV_Assert( device_id >= 0 && device_id < getCudaEnabledDeviceCount() ); + device_id_ = device_id; +} + +inline +int DeviceInfo::deviceID() const +{ + return device_id_; +} + +inline +size_t DeviceInfo::freeMemory() const +{ + size_t _totalMemory = 0, _freeMemory = 0; + queryMemory(_totalMemory, _freeMemory); + return _freeMemory; +} + +inline +size_t DeviceInfo::totalMemory() const +{ + size_t _totalMemory = 0, _freeMemory = 0; + queryMemory(_totalMemory, _freeMemory); + return _totalMemory; +} + +inline +bool DeviceInfo::supports(FeatureSet feature_set) const +{ + int version = majorVersion() * 10 + minorVersion(); + return version >= feature_set; +} + + +}} // namespace cv { namespace cuda { + +//=================================================================================== +// Mat +//=================================================================================== + +namespace cv { + +inline +Mat::Mat(const cuda::GpuMat& m) + : flags(0), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), datalimit(0), allocator(0), u(0), size(&rows) +{ + m.download(*this); +} + +} + +//! @endcond + +#endif // OPENCV_CORE_CUDAINL_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/block.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/block.hpp new file mode 100644 index 00000000..c277f0ea --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/block.hpp @@ -0,0 +1,211 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_DEVICE_BLOCK_HPP +#define OPENCV_CUDA_DEVICE_BLOCK_HPP + +/** @file + * @deprecated Use @ref cudev instead. + */ + +//! @cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + struct Block + { + static __device__ __forceinline__ unsigned int id() + { + return blockIdx.x; + } + + static __device__ __forceinline__ unsigned int stride() + { + return blockDim.x * blockDim.y * blockDim.z; + } + + static __device__ __forceinline__ void sync() + { + __syncthreads(); + } + + static __device__ __forceinline__ int flattenedThreadId() + { + return threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; + } + + template + static __device__ __forceinline__ void fill(It beg, It end, const T& value) + { + int STRIDE = stride(); + It t = beg + flattenedThreadId(); + + for(; t < end; t += STRIDE) + *t = value; + } + + template + static __device__ __forceinline__ void yota(OutIt beg, OutIt end, T value) + { + int STRIDE = stride(); + int tid = flattenedThreadId(); + value += tid; + + for(OutIt t = beg + tid; t < end; t += STRIDE, value += STRIDE) + *t = value; + } + + template + static __device__ __forceinline__ void copy(InIt beg, InIt end, OutIt out) + { + int STRIDE = stride(); + InIt t = beg + flattenedThreadId(); + OutIt o = out + (t - beg); + + for(; t < end; t += STRIDE, o += STRIDE) + *o = *t; + } + + template + static __device__ __forceinline__ void transform(InIt beg, InIt end, OutIt out, UnOp op) + { + int STRIDE = stride(); + InIt t = beg + flattenedThreadId(); + OutIt o = out + (t - beg); + + for(; t < end; t += STRIDE, o += STRIDE) + *o = op(*t); + } + + template + static __device__ __forceinline__ void transform(InIt1 beg1, InIt1 end1, InIt2 beg2, OutIt out, BinOp op) + { + int STRIDE = stride(); + InIt1 t1 = beg1 + flattenedThreadId(); + InIt2 t2 = beg2 + flattenedThreadId(); + OutIt o = out + (t1 - beg1); + + for(; t1 < end1; t1 += STRIDE, t2 += STRIDE, o += STRIDE) + *o = op(*t1, *t2); + } + + template + static __device__ __forceinline__ void reduce(volatile T* buffer, BinOp op) + { + int tid = flattenedThreadId(); + T val = buffer[tid]; + + if (CTA_SIZE >= 1024) { if (tid < 512) buffer[tid] = val = op(val, buffer[tid + 512]); __syncthreads(); } + if (CTA_SIZE >= 512) { if (tid < 256) buffer[tid] = val = op(val, buffer[tid + 256]); __syncthreads(); } + if (CTA_SIZE >= 256) { if (tid < 128) buffer[tid] = val = op(val, buffer[tid + 128]); __syncthreads(); } + if (CTA_SIZE >= 128) { if (tid < 64) buffer[tid] = val = op(val, buffer[tid + 64]); __syncthreads(); } + + if (tid < 32) + { + if (CTA_SIZE >= 64) { buffer[tid] = val = op(val, buffer[tid + 32]); } + if (CTA_SIZE >= 32) { buffer[tid] = val = op(val, buffer[tid + 16]); } + if (CTA_SIZE >= 16) { buffer[tid] = val = op(val, buffer[tid + 8]); } + if (CTA_SIZE >= 8) { buffer[tid] = val = op(val, buffer[tid + 4]); } + if (CTA_SIZE >= 4) { buffer[tid] = val = op(val, buffer[tid + 2]); } + if (CTA_SIZE >= 2) { buffer[tid] = val = op(val, 
buffer[tid + 1]); } + } + } + + template + static __device__ __forceinline__ T reduce(volatile T* buffer, T init, BinOp op) + { + int tid = flattenedThreadId(); + T val = buffer[tid] = init; + __syncthreads(); + + if (CTA_SIZE >= 1024) { if (tid < 512) buffer[tid] = val = op(val, buffer[tid + 512]); __syncthreads(); } + if (CTA_SIZE >= 512) { if (tid < 256) buffer[tid] = val = op(val, buffer[tid + 256]); __syncthreads(); } + if (CTA_SIZE >= 256) { if (tid < 128) buffer[tid] = val = op(val, buffer[tid + 128]); __syncthreads(); } + if (CTA_SIZE >= 128) { if (tid < 64) buffer[tid] = val = op(val, buffer[tid + 64]); __syncthreads(); } + + if (tid < 32) + { + if (CTA_SIZE >= 64) { buffer[tid] = val = op(val, buffer[tid + 32]); } + if (CTA_SIZE >= 32) { buffer[tid] = val = op(val, buffer[tid + 16]); } + if (CTA_SIZE >= 16) { buffer[tid] = val = op(val, buffer[tid + 8]); } + if (CTA_SIZE >= 8) { buffer[tid] = val = op(val, buffer[tid + 4]); } + if (CTA_SIZE >= 4) { buffer[tid] = val = op(val, buffer[tid + 2]); } + if (CTA_SIZE >= 2) { buffer[tid] = val = op(val, buffer[tid + 1]); } + } + __syncthreads(); + return buffer[0]; + } + + template + static __device__ __forceinline__ void reduce_n(T* data, unsigned int n, BinOp op) + { + int ftid = flattenedThreadId(); + int sft = stride(); + + if (sft < n) + { + for (unsigned int i = sft + ftid; i < n; i += sft) + data[ftid] = op(data[ftid], data[i]); + + __syncthreads(); + + n = sft; + } + + while (n > 1) + { + unsigned int half = n/2; + + if (ftid < half) + data[ftid] = op(data[ftid], data[n - ftid - 1]); + + __syncthreads(); + + n = n - half; + } + } + }; +}}} + +//! @endcond + +#endif /* OPENCV_CUDA_DEVICE_BLOCK_HPP */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/border_interpolate.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/border_interpolate.hpp new file mode 100644 index 00000000..874f705b --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/border_interpolate.hpp @@ -0,0 +1,722 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
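The Block::reduce helpers above implement a standard shared-memory tree reduction: at each step the lower half of the active threads folds the upper half of the buffer into itself, so after log2(CTA_SIZE) steps element 0 holds the combined value. The host-side C++ sketch below mirrors that halving pattern outside of CUDA; the helper name tree_reduce and the 256-element input are illustrative only and are not part of the header.

#include <cstddef>
#include <cstdio>
#include <functional>
#include <vector>

// Host-side illustration of the halving pattern used by Block::reduce:
// at every step the first half of the buffer absorbs the second half.
template <typename T, typename BinOp>
T tree_reduce(std::vector<T> buf, BinOp op)
{
    for (std::size_t n = buf.size(); n > 1; n /= 2)
        for (std::size_t tid = 0; tid < n / 2; ++tid)   // "threads" 0 .. n/2-1
            buf[tid] = op(buf[tid], buf[tid + n / 2]);
    return buf[0];
}

int main()
{
    std::vector<int> data(256, 1);                       // CTA_SIZE = 256, all ones
    std::printf("sum = %d\n", tree_reduce(data, std::plus<int>()));   // prints 256
    return 0;
}

As in the device code, the sketch assumes a power-of-two buffer size; the unrolled if (CTA_SIZE >= ...) cascade above is the same loop with the step count fixed at compile time.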
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_BORDER_INTERPOLATE_HPP +#define OPENCV_CUDA_BORDER_INTERPOLATE_HPP + +#include "saturate_cast.hpp" +#include "vec_traits.hpp" +#include "vec_math.hpp" + +/** @file + * @deprecated Use @ref cudev instead. + */ + +//! @cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + ////////////////////////////////////////////////////////////// + // BrdConstant + + template struct BrdRowConstant + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdRowConstant(int width_, const D& val_ = VecTraits::all(0)) : width(width_), val(val_) {} + + template __device__ __forceinline__ D at_low(int x, const T* data) const + { + return x >= 0 ? saturate_cast(data[x]) : val; + } + + template __device__ __forceinline__ D at_high(int x, const T* data) const + { + return x < width ? saturate_cast(data[x]) : val; + } + + template __device__ __forceinline__ D at(int x, const T* data) const + { + return (x >= 0 && x < width) ? saturate_cast(data[x]) : val; + } + + int width; + D val; + }; + + template struct BrdColConstant + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdColConstant(int height_, const D& val_ = VecTraits::all(0)) : height(height_), val(val_) {} + + template __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const + { + return y >= 0 ? saturate_cast(*(const T*)((const char*)data + y * step)) : val; + } + + template __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const + { + return y < height ? saturate_cast(*(const T*)((const char*)data + y * step)) : val; + } + + template __device__ __forceinline__ D at(int y, const T* data, size_t step) const + { + return (y >= 0 && y < height) ? saturate_cast(*(const T*)((const char*)data + y * step)) : val; + } + + int height; + D val; + }; + + template struct BrdConstant + { + typedef D result_type; + + __host__ __device__ __forceinline__ BrdConstant(int height_, int width_, const D& val_ = VecTraits::all(0)) : height(height_), width(width_), val(val_) + { + } + + template __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const + { + return (x >= 0 && x < width && y >= 0 && y < height) ? saturate_cast(((const T*)((const uchar*)data + y * step))[x]) : val; + } + + template __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const + { + return (x >= 0 && x < width && y >= 0 && y < height) ? 
saturate_cast(src(y, x)) : val; + } + + int height; + int width; + D val; + }; + + ////////////////////////////////////////////////////////////// + // BrdReplicate + + template struct BrdRowReplicate + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdRowReplicate(int width) : last_col(width - 1) {} + template __host__ __device__ __forceinline__ BrdRowReplicate(int width, U) : last_col(width - 1) {} + + __device__ __forceinline__ int idx_col_low(int x) const + { + return ::max(x, 0); + } + + __device__ __forceinline__ int idx_col_high(int x) const + { + return ::min(x, last_col); + } + + __device__ __forceinline__ int idx_col(int x) const + { + return idx_col_low(idx_col_high(x)); + } + + template __device__ __forceinline__ D at_low(int x, const T* data) const + { + return saturate_cast(data[idx_col_low(x)]); + } + + template __device__ __forceinline__ D at_high(int x, const T* data) const + { + return saturate_cast(data[idx_col_high(x)]); + } + + template __device__ __forceinline__ D at(int x, const T* data) const + { + return saturate_cast(data[idx_col(x)]); + } + + int last_col; + }; + + template struct BrdColReplicate + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdColReplicate(int height) : last_row(height - 1) {} + template __host__ __device__ __forceinline__ BrdColReplicate(int height, U) : last_row(height - 1) {} + + __device__ __forceinline__ int idx_row_low(int y) const + { + return ::max(y, 0); + } + + __device__ __forceinline__ int idx_row_high(int y) const + { + return ::min(y, last_row); + } + + __device__ __forceinline__ int idx_row(int y) const + { + return idx_row_low(idx_row_high(y)); + } + + template __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const + { + return saturate_cast(*(const T*)((const char*)data + idx_row_low(y) * step)); + } + + template __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const + { + return saturate_cast(*(const T*)((const char*)data + idx_row_high(y) * step)); + } + + template __device__ __forceinline__ D at(int y, const T* data, size_t step) const + { + return saturate_cast(*(const T*)((const char*)data + idx_row(y) * step)); + } + + int last_row; + }; + + template struct BrdReplicate + { + typedef D result_type; + + __host__ __device__ __forceinline__ BrdReplicate(int height, int width) : last_row(height - 1), last_col(width - 1) {} + template __host__ __device__ __forceinline__ BrdReplicate(int height, int width, U) : last_row(height - 1), last_col(width - 1) {} + + __device__ __forceinline__ int idx_row_low(int y) const + { + return ::max(y, 0); + } + + __device__ __forceinline__ int idx_row_high(int y) const + { + return ::min(y, last_row); + } + + __device__ __forceinline__ int idx_row(int y) const + { + return idx_row_low(idx_row_high(y)); + } + + __device__ __forceinline__ int idx_col_low(int x) const + { + return ::max(x, 0); + } + + __device__ __forceinline__ int idx_col_high(int x) const + { + return ::min(x, last_col); + } + + __device__ __forceinline__ int idx_col(int x) const + { + return idx_col_low(idx_col_high(x)); + } + + template __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const + { + return saturate_cast(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]); + } + + template __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const + { + return saturate_cast(src(idx_row(y), idx_col(x))); + } + + int last_row; + int 
last_col; + }; + + ////////////////////////////////////////////////////////////// + // BrdReflect101 + + template struct BrdRowReflect101 + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdRowReflect101(int width) : last_col(width - 1) {} + template __host__ __device__ __forceinline__ BrdRowReflect101(int width, U) : last_col(width - 1) {} + + __device__ __forceinline__ int idx_col_low(int x) const + { + return ::abs(x) % (last_col + 1); + } + + __device__ __forceinline__ int idx_col_high(int x) const + { + return ::abs(last_col - ::abs(last_col - x)) % (last_col + 1); + } + + __device__ __forceinline__ int idx_col(int x) const + { + return idx_col_low(idx_col_high(x)); + } + + template __device__ __forceinline__ D at_low(int x, const T* data) const + { + return saturate_cast(data[idx_col_low(x)]); + } + + template __device__ __forceinline__ D at_high(int x, const T* data) const + { + return saturate_cast(data[idx_col_high(x)]); + } + + template __device__ __forceinline__ D at(int x, const T* data) const + { + return saturate_cast(data[idx_col(x)]); + } + + int last_col; + }; + + template struct BrdColReflect101 + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdColReflect101(int height) : last_row(height - 1) {} + template __host__ __device__ __forceinline__ BrdColReflect101(int height, U) : last_row(height - 1) {} + + __device__ __forceinline__ int idx_row_low(int y) const + { + return ::abs(y) % (last_row + 1); + } + + __device__ __forceinline__ int idx_row_high(int y) const + { + return ::abs(last_row - ::abs(last_row - y)) % (last_row + 1); + } + + __device__ __forceinline__ int idx_row(int y) const + { + return idx_row_low(idx_row_high(y)); + } + + template __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row_low(y) * step)); + } + + template __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row_high(y) * step)); + } + + template __device__ __forceinline__ D at(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row(y) * step)); + } + + int last_row; + }; + + template struct BrdReflect101 + { + typedef D result_type; + + __host__ __device__ __forceinline__ BrdReflect101(int height, int width) : last_row(height - 1), last_col(width - 1) {} + template __host__ __device__ __forceinline__ BrdReflect101(int height, int width, U) : last_row(height - 1), last_col(width - 1) {} + + __device__ __forceinline__ int idx_row_low(int y) const + { + return ::abs(y) % (last_row + 1); + } + + __device__ __forceinline__ int idx_row_high(int y) const + { + return ::abs(last_row - ::abs(last_row - y)) % (last_row + 1); + } + + __device__ __forceinline__ int idx_row(int y) const + { + return idx_row_low(idx_row_high(y)); + } + + __device__ __forceinline__ int idx_col_low(int x) const + { + return ::abs(x) % (last_col + 1); + } + + __device__ __forceinline__ int idx_col_high(int x) const + { + return ::abs(last_col - ::abs(last_col - x)) % (last_col + 1); + } + + __device__ __forceinline__ int idx_col(int x) const + { + return idx_col_low(idx_col_high(x)); + } + + template __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const + { + return saturate_cast(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]); + } + + template __device__ __forceinline__ D at(typename Ptr2D::index_type 
y, typename Ptr2D::index_type x, const Ptr2D& src) const + { + return saturate_cast(src(idx_row(y), idx_col(x))); + } + + int last_row; + int last_col; + }; + + ////////////////////////////////////////////////////////////// + // BrdReflect + + template struct BrdRowReflect + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdRowReflect(int width) : last_col(width - 1) {} + template __host__ __device__ __forceinline__ BrdRowReflect(int width, U) : last_col(width - 1) {} + + __device__ __forceinline__ int idx_col_low(int x) const + { + return (::abs(x) - (x < 0)) % (last_col + 1); + } + + __device__ __forceinline__ int idx_col_high(int x) const + { + return ::abs(last_col - ::abs(last_col - x) + (x > last_col)) % (last_col + 1); + } + + __device__ __forceinline__ int idx_col(int x) const + { + return idx_col_high(::abs(x) - (x < 0)); + } + + template __device__ __forceinline__ D at_low(int x, const T* data) const + { + return saturate_cast(data[idx_col_low(x)]); + } + + template __device__ __forceinline__ D at_high(int x, const T* data) const + { + return saturate_cast(data[idx_col_high(x)]); + } + + template __device__ __forceinline__ D at(int x, const T* data) const + { + return saturate_cast(data[idx_col(x)]); + } + + int last_col; + }; + + template struct BrdColReflect + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdColReflect(int height) : last_row(height - 1) {} + template __host__ __device__ __forceinline__ BrdColReflect(int height, U) : last_row(height - 1) {} + + __device__ __forceinline__ int idx_row_low(int y) const + { + return (::abs(y) - (y < 0)) % (last_row + 1); + } + + __device__ __forceinline__ int idx_row_high(int y) const + { + return ::abs(last_row - ::abs(last_row - y) + (y > last_row)) % (last_row + 1); + } + + __device__ __forceinline__ int idx_row(int y) const + { + return idx_row_high(::abs(y) - (y < 0)); + } + + template __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row_low(y) * step)); + } + + template __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row_high(y) * step)); + } + + template __device__ __forceinline__ D at(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row(y) * step)); + } + + int last_row; + }; + + template struct BrdReflect + { + typedef D result_type; + + __host__ __device__ __forceinline__ BrdReflect(int height, int width) : last_row(height - 1), last_col(width - 1) {} + template __host__ __device__ __forceinline__ BrdReflect(int height, int width, U) : last_row(height - 1), last_col(width - 1) {} + + __device__ __forceinline__ int idx_row_low(int y) const + { + return (::abs(y) - (y < 0)) % (last_row + 1); + } + + __device__ __forceinline__ int idx_row_high(int y) const + { + return /*::abs*/(last_row - ::abs(last_row - y) + (y > last_row)) /*% (last_row + 1)*/; + } + + __device__ __forceinline__ int idx_row(int y) const + { + return idx_row_low(idx_row_high(y)); + } + + __device__ __forceinline__ int idx_col_low(int x) const + { + return (::abs(x) - (x < 0)) % (last_col + 1); + } + + __device__ __forceinline__ int idx_col_high(int x) const + { + return (last_col - ::abs(last_col - x) + (x > last_col)); + } + + __device__ __forceinline__ int idx_col(int x) const + { + return idx_col_low(idx_col_high(x)); + } + + template __device__ __forceinline__ 
D at(int y, int x, const T* data, size_t step) const + { + return saturate_cast(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]); + } + + template __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const + { + return saturate_cast(src(idx_row(y), idx_col(x))); + } + + int last_row; + int last_col; + }; + + ////////////////////////////////////////////////////////////// + // BrdWrap + + template struct BrdRowWrap + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdRowWrap(int width_) : width(width_) {} + template __host__ __device__ __forceinline__ BrdRowWrap(int width_, U) : width(width_) {} + + __device__ __forceinline__ int idx_col_low(int x) const + { + return (x >= 0) * x + (x < 0) * (x - ((x - width + 1) / width) * width); + } + + __device__ __forceinline__ int idx_col_high(int x) const + { + return (x < width) * x + (x >= width) * (x % width); + } + + __device__ __forceinline__ int idx_col(int x) const + { + return idx_col_high(idx_col_low(x)); + } + + template __device__ __forceinline__ D at_low(int x, const T* data) const + { + return saturate_cast(data[idx_col_low(x)]); + } + + template __device__ __forceinline__ D at_high(int x, const T* data) const + { + return saturate_cast(data[idx_col_high(x)]); + } + + template __device__ __forceinline__ D at(int x, const T* data) const + { + return saturate_cast(data[idx_col(x)]); + } + + int width; + }; + + template struct BrdColWrap + { + typedef D result_type; + + explicit __host__ __device__ __forceinline__ BrdColWrap(int height_) : height(height_) {} + template __host__ __device__ __forceinline__ BrdColWrap(int height_, U) : height(height_) {} + + __device__ __forceinline__ int idx_row_low(int y) const + { + return (y >= 0) * y + (y < 0) * (y - ((y - height + 1) / height) * height); + } + + __device__ __forceinline__ int idx_row_high(int y) const + { + return (y < height) * y + (y >= height) * (y % height); + } + + __device__ __forceinline__ int idx_row(int y) const + { + return idx_row_high(idx_row_low(y)); + } + + template __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row_low(y) * step)); + } + + template __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row_high(y) * step)); + } + + template __device__ __forceinline__ D at(int y, const T* data, size_t step) const + { + return saturate_cast(*(const D*)((const char*)data + idx_row(y) * step)); + } + + int height; + }; + + template struct BrdWrap + { + typedef D result_type; + + __host__ __device__ __forceinline__ BrdWrap(int height_, int width_) : + height(height_), width(width_) + { + } + template + __host__ __device__ __forceinline__ BrdWrap(int height_, int width_, U) : + height(height_), width(width_) + { + } + + __device__ __forceinline__ int idx_row_low(int y) const + { + return (y >= 0) ? y : (y - ((y - height + 1) / height) * height); + } + + __device__ __forceinline__ int idx_row_high(int y) const + { + return (y < height) ? y : (y % height); + } + + __device__ __forceinline__ int idx_row(int y) const + { + return idx_row_high(idx_row_low(y)); + } + + __device__ __forceinline__ int idx_col_low(int x) const + { + return (x >= 0) ? x : (x - ((x - width + 1) / width) * width); + } + + __device__ __forceinline__ int idx_col_high(int x) const + { + return (x < width) ? 
x : (x % width); + } + + __device__ __forceinline__ int idx_col(int x) const + { + return idx_col_high(idx_col_low(x)); + } + + template __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const + { + return saturate_cast(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]); + } + + template __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const + { + return saturate_cast(src(idx_row(y), idx_col(x))); + } + + int height; + int width; + }; + + ////////////////////////////////////////////////////////////// + // BorderReader + + template struct BorderReader + { + typedef typename B::result_type elem_type; + typedef typename Ptr2D::index_type index_type; + + __host__ __device__ __forceinline__ BorderReader(const Ptr2D& ptr_, const B& b_) : ptr(ptr_), b(b_) {} + + __device__ __forceinline__ elem_type operator ()(index_type y, index_type x) const + { + return b.at(y, x, ptr); + } + + Ptr2D ptr; + B b; + }; + + // under win32 there is some bug with templated types that passed as kernel parameters + // with this specialization all works fine + template struct BorderReader< Ptr2D, BrdConstant > + { + typedef typename BrdConstant::result_type elem_type; + typedef typename Ptr2D::index_type index_type; + + __host__ __device__ __forceinline__ BorderReader(const Ptr2D& src_, const BrdConstant& b) : + src(src_), height(b.height), width(b.width), val(b.val) + { + } + + __device__ __forceinline__ D operator ()(index_type y, index_type x) const + { + return (x >= 0 && x < width && y >= 0 && y < height) ? saturate_cast(src(y, x)) : val; + } + + Ptr2D src; + int height; + int width; + D val; + }; +}}} // namespace cv { namespace cuda { namespace cudev + +//! @endcond + +#endif // OPENCV_CUDA_BORDER_INTERPOLATE_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/color.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/color.hpp new file mode 100644 index 00000000..dcce2802 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/color.hpp @@ -0,0 +1,309 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
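The border functors above map an out-of-range coordinate to a valid one with small closed-form index expressions: BrdReplicate clamps with min/max, while BrdReflect101 mirrors about the edge pixels using abs(last - abs(last - x)) % (last + 1). The host-side sketch below uses hypothetical helpers (not taken from the header) to print what those two policies return for a row of width 5; like the device code, the reflect formula is only meant for coordinates that overshoot by less than one image width.

#include <algorithm>
#include <cstdio>
#include <cstdlib>

// Same clamp as BrdReplicate::idx_col: keep x inside [0, last].
static int idx_replicate(int x, int last)
{
    return std::max(0, std::min(x, last));
}

// Same mirroring as BrdRowReflect101::idx_col_high: reflect about the
// edge pixels without repeating them (…3,2,1,0,1,2,3,4,3,2,…).
static int idx_reflect101(int x, int last)
{
    return std::abs(last - std::abs(last - x)) % (last + 1);
}

int main()
{
    const int last = 4;                      // row of width 5: valid x in [0, 4]
    for (int x = -3; x <= 7; ++x)
        std::printf("x=%2d  replicate=%d  reflect101=%d\n",
                    x, idx_replicate(x, last), idx_reflect101(x, last));
    return 0;
}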
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_COLOR_HPP +#define OPENCV_CUDA_COLOR_HPP + +#include "detail/color_detail.hpp" + +/** @file + * @deprecated Use @ref cudev instead. + */ + +//! @cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + // All OPENCV_CUDA_IMPLEMENT_*_TRAITS(ColorSpace1_to_ColorSpace2, ...) macros implements + // template class ColorSpace1_to_ColorSpace2_traits + // { + // typedef ... functor_type; + // static __host__ __device__ functor_type create_functor(); + // }; + + OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_rgb, 3, 3, 2) + OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_bgra, 3, 4, 0) + OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_rgba, 3, 4, 2) + OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_bgr, 4, 3, 0) + OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_rgb, 4, 3, 2) + OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_rgba, 4, 4, 2) + + #undef OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS + + OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(bgr_to_bgr555, 3, 0, 5) + OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(bgr_to_bgr565, 3, 0, 6) + OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(rgb_to_bgr555, 3, 2, 5) + OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(rgb_to_bgr565, 3, 2, 6) + OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(bgra_to_bgr555, 4, 0, 5) + OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(bgra_to_bgr565, 4, 0, 6) + OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(rgba_to_bgr555, 4, 2, 5) + OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(rgba_to_bgr565, 4, 2, 6) + + #undef OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS + + OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_rgb, 3, 2, 5) + OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_rgb, 3, 2, 6) + OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_bgr, 3, 0, 5) + OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_bgr, 3, 0, 6) + OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_rgba, 4, 2, 5) + OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_rgba, 4, 2, 6) + OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_bgra, 4, 0, 5) + OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_bgra, 4, 0, 6) + + #undef OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS + + OPENCV_CUDA_IMPLEMENT_GRAY2RGB_TRAITS(gray_to_bgr, 3) + OPENCV_CUDA_IMPLEMENT_GRAY2RGB_TRAITS(gray_to_bgra, 4) + + #undef OPENCV_CUDA_IMPLEMENT_GRAY2RGB_TRAITS + + OPENCV_CUDA_IMPLEMENT_GRAY2RGB5x5_TRAITS(gray_to_bgr555, 5) + OPENCV_CUDA_IMPLEMENT_GRAY2RGB5x5_TRAITS(gray_to_bgr565, 6) + + #undef OPENCV_CUDA_IMPLEMENT_GRAY2RGB5x5_TRAITS + + OPENCV_CUDA_IMPLEMENT_RGB5x52GRAY_TRAITS(bgr555_to_gray, 5) + OPENCV_CUDA_IMPLEMENT_RGB5x52GRAY_TRAITS(bgr565_to_gray, 6) + + #undef OPENCV_CUDA_IMPLEMENT_RGB5x52GRAY_TRAITS + + OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS(rgb_to_gray, 3, 2) + OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS(bgr_to_gray, 3, 0) + 
OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS(rgba_to_gray, 4, 2) + OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS(bgra_to_gray, 4, 0) + + #undef OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS + + OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv, 3, 3, 2) + OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv, 4, 3, 2) + OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv4, 3, 4, 2) + OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv4, 4, 4, 2) + OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv, 3, 3, 0) + OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv, 4, 3, 0) + OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv4, 3, 4, 0) + OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv4, 4, 4, 0) + + #undef OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS + + OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgb, 3, 3, 2) + OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgba, 3, 4, 2) + OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgb, 4, 3, 2) + OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgba, 4, 4, 2) + OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgr, 3, 3, 0) + OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgra, 3, 4, 0) + OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgr, 4, 3, 0) + OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgra, 4, 4, 0) + + #undef OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS + + OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(rgb_to_YCrCb, 3, 3, 2) + OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(rgba_to_YCrCb, 4, 3, 2) + OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(rgb_to_YCrCb4, 3, 4, 2) + OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(rgba_to_YCrCb4, 4, 4, 2) + OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(bgr_to_YCrCb, 3, 3, 0) + OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(bgra_to_YCrCb, 4, 3, 0) + OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(bgr_to_YCrCb4, 3, 4, 0) + OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(bgra_to_YCrCb4, 4, 4, 0) + + #undef OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS + + OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_rgb, 3, 3, 2) + OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_rgba, 3, 4, 2) + OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_rgb, 4, 3, 2) + OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_rgba, 4, 4, 2) + OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_bgr, 3, 3, 0) + OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_bgra, 3, 4, 0) + OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_bgr, 4, 3, 0) + OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_bgra, 4, 4, 0) + + #undef OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS + + OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(rgb_to_xyz, 3, 3, 2) + OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(rgba_to_xyz, 4, 3, 2) + OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(rgb_to_xyz4, 3, 4, 2) + OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(rgba_to_xyz4, 4, 4, 2) + OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(bgr_to_xyz, 3, 3, 0) + OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(bgra_to_xyz, 4, 3, 0) + OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(bgr_to_xyz4, 3, 4, 0) + OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(bgra_to_xyz4, 4, 4, 0) + + #undef OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS + + OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_rgb, 3, 3, 2) + OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_rgb, 4, 3, 2) + OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_rgba, 3, 4, 2) + OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_rgba, 4, 4, 2) + OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_bgr, 3, 3, 0) + OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_bgr, 4, 3, 0) + OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_bgra, 3, 4, 0) + OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_bgra, 4, 4, 0) + + #undef OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS + + OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(rgb_to_hsv, 3, 3, 2) + 
OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(rgba_to_hsv, 4, 3, 2) + OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(rgb_to_hsv4, 3, 4, 2) + OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(rgba_to_hsv4, 4, 4, 2) + OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(bgr_to_hsv, 3, 3, 0) + OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(bgra_to_hsv, 4, 3, 0) + OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(bgr_to_hsv4, 3, 4, 0) + OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(bgra_to_hsv4, 4, 4, 0) + + #undef OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS + + OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_rgb, 3, 3, 2) + OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_rgba, 3, 4, 2) + OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_rgb, 4, 3, 2) + OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_rgba, 4, 4, 2) + OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_bgr, 3, 3, 0) + OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_bgra, 3, 4, 0) + OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_bgr, 4, 3, 0) + OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_bgra, 4, 4, 0) + + #undef OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS + + OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(rgb_to_hls, 3, 3, 2) + OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(rgba_to_hls, 4, 3, 2) + OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(rgb_to_hls4, 3, 4, 2) + OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(rgba_to_hls4, 4, 4, 2) + OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(bgr_to_hls, 3, 3, 0) + OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(bgra_to_hls, 4, 3, 0) + OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(bgr_to_hls4, 3, 4, 0) + OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(bgra_to_hls4, 4, 4, 0) + + #undef OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS + + OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls_to_rgb, 3, 3, 2) + OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls_to_rgba, 3, 4, 2) + OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_rgb, 4, 3, 2) + OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_rgba, 4, 4, 2) + OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls_to_bgr, 3, 3, 0) + OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls_to_bgra, 3, 4, 0) + OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_bgr, 4, 3, 0) + OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_bgra, 4, 4, 0) + + #undef OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS + + OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(rgb_to_lab, 3, 3, true, 2) + OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(rgba_to_lab, 4, 3, true, 2) + OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(rgb_to_lab4, 3, 4, true, 2) + OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(rgba_to_lab4, 4, 4, true, 2) + OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(bgr_to_lab, 3, 3, true, 0) + OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(bgra_to_lab, 4, 3, true, 0) + OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(bgr_to_lab4, 3, 4, true, 0) + OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(bgra_to_lab4, 4, 4, true, 0) + + OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lrgb_to_lab, 3, 3, false, 2) + OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lrgba_to_lab, 4, 3, false, 2) + OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lrgb_to_lab4, 3, 4, false, 2) + OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lrgba_to_lab4, 4, 4, false, 2) + OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lbgr_to_lab, 3, 3, false, 0) + OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lbgra_to_lab, 4, 3, false, 0) + OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lbgr_to_lab4, 3, 4, false, 0) + OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(lbgra_to_lab4, 4, 4, false, 0) + + #undef OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS + + OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_rgb, 3, 3, true, 2) + OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_rgb, 4, 3, true, 2) + OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_rgba, 3, 4, true, 2) + OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_rgba, 4, 4, true, 2) + OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_bgr, 3, 3, 
true, 0) + OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_bgr, 4, 3, true, 0) + OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_bgra, 3, 4, true, 0) + OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_bgra, 4, 4, true, 0) + + OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lrgb, 3, 3, false, 2) + OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lrgb, 4, 3, false, 2) + OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lrgba, 3, 4, false, 2) + OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lrgba, 4, 4, false, 2) + OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lbgr, 3, 3, false, 0) + OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lbgr, 4, 3, false, 0) + OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lbgra, 3, 4, false, 0) + OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lbgra, 4, 4, false, 0) + + #undef OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS + + OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(rgb_to_luv, 3, 3, true, 2) + OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(rgba_to_luv, 4, 3, true, 2) + OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(rgb_to_luv4, 3, 4, true, 2) + OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(rgba_to_luv4, 4, 4, true, 2) + OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(bgr_to_luv, 3, 3, true, 0) + OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(bgra_to_luv, 4, 3, true, 0) + OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(bgr_to_luv4, 3, 4, true, 0) + OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(bgra_to_luv4, 4, 4, true, 0) + + OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lrgb_to_luv, 3, 3, false, 2) + OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lrgba_to_luv, 4, 3, false, 2) + OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lrgb_to_luv4, 3, 4, false, 2) + OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lrgba_to_luv4, 4, 4, false, 2) + OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lbgr_to_luv, 3, 3, false, 0) + OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lbgra_to_luv, 4, 3, false, 0) + OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lbgr_to_luv4, 3, 4, false, 0) + OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(lbgra_to_luv4, 4, 4, false, 0) + + #undef OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS + + OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_rgb, 3, 3, true, 2) + OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_rgb, 4, 3, true, 2) + OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_rgba, 3, 4, true, 2) + OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_rgba, 4, 4, true, 2) + OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_bgr, 3, 3, true, 0) + OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_bgr, 4, 3, true, 0) + OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_bgra, 3, 4, true, 0) + OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_bgra, 4, 4, true, 0) + + OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lrgb, 3, 3, false, 2) + OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lrgb, 4, 3, false, 2) + OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lrgba, 3, 4, false, 2) + OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lrgba, 4, 4, false, 2) + OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lbgr, 3, 3, false, 0) + OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lbgr, 4, 3, false, 0) + OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lbgra, 3, 4, false, 0) + OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lbgra, 4, 4, false, 0) + + #undef OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS +}}} // namespace cv { namespace cuda { namespace cudev + +//! 
@endcond + +#endif // OPENCV_CUDA_COLOR_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/common.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/common.hpp new file mode 100644 index 00000000..80b2ff08 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/common.hpp @@ -0,0 +1,123 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_COMMON_HPP +#define OPENCV_CUDA_COMMON_HPP + +#include +#include "opencv2/core/cuda_types.hpp" +#include "opencv2/core/cvdef.h" +#include "opencv2/core/base.hpp" + +/** @file + * @deprecated Use @ref cudev instead. + */ + +//! 
@cond IGNORED + +#ifndef CV_PI_F + #ifndef CV_PI + #define CV_PI_F 3.14159265f + #else + #define CV_PI_F ((float)CV_PI) + #endif +#endif + +namespace cv { namespace cuda { + static inline void checkCudaError(cudaError_t err, const char* file, const int line, const char* func) + { + if (cudaSuccess != err) + cv::error(cv::Error::GpuApiCallError, cudaGetErrorString(err), func, file, line); + } +}} + +#ifndef cudaSafeCall + #define cudaSafeCall(expr) cv::cuda::checkCudaError(expr, __FILE__, __LINE__, CV_Func) +#endif + +namespace cv { namespace cuda +{ + template static inline bool isAligned(const T* ptr, size_t size) + { + return reinterpret_cast(ptr) % size == 0; + } + + static inline bool isAligned(size_t step, size_t size) + { + return step % size == 0; + } +}} + +namespace cv { namespace cuda +{ + namespace device + { + __host__ __device__ __forceinline__ int divUp(int total, int grain) + { + return (total + grain - 1) / grain; + } + + template inline void bindTexture(const textureReference* tex, const PtrStepSz& img) + { + cudaChannelFormatDesc desc = cudaCreateChannelDesc(); + cudaSafeCall( cudaBindTexture2D(0, tex, img.ptr(), &desc, img.cols, img.rows, img.step) ); + } + + template inline void createTextureObjectPitch2D(cudaTextureObject_t* tex, PtrStepSz& img, const cudaTextureDesc& texDesc) + { + cudaResourceDesc resDesc; + memset(&resDesc, 0, sizeof(resDesc)); + resDesc.resType = cudaResourceTypePitch2D; + resDesc.res.pitch2D.devPtr = static_cast(img.ptr()); + resDesc.res.pitch2D.height = img.rows; + resDesc.res.pitch2D.width = img.cols; + resDesc.res.pitch2D.pitchInBytes = img.step; + resDesc.res.pitch2D.desc = cudaCreateChannelDesc(); + + cudaSafeCall( cudaCreateTextureObject(tex, &resDesc, &texDesc, NULL) ); + } + } +}} + +//! @endcond + +#endif // OPENCV_CUDA_COMMON_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/datamov_utils.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/datamov_utils.hpp new file mode 100644 index 00000000..6820d0fd --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/datamov_utils.hpp @@ -0,0 +1,113 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
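divUp above is the usual ceiling division used to size a CUDA grid so that every row and column is covered even when the image dimensions are not multiples of the block size. A minimal host-side sketch of the arithmetic follows; the image and block sizes are made-up example values.

#include <cstdio>

// Ceiling division, same formula as device::divUp above.
static int divUp(int total, int grain) { return (total + grain - 1) / grain; }

int main()
{
    // Typical use: size a 2D grid so a 1920x1081 image is fully covered
    // by 32x8 thread blocks (numbers are illustrative only).
    const int cols = 1920, rows = 1081;
    const int bx = 32, by = 8;
    std::printf("grid = %d x %d blocks\n", divUp(cols, bx), divUp(rows, by));   // 60 x 136
    return 0;
}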
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_DATAMOV_UTILS_HPP +#define OPENCV_CUDA_DATAMOV_UTILS_HPP + +#include "common.hpp" + +/** @file + * @deprecated Use @ref cudev instead. + */ + +//! @cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 200 + + // for Fermi memory space is detected automatically + template struct ForceGlob + { + __device__ __forceinline__ static void Load(const T* ptr, int offset, T& val) { val = ptr[offset]; } + }; + + #else // __CUDA_ARCH__ >= 200 + + #if defined(_WIN64) || defined(__LP64__) + // 64-bit register modifier for inlined asm + #define OPENCV_CUDA_ASM_PTR "l" + #else + // 32-bit register modifier for inlined asm + #define OPENCV_CUDA_ASM_PTR "r" + #endif + + template struct ForceGlob; + + #define OPENCV_CUDA_DEFINE_FORCE_GLOB(base_type, ptx_type, reg_mod) \ + template <> struct ForceGlob \ + { \ + __device__ __forceinline__ static void Load(const base_type* ptr, int offset, base_type& val) \ + { \ + asm("ld.global."#ptx_type" %0, [%1];" : "="#reg_mod(val) : OPENCV_CUDA_ASM_PTR(ptr + offset)); \ + } \ + }; + + #define OPENCV_CUDA_DEFINE_FORCE_GLOB_B(base_type, ptx_type) \ + template <> struct ForceGlob \ + { \ + __device__ __forceinline__ static void Load(const base_type* ptr, int offset, base_type& val) \ + { \ + asm("ld.global."#ptx_type" %0, [%1];" : "=r"(*reinterpret_cast(&val)) : OPENCV_CUDA_ASM_PTR(ptr + offset)); \ + } \ + }; + + OPENCV_CUDA_DEFINE_FORCE_GLOB_B(uchar, u8) + OPENCV_CUDA_DEFINE_FORCE_GLOB_B(schar, s8) + OPENCV_CUDA_DEFINE_FORCE_GLOB_B(char, b8) + OPENCV_CUDA_DEFINE_FORCE_GLOB (ushort, u16, h) + OPENCV_CUDA_DEFINE_FORCE_GLOB (short, s16, h) + OPENCV_CUDA_DEFINE_FORCE_GLOB (uint, u32, r) + OPENCV_CUDA_DEFINE_FORCE_GLOB (int, s32, r) + OPENCV_CUDA_DEFINE_FORCE_GLOB (float, f32, f) + OPENCV_CUDA_DEFINE_FORCE_GLOB (double, f64, d) + + #undef OPENCV_CUDA_DEFINE_FORCE_GLOB + #undef OPENCV_CUDA_DEFINE_FORCE_GLOB_B + #undef OPENCV_CUDA_ASM_PTR + + #endif // __CUDA_ARCH__ >= 200 +}}} // namespace cv { namespace cuda { namespace cudev + +//! @endcond + +#endif // OPENCV_CUDA_DATAMOV_UTILS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/detail/color_detail.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/detail/color_detail.hpp new file mode 100644 index 00000000..bfb40550 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/detail/color_detail.hpp @@ -0,0 +1,1980 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_COLOR_DETAIL_HPP +#define OPENCV_CUDA_COLOR_DETAIL_HPP + +#include "../common.hpp" +#include "../vec_traits.hpp" +#include "../saturate_cast.hpp" +#include "../limits.hpp" +#include "../functional.hpp" + +//! 
@cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + #ifndef CV_DESCALE + #define CV_DESCALE(x, n) (((x) + (1 << ((n)-1))) >> (n)) + #endif + + namespace color_detail + { + template struct ColorChannel + { + typedef float worktype_f; + static __device__ __forceinline__ T max() { return numeric_limits::max(); } + static __device__ __forceinline__ T half() { return (T)(max()/2 + 1); } + }; + + template<> struct ColorChannel + { + typedef float worktype_f; + static __device__ __forceinline__ float max() { return 1.f; } + static __device__ __forceinline__ float half() { return 0.5f; } + }; + + template static __device__ __forceinline__ void setAlpha(typename TypeVec::vec_type& vec, T val) + { + } + + template static __device__ __forceinline__ void setAlpha(typename TypeVec::vec_type& vec, T val) + { + vec.w = val; + } + + template static __device__ __forceinline__ T getAlpha(const typename TypeVec::vec_type& vec) + { + return ColorChannel::max(); + } + + template static __device__ __forceinline__ T getAlpha(const typename TypeVec::vec_type& vec) + { + return vec.w; + } + + enum + { + yuv_shift = 14, + xyz_shift = 12, + R2Y = 4899, + G2Y = 9617, + B2Y = 1868, + BLOCK_SIZE = 256 + }; + } + +////////////////// Various 3/4-channel to 3/4-channel RGB transformations ///////////////// + + namespace color_detail + { + template struct RGB2RGB + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + dst.x = (&src.x)[bidx]; + dst.y = src.y; + dst.z = (&src.x)[bidx^2]; + setAlpha(dst, getAlpha(src)); + + return dst; + } + + __host__ __device__ __forceinline__ RGB2RGB() {} + __host__ __device__ __forceinline__ RGB2RGB(const RGB2RGB&) {} + }; + + template <> struct RGB2RGB : unary_function + { + __device__ uint operator()(uint src) const + { + uint dst = 0; + + dst |= (0xffu & (src >> 16)); + dst |= (0xffu & (src >> 8)) << 8; + dst |= (0xffu & (src)) << 16; + dst |= (0xffu & (src >> 24)) << 24; + + return dst; + } + + __host__ __device__ __forceinline__ RGB2RGB() {} + __host__ __device__ __forceinline__ RGB2RGB(const RGB2RGB&) {} + }; + } + +#define OPENCV_CUDA_IMPLEMENT_RGB2RGB_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::cuda::device::color_detail::RGB2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +/////////// Transforming 16-bit (565 or 555) RGB to/from 24/32-bit (888[8]) RGB ////////// + + namespace color_detail + { + template struct RGB2RGB5x5Converter; + template struct RGB2RGB5x5Converter<6, bidx> + { + static __device__ __forceinline__ ushort cvt(const uchar3& src) + { + return (ushort)(((&src.x)[bidx] >> 3) | ((src.y & ~3) << 3) | (((&src.x)[bidx^2] & ~7) << 8)); + } + + static __device__ __forceinline__ ushort cvt(uint src) + { + uint b = 0xffu & (src >> (bidx * 8)); + uint g = 0xffu & (src >> 8); + uint r = 0xffu & (src >> ((bidx ^ 2) * 8)); + return (ushort)((b >> 3) | ((g & ~3) << 3) | ((r & ~7) << 8)); + } + }; + + template struct RGB2RGB5x5Converter<5, bidx> + { + static __device__ __forceinline__ ushort cvt(const uchar3& src) + { + return (ushort)(((&src.x)[bidx] >> 3) | ((src.y & ~7) << 2) | (((&src.x)[bidx^2] & ~7) << 7)); + } + + static __device__ __forceinline__ ushort cvt(uint src) + { + uint b = 0xffu & (src >> (bidx * 8)); + uint g = 0xffu & (src >> 8); + uint r = 0xffu & (src >> ((bidx 
^ 2) * 8)); + uint a = 0xffu & (src >> 24); + return (ushort)((b >> 3) | ((g & ~7) << 2) | ((r & ~7) << 7) | (a * 0x8000)); + } + }; + + template struct RGB2RGB5x5; + + template struct RGB2RGB5x5<3, bidx,green_bits> : unary_function + { + __device__ __forceinline__ ushort operator()(const uchar3& src) const + { + return RGB2RGB5x5Converter::cvt(src); + } + + __host__ __device__ __forceinline__ RGB2RGB5x5() {} + __host__ __device__ __forceinline__ RGB2RGB5x5(const RGB2RGB5x5&) {} + }; + + template struct RGB2RGB5x5<4, bidx,green_bits> : unary_function + { + __device__ __forceinline__ ushort operator()(uint src) const + { + return RGB2RGB5x5Converter::cvt(src); + } + + __host__ __device__ __forceinline__ RGB2RGB5x5() {} + __host__ __device__ __forceinline__ RGB2RGB5x5(const RGB2RGB5x5&) {} + }; + } + +#define OPENCV_CUDA_IMPLEMENT_RGB2RGB5x5_TRAITS(name, scn, bidx, green_bits) \ + struct name ## _traits \ + { \ + typedef ::cv::cuda::device::color_detail::RGB2RGB5x5 functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + namespace color_detail + { + template struct RGB5x52RGBConverter; + + template struct RGB5x52RGBConverter<5, bidx> + { + static __device__ __forceinline__ void cvt(uint src, uchar3& dst) + { + (&dst.x)[bidx] = src << 3; + dst.y = (src >> 2) & ~7; + (&dst.x)[bidx ^ 2] = (src >> 7) & ~7; + } + + static __device__ __forceinline__ void cvt(uint src, uint& dst) + { + dst = 0; + + dst |= (0xffu & (src << 3)) << (bidx * 8); + dst |= (0xffu & ((src >> 2) & ~7)) << 8; + dst |= (0xffu & ((src >> 7) & ~7)) << ((bidx ^ 2) * 8); + dst |= ((src & 0x8000) * 0xffu) << 24; + } + }; + + template struct RGB5x52RGBConverter<6, bidx> + { + static __device__ __forceinline__ void cvt(uint src, uchar3& dst) + { + (&dst.x)[bidx] = src << 3; + dst.y = (src >> 3) & ~3; + (&dst.x)[bidx ^ 2] = (src >> 8) & ~7; + } + + static __device__ __forceinline__ void cvt(uint src, uint& dst) + { + dst = 0xffu << 24; + + dst |= (0xffu & (src << 3)) << (bidx * 8); + dst |= (0xffu &((src >> 3) & ~3)) << 8; + dst |= (0xffu & ((src >> 8) & ~7)) << ((bidx ^ 2) * 8); + } + }; + + template struct RGB5x52RGB; + + template struct RGB5x52RGB<3, bidx, green_bits> : unary_function + { + __device__ __forceinline__ uchar3 operator()(ushort src) const + { + uchar3 dst; + RGB5x52RGBConverter::cvt(src, dst); + return dst; + } + __host__ __device__ __forceinline__ RGB5x52RGB() {} + __host__ __device__ __forceinline__ RGB5x52RGB(const RGB5x52RGB&) {} + + }; + + template struct RGB5x52RGB<4, bidx, green_bits> : unary_function + { + __device__ __forceinline__ uint operator()(ushort src) const + { + uint dst; + RGB5x52RGBConverter::cvt(src, dst); + return dst; + } + __host__ __device__ __forceinline__ RGB5x52RGB() {} + __host__ __device__ __forceinline__ RGB5x52RGB(const RGB5x52RGB&) {} + }; + } + +#define OPENCV_CUDA_IMPLEMENT_RGB5x52RGB_TRAITS(name, dcn, bidx, green_bits) \ + struct name ## _traits \ + { \ + typedef ::cv::cuda::device::color_detail::RGB5x52RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +///////////////////////////////// Grayscale to Color //////////////////////////////// + + namespace color_detail + { + template struct Gray2RGB : unary_function::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(T src) const + { + typename TypeVec::vec_type dst; + + dst.z = dst.y = dst.x = src; + setAlpha(dst, 
ColorChannel::max()); + + return dst; + } + __host__ __device__ __forceinline__ Gray2RGB() {} + __host__ __device__ __forceinline__ Gray2RGB(const Gray2RGB&) {} + }; + + template <> struct Gray2RGB : unary_function + { + __device__ __forceinline__ uint operator()(uint src) const + { + uint dst = 0xffu << 24; + + dst |= src; + dst |= src << 8; + dst |= src << 16; + + return dst; + } + __host__ __device__ __forceinline__ Gray2RGB() {} + __host__ __device__ __forceinline__ Gray2RGB(const Gray2RGB&) {} + }; + } + +#define OPENCV_CUDA_IMPLEMENT_GRAY2RGB_TRAITS(name, dcn) \ + template struct name ## _traits \ + { \ + typedef ::cv::cuda::device::color_detail::Gray2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + namespace color_detail + { + template struct Gray2RGB5x5Converter; + template<> struct Gray2RGB5x5Converter<6> + { + static __device__ __forceinline__ ushort cvt(uint t) + { + return (ushort)((t >> 3) | ((t & ~3) << 3) | ((t & ~7) << 8)); + } + }; + + template<> struct Gray2RGB5x5Converter<5> + { + static __device__ __forceinline__ ushort cvt(uint t) + { + t >>= 3; + return (ushort)(t | (t << 5) | (t << 10)); + } + }; + + template struct Gray2RGB5x5 : unary_function + { + __device__ __forceinline__ ushort operator()(uint src) const + { + return Gray2RGB5x5Converter::cvt(src); + } + + __host__ __device__ __forceinline__ Gray2RGB5x5() {} + __host__ __device__ __forceinline__ Gray2RGB5x5(const Gray2RGB5x5&) {} + }; + } + +#define OPENCV_CUDA_IMPLEMENT_GRAY2RGB5x5_TRAITS(name, green_bits) \ + struct name ## _traits \ + { \ + typedef ::cv::cuda::device::color_detail::Gray2RGB5x5 functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +///////////////////////////////// Color to Grayscale //////////////////////////////// + + namespace color_detail + { + template struct RGB5x52GrayConverter; + template <> struct RGB5x52GrayConverter<6> + { + static __device__ __forceinline__ uchar cvt(uint t) + { + return (uchar)CV_DESCALE(((t << 3) & 0xf8) * B2Y + ((t >> 3) & 0xfc) * G2Y + ((t >> 8) & 0xf8) * R2Y, yuv_shift); + } + }; + + template <> struct RGB5x52GrayConverter<5> + { + static __device__ __forceinline__ uchar cvt(uint t) + { + return (uchar)CV_DESCALE(((t << 3) & 0xf8) * B2Y + ((t >> 2) & 0xf8) * G2Y + ((t >> 7) & 0xf8) * R2Y, yuv_shift); + } + }; + + template struct RGB5x52Gray : unary_function + { + __device__ __forceinline__ uchar operator()(uint src) const + { + return RGB5x52GrayConverter::cvt(src); + } + __host__ __device__ __forceinline__ RGB5x52Gray() {} + __host__ __device__ __forceinline__ RGB5x52Gray(const RGB5x52Gray&) {} + }; + } + +#define OPENCV_CUDA_IMPLEMENT_RGB5x52GRAY_TRAITS(name, green_bits) \ + struct name ## _traits \ + { \ + typedef ::cv::cuda::device::color_detail::RGB5x52Gray functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + namespace color_detail + { + template static __device__ __forceinline__ T RGB2GrayConvert(const T* src) + { + return (T)CV_DESCALE((unsigned)(src[bidx] * B2Y + src[1] * G2Y + src[bidx^2] * R2Y), yuv_shift); + } + + template static __device__ __forceinline__ uchar RGB2GrayConvert(uint src) + { + uint b = 0xffu & (src >> (bidx * 8)); + uint g = 0xffu & (src >> 8); + uint r = 0xffu & (src >> ((bidx ^ 2) * 8)); + return CV_DESCALE((uint)(b * B2Y + g * G2Y + r * R2Y), yuv_shift); + } + + 
template static __device__ __forceinline__ float RGB2GrayConvert(const float* src) + { + return src[bidx] * 0.114f + src[1] * 0.587f + src[bidx^2] * 0.299f; + } + + template struct RGB2Gray : unary_function::vec_type, T> + { + __device__ __forceinline__ T operator()(const typename TypeVec::vec_type& src) const + { + return RGB2GrayConvert(&src.x); + } + __host__ __device__ __forceinline__ RGB2Gray() {} + __host__ __device__ __forceinline__ RGB2Gray(const RGB2Gray&) {} + }; + + template struct RGB2Gray : unary_function + { + __device__ __forceinline__ uchar operator()(uint src) const + { + return RGB2GrayConvert(src); + } + __host__ __device__ __forceinline__ RGB2Gray() {} + __host__ __device__ __forceinline__ RGB2Gray(const RGB2Gray&) {} + }; + } + +#define OPENCV_CUDA_IMPLEMENT_RGB2GRAY_TRAITS(name, scn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::cuda::device::color_detail::RGB2Gray functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +///////////////////////////////////// RGB <-> YUV ////////////////////////////////////// + + namespace color_detail + { + __constant__ float c_RGB2YUVCoeffs_f[5] = { 0.114f, 0.587f, 0.299f, 0.492f, 0.877f }; + __constant__ int c_RGB2YUVCoeffs_i[5] = { B2Y, G2Y, R2Y, 8061, 14369 }; + + template static __device__ void RGB2YUVConvert(const T* src, D& dst) + { + const int delta = ColorChannel::half() * (1 << yuv_shift); + + const int Y = CV_DESCALE(src[0] * c_RGB2YUVCoeffs_i[bidx^2] + src[1] * c_RGB2YUVCoeffs_i[1] + src[2] * c_RGB2YUVCoeffs_i[bidx], yuv_shift); + const int Cr = CV_DESCALE((src[bidx^2] - Y) * c_RGB2YUVCoeffs_i[3] + delta, yuv_shift); + const int Cb = CV_DESCALE((src[bidx] - Y) * c_RGB2YUVCoeffs_i[4] + delta, yuv_shift); + + dst.x = saturate_cast(Y); + dst.y = saturate_cast(Cr); + dst.z = saturate_cast(Cb); + } + + template static __device__ __forceinline__ void RGB2YUVConvert(const float* src, D& dst) + { + dst.x = src[0] * c_RGB2YUVCoeffs_f[bidx^2] + src[1] * c_RGB2YUVCoeffs_f[1] + src[2] * c_RGB2YUVCoeffs_f[bidx]; + dst.y = (src[bidx^2] - dst.x) * c_RGB2YUVCoeffs_f[3] + ColorChannel::half(); + dst.z = (src[bidx] - dst.x) * c_RGB2YUVCoeffs_f[4] + ColorChannel::half(); + } + + template struct RGB2YUV + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator ()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + RGB2YUVConvert(&src.x, dst); + return dst; + } + __host__ __device__ __forceinline__ RGB2YUV() {} + __host__ __device__ __forceinline__ RGB2YUV(const RGB2YUV&) {} + }; + } + +#define OPENCV_CUDA_IMPLEMENT_RGB2YUV_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::cuda::device::color_detail::RGB2YUV functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + namespace color_detail + { + __constant__ float c_YUV2RGBCoeffs_f[5] = { 2.032f, -0.395f, -0.581f, 1.140f }; + __constant__ int c_YUV2RGBCoeffs_i[5] = { 33292, -6472, -9519, 18678 }; + + template static __device__ void YUV2RGBConvert(const T& src, D* dst) + { + const int b = src.x + CV_DESCALE((src.z - ColorChannel::half()) * c_YUV2RGBCoeffs_i[3], yuv_shift); + + const int g = src.x + CV_DESCALE((src.z - ColorChannel::half()) * c_YUV2RGBCoeffs_i[2] + + (src.y - ColorChannel::half()) * c_YUV2RGBCoeffs_i[1], yuv_shift); + + const int r = src.x + CV_DESCALE((src.y 
- ColorChannel::half()) * c_YUV2RGBCoeffs_i[0], yuv_shift); + + dst[bidx] = saturate_cast(b); + dst[1] = saturate_cast(g); + dst[bidx^2] = saturate_cast(r); + } + + template static __device__ uint YUV2RGBConvert(uint src) + { + const int x = 0xff & (src); + const int y = 0xff & (src >> 8); + const int z = 0xff & (src >> 16); + + const int b = x + CV_DESCALE((z - ColorChannel::half()) * c_YUV2RGBCoeffs_i[3], yuv_shift); + + const int g = x + CV_DESCALE((z - ColorChannel::half()) * c_YUV2RGBCoeffs_i[2] + + (y - ColorChannel::half()) * c_YUV2RGBCoeffs_i[1], yuv_shift); + + const int r = x + CV_DESCALE((y - ColorChannel::half()) * c_YUV2RGBCoeffs_i[0], yuv_shift); + + uint dst = 0xffu << 24; + + dst |= saturate_cast(b) << (bidx * 8); + dst |= saturate_cast(g) << 8; + dst |= saturate_cast(r) << ((bidx ^ 2) * 8); + + return dst; + } + + template static __device__ __forceinline__ void YUV2RGBConvert(const T& src, float* dst) + { + dst[bidx] = src.x + (src.z - ColorChannel::half()) * c_YUV2RGBCoeffs_f[3]; + + dst[1] = src.x + (src.z - ColorChannel::half()) * c_YUV2RGBCoeffs_f[2] + + (src.y - ColorChannel::half()) * c_YUV2RGBCoeffs_f[1]; + + dst[bidx^2] = src.x + (src.y - ColorChannel::half()) * c_YUV2RGBCoeffs_f[0]; + } + + template struct YUV2RGB + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator ()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + YUV2RGBConvert(src, &dst.x); + setAlpha(dst, ColorChannel::max()); + + return dst; + } + __host__ __device__ __forceinline__ YUV2RGB() {} + __host__ __device__ __forceinline__ YUV2RGB(const YUV2RGB&) {} + }; + + template struct YUV2RGB : unary_function + { + __device__ __forceinline__ uint operator ()(uint src) const + { + return YUV2RGBConvert(src); + } + __host__ __device__ __forceinline__ YUV2RGB() {} + __host__ __device__ __forceinline__ YUV2RGB(const YUV2RGB&) {} + }; + } + +#define OPENCV_CUDA_IMPLEMENT_YUV2RGB_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::cuda::device::color_detail::YUV2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +///////////////////////////////////// RGB <-> YCrCb ////////////////////////////////////// + + namespace color_detail + { + __constant__ float c_RGB2YCrCbCoeffs_f[5] = {0.299f, 0.587f, 0.114f, 0.713f, 0.564f}; + __constant__ int c_RGB2YCrCbCoeffs_i[5] = {R2Y, G2Y, B2Y, 11682, 9241}; + + template static __device__ void RGB2YCrCbConvert(const T* src, D& dst) + { + const int delta = ColorChannel::half() * (1 << yuv_shift); + + const int Y = CV_DESCALE(src[0] * c_RGB2YCrCbCoeffs_i[bidx^2] + src[1] * c_RGB2YCrCbCoeffs_i[1] + src[2] * c_RGB2YCrCbCoeffs_i[bidx], yuv_shift); + const int Cr = CV_DESCALE((src[bidx^2] - Y) * c_RGB2YCrCbCoeffs_i[3] + delta, yuv_shift); + const int Cb = CV_DESCALE((src[bidx] - Y) * c_RGB2YCrCbCoeffs_i[4] + delta, yuv_shift); + + dst.x = saturate_cast(Y); + dst.y = saturate_cast(Cr); + dst.z = saturate_cast(Cb); + } + + template static __device__ uint RGB2YCrCbConvert(uint src) + { + const int delta = ColorChannel::half() * (1 << yuv_shift); + + const int Y = CV_DESCALE((0xffu & src) * c_RGB2YCrCbCoeffs_i[bidx^2] + (0xffu & (src >> 8)) * c_RGB2YCrCbCoeffs_i[1] + (0xffu & (src >> 16)) * c_RGB2YCrCbCoeffs_i[bidx], yuv_shift); + const int Cr = CV_DESCALE(((0xffu & (src >> ((bidx ^ 2) * 8))) - Y) * c_RGB2YCrCbCoeffs_i[3] + delta, yuv_shift); + const int 
Cb = CV_DESCALE(((0xffu & (src >> (bidx * 8))) - Y) * c_RGB2YCrCbCoeffs_i[4] + delta, yuv_shift); + + uint dst = 0; + + dst |= saturate_cast(Y); + dst |= saturate_cast(Cr) << 8; + dst |= saturate_cast(Cb) << 16; + + return dst; + } + + template static __device__ __forceinline__ void RGB2YCrCbConvert(const float* src, D& dst) + { + dst.x = src[0] * c_RGB2YCrCbCoeffs_f[bidx^2] + src[1] * c_RGB2YCrCbCoeffs_f[1] + src[2] * c_RGB2YCrCbCoeffs_f[bidx]; + dst.y = (src[bidx^2] - dst.x) * c_RGB2YCrCbCoeffs_f[3] + ColorChannel::half(); + dst.z = (src[bidx] - dst.x) * c_RGB2YCrCbCoeffs_f[4] + ColorChannel::half(); + } + + template struct RGB2YCrCb + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator ()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + RGB2YCrCbConvert(&src.x, dst); + return dst; + } + __host__ __device__ __forceinline__ RGB2YCrCb() {} + __host__ __device__ __forceinline__ RGB2YCrCb(const RGB2YCrCb&) {} + }; + + template struct RGB2YCrCb : unary_function + { + __device__ __forceinline__ uint operator ()(uint src) const + { + return RGB2YCrCbConvert(src); + } + + __host__ __device__ __forceinline__ RGB2YCrCb() {} + __host__ __device__ __forceinline__ RGB2YCrCb(const RGB2YCrCb&) {} + }; + } + +#define OPENCV_CUDA_IMPLEMENT_RGB2YCrCb_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::cuda::device::color_detail::RGB2YCrCb functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + namespace color_detail + { + __constant__ float c_YCrCb2RGBCoeffs_f[5] = {1.403f, -0.714f, -0.344f, 1.773f}; + __constant__ int c_YCrCb2RGBCoeffs_i[5] = {22987, -11698, -5636, 29049}; + + template static __device__ void YCrCb2RGBConvert(const T& src, D* dst) + { + const int b = src.x + CV_DESCALE((src.z - ColorChannel::half()) * c_YCrCb2RGBCoeffs_i[3], yuv_shift); + const int g = src.x + CV_DESCALE((src.z - ColorChannel::half()) * c_YCrCb2RGBCoeffs_i[2] + (src.y - ColorChannel::half()) * c_YCrCb2RGBCoeffs_i[1], yuv_shift); + const int r = src.x + CV_DESCALE((src.y - ColorChannel::half()) * c_YCrCb2RGBCoeffs_i[0], yuv_shift); + + dst[bidx] = saturate_cast(b); + dst[1] = saturate_cast(g); + dst[bidx^2] = saturate_cast(r); + } + + template static __device__ uint YCrCb2RGBConvert(uint src) + { + const int x = 0xff & (src); + const int y = 0xff & (src >> 8); + const int z = 0xff & (src >> 16); + + const int b = x + CV_DESCALE((z - ColorChannel::half()) * c_YCrCb2RGBCoeffs_i[3], yuv_shift); + const int g = x + CV_DESCALE((z - ColorChannel::half()) * c_YCrCb2RGBCoeffs_i[2] + (y - ColorChannel::half()) * c_YCrCb2RGBCoeffs_i[1], yuv_shift); + const int r = x + CV_DESCALE((y - ColorChannel::half()) * c_YCrCb2RGBCoeffs_i[0], yuv_shift); + + uint dst = 0xffu << 24; + + dst |= saturate_cast(b) << (bidx * 8); + dst |= saturate_cast(g) << 8; + dst |= saturate_cast(r) << ((bidx ^ 2) * 8); + + return dst; + } + + template __device__ __forceinline__ void YCrCb2RGBConvert(const T& src, float* dst) + { + dst[bidx] = src.x + (src.z - ColorChannel::half()) * c_YCrCb2RGBCoeffs_f[3]; + dst[1] = src.x + (src.z - ColorChannel::half()) * c_YCrCb2RGBCoeffs_f[2] + (src.y - ColorChannel::half()) * c_YCrCb2RGBCoeffs_f[1]; + dst[bidx^2] = src.x + (src.y - ColorChannel::half()) * c_YCrCb2RGBCoeffs_f[0]; + } + + template struct YCrCb2RGB + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ 
__forceinline__ typename TypeVec::vec_type operator ()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + YCrCb2RGBConvert(src, &dst.x); + setAlpha(dst, ColorChannel::max()); + + return dst; + } + __host__ __device__ __forceinline__ YCrCb2RGB() {} + __host__ __device__ __forceinline__ YCrCb2RGB(const YCrCb2RGB&) {} + }; + + template struct YCrCb2RGB : unary_function + { + __device__ __forceinline__ uint operator ()(uint src) const + { + return YCrCb2RGBConvert(src); + } + __host__ __device__ __forceinline__ YCrCb2RGB() {} + __host__ __device__ __forceinline__ YCrCb2RGB(const YCrCb2RGB&) {} + }; + } + +#define OPENCV_CUDA_IMPLEMENT_YCrCb2RGB_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::cuda::device::color_detail::YCrCb2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +////////////////////////////////////// RGB <-> XYZ /////////////////////////////////////// + + namespace color_detail + { + __constant__ float c_RGB2XYZ_D65f[9] = { 0.412453f, 0.357580f, 0.180423f, 0.212671f, 0.715160f, 0.072169f, 0.019334f, 0.119193f, 0.950227f }; + __constant__ int c_RGB2XYZ_D65i[9] = { 1689, 1465, 739, 871, 2929, 296, 79, 488, 3892 }; + + template static __device__ __forceinline__ void RGB2XYZConvert(const T* src, D& dst) + { + dst.z = saturate_cast(CV_DESCALE(src[bidx^2] * c_RGB2XYZ_D65i[6] + src[1] * c_RGB2XYZ_D65i[7] + src[bidx] * c_RGB2XYZ_D65i[8], xyz_shift)); + dst.x = saturate_cast(CV_DESCALE(src[bidx^2] * c_RGB2XYZ_D65i[0] + src[1] * c_RGB2XYZ_D65i[1] + src[bidx] * c_RGB2XYZ_D65i[2], xyz_shift)); + dst.y = saturate_cast(CV_DESCALE(src[bidx^2] * c_RGB2XYZ_D65i[3] + src[1] * c_RGB2XYZ_D65i[4] + src[bidx] * c_RGB2XYZ_D65i[5], xyz_shift)); + } + + template static __device__ __forceinline__ uint RGB2XYZConvert(uint src) + { + const uint b = 0xffu & (src >> (bidx * 8)); + const uint g = 0xffu & (src >> 8); + const uint r = 0xffu & (src >> ((bidx ^ 2) * 8)); + + const uint x = saturate_cast(CV_DESCALE(r * c_RGB2XYZ_D65i[0] + g * c_RGB2XYZ_D65i[1] + b * c_RGB2XYZ_D65i[2], xyz_shift)); + const uint y = saturate_cast(CV_DESCALE(r * c_RGB2XYZ_D65i[3] + g * c_RGB2XYZ_D65i[4] + b * c_RGB2XYZ_D65i[5], xyz_shift)); + const uint z = saturate_cast(CV_DESCALE(r * c_RGB2XYZ_D65i[6] + g * c_RGB2XYZ_D65i[7] + b * c_RGB2XYZ_D65i[8], xyz_shift)); + + uint dst = 0; + + dst |= x; + dst |= y << 8; + dst |= z << 16; + + return dst; + } + + template static __device__ __forceinline__ void RGB2XYZConvert(const float* src, D& dst) + { + dst.x = src[bidx^2] * c_RGB2XYZ_D65f[0] + src[1] * c_RGB2XYZ_D65f[1] + src[bidx] * c_RGB2XYZ_D65f[2]; + dst.y = src[bidx^2] * c_RGB2XYZ_D65f[3] + src[1] * c_RGB2XYZ_D65f[4] + src[bidx] * c_RGB2XYZ_D65f[5]; + dst.z = src[bidx^2] * c_RGB2XYZ_D65f[6] + src[1] * c_RGB2XYZ_D65f[7] + src[bidx] * c_RGB2XYZ_D65f[8]; + } + + template struct RGB2XYZ + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + RGB2XYZConvert(&src.x, dst); + + return dst; + } + __host__ __device__ __forceinline__ RGB2XYZ() {} + __host__ __device__ __forceinline__ RGB2XYZ(const RGB2XYZ&) {} + }; + + template struct RGB2XYZ : unary_function + { + __device__ __forceinline__ uint operator()(uint src) const + { + return RGB2XYZConvert(src); + } + __host__ __device__ __forceinline__ RGB2XYZ() {} + __host__ 
__device__ __forceinline__ RGB2XYZ(const RGB2XYZ&) {} + }; + } + +#define OPENCV_CUDA_IMPLEMENT_RGB2XYZ_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::cuda::device::color_detail::RGB2XYZ functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + namespace color_detail + { + __constant__ float c_XYZ2sRGB_D65f[9] = { 3.240479f, -1.53715f, -0.498535f, -0.969256f, 1.875991f, 0.041556f, 0.055648f, -0.204043f, 1.057311f }; + __constant__ int c_XYZ2sRGB_D65i[9] = { 13273, -6296, -2042, -3970, 7684, 170, 228, -836, 4331 }; + + template static __device__ __forceinline__ void XYZ2RGBConvert(const T& src, D* dst) + { + dst[bidx^2] = saturate_cast(CV_DESCALE(src.x * c_XYZ2sRGB_D65i[0] + src.y * c_XYZ2sRGB_D65i[1] + src.z * c_XYZ2sRGB_D65i[2], xyz_shift)); + dst[1] = saturate_cast(CV_DESCALE(src.x * c_XYZ2sRGB_D65i[3] + src.y * c_XYZ2sRGB_D65i[4] + src.z * c_XYZ2sRGB_D65i[5], xyz_shift)); + dst[bidx] = saturate_cast(CV_DESCALE(src.x * c_XYZ2sRGB_D65i[6] + src.y * c_XYZ2sRGB_D65i[7] + src.z * c_XYZ2sRGB_D65i[8], xyz_shift)); + } + + template static __device__ __forceinline__ uint XYZ2RGBConvert(uint src) + { + const int x = 0xff & src; + const int y = 0xff & (src >> 8); + const int z = 0xff & (src >> 16); + + const uint r = saturate_cast(CV_DESCALE(x * c_XYZ2sRGB_D65i[0] + y * c_XYZ2sRGB_D65i[1] + z * c_XYZ2sRGB_D65i[2], xyz_shift)); + const uint g = saturate_cast(CV_DESCALE(x * c_XYZ2sRGB_D65i[3] + y * c_XYZ2sRGB_D65i[4] + z * c_XYZ2sRGB_D65i[5], xyz_shift)); + const uint b = saturate_cast(CV_DESCALE(x * c_XYZ2sRGB_D65i[6] + y * c_XYZ2sRGB_D65i[7] + z * c_XYZ2sRGB_D65i[8], xyz_shift)); + + uint dst = 0xffu << 24; + + dst |= b << (bidx * 8); + dst |= g << 8; + dst |= r << ((bidx ^ 2) * 8); + + return dst; + } + + template static __device__ __forceinline__ void XYZ2RGBConvert(const T& src, float* dst) + { + dst[bidx^2] = src.x * c_XYZ2sRGB_D65f[0] + src.y * c_XYZ2sRGB_D65f[1] + src.z * c_XYZ2sRGB_D65f[2]; + dst[1] = src.x * c_XYZ2sRGB_D65f[3] + src.y * c_XYZ2sRGB_D65f[4] + src.z * c_XYZ2sRGB_D65f[5]; + dst[bidx] = src.x * c_XYZ2sRGB_D65f[6] + src.y * c_XYZ2sRGB_D65f[7] + src.z * c_XYZ2sRGB_D65f[8]; + } + + template struct XYZ2RGB + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + XYZ2RGBConvert(src, &dst.x); + setAlpha(dst, ColorChannel::max()); + + return dst; + } + __host__ __device__ __forceinline__ XYZ2RGB() {} + __host__ __device__ __forceinline__ XYZ2RGB(const XYZ2RGB&) {} + }; + + template struct XYZ2RGB : unary_function + { + __device__ __forceinline__ uint operator()(uint src) const + { + return XYZ2RGBConvert(src); + } + __host__ __device__ __forceinline__ XYZ2RGB() {} + __host__ __device__ __forceinline__ XYZ2RGB(const XYZ2RGB&) {} + }; + } + +#define OPENCV_CUDA_IMPLEMENT_XYZ2RGB_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::cuda::device::color_detail::XYZ2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +////////////////////////////////////// RGB <-> HSV /////////////////////////////////////// + + namespace color_detail + { + __constant__ int c_HsvDivTable [256] = {0, 1044480, 522240, 348160, 261120, 208896, 174080, 149211, 130560, 116053, 104448, 94953, 87040, 80345, 
74606, 69632, 65280, 61440, 58027, 54973, 52224, 49737, 47476, 45412, 43520, 41779, 40172, 38684, 37303, 36017, 34816, 33693, 32640, 31651, 30720, 29842, 29013, 28229, 27486, 26782, 26112, 25475, 24869, 24290, 23738, 23211, 22706, 22223, 21760, 21316, 20890, 20480, 20086, 19707, 19342, 18991, 18651, 18324, 18008, 17703, 17408, 17123, 16846, 16579, 16320, 16069, 15825, 15589, 15360, 15137, 14921, 14711, 14507, 14308, 14115, 13926, 13743, 13565, 13391, 13221, 13056, 12895, 12738, 12584, 12434, 12288, 12145, 12006, 11869, 11736, 11605, 11478, 11353, 11231, 11111, 10995, 10880, 10768, 10658, 10550, 10445, 10341, 10240, 10141, 10043, 9947, 9854, 9761, 9671, 9582, 9495, 9410, 9326, 9243, 9162, 9082, 9004, 8927, 8852, 8777, 8704, 8632, 8561, 8492, 8423, 8356, 8290, 8224, 8160, 8097, 8034, 7973, 7913, 7853, 7795, 7737, 7680, 7624, 7569, 7514, 7461, 7408, 7355, 7304, 7253, 7203, 7154, 7105, 7057, 7010, 6963, 6917, 6872, 6827, 6782, 6739, 6695, 6653, 6611, 6569, 6528, 6487, 6447, 6408, 6369, 6330, 6292, 6254, 6217, 6180, 6144, 6108, 6073, 6037, 6003, 5968, 5935, 5901, 5868, 5835, 5803, 5771, 5739, 5708, 5677, 5646, 5615, 5585, 5556, 5526, 5497, 5468, 5440, 5412, 5384, 5356, 5329, 5302, 5275, 5249, 5222, 5196, 5171, 5145, 5120, 5095, 5070, 5046, 5022, 4998, 4974, 4950, 4927, 4904, 4881, 4858, 4836, 4813, 4791, 4769, 4748, 4726, 4705, 4684, 4663, 4642, 4622, 4601, 4581, 4561, 4541, 4522, 4502, 4483, 4464, 4445, 4426, 4407, 4389, 4370, 4352, 4334, 4316, 4298, 4281, 4263, 4246, 4229, 4212, 4195, 4178, 4161, 4145, 4128, 4112, 4096}; + __constant__ int c_HsvDivTable180[256] = {0, 122880, 61440, 40960, 30720, 24576, 20480, 17554, 15360, 13653, 12288, 11171, 10240, 9452, 8777, 8192, 7680, 7228, 6827, 6467, 6144, 5851, 5585, 5343, 5120, 4915, 4726, 4551, 4389, 4237, 4096, 3964, 3840, 3724, 3614, 3511, 3413, 3321, 3234, 3151, 3072, 2997, 2926, 2858, 2793, 2731, 2671, 2614, 2560, 2508, 2458, 2409, 2363, 2318, 2276, 2234, 2194, 2156, 2119, 2083, 2048, 2014, 1982, 1950, 1920, 1890, 1862, 1834, 1807, 1781, 1755, 1731, 1707, 1683, 1661, 1638, 1617, 1596, 1575, 1555, 1536, 1517, 1499, 1480, 1463, 1446, 1429, 1412, 1396, 1381, 1365, 1350, 1336, 1321, 1307, 1293, 1280, 1267, 1254, 1241, 1229, 1217, 1205, 1193, 1182, 1170, 1159, 1148, 1138, 1127, 1117, 1107, 1097, 1087, 1078, 1069, 1059, 1050, 1041, 1033, 1024, 1016, 1007, 999, 991, 983, 975, 968, 960, 953, 945, 938, 931, 924, 917, 910, 904, 897, 890, 884, 878, 871, 865, 859, 853, 847, 842, 836, 830, 825, 819, 814, 808, 803, 798, 793, 788, 783, 778, 773, 768, 763, 759, 754, 749, 745, 740, 736, 731, 727, 723, 719, 714, 710, 706, 702, 698, 694, 690, 686, 683, 679, 675, 671, 668, 664, 661, 657, 654, 650, 647, 643, 640, 637, 633, 630, 627, 624, 621, 617, 614, 611, 608, 605, 602, 599, 597, 594, 591, 588, 585, 582, 580, 577, 574, 572, 569, 566, 564, 561, 559, 556, 554, 551, 549, 546, 544, 541, 539, 537, 534, 532, 530, 527, 525, 523, 521, 518, 516, 514, 512, 510, 508, 506, 504, 502, 500, 497, 495, 493, 492, 490, 488, 486, 484, 482}; + __constant__ int c_HsvDivTable256[256] = {0, 174763, 87381, 58254, 43691, 34953, 29127, 24966, 21845, 19418, 17476, 15888, 14564, 13443, 12483, 11651, 10923, 10280, 9709, 9198, 8738, 8322, 7944, 7598, 7282, 6991, 6722, 6473, 6242, 6026, 5825, 5638, 5461, 5296, 5140, 4993, 4855, 4723, 4599, 4481, 4369, 4263, 4161, 4064, 3972, 3884, 3799, 3718, 3641, 3567, 3495, 3427, 3361, 3297, 3236, 3178, 3121, 3066, 3013, 2962, 2913, 2865, 2819, 2774, 2731, 2689, 2648, 2608, 2570, 2533, 2497, 2461, 2427, 2394, 2362, 2330, 2300, 2270, 2241, 2212, 2185, 2158, 
2131, 2106, 2081, 2056, 2032, 2009, 1986, 1964, 1942, 1920, 1900, 1879, 1859, 1840, 1820, 1802, 1783, 1765, 1748, 1730, 1713, 1697, 1680, 1664, 1649, 1633, 1618, 1603, 1589, 1574, 1560, 1547, 1533, 1520, 1507, 1494, 1481, 1469, 1456, 1444, 1432, 1421, 1409, 1398, 1387, 1376, 1365, 1355, 1344, 1334, 1324, 1314, 1304, 1295, 1285, 1276, 1266, 1257, 1248, 1239, 1231, 1222, 1214, 1205, 1197, 1189, 1181, 1173, 1165, 1157, 1150, 1142, 1135, 1128, 1120, 1113, 1106, 1099, 1092, 1085, 1079, 1072, 1066, 1059, 1053, 1046, 1040, 1034, 1028, 1022, 1016, 1010, 1004, 999, 993, 987, 982, 976, 971, 966, 960, 955, 950, 945, 940, 935, 930, 925, 920, 915, 910, 906, 901, 896, 892, 887, 883, 878, 874, 869, 865, 861, 857, 853, 848, 844, 840, 836, 832, 828, 824, 820, 817, 813, 809, 805, 802, 798, 794, 791, 787, 784, 780, 777, 773, 770, 767, 763, 760, 757, 753, 750, 747, 744, 741, 737, 734, 731, 728, 725, 722, 719, 716, 713, 710, 708, 705, 702, 699, 696, 694, 691, 688, 685}; + + template static __device__ void RGB2HSVConvert(const uchar* src, D& dst) + { + const int hsv_shift = 12; + const int* hdiv_table = hr == 180 ? c_HsvDivTable180 : c_HsvDivTable256; + + int b = src[bidx], g = src[1], r = src[bidx^2]; + int h, s, v = b; + int vmin = b, diff; + int vr, vg; + + v = ::max(v, g); + v = ::max(v, r); + vmin = ::min(vmin, g); + vmin = ::min(vmin, r); + + diff = v - vmin; + vr = (v == r) * -1; + vg = (v == g) * -1; + + s = (diff * c_HsvDivTable[v] + (1 << (hsv_shift-1))) >> hsv_shift; + h = (vr & (g - b)) + (~vr & ((vg & (b - r + 2 * diff)) + ((~vg) & (r - g + 4 * diff)))); + h = (h * hdiv_table[diff] + (1 << (hsv_shift-1))) >> hsv_shift; + h += (h < 0) * hr; + + dst.x = saturate_cast(h); + dst.y = (uchar)s; + dst.z = (uchar)v; + } + + template static __device__ uint RGB2HSVConvert(uint src) + { + const int hsv_shift = 12; + const int* hdiv_table = hr == 180 ? c_HsvDivTable180 : c_HsvDivTable256; + + const int b = 0xff & (src >> (bidx * 8)); + const int g = 0xff & (src >> 8); + const int r = 0xff & (src >> ((bidx ^ 2) * 8)); + + int h, s, v = b; + int vmin = b, diff; + int vr, vg; + + v = ::max(v, g); + v = ::max(v, r); + vmin = ::min(vmin, g); + vmin = ::min(vmin, r); + + diff = v - vmin; + vr = (v == r) * -1; + vg = (v == g) * -1; + + s = (diff * c_HsvDivTable[v] + (1 << (hsv_shift-1))) >> hsv_shift; + h = (vr & (g - b)) + (~vr & ((vg & (b - r + 2 * diff)) + ((~vg) & (r - g + 4 * diff)))); + h = (h * hdiv_table[diff] + (1 << (hsv_shift-1))) >> hsv_shift; + h += (h < 0) * hr; + + uint dst = 0; + + dst |= saturate_cast(h); + dst |= (0xffu & s) << 8; + dst |= (0xffu & v) << 16; + + return dst; + } + + template static __device__ void RGB2HSVConvert(const float* src, D& dst) + { + const float hscale = hr * (1.f / 360.f); + + float b = src[bidx], g = src[1], r = src[bidx^2]; + float h, s, v; + + float vmin, diff; + + v = vmin = r; + v = fmax(v, g); + v = fmax(v, b); + vmin = fmin(vmin, g); + vmin = fmin(vmin, b); + + diff = v - vmin; + s = diff / (float)(::fabs(v) + numeric_limits::epsilon()); + diff = (float)(60. 
/ (diff + numeric_limits::epsilon())); + + h = (v == r) * (g - b) * diff; + h += (v != r && v == g) * ((b - r) * diff + 120.f); + h += (v != r && v != g) * ((r - g) * diff + 240.f); + h += (h < 0) * 360.f; + + dst.x = h * hscale; + dst.y = s; + dst.z = v; + } + + template struct RGB2HSV + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + RGB2HSVConvert(&src.x, dst); + + return dst; + } + __host__ __device__ __forceinline__ RGB2HSV() {} + __host__ __device__ __forceinline__ RGB2HSV(const RGB2HSV&) {} + }; + + template struct RGB2HSV : unary_function + { + __device__ __forceinline__ uint operator()(uint src) const + { + return RGB2HSVConvert(src); + } + __host__ __device__ __forceinline__ RGB2HSV() {} + __host__ __device__ __forceinline__ RGB2HSV(const RGB2HSV&) {} + }; + } + +#define OPENCV_CUDA_IMPLEMENT_RGB2HSV_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::cuda::device::color_detail::RGB2HSV functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template struct name ## _full_traits \ + { \ + typedef ::cv::cuda::device::color_detail::RGB2HSV functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template <> struct name ## _traits \ + { \ + typedef ::cv::cuda::device::color_detail::RGB2HSV functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template <> struct name ## _full_traits \ + { \ + typedef ::cv::cuda::device::color_detail::RGB2HSV functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + namespace color_detail + { + __constant__ int c_HsvSectorData[6][3] = { {1,3,0}, {1,0,2}, {3,0,1}, {0,2,1}, {0,1,3}, {2,1,0} }; + + template static __device__ void HSV2RGBConvert(const T& src, float* dst) + { + const float hscale = 6.f / hr; + + float h = src.x, s = src.y, v = src.z; + float b = v, g = v, r = v; + + if (s != 0) + { + h *= hscale; + + if( h < 0 ) + do h += 6; while( h < 0 ); + else if( h >= 6 ) + do h -= 6; while( h >= 6 ); + + int sector = __float2int_rd(h); + h -= sector; + + if ( (unsigned)sector >= 6u ) + { + sector = 0; + h = 0.f; + } + + float tab[4]; + tab[0] = v; + tab[1] = v * (1.f - s); + tab[2] = v * (1.f - s * h); + tab[3] = v * (1.f - s * (1.f - h)); + + b = tab[c_HsvSectorData[sector][0]]; + g = tab[c_HsvSectorData[sector][1]]; + r = tab[c_HsvSectorData[sector][2]]; + } + + dst[bidx] = b; + dst[1] = g; + dst[bidx^2] = r; + } + + template static __device__ void HSV2RGBConvert(const T& src, uchar* dst) + { + float3 buf; + + buf.x = src.x; + buf.y = src.y * (1.f / 255.f); + buf.z = src.z * (1.f / 255.f); + + HSV2RGBConvert(buf, &buf.x); + + dst[0] = saturate_cast(buf.x * 255.f); + dst[1] = saturate_cast(buf.y * 255.f); + dst[2] = saturate_cast(buf.z * 255.f); + } + + template static __device__ uint HSV2RGBConvert(uint src) + { + float3 buf; + + buf.x = src & 0xff; + buf.y = ((src >> 8) & 0xff) * (1.f/255.f); + buf.z = ((src >> 16) & 0xff) * (1.f/255.f); + + HSV2RGBConvert(buf, &buf.x); + + uint dst = 0xffu << 24; + + dst |= saturate_cast(buf.x * 255.f); + dst |= saturate_cast(buf.y * 255.f) << 8; + dst |= saturate_cast(buf.z * 
255.f) << 16; + + return dst; + } + + template struct HSV2RGB + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + HSV2RGBConvert(src, &dst.x); + setAlpha(dst, ColorChannel::max()); + + return dst; + } + __host__ __device__ __forceinline__ HSV2RGB() {} + __host__ __device__ __forceinline__ HSV2RGB(const HSV2RGB&) {} + }; + + template struct HSV2RGB : unary_function + { + __device__ __forceinline__ uint operator()(uint src) const + { + return HSV2RGBConvert(src); + } + __host__ __device__ __forceinline__ HSV2RGB() {} + __host__ __device__ __forceinline__ HSV2RGB(const HSV2RGB&) {} + }; + } + +#define OPENCV_CUDA_IMPLEMENT_HSV2RGB_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::cuda::device::color_detail::HSV2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template struct name ## _full_traits \ + { \ + typedef ::cv::cuda::device::color_detail::HSV2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template <> struct name ## _traits \ + { \ + typedef ::cv::cuda::device::color_detail::HSV2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template <> struct name ## _full_traits \ + { \ + typedef ::cv::cuda::device::color_detail::HSV2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +/////////////////////////////////////// RGB <-> HLS //////////////////////////////////////// + + namespace color_detail + { + template static __device__ void RGB2HLSConvert(const float* src, D& dst) + { + const float hscale = hr * (1.f / 360.f); + + float b = src[bidx], g = src[1], r = src[bidx^2]; + float h = 0.f, s = 0.f, l; + float vmin, vmax, diff; + + vmax = vmin = r; + vmax = fmax(vmax, g); + vmax = fmax(vmax, b); + vmin = fmin(vmin, g); + vmin = fmin(vmin, b); + + diff = vmax - vmin; + l = (vmax + vmin) * 0.5f; + + if (diff > numeric_limits::epsilon()) + { + s = (l < 0.5f) * diff / (vmax + vmin); + s += (l >= 0.5f) * diff / (2.0f - vmax - vmin); + + diff = 60.f / diff; + + h = (vmax == r) * (g - b) * diff; + h += (vmax != r && vmax == g) * ((b - r) * diff + 120.f); + h += (vmax != r && vmax != g) * ((r - g) * diff + 240.f); + h += (h < 0.f) * 360.f; + } + + dst.x = h * hscale; + dst.y = l; + dst.z = s; + } + + template static __device__ void RGB2HLSConvert(const uchar* src, D& dst) + { + float3 buf; + + buf.x = src[0] * (1.f / 255.f); + buf.y = src[1] * (1.f / 255.f); + buf.z = src[2] * (1.f / 255.f); + + RGB2HLSConvert(&buf.x, buf); + + dst.x = saturate_cast(buf.x); + dst.y = saturate_cast(buf.y*255.f); + dst.z = saturate_cast(buf.z*255.f); + } + + template static __device__ uint RGB2HLSConvert(uint src) + { + float3 buf; + + buf.x = (0xff & src) * (1.f / 255.f); + buf.y = (0xff & (src >> 8)) * (1.f / 255.f); + buf.z = (0xff & (src >> 16)) * (1.f / 255.f); + + RGB2HLSConvert(&buf.x, buf); + + uint dst = 0xffu << 24; + + dst |= saturate_cast(buf.x); + dst |= saturate_cast(buf.y * 255.f) << 8; + dst |= saturate_cast(buf.z * 255.f) << 16; + + return dst; + } + + template struct RGB2HLS + : unary_function::vec_type, typename 
TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + RGB2HLSConvert(&src.x, dst); + + return dst; + } + __host__ __device__ __forceinline__ RGB2HLS() {} + __host__ __device__ __forceinline__ RGB2HLS(const RGB2HLS&) {} + }; + + template struct RGB2HLS : unary_function + { + __device__ __forceinline__ uint operator()(uint src) const + { + return RGB2HLSConvert(src); + } + __host__ __device__ __forceinline__ RGB2HLS() {} + __host__ __device__ __forceinline__ RGB2HLS(const RGB2HLS&) {} + }; + } + +#define OPENCV_CUDA_IMPLEMENT_RGB2HLS_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::cuda::device::color_detail::RGB2HLS functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template struct name ## _full_traits \ + { \ + typedef ::cv::cuda::device::color_detail::RGB2HLS functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template <> struct name ## _traits \ + { \ + typedef ::cv::cuda::device::color_detail::RGB2HLS functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template <> struct name ## _full_traits \ + { \ + typedef ::cv::cuda::device::color_detail::RGB2HLS functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + + namespace color_detail + { + __constant__ int c_HlsSectorData[6][3] = { {1,3,0}, {1,0,2}, {3,0,1}, {0,2,1}, {0,1,3}, {2,1,0} }; + + template static __device__ void HLS2RGBConvert(const T& src, float* dst) + { + const float hscale = 6.0f / hr; + + float h = src.x, l = src.y, s = src.z; + float b = l, g = l, r = l; + + if (s != 0) + { + float p2 = (l <= 0.5f) * l * (1 + s); + p2 += (l > 0.5f) * (l + s - l * s); + float p1 = 2 * l - p2; + + h *= hscale; + + if( h < 0 ) + do h += 6; while( h < 0 ); + else if( h >= 6 ) + do h -= 6; while( h >= 6 ); + + int sector; + sector = __float2int_rd(h); + + h -= sector; + + float tab[4]; + tab[0] = p2; + tab[1] = p1; + tab[2] = p1 + (p2 - p1) * (1 - h); + tab[3] = p1 + (p2 - p1) * h; + + b = tab[c_HlsSectorData[sector][0]]; + g = tab[c_HlsSectorData[sector][1]]; + r = tab[c_HlsSectorData[sector][2]]; + } + + dst[bidx] = b; + dst[1] = g; + dst[bidx^2] = r; + } + + template static __device__ void HLS2RGBConvert(const T& src, uchar* dst) + { + float3 buf; + + buf.x = src.x; + buf.y = src.y * (1.f / 255.f); + buf.z = src.z * (1.f / 255.f); + + HLS2RGBConvert(buf, &buf.x); + + dst[0] = saturate_cast(buf.x * 255.f); + dst[1] = saturate_cast(buf.y * 255.f); + dst[2] = saturate_cast(buf.z * 255.f); + } + + template static __device__ uint HLS2RGBConvert(uint src) + { + float3 buf; + + buf.x = 0xff & src; + buf.y = (0xff & (src >> 8)) * (1.f / 255.f); + buf.z = (0xff & (src >> 16)) * (1.f / 255.f); + + HLS2RGBConvert(buf, &buf.x); + + uint dst = 0xffu << 24; + + dst |= saturate_cast(buf.x * 255.f); + dst |= saturate_cast(buf.y * 255.f) << 8; + dst |= saturate_cast(buf.z * 255.f) << 16; + + return dst; + } + + template struct HLS2RGB + : unary_function::vec_type, typename TypeVec::vec_type> + { + __device__ __forceinline__ typename TypeVec::vec_type operator()(const typename TypeVec::vec_type& src) const + { + typename TypeVec::vec_type dst; + + 
HLS2RGBConvert(src, &dst.x); + setAlpha(dst, ColorChannel::max()); + + return dst; + } + __host__ __device__ __forceinline__ HLS2RGB() {} + __host__ __device__ __forceinline__ HLS2RGB(const HLS2RGB&) {} + }; + + template struct HLS2RGB : unary_function + { + __device__ __forceinline__ uint operator()(uint src) const + { + return HLS2RGBConvert(src); + } + __host__ __device__ __forceinline__ HLS2RGB() {} + __host__ __device__ __forceinline__ HLS2RGB(const HLS2RGB&) {} + }; + } + +#define OPENCV_CUDA_IMPLEMENT_HLS2RGB_TRAITS(name, scn, dcn, bidx) \ + template struct name ## _traits \ + { \ + typedef ::cv::cuda::device::color_detail::HLS2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template struct name ## _full_traits \ + { \ + typedef ::cv::cuda::device::color_detail::HLS2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template <> struct name ## _traits \ + { \ + typedef ::cv::cuda::device::color_detail::HLS2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; \ + template <> struct name ## _full_traits \ + { \ + typedef ::cv::cuda::device::color_detail::HLS2RGB functor_type; \ + static __host__ __device__ __forceinline__ functor_type create_functor() \ + { \ + return functor_type(); \ + } \ + }; + +///////////////////////////////////// RGB <-> Lab ///////////////////////////////////// + + namespace color_detail + { + enum + { + LAB_CBRT_TAB_SIZE = 1024, + GAMMA_TAB_SIZE = 1024, + lab_shift = xyz_shift, + gamma_shift = 3, + lab_shift2 = (lab_shift + gamma_shift), + LAB_CBRT_TAB_SIZE_B = (256 * 3 / 2 * (1 << gamma_shift)) + }; + + __constant__ ushort c_sRGBGammaTab_b[] = {0,1,1,2,2,3,4,4,5,6,6,7,8,8,9,10,11,11,12,13,14,15,16,17,19,20,21,22,24,25,26,28,29,31,33,34,36,38,40,41,43,45,47,49,51,54,56,58,60,63,65,68,70,73,75,78,81,83,86,89,92,95,98,101,105,108,111,115,118,121,125,129,132,136,140,144,147,151,155,160,164,168,172,176,181,185,190,194,199,204,209,213,218,223,228,233,239,244,249,255,260,265,271,277,282,288,294,300,306,312,318,324,331,337,343,350,356,363,370,376,383,390,397,404,411,418,426,433,440,448,455,463,471,478,486,494,502,510,518,527,535,543,552,560,569,578,586,595,604,613,622,631,641,650,659,669,678,688,698,707,717,727,737,747,757,768,778,788,799,809,820,831,842,852,863,875,886,897,908,920,931,943,954,966,978,990,1002,1014,1026,1038,1050,1063,1075,1088,1101,1113,1126,1139,1152,1165,1178,1192,1205,1218,1232,1245,1259,1273,1287,1301,1315,1329,1343,1357,1372,1386,1401,1415,1430,1445,1460,1475,1490,1505,1521,1536,1551,1567,1583,1598,1614,1630,1646,1662,1678,1695,1711,1728,1744,1761,1778,1794,1811,1828,1846,1863,1880,1897,1915,1933,1950,1968,1986,2004,2022,2040}; + + __device__ __forceinline__ int LabCbrt_b(int i) + { + float x = i * (1.f / (255.f * (1 << gamma_shift))); + return (1 << lab_shift2) * (x < 0.008856f ? x * 7.787f + 0.13793103448275862f : ::cbrtf(x)); + } + + template + __device__ __forceinline__ void RGB2LabConvert_b(const T& src, D& dst) + { + const int Lscale = (116 * 255 + 50) / 100; + const int Lshift = -((16 * 255 * (1 << lab_shift2) + 50) / 100); + + int B = blueIdx == 0 ? src.x : src.z; + int G = src.y; + int R = blueIdx == 0 ? 
src.z : src.x; + + if (srgb) + { + B = c_sRGBGammaTab_b[B]; + G = c_sRGBGammaTab_b[G]; + R = c_sRGBGammaTab_b[R]; + } + else + { + B <<= 3; + G <<= 3; + R <<= 3; + } + + int fX = LabCbrt_b(CV_DESCALE(B * 778 + G * 1541 + R * 1777, lab_shift)); + int fY = LabCbrt_b(CV_DESCALE(B * 296 + G * 2929 + R * 871, lab_shift)); + int fZ = LabCbrt_b(CV_DESCALE(B * 3575 + G * 448 + R * 73, lab_shift)); + + int L = CV_DESCALE(Lscale * fY + Lshift, lab_shift2); + int a = CV_DESCALE(500 * (fX - fY) + 128 * (1 << lab_shift2), lab_shift2); + int b = CV_DESCALE(200 * (fY - fZ) + 128 * (1 << lab_shift2), lab_shift2); + + dst.x = saturate_cast(L); + dst.y = saturate_cast(a); + dst.z = saturate_cast(b); + } + + __device__ __forceinline__ float splineInterpolate(float x, const float* tab, int n) + { + int ix = ::min(::max(int(x), 0), n-1); + x -= ix; + tab += ix * 4; + return ((tab[3] * x + tab[2]) * x + tab[1]) * x + tab[0]; + } + + __constant__ float c_sRGBGammaTab[] = {0,7.55853e-05,0.,-7.51331e-13,7.55853e-05,7.55853e-05,-2.25399e-12,3.75665e-12,0.000151171,7.55853e-05,9.01597e-12,-6.99932e-12,0.000226756,7.55853e-05,-1.1982e-11,2.41277e-12,0.000302341,7.55853e-05,-4.74369e-12,1.19001e-11,0.000377927,7.55853e-05,3.09568e-11,-2.09095e-11,0.000453512,7.55853e-05,-3.17718e-11,1.35303e-11,0.000529097,7.55853e-05,8.81905e-12,-4.10782e-12,0.000604683,7.55853e-05,-3.50439e-12,2.90097e-12,0.000680268,7.55853e-05,5.19852e-12,-7.49607e-12,0.000755853,7.55853e-05,-1.72897e-11,2.70833e-11,0.000831439,7.55854e-05,6.39602e-11,-4.26295e-11,0.000907024,7.55854e-05,-6.39282e-11,2.70193e-11,0.000982609,7.55853e-05,1.71298e-11,-7.24017e-12,0.00105819,7.55853e-05,-4.59077e-12,1.94137e-12,0.00113378,7.55853e-05,1.23333e-12,-5.25291e-13,0.00120937,7.55853e-05,-3.42545e-13,1.59799e-13,0.00128495,7.55853e-05,1.36852e-13,-1.13904e-13,0.00136054,7.55853e-05,-2.04861e-13,2.95818e-13,0.00143612,7.55853e-05,6.82594e-13,-1.06937e-12,0.00151171,7.55853e-05,-2.52551e-12,3.98166e-12,0.00158729,7.55853e-05,9.41946e-12,-1.48573e-11,0.00166288,7.55853e-05,-3.51523e-11,5.54474e-11,0.00173846,7.55854e-05,1.3119e-10,-9.0517e-11,0.00181405,7.55854e-05,-1.40361e-10,7.37899e-11,0.00188963,7.55853e-05,8.10085e-11,-8.82272e-11,0.00196522,7.55852e-05,-1.83673e-10,1.62704e-10,0.0020408,7.55853e-05,3.04438e-10,-2.13341e-10,0.00211639,7.55853e-05,-3.35586e-10,2.25e-10,0.00219197,7.55853e-05,3.39414e-10,-2.20997e-10,0.00226756,7.55853e-05,-3.23576e-10,1.93326e-10,0.00234315,7.55853e-05,2.564e-10,-8.66446e-11,0.00241873,7.55855e-05,-3.53328e-12,-7.9578e-11,0.00249432,7.55853e-05,-2.42267e-10,1.72126e-10,0.0025699,7.55853e-05,2.74111e-10,-1.43265e-10,0.00264549,7.55854e-05,-1.55683e-10,-6.47292e-11,0.00272107,7.55849e-05,-3.4987e-10,8.67842e-10,0.00279666,7.55868e-05,2.25366e-09,-3.8723e-09,0.00287224,7.55797e-05,-9.36325e-09,1.5087e-08,0.00294783,7.56063e-05,3.58978e-08,-5.69415e-08,0.00302341,7.55072e-05,-1.34927e-07,2.13144e-07,0.003099,7.58768e-05,5.04507e-07,1.38713e-07,0.00317552,7.7302e-05,9.20646e-07,-1.55186e-07,0.00325359,7.86777e-05,4.55087e-07,4.26813e-08,0.00333276,7.97159e-05,5.83131e-07,-1.06495e-08,0.00341305,8.08502e-05,5.51182e-07,3.87467e-09,0.00349446,8.19642e-05,5.62806e-07,-1.92586e-10,0.00357698,8.30892e-05,5.62228e-07,1.0866e-09,0.00366063,8.4217e-05,5.65488e-07,5.02818e-10,0.00374542,8.53494e-05,5.66997e-07,8.60211e-10,0.00383133,8.6486e-05,5.69577e-07,7.13044e-10,0.00391839,8.76273e-05,5.71716e-07,4.78527e-10,0.00400659,8.87722e-05,5.73152e-07,1.09818e-09,0.00409594,8.99218e-05,5.76447e-07,2.50964e-10,0.00418644,9.10754e-05,5.772e-07
,1.15762e-09,0.00427809,9.22333e-05,5.80672e-07,2.40865e-10,0.0043709,9.33954e-05,5.81395e-07,1.13854e-09,0.00446488,9.45616e-05,5.84811e-07,3.27267e-10,0.00456003,9.57322e-05,5.85792e-07,8.1197e-10,0.00465635,9.69062e-05,5.88228e-07,6.15823e-10,0.00475384,9.80845e-05,5.90076e-07,9.15747e-10,0.00485252,9.92674e-05,5.92823e-07,3.778e-10,0.00495238,0.000100454,5.93956e-07,8.32623e-10,0.00505343,0.000101645,5.96454e-07,4.82695e-10,0.00515567,0.000102839,5.97902e-07,9.61904e-10,0.00525911,0.000104038,6.00788e-07,3.26281e-10,0.00536375,0.00010524,6.01767e-07,9.926e-10,0.00546959,0.000106447,6.04745e-07,3.59933e-10,0.00557664,0.000107657,6.05824e-07,8.2728e-10,0.0056849,0.000108871,6.08306e-07,5.21898e-10,0.00579438,0.00011009,6.09872e-07,8.10492e-10,0.00590508,0.000111312,6.12303e-07,4.27046e-10,0.00601701,0.000112538,6.13585e-07,7.40878e-10,0.00613016,0.000113767,6.15807e-07,8.00469e-10,0.00624454,0.000115001,6.18209e-07,2.48178e-10,0.00636016,0.000116238,6.18953e-07,1.00073e-09,0.00647702,0.000117479,6.21955e-07,4.05654e-10,0.00659512,0.000118724,6.23172e-07,6.36192e-10,0.00671447,0.000119973,6.25081e-07,7.74927e-10,0.00683507,0.000121225,6.27406e-07,4.54975e-10,0.00695692,0.000122481,6.28771e-07,6.64841e-10,0.00708003,0.000123741,6.30765e-07,6.10972e-10,0.00720441,0.000125004,6.32598e-07,6.16543e-10,0.00733004,0.000126271,6.34448e-07,6.48204e-10,0.00745695,0.000127542,6.36392e-07,5.15835e-10,0.00758513,0.000128816,6.3794e-07,5.48103e-10,0.00771458,0.000130094,6.39584e-07,1.01706e-09,0.00784532,0.000131376,6.42635e-07,4.0283e-11,0.00797734,0.000132661,6.42756e-07,6.84471e-10,0.00811064,0.000133949,6.4481e-07,9.47144e-10,0.00824524,0.000135241,6.47651e-07,1.83472e-10,0.00838112,0.000136537,6.48201e-07,1.11296e-09,0.00851831,0.000137837,6.5154e-07,2.13163e-11,0.0086568,0.00013914,6.51604e-07,6.64462e-10,0.00879659,0.000140445,6.53598e-07,1.04613e-09,0.00893769,0.000141756,6.56736e-07,-1.92377e-10,0.0090801,0.000143069,6.56159e-07,1.58601e-09,0.00922383,0.000144386,6.60917e-07,-5.63754e-10,0.00936888,0.000145706,6.59226e-07,1.60033e-09,0.00951524,0.000147029,6.64027e-07,-2.49543e-10,0.00966294,0.000148356,6.63278e-07,1.26043e-09,0.00981196,0.000149687,6.67059e-07,-1.35572e-10,0.00996231,0.00015102,6.66653e-07,1.14458e-09,0.010114,0.000152357,6.70086e-07,2.13864e-10,0.010267,0.000153698,6.70728e-07,7.93856e-10,0.0104214,0.000155042,6.73109e-07,3.36077e-10,0.0105771,0.000156389,6.74118e-07,6.55765e-10,0.0107342,0.000157739,6.76085e-07,7.66211e-10,0.0108926,0.000159094,6.78384e-07,4.66116e-12,0.0110524,0.000160451,6.78398e-07,1.07775e-09,0.0112135,0.000161811,6.81631e-07,3.41023e-10,0.011376,0.000163175,6.82654e-07,3.5205e-10,0.0115398,0.000164541,6.8371e-07,1.04473e-09,0.0117051,0.000165912,6.86844e-07,1.25757e-10,0.0118717,0.000167286,6.87222e-07,3.14818e-10,0.0120396,0.000168661,6.88166e-07,1.40886e-09,0.012209,0.000170042,6.92393e-07,-3.62244e-10,0.0123797,0.000171425,6.91306e-07,9.71397e-10,0.0125518,0.000172811,6.9422e-07,2.02003e-10,0.0127253,0.0001742,6.94826e-07,1.01448e-09,0.0129002,0.000175593,6.97869e-07,3.96653e-10,0.0130765,0.00017699,6.99059e-07,1.92927e-10,0.0132542,0.000178388,6.99638e-07,6.94305e-10,0.0134333,0.00017979,7.01721e-07,7.55108e-10,0.0136138,0.000181195,7.03986e-07,1.05918e-11,0.0137957,0.000182603,7.04018e-07,1.06513e-09,0.013979,0.000184015,7.07214e-07,3.85512e-10,0.0141637,0.00018543,7.0837e-07,1.86769e-10,0.0143499,0.000186848,7.0893e-07,7.30116e-10,0.0145374,0.000188268,7.11121e-07,6.17983e-10,0.0147264,0.000189692,7.12975e-07,5.23282e-10,0.0149168,0.000191119,7.14
545e-07,8.28398e-11,0.0151087,0.000192549,7.14793e-07,1.0081e-09,0.0153019,0.000193981,7.17817e-07,5.41244e-10,0.0154966,0.000195418,7.19441e-07,-3.7907e-10,0.0156928,0.000196856,7.18304e-07,1.90641e-09,0.0158903,0.000198298,7.24023e-07,-7.27387e-10,0.0160893,0.000199744,7.21841e-07,1.00317e-09,0.0162898,0.000201191,7.24851e-07,4.39949e-10,0.0164917,0.000202642,7.2617e-07,9.6234e-10,0.0166951,0.000204097,7.29057e-07,-5.64019e-10,0.0168999,0.000205554,7.27365e-07,1.29374e-09,0.0171062,0.000207012,7.31247e-07,9.77025e-10,0.017314,0.000208478,7.34178e-07,-1.47651e-09,0.0175232,0.000209942,7.29748e-07,3.06636e-09,0.0177338,0.00021141,7.38947e-07,-1.47573e-09,0.017946,0.000212884,7.3452e-07,9.7386e-10,0.0181596,0.000214356,7.37442e-07,1.30562e-09,0.0183747,0.000215835,7.41358e-07,-6.08376e-10,0.0185913,0.000217315,7.39533e-07,1.12785e-09,0.0188093,0.000218798,7.42917e-07,-1.77711e-10,0.0190289,0.000220283,7.42384e-07,1.44562e-09,0.0192499,0.000221772,7.46721e-07,-1.68825e-11,0.0194724,0.000223266,7.4667e-07,4.84533e-10,0.0196964,0.000224761,7.48124e-07,-5.85298e-11,0.0199219,0.000226257,7.47948e-07,1.61217e-09,0.0201489,0.000227757,7.52785e-07,-8.02136e-10,0.0203775,0.00022926,7.50378e-07,1.59637e-09,0.0206075,0.000230766,7.55167e-07,4.47168e-12,0.020839,0.000232276,7.55181e-07,2.48387e-10,0.021072,0.000233787,7.55926e-07,8.6474e-10,0.0213066,0.000235302,7.5852e-07,1.78299e-11,0.0215426,0.000236819,7.58573e-07,9.26567e-10,0.0217802,0.000238339,7.61353e-07,1.34529e-12,0.0220193,0.000239862,7.61357e-07,9.30659e-10,0.0222599,0.000241387,7.64149e-07,1.34529e-12,0.0225021,0.000242915,7.64153e-07,9.26567e-10,0.0227458,0.000244447,7.66933e-07,1.76215e-11,0.022991,0.00024598,7.66986e-07,8.65536e-10,0.0232377,0.000247517,7.69582e-07,2.45677e-10,0.023486,0.000249057,7.70319e-07,1.44193e-11,0.0237358,0.000250598,7.70363e-07,1.55918e-09,0.0239872,0.000252143,7.7504e-07,-6.63173e-10,0.0242401,0.000253691,7.73051e-07,1.09357e-09,0.0244946,0.000255241,7.76331e-07,1.41919e-11,0.0247506,0.000256793,7.76374e-07,7.12248e-10,0.0250082,0.000258348,7.78511e-07,8.62049e-10,0.0252673,0.000259908,7.81097e-07,-4.35061e-10,0.025528,0.000261469,7.79792e-07,8.7825e-10,0.0257902,0.000263031,7.82426e-07,6.47181e-10,0.0260541,0.000264598,7.84368e-07,2.58448e-10,0.0263194,0.000266167,7.85143e-07,1.81558e-10,0.0265864,0.000267738,7.85688e-07,8.78041e-10,0.0268549,0.000269312,7.88322e-07,3.15102e-11,0.027125,0.000270889,7.88417e-07,8.58525e-10,0.0273967,0.000272468,7.90992e-07,2.59812e-10,0.02767,0.000274051,7.91772e-07,-3.5224e-11,0.0279448,0.000275634,7.91666e-07,1.74377e-09,0.0282212,0.000277223,7.96897e-07,-1.35196e-09,0.0284992,0.000278813,7.92841e-07,1.80141e-09,0.0287788,0.000280404,7.98246e-07,-2.65629e-10,0.0290601,0.000281999,7.97449e-07,1.12374e-09,0.0293428,0.000283598,8.0082e-07,-5.04106e-10,0.0296272,0.000285198,7.99308e-07,8.92764e-10,0.0299132,0.000286799,8.01986e-07,6.58379e-10,0.0302008,0.000288405,8.03961e-07,1.98971e-10,0.0304901,0.000290014,8.04558e-07,4.08382e-10,0.0307809,0.000291624,8.05783e-07,3.01839e-11,0.0310733,0.000293236,8.05874e-07,1.33343e-09,0.0313673,0.000294851,8.09874e-07,2.2419e-10,0.031663,0.000296472,8.10547e-07,-3.67606e-10,0.0319603,0.000298092,8.09444e-07,1.24624e-09,0.0322592,0.000299714,8.13182e-07,-8.92025e-10,0.0325597,0.000301338,8.10506e-07,2.32183e-09,0.0328619,0.000302966,8.17472e-07,-9.44719e-10,0.0331657,0.000304598,8.14638e-07,1.45703e-09,0.0334711,0.000306232,8.19009e-07,-1.15805e-09,0.0337781,0.000307866,8.15535e-07,3.17507e-09,0.0340868,0.000309507,8.2506e-07,-4.09161e-09,0
.0343971,0.000311145,8.12785e-07,5.74079e-09,0.0347091,0.000312788,8.30007e-07,-3.97034e-09,0.0350227,0.000314436,8.18096e-07,2.68985e-09,0.035338,0.00031608,8.26166e-07,6.61676e-10,0.0356549,0.000317734,8.28151e-07,-1.61123e-09,0.0359734,0.000319386,8.23317e-07,2.05786e-09,0.0362936,0.000321038,8.29491e-07,8.30388e-10,0.0366155,0.0003227,8.31982e-07,-1.65424e-09,0.036939,0.000324359,8.27019e-07,2.06129e-09,0.0372642,0.000326019,8.33203e-07,8.59719e-10,0.0375911,0.000327688,8.35782e-07,-1.77488e-09,0.0379196,0.000329354,8.30458e-07,2.51464e-09,0.0382498,0.000331023,8.38002e-07,-8.33135e-10,0.0385817,0.000332696,8.35502e-07,8.17825e-10,0.0389152,0.00033437,8.37956e-07,1.28718e-09,0.0392504,0.00033605,8.41817e-07,-2.2413e-09,0.0395873,0.000337727,8.35093e-07,3.95265e-09,0.0399258,0.000339409,8.46951e-07,-2.39332e-09,0.0402661,0.000341095,8.39771e-07,1.89533e-09,0.040608,0.000342781,8.45457e-07,-1.46271e-09,0.0409517,0.000344467,8.41069e-07,3.95554e-09,0.041297,0.000346161,8.52936e-07,-3.18369e-09,0.041644,0.000347857,8.43385e-07,1.32873e-09,0.0419927,0.000349548,8.47371e-07,1.59402e-09,0.0423431,0.000351248,8.52153e-07,-2.54336e-10,0.0426952,0.000352951,8.5139e-07,-5.76676e-10,0.043049,0.000354652,8.4966e-07,2.56114e-09,0.0434045,0.000356359,8.57343e-07,-2.21744e-09,0.0437617,0.000358067,8.50691e-07,2.58344e-09,0.0441206,0.000359776,8.58441e-07,-6.65826e-10,0.0444813,0.000361491,8.56444e-07,7.99218e-11,0.0448436,0.000363204,8.56684e-07,3.46063e-10,0.0452077,0.000364919,8.57722e-07,2.26116e-09,0.0455734,0.000366641,8.64505e-07,-1.94005e-09,0.045941,0.000368364,8.58685e-07,1.77384e-09,0.0463102,0.000370087,8.64007e-07,-1.43005e-09,0.0466811,0.000371811,8.59717e-07,3.94634e-09,0.0470538,0.000373542,8.71556e-07,-3.17946e-09,0.0474282,0.000375276,8.62017e-07,1.32104e-09,0.0478043,0.000377003,8.6598e-07,1.62045e-09,0.0481822,0.00037874,8.70842e-07,-3.52297e-10,0.0485618,0.000380481,8.69785e-07,-2.11211e-10,0.0489432,0.00038222,8.69151e-07,1.19716e-09,0.0493263,0.000383962,8.72743e-07,-8.52026e-10,0.0497111,0.000385705,8.70187e-07,2.21092e-09,0.0500977,0.000387452,8.76819e-07,-5.41339e-10,0.050486,0.000389204,8.75195e-07,-4.5361e-11,0.0508761,0.000390954,8.75059e-07,7.22669e-10,0.0512679,0.000392706,8.77227e-07,8.79936e-10,0.0516615,0.000394463,8.79867e-07,-5.17048e-10,0.0520568,0.000396222,8.78316e-07,1.18833e-09,0.0524539,0.000397982,8.81881e-07,-5.11022e-10,0.0528528,0.000399744,8.80348e-07,8.55683e-10,0.0532534,0.000401507,8.82915e-07,8.13562e-10,0.0536558,0.000403276,8.85356e-07,-3.84603e-10,0.05406,0.000405045,8.84202e-07,7.24962e-10,0.0544659,0.000406816,8.86377e-07,1.20986e-09,0.0548736,0.000408592,8.90006e-07,-1.83896e-09,0.0552831,0.000410367,8.84489e-07,2.42071e-09,0.0556944,0.000412143,8.91751e-07,-3.93413e-10,0.0561074,0.000413925,8.90571e-07,-8.46967e-10,0.0565222,0.000415704,8.8803e-07,3.78122e-09,0.0569388,0.000417491,8.99374e-07,-3.1021e-09,0.0573572,0.000419281,8.90068e-07,1.17658e-09,0.0577774,0.000421064,8.93597e-07,2.12117e-09,0.0581993,0.000422858,8.99961e-07,-2.21068e-09,0.0586231,0.000424651,8.93329e-07,2.9961e-09,0.0590486,0.000426447,9.02317e-07,-2.32311e-09,0.059476,0.000428244,8.95348e-07,2.57122e-09,0.0599051,0.000430043,9.03062e-07,-5.11098e-10,0.0603361,0.000431847,9.01528e-07,-5.27166e-10,0.0607688,0.000433649,8.99947e-07,2.61984e-09,0.0612034,0.000435457,9.07806e-07,-2.50141e-09,0.0616397,0.000437265,9.00302e-07,3.66045e-09,0.0620779,0.000439076,9.11283e-07,-4.68977e-09,0.0625179,0.000440885,8.97214e-07,7.64783e-09,0.0629597,0.000442702,9.20158e-07,-7.27499e-09,0.063
4033,0.000444521,8.98333e-07,6.55113e-09,0.0638487,0.000446337,9.17986e-07,-4.02844e-09,0.0642959,0.000448161,9.05901e-07,2.11196e-09,0.064745,0.000449979,9.12236e-07,3.03125e-09,0.0651959,0.000451813,9.2133e-07,-6.78648e-09,0.0656486,0.000453635,9.00971e-07,9.21375e-09,0.0661032,0.000455464,9.28612e-07,-7.71684e-09,0.0665596,0.000457299,9.05462e-07,6.7522e-09,0.0670178,0.00045913,9.25718e-07,-4.3907e-09,0.0674778,0.000460968,9.12546e-07,3.36e-09,0.0679397,0.000462803,9.22626e-07,-1.59876e-09,0.0684034,0.000464644,9.1783e-07,3.0351e-09,0.068869,0.000466488,9.26935e-07,-3.09101e-09,0.0693364,0.000468333,9.17662e-07,1.8785e-09,0.0698057,0.000470174,9.23298e-07,3.02733e-09,0.0702768,0.00047203,9.3238e-07,-6.53722e-09,0.0707497,0.000473875,9.12768e-07,8.22054e-09,0.0712245,0.000475725,9.37429e-07,-3.99325e-09,0.0717012,0.000477588,9.2545e-07,3.01839e-10,0.0721797,0.00047944,9.26355e-07,2.78597e-09,0.0726601,0.000481301,9.34713e-07,-3.99507e-09,0.0731423,0.000483158,9.22728e-07,5.7435e-09,0.0736264,0.000485021,9.39958e-07,-4.07776e-09,0.0741123,0.000486888,9.27725e-07,3.11695e-09,0.0746002,0.000488753,9.37076e-07,-9.39394e-10,0.0750898,0.000490625,9.34258e-07,6.4055e-10,0.0755814,0.000492495,9.3618e-07,-1.62265e-09,0.0760748,0.000494363,9.31312e-07,5.84995e-09,0.0765701,0.000496243,9.48861e-07,-6.87601e-09,0.0770673,0.00049812,9.28233e-07,6.75296e-09,0.0775664,0.000499997,9.48492e-07,-5.23467e-09,0.0780673,0.000501878,9.32788e-07,6.73523e-09,0.0785701,0.000503764,9.52994e-07,-6.80514e-09,0.0790748,0.000505649,9.32578e-07,5.5842e-09,0.0795814,0.000507531,9.49331e-07,-6.30583e-10,0.0800899,0.000509428,9.47439e-07,-3.0618e-09,0.0806003,0.000511314,9.38254e-07,5.4273e-09,0.0811125,0.000513206,9.54536e-07,-3.74627e-09,0.0816267,0.000515104,9.43297e-07,2.10713e-09,0.0821427,0.000516997,9.49618e-07,2.76839e-09,0.0826607,0.000518905,9.57924e-07,-5.73006e-09,0.0831805,0.000520803,9.40733e-07,5.25072e-09,0.0837023,0.0005227,9.56486e-07,-3.71718e-10,0.084226,0.000524612,9.5537e-07,-3.76404e-09,0.0847515,0.000526512,9.44078e-07,7.97735e-09,0.085279,0.000528424,9.6801e-07,-5.79367e-09,0.0858084,0.000530343,9.50629e-07,2.96268e-10,0.0863397,0.000532245,9.51518e-07,4.6086e-09,0.0868729,0.000534162,9.65344e-07,-3.82947e-09,0.087408,0.000536081,9.53856e-07,3.25861e-09,0.087945,0.000537998,9.63631e-07,-1.7543e-09,0.088484,0.00053992,9.58368e-07,3.75849e-09,0.0890249,0.000541848,9.69644e-07,-5.82891e-09,0.0895677,0.00054377,9.52157e-07,4.65593e-09,0.0901124,0.000545688,9.66125e-07,2.10643e-09,0.0906591,0.000547627,9.72444e-07,-5.63099e-09,0.0912077,0.000549555,9.55551e-07,5.51627e-09,0.0917582,0.000551483,9.721e-07,-1.53292e-09,0.0923106,0.000553422,9.67501e-07,6.15311e-10,0.092865,0.000555359,9.69347e-07,-9.28291e-10,0.0934213,0.000557295,9.66562e-07,3.09774e-09,0.0939796,0.000559237,9.75856e-07,-4.01186e-09,0.0945398,0.000561177,9.6382e-07,5.49892e-09,0.095102,0.000563121,9.80317e-07,-3.08258e-09,0.0956661,0.000565073,9.71069e-07,-6.19176e-10,0.0962321,0.000567013,9.69212e-07,5.55932e-09,0.0968001,0.000568968,9.8589e-07,-6.71704e-09,0.09737,0.00057092,9.65738e-07,6.40762e-09,0.0979419,0.00057287,9.84961e-07,-4.0122e-09,0.0985158,0.000574828,9.72925e-07,2.19059e-09,0.0990916,0.000576781,9.79496e-07,2.70048e-09,0.0996693,0.000578748,9.87598e-07,-5.54193e-09,0.100249,0.000580706,9.70972e-07,4.56597e-09,0.100831,0.000582662,9.8467e-07,2.17923e-09,0.101414,0.000584638,9.91208e-07,-5.83232e-09,0.102,0.000586603,9.73711e-07,6.24884e-09,0.102588,0.000588569,9.92457e-07,-4.26178e-09,0.103177,0.000590541,9.79672e-07,3.347
81e-09,0.103769,0.00059251,9.89715e-07,-1.67904e-09,0.104362,0.000594485,9.84678e-07,3.36839e-09,0.104958,0.000596464,9.94783e-07,-4.34397e-09,0.105555,0.000598441,9.81751e-07,6.55696e-09,0.106155,0.000600424,1.00142e-06,-6.98272e-09,0.106756,0.000602406,9.80474e-07,6.4728e-09,0.107359,0.000604386,9.99893e-07,-4.00742e-09,0.107965,0.000606374,9.8787e-07,2.10654e-09,0.108572,0.000608356,9.9419e-07,3.0318e-09,0.109181,0.000610353,1.00329e-06,-6.7832e-09,0.109793,0.00061234,9.82936e-07,9.1998e-09,0.110406,0.000614333,1.01054e-06,-7.6642e-09,0.111021,0.000616331,9.87543e-07,6.55579e-09,0.111639,0.000618326,1.00721e-06,-3.65791e-09,0.112258,0.000620329,9.96236e-07,6.25467e-10,0.112879,0.000622324,9.98113e-07,1.15593e-09,0.113503,0.000624323,1.00158e-06,2.20158e-09,0.114128,0.000626333,1.00819e-06,-2.51191e-09,0.114755,0.000628342,1.00065e-06,3.95517e-10,0.115385,0.000630345,1.00184e-06,9.29807e-10,0.116016,0.000632351,1.00463e-06,3.33599e-09,0.116649,0.00063437,1.01463e-06,-6.82329e-09,0.117285,0.000636379,9.94163e-07,9.05595e-09,0.117922,0.000638395,1.02133e-06,-7.04862e-09,0.118562,0.000640416,1.00019e-06,4.23737e-09,0.119203,0.000642429,1.0129e-06,-2.45033e-09,0.119847,0.000644448,1.00555e-06,5.56395e-09,0.120492,0.000646475,1.02224e-06,-4.9043e-09,0.121139,0.000648505,1.00753e-06,-8.47952e-10,0.121789,0.000650518,1.00498e-06,8.29622e-09,0.122441,0.000652553,1.02987e-06,-9.98538e-09,0.123094,0.000654582,9.99914e-07,9.2936e-09,0.12375,0.00065661,1.02779e-06,-4.83707e-09,0.124407,0.000658651,1.01328e-06,2.60411e-09,0.125067,0.000660685,1.0211e-06,-5.57945e-09,0.125729,0.000662711,1.00436e-06,1.22631e-08,0.126392,0.000664756,1.04115e-06,-1.36704e-08,0.127058,0.000666798,1.00014e-06,1.26161e-08,0.127726,0.000668836,1.03798e-06,-6.99155e-09,0.128396,0.000670891,1.01701e-06,4.48836e-10,0.129068,0.000672926,1.01836e-06,5.19606e-09,0.129742,0.000674978,1.03394e-06,-6.3319e-09,0.130418,0.000677027,1.01495e-06,5.2305e-09,0.131096,0.000679073,1.03064e-06,3.11123e-10,0.131776,0.000681135,1.03157e-06,-6.47511e-09,0.132458,0.000683179,1.01215e-06,1.06882e-08,0.133142,0.000685235,1.04421e-06,-6.47519e-09,0.133829,0.000687304,1.02479e-06,3.11237e-10,0.134517,0.000689355,1.02572e-06,5.23035e-09,0.135207,0.000691422,1.04141e-06,-6.3316e-09,0.1359,0.000693486,1.02242e-06,5.19484e-09,0.136594,0.000695546,1.038e-06,4.53497e-10,0.137291,0.000697623,1.03936e-06,-7.00891e-09,0.137989,0.000699681,1.01834e-06,1.2681e-08,0.13869,0.000701756,1.05638e-06,-1.39128e-08,0.139393,0.000703827,1.01464e-06,1.31679e-08,0.140098,0.000705896,1.05414e-06,-8.95659e-09,0.140805,0.000707977,1.02727e-06,7.75742e-09,0.141514,0.000710055,1.05055e-06,-7.17182e-09,0.142225,0.000712135,1.02903e-06,6.02862e-09,0.142938,0.000714211,1.04712e-06,-2.04163e-09,0.143653,0.000716299,1.04099e-06,2.13792e-09,0.144371,0.000718387,1.04741e-06,-6.51009e-09,0.14509,0.000720462,1.02787e-06,9.00123e-09,0.145812,0.000722545,1.05488e-06,3.07523e-10,0.146535,0.000724656,1.0558e-06,-1.02312e-08,0.147261,0.000726737,1.02511e-06,1.0815e-08,0.147989,0.000728819,1.05755e-06,-3.22681e-09,0.148719,0.000730925,1.04787e-06,2.09244e-09,0.14945,0.000733027,1.05415e-06,-5.143e-09,0.150185,0.00073512,1.03872e-06,3.57844e-09,0.150921,0.000737208,1.04946e-06,5.73027e-09,0.151659,0.000739324,1.06665e-06,-1.15983e-08,0.152399,0.000741423,1.03185e-06,1.08605e-08,0.153142,0.000743519,1.06443e-06,-2.04106e-09,0.153886,0.000745642,1.05831e-06,-2.69642e-09,0.154633,0.00074775,1.05022e-06,-2.07425e-09,0.155382,0.000749844,1.044e-06,1.09934e-08,0.156133,0.000751965,1.07698e-06,-1.20
972e-08,0.156886,0.000754083,1.04069e-06,7.59288e-09,0.157641,0.000756187,1.06347e-06,-3.37305e-09,0.158398,0.000758304,1.05335e-06,5.89921e-09,0.159158,0.000760428,1.07104e-06,-5.32248e-09,0.159919,0.000762554,1.05508e-06,4.8927e-10,0.160683,0.000764666,1.05654e-06,3.36547e-09,0.161448,0.000766789,1.06664e-06,9.50081e-10,0.162216,0.000768925,1.06949e-06,-7.16568e-09,0.162986,0.000771043,1.04799e-06,1.28114e-08,0.163758,0.000773177,1.08643e-06,-1.42774e-08,0.164533,0.000775307,1.0436e-06,1.44956e-08,0.165309,0.000777438,1.08708e-06,-1.39025e-08,0.166087,0.00077957,1.04538e-06,1.13118e-08,0.166868,0.000781695,1.07931e-06,-1.54224e-09,0.167651,0.000783849,1.07468e-06,-5.14312e-09,0.168436,0.000785983,1.05925e-06,7.21381e-09,0.169223,0.000788123,1.0809e-06,-8.81096e-09,0.170012,0.000790259,1.05446e-06,1.31289e-08,0.170803,0.000792407,1.09385e-06,-1.39022e-08,0.171597,0.000794553,1.05214e-06,1.26775e-08,0.172392,0.000796695,1.09018e-06,-7.00557e-09,0.17319,0.000798855,1.06916e-06,4.43796e-10,0.17399,0.000800994,1.07049e-06,5.23031e-09,0.174792,0.000803151,1.08618e-06,-6.46397e-09,0.175596,0.000805304,1.06679e-06,5.72444e-09,0.176403,0.000807455,1.08396e-06,-1.53254e-09,0.177211,0.000809618,1.07937e-06,4.05673e-10,0.178022,0.000811778,1.08058e-06,-9.01916e-11,0.178835,0.000813939,1.08031e-06,-4.49821e-11,0.17965,0.000816099,1.08018e-06,2.70234e-10,0.180467,0.00081826,1.08099e-06,-1.03603e-09,0.181286,0.000820419,1.07788e-06,3.87392e-09,0.182108,0.000822587,1.0895e-06,4.41522e-10,0.182932,0.000824767,1.09083e-06,-5.63997e-09,0.183758,0.000826932,1.07391e-06,7.21707e-09,0.184586,0.000829101,1.09556e-06,-8.32718e-09,0.185416,0.000831267,1.07058e-06,1.11907e-08,0.186248,0.000833442,1.10415e-06,-6.63336e-09,0.187083,0.00083563,1.08425e-06,4.41484e-10,0.187919,0.0008378,1.08557e-06,4.86754e-09,0.188758,0.000839986,1.10017e-06,-5.01041e-09,0.189599,0.000842171,1.08514e-06,2.72811e-10,0.190443,0.000844342,1.08596e-06,3.91916e-09,0.191288,0.000846526,1.09772e-06,-1.04819e-09,0.192136,0.000848718,1.09457e-06,2.73531e-10,0.192985,0.000850908,1.0954e-06,-4.58916e-11,0.193837,0.000853099,1.09526e-06,-9.01158e-11,0.194692,0.000855289,1.09499e-06,4.06506e-10,0.195548,0.00085748,1.09621e-06,-1.53595e-09,0.196407,0.000859668,1.0916e-06,5.73717e-09,0.197267,0.000861869,1.10881e-06,-6.51164e-09,0.19813,0.000864067,1.08928e-06,5.40831e-09,0.198995,0.000866261,1.1055e-06,-2.20401e-10,0.199863,0.000868472,1.10484e-06,-4.52652e-09,0.200732,0.000870668,1.09126e-06,3.42508e-09,0.201604,0.000872861,1.10153e-06,5.72762e-09,0.202478,0.000875081,1.11872e-06,-1.14344e-08,0.203354,0.000877284,1.08441e-06,1.02076e-08,0.204233,0.000879484,1.11504e-06,4.06355e-10,0.205113,0.000881715,1.11626e-06,-1.18329e-08,0.205996,0.000883912,1.08076e-06,1.71227e-08,0.206881,0.000886125,1.13213e-06,-1.19546e-08,0.207768,0.000888353,1.09626e-06,8.93465e-10,0.208658,0.000890548,1.09894e-06,8.38062e-09,0.209549,0.000892771,1.12408e-06,-4.61353e-09,0.210443,0.000895006,1.11024e-06,-4.82756e-09,0.211339,0.000897212,1.09576e-06,9.02245e-09,0.212238,0.00089943,1.12283e-06,-1.45997e-09,0.213138,0.000901672,1.11845e-06,-3.18255e-09,0.214041,0.000903899,1.1089e-06,-7.11073e-10,0.214946,0.000906115,1.10677e-06,6.02692e-09,0.215853,0.000908346,1.12485e-06,-8.49548e-09,0.216763,0.00091057,1.09936e-06,1.30537e-08,0.217675,0.000912808,1.13852e-06,-1.3917e-08,0.218588,0.000915044,1.09677e-06,1.28121e-08,0.219505,0.000917276,1.13521e-06,-7.5288e-09,0.220423,0.000919523,1.11262e-06,2.40205e-09,0.221344,0.000921756,1.11983e-06,-2.07941e-09,0.222267,0.000923989,
1.11359e-06,5.91551e-09,0.223192,0.000926234,1.13134e-06,-6.68149e-09,0.224119,0.000928477,1.11129e-06,5.90929e-09,0.225049,0.000930717,1.12902e-06,-2.05436e-09,0.22598,0.000932969,1.12286e-06,2.30807e-09,0.226915,0.000935222,1.12978e-06,-7.17796e-09,0.227851,0.00093746,1.10825e-06,1.15028e-08,0.228789,0.000939711,1.14276e-06,-9.03083e-09,0.22973,0.000941969,1.11566e-06,9.71932e-09,0.230673,0.00094423,1.14482e-06,-1.49452e-08,0.231619,0.000946474,1.09998e-06,2.02591e-08,0.232566,0.000948735,1.16076e-06,-2.13879e-08,0.233516,0.000950993,1.0966e-06,2.05888e-08,0.234468,0.000953247,1.15837e-06,-1.62642e-08,0.235423,0.000955515,1.10957e-06,1.46658e-08,0.236379,0.000957779,1.15357e-06,-1.25966e-08,0.237338,0.000960048,1.11578e-06,5.91793e-09,0.238299,0.000962297,1.13353e-06,3.82602e-09,0.239263,0.000964576,1.14501e-06,-6.3208e-09,0.240229,0.000966847,1.12605e-06,6.55613e-09,0.241197,0.000969119,1.14572e-06,-5.00268e-09,0.242167,0.000971395,1.13071e-06,-1.44659e-09,0.243139,0.000973652,1.12637e-06,1.07891e-08,0.244114,0.000975937,1.15874e-06,-1.19073e-08,0.245091,0.000978219,1.12302e-06,7.03782e-09,0.246071,0.000980486,1.14413e-06,-1.34276e-09,0.247052,0.00098277,1.1401e-06,-1.66669e-09,0.248036,0.000985046,1.1351e-06,8.00935e-09,0.249022,0.00098734,1.15913e-06,-1.54694e-08,0.250011,0.000989612,1.11272e-06,2.4066e-08,0.251002,0.000991909,1.18492e-06,-2.11901e-08,0.251995,0.000994215,1.12135e-06,1.08973e-09,0.25299,0.000996461,1.12462e-06,1.68311e-08,0.253988,0.000998761,1.17511e-06,-8.8094e-09,0.254987,0.00100109,1.14868e-06,-1.13958e-08,0.25599,0.00100335,1.1145e-06,2.45902e-08,0.256994,0.00100565,1.18827e-06,-2.73603e-08,0.258001,0.00100795,1.10618e-06,2.52464e-08,0.25901,0.00101023,1.18192e-06,-1.40207e-08,0.260021,0.00101256,1.13986e-06,1.03387e-09,0.261035,0.00101484,1.14296e-06,9.8853e-09,0.262051,0.00101715,1.17262e-06,-1.07726e-08,0.263069,0.00101947,1.1403e-06,3.40272e-09,0.26409,0.00102176,1.15051e-06,-2.83827e-09,0.265113,0.00102405,1.142e-06,7.95039e-09,0.266138,0.00102636,1.16585e-06,8.39047e-10,0.267166,0.00102869,1.16836e-06,-1.13066e-08,0.268196,0.00103099,1.13444e-06,1.4585e-08,0.269228,0.00103331,1.1782e-06,-1.72314e-08,0.270262,0.00103561,1.1265e-06,2.45382e-08,0.271299,0.00103794,1.20012e-06,-2.13166e-08,0.272338,0.00104028,1.13617e-06,1.12364e-09,0.273379,0.00104255,1.13954e-06,1.68221e-08,0.274423,0.00104488,1.19001e-06,-8.80736e-09,0.275469,0.00104723,1.16358e-06,-1.13948e-08,0.276518,0.00104953,1.1294e-06,2.45839e-08,0.277568,0.00105186,1.20315e-06,-2.73361e-08,0.278621,0.00105418,1.12114e-06,2.51559e-08,0.279677,0.0010565,1.19661e-06,-1.36832e-08,0.280734,0.00105885,1.15556e-06,-2.25706e-10,0.281794,0.00106116,1.15488e-06,1.45862e-08,0.282857,0.00106352,1.19864e-06,-2.83167e-08,0.283921,0.00106583,1.11369e-06,3.90759e-08,0.284988,0.00106817,1.23092e-06,-3.85801e-08,0.286058,0.00107052,1.11518e-06,2.58375e-08,0.287129,0.00107283,1.19269e-06,-5.16498e-09,0.288203,0.0010752,1.1772e-06,-5.17768e-09,0.28928,0.00107754,1.16167e-06,-3.92671e-09,0.290358,0.00107985,1.14988e-06,2.08846e-08,0.29144,0.00108221,1.21254e-06,-2.00072e-08,0.292523,0.00108458,1.15252e-06,-4.60659e-10,0.293609,0.00108688,1.15114e-06,2.18499e-08,0.294697,0.00108925,1.21669e-06,-2.73343e-08,0.295787,0.0010916,1.13468e-06,2.78826e-08,0.29688,0.00109395,1.21833e-06,-2.45915e-08,0.297975,0.00109632,1.14456e-06,1.08787e-08,0.299073,0.00109864,1.17719e-06,1.08788e-08,0.300172,0.00110102,1.20983e-06,-2.45915e-08,0.301275,0.00110337,1.13605e-06,2.78828e-08,0.302379,0.00110573,1.2197e-06,-2.73348e-08,0.303486,0.001
10808,1.1377e-06,2.18518e-08,0.304595,0.00111042,1.20325e-06,-4.67556e-10,0.305707,0.00111283,1.20185e-06,-1.99816e-08,0.306821,0.00111517,1.14191e-06,2.07891e-08,0.307937,0.00111752,1.20427e-06,-3.57026e-09,0.309056,0.00111992,1.19356e-06,-6.50797e-09,0.310177,0.00112228,1.17404e-06,-2.00165e-10,0.3113,0.00112463,1.17344e-06,7.30874e-09,0.312426,0.001127,1.19536e-06,7.67424e-10,0.313554,0.00112939,1.19767e-06,-1.03784e-08,0.314685,0.00113176,1.16653e-06,1.09437e-08,0.315818,0.00113412,1.19936e-06,-3.59406e-09,0.316953,0.00113651,1.18858e-06,3.43251e-09,0.318091,0.0011389,1.19888e-06,-1.0136e-08,0.319231,0.00114127,1.16847e-06,7.30915e-09,0.320374,0.00114363,1.1904e-06,1.07018e-08,0.321518,0.00114604,1.2225e-06,-2.03137e-08,0.322666,0.00114842,1.16156e-06,1.09484e-08,0.323815,0.00115078,1.19441e-06,6.32224e-09,0.324967,0.00115319,1.21337e-06,-6.43509e-09,0.326122,0.00115559,1.19407e-06,-1.03842e-08,0.327278,0.00115795,1.16291e-06,1.81697e-08,0.328438,0.00116033,1.21742e-06,-2.6901e-09,0.329599,0.00116276,1.20935e-06,-7.40939e-09,0.330763,0.00116515,1.18713e-06,2.52533e-09,0.331929,0.00116754,1.1947e-06,-2.69191e-09,0.333098,0.00116992,1.18663e-06,8.24218e-09,0.334269,0.00117232,1.21135e-06,-4.74377e-10,0.335443,0.00117474,1.20993e-06,-6.34471e-09,0.336619,0.00117714,1.1909e-06,-3.94922e-09,0.337797,0.00117951,1.17905e-06,2.21417e-08,0.338978,0.00118193,1.24547e-06,-2.50128e-08,0.340161,0.00118435,1.17043e-06,1.8305e-08,0.341346,0.00118674,1.22535e-06,-1.84048e-08,0.342534,0.00118914,1.17013e-06,2.55121e-08,0.343725,0.00119156,1.24667e-06,-2.40389e-08,0.344917,0.00119398,1.17455e-06,1.10389e-08,0.346113,0.00119636,1.20767e-06,9.68574e-09,0.34731,0.0011988,1.23673e-06,-1.99797e-08,0.34851,0.00120122,1.17679e-06,1.06284e-08,0.349713,0.0012036,1.20867e-06,7.26868e-09,0.350917,0.00120604,1.23048e-06,-9.90072e-09,0.352125,0.00120847,1.20078e-06,2.53177e-09,0.353334,0.00121088,1.20837e-06,-2.26199e-10,0.354546,0.0012133,1.20769e-06,-1.62705e-09,0.355761,0.00121571,1.20281e-06,6.73435e-09,0.356978,0.00121813,1.22302e-06,4.49207e-09,0.358197,0.00122059,1.23649e-06,-2.47027e-08,0.359419,0.00122299,1.16238e-06,3.47142e-08,0.360643,0.00122542,1.26653e-06,-2.47472e-08,0.36187,0.00122788,1.19229e-06,4.66965e-09,0.363099,0.00123028,1.20629e-06,6.06872e-09,0.36433,0.00123271,1.2245e-06,8.57729e-10,0.365564,0.00123516,1.22707e-06,-9.49952e-09,0.366801,0.00123759,1.19858e-06,7.33792e-09,0.36804,0.00124001,1.22059e-06,9.95025e-09,0.369281,0.00124248,1.25044e-06,-1.73366e-08,0.370525,0.00124493,1.19843e-06,-2.08464e-10,0.371771,0.00124732,1.1978e-06,1.81704e-08,0.373019,0.00124977,1.25232e-06,-1.28683e-08,0.37427,0.00125224,1.21371e-06,3.50042e-09,0.375524,0.00125468,1.22421e-06,-1.1335e-09,0.37678,0.00125712,1.22081e-06,1.03345e-09,0.378038,0.00125957,1.22391e-06,-3.00023e-09,0.379299,0.00126201,1.21491e-06,1.09676e-08,0.380562,0.00126447,1.24781e-06,-1.10676e-08,0.381828,0.00126693,1.21461e-06,3.50042e-09,0.383096,0.00126937,1.22511e-06,-2.93403e-09,0.384366,0.00127181,1.21631e-06,8.23574e-09,0.385639,0.00127427,1.24102e-06,-2.06607e-10,0.386915,0.00127675,1.2404e-06,-7.40935e-09,0.388193,0.00127921,1.21817e-06,4.1761e-11,0.389473,0.00128165,1.21829e-06,7.24223e-09,0.390756,0.0012841,1.24002e-06,7.91564e-10,0.392042,0.00128659,1.2424e-06,-1.04086e-08,0.393329,0.00128904,1.21117e-06,1.10405e-08,0.39462,0.0012915,1.24429e-06,-3.951e-09,0.395912,0.00129397,1.23244e-06,4.7634e-09,0.397208,0.00129645,1.24673e-06,-1.51025e-08,0.398505,0.0012989,1.20142e-06,2.58443e-08,0.399805,0.00130138,1.27895e-06,-2.86702e-08,0
.401108,0.00130385,1.19294e-06,2.92318e-08,0.402413,0.00130632,1.28064e-06,-2.86524e-08,0.403721,0.0013088,1.19468e-06,2.57731e-08,0.405031,0.00131127,1.272e-06,-1.48355e-08,0.406343,0.00131377,1.2275e-06,3.76652e-09,0.407658,0.00131623,1.23879e-06,-2.30784e-10,0.408976,0.00131871,1.2381e-06,-2.84331e-09,0.410296,0.00132118,1.22957e-06,1.16041e-08,0.411618,0.00132367,1.26438e-06,-1.37708e-08,0.412943,0.00132616,1.22307e-06,1.36768e-08,0.41427,0.00132865,1.2641e-06,-1.1134e-08,0.4156,0.00133114,1.2307e-06,1.05714e-09,0.416933,0.00133361,1.23387e-06,6.90538e-09,0.418267,0.00133609,1.25459e-06,1.12372e-09,0.419605,0.00133861,1.25796e-06,-1.14002e-08,0.420945,0.00134109,1.22376e-06,1.46747e-08,0.422287,0.00134358,1.26778e-06,-1.7496e-08,0.423632,0.00134606,1.21529e-06,2.5507e-08,0.424979,0.00134857,1.29182e-06,-2.49272e-08,0.426329,0.00135108,1.21703e-06,1.45972e-08,0.427681,0.00135356,1.26083e-06,-3.65935e-09,0.429036,0.00135607,1.24985e-06,4.00178e-11,0.430393,0.00135857,1.24997e-06,3.49917e-09,0.431753,0.00136108,1.26047e-06,-1.40366e-08,0.433116,0.00136356,1.21836e-06,2.28448e-08,0.43448,0.00136606,1.28689e-06,-1.77378e-08,0.435848,0.00136858,1.23368e-06,1.83043e-08,0.437218,0.0013711,1.28859e-06,-2.56769e-08,0.43859,0.0013736,1.21156e-06,2.47987e-08,0.439965,0.0013761,1.28595e-06,-1.39133e-08,0.441342,0.00137863,1.24421e-06,1.05202e-09,0.442722,0.00138112,1.24737e-06,9.70507e-09,0.444104,0.00138365,1.27649e-06,-1.00698e-08,0.445489,0.00138617,1.24628e-06,7.72123e-10,0.446877,0.00138867,1.24859e-06,6.98132e-09,0.448267,0.00139118,1.26954e-06,1.10477e-09,0.449659,0.00139373,1.27285e-06,-1.14003e-08,0.451054,0.00139624,1.23865e-06,1.4694e-08,0.452452,0.00139876,1.28273e-06,-1.75734e-08,0.453852,0.00140127,1.23001e-06,2.5797e-08,0.455254,0.00140381,1.3074e-06,-2.60097e-08,0.456659,0.00140635,1.22937e-06,1.86371e-08,0.458067,0.00140886,1.28529e-06,-1.8736e-08,0.459477,0.00141137,1.22908e-06,2.65048e-08,0.46089,0.00141391,1.30859e-06,-2.76784e-08,0.462305,0.00141645,1.22556e-06,2.46043e-08,0.463722,0.00141897,1.29937e-06,-1.11341e-08,0.465143,0.00142154,1.26597e-06,-9.87033e-09,0.466565,0.00142404,1.23636e-06,2.08131e-08,0.467991,0.00142657,1.2988e-06,-1.37773e-08,0.469419,0.00142913,1.25746e-06,4.49378e-09,0.470849,0.00143166,1.27094e-06,-4.19781e-09,0.472282,0.00143419,1.25835e-06,1.22975e-08,0.473717,0.00143674,1.29524e-06,-1.51902e-08,0.475155,0.00143929,1.24967e-06,1.86608e-08,0.476596,0.00144184,1.30566e-06,-2.96506e-08,0.478039,0.00144436,1.2167e-06,4.03368e-08,0.479485,0.00144692,1.33771e-06,-4.22896e-08,0.480933,0.00144947,1.21085e-06,3.94148e-08,0.482384,0.00145201,1.32909e-06,-2.59626e-08,0.483837,0.00145459,1.2512e-06,4.83124e-09,0.485293,0.0014571,1.2657e-06,6.63757e-09,0.486751,0.00145966,1.28561e-06,-1.57911e-09,0.488212,0.00146222,1.28087e-06,-3.21468e-10,0.489676,0.00146478,1.27991e-06,2.86517e-09,0.491142,0.00146735,1.2885e-06,-1.11392e-08,0.49261,0.00146989,1.25508e-06,1.18893e-08,0.494081,0.00147244,1.29075e-06,-6.61574e-09,0.495555,0.001475,1.27091e-06,1.45736e-08,0.497031,0.00147759,1.31463e-06,-2.18759e-08,0.49851,0.00148015,1.249e-06,1.33252e-08,0.499992,0.00148269,1.28897e-06,-1.62277e-09,0.501476,0.00148526,1.28411e-06,-6.83421e-09,0.502962,0.00148781,1.2636e-06,2.89596e-08,0.504451,0.00149042,1.35048e-06,-4.93997e-08,0.505943,0.00149298,1.20228e-06,4.94299e-08,0.507437,0.00149553,1.35057e-06,-2.91107e-08,0.508934,0.00149814,1.26324e-06,7.40848e-09,0.510434,0.00150069,1.28547e-06,-5.23187e-10,0.511936,0.00150326,1.2839e-06,-5.31585e-09,0.51344,0.00150581,1.26795e-06,2.1
7866e-08,0.514947,0.00150841,1.33331e-06,-2.22257e-08,0.516457,0.00151101,1.26663e-06,7.51178e-09,0.517969,0.00151357,1.28917e-06,-7.82128e-09,0.519484,0.00151613,1.2657e-06,2.37733e-08,0.521002,0.00151873,1.33702e-06,-2.76674e-08,0.522522,0.00152132,1.25402e-06,2.72917e-08,0.524044,0.00152391,1.3359e-06,-2.18949e-08,0.525569,0.00152652,1.27021e-06,6.83372e-10,0.527097,0.00152906,1.27226e-06,1.91613e-08,0.528628,0.00153166,1.32974e-06,-1.77241e-08,0.53016,0.00153427,1.27657e-06,-7.86963e-09,0.531696,0.0015368,1.25296e-06,4.92027e-08,0.533234,0.00153945,1.40057e-06,-6.9732e-08,0.534775,0.00154204,1.19138e-06,5.09114e-08,0.536318,0.00154458,1.34411e-06,-1.4704e-08,0.537864,0.00154722,1.3e-06,7.9048e-09,0.539413,0.00154984,1.32371e-06,-1.69152e-08,0.540964,0.00155244,1.27297e-06,1.51355e-10,0.542517,0.00155499,1.27342e-06,1.63099e-08,0.544074,0.00155758,1.32235e-06,-5.78647e-09,0.545633,0.00156021,1.30499e-06,6.83599e-09,0.547194,0.00156284,1.3255e-06,-2.15575e-08,0.548758,0.00156543,1.26083e-06,1.97892e-08,0.550325,0.00156801,1.32019e-06,2.00525e-09,0.551894,0.00157065,1.32621e-06,-2.78103e-08,0.553466,0.00157322,1.24278e-06,4.96314e-08,0.555041,0.00157586,1.39167e-06,-5.1506e-08,0.556618,0.00157849,1.23716e-06,3.71835e-08,0.558198,0.00158107,1.34871e-06,-3.76233e-08,0.55978,0.00158366,1.23584e-06,5.37052e-08,0.561365,0.00158629,1.39695e-06,-5.79884e-08,0.562953,0.00158891,1.22299e-06,5.90392e-08,0.564543,0.00159153,1.4001e-06,-5.89592e-08,0.566136,0.00159416,1.22323e-06,5.7588e-08,0.567731,0.00159678,1.39599e-06,-5.21835e-08,0.569329,0.00159941,1.23944e-06,3.19369e-08,0.57093,0.00160199,1.33525e-06,-1.59594e-08,0.572533,0.00160461,1.28737e-06,3.19006e-08,0.574139,0.00160728,1.38307e-06,-5.20383e-08,0.575748,0.00160989,1.22696e-06,5.70431e-08,0.577359,0.00161251,1.39809e-06,-5.69247e-08,0.578973,0.00161514,1.22731e-06,5.14463e-08,0.580589,0.00161775,1.38165e-06,-2.9651e-08,0.582208,0.00162042,1.2927e-06,7.55339e-09,0.58383,0.00162303,1.31536e-06,-5.62636e-10,0.585455,0.00162566,1.31367e-06,-5.30281e-09,0.587081,0.00162827,1.29776e-06,2.17738e-08,0.588711,0.00163093,1.36309e-06,-2.21875e-08,0.590343,0.00163359,1.29652e-06,7.37164e-09,0.591978,0.00163621,1.31864e-06,-7.29907e-09,0.593616,0.00163882,1.29674e-06,2.18247e-08,0.595256,0.00164148,1.36221e-06,-2.03952e-08,0.596899,0.00164414,1.30103e-06,1.51241e-10,0.598544,0.00164675,1.30148e-06,1.97902e-08,0.600192,0.00164941,1.36085e-06,-1.97074e-08,0.601843,0.00165207,1.30173e-06,-5.65175e-10,0.603496,0.00165467,1.30004e-06,2.1968e-08,0.605152,0.00165734,1.36594e-06,-2.77024e-08,0.606811,0.00165999,1.28283e-06,2.92369e-08,0.608472,0.00166264,1.37054e-06,-2.96407e-08,0.610136,0.00166529,1.28162e-06,2.97215e-08,0.611803,0.00166795,1.37079e-06,-2.96408e-08,0.613472,0.0016706,1.28186e-06,2.92371e-08,0.615144,0.00167325,1.36957e-06,-2.77031e-08,0.616819,0.00167591,1.28647e-06,2.19708e-08,0.618496,0.00167855,1.35238e-06,-5.75407e-10,0.620176,0.00168125,1.35065e-06,-1.9669e-08,0.621858,0.00168389,1.29164e-06,1.96468e-08,0.623544,0.00168653,1.35058e-06,6.86403e-10,0.625232,0.00168924,1.35264e-06,-2.23924e-08,0.626922,0.00169187,1.28547e-06,2.92788e-08,0.628615,0.00169453,1.3733e-06,-3.51181e-08,0.630311,0.00169717,1.26795e-06,5.15889e-08,0.63201,0.00169987,1.42272e-06,-5.2028e-08,0.633711,0.00170255,1.26663e-06,3.73139e-08,0.635415,0.0017052,1.37857e-06,-3.76227e-08,0.637121,0.00170784,1.2657e-06,5.35722e-08,0.63883,0.00171054,1.42642e-06,-5.74567e-08,0.640542,0.00171322,1.25405e-06,5.70456e-08,0.642257,0.0017159,1.42519e-06,-5.15163e-08,0.643974,0.00171
859,1.27064e-06,2.98103e-08,0.645694,0.00172122,1.36007e-06,-8.12016e-09,0.647417,0.00172392,1.33571e-06,2.67039e-09,0.649142,0.0017266,1.34372e-06,-2.56152e-09,0.65087,0.00172928,1.33604e-06,7.57571e-09,0.6526,0.00173197,1.35876e-06,-2.77413e-08,0.654334,0.00173461,1.27554e-06,4.3785e-08,0.65607,0.00173729,1.40689e-06,-2.81896e-08,0.657808,0.00174002,1.32233e-06,9.36893e-09,0.65955,0.00174269,1.35043e-06,-9.28617e-09,0.661294,0.00174536,1.32257e-06,2.77757e-08,0.66304,0.00174809,1.4059e-06,-4.2212e-08,0.66479,0.00175078,1.27926e-06,2.1863e-08,0.666542,0.0017534,1.34485e-06,1.43648e-08,0.668297,0.00175613,1.38795e-06,-1.97177e-08,0.670054,0.00175885,1.3288e-06,4.90115e-09,0.671814,0.00176152,1.3435e-06,1.13232e-10,0.673577,0.00176421,1.34384e-06,-5.3542e-09,0.675343,0.00176688,1.32778e-06,2.13035e-08,0.677111,0.0017696,1.39169e-06,-2.02553e-08,0.678882,0.00177232,1.33092e-06,1.13005e-10,0.680656,0.00177499,1.33126e-06,1.98031e-08,0.682432,0.00177771,1.39067e-06,-1.97211e-08,0.684211,0.00178043,1.33151e-06,-5.2349e-10,0.685993,0.00178309,1.32994e-06,2.18151e-08,0.687777,0.00178582,1.39538e-06,-2.71325e-08,0.689564,0.00178853,1.31398e-06,2.71101e-08,0.691354,0.00179124,1.39531e-06,-2.17035e-08,0.693147,0.00179396,1.3302e-06,9.92865e-11,0.694942,0.00179662,1.3305e-06,2.13063e-08,0.69674,0.00179935,1.39442e-06,-2.57198e-08,0.698541,0.00180206,1.31726e-06,2.19682e-08,0.700344,0.00180476,1.38317e-06,-2.54852e-09,0.70215,0.00180752,1.37552e-06,-1.17741e-08,0.703959,0.00181023,1.3402e-06,-9.95999e-09,0.705771,0.00181288,1.31032e-06,5.16141e-08,0.707585,0.00181566,1.46516e-06,-7.72869e-08,0.709402,0.00181836,1.2333e-06,7.87197e-08,0.711222,0.00182106,1.46946e-06,-5.87781e-08,0.713044,0.00182382,1.29312e-06,3.71834e-08,0.714869,0.00182652,1.40467e-06,-3.03511e-08,0.716697,0.00182924,1.31362e-06,2.46161e-08,0.718528,0.00183194,1.38747e-06,-8.5087e-09,0.720361,0.00183469,1.36194e-06,9.41892e-09,0.722197,0.00183744,1.3902e-06,-2.91671e-08,0.724036,0.00184014,1.3027e-06,4.76448e-08,0.725878,0.00184288,1.44563e-06,-4.22028e-08,0.727722,0.00184565,1.31902e-06,1.95682e-09,0.729569,0.00184829,1.3249e-06,3.43754e-08,0.731419,0.00185104,1.42802e-06,-2.0249e-08,0.733271,0.00185384,1.36727e-06,-1.29838e-08,0.735126,0.00185654,1.32832e-06,1.25794e-08,0.736984,0.00185923,1.36606e-06,2.22711e-08,0.738845,0.00186203,1.43287e-06,-4.20594e-08,0.740708,0.00186477,1.3067e-06,2.67571e-08,0.742574,0.00186746,1.38697e-06,-5.36424e-09,0.744443,0.00187022,1.37087e-06,-5.30023e-09,0.746315,0.00187295,1.35497e-06,2.65653e-08,0.748189,0.00187574,1.43467e-06,-4.13564e-08,0.750066,0.00187848,1.3106e-06,1.9651e-08,0.751946,0.00188116,1.36955e-06,2.23572e-08,0.753828,0.00188397,1.43663e-06,-4.9475e-08,0.755714,0.00188669,1.2882e-06,5.63335e-08,0.757602,0.00188944,1.4572e-06,-5.66499e-08,0.759493,0.00189218,1.28725e-06,5.10567e-08,0.761386,0.00189491,1.44042e-06,-2.83677e-08,0.763283,0.00189771,1.35532e-06,2.80962e-09,0.765182,0.00190042,1.36375e-06,1.71293e-08,0.767083,0.0019032,1.41513e-06,-1.17221e-08,0.768988,0.001906,1.37997e-06,-2.98453e-08,0.770895,0.00190867,1.29043e-06,7.14987e-08,0.772805,0.00191146,1.50493e-06,-7.73354e-08,0.774718,0.00191424,1.27292e-06,5.90292e-08,0.776634,0.00191697,1.45001e-06,-3.9572e-08,0.778552,0.00191975,1.33129e-06,3.9654e-08,0.780473,0.00192253,1.45026e-06,-5.94395e-08,0.782397,0.00192525,1.27194e-06,7.88945e-08,0.784324,0.00192803,1.50862e-06,-7.73249e-08,0.786253,0.00193082,1.27665e-06,5.15913e-08,0.788185,0.00193352,1.43142e-06,-9.83099e-09,0.79012,0.00193636,1.40193e-06,-1.22672e-08,0.792058
,0.00193912,1.36513e-06,-7.05275e-10,0.793999,0.00194185,1.36301e-06,1.50883e-08,0.795942,0.00194462,1.40828e-06,-4.33147e-11,0.797888,0.00194744,1.40815e-06,-1.49151e-08,0.799837,0.00195021,1.3634e-06,9.93244e-11,0.801788,0.00195294,1.3637e-06,1.45179e-08,0.803743,0.00195571,1.40725e-06,1.43363e-09,0.8057,0.00195853,1.41155e-06,-2.02525e-08,0.80766,0.00196129,1.35079e-06,1.99718e-08,0.809622,0.00196405,1.41071e-06,-3.01649e-11,0.811588,0.00196687,1.41062e-06,-1.9851e-08,0.813556,0.00196964,1.35107e-06,1.98296e-08,0.815527,0.0019724,1.41056e-06,1.37485e-10,0.817501,0.00197522,1.41097e-06,-2.03796e-08,0.819477,0.00197798,1.34983e-06,2.17763e-08,0.821457,0.00198074,1.41516e-06,-7.12085e-09,0.823439,0.00198355,1.3938e-06,6.70707e-09,0.825424,0.00198636,1.41392e-06,-1.97074e-08,0.827412,0.00198913,1.35479e-06,1.25179e-08,0.829402,0.00199188,1.39235e-06,2.92405e-08,0.831396,0.00199475,1.48007e-06,-6.98755e-08,0.833392,0.0019975,1.27044e-06,7.14477e-08,0.835391,0.00200026,1.48479e-06,-3.71014e-08,0.837392,0.00200311,1.37348e-06,1.73533e-08,0.839397,0.00200591,1.42554e-06,-3.23118e-08,0.841404,0.00200867,1.32861e-06,5.2289e-08,0.843414,0.00201148,1.48547e-06,-5.76348e-08,0.845427,0.00201428,1.31257e-06,5.9041e-08,0.847443,0.00201708,1.48969e-06,-5.93197e-08,0.849461,0.00201988,1.31173e-06,5.90289e-08,0.851482,0.00202268,1.48882e-06,-5.75864e-08,0.853507,0.00202549,1.31606e-06,5.21075e-08,0.855533,0.00202828,1.47238e-06,-3.16344e-08,0.857563,0.00203113,1.37748e-06,1.48257e-08,0.859596,0.00203393,1.42196e-06,-2.76684e-08,0.861631,0.00203669,1.33895e-06,3.62433e-08,0.863669,0.00203947,1.44768e-06,1.90463e-09,0.86571,0.00204237,1.45339e-06,-4.38617e-08,0.867754,0.00204515,1.32181e-06,5.43328e-08,0.8698,0.00204796,1.48481e-06,-5.42603e-08,0.87185,0.00205076,1.32203e-06,4.34989e-08,0.873902,0.00205354,1.45252e-06,-5.26029e-10,0.875957,0.00205644,1.45095e-06,-4.13949e-08,0.878015,0.00205922,1.32676e-06,4.68962e-08,0.880075,0.00206201,1.46745e-06,-2.69807e-08,0.882139,0.00206487,1.38651e-06,1.42181e-09,0.884205,0.00206764,1.39077e-06,2.12935e-08,0.886274,0.00207049,1.45465e-06,-2.69912e-08,0.888346,0.00207332,1.37368e-06,2.70664e-08,0.890421,0.00207615,1.45488e-06,-2.16698e-08,0.892498,0.00207899,1.38987e-06,8.14756e-12,0.894579,0.00208177,1.38989e-06,2.16371e-08,0.896662,0.00208462,1.45481e-06,-2.6952e-08,0.898748,0.00208744,1.37395e-06,2.65663e-08,0.900837,0.00209027,1.45365e-06,-1.97084e-08,0.902928,0.00209312,1.39452e-06,-7.33731e-09,0.905023,0.00209589,1.37251e-06,4.90578e-08,0.90712,0.00209878,1.51968e-06,-6.96845e-08,0.90922,0.00210161,1.31063e-06,5.08664e-08,0.911323,0.00210438,1.46323e-06,-1.45717e-08,0.913429,0.00210727,1.41952e-06,7.42038e-09,0.915538,0.00211013,1.44178e-06,-1.51097e-08,0.917649,0.00211297,1.39645e-06,-6.58618e-09,0.919764,0.00211574,1.37669e-06,4.14545e-08,0.921881,0.00211862,1.50105e-06,-4.00222e-08,0.924001,0.0021215,1.38099e-06,-5.7518e-10,0.926124,0.00212426,1.37926e-06,4.23229e-08,0.92825,0.00212714,1.50623e-06,-4.9507e-08,0.930378,0.00213001,1.35771e-06,3.64958e-08,0.93251,0.00213283,1.4672e-06,-3.68713e-08,0.934644,0.00213566,1.35658e-06,5.13848e-08,0.936781,0.00213852,1.51074e-06,-4.94585e-08,0.938921,0.0021414,1.36236e-06,2.72399e-08,0.941064,0.0021442,1.44408e-06,1.0372e-10,0.943209,0.00214709,1.44439e-06,-2.76547e-08,0.945358,0.0021499,1.36143e-06,5.09106e-08,0.947509,0.00215277,1.51416e-06,-5.67784e-08,0.949663,0.00215563,1.34382e-06,5.69935e-08,0.95182,0.00215849,1.5148e-06,-5.19861e-08,0.95398,0.00216136,1.35885e-06,3.17417e-08,0.956143,0.00216418,1.45407e-06,-1.
53758e-08,0.958309,0.00216704,1.40794e-06,2.97615e-08,0.960477,0.00216994,1.49723e-06,-4.40657e-08,0.962649,0.00217281,1.36503e-06,2.72919e-08,0.964823,0.00217562,1.44691e-06,-5.49729e-09,0.967,0.0021785,1.43041e-06,-5.30273e-09,0.96918,0.00218134,1.41451e-06,2.67084e-08,0.971363,0.00218425,1.49463e-06,-4.19265e-08,0.973548,0.00218711,1.36885e-06,2.17881e-08,0.975737,0.00218992,1.43422e-06,1.43789e-08,0.977928,0.00219283,1.47735e-06,-1.96989e-08,0.980122,0.00219572,1.41826e-06,4.81221e-09,0.98232,0.00219857,1.43269e-06,4.50048e-10,0.98452,0.00220144,1.43404e-06,-6.61237e-09,0.986722,0.00220429,1.41421e-06,2.59993e-08,0.988928,0.0022072,1.4922e-06,-3.77803e-08,0.991137,0.00221007,1.37886e-06,5.9127e-09,0.993348,0.00221284,1.3966e-06,1.33339e-07,0.995563,0.00221604,1.79662e-06,-5.98872e-07,0.99778,0.00222015,0.,0.};
+
+    template <bool srgb, int blueIdx, typename T, typename D>
+    __device__ __forceinline__ void RGB2LabConvert_f(const T& src, D& dst)
+    {
+        const float _1_3 = 1.0f / 3.0f;
+        const float _a = 16.0f / 116.0f;
+
+        float B = blueIdx == 0 ? src.x : src.z;
+        float G = src.y;
+        float R = blueIdx == 0 ? src.z : src.x;
+
+        if (srgb)
+        {
+            B = splineInterpolate(B * GAMMA_TAB_SIZE, c_sRGBGammaTab, GAMMA_TAB_SIZE);
+            G = splineInterpolate(G * GAMMA_TAB_SIZE, c_sRGBGammaTab, GAMMA_TAB_SIZE);
+            R = splineInterpolate(R * GAMMA_TAB_SIZE, c_sRGBGammaTab, GAMMA_TAB_SIZE);
+        }
+
+        float X = B * 0.189828f + G * 0.376219f + R * 0.433953f;
+        float Y = B * 0.072169f + G * 0.715160f + R * 0.212671f;
+        float Z = B * 0.872766f + G * 0.109477f + R * 0.017758f;
+
+        float FX = X > 0.008856f ? ::powf(X, _1_3) : (7.787f * X + _a);
+        float FY = Y > 0.008856f ? ::powf(Y, _1_3) : (7.787f * Y + _a);
+        float FZ = Z > 0.008856f ? ::powf(Z, _1_3) : (7.787f * Z + _a);
+
+        float L = Y > 0.008856f ? (116.f * FY - 16.f) : (903.3f * Y);
+        float a = 500.f * (FX - FY);
+        float b = 200.f * (FY - FZ);
+
+        dst.x = L;
+        dst.y = a;
+        dst.z = b;
+    }
+
+    template <typename T, int scn, int dcn, bool srgb, int blueIdx> struct RGB2Lab;
+    template <int scn, int dcn, bool srgb, int blueIdx>
+    struct RGB2Lab<uchar, scn, dcn, srgb, blueIdx>
+        : unary_function<typename TypeVec<uchar, scn>::vec_type, typename TypeVec<uchar, dcn>::vec_type>
+    {
+        __device__ __forceinline__ typename TypeVec<uchar, dcn>::vec_type operator ()(const typename TypeVec<uchar, scn>::vec_type& src) const
+        {
+            typename TypeVec<uchar, dcn>::vec_type dst;
+
+            RGB2LabConvert_b<srgb, blueIdx>(src, dst);
+
+            return dst;
+        }
+        __host__ __device__ __forceinline__ RGB2Lab() {}
+        __host__ __device__ __forceinline__ RGB2Lab(const RGB2Lab&) {}
+    };
+    template <int scn, int dcn, bool srgb, int blueIdx>
+    struct RGB2Lab<float, scn, dcn, srgb, blueIdx>
+        : unary_function<typename TypeVec<float, scn>::vec_type, typename TypeVec<float, dcn>::vec_type>
+    {
+        __device__ __forceinline__ typename TypeVec<float, dcn>::vec_type operator ()(const typename TypeVec<float, scn>::vec_type& src) const
+        {
+            typename TypeVec<float, dcn>::vec_type dst;
+
+            RGB2LabConvert_f<srgb, blueIdx>(src, dst);
+
+            return dst;
+        }
+        __host__ __device__ __forceinline__ RGB2Lab() {}
+        __host__ __device__ __forceinline__ RGB2Lab(const RGB2Lab&) {}
+    };
+    }
+
+#define OPENCV_CUDA_IMPLEMENT_RGB2Lab_TRAITS(name, scn, dcn, srgb, blueIdx) \
+    template <typename T> struct name ## _traits \
+    { \
+        typedef ::cv::cuda::device::color_detail::RGB2Lab<T, scn, dcn, srgb, blueIdx> functor_type; \
+        static __host__ __device__ __forceinline__ functor_type create_functor() \
+        { \
+            return functor_type(); \
+        } \
+    };
+
+    namespace color_detail
+    {
+        __constant__ float c_sRGBInvGammaTab[] =
{0,0.0126255,0.,-8.33961e-06,0.0126172,0.0126005,-2.50188e-05,4.1698e-05,0.0252344,0.0126756,0.000100075,-0.000158451,0.0378516,0.0124004,-0.000375277,-0.000207393,0.0496693,0.0110276,-0.000997456,0.00016837,0.0598678,0.00953783,-0.000492346,2.07235e-05,0.068934,0.00861531,-0.000430176,3.62876e-05,0.0771554,0.00786382,-0.000321313,1.87625e-05,0.0847167,0.00727748,-0.000265025,1.53594e-05,0.0917445,0.00679351,-0.000218947,1.10545e-05,0.0983301,0.00638877,-0.000185784,8.66984e-06,0.104542,0.00604322,-0.000159774,6.82996e-06,0.110432,0.00574416,-0.000139284,5.51008e-06,0.116042,0.00548212,-0.000122754,4.52322e-06,0.121406,0.00525018,-0.000109184,3.75557e-06,0.126551,0.00504308,-9.79177e-05,3.17134e-06,0.131499,0.00485676,-8.84037e-05,2.68469e-06,0.13627,0.004688,-8.03496e-05,2.31725e-06,0.14088,0.00453426,-7.33978e-05,2.00868e-06,0.145343,0.00439349,-6.73718e-05,1.74775e-06,0.149671,0.00426399,-6.21286e-05,1.53547e-06,0.153875,0.00414434,-5.75222e-05,1.364e-06,0.157963,0.00403338,-5.34301e-05,1.20416e-06,0.161944,0.00393014,-4.98177e-05,1.09114e-06,0.165825,0.00383377,-4.65443e-05,9.57987e-07,0.169613,0.00374356,-4.36703e-05,8.88359e-07,0.173314,0.00365888,-4.10052e-05,7.7849e-07,0.176933,0.00357921,-3.86697e-05,7.36254e-07,0.180474,0.00350408,-3.6461e-05,6.42534e-07,0.183942,0.00343308,-3.45334e-05,6.12614e-07,0.187342,0.00336586,-3.26955e-05,5.42894e-07,0.190675,0.00330209,-3.10669e-05,5.08967e-07,0.193947,0.00324149,-2.954e-05,4.75977e-07,0.197159,0.00318383,-2.8112e-05,4.18343e-07,0.200315,0.00312887,-2.6857e-05,4.13651e-07,0.203418,0.00307639,-2.5616e-05,3.70847e-07,0.206469,0.00302627,-2.45035e-05,3.3813e-07,0.209471,0.00297828,-2.34891e-05,3.32999e-07,0.212426,0.0029323,-2.24901e-05,2.96826e-07,0.215336,0.00288821,-2.15996e-05,2.82736e-07,0.218203,0.00284586,-2.07514e-05,2.70961e-07,0.221029,0.00280517,-1.99385e-05,2.42744e-07,0.223814,0.00276602,-1.92103e-05,2.33277e-07,0.226561,0.0027283,-1.85105e-05,2.2486e-07,0.229271,0.00269195,-1.78359e-05,2.08383e-07,0.231945,0.00265691,-1.72108e-05,1.93305e-07,0.234585,0.00262307,-1.66308e-05,1.80687e-07,0.237192,0.00259035,-1.60888e-05,1.86632e-07,0.239766,0.00255873,-1.55289e-05,1.60569e-07,0.24231,0.00252815,-1.50472e-05,1.54566e-07,0.244823,0.00249852,-1.45835e-05,1.59939e-07,0.247307,0.00246983,-1.41037e-05,1.29549e-07,0.249763,0.00244202,-1.3715e-05,1.41429e-07,0.252191,0.00241501,-1.32907e-05,1.39198e-07,0.254593,0.00238885,-1.28731e-05,1.06444e-07,0.256969,0.00236342,-1.25538e-05,1.2048e-07,0.25932,0.00233867,-1.21924e-05,1.26892e-07,0.261647,0.00231467,-1.18117e-05,8.72084e-08,0.26395,0.00229131,-1.15501e-05,1.20323e-07,0.26623,0.00226857,-1.11891e-05,8.71514e-08,0.268487,0.00224645,-1.09276e-05,9.73165e-08,0.270723,0.00222489,-1.06357e-05,8.98259e-08,0.272937,0.00220389,-1.03662e-05,7.98218e-08,0.275131,0.00218339,-1.01267e-05,9.75254e-08,0.277304,0.00216343,-9.83416e-06,6.65195e-08,0.279458,0.00214396,-9.63461e-06,8.34313e-08,0.281592,0.00212494,-9.38431e-06,7.65919e-08,0.283708,0.00210641,-9.15454e-06,5.7236e-08,0.285805,0.00208827,-8.98283e-06,8.18939e-08,0.287885,0.00207055,-8.73715e-06,6.2224e-08,0.289946,0.00205326,-8.55047e-06,5.66388e-08,0.291991,0.00203633,-8.38056e-06,6.88491e-08,0.294019,0.00201978,-8.17401e-06,5.53955e-08,0.296031,0.00200359,-8.00782e-06,6.71971e-08,0.298027,0.00198778,-7.80623e-06,3.34439e-08,0.300007,0.00197227,-7.7059e-06,6.7248e-08,0.301971,0.00195706,-7.50416e-06,5.51915e-08,0.303921,0.00194221,-7.33858e-06,3.98124e-08,0.305856,0.00192766,-7.21915e-06,5.37795e-08,0.307776,0.00191338,-7.05781e-06,4.3091
9e-08,0.309683,0.00189939,-6.92853e-06,4.20744e-08,0.311575,0.00188566,-6.80231e-06,5.68321e-08,0.313454,0.00187223,-6.63181e-06,2.86195e-08,0.31532,0.00185905,-6.54595e-06,3.73075e-08,0.317172,0.00184607,-6.43403e-06,6.05684e-08,0.319012,0.00183338,-6.25233e-06,1.84426e-08,0.320839,0.00182094,-6.197e-06,4.44757e-08,0.322654,0.00180867,-6.06357e-06,4.20729e-08,0.324456,0.00179667,-5.93735e-06,2.56511e-08,0.326247,0.00178488,-5.8604e-06,3.41368e-08,0.328026,0.00177326,-5.75799e-06,4.64177e-08,0.329794,0.00176188,-5.61874e-06,1.86107e-08,0.33155,0.0017507,-5.5629e-06,2.81511e-08,0.333295,0.00173966,-5.47845e-06,4.75987e-08,0.335029,0.00172884,-5.33565e-06,1.98726e-08,0.336753,0.00171823,-5.27604e-06,2.19226e-08,0.338466,0.00170775,-5.21027e-06,4.14483e-08,0.340169,0.00169745,-5.08592e-06,2.09017e-08,0.341861,0.00168734,-5.02322e-06,2.39561e-08,0.343543,0.00167737,-4.95135e-06,3.22852e-08,0.345216,0.00166756,-4.85449e-06,2.57173e-08,0.346878,0.00165793,-4.77734e-06,1.38569e-08,0.348532,0.00164841,-4.73577e-06,3.80634e-08,0.350175,0.00163906,-4.62158e-06,1.27043e-08,0.35181,0.00162985,-4.58347e-06,3.03279e-08,0.353435,0.00162078,-4.49249e-06,1.49961e-08,0.355051,0.00161184,-4.4475e-06,2.88977e-08,0.356659,0.00160303,-4.3608e-06,1.84241e-08,0.358257,0.00159436,-4.30553e-06,1.6616e-08,0.359848,0.0015858,-4.25568e-06,3.43218e-08,0.361429,0.00157739,-4.15272e-06,-4.89172e-09,0.363002,0.00156907,-4.16739e-06,4.48498e-08,0.364567,0.00156087,-4.03284e-06,4.30676e-09,0.366124,0.00155282,-4.01992e-06,2.73303e-08,0.367673,0.00154486,-3.93793e-06,5.58036e-09,0.369214,0.001537,-3.92119e-06,3.97554e-08,0.370747,0.00152928,-3.80193e-06,-1.55904e-08,0.372272,0.00152163,-3.8487e-06,5.24081e-08,0.37379,0.00151409,-3.69147e-06,-1.52272e-08,0.375301,0.00150666,-3.73715e-06,3.83028e-08,0.376804,0.0014993,-3.62225e-06,1.10278e-08,0.378299,0.00149209,-3.58916e-06,6.99326e-09,0.379788,0.00148493,-3.56818e-06,2.06038e-08,0.381269,0.00147786,-3.50637e-06,2.98009e-08,0.382744,0.00147093,-3.41697e-06,-2.05978e-08,0.384211,0.00146404,-3.47876e-06,5.25899e-08,0.385672,0.00145724,-3.32099e-06,-1.09471e-08,0.387126,0.00145056,-3.35383e-06,2.10009e-08,0.388573,0.00144392,-3.29083e-06,1.63501e-08,0.390014,0.00143739,-3.24178e-06,3.00641e-09,0.391448,0.00143091,-3.23276e-06,3.12282e-08,0.392875,0.00142454,-3.13908e-06,-8.70932e-09,0.394297,0.00141824,-3.16521e-06,3.34114e-08,0.395712,0.00141201,-3.06497e-06,-5.72754e-09,0.397121,0.00140586,-3.08215e-06,1.9301e-08,0.398524,0.00139975,-3.02425e-06,1.7931e-08,0.39992,0.00139376,-2.97046e-06,-1.61822e-09,0.401311,0.00138781,-2.97531e-06,1.83442e-08,0.402696,0.00138192,-2.92028e-06,1.76485e-08,0.404075,0.00137613,-2.86733e-06,4.68617e-10,0.405448,0.00137039,-2.86593e-06,1.02794e-08,0.406816,0.00136469,-2.83509e-06,1.80179e-08,0.408178,0.00135908,-2.78104e-06,7.05594e-09,0.409534,0.00135354,-2.75987e-06,1.33633e-08,0.410885,0.00134806,-2.71978e-06,-9.04568e-10,0.41223,0.00134261,-2.72249e-06,2.0057e-08,0.41357,0.00133723,-2.66232e-06,1.00841e-08,0.414905,0.00133194,-2.63207e-06,-7.88835e-10,0.416234,0.00132667,-2.63444e-06,2.28734e-08,0.417558,0.00132147,-2.56582e-06,-1.29785e-09,0.418877,0.00131633,-2.56971e-06,1.21205e-08,0.420191,0.00131123,-2.53335e-06,1.24202e-08,0.421499,0.0013062,-2.49609e-06,-2.19681e-09,0.422803,0.0013012,-2.50268e-06,2.61696e-08,0.424102,0.00129628,-2.42417e-06,-1.30747e-08,0.425396,0.00129139,-2.46339e-06,2.6129e-08,0.426685,0.00128654,-2.38501e-06,-2.03454e-09,0.427969,0.00128176,-2.39111e-06,1.18115e-08,0.429248,0.00127702,-2.35567e-06,1.43932e-08,0.4305
23,0.00127235,-2.31249e-06,-9.77965e-09,0.431793,0.00126769,-2.34183e-06,2.47253e-08,0.433058,0.00126308,-2.26766e-06,2.85278e-10,0.434319,0.00125855,-2.2668e-06,3.93614e-09,0.435575,0.00125403,-2.25499e-06,1.37722e-08,0.436827,0.00124956,-2.21368e-06,5.79803e-10,0.438074,0.00124513,-2.21194e-06,1.37112e-08,0.439317,0.00124075,-2.1708e-06,4.17973e-09,0.440556,0.00123642,-2.15826e-06,-6.27703e-10,0.44179,0.0012321,-2.16015e-06,2.81332e-08,0.44302,0.00122787,-2.07575e-06,-2.24985e-08,0.444246,0.00122365,-2.14324e-06,3.20586e-08,0.445467,0.00121946,-2.04707e-06,-1.6329e-08,0.446685,0.00121532,-2.09605e-06,3.32573e-08,0.447898,0.00121122,-1.99628e-06,-2.72927e-08,0.449107,0.00120715,-2.07816e-06,4.6111e-08,0.450312,0.00120313,-1.93983e-06,-3.79416e-08,0.451514,0.00119914,-2.05365e-06,4.60507e-08,0.452711,0.00119517,-1.9155e-06,-2.7052e-08,0.453904,0.00119126,-1.99666e-06,3.23551e-08,0.455093,0.00118736,-1.89959e-06,-1.29613e-08,0.456279,0.00118352,-1.93848e-06,1.94905e-08,0.45746,0.0011797,-1.88e-06,-5.39588e-09,0.458638,0.00117593,-1.89619e-06,2.09282e-09,0.459812,0.00117214,-1.88991e-06,2.68267e-08,0.460982,0.00116844,-1.80943e-06,-1.99925e-08,0.462149,0.00116476,-1.86941e-06,2.3341e-08,0.463312,0.00116109,-1.79939e-06,-1.37674e-08,0.464471,0.00115745,-1.84069e-06,3.17287e-08,0.465627,0.00115387,-1.7455e-06,-2.37407e-08,0.466779,0.00115031,-1.81673e-06,3.34315e-08,0.467927,0.00114677,-1.71643e-06,-2.05786e-08,0.469073,0.00114328,-1.77817e-06,1.90802e-08,0.470214,0.00113978,-1.72093e-06,3.86247e-09,0.471352,0.00113635,-1.70934e-06,-4.72759e-09,0.472487,0.00113292,-1.72352e-06,1.50478e-08,0.473618,0.00112951,-1.67838e-06,4.14108e-09,0.474746,0.00112617,-1.66595e-06,-1.80986e-09,0.47587,0.00112283,-1.67138e-06,3.09816e-09,0.476991,0.0011195,-1.66209e-06,1.92198e-08,0.478109,0.00111623,-1.60443e-06,-2.03726e-08,0.479224,0.00111296,-1.66555e-06,3.2468e-08,0.480335,0.00110973,-1.56814e-06,-2.00922e-08,0.481443,0.00110653,-1.62842e-06,1.80983e-08,0.482548,0.00110333,-1.57413e-06,7.30362e-09,0.48365,0.0011002,-1.55221e-06,-1.75107e-08,0.484749,0.00109705,-1.60475e-06,3.29373e-08,0.485844,0.00109393,-1.50594e-06,-2.48315e-08,0.486937,0.00109085,-1.58043e-06,3.65865e-08,0.488026,0.0010878,-1.47067e-06,-3.21078e-08,0.489112,0.00108476,-1.56699e-06,3.22397e-08,0.490195,0.00108172,-1.47027e-06,-7.44391e-09,0.491276,0.00107876,-1.49261e-06,-2.46428e-09,0.492353,0.00107577,-1.5e-06,1.73011e-08,0.493427,0.00107282,-1.4481e-06,-7.13552e-09,0.494499,0.0010699,-1.4695e-06,1.1241e-08,0.495567,0.001067,-1.43578e-06,-8.02637e-09,0.496633,0.0010641,-1.45986e-06,2.08645e-08,0.497695,0.00106124,-1.39726e-06,-1.58271e-08,0.498755,0.0010584,-1.44475e-06,1.26415e-08,0.499812,0.00105555,-1.40682e-06,2.48655e-08,0.500866,0.00105281,-1.33222e-06,-5.24988e-08,0.501918,0.00104999,-1.48972e-06,6.59206e-08,0.502966,0.00104721,-1.29196e-06,-3.237e-08,0.504012,0.00104453,-1.38907e-06,3.95479e-09,0.505055,0.00104176,-1.3772e-06,1.65509e-08,0.506096,0.00103905,-1.32755e-06,-1.05539e-08,0.507133,0.00103637,-1.35921e-06,2.56648e-08,0.508168,0.00103373,-1.28222e-06,-3.25007e-08,0.509201,0.00103106,-1.37972e-06,4.47336e-08,0.51023,0.00102844,-1.24552e-06,-2.72245e-08,0.511258,0.00102587,-1.32719e-06,4.55952e-09,0.512282,0.00102323,-1.31352e-06,8.98645e-09,0.513304,0.00102063,-1.28656e-06,1.90992e-08,0.514323,0.00101811,-1.22926e-06,-2.57786e-08,0.51534,0.00101557,-1.30659e-06,2.44104e-08,0.516355,0.00101303,-1.23336e-06,-1.22581e-08,0.517366,0.00101053,-1.27014e-06,2.4622e-08,0.518376,0.00100806,-1.19627e-06,-2.66253e-08,0.519383,0.00
100559,-1.27615e-06,2.22744e-08,0.520387,0.00100311,-1.20932e-06,-2.8679e-09,0.521389,0.00100068,-1.21793e-06,-1.08029e-08,0.522388,0.000998211,-1.25034e-06,4.60795e-08,0.523385,0.000995849,-1.1121e-06,-5.4306e-08,0.52438,0.000993462,-1.27502e-06,5.19354e-08,0.525372,0.000991067,-1.11921e-06,-3.42262e-08,0.526362,0.000988726,-1.22189e-06,2.53646e-08,0.52735,0.000986359,-1.14579e-06,-7.62782e-09,0.528335,0.000984044,-1.16868e-06,5.14668e-09,0.529318,0.000981722,-1.15324e-06,-1.29589e-08,0.530298,0.000979377,-1.19211e-06,4.66888e-08,0.531276,0.000977133,-1.05205e-06,-5.45868e-08,0.532252,0.000974865,-1.21581e-06,5.24495e-08,0.533226,0.000972591,-1.05846e-06,-3.60019e-08,0.534198,0.000970366,-1.16647e-06,3.19537e-08,0.535167,0.000968129,-1.07061e-06,-3.2208e-08,0.536134,0.000965891,-1.16723e-06,3.72738e-08,0.537099,0.000963668,-1.05541e-06,2.32205e-09,0.538061,0.000961564,-1.04844e-06,-4.65618e-08,0.539022,0.000959328,-1.18813e-06,6.47159e-08,0.53998,0.000957146,-9.93979e-07,-3.3488e-08,0.540936,0.000955057,-1.09444e-06,9.63166e-09,0.54189,0.000952897,-1.06555e-06,-5.03871e-09,0.542842,0.000950751,-1.08066e-06,1.05232e-08,0.543792,0.000948621,-1.04909e-06,2.25503e-08,0.544739,0.000946591,-9.81444e-07,-4.11195e-08,0.545685,0.000944504,-1.1048e-06,2.27182e-08,0.546628,0.000942363,-1.03665e-06,9.85146e-09,0.54757,0.000940319,-1.00709e-06,-2.51938e-09,0.548509,0.000938297,-1.01465e-06,2.25858e-10,0.549446,0.000936269,-1.01397e-06,1.61598e-09,0.550381,0.000934246,-1.00913e-06,-6.68983e-09,0.551315,0.000932207,-1.0292e-06,2.51434e-08,0.552246,0.000930224,-9.53765e-07,-3.42793e-08,0.553175,0.000928214,-1.0566e-06,5.23688e-08,0.554102,0.000926258,-8.99497e-07,-5.59865e-08,0.555028,0.000924291,-1.06746e-06,5.23679e-08,0.555951,0.000922313,-9.10352e-07,-3.42763e-08,0.556872,0.00092039,-1.01318e-06,2.51326e-08,0.557792,0.000918439,-9.37783e-07,-6.64954e-09,0.558709,0.000916543,-9.57732e-07,1.46554e-09,0.559625,0.000914632,-9.53335e-07,7.87281e-10,0.560538,0.000912728,-9.50973e-07,-4.61466e-09,0.56145,0.000910812,-9.64817e-07,1.76713e-08,0.56236,0.000908935,-9.11804e-07,-6.46564e-09,0.563268,0.000907092,-9.312e-07,8.19121e-09,0.564174,0.000905255,-9.06627e-07,-2.62992e-08,0.565078,0.000903362,-9.85524e-07,3.74007e-08,0.565981,0.000901504,-8.73322e-07,-4.0942e-09,0.566882,0.000899745,-8.85605e-07,-2.1024e-08,0.56778,0.00089791,-9.48677e-07,2.85854e-08,0.568677,0.000896099,-8.62921e-07,-3.3713e-08,0.569573,0.000894272,-9.64059e-07,4.6662e-08,0.570466,0.000892484,-8.24073e-07,-3.37258e-08,0.571358,0.000890734,-9.25251e-07,2.86365e-08,0.572247,0.00088897,-8.39341e-07,-2.12155e-08,0.573135,0.000887227,-9.02988e-07,-3.37913e-09,0.574022,0.000885411,-9.13125e-07,3.47319e-08,0.574906,0.000883689,-8.08929e-07,-1.63394e-08,0.575789,0.000882022,-8.57947e-07,-2.8979e-08,0.57667,0.00088022,-9.44885e-07,7.26509e-08,0.57755,0.000878548,-7.26932e-07,-8.28106e-08,0.578427,0.000876845,-9.75364e-07,7.97774e-08,0.579303,0.000875134,-7.36032e-07,-5.74849e-08,0.580178,0.00087349,-9.08486e-07,3.09529e-08,0.58105,0.000871765,-8.15628e-07,-6.72206e-09,0.581921,0.000870114,-8.35794e-07,-4.06451e-09,0.582791,0.00086843,-8.47987e-07,2.29799e-08,0.583658,0.000866803,-7.79048e-07,-2.82503e-08,0.584524,0.00086516,-8.63799e-07,3.04167e-08,0.585388,0.000863524,-7.72548e-07,-3.38119e-08,0.586251,0.000861877,-8.73984e-07,4.52264e-08,0.587112,0.000860265,-7.38305e-07,-2.78842e-08,0.587972,0.000858705,-8.21958e-07,6.70567e-09,0.58883,0.000857081,-8.01841e-07,1.06161e-09,0.589686,0.000855481,-7.98656e-07,-1.09521e-08,0.590541,0.00085385,-8.31
512e-07,4.27468e-08,0.591394,0.000852316,-7.03272e-07,-4.08257e-08,0.592245,0.000850787,-8.25749e-07,1.34677e-09,0.593095,0.000849139,-8.21709e-07,3.54387e-08,0.593944,0.000847602,-7.15393e-07,-2.38924e-08,0.59479,0.0008461,-7.8707e-07,5.26143e-10,0.595636,0.000844527,-7.85491e-07,2.17879e-08,0.596479,0.000843021,-7.20127e-07,-2.80733e-08,0.597322,0.000841497,-8.04347e-07,3.09005e-08,0.598162,0.000839981,-7.11646e-07,-3.5924e-08,0.599002,0.00083845,-8.19418e-07,5.3191e-08,0.599839,0.000836971,-6.59845e-07,-5.76307e-08,0.600676,0.000835478,-8.32737e-07,5.81227e-08,0.60151,0.000833987,-6.58369e-07,-5.56507e-08,0.602344,0.000832503,-8.25321e-07,4.52706e-08,0.603175,0.000830988,-6.89509e-07,-6.22236e-09,0.604006,0.000829591,-7.08176e-07,-2.03811e-08,0.604834,0.000828113,-7.6932e-07,2.8142e-08,0.605662,0.000826659,-6.84894e-07,-3.25822e-08,0.606488,0.000825191,-7.8264e-07,4.25823e-08,0.607312,0.000823754,-6.54893e-07,-1.85376e-08,0.608135,0.000822389,-7.10506e-07,-2.80365e-08,0.608957,0.000820883,-7.94616e-07,7.1079e-08,0.609777,0.000819507,-5.81379e-07,-7.74655e-08,0.610596,0.000818112,-8.13775e-07,5.9969e-08,0.611413,0.000816665,-6.33868e-07,-4.32013e-08,0.612229,0.000815267,-7.63472e-07,5.32313e-08,0.613044,0.0008139,-6.03778e-07,-5.05148e-08,0.613857,0.000812541,-7.55323e-07,2.96187e-08,0.614669,0.000811119,-6.66466e-07,-8.35545e-09,0.615479,0.000809761,-6.91533e-07,3.80301e-09,0.616288,0.00080839,-6.80124e-07,-6.85666e-09,0.617096,0.000807009,-7.00694e-07,2.36237e-08,0.617903,0.000805678,-6.29822e-07,-2.80336e-08,0.618708,0.000804334,-7.13923e-07,2.8906e-08,0.619511,0.000802993,-6.27205e-07,-2.79859e-08,0.620314,0.000801655,-7.11163e-07,2.34329e-08,0.621114,0.000800303,-6.40864e-07,-6.14108e-09,0.621914,0.000799003,-6.59287e-07,1.13151e-09,0.622712,0.000797688,-6.55893e-07,1.61507e-09,0.62351,0.000796381,-6.51048e-07,-7.59186e-09,0.624305,0.000795056,-6.73823e-07,2.87524e-08,0.6251,0.000793794,-5.87566e-07,-4.7813e-08,0.625893,0.000792476,-7.31005e-07,4.32901e-08,0.626685,0.000791144,-6.01135e-07,-6.13814e-09,0.627475,0.000789923,-6.19549e-07,-1.87376e-08,0.628264,0.000788628,-6.75762e-07,2.14837e-08,0.629052,0.000787341,-6.11311e-07,-7.59265e-09,0.629839,0.000786095,-6.34089e-07,8.88692e-09,0.630625,0.000784854,-6.07428e-07,-2.7955e-08,0.631409,0.000783555,-6.91293e-07,4.33285e-08,0.632192,0.000782302,-5.61307e-07,-2.61497e-08,0.632973,0.000781101,-6.39757e-07,1.6658e-09,0.633754,0.000779827,-6.34759e-07,1.94866e-08,0.634533,0.000778616,-5.76299e-07,-2.00076e-08,0.635311,0.000777403,-6.36322e-07,9.39091e-10,0.636088,0.000776133,-6.33505e-07,1.62512e-08,0.636863,0.000774915,-5.84751e-07,-6.33937e-09,0.637638,0.000773726,-6.03769e-07,9.10609e-09,0.638411,0.000772546,-5.76451e-07,-3.00849e-08,0.639183,0.000771303,-6.66706e-07,5.1629e-08,0.639953,0.000770125,-5.11819e-07,-5.7222e-08,0.640723,0.000768929,-6.83485e-07,5.80497e-08,0.641491,0.000767736,-5.09336e-07,-5.57674e-08,0.642259,0.000766551,-6.76638e-07,4.58105e-08,0.643024,0.000765335,-5.39206e-07,-8.26541e-09,0.643789,0.000764231,-5.64002e-07,-1.27488e-08,0.644553,0.000763065,-6.02249e-07,-3.44168e-10,0.645315,0.00076186,-6.03281e-07,1.41254e-08,0.646077,0.000760695,-5.60905e-07,3.44727e-09,0.646837,0.000759584,-5.50563e-07,-2.79144e-08,0.647596,0.000758399,-6.34307e-07,4.86057e-08,0.648354,0.000757276,-4.88489e-07,-4.72989e-08,0.64911,0.000756158,-6.30386e-07,2.13807e-08,0.649866,0.000754961,-5.66244e-07,2.13808e-08,0.65062,0.000753893,-5.02102e-07,-4.7299e-08,0.651374,0.000752746,-6.43999e-07,4.86059e-08,0.652126,0.000751604,-4.98181e-
07,-2.79154e-08,0.652877,0.000750524,-5.81927e-07,3.45089e-09,0.653627,0.000749371,-5.71575e-07,1.41119e-08,0.654376,0.00074827,-5.29239e-07,-2.93748e-10,0.655123,0.00074721,-5.3012e-07,-1.29368e-08,0.65587,0.000746111,-5.68931e-07,-7.56355e-09,0.656616,0.000744951,-5.91621e-07,4.3191e-08,0.65736,0.000743897,-4.62048e-07,-4.59911e-08,0.658103,0.000742835,-6.00022e-07,2.15642e-08,0.658846,0.0007417,-5.35329e-07,1.93389e-08,0.659587,0.000740687,-4.77312e-07,-3.93152e-08,0.660327,0.000739615,-5.95258e-07,1.87126e-08,0.661066,0.00073848,-5.3912e-07,2.40695e-08,0.661804,0.000737474,-4.66912e-07,-5.53859e-08,0.662541,0.000736374,-6.33069e-07,7.82648e-08,0.663277,0.000735343,-3.98275e-07,-7.88593e-08,0.664012,0.00073431,-6.34853e-07,5.83585e-08,0.664745,0.000733215,-4.59777e-07,-3.53656e-08,0.665478,0.000732189,-5.65874e-07,2.34994e-08,0.66621,0.000731128,-4.95376e-07,9.72743e-10,0.66694,0.00073014,-4.92458e-07,-2.73903e-08,0.66767,0.000729073,-5.74629e-07,4.89839e-08,0.668398,0.000728071,-4.27677e-07,-4.93359e-08,0.669126,0.000727068,-5.75685e-07,2.91504e-08,0.669853,0.000726004,-4.88234e-07,-7.66109e-09,0.670578,0.000725004,-5.11217e-07,1.49392e-09,0.671303,0.000723986,-5.06735e-07,1.68533e-09,0.672026,0.000722978,-5.01679e-07,-8.23525e-09,0.672749,0.00072195,-5.26385e-07,3.12556e-08,0.67347,0.000720991,-4.32618e-07,-5.71825e-08,0.674191,0.000719954,-6.04166e-07,7.8265e-08,0.67491,0.00071898,-3.69371e-07,-7.70634e-08,0.675628,0.00071801,-6.00561e-07,5.11747e-08,0.676346,0.000716963,-4.47037e-07,-8.42615e-09,0.677062,0.000716044,-4.72315e-07,-1.747e-08,0.677778,0.000715046,-5.24725e-07,1.87015e-08,0.678493,0.000714053,-4.68621e-07,2.26856e-09,0.679206,0.000713123,-4.61815e-07,-2.77758e-08,0.679919,0.000712116,-5.45142e-07,4.92298e-08,0.68063,0.000711173,-3.97453e-07,-4.99339e-08,0.681341,0.000710228,-5.47255e-07,3.12967e-08,0.682051,0.000709228,-4.53365e-07,-1.56481e-08,0.68276,0.000708274,-5.00309e-07,3.12958e-08,0.683467,0.000707367,-4.06422e-07,-4.99303e-08,0.684174,0.000706405,-5.56213e-07,4.9216e-08,0.68488,0.00070544,-4.08565e-07,-2.77245e-08,0.685585,0.00070454,-4.91738e-07,2.07748e-09,0.686289,0.000703562,-4.85506e-07,1.94146e-08,0.686992,0.00070265,-4.27262e-07,-2.01314e-08,0.687695,0.000701735,-4.87656e-07,1.50616e-09,0.688396,0.000700764,-4.83137e-07,1.41067e-08,0.689096,0.00069984,-4.40817e-07,1.67168e-09,0.689795,0.000698963,-4.35802e-07,-2.07934e-08,0.690494,0.000698029,-4.98182e-07,2.18972e-08,0.691192,0.000697099,-4.32491e-07,-7.19092e-09,0.691888,0.000696212,-4.54064e-07,6.86642e-09,0.692584,0.000695325,-4.33464e-07,-2.02747e-08,0.693279,0.000694397,-4.94288e-07,1.46279e-08,0.693973,0.000693452,-4.50405e-07,2.13678e-08,0.694666,0.000692616,-3.86301e-07,-4.04945e-08,0.695358,0.000691721,-5.07785e-07,2.14009e-08,0.696049,0.00069077,-4.43582e-07,1.44955e-08,0.69674,0.000689926,-4.00096e-07,-1.97783e-08,0.697429,0.000689067,-4.5943e-07,5.01296e-09,0.698118,0.000688163,-4.44392e-07,-2.73521e-10,0.698805,0.000687273,-4.45212e-07,-3.91893e-09,0.699492,0.000686371,-4.56969e-07,1.59493e-08,0.700178,0.000685505,-4.09121e-07,-2.73351e-10,0.700863,0.000684686,-4.09941e-07,-1.4856e-08,0.701548,0.000683822,-4.54509e-07,9.25979e-11,0.702231,0.000682913,-4.54231e-07,1.44855e-08,0.702913,0.000682048,-4.10775e-07,1.56992e-09,0.703595,0.000681231,-4.06065e-07,-2.07652e-08,0.704276,0.000680357,-4.68361e-07,2.18864e-08,0.704956,0.000679486,-4.02701e-07,-7.17595e-09,0.705635,0.000678659,-4.24229e-07,6.81748e-09,0.706313,0.000677831,-4.03777e-07,-2.0094e-08,0.70699,0.000676963,-4.64059e-07,1.39538e-08,
0.707667,0.000676077,-4.22197e-07,2.38835e-08,0.708343,0.000675304,-3.50547e-07,-4.98831e-08,0.709018,0.000674453,-5.00196e-07,5.64395e-08,0.709692,0.000673622,-3.30878e-07,-5.66657e-08,0.710365,0.00067279,-5.00875e-07,5.1014e-08,0.711037,0.000671942,-3.47833e-07,-2.81809e-08,0.711709,0.000671161,-4.32376e-07,2.10513e-09,0.712379,0.000670303,-4.2606e-07,1.97604e-08,0.713049,0.00066951,-3.66779e-07,-2.15422e-08,0.713718,0.000668712,-4.31406e-07,6.8038e-09,0.714387,0.000667869,-4.10994e-07,-5.67295e-09,0.715054,0.00066703,-4.28013e-07,1.5888e-08,0.715721,0.000666222,-3.80349e-07,1.72576e-09,0.716387,0.000665467,-3.75172e-07,-2.27911e-08,0.717052,0.000664648,-4.43545e-07,2.9834e-08,0.717716,0.00066385,-3.54043e-07,-3.69401e-08,0.718379,0.000663031,-4.64864e-07,5.83219e-08,0.719042,0.000662277,-2.89898e-07,-7.71382e-08,0.719704,0.000661465,-5.21313e-07,7.14171e-08,0.720365,0.000660637,-3.07061e-07,-2.97161e-08,0.721025,0.000659934,-3.96209e-07,-1.21575e-08,0.721685,0.000659105,-4.32682e-07,1.87412e-08,0.722343,0.000658296,-3.76458e-07,-3.2029e-09,0.723001,0.000657533,-3.86067e-07,-5.9296e-09,0.723659,0.000656743,-4.03856e-07,2.69213e-08,0.724315,0.000656016,-3.23092e-07,-4.21511e-08,0.724971,0.000655244,-4.49545e-07,2.24737e-08,0.725625,0.000654412,-3.82124e-07,1.18611e-08,0.726279,0.000653683,-3.46541e-07,-1.03132e-08,0.726933,0.000652959,-3.7748e-07,-3.02128e-08,0.727585,0.000652114,-4.68119e-07,7.15597e-08,0.728237,0.000651392,-2.5344e-07,-7.72119e-08,0.728888,0.000650654,-4.85075e-07,5.8474e-08,0.729538,0.000649859,-3.09654e-07,-3.74746e-08,0.730188,0.000649127,-4.22077e-07,3.18197e-08,0.730837,0.000648379,-3.26618e-07,-3.01997e-08,0.731485,0.000647635,-4.17217e-07,2.93747e-08,0.732132,0.000646888,-3.29093e-07,-2.76943e-08,0.732778,0.000646147,-4.12176e-07,2.17979e-08,0.733424,0.000645388,-3.46783e-07,1.07292e-10,0.734069,0.000644695,-3.46461e-07,-2.22271e-08,0.734713,0.000643935,-4.13142e-07,2.91963e-08,0.735357,0.000643197,-3.25553e-07,-3.49536e-08,0.736,0.000642441,-4.30414e-07,5.10133e-08,0.736642,0.000641733,-2.77374e-07,-4.98904e-08,0.737283,0.000641028,-4.27045e-07,2.93392e-08,0.737924,0.000640262,-3.39028e-07,-7.86156e-09,0.738564,0.000639561,-3.62612e-07,2.10703e-09,0.739203,0.000638842,-3.56291e-07,-5.6653e-10,0.739842,0.000638128,-3.57991e-07,1.59086e-10,0.740479,0.000637412,-3.57513e-07,-6.98321e-11,0.741116,0.000636697,-3.57723e-07,1.20214e-10,0.741753,0.000635982,-3.57362e-07,-4.10987e-10,0.742388,0.000635266,-3.58595e-07,1.5237e-09,0.743023,0.000634553,-3.54024e-07,-5.68376e-09,0.743657,0.000633828,-3.71075e-07,2.12113e-08,0.744291,0.00063315,-3.07441e-07,-1.95569e-08,0.744924,0.000632476,-3.66112e-07,-2.58816e-09,0.745556,0.000631736,-3.73877e-07,2.99096e-08,0.746187,0.000631078,-2.84148e-07,-5.74454e-08,0.746818,0.000630337,-4.56484e-07,8.06629e-08,0.747448,0.000629666,-2.14496e-07,-8.63922e-08,0.748077,0.000628978,-4.73672e-07,8.60918e-08,0.748706,0.000628289,-2.15397e-07,-7.91613e-08,0.749334,0.000627621,-4.5288e-07,5.17393e-08,0.749961,0.00062687,-2.97663e-07,-8.58662e-09,0.750588,0.000626249,-3.23422e-07,-1.73928e-08,0.751214,0.00062555,-3.75601e-07,1.85532e-08,0.751839,0.000624855,-3.19941e-07,2.78479e-09,0.752463,0.000624223,-3.11587e-07,-2.96923e-08,0.753087,0.000623511,-4.00664e-07,5.63799e-08,0.75371,0.000622879,-2.31524e-07,-7.66179e-08,0.754333,0.000622186,-4.61378e-07,7.12778e-08,0.754955,0.000621477,-2.47545e-07,-2.96794e-08,0.755576,0.000620893,-3.36583e-07,-1.21648e-08,0.756196,0.000620183,-3.73077e-07,1.87339e-08,0.756816,0.000619493,-3.16875e-07,-3.16622e-0
9,0.757435,0.00061885,-3.26374e-07,-6.0691e-09,0.758054,0.000618179,-3.44581e-07,2.74426e-08,0.758672,0.000617572,-2.62254e-07,-4.40968e-08,0.759289,0.000616915,-3.94544e-07,2.97352e-08,0.759906,0.000616215,-3.05338e-07,-1.52393e-08,0.760522,0.000615559,-3.51056e-07,3.12221e-08,0.761137,0.000614951,-2.5739e-07,-5.00443e-08,0.761751,0.000614286,-4.07523e-07,4.9746e-08,0.762365,0.00061362,-2.58285e-07,-2.97303e-08,0.762979,0.000613014,-3.47476e-07,9.57079e-09,0.763591,0.000612348,-3.18764e-07,-8.55287e-09,0.764203,0.000611685,-3.44422e-07,2.46407e-08,0.764815,0.00061107,-2.705e-07,-3.04053e-08,0.765426,0.000610437,-3.61716e-07,3.73759e-08,0.766036,0.000609826,-2.49589e-07,-5.94935e-08,0.766645,0.000609149,-4.28069e-07,8.13889e-08,0.767254,0.000608537,-1.83902e-07,-8.72483e-08,0.767862,0.000607907,-4.45647e-07,8.87901e-08,0.76847,0.000607282,-1.79277e-07,-8.90983e-08,0.769077,0.000606656,-4.46572e-07,8.87892e-08,0.769683,0.000606029,-1.80204e-07,-8.72446e-08,0.770289,0.000605407,-4.41938e-07,8.13752e-08,0.770894,0.000604768,-1.97812e-07,-5.94423e-08,0.771498,0.000604194,-3.76139e-07,3.71848e-08,0.772102,0.000603553,-2.64585e-07,-2.96922e-08,0.772705,0.000602935,-3.53661e-07,2.19793e-08,0.773308,0.000602293,-2.87723e-07,1.37955e-09,0.77391,0.000601722,-2.83585e-07,-2.74976e-08,0.774512,0.000601072,-3.66077e-07,4.9006e-08,0.775112,0.000600487,-2.19059e-07,-4.93171e-08,0.775712,0.000599901,-3.67011e-07,2.90531e-08,0.776312,0.000599254,-2.79851e-07,-7.29081e-09,0.776911,0.000598673,-3.01724e-07,1.10077e-10,0.777509,0.00059807,-3.01393e-07,6.85053e-09,0.778107,0.000597487,-2.80842e-07,-2.75123e-08,0.778704,0.000596843,-3.63379e-07,4.35939e-08,0.779301,0.000596247,-2.32597e-07,-2.7654e-08,0.779897,0.000595699,-3.15559e-07,7.41741e-09,0.780492,0.00059509,-2.93307e-07,-2.01562e-09,0.781087,0.000594497,-2.99354e-07,6.45059e-10,0.781681,0.000593901,-2.97418e-07,-5.64635e-10,0.782275,0.000593304,-2.99112e-07,1.61347e-09,0.782868,0.000592711,-2.94272e-07,-5.88926e-09,0.78346,0.000592105,-3.1194e-07,2.19436e-08,0.784052,0.000591546,-2.46109e-07,-2.22805e-08,0.784643,0.000590987,-3.1295e-07,7.57368e-09,0.785234,0.000590384,-2.90229e-07,-8.01428e-09,0.785824,0.00058978,-3.14272e-07,2.44834e-08,0.786414,0.000589225,-2.40822e-07,-3.03148e-08,0.787003,0.000588652,-3.31766e-07,3.7171e-08,0.787591,0.0005881,-2.20253e-07,-5.87646e-08,0.788179,0.000587483,-3.96547e-07,7.86782e-08,0.788766,0.000586926,-1.60512e-07,-7.71342e-08,0.789353,0.000586374,-3.91915e-07,5.10444e-08,0.789939,0.000585743,-2.38782e-07,-7.83422e-09,0.790524,0.000585242,-2.62284e-07,-1.97076e-08,0.791109,0.000584658,-3.21407e-07,2.70598e-08,0.791693,0.000584097,-2.40228e-07,-2.89269e-08,0.792277,0.000583529,-3.27008e-07,2.90431e-08,0.792861,0.000582963,-2.39879e-07,-2.76409e-08,0.793443,0.0005824,-3.22802e-07,2.1916e-08,0.794025,0.00058182,-2.57054e-07,-4.18368e-10,0.794607,0.000581305,-2.58309e-07,-2.02425e-08,0.795188,0.000580727,-3.19036e-07,2.17838e-08,0.795768,0.000580155,-2.53685e-07,-7.28814e-09,0.796348,0.000579625,-2.75549e-07,7.36871e-09,0.796928,0.000579096,-2.53443e-07,-2.21867e-08,0.797506,0.000578523,-3.20003e-07,2.17736e-08,0.798085,0.000577948,-2.54683e-07,-5.30296e-09,0.798662,0.000577423,-2.70592e-07,-5.61698e-10,0.799239,0.00057688,-2.72277e-07,7.54977e-09,0.799816,0.000576358,-2.49627e-07,-2.96374e-08,0.800392,0.00057577,-3.38539e-07,5.1395e-08,0.800968,0.000575247,-1.84354e-07,-5.67335e-08,0.801543,0.000574708,-3.54555e-07,5.63297e-08,0.802117,0.000574168,-1.85566e-07,-4.93759e-08,0.802691,0.000573649,-3.33693e-07,2.19646e-08,
0.803264,0.000573047,-2.678e-07,2.1122e-08,0.803837,0.000572575,-2.04433e-07,-4.68482e-08,0.804409,0.000572026,-3.44978e-07,4.70613e-08,0.804981,0.000571477,-2.03794e-07,-2.21877e-08,0.805552,0.000571003,-2.70357e-07,-1.79153e-08,0.806123,0.000570408,-3.24103e-07,3.42443e-08,0.806693,0.000569863,-2.2137e-07,1.47556e-10,0.807263,0.000569421,-2.20928e-07,-3.48345e-08,0.807832,0.000568874,-3.25431e-07,1.99812e-08,0.808401,0.000568283,-2.65487e-07,1.45143e-08,0.808969,0.000567796,-2.21945e-07,-1.84338e-08,0.809536,0.000567297,-2.77246e-07,-3.83608e-10,0.810103,0.000566741,-2.78397e-07,1.99683e-08,0.81067,0.000566244,-2.18492e-07,-1.98848e-08,0.811236,0.000565747,-2.78146e-07,-3.38976e-11,0.811801,0.000565191,-2.78248e-07,2.00204e-08,0.812366,0.000564695,-2.18187e-07,-2.04429e-08,0.812931,0.000564197,-2.79516e-07,2.1467e-09,0.813495,0.000563644,-2.73076e-07,1.18561e-08,0.814058,0.000563134,-2.37507e-07,1.00334e-08,0.814621,0.000562689,-2.07407e-07,-5.19898e-08,0.815183,0.000562118,-3.63376e-07,7.87163e-08,0.815745,0.000561627,-1.27227e-07,-8.40616e-08,0.816306,0.000561121,-3.79412e-07,7.87163e-08,0.816867,0.000560598,-1.43263e-07,-5.19898e-08,0.817428,0.000560156,-2.99233e-07,1.00335e-08,0.817988,0.000559587,-2.69132e-07,1.18559e-08,0.818547,0.000559085,-2.33564e-07,2.14764e-09,0.819106,0.000558624,-2.27122e-07,-2.04464e-08,0.819664,0.000558108,-2.88461e-07,2.00334e-08,0.820222,0.000557591,-2.28361e-07,-8.24277e-11,0.820779,0.000557135,-2.28608e-07,-1.97037e-08,0.821336,0.000556618,-2.87719e-07,1.92925e-08,0.821893,0.000556101,-2.29841e-07,2.13831e-09,0.822448,0.000555647,-2.23427e-07,-2.78458e-08,0.823004,0.000555117,-3.06964e-07,4.96402e-08,0.823559,0.000554652,-1.58043e-07,-5.15058e-08,0.824113,0.000554181,-3.12561e-07,3.71737e-08,0.824667,0.000553668,-2.0104e-07,-3.75844e-08,0.82522,0.000553153,-3.13793e-07,5.35592e-08,0.825773,0.000552686,-1.53115e-07,-5.74431e-08,0.826326,0.000552207,-3.25444e-07,5.7004e-08,0.826878,0.000551728,-1.54433e-07,-5.13635e-08,0.827429,0.000551265,-3.08523e-07,2.92406e-08,0.82798,0.000550735,-2.20801e-07,-5.99424e-09,0.828531,0.000550276,-2.38784e-07,-5.26363e-09,0.829081,0.000549782,-2.54575e-07,2.70488e-08,0.82963,0.000549354,-1.73429e-07,-4.33268e-08,0.83018,0.000548878,-3.03409e-07,2.7049e-08,0.830728,0.000548352,-2.22262e-07,-5.26461e-09,0.831276,0.000547892,-2.38056e-07,-5.99057e-09,0.831824,0.000547397,-2.56027e-07,2.92269e-08,0.832371,0.000546973,-1.68347e-07,-5.13125e-08,0.832918,0.000546482,-3.22284e-07,5.68139e-08,0.833464,0.000546008,-1.51843e-07,-5.67336e-08,0.83401,0.000545534,-3.22043e-07,5.09113e-08,0.834555,0.000545043,-1.6931e-07,-2.77022e-08,0.8351,0.000544621,-2.52416e-07,2.92924e-10,0.835644,0.000544117,-2.51537e-07,2.65305e-08,0.836188,0.000543694,-1.71946e-07,-4.68105e-08,0.836732,0.00054321,-3.12377e-07,4.15021e-08,0.837275,0.000542709,-1.87871e-07,1.13355e-11,0.837817,0.000542334,-1.87837e-07,-4.15474e-08,0.838359,0.000541833,-3.12479e-07,4.69691e-08,0.838901,0.000541349,-1.71572e-07,-2.71196e-08,0.839442,0.000540925,-2.52931e-07,1.90462e-09,0.839983,0.000540425,-2.47217e-07,1.95011e-08,0.840523,0.000539989,-1.88713e-07,-2.03045e-08,0.841063,0.00053955,-2.49627e-07,2.11216e-09,0.841602,0.000539057,-2.4329e-07,1.18558e-08,0.842141,0.000538606,-2.07723e-07,1.00691e-08,0.842679,0.000538221,-1.77516e-07,-5.21324e-08,0.843217,0.00053771,-3.33913e-07,7.92513e-08,0.843755,0.00053728,-9.6159e-08,-8.60587e-08,0.844292,0.000536829,-3.54335e-07,8.61696e-08,0.844828,0.000536379,-9.58263e-08,-7.98057e-08,0.845364,0.000535948,-3.35243e-07,5.42394e-08,0
.8459,0.00053544,-1.72525e-07,-1.79426e-08,0.846435,0.000535041,-2.26353e-07,1.75308e-08,0.84697,0.000534641,-1.73761e-07,-5.21806e-08,0.847505,0.000534137,-3.30302e-07,7.19824e-08,0.848038,0.000533692,-1.14355e-07,-5.69349e-08,0.848572,0.000533293,-2.8516e-07,3.65479e-08,0.849105,0.000532832,-1.75516e-07,-2.96519e-08,0.849638,0.000532392,-2.64472e-07,2.2455e-08,0.85017,0.000531931,-1.97107e-07,-5.63451e-10,0.850702,0.000531535,-1.98797e-07,-2.02011e-08,0.851233,0.000531077,-2.59401e-07,2.17634e-08,0.851764,0.000530623,-1.94111e-07,-7.24794e-09,0.852294,0.000530213,-2.15854e-07,7.22832e-09,0.852824,0.000529803,-1.94169e-07,-2.16653e-08,0.853354,0.00052935,-2.59165e-07,1.98283e-08,0.853883,0.000528891,-1.9968e-07,1.95678e-09,0.854412,0.000528497,-1.9381e-07,-2.76554e-08,0.85494,0.000528027,-2.76776e-07,4.90603e-08,0.855468,0.00052762,-1.29596e-07,-4.93764e-08,0.855995,0.000527213,-2.77725e-07,2.92361e-08,0.856522,0.000526745,-1.90016e-07,-7.96341e-09,0.857049,0.000526341,-2.13907e-07,2.61752e-09,0.857575,0.000525922,-2.06054e-07,-2.50665e-09,0.8581,0.000525502,-2.13574e-07,7.40906e-09,0.858626,0.000525097,-1.91347e-07,-2.71296e-08,0.859151,0.000524633,-2.72736e-07,4.15048e-08,0.859675,0.000524212,-1.48221e-07,-1.96802e-08,0.860199,0.000523856,-2.07262e-07,-2.23886e-08,0.860723,0.000523375,-2.74428e-07,4.96299e-08,0.861246,0.000522975,-1.25538e-07,-5.69216e-08,0.861769,0.000522553,-2.96303e-07,5.88473e-08,0.862291,0.000522137,-1.19761e-07,-5.92584e-08,0.862813,0.00052172,-2.97536e-07,5.8977e-08,0.863334,0.000521301,-1.20605e-07,-5.74403e-08,0.863855,0.000520888,-2.92926e-07,5.15751e-08,0.864376,0.000520457,-1.38201e-07,-2.96506e-08,0.864896,0.000520091,-2.27153e-07,7.42277e-09,0.865416,0.000519659,-2.04885e-07,-4.05057e-11,0.865936,0.00051925,-2.05006e-07,-7.26074e-09,0.866455,0.000518818,-2.26788e-07,2.90835e-08,0.866973,0.000518451,-1.39538e-07,-4.94686e-08,0.867492,0.000518024,-2.87944e-07,4.95814e-08,0.868009,0.000517597,-1.39199e-07,-2.96479e-08,0.868527,0.000517229,-2.28143e-07,9.40539e-09,0.869044,0.000516801,-1.99927e-07,-7.9737e-09,0.86956,0.000516378,-2.23848e-07,2.24894e-08,0.870077,0.000515997,-1.5638e-07,-2.23793e-08,0.870592,0.000515617,-2.23517e-07,7.42302e-09,0.871108,0.000515193,-2.01248e-07,-7.31283e-09,0.871623,0.000514768,-2.23187e-07,2.18283e-08,0.872137,0.000514387,-1.57702e-07,-2.03959e-08,0.872652,0.000514011,-2.1889e-07,1.50711e-10,0.873165,0.000513573,-2.18437e-07,1.97931e-08,0.873679,0.000513196,-1.59058e-07,-1.97183e-08,0.874192,0.000512819,-2.18213e-07,-5.24324e-10,0.874704,0.000512381,-2.19786e-07,2.18156e-08,0.875217,0.000512007,-1.54339e-07,-2.71336e-08,0.875728,0.000511616,-2.3574e-07,2.71141e-08,0.87624,0.000511226,-1.54398e-07,-2.17182e-08,0.876751,0.000510852,-2.19552e-07,1.54131e-10,0.877262,0.000510414,-2.1909e-07,2.11017e-08,0.877772,0.000510039,-1.55785e-07,-2.49562e-08,0.878282,0.000509652,-2.30654e-07,1.91183e-08,0.878791,0.000509248,-1.73299e-07,8.08751e-09,0.8793,0.000508926,-1.49036e-07,-5.14684e-08,0.879809,0.000508474,-3.03441e-07,7.85766e-08,0.880317,0.000508103,-6.77112e-08,-8.40242e-08,0.880825,0.000507715,-3.19784e-07,7.87063e-08,0.881333,0.000507312,-8.36649e-08,-5.19871e-08,0.88184,0.000506988,-2.39626e-07,1.00327e-08,0.882346,0.000506539,-2.09528e-07,1.18562e-08,0.882853,0.000506156,-1.73959e-07,2.14703e-09,0.883359,0.000505814,-1.67518e-07,-2.04444e-08,0.883864,0.000505418,-2.28851e-07,2.00258e-08,0.88437,0.00050502,-1.68774e-07,-5.42855e-11,0.884874,0.000504682,-1.68937e-07,-1.98087e-08,0.885379,0.000504285,-2.28363e-07,1.96842e-08,0.885
883,0.000503887,-1.6931e-07,6.76342e-10,0.886387,0.000503551,-1.67281e-07,-2.23896e-08,0.88689,0.000503149,-2.3445e-07,2.92774e-08,0.887393,0.000502768,-1.46618e-07,-3.51152e-08,0.887896,0.00050237,-2.51963e-07,5.15787e-08,0.888398,0.00050202,-9.72271e-08,-5.19903e-08,0.8889,0.00050167,-2.53198e-07,3.71732e-08,0.889401,0.000501275,-1.41678e-07,-3.70978e-08,0.889902,0.00050088,-2.52972e-07,5.16132e-08,0.890403,0.000500529,-9.81321e-08,-5.01459e-08,0.890903,0.000500183,-2.4857e-07,2.9761e-08,0.891403,0.000499775,-1.59287e-07,-9.29351e-09,0.891903,0.000499428,-1.87167e-07,7.41301e-09,0.892402,0.000499076,-1.64928e-07,-2.03585e-08,0.892901,0.000498685,-2.26004e-07,1.44165e-08,0.893399,0.000498276,-1.82754e-07,2.22974e-08,0.893898,0.000497978,-1.15862e-07,-4.40013e-08,0.894395,0.000497614,-2.47866e-07,3.44985e-08,0.894893,0.000497222,-1.44371e-07,-3.43882e-08,0.89539,0.00049683,-2.47535e-07,4.34497e-08,0.895886,0.000496465,-1.17186e-07,-2.02012e-08,0.896383,0.00049617,-1.7779e-07,-2.22497e-08,0.896879,0.000495748,-2.44539e-07,4.95952e-08,0.897374,0.000495408,-9.57532e-08,-5.69217e-08,0.89787,0.000495045,-2.66518e-07,5.88823e-08,0.898364,0.000494689,-8.98713e-08,-5.93983e-08,0.898859,0.000494331,-2.68066e-07,5.95017e-08,0.899353,0.000493973,-8.95613e-08,-5.9399e-08,0.899847,0.000493616,-2.67758e-07,5.8885e-08,0.90034,0.000493257,-9.11033e-08,-5.69317e-08,0.900833,0.000492904,-2.61898e-07,4.96326e-08,0.901326,0.000492529,-1.13001e-07,-2.23893e-08,0.901819,0.000492236,-1.80169e-07,-1.968e-08,0.902311,0.000491817,-2.39209e-07,4.15047e-08,0.902802,0.000491463,-1.14694e-07,-2.71296e-08,0.903293,0.000491152,-1.96083e-07,7.409e-09,0.903784,0.000490782,-1.73856e-07,-2.50645e-09,0.904275,0.000490427,-1.81376e-07,2.61679e-09,0.904765,0.000490072,-1.73525e-07,-7.96072e-09,0.905255,0.000489701,-1.97407e-07,2.92261e-08,0.905745,0.000489394,-1.09729e-07,-4.93389e-08,0.906234,0.000489027,-2.57746e-07,4.89204e-08,0.906723,0.000488658,-1.10985e-07,-2.71333e-08,0.907211,0.000488354,-1.92385e-07,8.30861e-12,0.907699,0.00048797,-1.9236e-07,2.71001e-08,0.908187,0.000487666,-1.1106e-07,-4.88041e-08,0.908675,0.000487298,-2.57472e-07,4.89069e-08,0.909162,0.000486929,-1.10751e-07,-2.76143e-08,0.909649,0.000486625,-1.93594e-07,1.9457e-09,0.910135,0.000486244,-1.87757e-07,1.98315e-08,0.910621,0.000485928,-1.28262e-07,-2.16671e-08,0.911107,0.000485606,-1.93264e-07,7.23216e-09,0.911592,0.000485241,-1.71567e-07,-7.26152e-09,0.912077,0.000484877,-1.93352e-07,2.18139e-08,0.912562,0.000484555,-1.2791e-07,-2.03895e-08,0.913047,0.000484238,-1.89078e-07,1.39494e-10,0.913531,0.000483861,-1.8866e-07,1.98315e-08,0.914014,0.000483543,-1.29165e-07,-1.98609e-08,0.914498,0.000483225,-1.88748e-07,7.39912e-12,0.914981,0.000482847,-1.88726e-07,1.98313e-08,0.915463,0.000482529,-1.29232e-07,-1.9728e-08,0.915946,0.000482212,-1.88416e-07,-5.24035e-10,0.916428,0.000481833,-1.89988e-07,2.18241e-08,0.916909,0.000481519,-1.24516e-07,-2.71679e-08,0.917391,0.000481188,-2.06019e-07,2.72427e-08,0.917872,0.000480858,-1.24291e-07,-2.21985e-08,0.918353,0.000480543,-1.90886e-07,1.94644e-09,0.918833,0.000480167,-1.85047e-07,1.44127e-08,0.919313,0.00047984,-1.41809e-07,7.39438e-12,0.919793,0.000479556,-1.41787e-07,-1.44423e-08,0.920272,0.000479229,-1.85114e-07,-1.84291e-09,0.920751,0.000478854,-1.90642e-07,2.18139e-08,0.92123,0.000478538,-1.25201e-07,-2.58081e-08,0.921708,0.00047821,-2.02625e-07,2.18139e-08,0.922186,0.00047787,-1.37183e-07,-1.84291e-09,0.922664,0.00047759,-1.42712e-07,-1.44423e-08,0.923141,0.000477262,-1.86039e-07,7.34701e-12,0.923618,0.00047
689,-1.86017e-07,1.44129e-08,0.924095,0.000476561,-1.42778e-07,1.94572e-09,0.924572,0.000476281,-1.36941e-07,-2.21958e-08,0.925048,0.000475941,-2.03528e-07,2.72327e-08,0.925523,0.000475615,-1.2183e-07,-2.71304e-08,0.925999,0.00047529,-2.03221e-07,2.16843e-08,0.926474,0.000474949,-1.38168e-07,-2.16005e-12,0.926949,0.000474672,-1.38175e-07,-2.16756e-08,0.927423,0.000474331,-2.03202e-07,2.71001e-08,0.927897,0.000474006,-1.21902e-07,-2.71201e-08,0.928371,0.000473681,-2.03262e-07,2.17757e-08,0.928845,0.00047334,-1.37935e-07,-3.78028e-10,0.929318,0.000473063,-1.39069e-07,-2.02636e-08,0.929791,0.000472724,-1.9986e-07,2.18276e-08,0.930263,0.000472389,-1.34377e-07,-7.44231e-09,0.930736,0.000472098,-1.56704e-07,7.94165e-09,0.931208,0.000471809,-1.32879e-07,-2.43243e-08,0.931679,0.00047147,-2.05851e-07,2.97508e-08,0.932151,0.000471148,-1.16599e-07,-3.50742e-08,0.932622,0.000470809,-2.21822e-07,5.09414e-08,0.933092,0.000470518,-6.89976e-08,-4.94821e-08,0.933563,0.000470232,-2.17444e-07,2.77775e-08,0.934033,0.00046988,-1.34111e-07,-2.02351e-09,0.934502,0.000469606,-1.40182e-07,-1.96835e-08,0.934972,0.000469267,-1.99232e-07,2.11529e-08,0.935441,0.000468932,-1.35774e-07,-5.32332e-09,0.93591,0.000468644,-1.51743e-07,1.40413e-10,0.936378,0.000468341,-1.51322e-07,4.76166e-09,0.936846,0.000468053,-1.37037e-07,-1.9187e-08,0.937314,0.000467721,-1.94598e-07,1.23819e-08,0.937782,0.000467369,-1.57453e-07,2.92642e-08,0.938249,0.000467142,-6.96601e-08,-6.98342e-08,0.938716,0.000466793,-2.79163e-07,7.12586e-08,0.939183,0.000466449,-6.53869e-08,-3.63863e-08,0.939649,0.000466209,-1.74546e-07,1.46818e-08,0.940115,0.000465904,-1.305e-07,-2.2341e-08,0.940581,0.000465576,-1.97523e-07,1.50774e-08,0.941046,0.000465226,-1.52291e-07,2.16359e-08,0.941511,0.000464986,-8.73832e-08,-4.20162e-08,0.941976,0.000464685,-2.13432e-07,2.72198e-08,0.942441,0.00046434,-1.31773e-07,-7.2581e-09,0.942905,0.000464055,-1.53547e-07,1.81263e-09,0.943369,0.000463753,-1.48109e-07,7.58386e-12,0.943832,0.000463457,-1.48086e-07,-1.84298e-09,0.944296,0.000463155,-1.53615e-07,7.36433e-09,0.944759,0.00046287,-1.31522e-07,-2.76143e-08,0.945221,0.000462524,-2.14365e-07,4.34883e-08,0.945684,0.000462226,-8.39003e-08,-2.71297e-08,0.946146,0.000461977,-1.65289e-07,5.42595e-09,0.946608,0.000461662,-1.49012e-07,5.42593e-09,0.947069,0.000461381,-1.32734e-07,-2.71297e-08,0.94753,0.000461034,-2.14123e-07,4.34881e-08,0.947991,0.000460736,-8.36585e-08,-2.76134e-08,0.948452,0.000460486,-1.66499e-07,7.36083e-09,0.948912,0.000460175,-1.44416e-07,-1.82993e-09,0.949372,0.000459881,-1.49906e-07,-4.11073e-11,0.949832,0.000459581,-1.50029e-07,1.99434e-09,0.950291,0.000459287,-1.44046e-07,-7.93627e-09,0.950751,0.000458975,-1.67855e-07,2.97507e-08,0.951209,0.000458728,-7.86029e-08,-5.1462e-08,0.951668,0.000458417,-2.32989e-07,5.6888e-08,0.952126,0.000458121,-6.2325e-08,-5.68806e-08,0.952584,0.000457826,-2.32967e-07,5.14251e-08,0.953042,0.000457514,-7.86914e-08,-2.96107e-08,0.953499,0.000457268,-1.67523e-07,7.41296e-09,0.953956,0.000456955,-1.45285e-07,-4.11262e-11,0.954413,0.000456665,-1.45408e-07,-7.24847e-09,0.95487,0.000456352,-1.67153e-07,2.9035e-08,0.955326,0.000456105,-8.00484e-08,-4.92869e-08,0.955782,0.000455797,-2.27909e-07,4.89032e-08,0.956238,0.000455488,-8.11994e-08,-2.71166e-08,0.956693,0.000455244,-1.62549e-07,-4.13678e-11,0.957148,0.000454919,-1.62673e-07,2.72821e-08,0.957603,0.000454675,-8.0827e-08,-4.94824e-08,0.958057,0.000454365,-2.29274e-07,5.14382e-08,0.958512,0.000454061,-7.49597e-08,-3.7061e-08,0.958965,0.0004538,-1.86143e-07,3.72013e-08,0.959419,0.00045
3539,-7.45389e-08,-5.21396e-08,0.959873,0.000453234,-2.30958e-07,5.21476e-08,0.960326,0.000452928,-7.45146e-08,-3.72416e-08,0.960778,0.000452667,-1.8624e-07,3.72143e-08,0.961231,0.000452407,-7.45967e-08,-5.20109e-08,0.961683,0.000452101,-2.30629e-07,5.16199e-08,0.962135,0.000451795,-7.57696e-08,-3.52595e-08,0.962587,0.000451538,-1.81548e-07,2.98133e-08,0.963038,0.000451264,-9.2108e-08,-2.43892e-08,0.963489,0.000451007,-1.65276e-07,8.13892e-09,0.96394,0.000450701,-1.40859e-07,-8.16647e-09,0.964391,0.000450394,-1.65358e-07,2.45269e-08,0.964841,0.000450137,-9.17775e-08,-3.03367e-08,0.965291,0.000449863,-1.82787e-07,3.7215e-08,0.965741,0.000449609,-7.11424e-08,-5.89188e-08,0.96619,0.00044929,-2.47899e-07,7.92509e-08,0.966639,0.000449032,-1.01462e-08,-7.92707e-08,0.967088,0.000448773,-2.47958e-07,5.90181e-08,0.967537,0.000448455,-7.0904e-08,-3.75925e-08,0.967985,0.0004482,-1.83681e-07,3.17471e-08,0.968433,0.000447928,-8.84401e-08,-2.97913e-08,0.968881,0.000447662,-1.77814e-07,2.78133e-08,0.969329,0.000447389,-9.4374e-08,-2.18572e-08,0.969776,0.000447135,-1.59946e-07,1.10134e-11,0.970223,0.000446815,-1.59913e-07,2.18132e-08,0.97067,0.000446561,-9.44732e-08,-2.76591e-08,0.971116,0.000446289,-1.7745e-07,2.92185e-08,0.971562,0.000446022,-8.97948e-08,-2.96104e-08,0.972008,0.000445753,-1.78626e-07,2.96185e-08,0.972454,0.000445485,-8.97706e-08,-2.92588e-08,0.972899,0.000445218,-1.77547e-07,2.78123e-08,0.973344,0.000444946,-9.41103e-08,-2.23856e-08,0.973789,0.000444691,-1.61267e-07,2.12559e-09,0.974233,0.000444374,-1.5489e-07,1.38833e-08,0.974678,0.000444106,-1.13241e-07,1.94591e-09,0.975122,0.000443886,-1.07403e-07,-2.16669e-08,0.975565,0.000443606,-1.72404e-07,2.5117e-08,0.976009,0.000443336,-9.70526e-08,-1.91963e-08,0.976452,0.000443085,-1.54642e-07,-7.93627e-09,0.976895,0.000442752,-1.7845e-07,5.09414e-08,0.977338,0.000442548,-2.56262e-08,-7.66201e-08,0.97778,0.000442266,-2.55486e-07,7.67249e-08,0.978222,0.000441986,-2.53118e-08,-5.14655e-08,0.978664,0.000441781,-1.79708e-07,9.92773e-09,0.979106,0.000441451,-1.49925e-07,1.17546e-08,0.979547,0.000441186,-1.14661e-07,2.65868e-09,0.979988,0.000440965,-1.06685e-07,-2.23893e-08,0.980429,0.000440684,-1.73853e-07,2.72939e-08,0.980869,0.000440419,-9.19716e-08,-2.71816e-08,0.98131,0.000440153,-1.73516e-07,2.18278e-08,0.98175,0.000439872,-1.08033e-07,-5.24833e-10,0.982189,0.000439654,-1.09607e-07,-1.97284e-08,0.982629,0.000439376,-1.68793e-07,1.98339e-08,0.983068,0.000439097,-1.09291e-07,-2.62901e-12,0.983507,0.000438879,-1.09299e-07,-1.98234e-08,0.983946,0.000438601,-1.68769e-07,1.96916e-08,0.984384,0.000438322,-1.09694e-07,6.6157e-10,0.984823,0.000438105,-1.0771e-07,-2.23379e-08,0.985261,0.000437823,-1.74723e-07,2.90855e-08,0.985698,0.00043756,-8.74669e-08,-3.43992e-08,0.986136,0.000437282,-1.90665e-07,4.89068e-08,0.986573,0.000437048,-4.39442e-08,-4.20188e-08,0.98701,0.000436834,-1.7e-07,-4.11073e-11,0.987446,0.000436494,-1.70124e-07,4.21832e-08,0.987883,0.00043628,-4.35742e-08,-4.94824e-08,0.988319,0.000436044,-1.92021e-07,3.6537e-08,0.988755,0.00043577,-8.24102e-08,-3.70611e-08,0.989191,0.000435494,-1.93593e-07,5.21026e-08,0.989626,0.000435263,-3.72855e-08,-5.21402e-08,0.990061,0.000435032,-1.93706e-07,3.7249e-08,0.990496,0.000434756,-8.19592e-08,-3.72512e-08,0.990931,0.000434481,-1.93713e-07,5.21511e-08,0.991365,0.00043425,-3.72595e-08,-5.21439e-08,0.991799,0.000434019,-1.93691e-07,3.72152e-08,0.992233,0.000433743,-8.20456e-08,-3.71123e-08,0.992667,0.000433468,-1.93382e-07,5.16292e-08,0.9931,0.000433236,-3.84947e-08,-5.01953e-08,0.993533,0.000433008,-1.
89081e-07,2.99427e-08,0.993966,0.00043272,-9.92525e-08,-9.9708e-09,0.994399,0.000432491,-1.29165e-07,9.94051e-09,0.994831,0.000432263,-9.93434e-08,-2.97912e-08,0.995263,0.000431975,-1.88717e-07,4.96198e-08,0.995695,0.000431746,-3.98578e-08,-4.94785e-08,0.996127,0.000431518,-1.88293e-07,2.9085e-08,0.996558,0.000431229,-1.01038e-07,-7.25675e-09,0.996989,0.000431005,-1.22809e-07,-5.79945e-11,0.99742,0.000430759,-1.22983e-07,7.48873e-09,0.997851,0.000430536,-1.00516e-07,-2.98969e-08,0.998281,0.000430245,-1.90207e-07,5.24942e-08,0.998711,0.000430022,-3.27246e-08,-6.08706e-08,0.999141,0.000429774,-2.15336e-07,7.17788e-08,0.999571,0.000429392,0.,0.};
+
+        template <bool srgb, int blueIdx, typename T, typename D>
+        __device__ __forceinline__ void Lab2RGBConvert_f(const T& src, D& dst)
+        {
+            const float lThresh = 0.008856f * 903.3f;
+            const float fThresh = 7.787f * 0.008856f + 16.0f / 116.0f;
+
+            float Y, fy;
+
+            if (src.x <= lThresh)
+            {
+                Y = src.x / 903.3f;
+                fy = 7.787f * Y + 16.0f / 116.0f;
+            }
+            else
+            {
+                fy = (src.x + 16.0f) / 116.0f;
+                Y = fy * fy * fy;
+            }
+
+            float X = src.y / 500.0f + fy;
+            float Z = fy - src.z / 200.0f;
+
+            if (X <= fThresh)
+                X = (X - 16.0f / 116.0f) / 7.787f;
+            else
+                X = X * X * X;
+
+            if (Z <= fThresh)
+                Z = (Z - 16.0f / 116.0f) / 7.787f;
+            else
+                Z = Z * Z * Z;
+
+            float B = 0.052891f * X - 0.204043f * Y + 1.151152f * Z;
+            float G = -0.921235f * X + 1.875991f * Y + 0.045244f * Z;
+            float R = 3.079933f * X - 1.537150f * Y - 0.542782f * Z;
+
+            if (srgb)
+            {
+                B = splineInterpolate(B * GAMMA_TAB_SIZE, c_sRGBInvGammaTab, GAMMA_TAB_SIZE);
+                G = splineInterpolate(G * GAMMA_TAB_SIZE, c_sRGBInvGammaTab, GAMMA_TAB_SIZE);
+                R = splineInterpolate(R * GAMMA_TAB_SIZE, c_sRGBInvGammaTab, GAMMA_TAB_SIZE);
+            }
+
+            dst.x = blueIdx == 0 ? B : R;
+            dst.y = G;
+            dst.z = blueIdx == 0 ? R : B;
+            setAlpha(dst, ColorChannel<float>::max());
+        }
+
+        template <bool srgb, int blueIdx, typename T, typename D>
+        __device__ __forceinline__ void Lab2RGBConvert_b(const T& src, D& dst)
+        {
+            float3 srcf, dstf;
+
+            srcf.x = src.x * (100.f / 255.f);
+            srcf.y = src.y - 128;
+            srcf.z = src.z - 128;
+
+            Lab2RGBConvert_f<srgb, blueIdx>(srcf, dstf);
+
+            dst.x = saturate_cast<uchar>(dstf.x * 255.f);
+            dst.y = saturate_cast<uchar>(dstf.y * 255.f);
+            dst.z = saturate_cast<uchar>(dstf.z * 255.f);
+            setAlpha(dst, ColorChannel<uchar>::max());
+        }
+
+        template <typename T, int scn, int dcn, bool srgb, int blueIdx> struct Lab2RGB;
+        template <int scn, int dcn, bool srgb, int blueIdx>
+        struct Lab2RGB<uchar, scn, dcn, srgb, blueIdx>
+            : unary_function<typename TypeVec<uchar, scn>::vec_type, typename TypeVec<uchar, dcn>::vec_type>
+        {
+            __device__ __forceinline__ typename TypeVec<uchar, dcn>::vec_type operator ()(const typename TypeVec<uchar, scn>::vec_type& src) const
+            {
+                typename TypeVec<uchar, dcn>::vec_type dst;
+
+                Lab2RGBConvert_b<srgb, blueIdx>(src, dst);
+
+                return dst;
+            }
+            __host__ __device__ __forceinline__ Lab2RGB() {}
+            __host__ __device__ __forceinline__ Lab2RGB(const Lab2RGB&) {}
+        };
+        template <int scn, int dcn, bool srgb, int blueIdx>
+        struct Lab2RGB<float, scn, dcn, srgb, blueIdx>
+            : unary_function<typename TypeVec<float, scn>::vec_type, typename TypeVec<float, dcn>::vec_type>
+        {
+            __device__ __forceinline__ typename TypeVec<float, dcn>::vec_type operator ()(const typename TypeVec<float, scn>::vec_type& src) const
+            {
+                typename TypeVec<float, dcn>::vec_type dst;
+
+                Lab2RGBConvert_f<srgb, blueIdx>(src, dst);
+
+                return dst;
+            }
+            __host__ __device__ __forceinline__ Lab2RGB() {}
+            __host__ __device__ __forceinline__ Lab2RGB(const Lab2RGB&) {}
+        };
+    }
+
+#define OPENCV_CUDA_IMPLEMENT_Lab2RGB_TRAITS(name, scn, dcn, srgb, blueIdx) \
+    template <typename T> struct name ## _traits \
+    { \
+        typedef ::cv::cuda::device::color_detail::Lab2RGB<T, scn, dcn, srgb, blueIdx> functor_type; \
+        static __host__ __device__ __forceinline__ functor_type create_functor() \
+        { \
+            return functor_type(); \
+        } \
+    };
+
+///////////////////////////////////// RGB <-> Luv /////////////////////////////////////
+
+    namespace color_detail
+    {
+        __constant__ float c_LabCbrtTab[] =
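+        // Assumption (added comment, not from upstream): the first knot value
+        // 0.137931 equals 16/116, so this table appears to hold cubic-spline
+        // coefficients for the CIE Lab/Luv "f" transfer curve (t^(1/3) above
+        // t = 0.008856, linear toe 7.787*t + 16/116 below), evaluated via
+        // splineInterpolate() in the RGB -> Luv conversion that follows.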
{0.137931,0.0114066,0.,1.18859e-07,0.149338,0.011407,3.56578e-07,-5.79396e-07,0.160745,0.0114059,-1.38161e-06,2.16892e-06,0.172151,0.0114097,5.12516e-06,-8.0814e-06,0.183558,0.0113957,-1.9119e-05,3.01567e-05,0.194965,0.0114479,7.13509e-05,-0.000112545,0.206371,0.011253,-0.000266285,-0.000106493,0.217252,0.0104009,-0.000585765,7.32149e-05,0.22714,0.00944906,-0.00036612,1.21917e-05,0.236235,0.0087534,-0.000329545,2.01753e-05,0.244679,0.00815483,-0.000269019,1.24435e-05,0.252577,0.00765412,-0.000231689,1.05618e-05,0.26001,0.00722243,-0.000200003,8.26662e-06,0.267041,0.00684723,-0.000175203,6.76746e-06,0.27372,0.00651712,-0.000154901,5.61192e-06,0.280088,0.00622416,-0.000138065,4.67009e-06,0.286179,0.00596204,-0.000124055,3.99012e-06,0.292021,0.0057259,-0.000112085,3.36032e-06,0.297638,0.00551181,-0.000102004,2.95338e-06,0.30305,0.00531666,-9.31435e-05,2.52875e-06,0.308277,0.00513796,-8.55572e-05,2.22022e-06,0.313331,0.00497351,-7.88966e-05,1.97163e-06,0.318228,0.00482163,-7.29817e-05,1.7248e-06,0.322978,0.00468084,-6.78073e-05,1.55998e-06,0.327593,0.0045499,-6.31274e-05,1.36343e-06,0.332081,0.00442774,-5.90371e-05,1.27136e-06,0.336451,0.00431348,-5.5223e-05,1.09111e-06,0.34071,0.00420631,-5.19496e-05,1.0399e-06,0.344866,0.00410553,-4.88299e-05,9.18347e-07,0.348923,0.00401062,-4.60749e-05,8.29942e-07,0.352889,0.00392096,-4.35851e-05,7.98478e-07,0.356767,0.00383619,-4.11896e-05,6.84917e-07,0.360562,0.00375586,-3.91349e-05,6.63976e-07,0.36428,0.00367959,-3.7143e-05,5.93086e-07,0.367923,0.00360708,-3.53637e-05,5.6976e-07,0.371495,0.00353806,-3.36544e-05,4.95533e-07,0.375,0.00347224,-3.21678e-05,4.87951e-07,0.378441,0.00340937,-3.0704e-05,4.4349e-07,0.38182,0.00334929,-2.93735e-05,4.20297e-07,0.38514,0.0032918,-2.81126e-05,3.7872e-07,0.388404,0.00323671,-2.69764e-05,3.596e-07,0.391614,0.00318384,-2.58976e-05,3.5845e-07,0.394772,0.00313312,-2.48223e-05,2.92765e-07,0.397881,0.00308435,-2.3944e-05,3.18232e-07,0.400942,0.00303742,-2.29893e-05,2.82046e-07,0.403957,0.00299229,-2.21432e-05,2.52315e-07,0.406927,0.00294876,-2.13862e-05,2.58416e-07,0.409855,0.00290676,-2.0611e-05,2.33939e-07,0.412741,0.00286624,-1.99092e-05,2.36342e-07,0.415587,0.00282713,-1.92001e-05,1.916e-07,0.418396,0.00278931,-1.86253e-05,2.1915e-07,0.421167,0.00275271,-1.79679e-05,1.83498e-07,0.423901,0.00271733,-1.74174e-05,1.79343e-07,0.426602,0.00268303,-1.68794e-05,1.72013e-07,0.429268,0.00264979,-1.63633e-05,1.75686e-07,0.431901,0.00261759,-1.58363e-05,1.3852e-07,0.434503,0.00258633,-1.54207e-05,1.64304e-07,0.437074,0.00255598,-1.49278e-05,1.28136e-07,0.439616,0.00252651,-1.45434e-05,1.57618e-07,0.442128,0.0024979,-1.40705e-05,1.0566e-07,0.444612,0.00247007,-1.37535e-05,1.34998e-07,0.447068,0.00244297,-1.33485e-05,1.29207e-07,0.449498,0.00241666,-1.29609e-05,9.32347e-08,0.451902,0.00239102,-1.26812e-05,1.23703e-07,0.45428,0.00236603,-1.23101e-05,9.74072e-08,0.456634,0.0023417,-1.20179e-05,1.12518e-07,0.458964,0.002318,-1.16803e-05,7.83681e-08,0.46127,0.00229488,-1.14452e-05,1.10452e-07,0.463554,0.00227232,-1.11139e-05,7.58719e-08,0.465815,0.00225032,-1.08863e-05,9.2699e-08,0.468055,0.00222882,-1.06082e-05,8.97738e-08,0.470273,0.00220788,-1.03388e-05,5.4845e-08,0.47247,0.00218736,-1.01743e-05,1.0808e-07,0.474648,0.00216734,-9.85007e-06,4.9277e-08,0.476805,0.00214779,-9.70224e-06,8.22408e-08,0.478943,0.00212863,-9.45551e-06,6.87942e-08,0.481063,0.00210993,-9.24913e-06,5.98144e-08,0.483163,0.00209161,-9.06969e-06,7.93789e-08,0.485246,0.00207371,-8.83155e-06,3.99032e-08,0.487311,0.00205616,-8.71184e-06,8.88325e-08,0.489358,0.002039,-8
.44534e-06,2.20004e-08,0.491389,0.00202218,-8.37934e-06,9.13872e-08,0.493403,0.0020057,-8.10518e-06,2.96829e-08,0.495401,0.00198957,-8.01613e-06,5.81028e-08,0.497382,0.00197372,-7.84183e-06,6.5731e-08,0.499348,0.00195823,-7.64463e-06,3.66019e-08,0.501299,0.00194305,-7.53483e-06,2.62811e-08,0.503234,0.00192806,-7.45598e-06,9.66907e-08,0.505155,0.00191344,-7.16591e-06,4.18928e-09,0.507061,0.00189912,-7.15334e-06,6.53665e-08,0.508953,0.00188501,-6.95724e-06,3.23686e-08,0.510831,0.00187119,-6.86014e-06,4.35774e-08,0.512696,0.0018576,-6.72941e-06,3.17406e-08,0.514547,0.00184424,-6.63418e-06,6.78785e-08,0.516384,0.00183117,-6.43055e-06,-5.23126e-09,0.518209,0.0018183,-6.44624e-06,7.22562e-08,0.520021,0.00180562,-6.22947e-06,1.42292e-08,0.52182,0.0017932,-6.18679e-06,4.9641e-08,0.523607,0.00178098,-6.03786e-06,2.56259e-08,0.525382,0.00176898,-5.96099e-06,2.66696e-08,0.527145,0.00175714,-5.88098e-06,4.65094e-08,0.528897,0.00174552,-5.74145e-06,2.57114e-08,0.530637,0.00173411,-5.66431e-06,2.94588e-08,0.532365,0.00172287,-5.57594e-06,3.52667e-08,0.534082,0.00171182,-5.47014e-06,8.28868e-09,0.535789,0.00170091,-5.44527e-06,5.07871e-08,0.537484,0.00169017,-5.29291e-06,2.69817e-08,0.539169,0.00167967,-5.21197e-06,2.01009e-08,0.540844,0.0016693,-5.15166e-06,1.18237e-08,0.542508,0.00165903,-5.11619e-06,5.18135e-08,0.544162,0.00164896,-4.96075e-06,1.9341e-08,0.545806,0.00163909,-4.90273e-06,-9.96867e-09,0.54744,0.00162926,-4.93263e-06,8.01382e-08,0.549064,0.00161963,-4.69222e-06,-1.25601e-08,0.550679,0.00161021,-4.7299e-06,2.97067e-08,0.552285,0.00160084,-4.64078e-06,1.29426e-08,0.553881,0.0015916,-4.60195e-06,3.77327e-08,0.555468,0.00158251,-4.48875e-06,1.49412e-08,0.557046,0.00157357,-4.44393e-06,2.17118e-08,0.558615,0.00156475,-4.3788e-06,1.74206e-08,0.560176,0.00155605,-4.32653e-06,2.78152e-08,0.561727,0.00154748,-4.24309e-06,-9.47239e-09,0.563271,0.00153896,-4.27151e-06,6.9679e-08,0.564805,0.00153063,-4.06247e-06,-3.08246e-08,0.566332,0.00152241,-4.15494e-06,5.36188e-08,0.56785,0.00151426,-3.99409e-06,-4.83594e-09,0.56936,0.00150626,-4.00859e-06,2.53293e-08,0.570863,0.00149832,-3.93261e-06,2.27286e-08,0.572357,0.00149052,-3.86442e-06,2.96541e-09,0.573844,0.0014828,-3.85552e-06,2.50147e-08,0.575323,0.00147516,-3.78048e-06,1.61842e-08,0.576794,0.00146765,-3.73193e-06,2.94582e-08,0.578258,0.00146028,-3.64355e-06,-1.48076e-08,0.579715,0.00145295,-3.68798e-06,2.97724e-08,0.581164,0.00144566,-3.59866e-06,1.49272e-08,0.582606,0.00143851,-3.55388e-06,2.97285e-08,0.584041,0.00143149,-3.46469e-06,-1.46323e-08,0.585469,0.00142451,-3.50859e-06,2.88004e-08,0.58689,0.00141758,-3.42219e-06,1.864e-08,0.588304,0.00141079,-3.36627e-06,1.58482e-08,0.589712,0.00140411,-3.31872e-06,-2.24279e-08,0.591112,0.00139741,-3.38601e-06,7.38639e-08,0.592507,0.00139085,-3.16441e-06,-3.46088e-08,0.593894,0.00138442,-3.26824e-06,4.96675e-09,0.595275,0.0013779,-3.25334e-06,7.4346e-08,0.59665,0.00137162,-3.0303e-06,-6.39319e-08,0.598019,0.00136536,-3.2221e-06,6.21725e-08,0.599381,0.00135911,-3.03558e-06,-5.94423e-09,0.600737,0.00135302,-3.05341e-06,2.12091e-08,0.602087,0.00134697,-2.98979e-06,-1.92876e-08,0.603431,0.00134094,-3.04765e-06,5.5941e-08,0.604769,0.00133501,-2.87983e-06,-2.56622e-08,0.606101,0.00132917,-2.95681e-06,4.67078e-08,0.607427,0.0013234,-2.81669e-06,-4.19592e-08,0.608748,0.00131764,-2.94257e-06,6.15243e-08,0.610062,0.00131194,-2.75799e-06,-2.53244e-08,0.611372,0.00130635,-2.83397e-06,3.97739e-08,0.612675,0.0013008,-2.71465e-06,-1.45618e-08,0.613973,0.00129533,-2.75833e-06,1.84733e-08,0.615266,0.00128986,-2.70291e-06,
2.73606e-10,0.616553,0.00128446,-2.70209e-06,4.00367e-08,0.617835,0.00127918,-2.58198e-06,-4.12113e-08,0.619111,0.00127389,-2.70561e-06,6.52039e-08,0.620383,0.00126867,-2.51e-06,-4.07901e-08,0.621649,0.00126353,-2.63237e-06,3.83516e-08,0.62291,0.00125838,-2.51732e-06,6.59315e-09,0.624166,0.00125337,-2.49754e-06,-5.11939e-09,0.625416,0.00124836,-2.5129e-06,1.38846e-08,0.626662,0.00124337,-2.47124e-06,9.18514e-09,0.627903,0.00123846,-2.44369e-06,8.97952e-09,0.629139,0.0012336,-2.41675e-06,1.45012e-08,0.63037,0.00122881,-2.37325e-06,-7.37949e-09,0.631597,0.00122404,-2.39538e-06,1.50169e-08,0.632818,0.00121929,-2.35033e-06,6.91648e-09,0.634035,0.00121461,-2.32958e-06,1.69219e-08,0.635248,0.00121,-2.27882e-06,-1.49997e-08,0.636455,0.0012054,-2.32382e-06,4.30769e-08,0.637659,0.00120088,-2.19459e-06,-3.80986e-08,0.638857,0.00119638,-2.30888e-06,4.97134e-08,0.640051,0.00119191,-2.15974e-06,-4.15463e-08,0.641241,0.00118747,-2.28438e-06,5.68667e-08,0.642426,0.00118307,-2.11378e-06,-7.10641e-09,0.643607,0.00117882,-2.1351e-06,-2.8441e-08,0.644784,0.00117446,-2.22042e-06,6.12658e-08,0.645956,0.00117021,-2.03663e-06,-3.78083e-08,0.647124,0.00116602,-2.15005e-06,3.03627e-08,0.648288,0.00116181,-2.05896e-06,-2.40379e-08,0.649448,0.00115762,-2.13108e-06,6.57887e-08,0.650603,0.00115356,-1.93371e-06,-6.03028e-08,0.651755,0.00114951,-2.11462e-06,5.62134e-08,0.652902,0.00114545,-1.94598e-06,-4.53417e-08,0.654046,0.00114142,-2.082e-06,6.55489e-08,0.655185,0.00113745,-1.88536e-06,-3.80396e-08,0.656321,0.00113357,-1.99948e-06,2.70049e-08,0.657452,0.00112965,-1.91846e-06,-1.03755e-08,0.65858,0.00112578,-1.94959e-06,1.44973e-08,0.659704,0.00112192,-1.9061e-06,1.1991e-08,0.660824,0.00111815,-1.87012e-06,-2.85634e-09,0.66194,0.0011144,-1.87869e-06,-5.65782e-10,0.663053,0.00111064,-1.88039e-06,5.11947e-09,0.664162,0.0011069,-1.86503e-06,3.96924e-08,0.665267,0.00110328,-1.74595e-06,-4.46795e-08,0.666368,0.00109966,-1.87999e-06,1.98161e-08,0.667466,0.00109596,-1.82054e-06,2.502e-08,0.66856,0.00109239,-1.74548e-06,-6.86593e-10,0.669651,0.0010889,-1.74754e-06,-2.22739e-08,0.670738,0.00108534,-1.81437e-06,3.01776e-08,0.671821,0.0010818,-1.72383e-06,2.07732e-08,0.672902,0.00107841,-1.66151e-06,-5.36658e-08,0.673978,0.00107493,-1.82251e-06,7.46802e-08,0.675051,0.00107151,-1.59847e-06,-6.62411e-08,0.676121,0.00106811,-1.79719e-06,7.10748e-08,0.677188,0.00106473,-1.58397e-06,-3.92441e-08,0.678251,0.00106145,-1.7017e-06,2.62973e-08,0.679311,0.00105812,-1.62281e-06,-6.34035e-09,0.680367,0.00105486,-1.64183e-06,-9.36249e-10,0.68142,0.00105157,-1.64464e-06,1.00854e-08,0.68247,0.00104831,-1.61438e-06,2.01995e-08,0.683517,0.00104514,-1.55378e-06,-3.1279e-08,0.68456,0.00104194,-1.64762e-06,4.53114e-08,0.685601,0.00103878,-1.51169e-06,-3.07573e-08,0.686638,0.00103567,-1.60396e-06,1.81133e-08,0.687672,0.00103251,-1.54962e-06,1.79085e-08,0.688703,0.00102947,-1.49589e-06,-3.01428e-08,0.689731,0.00102639,-1.58632e-06,4.30583e-08,0.690756,0.00102334,-1.45715e-06,-2.28814e-08,0.691778,0.00102036,-1.52579e-06,-1.11373e-08,0.692797,0.00101727,-1.5592e-06,6.74305e-08,0.693812,0.00101436,-1.35691e-06,-7.97709e-08,0.694825,0.0010114,-1.59622e-06,7.28391e-08,0.695835,0.00100843,-1.37771e-06,-3.27715e-08,0.696842,0.00100558,-1.47602e-06,-1.35807e-09,0.697846,0.00100262,-1.48009e-06,3.82037e-08,0.698847,0.000999775,-1.36548e-06,-3.22474e-08,0.699846,0.000996948,-1.46223e-06,3.11809e-08,0.700841,0.000994117,-1.36868e-06,-3.28714e-08,0.701834,0.000991281,-1.4673e-06,4.07001e-08,0.702824,0.000988468,-1.3452e-06,-1.07197e-08,0.703811,0.000985746,-1.377
36e-06,2.17866e-09,0.704795,0.000982998,-1.37082e-06,2.00521e-09,0.705777,0.000980262,-1.3648e-06,-1.01996e-08,0.706756,0.000977502,-1.3954e-06,3.87931e-08,0.707732,0.000974827,-1.27902e-06,-2.57632e-08,0.708706,0.000972192,-1.35631e-06,4.65513e-09,0.709676,0.000969493,-1.34235e-06,7.14257e-09,0.710645,0.00096683,-1.32092e-06,2.63791e-08,0.71161,0.000964267,-1.24178e-06,-5.30543e-08,0.712573,0.000961625,-1.40095e-06,6.66289e-08,0.713533,0.000959023,-1.20106e-06,-3.46474e-08,0.714491,0.000956517,-1.305e-06,1.23559e-08,0.715446,0.000953944,-1.26793e-06,-1.47763e-08,0.716399,0.000951364,-1.31226e-06,4.67494e-08,0.717349,0.000948879,-1.17201e-06,-5.3012e-08,0.718297,0.000946376,-1.33105e-06,4.60894e-08,0.719242,0.000943852,-1.19278e-06,-1.21366e-08,0.720185,0.00094143,-1.22919e-06,2.45673e-09,0.721125,0.000938979,-1.22182e-06,2.30966e-09,0.722063,0.000936543,-1.21489e-06,-1.16954e-08,0.722998,0.000934078,-1.24998e-06,4.44718e-08,0.723931,0.000931711,-1.11656e-06,-4.69823e-08,0.724861,0.000929337,-1.25751e-06,2.4248e-08,0.725789,0.000926895,-1.18477e-06,9.5949e-09,0.726715,0.000924554,-1.15598e-06,-3.02286e-09,0.727638,0.000922233,-1.16505e-06,2.49649e-09,0.72856,0.00091991,-1.15756e-06,-6.96321e-09,0.729478,0.000917575,-1.17845e-06,2.53564e-08,0.730395,0.000915294,-1.10238e-06,-3.48578e-08,0.731309,0.000912984,-1.20695e-06,5.44704e-08,0.732221,0.000910734,-1.04354e-06,-6.38144e-08,0.73313,0.000908455,-1.23499e-06,8.15781e-08,0.734038,0.00090623,-9.90253e-07,-8.3684e-08,0.734943,0.000903999,-1.2413e-06,7.43441e-08,0.735846,0.000901739,-1.01827e-06,-3.48787e-08,0.736746,0.000899598,-1.12291e-06,5.56596e-09,0.737645,0.000897369,-1.10621e-06,1.26148e-08,0.738541,0.000895194,-1.06837e-06,3.57935e-09,0.739435,0.000893068,-1.05763e-06,-2.69322e-08,0.740327,0.000890872,-1.13842e-06,4.45448e-08,0.741217,0.000888729,-1.00479e-06,-3.20376e-08,0.742105,0.000886623,-1.1009e-06,2.40011e-08,0.74299,0.000884493,-1.0289e-06,-4.36209e-09,0.743874,0.000882422,-1.04199e-06,-6.55268e-09,0.744755,0.000880319,-1.06164e-06,3.05728e-08,0.745634,0.000878287,-9.69926e-07,-5.61338e-08,0.746512,0.000876179,-1.13833e-06,7.4753e-08,0.747387,0.000874127,-9.14068e-07,-6.40644e-08,0.74826,0.000872106,-1.10626e-06,6.22955e-08,0.749131,0.000870081,-9.19375e-07,-6.59083e-08,0.75,0.000868044,-1.1171e-06,8.21284e-08,0.750867,0.000866056,-8.70714e-07,-8.37915e-08,0.751732,0.000864064,-1.12209e-06,7.42237e-08,0.752595,0.000862042,-8.99418e-07,-3.42894e-08,0.753456,0.00086014,-1.00229e-06,3.32955e-09,0.754315,0.000858146,-9.92297e-07,2.09712e-08,0.755173,0.000856224,-9.29384e-07,-2.76096e-08,0.756028,0.000854282,-1.01221e-06,2.98627e-08,0.756881,0.000852348,-9.22625e-07,-3.22365e-08,0.757733,0.000850406,-1.01933e-06,3.94786e-08,0.758582,0.000848485,-9.00898e-07,-6.46833e-09,0.75943,0.000846664,-9.20303e-07,-1.36052e-08,0.760275,0.000844783,-9.61119e-07,1.28447e-09,0.761119,0.000842864,-9.57266e-07,8.4674e-09,0.761961,0.000840975,-9.31864e-07,2.44506e-08,0.762801,0.000839185,-8.58512e-07,-4.6665e-08,0.763639,0.000837328,-9.98507e-07,4.30001e-08,0.764476,0.00083546,-8.69507e-07,-6.12609e-09,0.76531,0.000833703,-8.87885e-07,-1.84959e-08,0.766143,0.000831871,-9.43372e-07,2.05052e-08,0.766974,0.000830046,-8.81857e-07,-3.92026e-09,0.767803,0.000828271,-8.93618e-07,-4.82426e-09,0.768631,0.000826469,-9.0809e-07,2.32172e-08,0.769456,0.000824722,-8.38439e-07,-2.84401e-08,0.77028,0.00082296,-9.23759e-07,3.09386e-08,0.771102,0.000821205,-8.30943e-07,-3.57099e-08,0.771922,0.000819436,-9.38073e-07,5.22963e-08,0.772741,0.000817717,-7.81184e-07,-5.426
58e-08,0.773558,0.000815992,-9.43981e-07,4.55579e-08,0.774373,0.000814241,-8.07308e-07,-8.75656e-09,0.775186,0.0008126,-8.33578e-07,-1.05315e-08,0.775998,0.000810901,-8.65172e-07,-8.72188e-09,0.776808,0.000809145,-8.91338e-07,4.54191e-08,0.777616,0.000807498,-7.5508e-07,-5.37454e-08,0.778423,0.000805827,-9.16317e-07,5.03532e-08,0.779228,0.000804145,-7.65257e-07,-2.84584e-08,0.780031,0.000802529,-8.50632e-07,3.87579e-09,0.780833,0.00080084,-8.39005e-07,1.29552e-08,0.781633,0.0007992,-8.00139e-07,3.90804e-09,0.782432,0.000797612,-7.88415e-07,-2.85874e-08,0.783228,0.000795949,-8.74177e-07,5.0837e-08,0.784023,0.000794353,-7.21666e-07,-5.55513e-08,0.784817,0.000792743,-8.8832e-07,5.21587e-08,0.785609,0.000791123,-7.31844e-07,-3.38744e-08,0.786399,0.000789558,-8.33467e-07,2.37342e-08,0.787188,0.000787962,-7.62264e-07,-1.45775e-09,0.787975,0.000786433,-7.66638e-07,-1.79034e-08,0.788761,0.000784846,-8.20348e-07,1.34665e-08,0.789545,0.000783246,-7.79948e-07,2.3642e-08,0.790327,0.000781757,-7.09022e-07,-4.84297e-08,0.791108,0.000780194,-8.54311e-07,5.08674e-08,0.791888,0.000778638,-7.01709e-07,-3.58303e-08,0.792666,0.000777127,-8.092e-07,3.28493e-08,0.793442,0.000775607,-7.10652e-07,-3.59624e-08,0.794217,0.000774078,-8.1854e-07,5.13959e-08,0.79499,0.000772595,-6.64352e-07,-5.04121e-08,0.795762,0.000771115,-8.15588e-07,3.10431e-08,0.796532,0.000769577,-7.22459e-07,-1.41557e-08,0.797301,0.00076809,-7.64926e-07,2.55795e-08,0.798069,0.000766636,-6.88187e-07,-2.85578e-08,0.798835,0.000765174,-7.73861e-07,2.90472e-08,0.799599,0.000763714,-6.86719e-07,-2.80262e-08,0.800362,0.000762256,-7.70798e-07,2.34531e-08,0.801123,0.000760785,-7.00438e-07,-6.18144e-09,0.801884,0.000759366,-7.18983e-07,1.27263e-09,0.802642,0.000757931,-7.15165e-07,1.09101e-09,0.803399,0.000756504,-7.11892e-07,-5.63675e-09,0.804155,0.000755064,-7.28802e-07,2.14559e-08,0.80491,0.00075367,-6.64434e-07,-2.05821e-08,0.805663,0.00075228,-7.26181e-07,1.26812e-09,0.806414,0.000750831,-7.22377e-07,1.55097e-08,0.807164,0.000749433,-6.75848e-07,-3.70216e-09,0.807913,0.00074807,-6.86954e-07,-7.0105e-10,0.80866,0.000746694,-6.89057e-07,6.5063e-09,0.809406,0.000745336,-6.69538e-07,-2.53242e-08,0.810151,0.000743921,-7.45511e-07,3.51858e-08,0.810894,0.000742535,-6.39953e-07,3.79034e-09,0.811636,0.000741267,-6.28582e-07,-5.03471e-08,0.812377,0.000739858,-7.79624e-07,7.83886e-08,0.813116,0.000738534,-5.44458e-07,-8.43935e-08,0.813854,0.000737192,-7.97638e-07,8.03714e-08,0.81459,0.000735838,-5.56524e-07,-5.82784e-08,0.815325,0.00073455,-7.31359e-07,3.35329e-08,0.816059,0.000733188,-6.3076e-07,-1.62486e-08,0.816792,0.000731878,-6.79506e-07,3.14614e-08,0.817523,0.000730613,-5.85122e-07,-4.99925e-08,0.818253,0.000729293,-7.35099e-07,4.92994e-08,0.818982,0.000727971,-5.87201e-07,-2.79959e-08,0.819709,0.000726712,-6.71189e-07,3.07959e-09,0.820435,0.000725379,-6.6195e-07,1.56777e-08,0.82116,0.000724102,-6.14917e-07,-6.18564e-09,0.821883,0.000722854,-6.33474e-07,9.06488e-09,0.822606,0.000721614,-6.06279e-07,-3.00739e-08,0.823327,0.000720311,-6.96501e-07,5.16262e-08,0.824046,0.000719073,-5.41623e-07,-5.72214e-08,0.824765,0.000717818,-7.13287e-07,5.80503e-08,0.825482,0.000716566,-5.39136e-07,-5.57703e-08,0.826198,0.00071532,-7.06447e-07,4.58215e-08,0.826912,0.000714045,-5.68983e-07,-8.30636e-09,0.827626,0.000712882,-5.93902e-07,-1.25961e-08,0.828338,0.000711656,-6.3169e-07,-9.13985e-10,0.829049,0.00071039,-6.34432e-07,1.62519e-08,0.829759,0.00070917,-5.85676e-07,-4.48904e-09,0.830468,0.000707985,-5.99143e-07,1.70418e-09,0.831175,0.000706792,-5.9403e-07,-2.32768e-09
,0.831881,0.000705597,-6.01014e-07,7.60648e-09,0.832586,0.000704418,-5.78194e-07,-2.80982e-08,0.83329,0.000703177,-6.62489e-07,4.51817e-08,0.833993,0.000701988,-5.26944e-07,-3.34192e-08,0.834694,0.000700834,-6.27201e-07,2.88904e-08,0.835394,0.000699666,-5.4053e-07,-2.25378e-08,0.836093,0.000698517,-6.08143e-07,1.65589e-09,0.836791,0.000697306,-6.03176e-07,1.59142e-08,0.837488,0.000696147,-5.55433e-07,-5.70801e-09,0.838184,0.000695019,-5.72557e-07,6.91792e-09,0.838878,0.000693895,-5.51803e-07,-2.19637e-08,0.839571,0.000692725,-6.17694e-07,2.13321e-08,0.840263,0.000691554,-5.53698e-07,-3.75996e-09,0.840954,0.000690435,-5.64978e-07,-6.29219e-09,0.841644,0.000689287,-5.83855e-07,2.89287e-08,0.842333,0.000688206,-4.97068e-07,-4.98181e-08,0.843021,0.000687062,-6.46523e-07,5.11344e-08,0.843707,0.000685922,-4.9312e-07,-3.55102e-08,0.844393,0.00068483,-5.9965e-07,3.13019e-08,0.845077,0.000683724,-5.05745e-07,-3.00925e-08,0.84576,0.000682622,-5.96022e-07,2.94636e-08,0.846442,0.000681519,-5.07631e-07,-2.81572e-08,0.847123,0.000680419,-5.92103e-07,2.35606e-08,0.847803,0.000679306,-5.21421e-07,-6.48045e-09,0.848482,0.000678243,-5.40863e-07,2.36124e-09,0.849159,0.000677169,-5.33779e-07,-2.96461e-09,0.849836,0.000676092,-5.42673e-07,9.49728e-09,0.850512,0.000675035,-5.14181e-07,-3.50245e-08,0.851186,0.000673902,-6.19254e-07,7.09959e-08,0.851859,0.000672876,-4.06267e-07,-7.01453e-08,0.852532,0.000671853,-6.16703e-07,3.07714e-08,0.853203,0.000670712,-5.24388e-07,6.66423e-09,0.853873,0.000669684,-5.04396e-07,2.17629e-09,0.854542,0.000668681,-4.97867e-07,-1.53693e-08,0.855211,0.000667639,-5.43975e-07,-3.03752e-10,0.855878,0.000666551,-5.44886e-07,1.65844e-08,0.856544,0.000665511,-4.95133e-07,-6.42907e-09,0.857209,0.000664501,-5.1442e-07,9.13195e-09,0.857873,0.0006635,-4.87024e-07,-3.00987e-08,0.858536,0.000662435,-5.7732e-07,5.16584e-08,0.859198,0.000661436,-4.22345e-07,-5.73255e-08,0.859859,0.000660419,-5.94322e-07,5.84343e-08,0.860518,0.000659406,-4.19019e-07,-5.72022e-08,0.861177,0.000658396,-5.90626e-07,5.11653e-08,0.861835,0.000657368,-4.3713e-07,-2.82495e-08,0.862492,0.000656409,-5.21878e-07,2.22788e-09,0.863148,0.000655372,-5.15195e-07,1.9338e-08,0.863803,0.0006544,-4.5718e-07,-1.99754e-08,0.864457,0.000653425,-5.17107e-07,9.59024e-10,0.86511,0.000652394,-5.1423e-07,1.61393e-08,0.865762,0.000651414,-4.65812e-07,-5.91149e-09,0.866413,0.000650465,-4.83546e-07,7.50665e-09,0.867063,0.00064952,-4.61026e-07,-2.4115e-08,0.867712,0.000648526,-5.33371e-07,2.93486e-08,0.86836,0.000647547,-4.45325e-07,-3.36748e-08,0.869007,0.000646555,-5.4635e-07,4.57461e-08,0.869653,0.0006456,-4.09112e-07,-3.01002e-08,0.870298,0.000644691,-4.99412e-07,1.50501e-08,0.870942,0.000643738,-4.54262e-07,-3.01002e-08,0.871585,0.000642739,-5.44563e-07,4.57461e-08,0.872228,0.000641787,-4.07324e-07,-3.36748e-08,0.872869,0.000640871,-5.08349e-07,2.93486e-08,0.873509,0.000639943,-4.20303e-07,-2.4115e-08,0.874149,0.00063903,-4.92648e-07,7.50655e-09,0.874787,0.000638067,-4.70128e-07,-5.91126e-09,0.875425,0.000637109,-4.87862e-07,1.61385e-08,0.876062,0.000636182,-4.39447e-07,9.61961e-10,0.876697,0.000635306,-4.36561e-07,-1.99863e-08,0.877332,0.000634373,-4.9652e-07,1.93785e-08,0.877966,0.000633438,-4.38384e-07,2.07697e-09,0.878599,0.000632567,-4.32153e-07,-2.76864e-08,0.879231,0.00063162,-5.15212e-07,4.90641e-08,0.879862,0.000630737,-3.6802e-07,-4.93606e-08,0.880493,0.000629852,-5.16102e-07,2.9169e-08,0.881122,0.000628908,-4.28595e-07,-7.71083e-09,0.881751,0.000628027,-4.51727e-07,1.6744e-09,0.882378,0.000627129,-4.46704e-07,1.01317e-09,0.88300
5,0.000626239,-4.43665e-07,-5.72703e-09,0.883631,0.000625334,-4.60846e-07,2.1895e-08,0.884255,0.000624478,-3.95161e-07,-2.22481e-08,0.88488,0.000623621,-4.61905e-07,7.4928e-09,0.885503,0.00062272,-4.39427e-07,-7.72306e-09,0.886125,0.000621818,-4.62596e-07,2.33995e-08,0.886746,0.000620963,-3.92398e-07,-2.62704e-08,0.887367,0.000620099,-4.71209e-07,2.20775e-08,0.887987,0.000619223,-4.04976e-07,-2.43496e-09,0.888605,0.000618406,-4.12281e-07,-1.23377e-08,0.889223,0.000617544,-4.49294e-07,-7.81876e-09,0.88984,0.000616622,-4.72751e-07,4.36128e-08,0.890457,0.000615807,-3.41912e-07,-4.7423e-08,0.891072,0.000614981,-4.84181e-07,2.68698e-08,0.891687,0.000614093,-4.03572e-07,-4.51384e-10,0.8923,0.000613285,-4.04926e-07,-2.50643e-08,0.892913,0.0006124,-4.80119e-07,4.11038e-08,0.893525,0.000611563,-3.56808e-07,-2.01414e-08,0.894136,0.000610789,-4.17232e-07,-2.01426e-08,0.894747,0.000609894,-4.7766e-07,4.11073e-08,0.895356,0.000609062,-3.54338e-07,-2.50773e-08,0.895965,0.000608278,-4.2957e-07,-4.02954e-10,0.896573,0.000607418,-4.30779e-07,2.66891e-08,0.89718,0.000606636,-3.50711e-07,-4.67489e-08,0.897786,0.000605795,-4.90958e-07,4.10972e-08,0.898391,0.000604936,-3.67666e-07,1.56948e-09,0.898996,0.000604205,-3.62958e-07,-4.73751e-08,0.8996,0.000603337,-5.05083e-07,6.87214e-08,0.900202,0.000602533,-2.98919e-07,-4.86966e-08,0.900805,0.000601789,-4.45009e-07,6.85589e-09,0.901406,0.00060092,-4.24441e-07,2.1273e-08,0.902007,0.000600135,-3.60622e-07,-3.23434e-08,0.902606,0.000599317,-4.57652e-07,4.84959e-08,0.903205,0.000598547,-3.12164e-07,-4.24309e-08,0.903803,0.000597795,-4.39457e-07,2.01844e-09,0.904401,0.000596922,-4.33402e-07,3.43571e-08,0.904997,0.000596159,-3.30331e-07,-2.02374e-08,0.905593,0.000595437,-3.91043e-07,-1.30123e-08,0.906188,0.000594616,-4.3008e-07,1.26819e-08,0.906782,0.000593794,-3.92034e-07,2.18894e-08,0.907376,0.000593076,-3.26366e-07,-4.06349e-08,0.907968,0.000592301,-4.4827e-07,2.1441e-08,0.90856,0.000591469,-3.83947e-07,1.44754e-08,0.909151,0.000590744,-3.40521e-07,-1.97379e-08,0.909742,0.000590004,-3.99735e-07,4.87161e-09,0.910331,0.000589219,-3.8512e-07,2.51532e-10,0.91092,0.00058845,-3.84366e-07,-5.87776e-09,0.911508,0.000587663,-4.01999e-07,2.32595e-08,0.912096,0.000586929,-3.3222e-07,-2.75554e-08,0.912682,0.000586182,-4.14887e-07,2.73573e-08,0.913268,0.000585434,-3.32815e-07,-2.22692e-08,0.913853,0.000584702,-3.99622e-07,2.11486e-09,0.914437,0.000583909,-3.93278e-07,1.38098e-08,0.915021,0.000583164,-3.51848e-07,2.25042e-09,0.915604,0.000582467,-3.45097e-07,-2.28115e-08,0.916186,0.000581708,-4.13531e-07,2.93911e-08,0.916767,0.000580969,-3.25358e-07,-3.51481e-08,0.917348,0.000580213,-4.30803e-07,5.15967e-08,0.917928,0.000579506,-2.76012e-07,-5.20296e-08,0.918507,0.000578798,-4.32101e-07,3.73124e-08,0.919085,0.000578046,-3.20164e-07,-3.76154e-08,0.919663,0.000577293,-4.3301e-07,5.35447e-08,0.92024,0.000576587,-2.72376e-07,-5.7354e-08,0.920816,0.000575871,-4.44438e-07,5.66621e-08,0.921391,0.000575152,-2.74452e-07,-5.00851e-08,0.921966,0.000574453,-4.24707e-07,2.4469e-08,0.92254,0.000573677,-3.513e-07,1.18138e-08,0.923114,0.000573009,-3.15859e-07,-1.21195e-08,0.923686,0.000572341,-3.52217e-07,-2.29403e-08,0.924258,0.000571568,-4.21038e-07,4.4276e-08,0.924829,0.000570859,-2.8821e-07,-3.49546e-08,0.9254,0.000570178,-3.93074e-07,3.59377e-08,0.92597,0.000569499,-2.85261e-07,-4.91915e-08,0.926539,0.000568781,-4.32835e-07,4.16189e-08,0.927107,0.00056804,-3.07979e-07,1.92523e-09,0.927675,0.00056743,-3.02203e-07,-4.93198e-08,0.928242,0.000566678,-4.50162e-07,7.61447e-08,0.928809,0.000566006,-
2.21728e-07,-7.6445e-08,0.929374,0.000565333,-4.51063e-07,5.08216e-08,0.929939,0.000564583,-2.98599e-07,-7.63212e-09,0.930503,0.000563963,-3.21495e-07,-2.02931e-08,0.931067,0.000563259,-3.82374e-07,2.92001e-08,0.93163,0.000562582,-2.94774e-07,-3.69025e-08,0.932192,0.000561882,-4.05482e-07,5.88053e-08,0.932754,0.000561247,-2.29066e-07,-7.91094e-08,0.933315,0.000560552,-4.66394e-07,7.88184e-08,0.933875,0.000559856,-2.29939e-07,-5.73501e-08,0.934434,0.000559224,-4.01989e-07,3.13727e-08,0.934993,0.000558514,-3.07871e-07,-8.53611e-09,0.935551,0.000557873,-3.33479e-07,2.77175e-09,0.936109,0.000557214,-3.25164e-07,-2.55091e-09,0.936666,0.000556556,-3.32817e-07,7.43188e-09,0.937222,0.000555913,-3.10521e-07,-2.71766e-08,0.937778,0.00055521,-3.92051e-07,4.167e-08,0.938333,0.000554551,-2.67041e-07,-2.02941e-08,0.938887,0.000553956,-3.27923e-07,-2.00984e-08,0.93944,0.00055324,-3.88218e-07,4.10828e-08,0.939993,0.000552587,-2.6497e-07,-2.50237e-08,0.940546,0.000551982,-3.40041e-07,-5.92583e-10,0.941097,0.0005513,-3.41819e-07,2.7394e-08,0.941648,0.000550698,-2.59637e-07,-4.93788e-08,0.942199,0.000550031,-4.07773e-07,5.09119e-08,0.942748,0.000549368,-2.55038e-07,-3.50595e-08,0.943297,0.000548753,-3.60216e-07,2.97214e-08,0.943846,0.000548122,-2.71052e-07,-2.42215e-08,0.944394,0.000547507,-3.43716e-07,7.55985e-09,0.944941,0.000546842,-3.21037e-07,-6.01796e-09,0.945487,0.000546182,-3.3909e-07,1.65119e-08,0.946033,0.000545553,-2.89555e-07,-4.2498e-10,0.946578,0.000544973,-2.9083e-07,-1.4812e-08,0.947123,0.000544347,-3.35266e-07,6.83068e-11,0.947667,0.000543676,-3.35061e-07,1.45388e-08,0.94821,0.00054305,-2.91444e-07,1.38123e-09,0.948753,0.000542471,-2.87301e-07,-2.00637e-08,0.949295,0.000541836,-3.47492e-07,1.92688e-08,0.949837,0.000541199,-2.89685e-07,2.59298e-09,0.950378,0.000540628,-2.81906e-07,-2.96407e-08,0.950918,0.000539975,-3.70829e-07,5.63652e-08,0.951458,0.000539402,-2.01733e-07,-7.66107e-08,0.951997,0.000538769,-4.31565e-07,7.12638e-08,0.952535,0.00053812,-2.17774e-07,-2.96305e-08,0.953073,0.000537595,-3.06665e-07,-1.23464e-08,0.95361,0.000536945,-3.43704e-07,1.94114e-08,0.954147,0.000536316,-2.8547e-07,-5.69451e-09,0.954683,0.000535728,-3.02554e-07,3.36666e-09,0.955219,0.000535133,-2.92454e-07,-7.77208e-09,0.955753,0.000534525,-3.1577e-07,2.77216e-08,0.956288,0.000533976,-2.32605e-07,-4.35097e-08,0.956821,0.00053338,-3.63134e-07,2.7108e-08,0.957354,0.000532735,-2.8181e-07,-5.31772e-09,0.957887,0.000532156,-2.97764e-07,-5.83718e-09,0.958419,0.000531543,-3.15275e-07,2.86664e-08,0.95895,0.000530998,-2.29276e-07,-4.9224e-08,0.959481,0.000530392,-3.76948e-07,4.90201e-08,0.960011,0.000529785,-2.29887e-07,-2.76471e-08,0.96054,0.000529243,-3.12829e-07,1.96385e-09,0.961069,0.000528623,-3.06937e-07,1.97917e-08,0.961598,0.000528068,-2.47562e-07,-2.15261e-08,0.962125,0.000527508,-3.1214e-07,6.70795e-09,0.962653,0.000526904,-2.92016e-07,-5.30573e-09,0.963179,0.000526304,-3.07934e-07,1.4515e-08,0.963705,0.000525732,-2.64389e-07,6.85048e-09,0.964231,0.000525224,-2.43837e-07,-4.19169e-08,0.964756,0.00052461,-3.69588e-07,4.1608e-08,0.96528,0.000523996,-2.44764e-07,-5.30598e-09,0.965804,0.000523491,-2.60682e-07,-2.03841e-08,0.966327,0.000522908,-3.21834e-07,2.72378e-08,0.966849,0.000522346,-2.40121e-07,-2.89625e-08,0.967371,0.000521779,-3.27008e-07,2.90075e-08,0.967893,0.000521212,-2.39986e-07,-2.74629e-08,0.968414,0.00052065,-3.22374e-07,2.12396e-08,0.968934,0.000520069,-2.58656e-07,2.10922e-09,0.969454,0.000519558,-2.52328e-07,-2.96765e-08,0.969973,0.000518964,-3.41357e-07,5.6992e-08,0.970492,0.000518452,-1.70382e
-07,-7.90821e-08,0.97101,0.000517874,-4.07628e-07,8.05224e-08,0.971528,0.000517301,-1.66061e-07,-6.41937e-08,0.972045,0.000516776,-3.58642e-07,5.70429e-08,0.972561,0.00051623,-1.87513e-07,-4.47686e-08,0.973077,0.00051572,-3.21819e-07,2.82237e-09,0.973593,0.000515085,-3.13352e-07,3.34792e-08,0.974108,0.000514559,-2.12914e-07,-1.75298e-08,0.974622,0.000514081,-2.65503e-07,-2.29648e-08,0.975136,0.000513481,-3.34398e-07,4.97843e-08,0.975649,0.000512961,-1.85045e-07,-5.6963e-08,0.976162,0.00051242,-3.55934e-07,5.88585e-08,0.976674,0.000511885,-1.79359e-07,-5.92616e-08,0.977185,0.000511348,-3.57143e-07,5.89785e-08,0.977696,0.000510811,-1.80208e-07,-5.74433e-08,0.978207,0.000510278,-3.52538e-07,5.15854e-08,0.978717,0.000509728,-1.97781e-07,-2.9689e-08,0.979226,0.000509243,-2.86848e-07,7.56591e-09,0.979735,0.000508692,-2.64151e-07,-5.74649e-10,0.980244,0.000508162,-2.65875e-07,-5.26732e-09,0.980752,0.000507615,-2.81677e-07,2.16439e-08,0.981259,0.000507116,-2.16745e-07,-2.17037e-08,0.981766,0.000506618,-2.81856e-07,5.56636e-09,0.982272,0.000506071,-2.65157e-07,-5.61689e-10,0.982778,0.000505539,-2.66842e-07,-3.31963e-09,0.983283,0.000504995,-2.76801e-07,1.38402e-08,0.983788,0.000504483,-2.3528e-07,7.56339e-09,0.984292,0.000504035,-2.1259e-07,-4.40938e-08,0.984796,0.000503478,-3.44871e-07,4.96026e-08,0.985299,0.000502937,-1.96064e-07,-3.51071e-08,0.985802,0.000502439,-3.01385e-07,3.12212e-08,0.986304,0.00050193,-2.07721e-07,-3.0173e-08,0.986806,0.000501424,-2.9824e-07,2.9866e-08,0.987307,0.000500917,-2.08642e-07,-2.96865e-08,0.987808,0.000500411,-2.97702e-07,2.92753e-08,0.988308,0.000499903,-2.09876e-07,-2.78101e-08,0.988807,0.0004994,-2.93306e-07,2.23604e-08,0.989307,0.000498881,-2.26225e-07,-2.02681e-09,0.989805,0.000498422,-2.32305e-07,-1.42531e-08,0.990303,0.000497915,-2.75065e-07,-5.65232e-10,0.990801,0.000497363,-2.76761e-07,1.65141e-08,0.991298,0.000496859,-2.27218e-07,-5.88639e-09,0.991795,0.000496387,-2.44878e-07,7.0315e-09,0.992291,0.000495918,-2.23783e-07,-2.22396e-08,0.992787,0.000495404,-2.90502e-07,2.23224e-08,0.993282,0.00049489,-2.23535e-07,-7.44543e-09,0.993776,0.000494421,-2.45871e-07,7.45924e-09,0.994271,0.000493951,-2.23493e-07,-2.23915e-08,0.994764,0.000493437,-2.90668e-07,2.25021e-08,0.995257,0.000492923,-2.23161e-07,-8.01218e-09,0.99575,0.000492453,-2.47198e-07,9.54669e-09,0.996242,0.000491987,-2.18558e-07,-3.01746e-08,0.996734,0.000491459,-3.09082e-07,5.1547e-08,0.997225,0.000490996,-1.54441e-07,-5.68039e-08,0.997716,0.000490517,-3.24853e-07,5.64594e-08,0.998206,0.000490036,-1.55474e-07,-4.98245e-08,0.998696,0.000489576,-3.04948e-07,2.36292e-08,0.999186,0.000489037,-2.3406e-07,1.49121e-08,0.999674,0.000488613,-1.89324e-07,-2.3673e-08,1.00016,0.000488164,-2.60343e-07,2.01754e-08,1.00065,0.000487704,-1.99816e-07,-5.70288e-08,1.00114,0.000487133,-3.70903e-07,8.87303e-08,1.00162,0.000486657,-1.04712e-07,-5.94737e-08,1.00211,0.000486269,-2.83133e-07,2.99553e-08,1.0026,0.000485793,-1.93267e-07,-6.03474e-08,1.00308,0.000485225,-3.74309e-07,9.2225e-08,1.00357,0.000484754,-9.76345e-08,-7.0134e-08,1.00405,0.000484348,-3.08036e-07,6.91016e-08,1.00454,0.000483939,-1.00731e-07,-8.70633e-08,1.00502,0.000483476,-3.61921e-07,4.07328e-08,1.0055,0.000482875,-2.39723e-07,4.33413e-08,1.00599,0.000482525,-1.09699e-07,-9.48886e-08,1.00647,0.000482021,-3.94365e-07,9.77947e-08,1.00695,0.000481526,-1.00981e-07,-5.78713e-08,1.00743,0.00048115,-2.74595e-07,1.44814e-08,1.00791,0.000480645,-2.31151e-07,-5.42665e-11,1.00839,0.000480182,-2.31314e-07,-1.42643e-08,1.00887,0.000479677,-2.74106e-07,5.71115e-08,1
.00935,0.0004793,-1.02772e-07,-9.49724e-08,1.00983,0.000478809,-3.87689e-07,8.43596e-08,1.01031,0.000478287,-1.3461e-07,-4.04755e-09,1.01079,0.000478006,-1.46753e-07,-6.81694e-08,1.01127,0.000477508,-3.51261e-07,3.83067e-08,1.01174,0.00047692,-2.36341e-07,3.41521e-08,1.01222,0.00047655,-1.33885e-07,-5.57058e-08,1.0127,0.000476115,-3.01002e-07,6.94616e-08,1.01317,0.000475721,-9.26174e-08,-1.02931e-07,1.01365,0.000475227,-4.01412e-07,1.03846e-07,1.01412,0.000474736,-8.98751e-08,-7.40321e-08,1.0146,0.000474334,-3.11971e-07,7.30735e-08,1.01507,0.00047393,-9.27508e-08,-9.90527e-08,1.01554,0.000473447,-3.89909e-07,8.47188e-08,1.01602,0.000472921,-1.35753e-07,-1.40381e-09,1.01649,0.000472645,-1.39964e-07,-7.91035e-08,1.01696,0.000472128,-3.77275e-07,7.93993e-08,1.01744,0.000471612,-1.39077e-07,-7.52607e-11,1.01791,0.000471334,-1.39302e-07,-7.90983e-08,1.01838,0.000470818,-3.76597e-07,7.80499e-08,1.01885,0.000470299,-1.42448e-07,5.31733e-09,1.01932,0.00047003,-1.26496e-07,-9.93193e-08,1.01979,0.000469479,-4.24453e-07,1.53541e-07,1.02026,0.00046909,3.617e-08,-1.57217e-07,1.02073,0.000468691,-4.35482e-07,1.177e-07,1.02119,0.000468173,-8.23808e-08,-7.51659e-08,1.02166,0.000467783,-3.07878e-07,6.37538e-08,1.02213,0.000467358,-1.16617e-07,-6.064e-08,1.0226,0.000466943,-2.98537e-07,5.9597e-08,1.02306,0.000466525,-1.19746e-07,-5.85386e-08,1.02353,0.00046611,-2.95362e-07,5.53482e-08,1.024,0.000465685,-1.29317e-07,-4.36449e-08,1.02446,0.000465296,-2.60252e-07,2.20268e-11,1.02493,0.000464775,-2.60186e-07,4.35568e-08,1.02539,0.000464386,-1.29516e-07,-5.50398e-08,1.02586,0.000463961,-2.94635e-07,5.73932e-08,1.02632,0.000463544,-1.22456e-07,-5.53236e-08,1.02678,0.000463133,-2.88426e-07,4.46921e-08,1.02725,0.000462691,-1.5435e-07,-4.23534e-09,1.02771,0.000462369,-1.67056e-07,-2.77507e-08,1.02817,0.000461952,-2.50308e-07,-3.97101e-09,1.02863,0.000461439,-2.62221e-07,4.36348e-08,1.02909,0.000461046,-1.31317e-07,-5.13589e-08,1.02955,0.000460629,-2.85394e-07,4.25913e-08,1.03001,0.000460186,-1.5762e-07,2.0285e-10,1.03047,0.000459871,-1.57011e-07,-4.34027e-08,1.03093,0.000459427,-2.87219e-07,5.41987e-08,1.03139,0.000459015,-1.24623e-07,-5.4183e-08,1.03185,0.000458604,-2.87172e-07,4.33239e-08,1.03231,0.000458159,-1.572e-07,9.65817e-11,1.03277,0.000457845,-1.56911e-07,-4.37103e-08,1.03323,0.0004574,-2.88041e-07,5.55351e-08,1.03368,0.000456991,-1.21436e-07,-5.9221e-08,1.03414,0.00045657,-2.99099e-07,6.21394e-08,1.0346,0.000456158,-1.1268e-07,-7.01275e-08,1.03505,0.000455723,-3.23063e-07,9.91614e-08,1.03551,0.000455374,-2.55788e-08,-8.80996e-08,1.03596,0.000455058,-2.89878e-07,1.48184e-08,1.03642,0.000454523,-2.45422e-07,2.88258e-08,1.03687,0.000454119,-1.58945e-07,-1.09125e-08,1.03733,0.000453768,-1.91682e-07,1.48241e-08,1.03778,0.000453429,-1.4721e-07,-4.83838e-08,1.03823,0.00045299,-2.92361e-07,5.95019e-08,1.03869,0.000452584,-1.13856e-07,-7.04146e-08,1.03914,0.000452145,-3.25099e-07,1.02947e-07,1.03959,0.000451803,-1.62583e-08,-1.02955e-07,1.04004,0.000451462,-3.25123e-07,7.04544e-08,1.04049,0.000451023,-1.1376e-07,-5.96534e-08,1.04094,0.000450616,-2.9272e-07,4.89499e-08,1.04139,0.000450178,-1.45871e-07,-1.69369e-08,1.04184,0.000449835,-1.96681e-07,1.87977e-08,1.04229,0.000449498,-1.40288e-07,-5.82539e-08,1.04274,0.000449043,-3.1505e-07,9.50087e-08,1.04319,0.000448698,-3.00238e-08,-8.33623e-08,1.04364,0.000448388,-2.80111e-07,2.20363e-11,1.04409,0.000447828,-2.80045e-07,8.32742e-08,1.04454,0.000447517,-3.02221e-08,-9.47002e-08,1.04498,0.000447173,-3.14323e-07,5.7108e-08,1.04543,0.000446716,-1.42999e-07,-1.45225e-08,1.
04588,0.000446386,-1.86566e-07,9.82022e-10,1.04632,0.000446016,-1.8362e-07,1.05944e-08,1.04677,0.00044568,-1.51837e-07,-4.33597e-08,1.04721,0.000445247,-2.81916e-07,4.36352e-08,1.04766,0.000444814,-1.51011e-07,-1.19717e-08,1.0481,0.000444476,-1.86926e-07,4.25158e-09,1.04855,0.000444115,-1.74171e-07,-5.03461e-09,1.04899,0.000443751,-1.89275e-07,1.58868e-08,1.04944,0.00044342,-1.41614e-07,-5.85127e-08,1.04988,0.000442961,-3.17152e-07,9.89548e-08,1.05032,0.000442624,-2.0288e-08,-9.88878e-08,1.05076,0.000442287,-3.16951e-07,5.81779e-08,1.05121,0.000441827,-1.42418e-07,-1.46144e-08,1.05165,0.000441499,-1.86261e-07,2.79892e-10,1.05209,0.000441127,-1.85421e-07,1.34949e-08,1.05253,0.000440797,-1.44937e-07,-5.42594e-08,1.05297,0.000440344,-3.07715e-07,8.43335e-08,1.05341,0.000439982,-5.47146e-08,-4.46558e-08,1.05385,0.000439738,-1.88682e-07,-2.49193e-08,1.05429,0.000439286,-2.6344e-07,2.5124e-08,1.05473,0.000438835,-1.88068e-07,4.36328e-08,1.05517,0.000438589,-5.71699e-08,-8.04459e-08,1.05561,0.000438234,-2.98508e-07,3.97324e-08,1.05605,0.000437756,-1.79311e-07,4.07258e-08,1.05648,0.000437519,-5.71332e-08,-8.34263e-08,1.05692,0.000437155,-3.07412e-07,5.45608e-08,1.05736,0.000436704,-1.4373e-07,-1.56078e-08,1.05779,0.000436369,-1.90553e-07,7.87043e-09,1.05823,0.000436012,-1.66942e-07,-1.58739e-08,1.05867,0.00043563,-2.14563e-07,5.56251e-08,1.0591,0.000435368,-4.76881e-08,-8.74172e-08,1.05954,0.000435011,-3.0994e-07,5.56251e-08,1.05997,0.000434558,-1.43064e-07,-1.58739e-08,1.06041,0.000434224,-1.90686e-07,7.87042e-09,1.06084,0.000433866,-1.67075e-07,-1.56078e-08,1.06127,0.000433485,-2.13898e-07,5.45609e-08,1.06171,0.000433221,-5.02157e-08,-8.34263e-08,1.06214,0.00043287,-3.00495e-07,4.07258e-08,1.06257,0.000432391,-1.78317e-07,3.97325e-08,1.063,0.000432154,-5.91198e-08,-8.04464e-08,1.06344,0.000431794,-3.00459e-07,4.36347e-08,1.06387,0.000431324,-1.69555e-07,2.5117e-08,1.0643,0.000431061,-9.42041e-08,-2.48934e-08,1.06473,0.000430798,-1.68884e-07,-4.47527e-08,1.06516,0.000430326,-3.03142e-07,8.46951e-08,1.06559,0.000429973,-4.90573e-08,-5.56089e-08,1.06602,0.000429708,-2.15884e-07,1.85314e-08,1.06645,0.000429332,-1.6029e-07,-1.85166e-08,1.06688,0.000428956,-2.1584e-07,5.5535e-08,1.06731,0.000428691,-4.92347e-08,-8.44142e-08,1.06774,0.000428339,-3.02477e-07,4.37032e-08,1.06816,0.000427865,-1.71368e-07,2.88107e-08,1.06859,0.000427609,-8.49356e-08,-3.97367e-08,1.06902,0.00042732,-2.04146e-07,1.09267e-08,1.06945,0.000426945,-1.71365e-07,-3.97023e-09,1.06987,0.00042659,-1.83276e-07,4.9542e-09,1.0703,0.000426238,-1.68414e-07,-1.58466e-08,1.07073,0.000425854,-2.15953e-07,5.84321e-08,1.07115,0.000425597,-4.0657e-08,-9.86725e-08,1.07158,0.00042522,-3.36674e-07,9.78392e-08,1.072,0.00042484,-4.31568e-08,-5.42658e-08,1.07243,0.000424591,-2.05954e-07,1.45377e-11,1.07285,0.000424179,-2.0591e-07,5.42076e-08,1.07328,0.00042393,-4.32877e-08,-9.76357e-08,1.0737,0.00042355,-3.36195e-07,9.79165e-08,1.07412,0.000423172,-4.24451e-08,-5.56118e-08,1.07455,0.00042292,-2.09281e-07,5.32143e-09,1.07497,0.000422518,-1.93316e-07,3.43261e-08,1.07539,0.000422234,-9.0338e-08,-2.34165e-08,1.07581,0.000421983,-1.60588e-07,-5.98692e-08,1.07623,0.000421482,-3.40195e-07,1.43684e-07,1.07666,0.000421233,9.08574e-08,-1.5724e-07,1.07708,0.000420943,-3.80862e-07,1.27647e-07,1.0775,0.000420564,2.0791e-09,-1.1493e-07,1.07792,0.000420223,-3.4271e-07,9.36534e-08,1.07834,0.000419819,-6.17499e-08,-2.12653e-08,1.07876,0.000419632,-1.25546e-07,-8.59219e-09,1.07918,0.000419355,-1.51322e-07,-6.35752e-08,1.0796,0.000418861,-3.42048e-07,1.43684e-07,1.08002
,0.000418608,8.90034e-08,-1.53532e-07,1.08043,0.000418326,-3.71593e-07,1.12817e-07,1.08085,0.000417921,-3.31414e-08,-5.93184e-08,1.08127,0.000417677,-2.11097e-07,5.24697e-09,1.08169,0.00041727,-1.95356e-07,3.83305e-08,1.0821,0.000416995,-8.03642e-08,-3.93597e-08,1.08252,0.000416716,-1.98443e-07,-1.0094e-10,1.08294,0.000416319,-1.98746e-07,3.97635e-08,1.08335,0.00041604,-7.94557e-08,-3.97437e-08,1.08377,0.000415762,-1.98687e-07,1.94215e-12,1.08419,0.000415365,-1.98681e-07,3.97359e-08,1.0846,0.000415087,-7.94732e-08,-3.97362e-08,1.08502,0.000414809,-1.98682e-07,-4.31063e-13,1.08543,0.000414411,-1.98683e-07,3.97379e-08,1.08584,0.000414133,-7.94694e-08,-3.97418e-08,1.08626,0.000413855,-1.98695e-07,2.00563e-11,1.08667,0.000413458,-1.98635e-07,3.96616e-08,1.08709,0.000413179,-7.965e-08,-3.9457e-08,1.0875,0.000412902,-1.98021e-07,-1.04281e-09,1.08791,0.000412502,-2.01149e-07,4.36282e-08,1.08832,0.000412231,-7.02648e-08,-5.42608e-08,1.08874,0.000411928,-2.33047e-07,5.42057e-08,1.08915,0.000411624,-7.04301e-08,-4.33527e-08,1.08956,0.000411353,-2.00488e-07,-4.07378e-12,1.08997,0.000410952,-2.005e-07,4.3369e-08,1.09038,0.000410681,-7.03934e-08,-5.42627e-08,1.09079,0.000410378,-2.33182e-07,5.44726e-08,1.0912,0.000410075,-6.97637e-08,-4.44186e-08,1.09161,0.000409802,-2.03019e-07,3.99235e-09,1.09202,0.000409408,-1.91042e-07,2.84491e-08,1.09243,0.000409111,-1.05695e-07,1.42043e-09,1.09284,0.000408904,-1.01434e-07,-3.41308e-08,1.09325,0.000408599,-2.03826e-07,1.58937e-08,1.09366,0.000408239,-1.56145e-07,-2.94438e-08,1.09406,0.000407838,-2.44476e-07,1.01881e-07,1.09447,0.000407655,6.11676e-08,-1.39663e-07,1.09488,0.000407358,-3.57822e-07,9.91432e-08,1.09529,0.00040694,-6.03921e-08,-1.84912e-08,1.09569,0.000406764,-1.15866e-07,-2.51785e-08,1.0961,0.000406457,-1.91401e-07,-4.03115e-12,1.09651,0.000406074,-1.91413e-07,2.51947e-08,1.09691,0.000405767,-1.15829e-07,1.84346e-08,1.09732,0.00040559,-6.05254e-08,-9.89332e-08,1.09772,0.000405172,-3.57325e-07,1.3888e-07,1.09813,0.000404874,5.93136e-08,-9.8957e-08,1.09853,0.000404696,-2.37557e-07,1.853e-08,1.09894,0.000404277,-1.81968e-07,2.48372e-08,1.09934,0.000403987,-1.07456e-07,1.33047e-09,1.09975,0.000403776,-1.03465e-07,-3.01591e-08,1.10015,0.000403479,-1.93942e-07,9.66054e-11,1.10055,0.000403091,-1.93652e-07,2.97727e-08,1.10096,0.000402793,-1.04334e-07,2.19273e-11,1.10136,0.000402585,-1.04268e-07,-2.98604e-08,1.10176,0.000402287,-1.93849e-07,2.10325e-10,1.10216,0.0004019,-1.93218e-07,2.90191e-08,1.10256,0.0004016,-1.06161e-07,2.92264e-09,1.10297,0.000401397,-9.73931e-08,-4.07096e-08,1.10337,0.00040108,-2.19522e-07,4.07067e-08,1.10377,0.000400763,-9.7402e-08,-2.90783e-09,1.10417,0.000400559,-1.06126e-07,-2.90754e-08,1.10457,0.00040026,-1.93352e-07,9.00021e-14,1.10497,0.000399873,-1.93351e-07,2.9075e-08,1.10537,0.000399574,-1.06126e-07,2.90902e-09,1.10577,0.00039937,-9.73992e-08,-4.07111e-08,1.10617,0.000399053,-2.19533e-07,4.07262e-08,1.10657,0.000398736,-9.73541e-08,-2.98424e-09,1.10697,0.000398533,-1.06307e-07,-2.87892e-08,1.10736,0.000398234,-1.92674e-07,-1.06824e-09,1.10776,0.000397845,-1.95879e-07,3.30622e-08,1.10816,0.000397552,-9.66926e-08,-1.19712e-08,1.10856,0.000397323,-1.32606e-07,1.48225e-08,1.10895,0.000397102,-8.81387e-08,-4.73187e-08,1.10935,0.000396784,-2.30095e-07,5.52429e-08,1.10975,0.00039649,-6.4366e-08,-5.44437e-08,1.11014,0.000396198,-2.27697e-07,4.33226e-08,1.11054,0.000395872,-9.77293e-08,3.62656e-10,1.11094,0.000395678,-9.66414e-08,-4.47732e-08,1.11133,0.00039535,-2.30961e-07,5.95208e-08,1.11173,0.000395067,-5.23985e-08,-7.41008e-08,1.112
12,0.00039474,-2.74701e-07,1.17673e-07,1.11252,0.000394543,7.83181e-08,-1.58172e-07,1.11291,0.000394225,-3.96199e-07,1.57389e-07,1.1133,0.000393905,7.59679e-08,-1.13756e-07,1.1137,0.000393716,-2.653e-07,5.92165e-08,1.11409,0.000393363,-8.76507e-08,-3.90074e-09,1.11449,0.000393176,-9.93529e-08,-4.36136e-08,1.11488,0.000392846,-2.30194e-07,5.91457e-08,1.11527,0.000392563,-5.27564e-08,-7.376e-08,1.11566,0.000392237,-2.74037e-07,1.16685e-07,1.11606,0.000392039,7.60189e-08,-1.54562e-07,1.11645,0.000391727,-3.87667e-07,1.43935e-07,1.11684,0.000391384,4.4137e-08,-6.35487e-08,1.11723,0.000391281,-1.46509e-07,-8.94896e-09,1.11762,0.000390961,-1.73356e-07,-1.98647e-08,1.11801,0.000390555,-2.3295e-07,8.8408e-08,1.1184,0.000390354,3.22736e-08,-9.53486e-08,1.11879,0.000390133,-2.53772e-07,5.45677e-08,1.11918,0.000389789,-9.0069e-08,-3.71296e-09,1.11957,0.000389598,-1.01208e-07,-3.97159e-08,1.11996,0.000389276,-2.20355e-07,4.33671e-08,1.12035,0.000388966,-9.02542e-08,-1.45431e-08,1.12074,0.000388741,-1.33883e-07,1.48052e-08,1.12113,0.000388518,-8.94678e-08,-4.46778e-08,1.12152,0.000388205,-2.23501e-07,4.46966e-08,1.12191,0.000387892,-8.94114e-08,-1.48992e-08,1.12229,0.000387669,-1.34109e-07,1.49003e-08,1.12268,0.000387445,-8.94082e-08,-4.47019e-08,1.12307,0.000387132,-2.23514e-07,4.4698e-08,1.12345,0.000386819,-8.942e-08,-1.48806e-08,1.12384,0.000386596,-1.34062e-07,1.48245e-08,1.12423,0.000386372,-8.95885e-08,-4.44172e-08,1.12461,0.00038606,-2.2284e-07,4.36351e-08,1.125,0.000385745,-9.19348e-08,-1.09139e-08,1.12539,0.000385528,-1.24677e-07,2.05584e-11,1.12577,0.000385279,-1.24615e-07,1.08317e-08,1.12616,0.000385062,-9.21198e-08,-4.33473e-08,1.12654,0.000384748,-2.22162e-07,4.33481e-08,1.12693,0.000384434,-9.21174e-08,-1.08356e-08,1.12731,0.000384217,-1.24624e-07,-5.50907e-12,1.12769,0.000383968,-1.24641e-07,1.08577e-08,1.12808,0.000383751,-9.20679e-08,-4.34252e-08,1.12846,0.000383437,-2.22343e-07,4.36337e-08,1.12884,0.000383123,-9.14422e-08,-1.19005e-08,1.12923,0.000382904,-1.27144e-07,3.96813e-09,1.12961,0.000382662,-1.15239e-07,-3.97207e-09,1.12999,0.000382419,-1.27155e-07,1.19201e-08,1.13038,0.000382201,-9.1395e-08,-4.37085e-08,1.13076,0.000381887,-2.2252e-07,4.37046e-08,1.13114,0.000381573,-9.14068e-08,-1.19005e-08,1.13152,0.000381355,-1.27108e-07,3.89734e-09,1.1319,0.000381112,-1.15416e-07,-3.68887e-09,1.13228,0.00038087,-1.26483e-07,1.08582e-08,1.13266,0.00038065,-9.39083e-08,-3.97438e-08,1.13304,0.000380343,-2.1314e-07,2.89076e-08,1.13342,0.000380003,-1.26417e-07,4.33225e-08,1.1338,0.00037988,3.55072e-09,-8.29883e-08,1.13418,0.000379638,-2.45414e-07,5.0212e-08,1.13456,0.000379298,-9.47781e-08,1.34964e-09,1.13494,0.000379113,-9.07292e-08,-5.56105e-08,1.13532,0.000378764,-2.57561e-07,1.01883e-07,1.1357,0.000378555,4.80889e-08,-1.13504e-07,1.13608,0.000378311,-2.92423e-07,1.13713e-07,1.13646,0.000378067,4.87176e-08,-1.02931e-07,1.13683,0.000377856,-2.60076e-07,5.95923e-08,1.13721,0.000377514,-8.12988e-08,-1.62288e-08,1.13759,0.000377303,-1.29985e-07,5.32278e-09,1.13797,0.000377059,-1.14017e-07,-5.06237e-09,1.13834,0.000376816,-1.29204e-07,1.49267e-08,1.13872,0.000376602,-8.44237e-08,-5.46444e-08,1.1391,0.000376269,-2.48357e-07,8.44417e-08,1.13947,0.000376026,4.96815e-09,-4.47039e-08,1.13985,0.000375902,-1.29143e-07,-2.48355e-08,1.14023,0.000375569,-2.0365e-07,2.48368e-08,1.1406,0.000375236,-1.2914e-07,4.46977e-08,1.14098,0.000375112,4.95341e-09,-8.44184e-08,1.14135,0.000374869,-2.48302e-07,5.45572e-08,1.14173,0.000374536,-8.463e-08,-1.46013e-08,1.1421,0.000374323,-1.28434e-07,3.8478e-09,1.14247,0.00
0374077,-1.1689e-07,-7.89941e-10,1.14285,0.000373841,-1.1926e-07,-6.88042e-10,1.14322,0.0003736,-1.21324e-07,3.54213e-09,1.1436,0.000373368,-1.10698e-07,-1.34805e-08,1.14397,0.000373107,-1.51139e-07,5.03798e-08,1.14434,0.000372767,0.,0.};
+
+        template <bool srgb, int blueIdx, typename T, typename D>
+        __device__ __forceinline__ void RGB2LuvConvert_f(const T& src, D& dst)
+        {
+            const float _d = 1.f / (0.950456f + 15 + 1.088754f * 3);
+            const float _un = 13 * (4 * 0.950456f * _d);
+            const float _vn = 13 * (9 * _d);
+
+            float B = blueIdx == 0 ? src.x : src.z;
+            float G = src.y;
+            float R = blueIdx == 0 ? src.z : src.x;
+
+            if (srgb)
+            {
+                B = splineInterpolate(B * GAMMA_TAB_SIZE, c_sRGBGammaTab, GAMMA_TAB_SIZE);
+                G = splineInterpolate(G * GAMMA_TAB_SIZE, c_sRGBGammaTab, GAMMA_TAB_SIZE);
+                R = splineInterpolate(R * GAMMA_TAB_SIZE, c_sRGBGammaTab, GAMMA_TAB_SIZE);
+            }
+
+            float X = R * 0.412453f + G * 0.357580f + B * 0.180423f;
+            float Y = R * 0.212671f + G * 0.715160f + B * 0.072169f;
+            float Z = R * 0.019334f + G * 0.119193f + B * 0.950227f;
+
+            float L = splineInterpolate(Y * (LAB_CBRT_TAB_SIZE / 1.5f), c_LabCbrtTab, LAB_CBRT_TAB_SIZE);
+            L = 116.f * L - 16.f;
+
+            const float d = (4 * 13) / ::fmaxf(X + 15 * Y + 3 * Z, numeric_limits<float>::epsilon());
+            float u = L * (X * d - _un);
+            float v = L * ((9 * 0.25f) * Y * d - _vn);
+
+            dst.x = L;
+            dst.y = u;
+            dst.z = v;
+        }
+
+        template <bool srgb, int blueIdx, typename T, typename D>
+        __device__ __forceinline__ void RGB2LuvConvert_b(const T& src, D& dst)
+        {
+            float3 srcf, dstf;
+
+            srcf.x = src.x * (1.f / 255.f);
+            srcf.y = src.y * (1.f / 255.f);
+            srcf.z = src.z * (1.f / 255.f);
+
+            RGB2LuvConvert_f<srgb, blueIdx>(srcf, dstf);
+
+            dst.x = saturate_cast<uchar>(dstf.x * 2.55f);
+            dst.y = saturate_cast<uchar>(dstf.y * 0.72033898305084743f + 96.525423728813564f);
+            dst.z = saturate_cast<uchar>(dstf.z * 0.9732824427480916f + 136.259541984732824f);
+        }
+
+        template <typename T, int scn, int dcn, bool srgb, int blueIdx> struct RGB2Luv;
+        template <int scn, int dcn, bool srgb, int blueIdx>
+        struct RGB2Luv<uchar, scn, dcn, srgb, blueIdx>
+            : unary_function<typename TypeVec<uchar, scn>::vec_type, typename TypeVec<uchar, dcn>::vec_type>
+        {
+            __device__ __forceinline__ typename TypeVec<uchar, dcn>::vec_type operator ()(const typename TypeVec<uchar, scn>::vec_type& src) const
+            {
+                typename TypeVec<uchar, dcn>::vec_type dst;
+
+                RGB2LuvConvert_b<srgb, blueIdx>(src, dst);
+
+                return dst;
+            }
+            __host__ __device__ __forceinline__ RGB2Luv() {}
+            __host__ __device__ __forceinline__ RGB2Luv(const RGB2Luv&) {}
+        };
+        template <int scn, int dcn, bool srgb, int blueIdx>
+        struct RGB2Luv<float, scn, dcn, srgb, blueIdx>
+            : unary_function<typename TypeVec<float, scn>::vec_type, typename TypeVec<float, dcn>::vec_type>
+        {
+            __device__ __forceinline__ typename TypeVec<float, dcn>::vec_type operator ()(const typename TypeVec<float, scn>::vec_type& src) const
+            {
+                typename TypeVec<float, dcn>::vec_type dst;
+
+                RGB2LuvConvert_f<srgb, blueIdx>(src, dst);
+
+                return dst;
+            }
+            __host__ __device__ __forceinline__ RGB2Luv() {}
+            __host__ __device__ __forceinline__ RGB2Luv(const RGB2Luv&) {}
+        };
+    }
+
+#define OPENCV_CUDA_IMPLEMENT_RGB2Luv_TRAITS(name, scn, dcn, srgb, blueIdx) \
+    template <typename T> struct name ## _traits \
+    { \
+        typedef ::cv::cuda::device::color_detail::RGB2Luv<T, scn, dcn, srgb, blueIdx> functor_type; \
+        static __host__ __device__ __forceinline__ functor_type create_functor() \
+        { \
+            return functor_type(); \
+        } \
+    };
+
+    namespace color_detail
+    {
+        template <bool srgb, int blueIdx, typename T, typename D>
+        __device__ __forceinline__ void Luv2RGBConvert_f(const T& src, D& dst)
+        {
+            const float _d = 1.f / (0.950456f + 15 + 1.088754f * 3);
+            const float _un = 4 * 0.950456f * _d;
+            const float _vn = 9 * _d;
+
+            float L = src.x;
+            float u = src.y;
+            float v = src.z;
+
+            float Y = (L + 16.f) * (1.f / 116.f);
+            Y = Y * Y * Y;
+
+            float d = (1.f / 13.f) / L;
+            u = u * d + _un;
+            v = v * d + _vn;
+
+            float iv = 1.f / v;
+            float X = 2.25f * u * Y * iv;
+            float Z = (12 - 3 * u - 20 * v) * Y * 0.25f * iv;
+
+            float B = 0.055648f * X - 0.204043f * Y + 1.057311f * Z;
+            float G = -0.969256f * X + 1.875991f * Y + 0.041556f * Z;
+            float R = 3.240479f * X - 1.537150f * Y - 0.498535f * Z;
+
+            if (srgb)
+            {
+                B = splineInterpolate(B * GAMMA_TAB_SIZE, c_sRGBInvGammaTab, GAMMA_TAB_SIZE);
+                G = splineInterpolate(G * GAMMA_TAB_SIZE, c_sRGBInvGammaTab, GAMMA_TAB_SIZE);
+                R = splineInterpolate(R * GAMMA_TAB_SIZE, c_sRGBInvGammaTab, GAMMA_TAB_SIZE);
+            }
+
+            dst.x = blueIdx == 0 ? B : R;
+            dst.y = G;
+            dst.z = blueIdx == 0 ? R : B;
+            setAlpha(dst, ColorChannel<float>::max());
+        }
+
+        template <bool srgb, int blueIdx, typename T, typename D>
+        __device__ __forceinline__ void Luv2RGBConvert_b(const T& src, D& dst)
+        {
+            float3 srcf, dstf;
+
+            srcf.x = src.x * (100.f / 255.f);
+            srcf.y = src.y * 1.388235294117647f - 134.f;
+            srcf.z = src.z * 1.027450980392157f - 140.f;
+
+            Luv2RGBConvert_f<srgb, blueIdx>(srcf, dstf);
+
+            dst.x = saturate_cast<uchar>(dstf.x * 255.f);
+            dst.y = saturate_cast<uchar>(dstf.y * 255.f);
+            dst.z = saturate_cast<uchar>(dstf.z * 255.f);
+            setAlpha(dst, ColorChannel<uchar>::max());
+        }
+
+        template <typename T, int scn, int dcn, bool srgb, int blueIdx> struct Luv2RGB;
+        template <int scn, int dcn, bool srgb, int blueIdx>
+        struct Luv2RGB<uchar, scn, dcn, srgb, blueIdx>
+            : unary_function<typename TypeVec<uchar, scn>::vec_type, typename TypeVec<uchar, dcn>::vec_type>
+        {
+            __device__ __forceinline__ typename TypeVec<uchar, dcn>::vec_type operator ()(const typename TypeVec<uchar, scn>::vec_type& src) const
+            {
+                typename TypeVec<uchar, dcn>::vec_type dst;
+
+                Luv2RGBConvert_b<srgb, blueIdx>(src, dst);
+
+                return dst;
+            }
+            __host__ __device__ __forceinline__ Luv2RGB() {}
+            __host__ __device__ __forceinline__ Luv2RGB(const Luv2RGB&) {}
+        };
+        template <int scn, int dcn, bool srgb, int blueIdx>
+        struct Luv2RGB<float, scn, dcn, srgb, blueIdx>
+            : unary_function<typename TypeVec<float, scn>::vec_type, typename TypeVec<float, dcn>::vec_type>
+        {
+            __device__ __forceinline__ typename TypeVec<float, dcn>::vec_type operator ()(const typename TypeVec<float, scn>::vec_type& src) const
+            {
+                typename TypeVec<float, dcn>::vec_type dst;
+
+                Luv2RGBConvert_f<srgb, blueIdx>(src, dst);
+
+                return dst;
+            }
+            __host__ __device__ __forceinline__ Luv2RGB() {}
+            __host__ __device__ __forceinline__ Luv2RGB(const Luv2RGB&) {}
+        };
+    }
+
+#define OPENCV_CUDA_IMPLEMENT_Luv2RGB_TRAITS(name, scn, dcn, srgb, blueIdx) \
+    template <typename T> struct name ## _traits \
+    { \
+        typedef ::cv::cuda::device::color_detail::Luv2RGB<T, scn, dcn, srgb, blueIdx> functor_type; \
+        static __host__ __device__ __forceinline__ functor_type create_functor() \
+        { \
+            return functor_type(); \
+        } \
+    };
+
+    #undef CV_DESCALE
+
+}}} // namespace cv { namespace cuda { namespace cudev
+
+//! @endcond
+
+#endif // OPENCV_CUDA_COLOR_DETAIL_HPP
diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/detail/reduce.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/detail/reduce.hpp
new file mode 100644
index 00000000..8af20b0d
--- /dev/null
+++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/detail/reduce.hpp
@@ -0,0 +1,365 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+//  By downloading, copying, installing or using the software you agree to this license.
+//  If you do not agree to this license, do not download, install,
+//  copy or use the software.
+//
+//
+//                           License Agreement
+//                For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+//   * Redistribution's of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_REDUCE_DETAIL_HPP +#define OPENCV_CUDA_REDUCE_DETAIL_HPP + +#include +#include "../warp.hpp" +#include "../warp_shuffle.hpp" + +//! @cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + namespace reduce_detail + { + template struct GetType; + template struct GetType + { + typedef T type; + }; + template struct GetType + { + typedef T type; + }; + template struct GetType + { + typedef T type; + }; + + template + struct For + { + template + static __device__ void loadToSmem(const PointerTuple& smem, const ValTuple& val, unsigned int tid) + { + thrust::get(smem)[tid] = thrust::get(val); + + For::loadToSmem(smem, val, tid); + } + template + static __device__ void loadFromSmem(const PointerTuple& smem, const ValTuple& val, unsigned int tid) + { + thrust::get(val) = thrust::get(smem)[tid]; + + For::loadFromSmem(smem, val, tid); + } + + template + static __device__ void merge(const PointerTuple& smem, const ValTuple& val, unsigned int tid, unsigned int delta, const OpTuple& op) + { + typename GetType::type>::type reg = thrust::get(smem)[tid + delta]; + thrust::get(smem)[tid] = thrust::get(val) = thrust::get(op)(thrust::get(val), reg); + + For::merge(smem, val, tid, delta, op); + } + template + static __device__ void mergeShfl(const ValTuple& val, unsigned int delta, unsigned int width, const OpTuple& op) + { + typename GetType::type>::type reg = shfl_down(thrust::get(val), delta, width); + thrust::get(val) = thrust::get(op)(thrust::get(val), reg); + + For::mergeShfl(val, delta, width, op); + } + }; + template + struct For + { + template + static __device__ void loadToSmem(const PointerTuple&, const ValTuple&, unsigned int) + { + } + template + static __device__ void loadFromSmem(const PointerTuple&, const ValTuple&, unsigned int) + { + } + + template + static __device__ void merge(const PointerTuple&, const ValTuple&, unsigned int, unsigned int, const OpTuple&) + { + } + template + static __device__ void mergeShfl(const ValTuple&, unsigned int, unsigned int, const OpTuple&) + { + } + }; + + template + __device__ __forceinline__ void loadToSmem(volatile T* smem, T& val, unsigned int tid) + { + smem[tid] = val; + } + template + __device__ __forceinline__ void loadFromSmem(volatile T* smem, T& val, unsigned int tid) + { + val = smem[tid]; + } + template + __device__ __forceinline__ 
void loadToSmem(const thrust::tuple& smem, + const thrust::tuple& val, + unsigned int tid) + { + For<0, thrust::tuple_size >::value>::loadToSmem(smem, val, tid); + } + template + __device__ __forceinline__ void loadFromSmem(const thrust::tuple& smem, + const thrust::tuple& val, + unsigned int tid) + { + For<0, thrust::tuple_size >::value>::loadFromSmem(smem, val, tid); + } + + template + __device__ __forceinline__ void merge(volatile T* smem, T& val, unsigned int tid, unsigned int delta, const Op& op) + { + T reg = smem[tid + delta]; + smem[tid] = val = op(val, reg); + } + template + __device__ __forceinline__ void mergeShfl(T& val, unsigned int delta, unsigned int width, const Op& op) + { + T reg = shfl_down(val, delta, width); + val = op(val, reg); + } + template + __device__ __forceinline__ void merge(const thrust::tuple& smem, + const thrust::tuple& val, + unsigned int tid, + unsigned int delta, + const thrust::tuple& op) + { + For<0, thrust::tuple_size >::value>::merge(smem, val, tid, delta, op); + } + template + __device__ __forceinline__ void mergeShfl(const thrust::tuple& val, + unsigned int delta, + unsigned int width, + const thrust::tuple& op) + { + For<0, thrust::tuple_size >::value>::mergeShfl(val, delta, width, op); + } + + template struct Generic + { + template + static __device__ void reduce(Pointer smem, Reference val, unsigned int tid, Op op) + { + loadToSmem(smem, val, tid); + if (N >= 32) + __syncthreads(); + + if (N >= 2048) + { + if (tid < 1024) + merge(smem, val, tid, 1024, op); + + __syncthreads(); + } + if (N >= 1024) + { + if (tid < 512) + merge(smem, val, tid, 512, op); + + __syncthreads(); + } + if (N >= 512) + { + if (tid < 256) + merge(smem, val, tid, 256, op); + + __syncthreads(); + } + if (N >= 256) + { + if (tid < 128) + merge(smem, val, tid, 128, op); + + __syncthreads(); + } + if (N >= 128) + { + if (tid < 64) + merge(smem, val, tid, 64, op); + + __syncthreads(); + } + if (N >= 64) + { + if (tid < 32) + merge(smem, val, tid, 32, op); + } + + if (tid < 16) + { + merge(smem, val, tid, 16, op); + merge(smem, val, tid, 8, op); + merge(smem, val, tid, 4, op); + merge(smem, val, tid, 2, op); + merge(smem, val, tid, 1, op); + } + } + }; + + template + struct Unroll + { + static __device__ void loopShfl(Reference val, Op op, unsigned int N) + { + mergeShfl(val, I, N, op); + Unroll::loopShfl(val, op, N); + } + static __device__ void loop(Pointer smem, Reference val, unsigned int tid, Op op) + { + merge(smem, val, tid, I, op); + Unroll::loop(smem, val, tid, op); + } + }; + template + struct Unroll<0, Pointer, Reference, Op> + { + static __device__ void loopShfl(Reference, Op, unsigned int) + { + } + static __device__ void loop(Pointer, Reference, unsigned int, Op) + { + } + }; + + template struct WarpOptimized + { + template + static __device__ void reduce(Pointer smem, Reference val, unsigned int tid, Op op) + { + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300 + CV_UNUSED(smem); + CV_UNUSED(tid); + + Unroll::loopShfl(val, op, N); + #else + loadToSmem(smem, val, tid); + + if (tid < N / 2) + Unroll::loop(smem, val, tid, op); + #endif + } + }; + + template struct GenericOptimized32 + { + enum { M = N / 32 }; + + template + static __device__ void reduce(Pointer smem, Reference val, unsigned int tid, Op op) + { + const unsigned int laneId = Warp::laneId(); + + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300 + Unroll<16, Pointer, Reference, Op>::loopShfl(val, op, warpSize); + + if (laneId == 0) + loadToSmem(smem, val, tid / 32); + #else + loadToSmem(smem, val, 
tid); + + if (laneId < 16) + Unroll<16, Pointer, Reference, Op>::loop(smem, val, tid, op); + + __syncthreads(); + + if (laneId == 0) + loadToSmem(smem, val, tid / 32); + #endif + + __syncthreads(); + + loadFromSmem(smem, val, tid); + + if (tid < 32) + { + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300 + Unroll::loopShfl(val, op, M); + #else + Unroll::loop(smem, val, tid, op); + #endif + } + } + }; + + template struct StaticIf; + template struct StaticIf + { + typedef T1 type; + }; + template struct StaticIf + { + typedef T2 type; + }; + + template struct IsPowerOf2 + { + enum { value = ((N != 0) && !(N & (N - 1))) }; + }; + + template struct Dispatcher + { + typedef typename StaticIf< + (N <= 32) && IsPowerOf2::value, + WarpOptimized, + typename StaticIf< + (N <= 1024) && IsPowerOf2::value, + GenericOptimized32, + Generic + >::type + >::type reductor; + }; + } +}}} + +//! @endcond + +#endif // OPENCV_CUDA_REDUCE_DETAIL_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/detail/reduce_key_val.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/detail/reduce_key_val.hpp new file mode 100644 index 00000000..df37c173 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/detail/reduce_key_val.hpp @@ -0,0 +1,502 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef OPENCV_CUDA_PRED_VAL_REDUCE_DETAIL_HPP +#define OPENCV_CUDA_PRED_VAL_REDUCE_DETAIL_HPP + +#include +#include "../warp.hpp" +#include "../warp_shuffle.hpp" + +//! @cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + namespace reduce_key_val_detail + { + template struct GetType; + template struct GetType + { + typedef T type; + }; + template struct GetType + { + typedef T type; + }; + template struct GetType + { + typedef T type; + }; + + template + struct For + { + template + static __device__ void loadToSmem(const PointerTuple& smem, const ReferenceTuple& data, unsigned int tid) + { + thrust::get(smem)[tid] = thrust::get(data); + + For::loadToSmem(smem, data, tid); + } + template + static __device__ void loadFromSmem(const PointerTuple& smem, const ReferenceTuple& data, unsigned int tid) + { + thrust::get(data) = thrust::get(smem)[tid]; + + For::loadFromSmem(smem, data, tid); + } + + template + static __device__ void copyShfl(const ReferenceTuple& val, unsigned int delta, int width) + { + thrust::get(val) = shfl_down(thrust::get(val), delta, width); + + For::copyShfl(val, delta, width); + } + template + static __device__ void copy(const PointerTuple& svals, const ReferenceTuple& val, unsigned int tid, unsigned int delta) + { + thrust::get(svals)[tid] = thrust::get(val) = thrust::get(svals)[tid + delta]; + + For::copy(svals, val, tid, delta); + } + + template + static __device__ void mergeShfl(const KeyReferenceTuple& key, const ValReferenceTuple& val, const CmpTuple& cmp, unsigned int delta, int width) + { + typename GetType::type>::type reg = shfl_down(thrust::get(key), delta, width); + + if (thrust::get(cmp)(reg, thrust::get(key))) + { + thrust::get(key) = reg; + thrust::get(val) = shfl_down(thrust::get(val), delta, width); + } + + For::mergeShfl(key, val, cmp, delta, width); + } + template + static __device__ void merge(const KeyPointerTuple& skeys, const KeyReferenceTuple& key, + const ValPointerTuple& svals, const ValReferenceTuple& val, + const CmpTuple& cmp, + unsigned int tid, unsigned int delta) + { + typename GetType::type>::type reg = thrust::get(skeys)[tid + delta]; + + if (thrust::get(cmp)(reg, thrust::get(key))) + { + thrust::get(skeys)[tid] = thrust::get(key) = reg; + thrust::get(svals)[tid] = thrust::get(val) = thrust::get(svals)[tid + delta]; + } + + For::merge(skeys, key, svals, val, cmp, tid, delta); + } + }; + template + struct For + { + template + static __device__ void loadToSmem(const PointerTuple&, const ReferenceTuple&, unsigned int) + { + } + template + static __device__ void loadFromSmem(const PointerTuple&, const ReferenceTuple&, unsigned int) + { + } + + template + static __device__ void copyShfl(const ReferenceTuple&, unsigned int, int) + { + } + template + static __device__ void copy(const PointerTuple&, const ReferenceTuple&, unsigned int, unsigned int) + { + } + + template + static __device__ void mergeShfl(const KeyReferenceTuple&, const ValReferenceTuple&, const CmpTuple&, unsigned int, int) + { + } + template + static __device__ void merge(const KeyPointerTuple&, const KeyReferenceTuple&, + const ValPointerTuple&, const ValReferenceTuple&, + const CmpTuple&, + unsigned int, unsigned int) + { + } + }; + + ////////////////////////////////////////////////////// + // loadToSmem + + template + __device__ __forceinline__ void loadToSmem(volatile T* smem, T& data, unsigned int tid) + { + smem[tid] = data; + } + template + __device__ __forceinline__ void loadFromSmem(volatile T* smem, T& data, unsigned int tid) + { + data 
= smem[tid]; + } + template + __device__ __forceinline__ void loadToSmem(const thrust::tuple& smem, + const thrust::tuple& data, + unsigned int tid) + { + For<0, thrust::tuple_size >::value>::loadToSmem(smem, data, tid); + } + template + __device__ __forceinline__ void loadFromSmem(const thrust::tuple& smem, + const thrust::tuple& data, + unsigned int tid) + { + For<0, thrust::tuple_size >::value>::loadFromSmem(smem, data, tid); + } + + ////////////////////////////////////////////////////// + // copyVals + + template + __device__ __forceinline__ void copyValsShfl(V& val, unsigned int delta, int width) + { + val = shfl_down(val, delta, width); + } + template + __device__ __forceinline__ void copyVals(volatile V* svals, V& val, unsigned int tid, unsigned int delta) + { + svals[tid] = val = svals[tid + delta]; + } + template + __device__ __forceinline__ void copyValsShfl(const thrust::tuple& val, + unsigned int delta, + int width) + { + For<0, thrust::tuple_size >::value>::copyShfl(val, delta, width); + } + template + __device__ __forceinline__ void copyVals(const thrust::tuple& svals, + const thrust::tuple& val, + unsigned int tid, unsigned int delta) + { + For<0, thrust::tuple_size >::value>::copy(svals, val, tid, delta); + } + + ////////////////////////////////////////////////////// + // merge + + template + __device__ __forceinline__ void mergeShfl(K& key, V& val, const Cmp& cmp, unsigned int delta, int width) + { + K reg = shfl_down(key, delta, width); + + if (cmp(reg, key)) + { + key = reg; + copyValsShfl(val, delta, width); + } + } + template + __device__ __forceinline__ void merge(volatile K* skeys, K& key, volatile V* svals, V& val, const Cmp& cmp, unsigned int tid, unsigned int delta) + { + K reg = skeys[tid + delta]; + + if (cmp(reg, key)) + { + skeys[tid] = key = reg; + copyVals(svals, val, tid, delta); + } + } + template + __device__ __forceinline__ void mergeShfl(K& key, + const thrust::tuple& val, + const Cmp& cmp, + unsigned int delta, int width) + { + K reg = shfl_down(key, delta, width); + + if (cmp(reg, key)) + { + key = reg; + copyValsShfl(val, delta, width); + } + } + template + __device__ __forceinline__ void merge(volatile K* skeys, K& key, + const thrust::tuple& svals, + const thrust::tuple& val, + const Cmp& cmp, unsigned int tid, unsigned int delta) + { + K reg = skeys[tid + delta]; + + if (cmp(reg, key)) + { + skeys[tid] = key = reg; + copyVals(svals, val, tid, delta); + } + } + template + __device__ __forceinline__ void mergeShfl(const thrust::tuple& key, + const thrust::tuple& val, + const thrust::tuple& cmp, + unsigned int delta, int width) + { + For<0, thrust::tuple_size >::value>::mergeShfl(key, val, cmp, delta, width); + } + template + __device__ __forceinline__ void merge(const thrust::tuple& skeys, + const thrust::tuple& key, + const thrust::tuple& svals, + const thrust::tuple& val, + const thrust::tuple& cmp, + unsigned int tid, unsigned int delta) + { + For<0, thrust::tuple_size >::value>::merge(skeys, key, svals, val, cmp, tid, delta); + } + + ////////////////////////////////////////////////////// + // Generic + + template struct Generic + { + template + static __device__ void reduce(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp) + { + loadToSmem(skeys, key, tid); + loadValsToSmem(svals, val, tid); + if (N >= 32) + __syncthreads(); + + if (N >= 2048) + { + if (tid < 1024) + merge(skeys, key, svals, val, cmp, tid, 1024); + + __syncthreads(); + } + if (N >= 1024) + { + if (tid < 512) + merge(skeys, key, svals, val, cmp, tid, 512); + + 
__syncthreads(); + } + if (N >= 512) + { + if (tid < 256) + merge(skeys, key, svals, val, cmp, tid, 256); + + __syncthreads(); + } + if (N >= 256) + { + if (tid < 128) + merge(skeys, key, svals, val, cmp, tid, 128); + + __syncthreads(); + } + if (N >= 128) + { + if (tid < 64) + merge(skeys, key, svals, val, cmp, tid, 64); + + __syncthreads(); + } + if (N >= 64) + { + if (tid < 32) + merge(skeys, key, svals, val, cmp, tid, 32); + } + + if (tid < 16) + { + merge(skeys, key, svals, val, cmp, tid, 16); + merge(skeys, key, svals, val, cmp, tid, 8); + merge(skeys, key, svals, val, cmp, tid, 4); + merge(skeys, key, svals, val, cmp, tid, 2); + merge(skeys, key, svals, val, cmp, tid, 1); + } + } + }; + + template + struct Unroll + { + static __device__ void loopShfl(KR key, VR val, Cmp cmp, unsigned int N) + { + mergeShfl(key, val, cmp, I, N); + Unroll::loopShfl(key, val, cmp, N); + } + static __device__ void loop(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp) + { + merge(skeys, key, svals, val, cmp, tid, I); + Unroll::loop(skeys, key, svals, val, tid, cmp); + } + }; + template + struct Unroll<0, KP, KR, VP, VR, Cmp> + { + static __device__ void loopShfl(KR, VR, Cmp, unsigned int) + { + } + static __device__ void loop(KP, KR, VP, VR, unsigned int, Cmp) + { + } + }; + + template struct WarpOptimized + { + template + static __device__ void reduce(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp) + { + #if 0 // __CUDA_ARCH__ >= 300 + CV_UNUSED(skeys); + CV_UNUSED(svals); + CV_UNUSED(tid); + + Unroll::loopShfl(key, val, cmp, N); + #else + loadToSmem(skeys, key, tid); + loadToSmem(svals, val, tid); + + if (tid < N / 2) + Unroll::loop(skeys, key, svals, val, tid, cmp); + #endif + } + }; + + template struct GenericOptimized32 + { + enum { M = N / 32 }; + + template + static __device__ void reduce(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp) + { + const unsigned int laneId = Warp::laneId(); + + #if 0 // __CUDA_ARCH__ >= 300 + Unroll<16, KP, KR, VP, VR, Cmp>::loopShfl(key, val, cmp, warpSize); + + if (laneId == 0) + { + loadToSmem(skeys, key, tid / 32); + loadToSmem(svals, val, tid / 32); + } + #else + loadToSmem(skeys, key, tid); + loadToSmem(svals, val, tid); + + if (laneId < 16) + Unroll<16, KP, KR, VP, VR, Cmp>::loop(skeys, key, svals, val, tid, cmp); + + __syncthreads(); + + if (laneId == 0) + { + loadToSmem(skeys, key, tid / 32); + loadToSmem(svals, val, tid / 32); + } + #endif + + __syncthreads(); + + loadFromSmem(skeys, key, tid); + + if (tid < 32) + { + #if 0 // __CUDA_ARCH__ >= 300 + loadFromSmem(svals, val, tid); + + Unroll::loopShfl(key, val, cmp, M); + #else + Unroll::loop(skeys, key, svals, val, tid, cmp); + #endif + } + } + }; + + template struct StaticIf; + template struct StaticIf + { + typedef T1 type; + }; + template struct StaticIf + { + typedef T2 type; + }; + + template struct IsPowerOf2 + { + enum { value = ((N != 0) && !(N & (N - 1))) }; + }; + + template struct Dispatcher + { + typedef typename StaticIf< + (N <= 32) && IsPowerOf2::value, + WarpOptimized, + typename StaticIf< + (N <= 1024) && IsPowerOf2::value, + GenericOptimized32, + Generic + >::type + >::type reductor; + }; + } +}}} + +//! 
@endcond + +#endif // OPENCV_CUDA_PRED_VAL_REDUCE_DETAIL_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/detail/transform_detail.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/detail/transform_detail.hpp new file mode 100644 index 00000000..19198488 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/detail/transform_detail.hpp @@ -0,0 +1,392 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_TRANSFORM_DETAIL_HPP +#define OPENCV_CUDA_TRANSFORM_DETAIL_HPP + +#include "../common.hpp" +#include "../vec_traits.hpp" +#include "../functional.hpp" + +//! @cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + namespace transform_detail + { + //! Read Write Traits + + template struct UnaryReadWriteTraits + { + typedef typename TypeVec::vec_type read_type; + typedef typename TypeVec::vec_type write_type; + }; + + template struct BinaryReadWriteTraits + { + typedef typename TypeVec::vec_type read_type1; + typedef typename TypeVec::vec_type read_type2; + typedef typename TypeVec::vec_type write_type; + }; + + //! 
Transform kernels + + template struct OpUnroller; + template <> struct OpUnroller<1> + { + template + static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, UnOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.x = op(src.x); + } + + template + static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, BinOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.x = op(src1.x, src2.x); + } + }; + template <> struct OpUnroller<2> + { + template + static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, UnOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.x = op(src.x); + if (mask(y, x_shifted + 1)) + dst.y = op(src.y); + } + + template + static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, BinOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.x = op(src1.x, src2.x); + if (mask(y, x_shifted + 1)) + dst.y = op(src1.y, src2.y); + } + }; + template <> struct OpUnroller<3> + { + template + static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, const UnOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.x = op(src.x); + if (mask(y, x_shifted + 1)) + dst.y = op(src.y); + if (mask(y, x_shifted + 2)) + dst.z = op(src.z); + } + + template + static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, const BinOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.x = op(src1.x, src2.x); + if (mask(y, x_shifted + 1)) + dst.y = op(src1.y, src2.y); + if (mask(y, x_shifted + 2)) + dst.z = op(src1.z, src2.z); + } + }; + template <> struct OpUnroller<4> + { + template + static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, const UnOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.x = op(src.x); + if (mask(y, x_shifted + 1)) + dst.y = op(src.y); + if (mask(y, x_shifted + 2)) + dst.z = op(src.z); + if (mask(y, x_shifted + 3)) + dst.w = op(src.w); + } + + template + static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, const BinOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.x = op(src1.x, src2.x); + if (mask(y, x_shifted + 1)) + dst.y = op(src1.y, src2.y); + if (mask(y, x_shifted + 2)) + dst.z = op(src1.z, src2.z); + if (mask(y, x_shifted + 3)) + dst.w = op(src1.w, src2.w); + } + }; + template <> struct OpUnroller<8> + { + template + static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, const UnOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.a0 = op(src.a0); + if (mask(y, x_shifted + 1)) + dst.a1 = op(src.a1); + if (mask(y, x_shifted + 2)) + dst.a2 = op(src.a2); + if (mask(y, x_shifted + 3)) + dst.a3 = op(src.a3); + if (mask(y, x_shifted + 4)) + dst.a4 = op(src.a4); + if (mask(y, x_shifted + 5)) + dst.a5 = op(src.a5); + if (mask(y, x_shifted + 6)) + dst.a6 = op(src.a6); + if (mask(y, x_shifted + 7)) + dst.a7 = op(src.a7); + } + + template + static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, const BinOp& op, int x_shifted, int y) + { + if (mask(y, x_shifted)) + dst.a0 = op(src1.a0, src2.a0); + if (mask(y, x_shifted + 1)) + dst.a1 = op(src1.a1, src2.a1); + if (mask(y, x_shifted + 2)) + dst.a2 = op(src1.a2, src2.a2); + if (mask(y, x_shifted + 3)) + dst.a3 = op(src1.a3, src2.a3); + if (mask(y, 
x_shifted + 4)) + dst.a4 = op(src1.a4, src2.a4); + if (mask(y, x_shifted + 5)) + dst.a5 = op(src1.a5, src2.a5); + if (mask(y, x_shifted + 6)) + dst.a6 = op(src1.a6, src2.a6); + if (mask(y, x_shifted + 7)) + dst.a7 = op(src1.a7, src2.a7); + } + }; + + template + static __global__ void transformSmart(const PtrStepSz src_, PtrStep dst_, const Mask mask, const UnOp op) + { + typedef TransformFunctorTraits ft; + typedef typename UnaryReadWriteTraits::read_type read_type; + typedef typename UnaryReadWriteTraits::write_type write_type; + + const int x = threadIdx.x + blockIdx.x * blockDim.x; + const int y = threadIdx.y + blockIdx.y * blockDim.y; + const int x_shifted = x * ft::smart_shift; + + if (y < src_.rows) + { + const T* src = src_.ptr(y); + D* dst = dst_.ptr(y); + + if (x_shifted + ft::smart_shift - 1 < src_.cols) + { + const read_type src_n_el = ((const read_type*)src)[x]; + OpUnroller::unroll(src_n_el, ((write_type*)dst)[x], mask, op, x_shifted, y); + } + else + { + for (int real_x = x_shifted; real_x < src_.cols; ++real_x) + { + if (mask(y, real_x)) + dst[real_x] = op(src[real_x]); + } + } + } + } + + template + __global__ static void transformSimple(const PtrStepSz src, PtrStep dst, const Mask mask, const UnOp op) + { + const int x = blockDim.x * blockIdx.x + threadIdx.x; + const int y = blockDim.y * blockIdx.y + threadIdx.y; + + if (x < src.cols && y < src.rows && mask(y, x)) + { + dst.ptr(y)[x] = op(src.ptr(y)[x]); + } + } + + template + static __global__ void transformSmart(const PtrStepSz src1_, const PtrStep src2_, PtrStep dst_, + const Mask mask, const BinOp op) + { + typedef TransformFunctorTraits ft; + typedef typename BinaryReadWriteTraits::read_type1 read_type1; + typedef typename BinaryReadWriteTraits::read_type2 read_type2; + typedef typename BinaryReadWriteTraits::write_type write_type; + + const int x = threadIdx.x + blockIdx.x * blockDim.x; + const int y = threadIdx.y + blockIdx.y * blockDim.y; + const int x_shifted = x * ft::smart_shift; + + if (y < src1_.rows) + { + const T1* src1 = src1_.ptr(y); + const T2* src2 = src2_.ptr(y); + D* dst = dst_.ptr(y); + + if (x_shifted + ft::smart_shift - 1 < src1_.cols) + { + const read_type1 src1_n_el = ((const read_type1*)src1)[x]; + const read_type2 src2_n_el = ((const read_type2*)src2)[x]; + + OpUnroller::unroll(src1_n_el, src2_n_el, ((write_type*)dst)[x], mask, op, x_shifted, y); + } + else + { + for (int real_x = x_shifted; real_x < src1_.cols; ++real_x) + { + if (mask(y, real_x)) + dst[real_x] = op(src1[real_x], src2[real_x]); + } + } + } + } + + template + static __global__ void transformSimple(const PtrStepSz src1, const PtrStep src2, PtrStep dst, + const Mask mask, const BinOp op) + { + const int x = blockDim.x * blockIdx.x + threadIdx.x; + const int y = blockDim.y * blockIdx.y + threadIdx.y; + + if (x < src1.cols && y < src1.rows && mask(y, x)) + { + const T1 src1_data = src1.ptr(y)[x]; + const T2 src2_data = src2.ptr(y)[x]; + dst.ptr(y)[x] = op(src1_data, src2_data); + } + } + + template struct TransformDispatcher; + template<> struct TransformDispatcher + { + template + static void call(PtrStepSz src, PtrStepSz dst, UnOp op, Mask mask, cudaStream_t stream) + { + typedef TransformFunctorTraits ft; + + const dim3 threads(ft::simple_block_dim_x, ft::simple_block_dim_y, 1); + const dim3 grid(divUp(src.cols, threads.x), divUp(src.rows, threads.y), 1); + + transformSimple<<>>(src, dst, mask, op); + cudaSafeCall( cudaGetLastError() ); + + if (stream == 0) + cudaSafeCall( cudaDeviceSynchronize() ); + } + + template + static void 
call(PtrStepSz src1, PtrStepSz src2, PtrStepSz dst, BinOp op, Mask mask, cudaStream_t stream) + { + typedef TransformFunctorTraits ft; + + const dim3 threads(ft::simple_block_dim_x, ft::simple_block_dim_y, 1); + const dim3 grid(divUp(src1.cols, threads.x), divUp(src1.rows, threads.y), 1); + + transformSimple<<>>(src1, src2, dst, mask, op); + cudaSafeCall( cudaGetLastError() ); + + if (stream == 0) + cudaSafeCall( cudaDeviceSynchronize() ); + } + }; + template<> struct TransformDispatcher + { + template + static void call(PtrStepSz src, PtrStepSz dst, UnOp op, Mask mask, cudaStream_t stream) + { + typedef TransformFunctorTraits ft; + + CV_StaticAssert(ft::smart_shift != 1, ""); + + if (!isAligned(src.data, ft::smart_shift * sizeof(T)) || !isAligned(src.step, ft::smart_shift * sizeof(T)) || + !isAligned(dst.data, ft::smart_shift * sizeof(D)) || !isAligned(dst.step, ft::smart_shift * sizeof(D))) + { + TransformDispatcher::call(src, dst, op, mask, stream); + return; + } + + const dim3 threads(ft::smart_block_dim_x, ft::smart_block_dim_y, 1); + const dim3 grid(divUp(src.cols, threads.x * ft::smart_shift), divUp(src.rows, threads.y), 1); + + transformSmart<<>>(src, dst, mask, op); + cudaSafeCall( cudaGetLastError() ); + + if (stream == 0) + cudaSafeCall( cudaDeviceSynchronize() ); + } + + template + static void call(PtrStepSz src1, PtrStepSz src2, PtrStepSz dst, BinOp op, Mask mask, cudaStream_t stream) + { + typedef TransformFunctorTraits ft; + + CV_StaticAssert(ft::smart_shift != 1, ""); + + if (!isAligned(src1.data, ft::smart_shift * sizeof(T1)) || !isAligned(src1.step, ft::smart_shift * sizeof(T1)) || + !isAligned(src2.data, ft::smart_shift * sizeof(T2)) || !isAligned(src2.step, ft::smart_shift * sizeof(T2)) || + !isAligned(dst.data, ft::smart_shift * sizeof(D)) || !isAligned(dst.step, ft::smart_shift * sizeof(D))) + { + TransformDispatcher::call(src1, src2, dst, op, mask, stream); + return; + } + + const dim3 threads(ft::smart_block_dim_x, ft::smart_block_dim_y, 1); + const dim3 grid(divUp(src1.cols, threads.x * ft::smart_shift), divUp(src1.rows, threads.y), 1); + + transformSmart<<>>(src1, src2, dst, mask, op); + cudaSafeCall( cudaGetLastError() ); + + if (stream == 0) + cudaSafeCall( cudaDeviceSynchronize() ); + } + }; + } // namespace transform_detail +}}} // namespace cv { namespace cuda { namespace cudev + +//! @endcond + +#endif // OPENCV_CUDA_TRANSFORM_DETAIL_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/detail/type_traits_detail.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/detail/type_traits_detail.hpp new file mode 100644 index 00000000..a78bd2c0 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/detail/type_traits_detail.hpp @@ -0,0 +1,191 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_TYPE_TRAITS_DETAIL_HPP +#define OPENCV_CUDA_TYPE_TRAITS_DETAIL_HPP + +#include "../common.hpp" +#include "../vec_traits.hpp" + +//! @cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + namespace type_traits_detail + { + template struct Select { typedef T1 type; }; + template struct Select { typedef T2 type; }; + + template struct IsSignedIntergral { enum {value = 0}; }; + template <> struct IsSignedIntergral { enum {value = 1}; }; + template <> struct IsSignedIntergral { enum {value = 1}; }; + template <> struct IsSignedIntergral { enum {value = 1}; }; + template <> struct IsSignedIntergral { enum {value = 1}; }; + template <> struct IsSignedIntergral { enum {value = 1}; }; + template <> struct IsSignedIntergral { enum {value = 1}; }; + + template struct IsUnsignedIntegral { enum {value = 0}; }; + template <> struct IsUnsignedIntegral { enum {value = 1}; }; + template <> struct IsUnsignedIntegral { enum {value = 1}; }; + template <> struct IsUnsignedIntegral { enum {value = 1}; }; + template <> struct IsUnsignedIntegral { enum {value = 1}; }; + template <> struct IsUnsignedIntegral { enum {value = 1}; }; + template <> struct IsUnsignedIntegral { enum {value = 1}; }; + + template struct IsIntegral { enum {value = IsSignedIntergral::value || IsUnsignedIntegral::value}; }; + template <> struct IsIntegral { enum {value = 1}; }; + template <> struct IsIntegral { enum {value = 1}; }; + + template struct IsFloat { enum {value = 0}; }; + template <> struct IsFloat { enum {value = 1}; }; + template <> struct IsFloat { enum {value = 1}; }; + + template struct IsVec { enum {value = 0}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> 
struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + template <> struct IsVec { enum {value = 1}; }; + + template struct AddParameterType { typedef const U& type; }; + template struct AddParameterType { typedef U& type; }; + template <> struct AddParameterType { typedef void type; }; + + template struct ReferenceTraits + { + enum { value = false }; + typedef U type; + }; + template struct ReferenceTraits + { + enum { value = true }; + typedef U type; + }; + + template struct PointerTraits + { + enum { value = false }; + typedef void type; + }; + template struct PointerTraits + { + enum { value = true }; + typedef U type; + }; + template struct PointerTraits + { + enum { value = true }; + typedef U type; + }; + + template struct UnConst + { + typedef U type; + enum { value = 0 }; + }; + template struct UnConst + { + typedef U type; + enum { value = 1 }; + }; + template struct UnConst + { + typedef U& type; + enum { value = 1 }; + }; + + template struct UnVolatile + { + typedef U type; + enum { value = 0 }; + }; + template struct UnVolatile + { + typedef U type; + enum { value = 1 }; + }; + template struct UnVolatile + { + typedef U& type; + enum { value = 1 }; + }; + } // namespace type_traits_detail +}}} // namespace cv { namespace cuda { namespace cudev + +//! @endcond + +#endif // OPENCV_CUDA_TYPE_TRAITS_DETAIL_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/detail/vec_distance_detail.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/detail/vec_distance_detail.hpp new file mode 100644 index 00000000..8283a995 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/detail/vec_distance_detail.hpp @@ -0,0 +1,121 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
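// Editorial sketch, not part of the patch: the compile-time type-selection idiom that
// type_traits_detail.hpp above is built around. "SelectSketch" is a hypothetical name;
// in upstream OpenCV the trait carries a boolean condition plus the two candidate types
// as template parameters, with the false case handled by a partial specialization.
template <bool Cond, typename T1, typename T2> struct SelectSketch { typedef T1 type; };
template <typename T1, typename T2> struct SelectSketch<false, T1, T2> { typedef T2 type; };

// Example: both typedefs are resolved entirely at compile time.
typedef SelectSketch<true,  int, float>::type ChoseInt;    // ChoseInt   is int
typedef SelectSketch<false, int, float>::type ChoseFloat;  // ChoseFloat is float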
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_VEC_DISTANCE_DETAIL_HPP +#define OPENCV_CUDA_VEC_DISTANCE_DETAIL_HPP + +#include "../datamov_utils.hpp" + +//! 
@cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + namespace vec_distance_detail + { + template struct UnrollVecDiffCached + { + template + static __device__ void calcCheck(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, int ind) + { + if (ind < len) + { + T1 val1 = *vecCached++; + + T2 val2; + ForceGlob::Load(vecGlob, ind, val2); + + dist.reduceIter(val1, val2); + + UnrollVecDiffCached::calcCheck(vecCached, vecGlob, len, dist, ind + THREAD_DIM); + } + } + + template + static __device__ void calcWithoutCheck(const T1* vecCached, const T2* vecGlob, Dist& dist) + { + T1 val1 = *vecCached++; + + T2 val2; + ForceGlob::Load(vecGlob, 0, val2); + vecGlob += THREAD_DIM; + + dist.reduceIter(val1, val2); + + UnrollVecDiffCached::calcWithoutCheck(vecCached, vecGlob, dist); + } + }; + template struct UnrollVecDiffCached + { + template + static __device__ __forceinline__ void calcCheck(const T1*, const T2*, int, Dist&, int) + { + } + + template + static __device__ __forceinline__ void calcWithoutCheck(const T1*, const T2*, Dist&) + { + } + }; + + template struct VecDiffCachedCalculator; + template struct VecDiffCachedCalculator + { + template + static __device__ __forceinline__ void calc(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, int tid) + { + UnrollVecDiffCached::calcCheck(vecCached, vecGlob, len, dist, tid); + } + }; + template struct VecDiffCachedCalculator + { + template + static __device__ __forceinline__ void calc(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, int tid) + { + UnrollVecDiffCached::calcWithoutCheck(vecCached, vecGlob + tid, dist); + } + }; + } // namespace vec_distance_detail +}}} // namespace cv { namespace cuda { namespace cudev + +//! @endcond + +#endif // OPENCV_CUDA_VEC_DISTANCE_DETAIL_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/dynamic_smem.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/dynamic_smem.hpp new file mode 100644 index 00000000..42570c68 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/dynamic_smem.hpp @@ -0,0 +1,88 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
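// Editorial sketch, not part of the patch: the compile-time unrolling pattern that
// UnrollVecDiffCached above relies on, reduced to a hypothetical host-side helper.
// The remaining iteration count is a template parameter, each instantiation handles
// one element, and the zero specialization terminates the recursion, so the "loop"
// is fully unrolled by the compiler with no runtime branch.
template <int N> struct UnrollSketch
{
    template <typename T>
    static void accumulate(const T* a, const T* b, T& sum)
    {
        sum += a[0] * b[0];                                  // one element per instantiation
        UnrollSketch<N - 1>::accumulate(a + 1, b + 1, sum);  // recurse on the rest
    }
};
template <> struct UnrollSketch<0>
{
    template <typename T>
    static void accumulate(const T*, const T*, T&) {}        // recursion terminator
};
// UnrollSketch<4>::accumulate(x, y, s) expands into four multiply-adds with no loop.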
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_DYNAMIC_SMEM_HPP +#define OPENCV_CUDA_DYNAMIC_SMEM_HPP + +/** @file + * @deprecated Use @ref cudev instead. + */ + +//! @cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + template struct DynamicSharedMem + { + __device__ __forceinline__ operator T*() + { + extern __shared__ int __smem[]; + return (T*)__smem; + } + + __device__ __forceinline__ operator const T*() const + { + extern __shared__ int __smem[]; + return (T*)__smem; + } + }; + + // specialize for double to avoid unaligned memory access compile errors + template<> struct DynamicSharedMem + { + __device__ __forceinline__ operator double*() + { + extern __shared__ double __smem_d[]; + return (double*)__smem_d; + } + + __device__ __forceinline__ operator const double*() const + { + extern __shared__ double __smem_d[]; + return (double*)__smem_d; + } + }; +}}} + +//! @endcond + +#endif // OPENCV_CUDA_DYNAMIC_SMEM_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/emulation.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/emulation.hpp new file mode 100644 index 00000000..17dc1171 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/emulation.hpp @@ -0,0 +1,269 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
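// Hedged usage sketch for DynamicSharedMem above; the kernel and buffer names are
// illustrative, not part of the patch. The conversion operator exposes the kernel's
// dynamically sized shared memory ("extern __shared__") as a typed pointer, so one
// wrapper serves any element type without redeclaring the extern array per type.
__global__ void reverseBlock(const float* src, float* dst, int n)
{
    cv::cuda::device::DynamicSharedMem<float> smem;
    float* buf = smem;                  // typed view of the dynamic shared memory block

    int i = threadIdx.x;
    if (i < n) buf[i] = src[i];
    __syncthreads();
    if (i < n) dst[i] = buf[n - 1 - i];
}
// The launch supplies the dynamic shared size as the third configuration argument:
//   reverseBlock<<<1, 256, 256 * sizeof(float)>>>(d_src, d_dst, 256);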
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_EMULATION_HPP_ +#define OPENCV_CUDA_EMULATION_HPP_ + +#include "common.hpp" +#include "warp_reduce.hpp" + +/** @file + * @deprecated Use @ref cudev instead. + */ + +//! @cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + struct Emulation + { + + static __device__ __forceinline__ int syncthreadsOr(int pred) + { +#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 200) + // just campilation stab + return 0; +#else + return __syncthreads_or(pred); +#endif + } + + template + static __forceinline__ __device__ int Ballot(int predicate) + { +#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ >= 200) + return __ballot(predicate); +#else + __shared__ volatile int cta_buffer[CTA_SIZE]; + + int tid = threadIdx.x; + cta_buffer[tid] = predicate ? (1 << (tid & 31)) : 0; + return warp_reduce(cta_buffer); +#endif + } + + struct smem + { + enum { TAG_MASK = (1U << ( (sizeof(unsigned int) << 3) - 5U)) - 1U }; + + template + static __device__ __forceinline__ T atomicInc(T* address, T val) + { +#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 120) + T count; + unsigned int tag = threadIdx.x << ( (sizeof(unsigned int) << 3) - 5U); + do + { + count = *address & TAG_MASK; + count = tag | (count + 1); + *address = count; + } while (*address != count); + + return (count & TAG_MASK) - 1; +#else + return ::atomicInc(address, val); +#endif + } + + template + static __device__ __forceinline__ T atomicAdd(T* address, T val) + { +#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 120) + T count; + unsigned int tag = threadIdx.x << ( (sizeof(unsigned int) << 3) - 5U); + do + { + count = *address & TAG_MASK; + count = tag | (count + val); + *address = count; + } while (*address != count); + + return (count & TAG_MASK) - val; +#else + return ::atomicAdd(address, val); +#endif + } + + template + static __device__ __forceinline__ T atomicMin(T* address, T val) + { +#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 120) + T count = ::min(*address, val); + do + { + *address = count; + } while (*address > count); + + return count; +#else + return ::atomicMin(address, val); +#endif + } + }; // struct cmem + + struct glob + { + static __device__ __forceinline__ int atomicAdd(int* address, int val) + { + return ::atomicAdd(address, val); + } + static __device__ __forceinline__ unsigned int atomicAdd(unsigned int* address, unsigned int val) + { + return ::atomicAdd(address, val); + } + static __device__ __forceinline__ float atomicAdd(float* address, float val) + { + #if __CUDA_ARCH__ >= 200 + return ::atomicAdd(address, val); + #else + int* address_as_i = (int*) address; + int old = *address_as_i, assumed; + do { + assumed = old; + old = ::atomicCAS(address_as_i, assumed, + __float_as_int(val + 
__int_as_float(assumed))); + } while (assumed != old); + return __int_as_float(old); + #endif + } + static __device__ __forceinline__ double atomicAdd(double* address, double val) + { + #if __CUDA_ARCH__ >= 130 + unsigned long long int* address_as_ull = (unsigned long long int*) address; + unsigned long long int old = *address_as_ull, assumed; + do { + assumed = old; + old = ::atomicCAS(address_as_ull, assumed, + __double_as_longlong(val + __longlong_as_double(assumed))); + } while (assumed != old); + return __longlong_as_double(old); + #else + CV_UNUSED(address); + CV_UNUSED(val); + return 0.0; + #endif + } + + static __device__ __forceinline__ int atomicMin(int* address, int val) + { + return ::atomicMin(address, val); + } + static __device__ __forceinline__ float atomicMin(float* address, float val) + { + #if __CUDA_ARCH__ >= 120 + int* address_as_i = (int*) address; + int old = *address_as_i, assumed; + do { + assumed = old; + old = ::atomicCAS(address_as_i, assumed, + __float_as_int(::fminf(val, __int_as_float(assumed)))); + } while (assumed != old); + return __int_as_float(old); + #else + CV_UNUSED(address); + CV_UNUSED(val); + return 0.0f; + #endif + } + static __device__ __forceinline__ double atomicMin(double* address, double val) + { + #if __CUDA_ARCH__ >= 130 + unsigned long long int* address_as_ull = (unsigned long long int*) address; + unsigned long long int old = *address_as_ull, assumed; + do { + assumed = old; + old = ::atomicCAS(address_as_ull, assumed, + __double_as_longlong(::fmin(val, __longlong_as_double(assumed)))); + } while (assumed != old); + return __longlong_as_double(old); + #else + CV_UNUSED(address); + CV_UNUSED(val); + return 0.0; + #endif + } + + static __device__ __forceinline__ int atomicMax(int* address, int val) + { + return ::atomicMax(address, val); + } + static __device__ __forceinline__ float atomicMax(float* address, float val) + { + #if __CUDA_ARCH__ >= 120 + int* address_as_i = (int*) address; + int old = *address_as_i, assumed; + do { + assumed = old; + old = ::atomicCAS(address_as_i, assumed, + __float_as_int(::fmaxf(val, __int_as_float(assumed)))); + } while (assumed != old); + return __int_as_float(old); + #else + CV_UNUSED(address); + CV_UNUSED(val); + return 0.0f; + #endif + } + static __device__ __forceinline__ double atomicMax(double* address, double val) + { + #if __CUDA_ARCH__ >= 130 + unsigned long long int* address_as_ull = (unsigned long long int*) address; + unsigned long long int old = *address_as_ull, assumed; + do { + assumed = old; + old = ::atomicCAS(address_as_ull, assumed, + __double_as_longlong(::fmax(val, __longlong_as_double(assumed)))); + } while (assumed != old); + return __longlong_as_double(old); + #else + CV_UNUSED(address); + CV_UNUSED(val); + return 0.0; + #endif + } + }; + }; //struct Emulation +}}} // namespace cv { namespace cuda { namespace cudev + +//! @endcond + +#endif /* OPENCV_CUDA_EMULATION_HPP_ */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/filters.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/filters.hpp new file mode 100644 index 00000000..bb942122 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/filters.hpp @@ -0,0 +1,286 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
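// Hedged usage sketch for Emulation::glob above; the kernel name is illustrative, not
// part of the patch. For double, atomicAdd is emulated with the atomicCAS loop shown
// in the header: the 64-bit bit pattern is reread, the sum recomputed, and the swap
// retried until no other thread has intervened.
__global__ void sumAll(const double* src, int n, double* total)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        cv::cuda::device::Emulation::glob::atomicAdd(total, src[i]);
}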
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_FILTERS_HPP +#define OPENCV_CUDA_FILTERS_HPP + +#include "saturate_cast.hpp" +#include "vec_traits.hpp" +#include "vec_math.hpp" +#include "type_traits.hpp" + +/** @file + * @deprecated Use @ref cudev instead. + */ + +//! 
@cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + template struct PointFilter + { + typedef typename Ptr2D::elem_type elem_type; + typedef float index_type; + + explicit __host__ __device__ __forceinline__ PointFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f) + : src(src_) + { + CV_UNUSED(fx); + CV_UNUSED(fy); + } + + __device__ __forceinline__ elem_type operator ()(float y, float x) const + { + return src(__float2int_rz(y), __float2int_rz(x)); + } + + Ptr2D src; + }; + + template struct LinearFilter + { + typedef typename Ptr2D::elem_type elem_type; + typedef float index_type; + + explicit __host__ __device__ __forceinline__ LinearFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f) + : src(src_) + { + CV_UNUSED(fx); + CV_UNUSED(fy); + } + __device__ __forceinline__ elem_type operator ()(float y, float x) const + { + typedef typename TypeVec::cn>::vec_type work_type; + + work_type out = VecTraits::all(0); + + const int x1 = __float2int_rd(x); + const int y1 = __float2int_rd(y); + const int x2 = x1 + 1; + const int y2 = y1 + 1; + + elem_type src_reg = src(y1, x1); + out = out + src_reg * ((x2 - x) * (y2 - y)); + + src_reg = src(y1, x2); + out = out + src_reg * ((x - x1) * (y2 - y)); + + src_reg = src(y2, x1); + out = out + src_reg * ((x2 - x) * (y - y1)); + + src_reg = src(y2, x2); + out = out + src_reg * ((x - x1) * (y - y1)); + + return saturate_cast(out); + } + + Ptr2D src; + }; + + template struct CubicFilter + { + typedef typename Ptr2D::elem_type elem_type; + typedef float index_type; + typedef typename TypeVec::cn>::vec_type work_type; + + explicit __host__ __device__ __forceinline__ CubicFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f) + : src(src_) + { + CV_UNUSED(fx); + CV_UNUSED(fy); + } + + static __device__ __forceinline__ float bicubicCoeff(float x_) + { + float x = fabsf(x_); + if (x <= 1.0f) + { + return x * x * (1.5f * x - 2.5f) + 1.0f; + } + else if (x < 2.0f) + { + return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f; + } + else + { + return 0.0f; + } + } + + __device__ elem_type operator ()(float y, float x) const + { + const float xmin = ::ceilf(x - 2.0f); + const float xmax = ::floorf(x + 2.0f); + + const float ymin = ::ceilf(y - 2.0f); + const float ymax = ::floorf(y + 2.0f); + + work_type sum = VecTraits::all(0); + float wsum = 0.0f; + + for (float cy = ymin; cy <= ymax; cy += 1.0f) + { + for (float cx = xmin; cx <= xmax; cx += 1.0f) + { + const float w = bicubicCoeff(x - cx) * bicubicCoeff(y - cy); + sum = sum + w * src(__float2int_rd(cy), __float2int_rd(cx)); + wsum += w; + } + } + + work_type res = (!wsum)? 
VecTraits::all(0) : sum / wsum; + + return saturate_cast(res); + } + + Ptr2D src; + }; + // for integer scaling + template struct IntegerAreaFilter + { + typedef typename Ptr2D::elem_type elem_type; + typedef float index_type; + + explicit __host__ __device__ __forceinline__ IntegerAreaFilter(const Ptr2D& src_, float scale_x_, float scale_y_) + : src(src_), scale_x(scale_x_), scale_y(scale_y_), scale(1.f / (scale_x * scale_y)) {} + + __device__ __forceinline__ elem_type operator ()(float y, float x) const + { + float fsx1 = x * scale_x; + float fsx2 = fsx1 + scale_x; + + int sx1 = __float2int_ru(fsx1); + int sx2 = __float2int_rd(fsx2); + + float fsy1 = y * scale_y; + float fsy2 = fsy1 + scale_y; + + int sy1 = __float2int_ru(fsy1); + int sy2 = __float2int_rd(fsy2); + + typedef typename TypeVec::cn>::vec_type work_type; + work_type out = VecTraits::all(0.f); + + for(int dy = sy1; dy < sy2; ++dy) + for(int dx = sx1; dx < sx2; ++dx) + { + out = out + src(dy, dx) * scale; + } + + return saturate_cast(out); + } + + Ptr2D src; + float scale_x, scale_y ,scale; + }; + + template struct AreaFilter + { + typedef typename Ptr2D::elem_type elem_type; + typedef float index_type; + + explicit __host__ __device__ __forceinline__ AreaFilter(const Ptr2D& src_, float scale_x_, float scale_y_) + : src(src_), scale_x(scale_x_), scale_y(scale_y_){} + + __device__ __forceinline__ elem_type operator ()(float y, float x) const + { + float fsx1 = x * scale_x; + float fsx2 = fsx1 + scale_x; + + int sx1 = __float2int_ru(fsx1); + int sx2 = __float2int_rd(fsx2); + + float fsy1 = y * scale_y; + float fsy2 = fsy1 + scale_y; + + int sy1 = __float2int_ru(fsy1); + int sy2 = __float2int_rd(fsy2); + + float scale = 1.f / (fminf(scale_x, src.width - fsx1) * fminf(scale_y, src.height - fsy1)); + + typedef typename TypeVec::cn>::vec_type work_type; + work_type out = VecTraits::all(0.f); + + for (int dy = sy1; dy < sy2; ++dy) + { + for (int dx = sx1; dx < sx2; ++dx) + out = out + src(dy, dx) * scale; + + if (sx1 > fsx1) + out = out + src(dy, (sx1 -1) ) * ((sx1 - fsx1) * scale); + + if (sx2 < fsx2) + out = out + src(dy, sx2) * ((fsx2 -sx2) * scale); + } + + if (sy1 > fsy1) + for (int dx = sx1; dx < sx2; ++dx) + out = out + src( (sy1 - 1) , dx) * ((sy1 -fsy1) * scale); + + if (sy2 < fsy2) + for (int dx = sx1; dx < sx2; ++dx) + out = out + src(sy2, dx) * ((fsy2 -sy2) * scale); + + if ((sy1 > fsy1) && (sx1 > fsx1)) + out = out + src( (sy1 - 1) , (sx1 - 1)) * ((sy1 -fsy1) * (sx1 -fsx1) * scale); + + if ((sy1 > fsy1) && (sx2 < fsx2)) + out = out + src( (sy1 - 1) , sx2) * ((sy1 -fsy1) * (fsx2 -sx2) * scale); + + if ((sy2 < fsy2) && (sx2 < fsx2)) + out = out + src(sy2, sx2) * ((fsy2 -sy2) * (fsx2 -sx2) * scale); + + if ((sy2 < fsy2) && (sx1 > fsx1)) + out = out + src(sy2, (sx1 - 1)) * ((fsy2 -sy2) * (sx1 -fsx1) * scale); + + return saturate_cast(out); + } + + Ptr2D src; + float scale_x, scale_y; + int width, haight; + }; +}}} // namespace cv { namespace cuda { namespace cudev + +//! @endcond + +#endif // OPENCV_CUDA_FILTERS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/funcattrib.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/funcattrib.hpp new file mode 100644 index 00000000..f5820804 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/funcattrib.hpp @@ -0,0 +1,79 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
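// Minimal host-side sketch, not part of the patch, of the bilinear weighting that
// LinearFilter above performs on the device. Truncation stands in for __float2int_rd
// (valid for non-negative coordinates); the four neighbours are blended with weights
// proportional to the opposite sub-pixel areas, and the weights sum to one.
static float bilinearSketch(const float* img, int stride, float y, float x)
{
    const int x1 = (int)x, y1 = (int)y;
    const int x2 = x1 + 1, y2 = y1 + 1;

    return img[y1 * stride + x1] * ((x2 - x) * (y2 - y))
         + img[y1 * stride + x2] * ((x - x1) * (y2 - y))
         + img[y2 * stride + x1] * ((x2 - x) * (y - y1))
         + img[y2 * stride + x2] * ((x - x1) * (y - y1));
}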
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_DEVICE_FUNCATTRIB_HPP +#define OPENCV_CUDA_DEVICE_FUNCATTRIB_HPP + +#include + +/** @file + * @deprecated Use @ref cudev instead. + */ + +//! @cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + template + void printFuncAttrib(Func& func) + { + + cudaFuncAttributes attrs; + cudaFuncGetAttributes(&attrs, func); + + printf("=== Function stats ===\n"); + printf("Name: \n"); + printf("sharedSizeBytes = %d\n", attrs.sharedSizeBytes); + printf("constSizeBytes = %d\n", attrs.constSizeBytes); + printf("localSizeBytes = %d\n", attrs.localSizeBytes); + printf("maxThreadsPerBlock = %d\n", attrs.maxThreadsPerBlock); + printf("numRegs = %d\n", attrs.numRegs); + printf("ptxVersion = %d\n", attrs.ptxVersion); + printf("binaryVersion = %d\n", attrs.binaryVersion); + printf("\n"); + fflush(stdout); + } +}}} // namespace cv { namespace cuda { namespace cudev + +//! @endcond + +#endif /* OPENCV_CUDA_DEVICE_FUNCATTRIB_HPP */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/functional.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/functional.hpp new file mode 100644 index 00000000..3b531a13 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/functional.hpp @@ -0,0 +1,810 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
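// Hedged usage sketch for printFuncAttrib above; the kernel is illustrative, not part
// of the patch. The kernel symbol is passed directly; cudaFuncGetAttributes fills in
// its register count, static shared/constant/local memory usage and launch limits,
// which the helper then prints to stdout.
__global__ void dummyKernel(int* p) { if (p) p[threadIdx.x] = (int)threadIdx.x; }

void reportDummyKernelStats()
{
    cv::cuda::device::printFuncAttrib(dummyKernel);
}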
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_FUNCTIONAL_HPP +#define OPENCV_CUDA_FUNCTIONAL_HPP + +#include +#include "saturate_cast.hpp" +#include "vec_traits.hpp" +#include "type_traits.hpp" + +/** @file + * @deprecated Use @ref cudev instead. + */ + +//! 
@cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + // Function Objects +#ifdef CV_CXX11 + template struct unary_function + { + typedef Argument argument_type; + typedef Result result_type; + }; + template struct binary_function + { + typedef Argument1 first_argument_type; + typedef Argument2 second_argument_type; + typedef Result result_type; + }; +#else + template struct unary_function : public std::unary_function {}; + template struct binary_function : public std::binary_function {}; +#endif + + // Arithmetic Operations + template struct plus : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a + b; + } + __host__ __device__ __forceinline__ plus() {} + __host__ __device__ __forceinline__ plus(const plus&) {} + }; + + template struct minus : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a - b; + } + __host__ __device__ __forceinline__ minus() {} + __host__ __device__ __forceinline__ minus(const minus&) {} + }; + + template struct multiplies : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a * b; + } + __host__ __device__ __forceinline__ multiplies() {} + __host__ __device__ __forceinline__ multiplies(const multiplies&) {} + }; + + template struct divides : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a / b; + } + __host__ __device__ __forceinline__ divides() {} + __host__ __device__ __forceinline__ divides(const divides&) {} + }; + + template struct modulus : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a % b; + } + __host__ __device__ __forceinline__ modulus() {} + __host__ __device__ __forceinline__ modulus(const modulus&) {} + }; + + template struct negate : unary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a) const + { + return -a; + } + __host__ __device__ __forceinline__ negate() {} + __host__ __device__ __forceinline__ negate(const negate&) {} + }; + + // Comparison Operations + template struct equal_to : binary_function + { + __device__ __forceinline__ bool operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a == b; + } + __host__ __device__ __forceinline__ equal_to() {} + __host__ __device__ __forceinline__ equal_to(const equal_to&) {} + }; + + template struct not_equal_to : binary_function + { + __device__ __forceinline__ bool operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a != b; + } + __host__ __device__ __forceinline__ not_equal_to() {} + __host__ __device__ __forceinline__ not_equal_to(const not_equal_to&) {} + }; + + template struct greater : binary_function + { + __device__ __forceinline__ bool operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a > b; + } + __host__ __device__ __forceinline__ greater() {} + __host__ __device__ __forceinline__ greater(const greater&) {} + }; + + template struct less : binary_function + { + __device__ __forceinline__ bool operator ()(typename 
TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a < b; + } + __host__ __device__ __forceinline__ less() {} + __host__ __device__ __forceinline__ less(const less&) {} + }; + + template struct greater_equal : binary_function + { + __device__ __forceinline__ bool operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a >= b; + } + __host__ __device__ __forceinline__ greater_equal() {} + __host__ __device__ __forceinline__ greater_equal(const greater_equal&) {} + }; + + template struct less_equal : binary_function + { + __device__ __forceinline__ bool operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a <= b; + } + __host__ __device__ __forceinline__ less_equal() {} + __host__ __device__ __forceinline__ less_equal(const less_equal&) {} + }; + + // Logical Operations + template struct logical_and : binary_function + { + __device__ __forceinline__ bool operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a && b; + } + __host__ __device__ __forceinline__ logical_and() {} + __host__ __device__ __forceinline__ logical_and(const logical_and&) {} + }; + + template struct logical_or : binary_function + { + __device__ __forceinline__ bool operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a || b; + } + __host__ __device__ __forceinline__ logical_or() {} + __host__ __device__ __forceinline__ logical_or(const logical_or&) {} + }; + + template struct logical_not : unary_function + { + __device__ __forceinline__ bool operator ()(typename TypeTraits::ParameterType a) const + { + return !a; + } + __host__ __device__ __forceinline__ logical_not() {} + __host__ __device__ __forceinline__ logical_not(const logical_not&) {} + }; + + // Bitwise Operations + template struct bit_and : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a & b; + } + __host__ __device__ __forceinline__ bit_and() {} + __host__ __device__ __forceinline__ bit_and(const bit_and&) {} + }; + + template struct bit_or : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a | b; + } + __host__ __device__ __forceinline__ bit_or() {} + __host__ __device__ __forceinline__ bit_or(const bit_or&) {} + }; + + template struct bit_xor : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType a, + typename TypeTraits::ParameterType b) const + { + return a ^ b; + } + __host__ __device__ __forceinline__ bit_xor() {} + __host__ __device__ __forceinline__ bit_xor(const bit_xor&) {} + }; + + template struct bit_not : unary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType v) const + { + return ~v; + } + __host__ __device__ __forceinline__ bit_not() {} + __host__ __device__ __forceinline__ bit_not(const bit_not&) {} + }; + + // Generalized Identity Operations + template struct identity : unary_function + { + __device__ __forceinline__ typename TypeTraits::ParameterType operator()(typename TypeTraits::ParameterType x) const + { + return x; + } + __host__ __device__ __forceinline__ identity() {} + __host__ __device__ __forceinline__ identity(const identity&) {} + }; + + template struct project1st : 
binary_function + { + __device__ __forceinline__ typename TypeTraits::ParameterType operator()(typename TypeTraits::ParameterType lhs, typename TypeTraits::ParameterType rhs) const + { + return lhs; + } + __host__ __device__ __forceinline__ project1st() {} + __host__ __device__ __forceinline__ project1st(const project1st&) {} + }; + + template struct project2nd : binary_function + { + __device__ __forceinline__ typename TypeTraits::ParameterType operator()(typename TypeTraits::ParameterType lhs, typename TypeTraits::ParameterType rhs) const + { + return rhs; + } + __host__ __device__ __forceinline__ project2nd() {} + __host__ __device__ __forceinline__ project2nd(const project2nd&) {} + }; + + // Min/Max Operations + +#define OPENCV_CUDA_IMPLEMENT_MINMAX(name, type, op) \ + template <> struct name : binary_function \ + { \ + __device__ __forceinline__ type operator()(type lhs, type rhs) const {return op(lhs, rhs);} \ + __host__ __device__ __forceinline__ name() {}\ + __host__ __device__ __forceinline__ name(const name&) {}\ + }; + + template struct maximum : binary_function + { + __device__ __forceinline__ T operator()(typename TypeTraits::ParameterType lhs, typename TypeTraits::ParameterType rhs) const + { + return max(lhs, rhs); + } + __host__ __device__ __forceinline__ maximum() {} + __host__ __device__ __forceinline__ maximum(const maximum&) {} + }; + + OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, uchar, ::max) + OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, schar, ::max) + OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, char, ::max) + OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, ushort, ::max) + OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, short, ::max) + OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, int, ::max) + OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, uint, ::max) + OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, float, ::fmax) + OPENCV_CUDA_IMPLEMENT_MINMAX(maximum, double, ::fmax) + + template struct minimum : binary_function + { + __device__ __forceinline__ T operator()(typename TypeTraits::ParameterType lhs, typename TypeTraits::ParameterType rhs) const + { + return min(lhs, rhs); + } + __host__ __device__ __forceinline__ minimum() {} + __host__ __device__ __forceinline__ minimum(const minimum&) {} + }; + + OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, uchar, ::min) + OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, schar, ::min) + OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, char, ::min) + OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, ushort, ::min) + OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, short, ::min) + OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, int, ::min) + OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, uint, ::min) + OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, float, ::fmin) + OPENCV_CUDA_IMPLEMENT_MINMAX(minimum, double, ::fmin) + +#undef OPENCV_CUDA_IMPLEMENT_MINMAX + + // Math functions + + template struct abs_func : unary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType x) const + { + return abs(x); + } + + __host__ __device__ __forceinline__ abs_func() {} + __host__ __device__ __forceinline__ abs_func(const abs_func&) {} + }; + template <> struct abs_func : unary_function + { + __device__ __forceinline__ unsigned char operator ()(unsigned char x) const + { + return x; + } + + __host__ __device__ __forceinline__ abs_func() {} + __host__ __device__ __forceinline__ abs_func(const abs_func&) {} + }; + template <> struct abs_func : unary_function + { + __device__ __forceinline__ signed char operator ()(signed char x) const + { + return ::abs((int)x); + } + + __host__ __device__ __forceinline__ abs_func() {} + __host__ __device__ 
__forceinline__ abs_func(const abs_func&) {} + }; + template <> struct abs_func : unary_function + { + __device__ __forceinline__ char operator ()(char x) const + { + return ::abs((int)x); + } + + __host__ __device__ __forceinline__ abs_func() {} + __host__ __device__ __forceinline__ abs_func(const abs_func&) {} + }; + template <> struct abs_func : unary_function + { + __device__ __forceinline__ unsigned short operator ()(unsigned short x) const + { + return x; + } + + __host__ __device__ __forceinline__ abs_func() {} + __host__ __device__ __forceinline__ abs_func(const abs_func&) {} + }; + template <> struct abs_func : unary_function + { + __device__ __forceinline__ short operator ()(short x) const + { + return ::abs((int)x); + } + + __host__ __device__ __forceinline__ abs_func() {} + __host__ __device__ __forceinline__ abs_func(const abs_func&) {} + }; + template <> struct abs_func : unary_function + { + __device__ __forceinline__ unsigned int operator ()(unsigned int x) const + { + return x; + } + + __host__ __device__ __forceinline__ abs_func() {} + __host__ __device__ __forceinline__ abs_func(const abs_func&) {} + }; + template <> struct abs_func : unary_function + { + __device__ __forceinline__ int operator ()(int x) const + { + return ::abs(x); + } + + __host__ __device__ __forceinline__ abs_func() {} + __host__ __device__ __forceinline__ abs_func(const abs_func&) {} + }; + template <> struct abs_func : unary_function + { + __device__ __forceinline__ float operator ()(float x) const + { + return ::fabsf(x); + } + + __host__ __device__ __forceinline__ abs_func() {} + __host__ __device__ __forceinline__ abs_func(const abs_func&) {} + }; + template <> struct abs_func : unary_function + { + __device__ __forceinline__ double operator ()(double x) const + { + return ::fabs(x); + } + + __host__ __device__ __forceinline__ abs_func() {} + __host__ __device__ __forceinline__ abs_func(const abs_func&) {} + }; + +#define OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(name, func) \ + template struct name ## _func : unary_function \ + { \ + __device__ __forceinline__ float operator ()(typename TypeTraits::ParameterType v) const \ + { \ + return func ## f(v); \ + } \ + __host__ __device__ __forceinline__ name ## _func() {} \ + __host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \ + }; \ + template <> struct name ## _func : unary_function \ + { \ + __device__ __forceinline__ double operator ()(double v) const \ + { \ + return func(v); \ + } \ + __host__ __device__ __forceinline__ name ## _func() {} \ + __host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \ + }; + +#define OPENCV_CUDA_IMPLEMENT_BIN_FUNCTOR(name, func) \ + template struct name ## _func : binary_function \ + { \ + __device__ __forceinline__ float operator ()(typename TypeTraits::ParameterType v1, typename TypeTraits::ParameterType v2) const \ + { \ + return func ## f(v1, v2); \ + } \ + __host__ __device__ __forceinline__ name ## _func() {} \ + __host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \ + }; \ + template <> struct name ## _func : binary_function \ + { \ + __device__ __forceinline__ double operator ()(double v1, double v2) const \ + { \ + return func(v1, v2); \ + } \ + __host__ __device__ __forceinline__ name ## _func() {} \ + __host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \ + }; + + OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(sqrt, ::sqrt) + OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(exp, ::exp) + OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(exp2, ::exp2) + 
OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(exp10, ::exp10) + OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(log, ::log) + OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(log2, ::log2) + OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(log10, ::log10) + OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(sin, ::sin) + OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(cos, ::cos) + OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(tan, ::tan) + OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(asin, ::asin) + OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(acos, ::acos) + OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(atan, ::atan) + OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(sinh, ::sinh) + OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(cosh, ::cosh) + OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(tanh, ::tanh) + OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(asinh, ::asinh) + OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(acosh, ::acosh) + OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR(atanh, ::atanh) + + OPENCV_CUDA_IMPLEMENT_BIN_FUNCTOR(hypot, ::hypot) + OPENCV_CUDA_IMPLEMENT_BIN_FUNCTOR(atan2, ::atan2) + OPENCV_CUDA_IMPLEMENT_BIN_FUNCTOR(pow, ::pow) + + #undef OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR + #undef OPENCV_CUDA_IMPLEMENT_UN_FUNCTOR_NO_DOUBLE + #undef OPENCV_CUDA_IMPLEMENT_BIN_FUNCTOR + + template struct hypot_sqr_func : binary_function + { + __device__ __forceinline__ T operator ()(typename TypeTraits::ParameterType src1, typename TypeTraits::ParameterType src2) const + { + return src1 * src1 + src2 * src2; + } + __host__ __device__ __forceinline__ hypot_sqr_func() {} + __host__ __device__ __forceinline__ hypot_sqr_func(const hypot_sqr_func&) {} + }; + + // Saturate Cast Functor + template struct saturate_cast_func : unary_function + { + __device__ __forceinline__ D operator ()(typename TypeTraits::ParameterType v) const + { + return saturate_cast(v); + } + __host__ __device__ __forceinline__ saturate_cast_func() {} + __host__ __device__ __forceinline__ saturate_cast_func(const saturate_cast_func&) {} + }; + + // Threshold Functors + template struct thresh_binary_func : unary_function + { + __host__ __device__ __forceinline__ thresh_binary_func(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {} + + __device__ __forceinline__ T operator()(typename TypeTraits::ParameterType src) const + { + return (src > thresh) * maxVal; + } + + __host__ __device__ __forceinline__ thresh_binary_func() {} + __host__ __device__ __forceinline__ thresh_binary_func(const thresh_binary_func& other) + : thresh(other.thresh), maxVal(other.maxVal) {} + + T thresh; + T maxVal; + }; + + template struct thresh_binary_inv_func : unary_function + { + __host__ __device__ __forceinline__ thresh_binary_inv_func(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {} + + __device__ __forceinline__ T operator()(typename TypeTraits::ParameterType src) const + { + return (src <= thresh) * maxVal; + } + + __host__ __device__ __forceinline__ thresh_binary_inv_func() {} + __host__ __device__ __forceinline__ thresh_binary_inv_func(const thresh_binary_inv_func& other) + : thresh(other.thresh), maxVal(other.maxVal) {} + + T thresh; + T maxVal; + }; + + template struct thresh_trunc_func : unary_function + { + explicit __host__ __device__ __forceinline__ thresh_trunc_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {CV_UNUSED(maxVal_);} + + __device__ __forceinline__ T operator()(typename TypeTraits::ParameterType src) const + { + return minimum()(src, thresh); + } + + __host__ __device__ __forceinline__ thresh_trunc_func() {} + __host__ __device__ __forceinline__ thresh_trunc_func(const thresh_trunc_func& other) + : thresh(other.thresh) {} + + T thresh; + }; + + template struct thresh_to_zero_func : unary_function + { + explicit __host__ 
__device__ __forceinline__ thresh_to_zero_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {CV_UNUSED(maxVal_);} + + __device__ __forceinline__ T operator()(typename TypeTraits::ParameterType src) const + { + return (src > thresh) * src; + } + + __host__ __device__ __forceinline__ thresh_to_zero_func() {} + __host__ __device__ __forceinline__ thresh_to_zero_func(const thresh_to_zero_func& other) + : thresh(other.thresh) {} + + T thresh; + }; + + template struct thresh_to_zero_inv_func : unary_function + { + explicit __host__ __device__ __forceinline__ thresh_to_zero_inv_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {CV_UNUSED(maxVal_);} + + __device__ __forceinline__ T operator()(typename TypeTraits::ParameterType src) const + { + return (src <= thresh) * src; + } + + __host__ __device__ __forceinline__ thresh_to_zero_inv_func() {} + __host__ __device__ __forceinline__ thresh_to_zero_inv_func(const thresh_to_zero_inv_func& other) + : thresh(other.thresh) {} + + T thresh; + }; + + // Function Object Adaptors + template struct unary_negate : unary_function + { + explicit __host__ __device__ __forceinline__ unary_negate(const Predicate& p) : pred(p) {} + + __device__ __forceinline__ bool operator()(typename TypeTraits::ParameterType x) const + { + return !pred(x); + } + + __host__ __device__ __forceinline__ unary_negate() {} + __host__ __device__ __forceinline__ unary_negate(const unary_negate& other) : pred(other.pred) {} + + Predicate pred; + }; + + template __host__ __device__ __forceinline__ unary_negate not1(const Predicate& pred) + { + return unary_negate(pred); + } + + template struct binary_negate : binary_function + { + explicit __host__ __device__ __forceinline__ binary_negate(const Predicate& p) : pred(p) {} + + __device__ __forceinline__ bool operator()(typename TypeTraits::ParameterType x, + typename TypeTraits::ParameterType y) const + { + return !pred(x,y); + } + + __host__ __device__ __forceinline__ binary_negate() {} + __host__ __device__ __forceinline__ binary_negate(const binary_negate& other) : pred(other.pred) {} + + Predicate pred; + }; + + template __host__ __device__ __forceinline__ binary_negate not2(const BinaryPredicate& pred) + { + return binary_negate(pred); + } + + template struct binder1st : unary_function + { + __host__ __device__ __forceinline__ binder1st(const Op& op_, const typename Op::first_argument_type& arg1_) : op(op_), arg1(arg1_) {} + + __device__ __forceinline__ typename Op::result_type operator ()(typename TypeTraits::ParameterType a) const + { + return op(arg1, a); + } + + __host__ __device__ __forceinline__ binder1st() {} + __host__ __device__ __forceinline__ binder1st(const binder1st& other) : op(other.op), arg1(other.arg1) {} + + Op op; + typename Op::first_argument_type arg1; + }; + + template __host__ __device__ __forceinline__ binder1st bind1st(const Op& op, const T& x) + { + return binder1st(op, typename Op::first_argument_type(x)); + } + + template struct binder2nd : unary_function + { + __host__ __device__ __forceinline__ binder2nd(const Op& op_, const typename Op::second_argument_type& arg2_) : op(op_), arg2(arg2_) {} + + __forceinline__ __device__ typename Op::result_type operator ()(typename TypeTraits::ParameterType a) const + { + return op(a, arg2); + } + + __host__ __device__ __forceinline__ binder2nd() {} + __host__ __device__ __forceinline__ binder2nd(const binder2nd& other) : op(other.op), arg2(other.arg2) {} + + Op op; + typename Op::second_argument_type arg2; + }; + + template __host__ __device__ __forceinline__ binder2nd 
bind2nd(const Op& op, const T& x) + { + return binder2nd(op, typename Op::second_argument_type(x)); + } + + // Functor Traits + template struct IsUnaryFunction + { + typedef char Yes; + struct No {Yes a[2];}; + + template static Yes check(unary_function); + static No check(...); + + static F makeF(); + + enum { value = (sizeof(check(makeF())) == sizeof(Yes)) }; + }; + + template struct IsBinaryFunction + { + typedef char Yes; + struct No {Yes a[2];}; + + template static Yes check(binary_function); + static No check(...); + + static F makeF(); + + enum { value = (sizeof(check(makeF())) == sizeof(Yes)) }; + }; + + namespace functional_detail + { + template struct UnOpShift { enum { shift = 1 }; }; + template struct UnOpShift { enum { shift = 4 }; }; + template struct UnOpShift { enum { shift = 2 }; }; + + template struct DefaultUnaryShift + { + enum { shift = UnOpShift::shift }; + }; + + template struct BinOpShift { enum { shift = 1 }; }; + template struct BinOpShift { enum { shift = 4 }; }; + template struct BinOpShift { enum { shift = 2 }; }; + + template struct DefaultBinaryShift + { + enum { shift = BinOpShift::shift }; + }; + + template ::value> struct ShiftDispatcher; + template struct ShiftDispatcher + { + enum { shift = DefaultUnaryShift::shift }; + }; + template struct ShiftDispatcher + { + enum { shift = DefaultBinaryShift::shift }; + }; + } + + template struct DefaultTransformShift + { + enum { shift = functional_detail::ShiftDispatcher::shift }; + }; + + template struct DefaultTransformFunctorTraits + { + enum { simple_block_dim_x = 16 }; + enum { simple_block_dim_y = 16 }; + + enum { smart_block_dim_x = 16 }; + enum { smart_block_dim_y = 16 }; + enum { smart_shift = DefaultTransformShift::shift }; + }; + + template struct TransformFunctorTraits : DefaultTransformFunctorTraits {}; + +#define OPENCV_CUDA_TRANSFORM_FUNCTOR_TRAITS(type) \ + template <> struct TransformFunctorTraits< type > : DefaultTransformFunctorTraits< type > +}}} // namespace cv { namespace cuda { namespace cudev + +//! @endcond + +#endif // OPENCV_CUDA_FUNCTIONAL_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/limits.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/limits.hpp new file mode 100644 index 00000000..7e15ed62 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/limits.hpp @@ -0,0 +1,128 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
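// Hedged usage sketch for the functor and binder machinery above; the kernel name is
// illustrative, not part of the patch. The functors are plain device-callable objects,
// so bind2nd can fix the second argument of a binary functor and yield a unary
// "add a constant" operation applied per element.
__global__ void addConstant(const int* src, int* dst, int n, int c)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
    {
        cv::cuda::device::binder2nd< cv::cuda::device::plus<int> > op =
            cv::cuda::device::bind2nd(cv::cuda::device::plus<int>(), c);
        dst[i] = op(src[i]);    // src[i] + c
    }
}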
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef OPENCV_CUDA_LIMITS_HPP
+#define OPENCV_CUDA_LIMITS_HPP
+
+#include <limits.h>
+#include <float.h>
+#include "common.hpp"
+
+/** @file
+ * @deprecated Use @ref cudev instead.
+ */
+
+//! @cond IGNORED
+
+namespace cv { namespace cuda { namespace device
+{
+template <class T> struct numeric_limits;
+
+template <> struct numeric_limits<bool>
+{
+    __device__ __forceinline__ static bool min() { return false; }
+    __device__ __forceinline__ static bool max() { return true; }
+    static const bool is_signed = false;
+};
+
+template <> struct numeric_limits<signed char>
+{
+    __device__ __forceinline__ static signed char min() { return SCHAR_MIN; }
+    __device__ __forceinline__ static signed char max() { return SCHAR_MAX; }
+    static const bool is_signed = true;
+};
+
+template <> struct numeric_limits<unsigned char>
+{
+    __device__ __forceinline__ static unsigned char min() { return 0; }
+    __device__ __forceinline__ static unsigned char max() { return UCHAR_MAX; }
+    static const bool is_signed = false;
+};
+
+template <> struct numeric_limits<short>
+{
+    __device__ __forceinline__ static short min() { return SHRT_MIN; }
+    __device__ __forceinline__ static short max() { return SHRT_MAX; }
+    static const bool is_signed = true;
+};
+
+template <> struct numeric_limits<unsigned short>
+{
+    __device__ __forceinline__ static unsigned short min() { return 0; }
+    __device__ __forceinline__ static unsigned short max() { return USHRT_MAX; }
+    static const bool is_signed = false;
+};
+
+template <> struct numeric_limits<int>
+{
+    __device__ __forceinline__ static int min() { return INT_MIN; }
+    __device__ __forceinline__ static int max() { return INT_MAX; }
+    static const bool is_signed = true;
+};
+
+template <> struct numeric_limits<unsigned int>
+{
+    __device__ __forceinline__ static unsigned int min() { return 0; }
+    __device__ __forceinline__ static unsigned int max() { return UINT_MAX; }
+    static const bool is_signed = false;
+};
+
+template <> struct numeric_limits<float>
+{
+    __device__ __forceinline__ static float min() { return FLT_MIN; }
+    __device__ __forceinline__ static float max() { return FLT_MAX; }
+    __device__ __forceinline__ static float epsilon() { return FLT_EPSILON; }
+    static const bool is_signed = true;
+};
+
+template <> struct numeric_limits<double>
+{
+    __device__ __forceinline__ static double min() { return DBL_MIN; }
+    __device__ __forceinline__ static double max() { return DBL_MAX; }
+    __device__ __forceinline__ static double epsilon() { return DBL_EPSILON; }
+    static const bool is_signed = true;
+};
+}}} // namespace cv { namespace cuda { namespace cudev {
+
+//!
@endcond + +#endif // OPENCV_CUDA_LIMITS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/reduce.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/reduce.hpp new file mode 100644 index 00000000..5de36508 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/reduce.hpp @@ -0,0 +1,209 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_REDUCE_HPP +#define OPENCV_CUDA_REDUCE_HPP + +#ifndef THRUST_DEBUG // eliminate -Wundef warning +#define THRUST_DEBUG 0 +#endif + +#include +#include "detail/reduce.hpp" +#include "detail/reduce_key_val.hpp" + +/** @file + * @deprecated Use @ref cudev instead. + */ + +//! 
@cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + template + __device__ __forceinline__ void reduce(volatile T* smem, T& val, unsigned int tid, const Op& op) + { + reduce_detail::Dispatcher::reductor::template reduce(smem, val, tid, op); + } + template + __device__ __forceinline__ void reduce(const thrust::tuple& smem, + const thrust::tuple& val, + unsigned int tid, + const thrust::tuple& op) + { + reduce_detail::Dispatcher::reductor::template reduce< + const thrust::tuple&, + const thrust::tuple&, + const thrust::tuple&>(smem, val, tid, op); + } + + template + __device__ __forceinline__ void reduceKeyVal(volatile K* skeys, K& key, volatile V* svals, V& val, unsigned int tid, const Cmp& cmp) + { + reduce_key_val_detail::Dispatcher::reductor::template reduce(skeys, key, svals, val, tid, cmp); + } + template + __device__ __forceinline__ void reduceKeyVal(volatile K* skeys, K& key, + const thrust::tuple& svals, + const thrust::tuple& val, + unsigned int tid, const Cmp& cmp) + { + reduce_key_val_detail::Dispatcher::reductor::template reduce&, + const thrust::tuple&, + const Cmp&>(skeys, key, svals, val, tid, cmp); + } + template + __device__ __forceinline__ void reduceKeyVal(const thrust::tuple& skeys, + const thrust::tuple& key, + const thrust::tuple& svals, + const thrust::tuple& val, + unsigned int tid, + const thrust::tuple& cmp) + { + reduce_key_val_detail::Dispatcher::reductor::template reduce< + const thrust::tuple&, + const thrust::tuple&, + const thrust::tuple&, + const thrust::tuple&, + const thrust::tuple& + >(skeys, key, svals, val, tid, cmp); + } + + // smem_tuple + + template + __device__ __forceinline__ + thrust::tuple + smem_tuple(T0* t0) + { + return thrust::make_tuple((volatile T0*) t0); + } + + template + __device__ __forceinline__ + thrust::tuple + smem_tuple(T0* t0, T1* t1) + { + return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1); + } + + template + __device__ __forceinline__ + thrust::tuple + smem_tuple(T0* t0, T1* t1, T2* t2) + { + return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2); + } + + template + __device__ __forceinline__ + thrust::tuple + smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3) + { + return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3); + } + + template + __device__ __forceinline__ + thrust::tuple + smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4) + { + return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4); + } + + template + __device__ __forceinline__ + thrust::tuple + smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5) + { + return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5); + } + + template + __device__ __forceinline__ + thrust::tuple + smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6) + { + return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6); + } + + template + __device__ __forceinline__ + thrust::tuple + smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6, T7* t7) + { + return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6, (volatile T7*) t7); + } + + template + __device__ __forceinline__ + thrust::tuple + smem_tuple(T0* t0, T1* t1, T2* t2, 
T3* t3, T4* t4, T5* t5, T6* t6, T7* t7, T8* t8) + { + return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6, (volatile T7*) t7, (volatile T8*) t8); + } + + template + __device__ __forceinline__ + thrust::tuple + smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6, T7* t7, T8* t8, T9* t9) + { + return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6, (volatile T7*) t7, (volatile T8*) t8, (volatile T9*) t9); + } +}}} + +//! @endcond + +#endif // OPENCV_CUDA_REDUCE_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/saturate_cast.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/saturate_cast.hpp new file mode 100644 index 00000000..c3a3d1cb --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/saturate_cast.hpp @@ -0,0 +1,292 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_SATURATE_CAST_HPP +#define OPENCV_CUDA_SATURATE_CAST_HPP + +#include "common.hpp" + +/** @file + * @deprecated Use @ref cudev instead. + */ + +//! 
@cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + template __device__ __forceinline__ _Tp saturate_cast(uchar v) { return _Tp(v); } + template __device__ __forceinline__ _Tp saturate_cast(schar v) { return _Tp(v); } + template __device__ __forceinline__ _Tp saturate_cast(ushort v) { return _Tp(v); } + template __device__ __forceinline__ _Tp saturate_cast(short v) { return _Tp(v); } + template __device__ __forceinline__ _Tp saturate_cast(uint v) { return _Tp(v); } + template __device__ __forceinline__ _Tp saturate_cast(int v) { return _Tp(v); } + template __device__ __forceinline__ _Tp saturate_cast(float v) { return _Tp(v); } + template __device__ __forceinline__ _Tp saturate_cast(double v) { return _Tp(v); } + + template<> __device__ __forceinline__ uchar saturate_cast(schar v) + { + uint res = 0; + int vi = v; + asm("cvt.sat.u8.s8 %0, %1;" : "=r"(res) : "r"(vi)); + return res; + } + template<> __device__ __forceinline__ uchar saturate_cast(short v) + { + uint res = 0; + asm("cvt.sat.u8.s16 %0, %1;" : "=r"(res) : "h"(v)); + return res; + } + template<> __device__ __forceinline__ uchar saturate_cast(ushort v) + { + uint res = 0; + asm("cvt.sat.u8.u16 %0, %1;" : "=r"(res) : "h"(v)); + return res; + } + template<> __device__ __forceinline__ uchar saturate_cast(int v) + { + uint res = 0; + asm("cvt.sat.u8.s32 %0, %1;" : "=r"(res) : "r"(v)); + return res; + } + template<> __device__ __forceinline__ uchar saturate_cast(uint v) + { + uint res = 0; + asm("cvt.sat.u8.u32 %0, %1;" : "=r"(res) : "r"(v)); + return res; + } + template<> __device__ __forceinline__ uchar saturate_cast(float v) + { + uint res = 0; + asm("cvt.rni.sat.u8.f32 %0, %1;" : "=r"(res) : "f"(v)); + return res; + } + template<> __device__ __forceinline__ uchar saturate_cast(double v) + { + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130 + uint res = 0; + asm("cvt.rni.sat.u8.f64 %0, %1;" : "=r"(res) : "d"(v)); + return res; + #else + return saturate_cast((float)v); + #endif + } + + template<> __device__ __forceinline__ schar saturate_cast(uchar v) + { + uint res = 0; + uint vi = v; + asm("cvt.sat.s8.u8 %0, %1;" : "=r"(res) : "r"(vi)); + return res; + } + template<> __device__ __forceinline__ schar saturate_cast(short v) + { + uint res = 0; + asm("cvt.sat.s8.s16 %0, %1;" : "=r"(res) : "h"(v)); + return res; + } + template<> __device__ __forceinline__ schar saturate_cast(ushort v) + { + uint res = 0; + asm("cvt.sat.s8.u16 %0, %1;" : "=r"(res) : "h"(v)); + return res; + } + template<> __device__ __forceinline__ schar saturate_cast(int v) + { + uint res = 0; + asm("cvt.sat.s8.s32 %0, %1;" : "=r"(res) : "r"(v)); + return res; + } + template<> __device__ __forceinline__ schar saturate_cast(uint v) + { + uint res = 0; + asm("cvt.sat.s8.u32 %0, %1;" : "=r"(res) : "r"(v)); + return res; + } + template<> __device__ __forceinline__ schar saturate_cast(float v) + { + uint res = 0; + asm("cvt.rni.sat.s8.f32 %0, %1;" : "=r"(res) : "f"(v)); + return res; + } + template<> __device__ __forceinline__ schar saturate_cast(double v) + { + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130 + uint res = 0; + asm("cvt.rni.sat.s8.f64 %0, %1;" : "=r"(res) : "d"(v)); + return res; + #else + return saturate_cast((float)v); + #endif + } + + template<> __device__ __forceinline__ ushort saturate_cast(schar v) + { + ushort res = 0; + int vi = v; + asm("cvt.sat.u16.s8 %0, %1;" : "=h"(res) : "r"(vi)); + return res; + } + template<> __device__ __forceinline__ ushort saturate_cast(short v) + { + ushort res = 0; + asm("cvt.sat.u16.s16 %0, %1;" : 
"=h"(res) : "h"(v)); + return res; + } + template<> __device__ __forceinline__ ushort saturate_cast(int v) + { + ushort res = 0; + asm("cvt.sat.u16.s32 %0, %1;" : "=h"(res) : "r"(v)); + return res; + } + template<> __device__ __forceinline__ ushort saturate_cast(uint v) + { + ushort res = 0; + asm("cvt.sat.u16.u32 %0, %1;" : "=h"(res) : "r"(v)); + return res; + } + template<> __device__ __forceinline__ ushort saturate_cast(float v) + { + ushort res = 0; + asm("cvt.rni.sat.u16.f32 %0, %1;" : "=h"(res) : "f"(v)); + return res; + } + template<> __device__ __forceinline__ ushort saturate_cast(double v) + { + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130 + ushort res = 0; + asm("cvt.rni.sat.u16.f64 %0, %1;" : "=h"(res) : "d"(v)); + return res; + #else + return saturate_cast((float)v); + #endif + } + + template<> __device__ __forceinline__ short saturate_cast(ushort v) + { + short res = 0; + asm("cvt.sat.s16.u16 %0, %1;" : "=h"(res) : "h"(v)); + return res; + } + template<> __device__ __forceinline__ short saturate_cast(int v) + { + short res = 0; + asm("cvt.sat.s16.s32 %0, %1;" : "=h"(res) : "r"(v)); + return res; + } + template<> __device__ __forceinline__ short saturate_cast(uint v) + { + short res = 0; + asm("cvt.sat.s16.u32 %0, %1;" : "=h"(res) : "r"(v)); + return res; + } + template<> __device__ __forceinline__ short saturate_cast(float v) + { + short res = 0; + asm("cvt.rni.sat.s16.f32 %0, %1;" : "=h"(res) : "f"(v)); + return res; + } + template<> __device__ __forceinline__ short saturate_cast(double v) + { + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130 + short res = 0; + asm("cvt.rni.sat.s16.f64 %0, %1;" : "=h"(res) : "d"(v)); + return res; + #else + return saturate_cast((float)v); + #endif + } + + template<> __device__ __forceinline__ int saturate_cast(uint v) + { + int res = 0; + asm("cvt.sat.s32.u32 %0, %1;" : "=r"(res) : "r"(v)); + return res; + } + template<> __device__ __forceinline__ int saturate_cast(float v) + { + return __float2int_rn(v); + } + template<> __device__ __forceinline__ int saturate_cast(double v) + { + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130 + return __double2int_rn(v); + #else + return saturate_cast((float)v); + #endif + } + + template<> __device__ __forceinline__ uint saturate_cast(schar v) + { + uint res = 0; + int vi = v; + asm("cvt.sat.u32.s8 %0, %1;" : "=r"(res) : "r"(vi)); + return res; + } + template<> __device__ __forceinline__ uint saturate_cast(short v) + { + uint res = 0; + asm("cvt.sat.u32.s16 %0, %1;" : "=r"(res) : "h"(v)); + return res; + } + template<> __device__ __forceinline__ uint saturate_cast(int v) + { + uint res = 0; + asm("cvt.sat.u32.s32 %0, %1;" : "=r"(res) : "r"(v)); + return res; + } + template<> __device__ __forceinline__ uint saturate_cast(float v) + { + return __float2uint_rn(v); + } + template<> __device__ __forceinline__ uint saturate_cast(double v) + { + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130 + return __double2uint_rn(v); + #else + return saturate_cast((float)v); + #endif + } +}}} + +//! @endcond + +#endif /* OPENCV_CUDA_SATURATE_CAST_HPP */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/scan.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/scan.hpp new file mode 100644 index 00000000..e128fb09 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/scan.hpp @@ -0,0 +1,258 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_SCAN_HPP +#define OPENCV_CUDA_SCAN_HPP + +#include "opencv2/core/cuda/common.hpp" +#include "opencv2/core/cuda/utility.hpp" +#include "opencv2/core/cuda/warp.hpp" +#include "opencv2/core/cuda/warp_shuffle.hpp" + +/** @file + * @deprecated Use @ref cudev instead. + */ + +//! @cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + enum ScanKind { EXCLUSIVE = 0, INCLUSIVE = 1 }; + + template struct WarpScan + { + __device__ __forceinline__ WarpScan() {} + __device__ __forceinline__ WarpScan(const WarpScan& other) { CV_UNUSED(other); } + + __device__ __forceinline__ T operator()( volatile T *ptr , const unsigned int idx) + { + const unsigned int lane = idx & 31; + F op; + + if ( lane >= 1) ptr [idx ] = op(ptr [idx - 1], ptr [idx]); + if ( lane >= 2) ptr [idx ] = op(ptr [idx - 2], ptr [idx]); + if ( lane >= 4) ptr [idx ] = op(ptr [idx - 4], ptr [idx]); + if ( lane >= 8) ptr [idx ] = op(ptr [idx - 8], ptr [idx]); + if ( lane >= 16) ptr [idx ] = op(ptr [idx - 16], ptr [idx]); + + if( Kind == INCLUSIVE ) + return ptr [idx]; + else + return (lane > 0) ? 
ptr [idx - 1] : 0; + } + + __device__ __forceinline__ unsigned int index(const unsigned int tid) + { + return tid; + } + + __device__ __forceinline__ void init(volatile T *ptr){} + + static const int warp_offset = 0; + + typedef WarpScan merge; + }; + + template struct WarpScanNoComp + { + __device__ __forceinline__ WarpScanNoComp() {} + __device__ __forceinline__ WarpScanNoComp(const WarpScanNoComp& other) { CV_UNUSED(other); } + + __device__ __forceinline__ T operator()( volatile T *ptr , const unsigned int idx) + { + const unsigned int lane = threadIdx.x & 31; + F op; + + ptr [idx ] = op(ptr [idx - 1], ptr [idx]); + ptr [idx ] = op(ptr [idx - 2], ptr [idx]); + ptr [idx ] = op(ptr [idx - 4], ptr [idx]); + ptr [idx ] = op(ptr [idx - 8], ptr [idx]); + ptr [idx ] = op(ptr [idx - 16], ptr [idx]); + + if( Kind == INCLUSIVE ) + return ptr [idx]; + else + return (lane > 0) ? ptr [idx - 1] : 0; + } + + __device__ __forceinline__ unsigned int index(const unsigned int tid) + { + return (tid >> warp_log) * warp_smem_stride + 16 + (tid & warp_mask); + } + + __device__ __forceinline__ void init(volatile T *ptr) + { + ptr[threadIdx.x] = 0; + } + + static const int warp_smem_stride = 32 + 16 + 1; + static const int warp_offset = 16; + static const int warp_log = 5; + static const int warp_mask = 31; + + typedef WarpScanNoComp merge; + }; + + template struct BlockScan + { + __device__ __forceinline__ BlockScan() {} + __device__ __forceinline__ BlockScan(const BlockScan& other) { CV_UNUSED(other); } + + __device__ __forceinline__ T operator()(volatile T *ptr) + { + const unsigned int tid = threadIdx.x; + const unsigned int lane = tid & warp_mask; + const unsigned int warp = tid >> warp_log; + + Sc scan; + typename Sc::merge merge_scan; + const unsigned int idx = scan.index(tid); + + T val = scan(ptr, idx); + __syncthreads (); + + if( warp == 0) + scan.init(ptr); + __syncthreads (); + + if( lane == 31 ) + ptr [scan.warp_offset + warp ] = (Kind == INCLUSIVE) ? 
val : ptr [idx]; + __syncthreads (); + + if( warp == 0 ) + merge_scan(ptr, idx); + __syncthreads(); + + if ( warp > 0) + val = ptr [scan.warp_offset + warp - 1] + val; + __syncthreads (); + + ptr[idx] = val; + __syncthreads (); + + return val ; + } + + static const int warp_log = 5; + static const int warp_mask = 31; + }; + + template + __device__ T warpScanInclusive(T idata, volatile T* s_Data, unsigned int tid) + { + #if __CUDA_ARCH__ >= 300 + const unsigned int laneId = cv::cuda::device::Warp::laneId(); + + // scan on shuffl functions + #pragma unroll + for (int i = 1; i <= (OPENCV_CUDA_WARP_SIZE / 2); i *= 2) + { + const T n = cv::cuda::device::shfl_up(idata, i); + if (laneId >= i) + idata += n; + } + + return idata; + #else + unsigned int pos = 2 * tid - (tid & (OPENCV_CUDA_WARP_SIZE - 1)); + s_Data[pos] = 0; + pos += OPENCV_CUDA_WARP_SIZE; + s_Data[pos] = idata; + + s_Data[pos] += s_Data[pos - 1]; + s_Data[pos] += s_Data[pos - 2]; + s_Data[pos] += s_Data[pos - 4]; + s_Data[pos] += s_Data[pos - 8]; + s_Data[pos] += s_Data[pos - 16]; + + return s_Data[pos]; + #endif + } + + template + __device__ __forceinline__ T warpScanExclusive(T idata, volatile T* s_Data, unsigned int tid) + { + return warpScanInclusive(idata, s_Data, tid) - idata; + } + + template + __device__ T blockScanInclusive(T idata, volatile T* s_Data, unsigned int tid) + { + if (tiNumScanThreads > OPENCV_CUDA_WARP_SIZE) + { + //Bottom-level inclusive warp scan + T warpResult = warpScanInclusive(idata, s_Data, tid); + + //Save top elements of each warp for exclusive warp scan + //sync to wait for warp scans to complete (because s_Data is being overwritten) + __syncthreads(); + if ((tid & (OPENCV_CUDA_WARP_SIZE - 1)) == (OPENCV_CUDA_WARP_SIZE - 1)) + { + s_Data[tid >> OPENCV_CUDA_LOG_WARP_SIZE] = warpResult; + } + + //wait for warp scans to complete + __syncthreads(); + + if (tid < (tiNumScanThreads / OPENCV_CUDA_WARP_SIZE) ) + { + //grab top warp elements + T val = s_Data[tid]; + //calculate exclusive scan and write back to shared memory + s_Data[tid] = warpScanExclusive(val, s_Data, tid); + } + + //return updated warp scans with exclusive scan results + __syncthreads(); + + return warpResult + s_Data[tid >> OPENCV_CUDA_LOG_WARP_SIZE]; + } + else + { + return warpScanInclusive(idata, s_Data, tid); + } + } +}}} + +//! @endcond + +#endif // OPENCV_CUDA_SCAN_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/simd_functions.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/simd_functions.hpp new file mode 100644 index 00000000..3d8c2e0d --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/simd_functions.hpp @@ -0,0 +1,869 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
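Editorial note: scan.hpp above provides warp- and block-level prefix sums, and warpScanExclusive is simply warpScanInclusive minus the thread's own input. The sequential reference below is illustrative and not part of the patch; the helper name is ours.

#include <cstddef>
#include <vector>

// inclusive: out[i] = in[0] + ... + in[i]
// exclusive: out[i] = in[0] + ... + in[i-1] (so out[0] = 0)
static void ref_scan(const std::vector<int>& in, std::vector<int>& incl, std::vector<int>& excl)
{
    incl.assign(in.size(), 0);
    excl.assign(in.size(), 0);
    int sum = 0;
    for (std::size_t i = 0; i < in.size(); ++i)
    {
        excl[i] = sum;   // everything strictly before element i
        sum += in[i];
        incl[i] = sum;   // ... including element i
    }
}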
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/* + * Copyright (c) 2013 NVIDIA Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of NVIDIA Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef OPENCV_CUDA_SIMD_FUNCTIONS_HPP +#define OPENCV_CUDA_SIMD_FUNCTIONS_HPP + +#include "common.hpp" + +/** @file + * @deprecated Use @ref cudev instead. + */ + +//! 
@cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + // 2 + + static __device__ __forceinline__ unsigned int vadd2(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vadd2.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #elif __CUDA_ARCH__ >= 200 + asm("vadd.u32.u32.u32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vadd.u32.u32.u32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int s; + s = a ^ b; // sum bits + r = a + b; // actual sum + s = s ^ r; // determine carry-ins for each bit position + s = s & 0x00010000; // carry-in to high word (= carry-out from low word) + r = r - s; // subtract out carry-out from low word + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vsub2(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vsub2.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #elif __CUDA_ARCH__ >= 200 + asm("vsub.u32.u32.u32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vsub.u32.u32.u32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int s; + s = a ^ b; // sum bits + r = a - b; // actual sum + s = s ^ r; // determine carry-ins for each bit position + s = s & 0x00010000; // borrow to high word + r = r + s; // compensate for borrow from low word + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vabsdiff2(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vabsdiff2.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #elif __CUDA_ARCH__ >= 200 + asm("vabsdiff.u32.u32.u32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vabsdiff.u32.u32.u32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int s, t, u, v; + s = a & 0x0000ffff; // extract low halfword + r = b & 0x0000ffff; // extract low halfword + u = ::max(r, s); // maximum of low halfwords + v = ::min(r, s); // minimum of low halfwords + s = a & 0xffff0000; // extract high halfword + r = b & 0xffff0000; // extract high halfword + t = ::max(r, s); // maximum of high halfwords + s = ::min(r, s); // minimum of high halfwords + r = u | t; // maximum of both halfwords + s = v | s; // minimum of both halfwords + r = r - s; // |a - b| = max(a,b) - min(a,b); + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vavg2(unsigned int a, unsigned int b) + { + unsigned int r, s; + + // HAKMEM #23: a + b = 2 * (a & b) + (a ^ b) ==> + // (a + b) / 2 = (a & b) + ((a ^ b) >> 1) + s = a ^ b; + r = a & b; + s = s & 0xfffefffe; // ensure shift doesn't cross halfword boundaries + s = s >> 1; + s = r + s; + + return s; + } + + static __device__ __forceinline__ unsigned int vavrg2(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vavrg2.u32.u32.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + // HAKMEM #23: a + b = 2 * (a | b) - (a ^ b) ==> + // (a + b + 1) / 2 = (a | b) - ((a ^ b) >> 1) + unsigned int s; + s = a ^ b; + r = a | b; + s = s & 0xfffefffe; // ensure shift doesn't cross half-word boundaries + s = s >> 1; + r = r - s; + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vseteq2(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vset2.u32.u32.eq 
%0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + // inspired by Alan Mycroft's null-byte detection algorithm: + // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080)) + unsigned int c; + r = a ^ b; // 0x0000 if a == b + c = r | 0x80008000; // set msbs, to catch carry out + r = r ^ c; // extract msbs, msb = 1 if r < 0x8000 + c = c - 0x00010001; // msb = 0, if r was 0x0000 or 0x8000 + c = r & ~c; // msb = 1, if r was 0x0000 + r = c >> 15; // convert to bool + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vcmpeq2(unsigned int a, unsigned int b) + { + unsigned int r, c; + + #if __CUDA_ARCH__ >= 300 + r = vseteq2(a, b); + c = r << 16; // convert bool + r = c - r; // into mask + #else + // inspired by Alan Mycroft's null-byte detection algorithm: + // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080)) + r = a ^ b; // 0x0000 if a == b + c = r | 0x80008000; // set msbs, to catch carry out + r = r ^ c; // extract msbs, msb = 1 if r < 0x8000 + c = c - 0x00010001; // msb = 0, if r was 0x0000 or 0x8000 + c = r & ~c; // msb = 1, if r was 0x0000 + r = c >> 15; // convert + r = c - r; // msbs to + r = c | r; // mask + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vsetge2(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vset2.u32.u32.ge %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int c; + asm("not.b32 %0, %0;" : "+r"(b)); + c = vavrg2(a, b); // (a + ~b + 1) / 2 = (a - b) / 2 + c = c & 0x80008000; // msb = carry-outs + r = c >> 15; // convert to bool + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vcmpge2(unsigned int a, unsigned int b) + { + unsigned int r, c; + + #if __CUDA_ARCH__ >= 300 + r = vsetge2(a, b); + c = r << 16; // convert bool + r = c - r; // into mask + #else + asm("not.b32 %0, %0;" : "+r"(b)); + c = vavrg2(a, b); // (a + ~b + 1) / 2 = (a - b) / 2 + c = c & 0x80008000; // msb = carry-outs + r = c >> 15; // convert + r = c - r; // msbs to + r = c | r; // mask + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vsetgt2(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vset2.u32.u32.gt %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int c; + asm("not.b32 %0, %0;" : "+r"(b)); + c = vavg2(a, b); // (a + ~b) / 2 = (a - b) / 2 [rounded down] + c = c & 0x80008000; // msbs = carry-outs + r = c >> 15; // convert to bool + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vcmpgt2(unsigned int a, unsigned int b) + { + unsigned int r, c; + + #if __CUDA_ARCH__ >= 300 + r = vsetgt2(a, b); + c = r << 16; // convert bool + r = c - r; // into mask + #else + asm("not.b32 %0, %0;" : "+r"(b)); + c = vavg2(a, b); // (a + ~b) / 2 = (a - b) / 2 [rounded down] + c = c & 0x80008000; // msbs = carry-outs + r = c >> 15; // convert + r = c - r; // msbs to + r = c | r; // mask + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vsetle2(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vset2.u32.u32.le %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int c; + asm("not.b32 %0, %0;" : "+r"(a)); + c = vavrg2(a, b); // (b + ~a + 1) / 2 = (b - a) / 2 + c = c & 0x80008000; // msb = carry-outs + r = c >> 15; // convert to bool + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vcmple2(unsigned int a, unsigned 
int b) + { + unsigned int r, c; + + #if __CUDA_ARCH__ >= 300 + r = vsetle2(a, b); + c = r << 16; // convert bool + r = c - r; // into mask + #else + asm("not.b32 %0, %0;" : "+r"(a)); + c = vavrg2(a, b); // (b + ~a + 1) / 2 = (b - a) / 2 + c = c & 0x80008000; // msb = carry-outs + r = c >> 15; // convert + r = c - r; // msbs to + r = c | r; // mask + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vsetlt2(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vset2.u32.u32.lt %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int c; + asm("not.b32 %0, %0;" : "+r"(a)); + c = vavg2(a, b); // (b + ~a) / 2 = (b - a) / 2 [rounded down] + c = c & 0x80008000; // msb = carry-outs + r = c >> 15; // convert to bool + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vcmplt2(unsigned int a, unsigned int b) + { + unsigned int r, c; + + #if __CUDA_ARCH__ >= 300 + r = vsetlt2(a, b); + c = r << 16; // convert bool + r = c - r; // into mask + #else + asm("not.b32 %0, %0;" : "+r"(a)); + c = vavg2(a, b); // (b + ~a) / 2 = (b - a) / 2 [rounded down] + c = c & 0x80008000; // msb = carry-outs + r = c >> 15; // convert + r = c - r; // msbs to + r = c | r; // mask + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vsetne2(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm ("vset2.u32.u32.ne %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + // inspired by Alan Mycroft's null-byte detection algorithm: + // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080)) + unsigned int c; + r = a ^ b; // 0x0000 if a == b + c = r | 0x80008000; // set msbs, to catch carry out + c = c - 0x00010001; // msb = 0, if r was 0x0000 or 0x8000 + c = r | c; // msb = 1, if r was not 0x0000 + c = c & 0x80008000; // extract msbs + r = c >> 15; // convert to bool + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vcmpne2(unsigned int a, unsigned int b) + { + unsigned int r, c; + + #if __CUDA_ARCH__ >= 300 + r = vsetne2(a, b); + c = r << 16; // convert bool + r = c - r; // into mask + #else + // inspired by Alan Mycroft's null-byte detection algorithm: + // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080)) + r = a ^ b; // 0x0000 if a == b + c = r | 0x80008000; // set msbs, to catch carry out + c = c - 0x00010001; // msb = 0, if r was 0x0000 or 0x8000 + c = r | c; // msb = 1, if r was not 0x0000 + c = c & 0x80008000; // extract msbs + r = c >> 15; // convert + r = c - r; // msbs to + r = c | r; // mask + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vmax2(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vmax2.u32.u32.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #elif __CUDA_ARCH__ >= 200 + asm("vmax.u32.u32.u32 %0.h0, %1.h0, %2.h0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vmax.u32.u32.u32 %0.h1, %1.h1, %2.h1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int s, t, u; + r = a & 0x0000ffff; // extract low halfword + s = b & 0x0000ffff; // extract low halfword + t = ::max(r, s); // maximum of low halfwords + r = a & 0xffff0000; // extract high halfword + s = b & 0xffff0000; // extract high halfword + u = ::max(r, s); // maximum of high halfwords + r = t | u; // combine halfword maximums + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vmin2(unsigned int a, unsigned int b) + { + 
unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vmin2.u32.u32.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #elif __CUDA_ARCH__ >= 200 + asm("vmin.u32.u32.u32 %0.h0, %1.h0, %2.h0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vmin.u32.u32.u32 %0.h1, %1.h1, %2.h1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int s, t, u; + r = a & 0x0000ffff; // extract low halfword + s = b & 0x0000ffff; // extract low halfword + t = ::min(r, s); // minimum of low halfwords + r = a & 0xffff0000; // extract high halfword + s = b & 0xffff0000; // extract high halfword + u = ::min(r, s); // minimum of high halfwords + r = t | u; // combine halfword minimums + #endif + + return r; + } + + // 4 + + static __device__ __forceinline__ unsigned int vadd4(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vadd4.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #elif __CUDA_ARCH__ >= 200 + asm("vadd.u32.u32.u32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vadd.u32.u32.u32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vadd.u32.u32.u32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vadd.u32.u32.u32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int s, t; + s = a ^ b; // sum bits + r = a & 0x7f7f7f7f; // clear msbs + t = b & 0x7f7f7f7f; // clear msbs + s = s & 0x80808080; // msb sum bits + r = r + t; // add without msbs, record carry-out in msbs + r = r ^ s; // sum of msb sum and carry-in bits, w/o carry-out + #endif /* __CUDA_ARCH__ >= 300 */ + + return r; + } + + static __device__ __forceinline__ unsigned int vsub4(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vsub4.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #elif __CUDA_ARCH__ >= 200 + asm("vsub.u32.u32.u32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vsub.u32.u32.u32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vsub.u32.u32.u32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vsub.u32.u32.u32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int s, t; + s = a ^ ~b; // inverted sum bits + r = a | 0x80808080; // set msbs + t = b & 0x7f7f7f7f; // clear msbs + s = s & 0x80808080; // inverted msb sum bits + r = r - t; // subtract w/o msbs, record inverted borrows in msb + r = r ^ s; // combine inverted msb sum bits and borrows + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vavg4(unsigned int a, unsigned int b) + { + unsigned int r, s; + + // HAKMEM #23: a + b = 2 * (a & b) + (a ^ b) ==> + // (a + b) / 2 = (a & b) + ((a ^ b) >> 1) + s = a ^ b; + r = a & b; + s = s & 0xfefefefe; // ensure following shift doesn't cross byte boundaries + s = s >> 1; + s = r + s; + + return s; + } + + static __device__ __forceinline__ unsigned int vavrg4(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vavrg4.u32.u32.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + // HAKMEM #23: a + b = 2 * (a | b) - (a ^ b) ==> + // (a + b + 1) / 2 = (a | b) - ((a ^ b) >> 1) + unsigned int c; + c = a ^ b; + r = a | b; + c = c & 0xfefefefe; // ensure following shift doesn't cross byte boundaries + c = c >> 1; + r = r - c; + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int 
vseteq4(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vset4.u32.u32.eq %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + // inspired by Alan Mycroft's null-byte detection algorithm: + // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080)) + unsigned int c; + r = a ^ b; // 0x00 if a == b + c = r | 0x80808080; // set msbs, to catch carry out + r = r ^ c; // extract msbs, msb = 1 if r < 0x80 + c = c - 0x01010101; // msb = 0, if r was 0x00 or 0x80 + c = r & ~c; // msb = 1, if r was 0x00 + r = c >> 7; // convert to bool + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vcmpeq4(unsigned int a, unsigned int b) + { + unsigned int r, t; + + #if __CUDA_ARCH__ >= 300 + r = vseteq4(a, b); + t = r << 8; // convert bool + r = t - r; // to mask + #else + // inspired by Alan Mycroft's null-byte detection algorithm: + // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080)) + t = a ^ b; // 0x00 if a == b + r = t | 0x80808080; // set msbs, to catch carry out + t = t ^ r; // extract msbs, msb = 1 if t < 0x80 + r = r - 0x01010101; // msb = 0, if t was 0x00 or 0x80 + r = t & ~r; // msb = 1, if t was 0x00 + t = r >> 7; // build mask + t = r - t; // from + r = t | r; // msbs + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vsetle4(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vset4.u32.u32.le %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int c; + asm("not.b32 %0, %0;" : "+r"(a)); + c = vavrg4(a, b); // (b + ~a + 1) / 2 = (b - a) / 2 + c = c & 0x80808080; // msb = carry-outs + r = c >> 7; // convert to bool + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vcmple4(unsigned int a, unsigned int b) + { + unsigned int r, c; + + #if __CUDA_ARCH__ >= 300 + r = vsetle4(a, b); + c = r << 8; // convert bool + r = c - r; // to mask + #else + asm("not.b32 %0, %0;" : "+r"(a)); + c = vavrg4(a, b); // (b + ~a + 1) / 2 = (b - a) / 2 + c = c & 0x80808080; // msbs = carry-outs + r = c >> 7; // convert + r = c - r; // msbs to + r = c | r; // mask + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vsetlt4(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vset4.u32.u32.lt %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int c; + asm("not.b32 %0, %0;" : "+r"(a)); + c = vavg4(a, b); // (b + ~a) / 2 = (b - a) / 2 [rounded down] + c = c & 0x80808080; // msb = carry-outs + r = c >> 7; // convert to bool + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vcmplt4(unsigned int a, unsigned int b) + { + unsigned int r, c; + + #if __CUDA_ARCH__ >= 300 + r = vsetlt4(a, b); + c = r << 8; // convert bool + r = c - r; // to mask + #else + asm("not.b32 %0, %0;" : "+r"(a)); + c = vavg4(a, b); // (b + ~a) / 2 = (b - a) / 2 [rounded down] + c = c & 0x80808080; // msbs = carry-outs + r = c >> 7; // convert + r = c - r; // msbs to + r = c | r; // mask + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vsetge4(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vset4.u32.u32.ge %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int c; + asm("not.b32 %0, %0;" : "+r"(b)); + c = vavrg4(a, b); // (a + ~b + 1) / 2 = (a - b) / 2 + c = c & 0x80808080; // msb = carry-outs + r = c >> 7; // convert to bool + #endif + + return r; + } + 
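Editorial note: the vset*/vcmp* pairs in simd_functions.hpp follow one pattern: vset* leaves a 0/1 boolean in each byte (or halfword) lane, and vcmp* widens it to an all-ones lane mask by shifting left 8 (or 16) and subtracting. The host-side sketch below is illustrative and not part of the patch; all names are ours.

#include <cassert>
#include <cstdint>

// scalar reference for vcmpge4: 0xff in every byte lane where a >= b
static inline std::uint32_t ref_vcmpge4(std::uint32_t a, std::uint32_t b)
{
    std::uint32_t r = 0;
    for (int i = 0; i < 4; ++i)
    {
        std::uint32_t ba = (a >> (8 * i)) & 0xffu;
        std::uint32_t bb = (b >> (8 * i)) & 0xffu;
        if (ba >= bb)
            r |= 0xffu << (8 * i);
    }
    return r;
}

// the bool -> mask widening used by vcmpeq4/vcmpge4/...: per-lane 0x01 -> 0xff
static inline std::uint32_t bool_to_mask4(std::uint32_t set4)
{
    return (set4 << 8) - set4;
}

int main()
{
    assert(ref_vcmpge4(0x10ff2000u, 0x0f012100u) == 0xffff00ffu);
    assert(bool_to_mask4(0x01010001u) == 0xffff00ffu);   // lanes 3, 2 and 0 are true
    return 0;
}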
+ static __device__ __forceinline__ unsigned int vcmpge4(unsigned int a, unsigned int b) + { + unsigned int r, s; + + #if __CUDA_ARCH__ >= 300 + r = vsetge4(a, b); + s = r << 8; // convert bool + r = s - r; // to mask + #else + asm ("not.b32 %0,%0;" : "+r"(b)); + r = vavrg4 (a, b); // (a + ~b + 1) / 2 = (a - b) / 2 + r = r & 0x80808080; // msb = carry-outs + s = r >> 7; // build mask + s = r - s; // from + r = s | r; // msbs + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vsetgt4(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vset4.u32.u32.gt %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int c; + asm("not.b32 %0, %0;" : "+r"(b)); + c = vavg4(a, b); // (a + ~b) / 2 = (a - b) / 2 [rounded down] + c = c & 0x80808080; // msb = carry-outs + r = c >> 7; // convert to bool + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vcmpgt4(unsigned int a, unsigned int b) + { + unsigned int r, c; + + #if __CUDA_ARCH__ >= 300 + r = vsetgt4(a, b); + c = r << 8; // convert bool + r = c - r; // to mask + #else + asm("not.b32 %0, %0;" : "+r"(b)); + c = vavg4(a, b); // (a + ~b) / 2 = (a - b) / 2 [rounded down] + c = c & 0x80808080; // msb = carry-outs + r = c >> 7; // convert + r = c - r; // msbs to + r = c | r; // mask + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vsetne4(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vset4.u32.u32.ne %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + // inspired by Alan Mycroft's null-byte detection algorithm: + // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080)) + unsigned int c; + r = a ^ b; // 0x00 if a == b + c = r | 0x80808080; // set msbs, to catch carry out + c = c - 0x01010101; // msb = 0, if r was 0x00 or 0x80 + c = r | c; // msb = 1, if r was not 0x00 + c = c & 0x80808080; // extract msbs + r = c >> 7; // convert to bool + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vcmpne4(unsigned int a, unsigned int b) + { + unsigned int r, c; + + #if __CUDA_ARCH__ >= 300 + r = vsetne4(a, b); + c = r << 8; // convert bool + r = c - r; // to mask + #else + // inspired by Alan Mycroft's null-byte detection algorithm: + // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080)) + r = a ^ b; // 0x00 if a == b + c = r | 0x80808080; // set msbs, to catch carry out + c = c - 0x01010101; // msb = 0, if r was 0x00 or 0x80 + c = r | c; // msb = 1, if r was not 0x00 + c = c & 0x80808080; // extract msbs + r = c >> 7; // convert + r = c - r; // msbs to + r = c | r; // mask + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vabsdiff4(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vabsdiff4.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #elif __CUDA_ARCH__ >= 200 + asm("vabsdiff.u32.u32.u32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vabsdiff.u32.u32.u32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vabsdiff.u32.u32.u32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vabsdiff.u32.u32.u32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int s; + s = vcmpge4(a, b); // mask = 0xff if a >= b + r = a ^ b; // + s = (r & s) ^ b; // select a when a >= b, else select b => max(a,b) + r = s ^ r; // select a when b >= a, else select b => 
min(a,b) + r = s - r; // |a - b| = max(a,b) - min(a,b); + #endif + + return r; + } + + static __device__ __forceinline__ unsigned int vmax4(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vmax4.u32.u32.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #elif __CUDA_ARCH__ >= 200 + asm("vmax.u32.u32.u32 %0.b0, %1.b0, %2.b0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vmax.u32.u32.u32 %0.b1, %1.b1, %2.b1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vmax.u32.u32.u32 %0.b2, %1.b2, %2.b2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vmax.u32.u32.u32 %0.b3, %1.b3, %2.b3, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int s; + s = vcmpge4(a, b); // mask = 0xff if a >= b + r = a & s; // select a when b >= a + s = b & ~s; // select b when b < a + r = r | s; // combine byte selections + #endif + + return r; // byte-wise unsigned maximum + } + + static __device__ __forceinline__ unsigned int vmin4(unsigned int a, unsigned int b) + { + unsigned int r = 0; + + #if __CUDA_ARCH__ >= 300 + asm("vmin4.u32.u32.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #elif __CUDA_ARCH__ >= 200 + asm("vmin.u32.u32.u32 %0.b0, %1.b0, %2.b0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vmin.u32.u32.u32 %0.b1, %1.b1, %2.b1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vmin.u32.u32.u32 %0.b2, %1.b2, %2.b2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + asm("vmin.u32.u32.u32 %0.b3, %1.b3, %2.b3, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r)); + #else + unsigned int s; + s = vcmpge4(b, a); // mask = 0xff if a >= b + r = a & s; // select a when b >= a + s = b & ~s; // select b when b < a + r = r | s; // combine byte selections + #endif + + return r; + } +}}} + +//! @endcond + +#endif // OPENCV_CUDA_SIMD_FUNCTIONS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/transform.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/transform.hpp new file mode 100644 index 00000000..42aa6ea1 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/transform.hpp @@ -0,0 +1,75 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_TRANSFORM_HPP +#define OPENCV_CUDA_TRANSFORM_HPP + +#include "common.hpp" +#include "utility.hpp" +#include "detail/transform_detail.hpp" + +/** @file + * @deprecated Use @ref cudev instead. + */ + +//! @cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + template + static inline void transform(PtrStepSz src, PtrStepSz dst, UnOp op, const Mask& mask, cudaStream_t stream) + { + typedef TransformFunctorTraits ft; + transform_detail::TransformDispatcher::cn == 1 && VecTraits::cn == 1 && ft::smart_shift != 1>::call(src, dst, op, mask, stream); + } + + template + static inline void transform(PtrStepSz src1, PtrStepSz src2, PtrStepSz dst, BinOp op, const Mask& mask, cudaStream_t stream) + { + typedef TransformFunctorTraits ft; + transform_detail::TransformDispatcher::cn == 1 && VecTraits::cn == 1 && VecTraits::cn == 1 && ft::smart_shift != 1>::call(src1, src2, dst, op, mask, stream); + } +}}} + +//! @endcond + +#endif // OPENCV_CUDA_TRANSFORM_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/type_traits.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/type_traits.hpp new file mode 100644 index 00000000..8b7a3fd1 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/type_traits.hpp @@ -0,0 +1,90 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
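Editorial note: transform.hpp above is the generic element-wise launcher that OpenCV's own CUDA primitives build on. The wrapper below is a hypothetical sketch, not part of the patch, and must live in a .cu file; plus_one is the sketch functor from the earlier aside, and WithOutMask is assumed to be OpenCV's no-op mask functor from utility.hpp.

#include <opencv2/core/cuda/transform.hpp>
#include <opencv2/core/cuda/utility.hpp>

// adds 1 to every pixel of an 8-bit single-channel image on the given stream
void add_one_8u(cv::cuda::PtrStepSz<uchar> src, cv::cuda::PtrStepSz<uchar> dst, cudaStream_t stream)
{
    cv::cuda::device::transform(src, dst, cv::cuda::device::plus_one(),
                                cv::cuda::device::WithOutMask(), stream);
}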
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_TYPE_TRAITS_HPP +#define OPENCV_CUDA_TYPE_TRAITS_HPP + +#include "detail/type_traits_detail.hpp" + +/** @file + * @deprecated Use @ref cudev instead. + */ + +//! @cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + template struct IsSimpleParameter + { + enum {value = type_traits_detail::IsIntegral::value || type_traits_detail::IsFloat::value || + type_traits_detail::PointerTraits::type>::value}; + }; + + template struct TypeTraits + { + typedef typename type_traits_detail::UnConst::type NonConstType; + typedef typename type_traits_detail::UnVolatile::type NonVolatileType; + typedef typename type_traits_detail::UnVolatile::type>::type UnqualifiedType; + typedef typename type_traits_detail::PointerTraits::type PointeeType; + typedef typename type_traits_detail::ReferenceTraits::type ReferredType; + + enum { isConst = type_traits_detail::UnConst::value }; + enum { isVolatile = type_traits_detail::UnVolatile::value }; + + enum { isReference = type_traits_detail::ReferenceTraits::value }; + enum { isPointer = type_traits_detail::PointerTraits::type>::value }; + + enum { isUnsignedInt = type_traits_detail::IsUnsignedIntegral::value }; + enum { isSignedInt = type_traits_detail::IsSignedIntergral::value }; + enum { isIntegral = type_traits_detail::IsIntegral::value }; + enum { isFloat = type_traits_detail::IsFloat::value }; + enum { isArith = isIntegral || isFloat }; + enum { isVec = type_traits_detail::IsVec::value }; + + typedef typename type_traits_detail::Select::value, + T, typename type_traits_detail::AddParameterType::type>::type ParameterType; + }; +}}} + +//! @endcond + +#endif // OPENCV_CUDA_TYPE_TRAITS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/utility.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/utility.hpp new file mode 100644 index 00000000..7f5db48a --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/utility.hpp @@ -0,0 +1,230 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_UTILITY_HPP +#define OPENCV_CUDA_UTILITY_HPP + +#include "saturate_cast.hpp" +#include "datamov_utils.hpp" + +/** @file + * @deprecated Use @ref cudev instead. + */ + +//! @cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + struct CV_EXPORTS ThrustAllocator + { + typedef uchar value_type; + virtual ~ThrustAllocator(); + virtual __device__ __host__ uchar* allocate(size_t numBytes) = 0; + virtual __device__ __host__ void deallocate(uchar* ptr, size_t numBytes) = 0; + static ThrustAllocator& getAllocator(); + static void setAllocator(ThrustAllocator* allocator); + }; + #define OPENCV_CUDA_LOG_WARP_SIZE (5) + #define OPENCV_CUDA_WARP_SIZE (1 << OPENCV_CUDA_LOG_WARP_SIZE) + #define OPENCV_CUDA_LOG_MEM_BANKS ((__CUDA_ARCH__ >= 200) ? 
5 : 4) // 32 banks on fermi, 16 on tesla + #define OPENCV_CUDA_MEM_BANKS (1 << OPENCV_CUDA_LOG_MEM_BANKS) + + /////////////////////////////////////////////////////////////////////////////// + // swap + + template void __device__ __host__ __forceinline__ swap(T& a, T& b) + { + const T temp = a; + a = b; + b = temp; + } + + /////////////////////////////////////////////////////////////////////////////// + // Mask Reader + + struct SingleMask + { + explicit __host__ __device__ __forceinline__ SingleMask(PtrStepb mask_) : mask(mask_) {} + __host__ __device__ __forceinline__ SingleMask(const SingleMask& mask_): mask(mask_.mask){} + + __device__ __forceinline__ bool operator()(int y, int x) const + { + return mask.ptr(y)[x] != 0; + } + + PtrStepb mask; + }; + + struct SingleMaskChannels + { + __host__ __device__ __forceinline__ SingleMaskChannels(PtrStepb mask_, int channels_) + : mask(mask_), channels(channels_) {} + __host__ __device__ __forceinline__ SingleMaskChannels(const SingleMaskChannels& mask_) + :mask(mask_.mask), channels(mask_.channels){} + + __device__ __forceinline__ bool operator()(int y, int x) const + { + return mask.ptr(y)[x / channels] != 0; + } + + PtrStepb mask; + int channels; + }; + + struct MaskCollection + { + explicit __host__ __device__ __forceinline__ MaskCollection(PtrStepb* maskCollection_) + : maskCollection(maskCollection_) {} + + __device__ __forceinline__ MaskCollection(const MaskCollection& masks_) + : maskCollection(masks_.maskCollection), curMask(masks_.curMask){} + + __device__ __forceinline__ void next() + { + curMask = *maskCollection++; + } + __device__ __forceinline__ void setMask(int z) + { + curMask = maskCollection[z]; + } + + __device__ __forceinline__ bool operator()(int y, int x) const + { + uchar val; + return curMask.data == 0 || (ForceGlob::Load(curMask.ptr(y), x, val), (val != 0)); + } + + const PtrStepb* maskCollection; + PtrStepb curMask; + }; + + struct WithOutMask + { + __host__ __device__ __forceinline__ WithOutMask(){} + __host__ __device__ __forceinline__ WithOutMask(const WithOutMask&){} + + __device__ __forceinline__ void next() const + { + } + __device__ __forceinline__ void setMask(int) const + { + } + + __device__ __forceinline__ bool operator()(int, int) const + { + return true; + } + + __device__ __forceinline__ bool operator()(int, int, int) const + { + return true; + } + + static __device__ __forceinline__ bool check(int, int) + { + return true; + } + + static __device__ __forceinline__ bool check(int, int, int) + { + return true; + } + }; + + /////////////////////////////////////////////////////////////////////////////// + // Solve linear system + + // solve 2x2 linear system Ax=b + template __device__ __forceinline__ bool solve2x2(const T A[2][2], const T b[2], T x[2]) + { + T det = A[0][0] * A[1][1] - A[1][0] * A[0][1]; + + if (det != 0) + { + double invdet = 1.0 / det; + + x[0] = saturate_cast(invdet * (b[0] * A[1][1] - b[1] * A[0][1])); + + x[1] = saturate_cast(invdet * (A[0][0] * b[1] - A[1][0] * b[0])); + + return true; + } + + return false; + } + + // solve 3x3 linear system Ax=b + template __device__ __forceinline__ bool solve3x3(const T A[3][3], const T b[3], T x[3]) + { + T det = A[0][0] * (A[1][1] * A[2][2] - A[1][2] * A[2][1]) + - A[0][1] * (A[1][0] * A[2][2] - A[1][2] * A[2][0]) + + A[0][2] * (A[1][0] * A[2][1] - A[1][1] * A[2][0]); + + if (det != 0) + { + double invdet = 1.0 / det; + + x[0] = saturate_cast(invdet * + (b[0] * (A[1][1] * A[2][2] - A[1][2] * A[2][1]) - + A[0][1] * (b[1] * A[2][2] - A[1][2] * b[2] ) + 
+ A[0][2] * (b[1] * A[2][1] - A[1][1] * b[2] ))); + + x[1] = saturate_cast(invdet * + (A[0][0] * (b[1] * A[2][2] - A[1][2] * b[2] ) - + b[0] * (A[1][0] * A[2][2] - A[1][2] * A[2][0]) + + A[0][2] * (A[1][0] * b[2] - b[1] * A[2][0]))); + + x[2] = saturate_cast(invdet * + (A[0][0] * (A[1][1] * b[2] - b[1] * A[2][1]) - + A[0][1] * (A[1][0] * b[2] - b[1] * A[2][0]) + + b[0] * (A[1][0] * A[2][1] - A[1][1] * A[2][0]))); + + return true; + } + + return false; + } +}}} // namespace cv { namespace cuda { namespace cudev + +//! @endcond + +#endif // OPENCV_CUDA_UTILITY_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/vec_distance.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/vec_distance.hpp new file mode 100644 index 00000000..ef6e5108 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/vec_distance.hpp @@ -0,0 +1,232 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_VEC_DISTANCE_HPP +#define OPENCV_CUDA_VEC_DISTANCE_HPP + +#include "reduce.hpp" +#include "functional.hpp" +#include "detail/vec_distance_detail.hpp" + +/** @file + * @deprecated Use @ref cudev instead. + */ + +//! 
@cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + template struct L1Dist + { + typedef int value_type; + typedef int result_type; + + __device__ __forceinline__ L1Dist() : mySum(0) {} + + __device__ __forceinline__ void reduceIter(int val1, int val2) + { + mySum = __sad(val1, val2, mySum); + } + + template __device__ __forceinline__ void reduceAll(int* smem, int tid) + { + reduce(smem, mySum, tid, plus()); + } + + __device__ __forceinline__ operator int() const + { + return mySum; + } + + int mySum; + }; + template <> struct L1Dist + { + typedef float value_type; + typedef float result_type; + + __device__ __forceinline__ L1Dist() : mySum(0.0f) {} + + __device__ __forceinline__ void reduceIter(float val1, float val2) + { + mySum += ::fabs(val1 - val2); + } + + template __device__ __forceinline__ void reduceAll(float* smem, int tid) + { + reduce(smem, mySum, tid, plus()); + } + + __device__ __forceinline__ operator float() const + { + return mySum; + } + + float mySum; + }; + + struct L2Dist + { + typedef float value_type; + typedef float result_type; + + __device__ __forceinline__ L2Dist() : mySum(0.0f) {} + + __device__ __forceinline__ void reduceIter(float val1, float val2) + { + float reg = val1 - val2; + mySum += reg * reg; + } + + template __device__ __forceinline__ void reduceAll(float* smem, int tid) + { + reduce(smem, mySum, tid, plus()); + } + + __device__ __forceinline__ operator float() const + { + return sqrtf(mySum); + } + + float mySum; + }; + + struct HammingDist + { + typedef int value_type; + typedef int result_type; + + __device__ __forceinline__ HammingDist() : mySum(0) {} + + __device__ __forceinline__ void reduceIter(int val1, int val2) + { + mySum += __popc(val1 ^ val2); + } + + template __device__ __forceinline__ void reduceAll(int* smem, int tid) + { + reduce(smem, mySum, tid, plus()); + } + + __device__ __forceinline__ operator int() const + { + return mySum; + } + + int mySum; + }; + + // calc distance between two vectors in global memory + template + __device__ void calcVecDiffGlobal(const T1* vec1, const T2* vec2, int len, Dist& dist, typename Dist::result_type* smem, int tid) + { + for (int i = tid; i < len; i += THREAD_DIM) + { + T1 val1; + ForceGlob::Load(vec1, i, val1); + + T2 val2; + ForceGlob::Load(vec2, i, val2); + + dist.reduceIter(val1, val2); + } + + dist.reduceAll(smem, tid); + } + + // calc distance between two vectors, first vector is cached in register or shared memory, second vector is in global memory + template + __device__ __forceinline__ void calcVecDiffCached(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, typename Dist::result_type* smem, int tid) + { + vec_distance_detail::VecDiffCachedCalculator::calc(vecCached, vecGlob, len, dist, tid); + + dist.reduceAll(smem, tid); + } + + // calc distance between two vectors in global memory + template struct VecDiffGlobal + { + explicit __device__ __forceinline__ VecDiffGlobal(const T1* vec1_, int = 0, void* = 0, int = 0, int = 0) + { + vec1 = vec1_; + } + + template + __device__ __forceinline__ void calc(const T2* vec2, int len, Dist& dist, typename Dist::result_type* smem, int tid) const + { + calcVecDiffGlobal(vec1, vec2, len, dist, smem, tid); + } + + const T1* vec1; + }; + + // calc distance between two vectors, first vector is cached in register memory, second vector is in global memory + template struct VecDiffCachedRegister + { + template __device__ __forceinline__ VecDiffCachedRegister(const T1* vec1, int len, U* smem, int glob_tid, int tid) + { + if 
(glob_tid < len) + smem[glob_tid] = vec1[glob_tid]; + __syncthreads(); + + U* vec1ValsPtr = vec1Vals; + + #pragma unroll + for (int i = tid; i < MAX_LEN; i += THREAD_DIM) + *vec1ValsPtr++ = smem[i]; + + __syncthreads(); + } + + template + __device__ __forceinline__ void calc(const T2* vec2, int len, Dist& dist, typename Dist::result_type* smem, int tid) const + { + calcVecDiffCached(vec1Vals, vec2, len, dist, smem, tid); + } + + U vec1Vals[MAX_LEN / THREAD_DIM]; + }; +}}} // namespace cv { namespace cuda { namespace cudev + +//! @endcond + +#endif // OPENCV_CUDA_VEC_DISTANCE_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/vec_math.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/vec_math.hpp new file mode 100644 index 00000000..9085b923 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/vec_math.hpp @@ -0,0 +1,930 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_VECMATH_HPP +#define OPENCV_CUDA_VECMATH_HPP + +#include "vec_traits.hpp" +#include "saturate_cast.hpp" + +/** @file + * @deprecated Use @ref cudev instead. + */ + +//! 
@cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + +// saturate_cast + +namespace vec_math_detail +{ + template struct SatCastHelper; + template struct SatCastHelper<1, VecD> + { + template static __device__ __forceinline__ VecD cast(const VecS& v) + { + typedef typename VecTraits::elem_type D; + return VecTraits::make(saturate_cast(v.x)); + } + }; + template struct SatCastHelper<2, VecD> + { + template static __device__ __forceinline__ VecD cast(const VecS& v) + { + typedef typename VecTraits::elem_type D; + return VecTraits::make(saturate_cast(v.x), saturate_cast(v.y)); + } + }; + template struct SatCastHelper<3, VecD> + { + template static __device__ __forceinline__ VecD cast(const VecS& v) + { + typedef typename VecTraits::elem_type D; + return VecTraits::make(saturate_cast(v.x), saturate_cast(v.y), saturate_cast(v.z)); + } + }; + template struct SatCastHelper<4, VecD> + { + template static __device__ __forceinline__ VecD cast(const VecS& v) + { + typedef typename VecTraits::elem_type D; + return VecTraits::make(saturate_cast(v.x), saturate_cast(v.y), saturate_cast(v.z), saturate_cast(v.w)); + } + }; + + template static __device__ __forceinline__ VecD saturate_cast_helper(const VecS& v) + { + return SatCastHelper::cn, VecD>::cast(v); + } +} + +template static __device__ __forceinline__ T saturate_cast(const uchar1& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const char1& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const ushort1& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const short1& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const uint1& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const int1& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const float1& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const double1& v) {return vec_math_detail::saturate_cast_helper(v);} + +template static __device__ __forceinline__ T saturate_cast(const uchar2& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const char2& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const ushort2& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const short2& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const uint2& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const int2& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const float2& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const double2& v) {return vec_math_detail::saturate_cast_helper(v);} + +template static __device__ __forceinline__ T saturate_cast(const uchar3& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const char3& v) {return 
vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const ushort3& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const short3& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const uint3& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const int3& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const float3& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const double3& v) {return vec_math_detail::saturate_cast_helper(v);} + +template static __device__ __forceinline__ T saturate_cast(const uchar4& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const char4& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const ushort4& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const short4& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const uint4& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const int4& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const float4& v) {return vec_math_detail::saturate_cast_helper(v);} +template static __device__ __forceinline__ T saturate_cast(const double4& v) {return vec_math_detail::saturate_cast_helper(v);} + +// unary operators + +#define CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(op, input_type, output_type) \ + __device__ __forceinline__ output_type ## 1 operator op(const input_type ## 1 & a) \ + { \ + return VecTraits::make(op (a.x)); \ + } \ + __device__ __forceinline__ output_type ## 2 operator op(const input_type ## 2 & a) \ + { \ + return VecTraits::make(op (a.x), op (a.y)); \ + } \ + __device__ __forceinline__ output_type ## 3 operator op(const input_type ## 3 & a) \ + { \ + return VecTraits::make(op (a.x), op (a.y), op (a.z)); \ + } \ + __device__ __forceinline__ output_type ## 4 operator op(const input_type ## 4 & a) \ + { \ + return VecTraits::make(op (a.x), op (a.y), op (a.z), op (a.w)); \ + } + +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(-, char, char) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(-, short, short) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(-, int, int) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(-, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(-, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, char, uchar) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, ushort, uchar) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, short, uchar) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, int, uchar) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, uint, uchar) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, float, uchar) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, double, uchar) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, char, char) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, ushort, ushort) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, short, short) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, int, int) +CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, uint, uint) + +#undef CV_CUDEV_IMPLEMENT_VEC_UNARY_OP + +// unary functions + +#define 
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(func_name, func, input_type, output_type) \ + __device__ __forceinline__ output_type ## 1 func_name(const input_type ## 1 & a) \ + { \ + return VecTraits::make(func (a.x)); \ + } \ + __device__ __forceinline__ output_type ## 2 func_name(const input_type ## 2 & a) \ + { \ + return VecTraits::make(func (a.x), func (a.y)); \ + } \ + __device__ __forceinline__ output_type ## 3 func_name(const input_type ## 3 & a) \ + { \ + return VecTraits::make(func (a.x), func (a.y), func (a.z)); \ + } \ + __device__ __forceinline__ output_type ## 4 func_name(const input_type ## 4 & a) \ + { \ + return VecTraits::make(func (a.x), func (a.y), func (a.z), func (a.w)); \ + } + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, /*::abs*/, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::abs, char, char) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, /*::abs*/, ushort, ushort) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::abs, short, short) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::abs, int, int) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, /*::abs*/, uint, uint) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::fabsf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::fabs, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrt, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::exp, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, int, float) 
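// --- Illustrative usage sketch (editorial annotation, not part of the upstream OpenCV header) ---
// The CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC instantiations above generate per-lane math
// functions; integer vectors are promoted to float vectors (e.g. sqrt of a uchar4 yields a float4).
// The helper name below is hypothetical and only shows how the generated overloads are called.
static __device__ __forceinline__ float4 sqrtOfBytes(const uchar4& v)
{
    return sqrt(v); // ::sqrtf applied to v.x, v.y, v.z, v.w, packed into a float4
}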
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::log, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sin, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cos, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tan, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asin, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, int, float) 
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acos, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atan, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinh, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::cosh, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanh, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinh, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, short, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acosh, double, double) + +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, char, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, short, 
float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, int, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, float, float) +CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanh, double, double) + +#undef CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC + +// binary operators (vec & vec) + +#define CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(op, input_type, output_type) \ + __device__ __forceinline__ output_type ## 1 operator op(const input_type ## 1 & a, const input_type ## 1 & b) \ + { \ + return VecTraits::make(a.x op b.x); \ + } \ + __device__ __forceinline__ output_type ## 2 operator op(const input_type ## 2 & a, const input_type ## 2 & b) \ + { \ + return VecTraits::make(a.x op b.x, a.y op b.y); \ + } \ + __device__ __forceinline__ output_type ## 3 operator op(const input_type ## 3 & a, const input_type ## 3 & b) \ + { \ + return VecTraits::make(a.x op b.x, a.y op b.y, a.z op b.z); \ + } \ + __device__ __forceinline__ output_type ## 4 operator op(const input_type ## 4 & a, const input_type ## 4 & b) \ + { \ + return VecTraits::make(a.x op b.x, a.y op b.y, a.z op b.z, a.w op b.w); \ + } + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, uchar, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, char, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, ushort, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, short, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, int, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, uint, uint) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, float, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, double, double) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, uchar, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, char, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, ushort, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, short, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, int, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, uint, uint) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, float, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, double, double) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, uchar, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, char, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, ushort, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, short, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, int, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, uint, uint) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, float, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, double, double) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, uchar, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, char, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, ushort, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, short, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, int, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, uint, uint) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, float, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, double, double) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, char, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, ushort, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, short, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, int, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, uint, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, float, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, double, uchar) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, char, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, ushort, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, short, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, int, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, uint, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, float, uchar) 
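// --- Illustrative usage sketch (editorial annotation, not part of the upstream OpenCV header) ---
// The vec & vec operators generated above work element-wise; the comparison forms
// return a uchar vector whose lanes are 1 where the predicate holds and 0 elsewhere.
// The helper name below is hypothetical.
static __device__ __forceinline__ uchar3 lanesThatDiffer(const float3& a, const float3& b)
{
    return (a != b); // per lane: (a.x != b.x), (a.y != b.y), (a.z != b.z), each 0 or 1
}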
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, double, uchar) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, char, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, ushort, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, short, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, int, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, uint, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, float, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, double, uchar) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, char, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, ushort, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, short, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, int, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, uint, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, float, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, double, uchar) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, char, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, ushort, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, short, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, int, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, uint, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, float, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, double, uchar) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, char, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, ushort, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, short, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, int, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, uint, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, float, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, double, uchar) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, char, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, ushort, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, short, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, int, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, uint, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, float, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, double, uchar) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, char, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, ushort, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, short, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, int, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, uint, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, float, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, double, uchar) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, char, char) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, ushort, ushort) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, short, short) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, int, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, uint, uint) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, char, char) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, ushort, ushort) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, short, short) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, int, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, uint, uint) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, char, char) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, ushort, ushort) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, short, short) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, int, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, uint, uint) + +#undef CV_CUDEV_IMPLEMENT_VEC_BINARY_OP + +// binary operators (vec & scalar) + +#define 
CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(op, input_type, scalar_type, output_type) \ + __device__ __forceinline__ output_type ## 1 operator op(const input_type ## 1 & a, scalar_type s) \ + { \ + return VecTraits::make(a.x op s); \ + } \ + __device__ __forceinline__ output_type ## 1 operator op(scalar_type s, const input_type ## 1 & b) \ + { \ + return VecTraits::make(s op b.x); \ + } \ + __device__ __forceinline__ output_type ## 2 operator op(const input_type ## 2 & a, scalar_type s) \ + { \ + return VecTraits::make(a.x op s, a.y op s); \ + } \ + __device__ __forceinline__ output_type ## 2 operator op(scalar_type s, const input_type ## 2 & b) \ + { \ + return VecTraits::make(s op b.x, s op b.y); \ + } \ + __device__ __forceinline__ output_type ## 3 operator op(const input_type ## 3 & a, scalar_type s) \ + { \ + return VecTraits::make(a.x op s, a.y op s, a.z op s); \ + } \ + __device__ __forceinline__ output_type ## 3 operator op(scalar_type s, const input_type ## 3 & b) \ + { \ + return VecTraits::make(s op b.x, s op b.y, s op b.z); \ + } \ + __device__ __forceinline__ output_type ## 4 operator op(const input_type ## 4 & a, scalar_type s) \ + { \ + return VecTraits::make(a.x op s, a.y op s, a.z op s, a.w op s); \ + } \ + __device__ __forceinline__ output_type ## 4 operator op(scalar_type s, const input_type ## 4 & b) \ + { \ + return VecTraits::make(s op b.x, s op b.y, s op b.z, s op b.w); \ + } + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uchar, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uchar, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uchar, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, char, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, char, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, char, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, ushort, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, ushort, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, ushort, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, short, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, short, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, short, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, int, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, int, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, int, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uint, uint, uint) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uint, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uint, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, float, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, float, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, double, double, double) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uchar, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uchar, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uchar, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, char, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, char, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, char, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, ushort, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, ushort, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, ushort, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, short, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, short, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, short, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, int, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, int, float, float) 
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, int, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uint, uint, uint) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uint, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uint, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, float, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, float, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, double, double, double) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uchar, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uchar, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uchar, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, char, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, char, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, char, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, ushort, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, ushort, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, ushort, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, short, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, short, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, short, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, int, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, int, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, int, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uint, uint, uint) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uint, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uint, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, float, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, float, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, double, double, double) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uchar, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uchar, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uchar, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, char, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, char, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, char, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, ushort, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, ushort, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, ushort, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, short, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, short, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, short, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, int, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, int, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, int, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uint, uint, uint) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uint, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uint, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, float, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, float, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, double, double, double) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, uchar, uchar, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, char, char, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, ushort, ushort, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, short, short, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, int, int, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, uint, uint, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, float, float, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, double, double, uchar) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, uchar, uchar, uchar) 
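// --- Illustrative usage sketch (editorial annotation, not part of the upstream OpenCV header) ---
// The vec & scalar operators broadcast the scalar to every lane; the result type follows
// the instantiations above (e.g. float * uchar3 -> float3, float3 + float -> float3).
// The helper name below is hypothetical.
static __device__ __forceinline__ float3 scaleAndShiftBytes(const uchar3& v)
{
    return 0.5f * v + 1.0f; // per lane: 0.5f * v.x + 1.0f, etc.
}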
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, char, char, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, ushort, ushort, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, short, short, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, int, int, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, uint, uint, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, float, float, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, double, double, uchar) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, uchar, uchar, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, char, char, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, ushort, ushort, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, short, short, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, int, int, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, uint, uint, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, float, float, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, double, double, uchar) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, uchar, uchar, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, char, char, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, ushort, ushort, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, short, short, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, int, int, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, uint, uint, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, float, float, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, double, double, uchar) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, uchar, uchar, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, char, char, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, ushort, ushort, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, short, short, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, int, int, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, uint, uint, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, float, float, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, double, double, uchar) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, uchar, uchar, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, char, char, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, ushort, ushort, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, short, short, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, int, int, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, uint, uint, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, float, float, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, double, double, uchar) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, uchar, uchar, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, char, char, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, ushort, ushort, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, short, short, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, int, int, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, uint, uint, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, float, float, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, double, double, uchar) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, uchar, uchar, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, char, char, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, ushort, ushort, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, short, short, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, int, int, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, uint, uint, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, float, float, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, double, double, uchar) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, uchar, uchar, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, char, char, char) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, ushort, ushort, 
ushort) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, short, short, short) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, int, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, uint, uint, uint) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, uchar, uchar, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, char, char, char) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, ushort, ushort, ushort) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, short, short, short) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, int, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, uint, uint, uint) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, uchar, uchar, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, char, char, char) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, ushort, ushort, ushort) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, short, short, short) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, int, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, uint, uint, uint) + +#undef CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP + +// binary function (vec & vec) + +#define CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(func_name, func, input_type, output_type) \ + __device__ __forceinline__ output_type ## 1 func_name(const input_type ## 1 & a, const input_type ## 1 & b) \ + { \ + return VecTraits::make(func (a.x, b.x)); \ + } \ + __device__ __forceinline__ output_type ## 2 func_name(const input_type ## 2 & a, const input_type ## 2 & b) \ + { \ + return VecTraits::make(func (a.x, b.x), func (a.y, b.y)); \ + } \ + __device__ __forceinline__ output_type ## 3 func_name(const input_type ## 3 & a, const input_type ## 3 & b) \ + { \ + return VecTraits::make(func (a.x, b.x), func (a.y, b.y), func (a.z, b.z)); \ + } \ + __device__ __forceinline__ output_type ## 4 func_name(const input_type ## 4 & a, const input_type ## 4 & b) \ + { \ + return VecTraits::make(func (a.x, b.x), func (a.y, b.y), func (a.z, b.z), func (a.w, b.w)); \ + } + +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, char, char) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, ushort, ushort) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, short, short) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, uint, uint) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, int, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::fmaxf, float, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::fmax, double, double) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, uchar, uchar) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, char, char) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, ushort, ushort) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, short, short) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, uint, uint) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, int, int) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::fminf, float, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::fmin, double, double) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, char, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, short, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, uint, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, int, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, float, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypot, double, double) + +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, uchar, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, char, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, 
::atan2f, ushort, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, short, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, uint, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, int, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, float, float) +CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2, double, double) + +#undef CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC + +// binary function (vec & scalar) + +#define CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(func_name, func, input_type, scalar_type, output_type) \ + __device__ __forceinline__ output_type ## 1 func_name(const input_type ## 1 & a, scalar_type s) \ + { \ + return VecTraits::make(func ((output_type) a.x, (output_type) s)); \ + } \ + __device__ __forceinline__ output_type ## 1 func_name(scalar_type s, const input_type ## 1 & b) \ + { \ + return VecTraits::make(func ((output_type) s, (output_type) b.x)); \ + } \ + __device__ __forceinline__ output_type ## 2 func_name(const input_type ## 2 & a, scalar_type s) \ + { \ + return VecTraits::make(func ((output_type) a.x, (output_type) s), func ((output_type) a.y, (output_type) s)); \ + } \ + __device__ __forceinline__ output_type ## 2 func_name(scalar_type s, const input_type ## 2 & b) \ + { \ + return VecTraits::make(func ((output_type) s, (output_type) b.x), func ((output_type) s, (output_type) b.y)); \ + } \ + __device__ __forceinline__ output_type ## 3 func_name(const input_type ## 3 & a, scalar_type s) \ + { \ + return VecTraits::make(func ((output_type) a.x, (output_type) s), func ((output_type) a.y, (output_type) s), func ((output_type) a.z, (output_type) s)); \ + } \ + __device__ __forceinline__ output_type ## 3 func_name(scalar_type s, const input_type ## 3 & b) \ + { \ + return VecTraits::make(func ((output_type) s, (output_type) b.x), func ((output_type) s, (output_type) b.y), func ((output_type) s, (output_type) b.z)); \ + } \ + __device__ __forceinline__ output_type ## 4 func_name(const input_type ## 4 & a, scalar_type s) \ + { \ + return VecTraits::make(func ((output_type) a.x, (output_type) s), func ((output_type) a.y, (output_type) s), func ((output_type) a.z, (output_type) s), func ((output_type) a.w, (output_type) s)); \ + } \ + __device__ __forceinline__ output_type ## 4 func_name(scalar_type s, const input_type ## 4 & b) \ + { \ + return VecTraits::make(func ((output_type) s, (output_type) b.x), func ((output_type) s, (output_type) b.y), func ((output_type) s, (output_type) b.z), func ((output_type) s, (output_type) b.w)); \ + } + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, uchar, uchar, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, uchar, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, uchar, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, char, char, char) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, char, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, char, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, ushort, ushort, ushort) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, ushort, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, ushort, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, short, short, short) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, short, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, short, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, uint, uint, uint) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, uint, float, float) 
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, uint, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, int, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, int, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, int, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, float, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, float, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, double, double, double) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, uchar, uchar, uchar) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, uchar, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, uchar, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, char, char, char) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, char, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, char, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, ushort, ushort, ushort) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, ushort, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, ushort, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, short, short, short) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, short, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, short, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, uint, uint, uint) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, uint, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, uint, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, int, int, int) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, int, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, int, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, float, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, float, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, double, double, double) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, uchar, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, uchar, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, char, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, char, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, ushort, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, ushort, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, short, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, short, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, uint, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, uint, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, int, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, int, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, float, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, float, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, double, double, double) + +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, uchar, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, uchar, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, char, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, char, double, double) 
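The CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC expansions in this block generate mixed vector/scalar overloads of max, min, hypot and atan2 that apply the named device function per channel and promote to the listed output type. A minimal sketch of using one such overload from a kernel follows; it is illustrative only and not part of the vendored header — the kernel name clamp_dark and the include path (assumed to be opencv2/core/cuda/vec_math.hpp, matching the include guard above) are assumptions.

// clamp_dark.cu — compile with nvcc; illustrative use of the generated overloads.
#include <opencv2/core/cuda/vec_math.hpp>

using namespace cv::cuda::device;

__global__ void clamp_dark(const uchar3* in, float3* out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
    {
        // vec-scalar overload generated above: each channel of the uchar3 goes
        // through ::fmaxf against the scalar, and the result is promoted to float3.
        out[i] = max(in[i], 16.0f);
    }
}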
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, ushort, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, ushort, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, short, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, short, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, uint, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, uint, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, int, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, int, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, float, float, float) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, float, double, double) +CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, double, double, double) + +#undef CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC + +}}} // namespace cv { namespace cuda { namespace device + +//! @endcond + +#endif // OPENCV_CUDA_VECMATH_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/vec_traits.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/vec_traits.hpp new file mode 100644 index 00000000..b5ff281a --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/vec_traits.hpp @@ -0,0 +1,288 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef OPENCV_CUDA_VEC_TRAITS_HPP +#define OPENCV_CUDA_VEC_TRAITS_HPP + +#include "common.hpp" + +/** @file + * @deprecated Use @ref cudev instead. + */ + +//! @cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + template struct TypeVec; + + struct __align__(8) uchar8 + { + uchar a0, a1, a2, a3, a4, a5, a6, a7; + }; + static __host__ __device__ __forceinline__ uchar8 make_uchar8(uchar a0, uchar a1, uchar a2, uchar a3, uchar a4, uchar a5, uchar a6, uchar a7) + { + uchar8 val = {a0, a1, a2, a3, a4, a5, a6, a7}; + return val; + } + struct __align__(8) char8 + { + schar a0, a1, a2, a3, a4, a5, a6, a7; + }; + static __host__ __device__ __forceinline__ char8 make_char8(schar a0, schar a1, schar a2, schar a3, schar a4, schar a5, schar a6, schar a7) + { + char8 val = {a0, a1, a2, a3, a4, a5, a6, a7}; + return val; + } + struct __align__(16) ushort8 + { + ushort a0, a1, a2, a3, a4, a5, a6, a7; + }; + static __host__ __device__ __forceinline__ ushort8 make_ushort8(ushort a0, ushort a1, ushort a2, ushort a3, ushort a4, ushort a5, ushort a6, ushort a7) + { + ushort8 val = {a0, a1, a2, a3, a4, a5, a6, a7}; + return val; + } + struct __align__(16) short8 + { + short a0, a1, a2, a3, a4, a5, a6, a7; + }; + static __host__ __device__ __forceinline__ short8 make_short8(short a0, short a1, short a2, short a3, short a4, short a5, short a6, short a7) + { + short8 val = {a0, a1, a2, a3, a4, a5, a6, a7}; + return val; + } + struct __align__(32) uint8 + { + uint a0, a1, a2, a3, a4, a5, a6, a7; + }; + static __host__ __device__ __forceinline__ uint8 make_uint8(uint a0, uint a1, uint a2, uint a3, uint a4, uint a5, uint a6, uint a7) + { + uint8 val = {a0, a1, a2, a3, a4, a5, a6, a7}; + return val; + } + struct __align__(32) int8 + { + int a0, a1, a2, a3, a4, a5, a6, a7; + }; + static __host__ __device__ __forceinline__ int8 make_int8(int a0, int a1, int a2, int a3, int a4, int a5, int a6, int a7) + { + int8 val = {a0, a1, a2, a3, a4, a5, a6, a7}; + return val; + } + struct __align__(32) float8 + { + float a0, a1, a2, a3, a4, a5, a6, a7; + }; + static __host__ __device__ __forceinline__ float8 make_float8(float a0, float a1, float a2, float a3, float a4, float a5, float a6, float a7) + { + float8 val = {a0, a1, a2, a3, a4, a5, a6, a7}; + return val; + } + struct double8 + { + double a0, a1, a2, a3, a4, a5, a6, a7; + }; + static __host__ __device__ __forceinline__ double8 make_double8(double a0, double a1, double a2, double a3, double a4, double a5, double a6, double a7) + { + double8 val = {a0, a1, a2, a3, a4, a5, a6, a7}; + return val; + } + +#define OPENCV_CUDA_IMPLEMENT_TYPE_VEC(type) \ + template<> struct TypeVec { typedef type vec_type; }; \ + template<> struct TypeVec { typedef type ## 1 vec_type; }; \ + template<> struct TypeVec { typedef type ## 2 vec_type; }; \ + template<> struct TypeVec { typedef type ## 2 vec_type; }; \ + template<> struct TypeVec { typedef type ## 3 vec_type; }; \ + template<> struct TypeVec { typedef type ## 3 vec_type; }; \ + template<> struct TypeVec { typedef type ## 4 vec_type; }; \ + template<> struct TypeVec { typedef type ## 4 vec_type; }; \ + template<> struct TypeVec { typedef type ## 8 vec_type; }; \ + template<> struct TypeVec { typedef type ## 8 vec_type; }; + + OPENCV_CUDA_IMPLEMENT_TYPE_VEC(uchar) + OPENCV_CUDA_IMPLEMENT_TYPE_VEC(char) + OPENCV_CUDA_IMPLEMENT_TYPE_VEC(ushort) + OPENCV_CUDA_IMPLEMENT_TYPE_VEC(short) + OPENCV_CUDA_IMPLEMENT_TYPE_VEC(int) + OPENCV_CUDA_IMPLEMENT_TYPE_VEC(uint) + OPENCV_CUDA_IMPLEMENT_TYPE_VEC(float) + 
OPENCV_CUDA_IMPLEMENT_TYPE_VEC(double) + + #undef OPENCV_CUDA_IMPLEMENT_TYPE_VEC + + template<> struct TypeVec { typedef schar vec_type; }; + template<> struct TypeVec { typedef char2 vec_type; }; + template<> struct TypeVec { typedef char3 vec_type; }; + template<> struct TypeVec { typedef char4 vec_type; }; + template<> struct TypeVec { typedef char8 vec_type; }; + + template<> struct TypeVec { typedef uchar vec_type; }; + template<> struct TypeVec { typedef uchar2 vec_type; }; + template<> struct TypeVec { typedef uchar3 vec_type; }; + template<> struct TypeVec { typedef uchar4 vec_type; }; + template<> struct TypeVec { typedef uchar8 vec_type; }; + + template struct VecTraits; + +#define OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(type) \ + template<> struct VecTraits \ + { \ + typedef type elem_type; \ + enum {cn=1}; \ + static __device__ __host__ __forceinline__ type all(type v) {return v;} \ + static __device__ __host__ __forceinline__ type make(type x) {return x;} \ + static __device__ __host__ __forceinline__ type make(const type* v) {return *v;} \ + }; \ + template<> struct VecTraits \ + { \ + typedef type elem_type; \ + enum {cn=1}; \ + static __device__ __host__ __forceinline__ type ## 1 all(type v) {return make_ ## type ## 1(v);} \ + static __device__ __host__ __forceinline__ type ## 1 make(type x) {return make_ ## type ## 1(x);} \ + static __device__ __host__ __forceinline__ type ## 1 make(const type* v) {return make_ ## type ## 1(*v);} \ + }; \ + template<> struct VecTraits \ + { \ + typedef type elem_type; \ + enum {cn=2}; \ + static __device__ __host__ __forceinline__ type ## 2 all(type v) {return make_ ## type ## 2(v, v);} \ + static __device__ __host__ __forceinline__ type ## 2 make(type x, type y) {return make_ ## type ## 2(x, y);} \ + static __device__ __host__ __forceinline__ type ## 2 make(const type* v) {return make_ ## type ## 2(v[0], v[1]);} \ + }; \ + template<> struct VecTraits \ + { \ + typedef type elem_type; \ + enum {cn=3}; \ + static __device__ __host__ __forceinline__ type ## 3 all(type v) {return make_ ## type ## 3(v, v, v);} \ + static __device__ __host__ __forceinline__ type ## 3 make(type x, type y, type z) {return make_ ## type ## 3(x, y, z);} \ + static __device__ __host__ __forceinline__ type ## 3 make(const type* v) {return make_ ## type ## 3(v[0], v[1], v[2]);} \ + }; \ + template<> struct VecTraits \ + { \ + typedef type elem_type; \ + enum {cn=4}; \ + static __device__ __host__ __forceinline__ type ## 4 all(type v) {return make_ ## type ## 4(v, v, v, v);} \ + static __device__ __host__ __forceinline__ type ## 4 make(type x, type y, type z, type w) {return make_ ## type ## 4(x, y, z, w);} \ + static __device__ __host__ __forceinline__ type ## 4 make(const type* v) {return make_ ## type ## 4(v[0], v[1], v[2], v[3]);} \ + }; \ + template<> struct VecTraits \ + { \ + typedef type elem_type; \ + enum {cn=8}; \ + static __device__ __host__ __forceinline__ type ## 8 all(type v) {return make_ ## type ## 8(v, v, v, v, v, v, v, v);} \ + static __device__ __host__ __forceinline__ type ## 8 make(type a0, type a1, type a2, type a3, type a4, type a5, type a6, type a7) {return make_ ## type ## 8(a0, a1, a2, a3, a4, a5, a6, a7);} \ + static __device__ __host__ __forceinline__ type ## 8 make(const type* v) {return make_ ## type ## 8(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);} \ + }; + + OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(uchar) + OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(ushort) + OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(short) + OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(int) + 
OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(uint) + OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(float) + OPENCV_CUDA_IMPLEMENT_VEC_TRAITS(double) + + #undef OPENCV_CUDA_IMPLEMENT_VEC_TRAITS + + template<> struct VecTraits + { + typedef char elem_type; + enum {cn=1}; + static __device__ __host__ __forceinline__ char all(char v) {return v;} + static __device__ __host__ __forceinline__ char make(char x) {return x;} + static __device__ __host__ __forceinline__ char make(const char* x) {return *x;} + }; + template<> struct VecTraits + { + typedef schar elem_type; + enum {cn=1}; + static __device__ __host__ __forceinline__ schar all(schar v) {return v;} + static __device__ __host__ __forceinline__ schar make(schar x) {return x;} + static __device__ __host__ __forceinline__ schar make(const schar* x) {return *x;} + }; + template<> struct VecTraits + { + typedef schar elem_type; + enum {cn=1}; + static __device__ __host__ __forceinline__ char1 all(schar v) {return make_char1(v);} + static __device__ __host__ __forceinline__ char1 make(schar x) {return make_char1(x);} + static __device__ __host__ __forceinline__ char1 make(const schar* v) {return make_char1(v[0]);} + }; + template<> struct VecTraits + { + typedef schar elem_type; + enum {cn=2}; + static __device__ __host__ __forceinline__ char2 all(schar v) {return make_char2(v, v);} + static __device__ __host__ __forceinline__ char2 make(schar x, schar y) {return make_char2(x, y);} + static __device__ __host__ __forceinline__ char2 make(const schar* v) {return make_char2(v[0], v[1]);} + }; + template<> struct VecTraits + { + typedef schar elem_type; + enum {cn=3}; + static __device__ __host__ __forceinline__ char3 all(schar v) {return make_char3(v, v, v);} + static __device__ __host__ __forceinline__ char3 make(schar x, schar y, schar z) {return make_char3(x, y, z);} + static __device__ __host__ __forceinline__ char3 make(const schar* v) {return make_char3(v[0], v[1], v[2]);} + }; + template<> struct VecTraits + { + typedef schar elem_type; + enum {cn=4}; + static __device__ __host__ __forceinline__ char4 all(schar v) {return make_char4(v, v, v, v);} + static __device__ __host__ __forceinline__ char4 make(schar x, schar y, schar z, schar w) {return make_char4(x, y, z, w);} + static __device__ __host__ __forceinline__ char4 make(const schar* v) {return make_char4(v[0], v[1], v[2], v[3]);} + }; + template<> struct VecTraits + { + typedef schar elem_type; + enum {cn=8}; + static __device__ __host__ __forceinline__ char8 all(schar v) {return make_char8(v, v, v, v, v, v, v, v);} + static __device__ __host__ __forceinline__ char8 make(schar a0, schar a1, schar a2, schar a3, schar a4, schar a5, schar a6, schar a7) {return make_char8(a0, a1, a2, a3, a4, a5, a6, a7);} + static __device__ __host__ __forceinline__ char8 make(const schar* v) {return make_char8(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);} + }; +}}} // namespace cv { namespace cuda { namespace cudev + +//! @endcond + +#endif // OPENCV_CUDA_VEC_TRAITS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/warp.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/warp.hpp new file mode 100644 index 00000000..8af7e6a2 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/warp.hpp @@ -0,0 +1,139 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
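vec_traits.hpp, which completes just above, supplies TypeVec (scalar type + channel count to CUDA vector type) and VecTraits (uniform construction of those vector types), so device code can be written generically over the number of channels. A minimal sketch follows, assuming the header is reachable as opencv2/core/cuda/vec_traits.hpp; the helper broadcast is a hypothetical illustration, not an OpenCV API.

// broadcast.cu — compile with nvcc; illustrative use of TypeVec/VecTraits.
#include <opencv2/core/cuda/vec_traits.hpp>

using namespace cv::cuda::device;

// Replicate a scalar into every channel of the vector type selected by TypeVec.
template <typename T, int cn>
__device__ typename TypeVec<T, cn>::vec_type broadcast(T v)
{
    // VecTraits<V>::all(v) fills all channels of V with v.
    return VecTraits<typename TypeVec<T, cn>::vec_type>::all(v);
}

// e.g. broadcast<uchar, 3>(255) produces make_uchar3(255, 255, 255).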
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_DEVICE_WARP_HPP +#define OPENCV_CUDA_DEVICE_WARP_HPP + +/** @file + * @deprecated Use @ref cudev instead. + */ + +//! @cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + struct Warp + { + enum + { + LOG_WARP_SIZE = 5, + WARP_SIZE = 1 << LOG_WARP_SIZE, + STRIDE = WARP_SIZE + }; + + /** \brief Returns the warp lane ID of the calling thread. 
*/ + static __device__ __forceinline__ unsigned int laneId() + { + unsigned int ret; + asm("mov.u32 %0, %%laneid;" : "=r"(ret) ); + return ret; + } + + template + static __device__ __forceinline__ void fill(It beg, It end, const T& value) + { + for(It t = beg + laneId(); t < end; t += STRIDE) + *t = value; + } + + template + static __device__ __forceinline__ OutIt copy(InIt beg, InIt end, OutIt out) + { + for(InIt t = beg + laneId(); t < end; t += STRIDE, out += STRIDE) + *out = *t; + return out; + } + + template + static __device__ __forceinline__ OutIt transform(InIt beg, InIt end, OutIt out, UnOp op) + { + for(InIt t = beg + laneId(); t < end; t += STRIDE, out += STRIDE) + *out = op(*t); + return out; + } + + template + static __device__ __forceinline__ OutIt transform(InIt1 beg1, InIt1 end1, InIt2 beg2, OutIt out, BinOp op) + { + unsigned int lane = laneId(); + + InIt1 t1 = beg1 + lane; + InIt2 t2 = beg2 + lane; + for(; t1 < end1; t1 += STRIDE, t2 += STRIDE, out += STRIDE) + *out = op(*t1, *t2); + return out; + } + + template + static __device__ __forceinline__ T reduce(volatile T *ptr, BinOp op) + { + const unsigned int lane = laneId(); + + if (lane < 16) + { + T partial = ptr[lane]; + + ptr[lane] = partial = op(partial, ptr[lane + 16]); + ptr[lane] = partial = op(partial, ptr[lane + 8]); + ptr[lane] = partial = op(partial, ptr[lane + 4]); + ptr[lane] = partial = op(partial, ptr[lane + 2]); + ptr[lane] = partial = op(partial, ptr[lane + 1]); + } + + return *ptr; + } + + template + static __device__ __forceinline__ void yota(OutIt beg, OutIt end, T value) + { + unsigned int lane = laneId(); + value += lane; + + for(OutIt t = beg + lane; t < end; t += STRIDE, value += STRIDE) + *t = value; + } + }; +}}} // namespace cv { namespace cuda { namespace cudev + +//! @endcond + +#endif /* OPENCV_CUDA_DEVICE_WARP_HPP */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/warp_reduce.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/warp_reduce.hpp new file mode 100644 index 00000000..530303d2 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/warp_reduce.hpp @@ -0,0 +1,76 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_WARP_REDUCE_HPP__ +#define OPENCV_CUDA_WARP_REDUCE_HPP__ + +/** @file + * @deprecated Use @ref cudev instead. + */ + +//! @cond IGNORED + +namespace cv { namespace cuda { namespace device +{ + template + __device__ __forceinline__ T warp_reduce(volatile T *ptr , const unsigned int tid = threadIdx.x) + { + const unsigned int lane = tid & 31; // index of thread in warp (0..31) + + if (lane < 16) + { + T partial = ptr[tid]; + + ptr[tid] = partial = partial + ptr[tid + 16]; + ptr[tid] = partial = partial + ptr[tid + 8]; + ptr[tid] = partial = partial + ptr[tid + 4]; + ptr[tid] = partial = partial + ptr[tid + 2]; + ptr[tid] = partial = partial + ptr[tid + 1]; + } + + return ptr[tid - lane]; + } +}}} // namespace cv { namespace cuda { namespace cudev { + +//! @endcond + +#endif /* OPENCV_CUDA_WARP_REDUCE_HPP__ */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/warp_shuffle.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/warp_shuffle.hpp new file mode 100644 index 00000000..0da54aee --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda/warp_shuffle.hpp @@ -0,0 +1,162 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
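warp_reduce(), defined in the header that completes just above, performs the classic shared-memory tree reduction across the 32 lanes of a warp (halving the active range 16, 8, 4, 2, 1) and returns the combined value from the first lane. A minimal sketch of a per-block sum built on it; row_sums is a hypothetical kernel, and the launch is assumed to use exactly 32 threads (one warp) per block.

// row_sums.cu — compile with nvcc; illustrative use of warp_reduce().
#include <opencv2/core/cuda/warp_reduce.hpp>

__global__ void row_sums(const float* data, float* sums, int width)
{
    __shared__ volatile float buf[32];   // one slot per lane of the single warp

    float acc = 0.f;
    for (int x = threadIdx.x; x < width; x += 32)
        acc += data[blockIdx.x * width + x];

    buf[threadIdx.x] = acc;
    // Fold the 32 partial sums down to buf[0] and store the block's total.
    sums[blockIdx.x] = cv::cuda::device::warp_reduce(buf);
}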
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CUDA_WARP_SHUFFLE_HPP +#define OPENCV_CUDA_WARP_SHUFFLE_HPP + +/** @file + * @deprecated Use @ref cudev instead. + */ + +//! @cond IGNORED + +namespace cv { namespace cuda { namespace device +{ +#if __CUDACC_VER_MAJOR__ >= 9 +# define __shfl(x, y, z) __shfl_sync(0xFFFFFFFFU, x, y, z) +# define __shfl_up(x, y, z) __shfl_up_sync(0xFFFFFFFFU, x, y, z) +# define __shfl_down(x, y, z) __shfl_down_sync(0xFFFFFFFFU, x, y, z) +#endif + template + __device__ __forceinline__ T shfl(T val, int srcLane, int width = warpSize) + { + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300 + return __shfl(val, srcLane, width); + #else + return T(); + #endif + } + __device__ __forceinline__ unsigned int shfl(unsigned int val, int srcLane, int width = warpSize) + { + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300 + return (unsigned int) __shfl((int) val, srcLane, width); + #else + return 0; + #endif + } + __device__ __forceinline__ double shfl(double val, int srcLane, int width = warpSize) + { + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300 + int lo = __double2loint(val); + int hi = __double2hiint(val); + + lo = __shfl(lo, srcLane, width); + hi = __shfl(hi, srcLane, width); + + return __hiloint2double(hi, lo); + #else + return 0.0; + #endif + } + + template + __device__ __forceinline__ T shfl_down(T val, unsigned int delta, int width = warpSize) + { + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300 + return __shfl_down(val, delta, width); + #else + return T(); + #endif + } + __device__ __forceinline__ unsigned int shfl_down(unsigned int val, unsigned int delta, int width = warpSize) + { + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300 + return (unsigned int) __shfl_down((int) val, delta, width); + #else + return 0; + #endif + } + __device__ __forceinline__ double shfl_down(double val, unsigned int delta, int width = warpSize) + { + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300 + int lo = __double2loint(val); + int hi = __double2hiint(val); + + lo = __shfl_down(lo, delta, width); + hi = __shfl_down(hi, delta, width); + + return __hiloint2double(hi, lo); + #else + return 0.0; + #endif + } + + template + __device__ __forceinline__ T shfl_up(T val, unsigned int delta, int width = warpSize) + { + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300 + return __shfl_up(val, delta, width); + #else + return T(); + #endif + } + __device__ __forceinline__ unsigned int shfl_up(unsigned int val, unsigned int delta, int width = warpSize) + { + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300 + return (unsigned int) __shfl_up((int) val, delta, width); + #else + return 0; + #endif + } + __device__ __forceinline__ double shfl_up(double val, unsigned int delta, int width = warpSize) + { + #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300 + int lo = __double2loint(val); + int hi = __double2hiint(val); + + lo = __shfl_up(lo, delta, width); + hi = __shfl_up(hi, delta, width); + + return __hiloint2double(hi, lo); + #else + return 0.0; + #endif + } +}}} + +# 
undef __shfl +# undef __shfl_up +# undef __shfl_down + +//! @endcond + +#endif // OPENCV_CUDA_WARP_SHUFFLE_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda_stream_accessor.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda_stream_accessor.hpp new file mode 100644 index 00000000..deaf356f --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda_stream_accessor.hpp @@ -0,0 +1,86 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_CUDA_STREAM_ACCESSOR_HPP +#define OPENCV_CORE_CUDA_STREAM_ACCESSOR_HPP + +#ifndef __cplusplus +# error cuda_stream_accessor.hpp header must be compiled as C++ +#endif + +/** @file cuda_stream_accessor.hpp + * This is only header file that depends on CUDA Runtime API. All other headers are independent. + */ + +#include +#include "opencv2/core/cuda.hpp" + +namespace cv +{ + namespace cuda + { + +//! @addtogroup cudacore_struct +//! @{ + + /** @brief Class that enables getting cudaStream_t from cuda::Stream + */ + struct StreamAccessor + { + CV_EXPORTS static cudaStream_t getStream(const Stream& stream); + CV_EXPORTS static Stream wrapStream(cudaStream_t stream); + }; + + /** @brief Class that enables getting cudaEvent_t from cuda::Event + */ + struct EventAccessor + { + CV_EXPORTS static cudaEvent_t getEvent(const Event& event); + CV_EXPORTS static Event wrapEvent(cudaEvent_t event); + }; + +//! 
@} + + } +} + +#endif /* OPENCV_CORE_CUDA_STREAM_ACCESSOR_HPP */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cuda_types.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda_types.hpp new file mode 100644 index 00000000..b33f0617 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cuda_types.hpp @@ -0,0 +1,144 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_CUDA_TYPES_HPP +#define OPENCV_CORE_CUDA_TYPES_HPP + +#ifndef __cplusplus +# error cuda_types.hpp header must be compiled as C++ +#endif + +#if defined(__OPENCV_BUILD) && defined(__clang__) +#pragma clang diagnostic ignored "-Winconsistent-missing-override" +#endif +#if defined(__OPENCV_BUILD) && defined(__GNUC__) && __GNUC__ >= 5 +#pragma GCC diagnostic ignored "-Wsuggest-override" +#endif + +/** @file + * @deprecated Use @ref cudev instead. + */ + +//! @cond IGNORED + +#ifdef __CUDACC__ + #define __CV_CUDA_HOST_DEVICE__ __host__ __device__ __forceinline__ +#else + #define __CV_CUDA_HOST_DEVICE__ +#endif + +namespace cv +{ + namespace cuda + { + + // Simple lightweight structures that encapsulates information about an image on device. + // It is intended to pass to nvcc-compiled code. 
GpuMat depends on headers that nvcc can't compile + + template struct DevPtr + { + typedef T elem_type; + typedef int index_type; + + enum { elem_size = sizeof(elem_type) }; + + T* data; + + __CV_CUDA_HOST_DEVICE__ DevPtr() : data(0) {} + __CV_CUDA_HOST_DEVICE__ DevPtr(T* data_) : data(data_) {} + + __CV_CUDA_HOST_DEVICE__ size_t elemSize() const { return elem_size; } + __CV_CUDA_HOST_DEVICE__ operator T*() { return data; } + __CV_CUDA_HOST_DEVICE__ operator const T*() const { return data; } + }; + + template struct PtrSz : public DevPtr + { + __CV_CUDA_HOST_DEVICE__ PtrSz() : size(0) {} + __CV_CUDA_HOST_DEVICE__ PtrSz(T* data_, size_t size_) : DevPtr(data_), size(size_) {} + + size_t size; + }; + + template struct PtrStep : public DevPtr + { + __CV_CUDA_HOST_DEVICE__ PtrStep() : step(0) {} + __CV_CUDA_HOST_DEVICE__ PtrStep(T* data_, size_t step_) : DevPtr(data_), step(step_) {} + + size_t step; + + __CV_CUDA_HOST_DEVICE__ T* ptr(int y = 0) { return ( T*)( ( char*)(((DevPtr*)this)->data) + y * step); } + __CV_CUDA_HOST_DEVICE__ const T* ptr(int y = 0) const { return (const T*)( (const char*)(((DevPtr*)this)->data) + y * step); } + + __CV_CUDA_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; } + __CV_CUDA_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; } + }; + + template struct PtrStepSz : public PtrStep + { + __CV_CUDA_HOST_DEVICE__ PtrStepSz() : cols(0), rows(0) {} + __CV_CUDA_HOST_DEVICE__ PtrStepSz(int rows_, int cols_, T* data_, size_t step_) + : PtrStep(data_, step_), cols(cols_), rows(rows_) {} + + template + explicit PtrStepSz(const PtrStepSz& d) : PtrStep((T*)d.data, d.step), cols(d.cols), rows(d.rows){} + + int cols; + int rows; + }; + + typedef PtrStepSz PtrStepSzb; + typedef PtrStepSz PtrStepSzus; + typedef PtrStepSz PtrStepSzf; + typedef PtrStepSz PtrStepSzi; + + typedef PtrStep PtrStepb; + typedef PtrStep PtrStepus; + typedef PtrStep PtrStepf; + typedef PtrStep PtrStepi; + + } +} + +//! @endcond + +#endif /* OPENCV_CORE_CUDA_TYPES_HPP */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cv_cpu_dispatch.h b/third_party/opencv/uos/sw_64/include/opencv2/core/cv_cpu_dispatch.h new file mode 100644 index 00000000..540fbb60 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cv_cpu_dispatch.h @@ -0,0 +1,350 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#if defined __OPENCV_BUILD \ + +#include "cv_cpu_config.h" +#include "cv_cpu_helper.h" + +#ifdef CV_CPU_DISPATCH_MODE +#define CV_CPU_OPTIMIZATION_NAMESPACE __CV_CAT(opt_, CV_CPU_DISPATCH_MODE) +#define CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN namespace __CV_CAT(opt_, CV_CPU_DISPATCH_MODE) { +#define CV_CPU_OPTIMIZATION_NAMESPACE_END } +#else +#define CV_CPU_OPTIMIZATION_NAMESPACE cpu_baseline +#define CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN namespace cpu_baseline { +#define CV_CPU_OPTIMIZATION_NAMESPACE_END } +#define CV_CPU_BASELINE_MODE 1 +#endif + + +#define __CV_CPU_DISPATCH_CHAIN_END(fn, args, mode, ...) /* done */ +#define __CV_CPU_DISPATCH(fn, args, mode, ...) __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__)) +#define __CV_CPU_DISPATCH_EXPAND(fn, args, ...) __CV_EXPAND(__CV_CPU_DISPATCH(fn, args, __VA_ARGS__)) +#define CV_CPU_DISPATCH(fn, args, ...) 
__CV_CPU_DISPATCH_EXPAND(fn, args, __VA_ARGS__, END) // expand macros + + +#if defined CV_ENABLE_INTRINSICS \ + && !defined CV_DISABLE_OPTIMIZATION \ + && !defined __CUDACC__ /* do not include SSE/AVX/NEON headers for NVCC compiler */ \ + +#ifdef CV_CPU_COMPILE_SSE2 +# include +# define CV_MMX 1 +# define CV_SSE 1 +# define CV_SSE2 1 +#endif +#ifdef CV_CPU_COMPILE_SSE3 +# include +# define CV_SSE3 1 +#endif +#ifdef CV_CPU_COMPILE_SSSE3 +# include +# define CV_SSSE3 1 +#endif +#ifdef CV_CPU_COMPILE_SSE4_1 +# include +# define CV_SSE4_1 1 +#endif +#ifdef CV_CPU_COMPILE_SSE4_2 +# include +# define CV_SSE4_2 1 +#endif +#ifdef CV_CPU_COMPILE_POPCNT +# ifdef _MSC_VER +# include +# if defined(_M_X64) +# define CV_POPCNT_U64 _mm_popcnt_u64 +# endif +# define CV_POPCNT_U32 _mm_popcnt_u32 +# else +# include +# if defined(__x86_64__) +# define CV_POPCNT_U64 __builtin_popcountll +# endif +# define CV_POPCNT_U32 __builtin_popcount +# endif +# define CV_POPCNT 1 +#endif +#ifdef CV_CPU_COMPILE_AVX +# include +# define CV_AVX 1 +#endif +#ifdef CV_CPU_COMPILE_FP16 +# if defined(__arm__) || defined(__aarch64__) || defined(_M_ARM) || defined(_M_ARM64) +# include +# else +# include +# endif +# define CV_FP16 1 +#endif +#ifdef CV_CPU_COMPILE_AVX2 +# include +# define CV_AVX2 1 +#endif +#ifdef CV_CPU_COMPILE_AVX_512F +# include +# define CV_AVX_512F 1 +#endif +#ifdef CV_CPU_COMPILE_AVX512_COMMON +# define CV_AVX512_COMMON 1 +# define CV_AVX_512CD 1 +#endif +#ifdef CV_CPU_COMPILE_AVX512_KNL +# define CV_AVX512_KNL 1 +# define CV_AVX_512ER 1 +# define CV_AVX_512PF 1 +#endif +#ifdef CV_CPU_COMPILE_AVX512_KNM +# define CV_AVX512_KNM 1 +# define CV_AVX_5124FMAPS 1 +# define CV_AVX_5124VNNIW 1 +# define CV_AVX_512VPOPCNTDQ 1 +#endif +#ifdef CV_CPU_COMPILE_AVX512_SKX +# define CV_AVX512_SKX 1 +# define CV_AVX_512VL 1 +# define CV_AVX_512BW 1 +# define CV_AVX_512DQ 1 +#endif +#ifdef CV_CPU_COMPILE_AVX512_CNL +# define CV_AVX512_CNL 1 +# define CV_AVX_512IFMA 1 +# define CV_AVX_512VBMI 1 +#endif +#ifdef CV_CPU_COMPILE_AVX512_CLX +# define CV_AVX512_CLX 1 +# define CV_AVX_512VNNI 1 +#endif +#ifdef CV_CPU_COMPILE_AVX512_ICL +# define CV_AVX512_ICL 1 +# undef CV_AVX_512IFMA +# define CV_AVX_512IFMA 1 +# undef CV_AVX_512VBMI +# define CV_AVX_512VBMI 1 +# undef CV_AVX_512VNNI +# define CV_AVX_512VNNI 1 +# define CV_AVX_512VBMI2 1 +# define CV_AVX_512BITALG 1 +# define CV_AVX_512VPOPCNTDQ 1 +#endif +#ifdef CV_CPU_COMPILE_FMA3 +# define CV_FMA3 1 +#endif + +#if defined _WIN32 && (defined(_M_ARM) || defined(_M_ARM64)) && (defined(CV_CPU_COMPILE_NEON) || !defined(_MSC_VER)) +# include +# include +# define CV_NEON 1 +#elif defined(__ARM_NEON__) || (defined (__ARM_NEON) && defined(__aarch64__)) +# include +# define CV_NEON 1 +#endif + +#if defined(__ARM_NEON__) || defined(__aarch64__) +# include +#endif + +#ifdef CV_CPU_COMPILE_VSX +# include +# undef vector +# undef pixel +# undef bool +# define CV_VSX 1 +#endif + +#ifdef CV_CPU_COMPILE_VSX3 +# define CV_VSX3 1 +#endif + +#ifdef CV_CPU_COMPILE_MSA +# include "hal/msa_macros.h" +# define CV_MSA 1 +#endif + +#ifdef __EMSCRIPTEN__ +# define CV_WASM_SIMD 1 +# include +#endif + +#endif // CV_ENABLE_INTRINSICS && !CV_DISABLE_OPTIMIZATION && !__CUDACC__ + +#if defined CV_CPU_COMPILE_AVX && !defined CV_CPU_BASELINE_COMPILE_AVX +struct VZeroUpperGuard { +#ifdef __GNUC__ + __attribute__((always_inline)) +#endif + inline VZeroUpperGuard() { _mm256_zeroupper(); } +#ifdef __GNUC__ + __attribute__((always_inline)) +#endif + inline ~VZeroUpperGuard() { _mm256_zeroupper(); } +}; +#define 
__CV_AVX_GUARD VZeroUpperGuard __vzeroupper_guard; CV_UNUSED(__vzeroupper_guard); +#endif + +#ifdef __CV_AVX_GUARD +#define CV_AVX_GUARD __CV_AVX_GUARD +#else +#define CV_AVX_GUARD +#endif + +#endif // __OPENCV_BUILD + + + +#if !defined __OPENCV_BUILD /* Compatibility code */ \ + && !defined __CUDACC__ /* do not include SSE/AVX/NEON headers for NVCC compiler */ +#if defined __SSE2__ || defined _M_X64 || (defined _M_IX86_FP && _M_IX86_FP >= 2) +# include +# define CV_MMX 1 +# define CV_SSE 1 +# define CV_SSE2 1 +#elif defined _WIN32 && (defined(_M_ARM) || defined(_M_ARM64)) && (defined(CV_CPU_COMPILE_NEON) || !defined(_MSC_VER)) +# include +# include +# define CV_NEON 1 +#elif defined(__ARM_NEON__) || (defined (__ARM_NEON) && defined(__aarch64__)) +# include +# define CV_NEON 1 +#elif defined(__VSX__) && defined(__PPC64__) && defined(__LITTLE_ENDIAN__) +# include +# undef vector +# undef pixel +# undef bool +# define CV_VSX 1 +#endif + +#ifdef __F16C__ +# include +# define CV_FP16 1 +#endif + +#endif // !__OPENCV_BUILD && !__CUDACC (Compatibility code) + + + +#ifndef CV_MMX +# define CV_MMX 0 +#endif +#ifndef CV_SSE +# define CV_SSE 0 +#endif +#ifndef CV_SSE2 +# define CV_SSE2 0 +#endif +#ifndef CV_SSE3 +# define CV_SSE3 0 +#endif +#ifndef CV_SSSE3 +# define CV_SSSE3 0 +#endif +#ifndef CV_SSE4_1 +# define CV_SSE4_1 0 +#endif +#ifndef CV_SSE4_2 +# define CV_SSE4_2 0 +#endif +#ifndef CV_POPCNT +# define CV_POPCNT 0 +#endif +#ifndef CV_AVX +# define CV_AVX 0 +#endif +#ifndef CV_FP16 +# define CV_FP16 0 +#endif +#ifndef CV_AVX2 +# define CV_AVX2 0 +#endif +#ifndef CV_FMA3 +# define CV_FMA3 0 +#endif +#ifndef CV_AVX_512F +# define CV_AVX_512F 0 +#endif +#ifndef CV_AVX_512BW +# define CV_AVX_512BW 0 +#endif +#ifndef CV_AVX_512CD +# define CV_AVX_512CD 0 +#endif +#ifndef CV_AVX_512DQ +# define CV_AVX_512DQ 0 +#endif +#ifndef CV_AVX_512ER +# define CV_AVX_512ER 0 +#endif +#ifndef CV_AVX_512IFMA +# define CV_AVX_512IFMA 0 +#endif +#define CV_AVX_512IFMA512 CV_AVX_512IFMA // deprecated +#ifndef CV_AVX_512PF +# define CV_AVX_512PF 0 +#endif +#ifndef CV_AVX_512VBMI +# define CV_AVX_512VBMI 0 +#endif +#ifndef CV_AVX_512VL +# define CV_AVX_512VL 0 +#endif +#ifndef CV_AVX_5124FMAPS +# define CV_AVX_5124FMAPS 0 +#endif +#ifndef CV_AVX_5124VNNIW +# define CV_AVX_5124VNNIW 0 +#endif +#ifndef CV_AVX_512VPOPCNTDQ +# define CV_AVX_512VPOPCNTDQ 0 +#endif +#ifndef CV_AVX_512VNNI +# define CV_AVX_512VNNI 0 +#endif +#ifndef CV_AVX_512VBMI2 +# define CV_AVX_512VBMI2 0 +#endif +#ifndef CV_AVX_512BITALG +# define CV_AVX_512BITALG 0 +#endif +#ifndef CV_AVX512_COMMON +# define CV_AVX512_COMMON 0 +#endif +#ifndef CV_AVX512_KNL +# define CV_AVX512_KNL 0 +#endif +#ifndef CV_AVX512_KNM +# define CV_AVX512_KNM 0 +#endif +#ifndef CV_AVX512_SKX +# define CV_AVX512_SKX 0 +#endif +#ifndef CV_AVX512_CNL +# define CV_AVX512_CNL 0 +#endif +#ifndef CV_AVX512_CLX +# define CV_AVX512_CLX 0 +#endif +#ifndef CV_AVX512_ICL +# define CV_AVX512_ICL 0 +#endif + +#ifndef CV_NEON +# define CV_NEON 0 +#endif + +#ifndef CV_VSX +# define CV_VSX 0 +#endif + +#ifndef CV_VSX3 +# define CV_VSX3 0 +#endif + +#ifndef CV_MSA +# define CV_MSA 0 +#endif + +#ifndef CV_WASM_SIMD +# define CV_WASM_SIMD 0 +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cv_cpu_helper.h b/third_party/opencv/uos/sw_64/include/opencv2/core/cv_cpu_helper.h new file mode 100644 index 00000000..aaa89ed4 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cv_cpu_helper.h @@ -0,0 +1,487 @@ +// AUTOGENERATED, DO NOT EDIT + +#if !defined 
CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_SSE +# define CV_TRY_SSE 1 +# define CV_CPU_FORCE_SSE 1 +# define CV_CPU_HAS_SUPPORT_SSE 1 +# define CV_CPU_CALL_SSE(fn, args) return (cpu_baseline::fn args) +# define CV_CPU_CALL_SSE_(fn, args) return (opt_SSE::fn args) +#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_SSE +# define CV_TRY_SSE 1 +# define CV_CPU_FORCE_SSE 0 +# define CV_CPU_HAS_SUPPORT_SSE (cv::checkHardwareSupport(CV_CPU_SSE)) +# define CV_CPU_CALL_SSE(fn, args) if (CV_CPU_HAS_SUPPORT_SSE) return (opt_SSE::fn args) +# define CV_CPU_CALL_SSE_(fn, args) if (CV_CPU_HAS_SUPPORT_SSE) return (opt_SSE::fn args) +#else +# define CV_TRY_SSE 0 +# define CV_CPU_FORCE_SSE 0 +# define CV_CPU_HAS_SUPPORT_SSE 0 +# define CV_CPU_CALL_SSE(fn, args) +# define CV_CPU_CALL_SSE_(fn, args) +#endif +#define __CV_CPU_DISPATCH_CHAIN_SSE(fn, args, mode, ...) CV_CPU_CALL_SSE(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__)) + +#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_SSE2 +# define CV_TRY_SSE2 1 +# define CV_CPU_FORCE_SSE2 1 +# define CV_CPU_HAS_SUPPORT_SSE2 1 +# define CV_CPU_CALL_SSE2(fn, args) return (cpu_baseline::fn args) +# define CV_CPU_CALL_SSE2_(fn, args) return (opt_SSE2::fn args) +#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_SSE2 +# define CV_TRY_SSE2 1 +# define CV_CPU_FORCE_SSE2 0 +# define CV_CPU_HAS_SUPPORT_SSE2 (cv::checkHardwareSupport(CV_CPU_SSE2)) +# define CV_CPU_CALL_SSE2(fn, args) if (CV_CPU_HAS_SUPPORT_SSE2) return (opt_SSE2::fn args) +# define CV_CPU_CALL_SSE2_(fn, args) if (CV_CPU_HAS_SUPPORT_SSE2) return (opt_SSE2::fn args) +#else +# define CV_TRY_SSE2 0 +# define CV_CPU_FORCE_SSE2 0 +# define CV_CPU_HAS_SUPPORT_SSE2 0 +# define CV_CPU_CALL_SSE2(fn, args) +# define CV_CPU_CALL_SSE2_(fn, args) +#endif +#define __CV_CPU_DISPATCH_CHAIN_SSE2(fn, args, mode, ...) CV_CPU_CALL_SSE2(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__)) + +#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_SSE3 +# define CV_TRY_SSE3 1 +# define CV_CPU_FORCE_SSE3 1 +# define CV_CPU_HAS_SUPPORT_SSE3 1 +# define CV_CPU_CALL_SSE3(fn, args) return (cpu_baseline::fn args) +# define CV_CPU_CALL_SSE3_(fn, args) return (opt_SSE3::fn args) +#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_SSE3 +# define CV_TRY_SSE3 1 +# define CV_CPU_FORCE_SSE3 0 +# define CV_CPU_HAS_SUPPORT_SSE3 (cv::checkHardwareSupport(CV_CPU_SSE3)) +# define CV_CPU_CALL_SSE3(fn, args) if (CV_CPU_HAS_SUPPORT_SSE3) return (opt_SSE3::fn args) +# define CV_CPU_CALL_SSE3_(fn, args) if (CV_CPU_HAS_SUPPORT_SSE3) return (opt_SSE3::fn args) +#else +# define CV_TRY_SSE3 0 +# define CV_CPU_FORCE_SSE3 0 +# define CV_CPU_HAS_SUPPORT_SSE3 0 +# define CV_CPU_CALL_SSE3(fn, args) +# define CV_CPU_CALL_SSE3_(fn, args) +#endif +#define __CV_CPU_DISPATCH_CHAIN_SSE3(fn, args, mode, ...) 
CV_CPU_CALL_SSE3(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__)) + +#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_SSSE3 +# define CV_TRY_SSSE3 1 +# define CV_CPU_FORCE_SSSE3 1 +# define CV_CPU_HAS_SUPPORT_SSSE3 1 +# define CV_CPU_CALL_SSSE3(fn, args) return (cpu_baseline::fn args) +# define CV_CPU_CALL_SSSE3_(fn, args) return (opt_SSSE3::fn args) +#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_SSSE3 +# define CV_TRY_SSSE3 1 +# define CV_CPU_FORCE_SSSE3 0 +# define CV_CPU_HAS_SUPPORT_SSSE3 (cv::checkHardwareSupport(CV_CPU_SSSE3)) +# define CV_CPU_CALL_SSSE3(fn, args) if (CV_CPU_HAS_SUPPORT_SSSE3) return (opt_SSSE3::fn args) +# define CV_CPU_CALL_SSSE3_(fn, args) if (CV_CPU_HAS_SUPPORT_SSSE3) return (opt_SSSE3::fn args) +#else +# define CV_TRY_SSSE3 0 +# define CV_CPU_FORCE_SSSE3 0 +# define CV_CPU_HAS_SUPPORT_SSSE3 0 +# define CV_CPU_CALL_SSSE3(fn, args) +# define CV_CPU_CALL_SSSE3_(fn, args) +#endif +#define __CV_CPU_DISPATCH_CHAIN_SSSE3(fn, args, mode, ...) CV_CPU_CALL_SSSE3(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__)) + +#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_SSE4_1 +# define CV_TRY_SSE4_1 1 +# define CV_CPU_FORCE_SSE4_1 1 +# define CV_CPU_HAS_SUPPORT_SSE4_1 1 +# define CV_CPU_CALL_SSE4_1(fn, args) return (cpu_baseline::fn args) +# define CV_CPU_CALL_SSE4_1_(fn, args) return (opt_SSE4_1::fn args) +#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_SSE4_1 +# define CV_TRY_SSE4_1 1 +# define CV_CPU_FORCE_SSE4_1 0 +# define CV_CPU_HAS_SUPPORT_SSE4_1 (cv::checkHardwareSupport(CV_CPU_SSE4_1)) +# define CV_CPU_CALL_SSE4_1(fn, args) if (CV_CPU_HAS_SUPPORT_SSE4_1) return (opt_SSE4_1::fn args) +# define CV_CPU_CALL_SSE4_1_(fn, args) if (CV_CPU_HAS_SUPPORT_SSE4_1) return (opt_SSE4_1::fn args) +#else +# define CV_TRY_SSE4_1 0 +# define CV_CPU_FORCE_SSE4_1 0 +# define CV_CPU_HAS_SUPPORT_SSE4_1 0 +# define CV_CPU_CALL_SSE4_1(fn, args) +# define CV_CPU_CALL_SSE4_1_(fn, args) +#endif +#define __CV_CPU_DISPATCH_CHAIN_SSE4_1(fn, args, mode, ...) CV_CPU_CALL_SSE4_1(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__)) + +#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_SSE4_2 +# define CV_TRY_SSE4_2 1 +# define CV_CPU_FORCE_SSE4_2 1 +# define CV_CPU_HAS_SUPPORT_SSE4_2 1 +# define CV_CPU_CALL_SSE4_2(fn, args) return (cpu_baseline::fn args) +# define CV_CPU_CALL_SSE4_2_(fn, args) return (opt_SSE4_2::fn args) +#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_SSE4_2 +# define CV_TRY_SSE4_2 1 +# define CV_CPU_FORCE_SSE4_2 0 +# define CV_CPU_HAS_SUPPORT_SSE4_2 (cv::checkHardwareSupport(CV_CPU_SSE4_2)) +# define CV_CPU_CALL_SSE4_2(fn, args) if (CV_CPU_HAS_SUPPORT_SSE4_2) return (opt_SSE4_2::fn args) +# define CV_CPU_CALL_SSE4_2_(fn, args) if (CV_CPU_HAS_SUPPORT_SSE4_2) return (opt_SSE4_2::fn args) +#else +# define CV_TRY_SSE4_2 0 +# define CV_CPU_FORCE_SSE4_2 0 +# define CV_CPU_HAS_SUPPORT_SSE4_2 0 +# define CV_CPU_CALL_SSE4_2(fn, args) +# define CV_CPU_CALL_SSE4_2_(fn, args) +#endif +#define __CV_CPU_DISPATCH_CHAIN_SSE4_2(fn, args, mode, ...) 
CV_CPU_CALL_SSE4_2(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__)) + +#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_POPCNT +# define CV_TRY_POPCNT 1 +# define CV_CPU_FORCE_POPCNT 1 +# define CV_CPU_HAS_SUPPORT_POPCNT 1 +# define CV_CPU_CALL_POPCNT(fn, args) return (cpu_baseline::fn args) +# define CV_CPU_CALL_POPCNT_(fn, args) return (opt_POPCNT::fn args) +#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_POPCNT +# define CV_TRY_POPCNT 1 +# define CV_CPU_FORCE_POPCNT 0 +# define CV_CPU_HAS_SUPPORT_POPCNT (cv::checkHardwareSupport(CV_CPU_POPCNT)) +# define CV_CPU_CALL_POPCNT(fn, args) if (CV_CPU_HAS_SUPPORT_POPCNT) return (opt_POPCNT::fn args) +# define CV_CPU_CALL_POPCNT_(fn, args) if (CV_CPU_HAS_SUPPORT_POPCNT) return (opt_POPCNT::fn args) +#else +# define CV_TRY_POPCNT 0 +# define CV_CPU_FORCE_POPCNT 0 +# define CV_CPU_HAS_SUPPORT_POPCNT 0 +# define CV_CPU_CALL_POPCNT(fn, args) +# define CV_CPU_CALL_POPCNT_(fn, args) +#endif +#define __CV_CPU_DISPATCH_CHAIN_POPCNT(fn, args, mode, ...) CV_CPU_CALL_POPCNT(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__)) + +#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_AVX +# define CV_TRY_AVX 1 +# define CV_CPU_FORCE_AVX 1 +# define CV_CPU_HAS_SUPPORT_AVX 1 +# define CV_CPU_CALL_AVX(fn, args) return (cpu_baseline::fn args) +# define CV_CPU_CALL_AVX_(fn, args) return (opt_AVX::fn args) +#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_AVX +# define CV_TRY_AVX 1 +# define CV_CPU_FORCE_AVX 0 +# define CV_CPU_HAS_SUPPORT_AVX (cv::checkHardwareSupport(CV_CPU_AVX)) +# define CV_CPU_CALL_AVX(fn, args) if (CV_CPU_HAS_SUPPORT_AVX) return (opt_AVX::fn args) +# define CV_CPU_CALL_AVX_(fn, args) if (CV_CPU_HAS_SUPPORT_AVX) return (opt_AVX::fn args) +#else +# define CV_TRY_AVX 0 +# define CV_CPU_FORCE_AVX 0 +# define CV_CPU_HAS_SUPPORT_AVX 0 +# define CV_CPU_CALL_AVX(fn, args) +# define CV_CPU_CALL_AVX_(fn, args) +#endif +#define __CV_CPU_DISPATCH_CHAIN_AVX(fn, args, mode, ...) CV_CPU_CALL_AVX(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__)) + +#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_FP16 +# define CV_TRY_FP16 1 +# define CV_CPU_FORCE_FP16 1 +# define CV_CPU_HAS_SUPPORT_FP16 1 +# define CV_CPU_CALL_FP16(fn, args) return (cpu_baseline::fn args) +# define CV_CPU_CALL_FP16_(fn, args) return (opt_FP16::fn args) +#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_FP16 +# define CV_TRY_FP16 1 +# define CV_CPU_FORCE_FP16 0 +# define CV_CPU_HAS_SUPPORT_FP16 (cv::checkHardwareSupport(CV_CPU_FP16)) +# define CV_CPU_CALL_FP16(fn, args) if (CV_CPU_HAS_SUPPORT_FP16) return (opt_FP16::fn args) +# define CV_CPU_CALL_FP16_(fn, args) if (CV_CPU_HAS_SUPPORT_FP16) return (opt_FP16::fn args) +#else +# define CV_TRY_FP16 0 +# define CV_CPU_FORCE_FP16 0 +# define CV_CPU_HAS_SUPPORT_FP16 0 +# define CV_CPU_CALL_FP16(fn, args) +# define CV_CPU_CALL_FP16_(fn, args) +#endif +#define __CV_CPU_DISPATCH_CHAIN_FP16(fn, args, mode, ...) 
CV_CPU_CALL_FP16(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__)) + +#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_AVX2 +# define CV_TRY_AVX2 1 +# define CV_CPU_FORCE_AVX2 1 +# define CV_CPU_HAS_SUPPORT_AVX2 1 +# define CV_CPU_CALL_AVX2(fn, args) return (cpu_baseline::fn args) +# define CV_CPU_CALL_AVX2_(fn, args) return (opt_AVX2::fn args) +#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_AVX2 +# define CV_TRY_AVX2 1 +# define CV_CPU_FORCE_AVX2 0 +# define CV_CPU_HAS_SUPPORT_AVX2 (cv::checkHardwareSupport(CV_CPU_AVX2)) +# define CV_CPU_CALL_AVX2(fn, args) if (CV_CPU_HAS_SUPPORT_AVX2) return (opt_AVX2::fn args) +# define CV_CPU_CALL_AVX2_(fn, args) if (CV_CPU_HAS_SUPPORT_AVX2) return (opt_AVX2::fn args) +#else +# define CV_TRY_AVX2 0 +# define CV_CPU_FORCE_AVX2 0 +# define CV_CPU_HAS_SUPPORT_AVX2 0 +# define CV_CPU_CALL_AVX2(fn, args) +# define CV_CPU_CALL_AVX2_(fn, args) +#endif +#define __CV_CPU_DISPATCH_CHAIN_AVX2(fn, args, mode, ...) CV_CPU_CALL_AVX2(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__)) + +#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_FMA3 +# define CV_TRY_FMA3 1 +# define CV_CPU_FORCE_FMA3 1 +# define CV_CPU_HAS_SUPPORT_FMA3 1 +# define CV_CPU_CALL_FMA3(fn, args) return (cpu_baseline::fn args) +# define CV_CPU_CALL_FMA3_(fn, args) return (opt_FMA3::fn args) +#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_FMA3 +# define CV_TRY_FMA3 1 +# define CV_CPU_FORCE_FMA3 0 +# define CV_CPU_HAS_SUPPORT_FMA3 (cv::checkHardwareSupport(CV_CPU_FMA3)) +# define CV_CPU_CALL_FMA3(fn, args) if (CV_CPU_HAS_SUPPORT_FMA3) return (opt_FMA3::fn args) +# define CV_CPU_CALL_FMA3_(fn, args) if (CV_CPU_HAS_SUPPORT_FMA3) return (opt_FMA3::fn args) +#else +# define CV_TRY_FMA3 0 +# define CV_CPU_FORCE_FMA3 0 +# define CV_CPU_HAS_SUPPORT_FMA3 0 +# define CV_CPU_CALL_FMA3(fn, args) +# define CV_CPU_CALL_FMA3_(fn, args) +#endif +#define __CV_CPU_DISPATCH_CHAIN_FMA3(fn, args, mode, ...) CV_CPU_CALL_FMA3(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__)) + +#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_AVX_512F +# define CV_TRY_AVX_512F 1 +# define CV_CPU_FORCE_AVX_512F 1 +# define CV_CPU_HAS_SUPPORT_AVX_512F 1 +# define CV_CPU_CALL_AVX_512F(fn, args) return (cpu_baseline::fn args) +# define CV_CPU_CALL_AVX_512F_(fn, args) return (opt_AVX_512F::fn args) +#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_AVX_512F +# define CV_TRY_AVX_512F 1 +# define CV_CPU_FORCE_AVX_512F 0 +# define CV_CPU_HAS_SUPPORT_AVX_512F (cv::checkHardwareSupport(CV_CPU_AVX_512F)) +# define CV_CPU_CALL_AVX_512F(fn, args) if (CV_CPU_HAS_SUPPORT_AVX_512F) return (opt_AVX_512F::fn args) +# define CV_CPU_CALL_AVX_512F_(fn, args) if (CV_CPU_HAS_SUPPORT_AVX_512F) return (opt_AVX_512F::fn args) +#else +# define CV_TRY_AVX_512F 0 +# define CV_CPU_FORCE_AVX_512F 0 +# define CV_CPU_HAS_SUPPORT_AVX_512F 0 +# define CV_CPU_CALL_AVX_512F(fn, args) +# define CV_CPU_CALL_AVX_512F_(fn, args) +#endif +#define __CV_CPU_DISPATCH_CHAIN_AVX_512F(fn, args, mode, ...) 
CV_CPU_CALL_AVX_512F(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__)) + +#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_AVX512_COMMON +# define CV_TRY_AVX512_COMMON 1 +# define CV_CPU_FORCE_AVX512_COMMON 1 +# define CV_CPU_HAS_SUPPORT_AVX512_COMMON 1 +# define CV_CPU_CALL_AVX512_COMMON(fn, args) return (cpu_baseline::fn args) +# define CV_CPU_CALL_AVX512_COMMON_(fn, args) return (opt_AVX512_COMMON::fn args) +#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_AVX512_COMMON +# define CV_TRY_AVX512_COMMON 1 +# define CV_CPU_FORCE_AVX512_COMMON 0 +# define CV_CPU_HAS_SUPPORT_AVX512_COMMON (cv::checkHardwareSupport(CV_CPU_AVX512_COMMON)) +# define CV_CPU_CALL_AVX512_COMMON(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_COMMON) return (opt_AVX512_COMMON::fn args) +# define CV_CPU_CALL_AVX512_COMMON_(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_COMMON) return (opt_AVX512_COMMON::fn args) +#else +# define CV_TRY_AVX512_COMMON 0 +# define CV_CPU_FORCE_AVX512_COMMON 0 +# define CV_CPU_HAS_SUPPORT_AVX512_COMMON 0 +# define CV_CPU_CALL_AVX512_COMMON(fn, args) +# define CV_CPU_CALL_AVX512_COMMON_(fn, args) +#endif +#define __CV_CPU_DISPATCH_CHAIN_AVX512_COMMON(fn, args, mode, ...) CV_CPU_CALL_AVX512_COMMON(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__)) + +#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_AVX512_KNL +# define CV_TRY_AVX512_KNL 1 +# define CV_CPU_FORCE_AVX512_KNL 1 +# define CV_CPU_HAS_SUPPORT_AVX512_KNL 1 +# define CV_CPU_CALL_AVX512_KNL(fn, args) return (cpu_baseline::fn args) +# define CV_CPU_CALL_AVX512_KNL_(fn, args) return (opt_AVX512_KNL::fn args) +#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_AVX512_KNL +# define CV_TRY_AVX512_KNL 1 +# define CV_CPU_FORCE_AVX512_KNL 0 +# define CV_CPU_HAS_SUPPORT_AVX512_KNL (cv::checkHardwareSupport(CV_CPU_AVX512_KNL)) +# define CV_CPU_CALL_AVX512_KNL(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_KNL) return (opt_AVX512_KNL::fn args) +# define CV_CPU_CALL_AVX512_KNL_(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_KNL) return (opt_AVX512_KNL::fn args) +#else +# define CV_TRY_AVX512_KNL 0 +# define CV_CPU_FORCE_AVX512_KNL 0 +# define CV_CPU_HAS_SUPPORT_AVX512_KNL 0 +# define CV_CPU_CALL_AVX512_KNL(fn, args) +# define CV_CPU_CALL_AVX512_KNL_(fn, args) +#endif +#define __CV_CPU_DISPATCH_CHAIN_AVX512_KNL(fn, args, mode, ...) 
CV_CPU_CALL_AVX512_KNL(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__)) + +#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_AVX512_KNM +# define CV_TRY_AVX512_KNM 1 +# define CV_CPU_FORCE_AVX512_KNM 1 +# define CV_CPU_HAS_SUPPORT_AVX512_KNM 1 +# define CV_CPU_CALL_AVX512_KNM(fn, args) return (cpu_baseline::fn args) +# define CV_CPU_CALL_AVX512_KNM_(fn, args) return (opt_AVX512_KNM::fn args) +#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_AVX512_KNM +# define CV_TRY_AVX512_KNM 1 +# define CV_CPU_FORCE_AVX512_KNM 0 +# define CV_CPU_HAS_SUPPORT_AVX512_KNM (cv::checkHardwareSupport(CV_CPU_AVX512_KNM)) +# define CV_CPU_CALL_AVX512_KNM(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_KNM) return (opt_AVX512_KNM::fn args) +# define CV_CPU_CALL_AVX512_KNM_(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_KNM) return (opt_AVX512_KNM::fn args) +#else +# define CV_TRY_AVX512_KNM 0 +# define CV_CPU_FORCE_AVX512_KNM 0 +# define CV_CPU_HAS_SUPPORT_AVX512_KNM 0 +# define CV_CPU_CALL_AVX512_KNM(fn, args) +# define CV_CPU_CALL_AVX512_KNM_(fn, args) +#endif +#define __CV_CPU_DISPATCH_CHAIN_AVX512_KNM(fn, args, mode, ...) CV_CPU_CALL_AVX512_KNM(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__)) + +#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_AVX512_SKX +# define CV_TRY_AVX512_SKX 1 +# define CV_CPU_FORCE_AVX512_SKX 1 +# define CV_CPU_HAS_SUPPORT_AVX512_SKX 1 +# define CV_CPU_CALL_AVX512_SKX(fn, args) return (cpu_baseline::fn args) +# define CV_CPU_CALL_AVX512_SKX_(fn, args) return (opt_AVX512_SKX::fn args) +#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_AVX512_SKX +# define CV_TRY_AVX512_SKX 1 +# define CV_CPU_FORCE_AVX512_SKX 0 +# define CV_CPU_HAS_SUPPORT_AVX512_SKX (cv::checkHardwareSupport(CV_CPU_AVX512_SKX)) +# define CV_CPU_CALL_AVX512_SKX(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_SKX) return (opt_AVX512_SKX::fn args) +# define CV_CPU_CALL_AVX512_SKX_(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_SKX) return (opt_AVX512_SKX::fn args) +#else +# define CV_TRY_AVX512_SKX 0 +# define CV_CPU_FORCE_AVX512_SKX 0 +# define CV_CPU_HAS_SUPPORT_AVX512_SKX 0 +# define CV_CPU_CALL_AVX512_SKX(fn, args) +# define CV_CPU_CALL_AVX512_SKX_(fn, args) +#endif +#define __CV_CPU_DISPATCH_CHAIN_AVX512_SKX(fn, args, mode, ...) 
CV_CPU_CALL_AVX512_SKX(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__)) + +#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_AVX512_CNL +# define CV_TRY_AVX512_CNL 1 +# define CV_CPU_FORCE_AVX512_CNL 1 +# define CV_CPU_HAS_SUPPORT_AVX512_CNL 1 +# define CV_CPU_CALL_AVX512_CNL(fn, args) return (cpu_baseline::fn args) +# define CV_CPU_CALL_AVX512_CNL_(fn, args) return (opt_AVX512_CNL::fn args) +#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_AVX512_CNL +# define CV_TRY_AVX512_CNL 1 +# define CV_CPU_FORCE_AVX512_CNL 0 +# define CV_CPU_HAS_SUPPORT_AVX512_CNL (cv::checkHardwareSupport(CV_CPU_AVX512_CNL)) +# define CV_CPU_CALL_AVX512_CNL(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_CNL) return (opt_AVX512_CNL::fn args) +# define CV_CPU_CALL_AVX512_CNL_(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_CNL) return (opt_AVX512_CNL::fn args) +#else +# define CV_TRY_AVX512_CNL 0 +# define CV_CPU_FORCE_AVX512_CNL 0 +# define CV_CPU_HAS_SUPPORT_AVX512_CNL 0 +# define CV_CPU_CALL_AVX512_CNL(fn, args) +# define CV_CPU_CALL_AVX512_CNL_(fn, args) +#endif +#define __CV_CPU_DISPATCH_CHAIN_AVX512_CNL(fn, args, mode, ...) CV_CPU_CALL_AVX512_CNL(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__)) + +#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_AVX512_CLX +# define CV_TRY_AVX512_CLX 1 +# define CV_CPU_FORCE_AVX512_CLX 1 +# define CV_CPU_HAS_SUPPORT_AVX512_CLX 1 +# define CV_CPU_CALL_AVX512_CLX(fn, args) return (cpu_baseline::fn args) +# define CV_CPU_CALL_AVX512_CLX_(fn, args) return (opt_AVX512_CLX::fn args) +#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_AVX512_CLX +# define CV_TRY_AVX512_CLX 1 +# define CV_CPU_FORCE_AVX512_CLX 0 +# define CV_CPU_HAS_SUPPORT_AVX512_CLX (cv::checkHardwareSupport(CV_CPU_AVX512_CLX)) +# define CV_CPU_CALL_AVX512_CLX(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_CLX) return (opt_AVX512_CLX::fn args) +# define CV_CPU_CALL_AVX512_CLX_(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_CLX) return (opt_AVX512_CLX::fn args) +#else +# define CV_TRY_AVX512_CLX 0 +# define CV_CPU_FORCE_AVX512_CLX 0 +# define CV_CPU_HAS_SUPPORT_AVX512_CLX 0 +# define CV_CPU_CALL_AVX512_CLX(fn, args) +# define CV_CPU_CALL_AVX512_CLX_(fn, args) +#endif +#define __CV_CPU_DISPATCH_CHAIN_AVX512_CLX(fn, args, mode, ...) 
CV_CPU_CALL_AVX512_CLX(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__)) + +#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_AVX512_ICL +# define CV_TRY_AVX512_ICL 1 +# define CV_CPU_FORCE_AVX512_ICL 1 +# define CV_CPU_HAS_SUPPORT_AVX512_ICL 1 +# define CV_CPU_CALL_AVX512_ICL(fn, args) return (cpu_baseline::fn args) +# define CV_CPU_CALL_AVX512_ICL_(fn, args) return (opt_AVX512_ICL::fn args) +#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_AVX512_ICL +# define CV_TRY_AVX512_ICL 1 +# define CV_CPU_FORCE_AVX512_ICL 0 +# define CV_CPU_HAS_SUPPORT_AVX512_ICL (cv::checkHardwareSupport(CV_CPU_AVX512_ICL)) +# define CV_CPU_CALL_AVX512_ICL(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_ICL) return (opt_AVX512_ICL::fn args) +# define CV_CPU_CALL_AVX512_ICL_(fn, args) if (CV_CPU_HAS_SUPPORT_AVX512_ICL) return (opt_AVX512_ICL::fn args) +#else +# define CV_TRY_AVX512_ICL 0 +# define CV_CPU_FORCE_AVX512_ICL 0 +# define CV_CPU_HAS_SUPPORT_AVX512_ICL 0 +# define CV_CPU_CALL_AVX512_ICL(fn, args) +# define CV_CPU_CALL_AVX512_ICL_(fn, args) +#endif +#define __CV_CPU_DISPATCH_CHAIN_AVX512_ICL(fn, args, mode, ...) CV_CPU_CALL_AVX512_ICL(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__)) + +#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_NEON +# define CV_TRY_NEON 1 +# define CV_CPU_FORCE_NEON 1 +# define CV_CPU_HAS_SUPPORT_NEON 1 +# define CV_CPU_CALL_NEON(fn, args) return (cpu_baseline::fn args) +# define CV_CPU_CALL_NEON_(fn, args) return (opt_NEON::fn args) +#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_NEON +# define CV_TRY_NEON 1 +# define CV_CPU_FORCE_NEON 0 +# define CV_CPU_HAS_SUPPORT_NEON (cv::checkHardwareSupport(CV_CPU_NEON)) +# define CV_CPU_CALL_NEON(fn, args) if (CV_CPU_HAS_SUPPORT_NEON) return (opt_NEON::fn args) +# define CV_CPU_CALL_NEON_(fn, args) if (CV_CPU_HAS_SUPPORT_NEON) return (opt_NEON::fn args) +#else +# define CV_TRY_NEON 0 +# define CV_CPU_FORCE_NEON 0 +# define CV_CPU_HAS_SUPPORT_NEON 0 +# define CV_CPU_CALL_NEON(fn, args) +# define CV_CPU_CALL_NEON_(fn, args) +#endif +#define __CV_CPU_DISPATCH_CHAIN_NEON(fn, args, mode, ...) CV_CPU_CALL_NEON(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__)) + +#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_MSA +# define CV_TRY_MSA 1 +# define CV_CPU_FORCE_MSA 1 +# define CV_CPU_HAS_SUPPORT_MSA 1 +# define CV_CPU_CALL_MSA(fn, args) return (cpu_baseline::fn args) +# define CV_CPU_CALL_MSA_(fn, args) return (opt_MSA::fn args) +#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_MSA +# define CV_TRY_MSA 1 +# define CV_CPU_FORCE_MSA 0 +# define CV_CPU_HAS_SUPPORT_MSA (cv::checkHardwareSupport(CV_CPU_MSA)) +# define CV_CPU_CALL_MSA(fn, args) if (CV_CPU_HAS_SUPPORT_MSA) return (opt_MSA::fn args) +# define CV_CPU_CALL_MSA_(fn, args) if (CV_CPU_HAS_SUPPORT_MSA) return (opt_MSA::fn args) +#else +# define CV_TRY_MSA 0 +# define CV_CPU_FORCE_MSA 0 +# define CV_CPU_HAS_SUPPORT_MSA 0 +# define CV_CPU_CALL_MSA(fn, args) +# define CV_CPU_CALL_MSA_(fn, args) +#endif +#define __CV_CPU_DISPATCH_CHAIN_MSA(fn, args, mode, ...) 
CV_CPU_CALL_MSA(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__)) + +#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_VSX +# define CV_TRY_VSX 1 +# define CV_CPU_FORCE_VSX 1 +# define CV_CPU_HAS_SUPPORT_VSX 1 +# define CV_CPU_CALL_VSX(fn, args) return (cpu_baseline::fn args) +# define CV_CPU_CALL_VSX_(fn, args) return (opt_VSX::fn args) +#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_VSX +# define CV_TRY_VSX 1 +# define CV_CPU_FORCE_VSX 0 +# define CV_CPU_HAS_SUPPORT_VSX (cv::checkHardwareSupport(CV_CPU_VSX)) +# define CV_CPU_CALL_VSX(fn, args) if (CV_CPU_HAS_SUPPORT_VSX) return (opt_VSX::fn args) +# define CV_CPU_CALL_VSX_(fn, args) if (CV_CPU_HAS_SUPPORT_VSX) return (opt_VSX::fn args) +#else +# define CV_TRY_VSX 0 +# define CV_CPU_FORCE_VSX 0 +# define CV_CPU_HAS_SUPPORT_VSX 0 +# define CV_CPU_CALL_VSX(fn, args) +# define CV_CPU_CALL_VSX_(fn, args) +#endif +#define __CV_CPU_DISPATCH_CHAIN_VSX(fn, args, mode, ...) CV_CPU_CALL_VSX(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__)) + +#if !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_COMPILE_VSX3 +# define CV_TRY_VSX3 1 +# define CV_CPU_FORCE_VSX3 1 +# define CV_CPU_HAS_SUPPORT_VSX3 1 +# define CV_CPU_CALL_VSX3(fn, args) return (cpu_baseline::fn args) +# define CV_CPU_CALL_VSX3_(fn, args) return (opt_VSX3::fn args) +#elif !defined CV_DISABLE_OPTIMIZATION && defined CV_ENABLE_INTRINSICS && defined CV_CPU_DISPATCH_COMPILE_VSX3 +# define CV_TRY_VSX3 1 +# define CV_CPU_FORCE_VSX3 0 +# define CV_CPU_HAS_SUPPORT_VSX3 (cv::checkHardwareSupport(CV_CPU_VSX3)) +# define CV_CPU_CALL_VSX3(fn, args) if (CV_CPU_HAS_SUPPORT_VSX3) return (opt_VSX3::fn args) +# define CV_CPU_CALL_VSX3_(fn, args) if (CV_CPU_HAS_SUPPORT_VSX3) return (opt_VSX3::fn args) +#else +# define CV_TRY_VSX3 0 +# define CV_CPU_FORCE_VSX3 0 +# define CV_CPU_HAS_SUPPORT_VSX3 0 +# define CV_CPU_CALL_VSX3(fn, args) +# define CV_CPU_CALL_VSX3_(fn, args) +#endif +#define __CV_CPU_DISPATCH_CHAIN_VSX3(fn, args, mode, ...) CV_CPU_CALL_VSX3(fn, args); __CV_EXPAND(__CV_CPU_DISPATCH_CHAIN_ ## mode(fn, args, __VA_ARGS__)) + +#define CV_CPU_CALL_BASELINE(fn, args) return (cpu_baseline::fn args) +#define __CV_CPU_DISPATCH_CHAIN_BASELINE(fn, args, mode, ...) CV_CPU_CALL_BASELINE(fn, args) /* last in sequence */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cvdef.h b/third_party/opencv/uos/sw_64/include/opencv2/core/cvdef.h new file mode 100644 index 00000000..6011b2a9 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cvdef.h @@ -0,0 +1,882 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Copyright (C) 2015, Itseez Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_CVDEF_H +#define OPENCV_CORE_CVDEF_H + +#include "opencv2/core/version.hpp" + +//! @addtogroup core_utils +//! @{ + +#ifdef OPENCV_INCLUDE_PORT_FILE // User-provided header file with custom platform configuration +#include OPENCV_INCLUDE_PORT_FILE +#endif + +#if !defined CV_DOXYGEN && !defined CV_IGNORE_DEBUG_BUILD_GUARD +#if (defined(_MSC_VER) && (defined(DEBUG) || defined(_DEBUG))) || \ + (defined(_GLIBCXX_DEBUG) || defined(_GLIBCXX_DEBUG_PEDANTIC)) +// Guard to prevent using of binary incompatible binaries / runtimes +// https://github.com/opencv/opencv/pull/9161 +#define CV__DEBUG_NS_BEGIN namespace debug_build_guard { +#define CV__DEBUG_NS_END } +namespace cv { namespace debug_build_guard { } using namespace debug_build_guard; } +#endif +#endif + +#ifndef CV__DEBUG_NS_BEGIN +#define CV__DEBUG_NS_BEGIN +#define CV__DEBUG_NS_END +#endif + + +#ifdef __OPENCV_BUILD +#include "cvconfig.h" +#endif + +#ifndef __CV_EXPAND +#define __CV_EXPAND(x) x +#endif + +#ifndef __CV_CAT +#define __CV_CAT__(x, y) x ## y +#define __CV_CAT_(x, y) __CV_CAT__(x, y) +#define __CV_CAT(x, y) __CV_CAT_(x, y) +#endif + +#define __CV_VA_NUM_ARGS_HELPER(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, N, ...) N +#define __CV_VA_NUM_ARGS(...) __CV_VA_NUM_ARGS_HELPER(__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) + +#ifdef CV_Func +// keep current value (through OpenCV port file) +#elif defined __GNUC__ || (defined (__cpluscplus) && (__cpluscplus >= 201103)) +#define CV_Func __func__ +#elif defined __clang__ && (__clang_minor__ * 100 + __clang_major__ >= 305) +#define CV_Func __func__ +#elif defined(__STDC_VERSION__) && (__STDC_VERSION >= 199901) +#define CV_Func __func__ +#elif defined _MSC_VER +#define CV_Func __FUNCTION__ +#elif defined(__INTEL_COMPILER) && (_INTEL_COMPILER >= 600) +#define CV_Func __FUNCTION__ +#elif defined __IBMCPP__ && __IBMCPP__ >=500 +#define CV_Func __FUNCTION__ +#elif defined __BORLAND__ && (__BORLANDC__ >= 0x550) +#define CV_Func __FUNC__ +#else +#define CV_Func "" +#endif + +//! 
@cond IGNORED + +//////////////// static assert ///////////////// +#define CVAUX_CONCAT_EXP(a, b) a##b +#define CVAUX_CONCAT(a, b) CVAUX_CONCAT_EXP(a,b) + +#if defined(__clang__) +# ifndef __has_extension +# define __has_extension __has_feature /* compatibility, for older versions of clang */ +# endif +# if __has_extension(cxx_static_assert) +# define CV_StaticAssert(condition, reason) static_assert((condition), reason " " #condition) +# elif __has_extension(c_static_assert) +# define CV_StaticAssert(condition, reason) _Static_assert((condition), reason " " #condition) +# endif +#elif defined(__GNUC__) +# if (defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L) +# define CV_StaticAssert(condition, reason) static_assert((condition), reason " " #condition) +# endif +#elif defined(_MSC_VER) +# if _MSC_VER >= 1600 /* MSVC 10 */ +# define CV_StaticAssert(condition, reason) static_assert((condition), reason " " #condition) +# endif +#endif +#ifndef CV_StaticAssert +# if !defined(__clang__) && defined(__GNUC__) && (__GNUC__*100 + __GNUC_MINOR__ > 302) +# define CV_StaticAssert(condition, reason) ({ extern int __attribute__((error("CV_StaticAssert: " reason " " #condition))) CV_StaticAssert(); ((condition) ? 0 : CV_StaticAssert()); }) +# else +namespace cv { + template struct CV_StaticAssert_failed; + template <> struct CV_StaticAssert_failed { enum { val = 1 }; }; + template struct CV_StaticAssert_test {}; +} +# define CV_StaticAssert(condition, reason)\ + typedef cv::CV_StaticAssert_test< sizeof(cv::CV_StaticAssert_failed< static_cast(condition) >) > CVAUX_CONCAT(CV_StaticAssert_failed_at_, __LINE__) +# endif +#endif + +// Suppress warning "-Wdeprecated-declarations" / C4996 +#if defined(_MSC_VER) + #define CV_DO_PRAGMA(x) __pragma(x) +#elif defined(__GNUC__) + #define CV_DO_PRAGMA(x) _Pragma (#x) +#else + #define CV_DO_PRAGMA(x) +#endif + +#ifdef _MSC_VER +#define CV_SUPPRESS_DEPRECATED_START \ + CV_DO_PRAGMA(warning(push)) \ + CV_DO_PRAGMA(warning(disable: 4996)) +#define CV_SUPPRESS_DEPRECATED_END CV_DO_PRAGMA(warning(pop)) +#elif defined (__clang__) || ((__GNUC__) && (__GNUC__*100 + __GNUC_MINOR__ > 405)) +#define CV_SUPPRESS_DEPRECATED_START \ + CV_DO_PRAGMA(GCC diagnostic push) \ + CV_DO_PRAGMA(GCC diagnostic ignored "-Wdeprecated-declarations") +#define CV_SUPPRESS_DEPRECATED_END CV_DO_PRAGMA(GCC diagnostic pop) +#else +#define CV_SUPPRESS_DEPRECATED_START +#define CV_SUPPRESS_DEPRECATED_END +#endif + +#define CV_UNUSED(name) (void)name + +#if defined __GNUC__ && !defined __EXCEPTIONS +#define CV_TRY +#define CV_CATCH(A, B) for (A B; false; ) +#define CV_CATCH_ALL if (false) +#define CV_THROW(A) abort() +#define CV_RETHROW() abort() +#else +#define CV_TRY try +#define CV_CATCH(A, B) catch(const A & B) +#define CV_CATCH_ALL catch(...) +#define CV_THROW(A) throw A +#define CV_RETHROW() throw +#endif + +//! 
@endcond + +// undef problematic defines sometimes defined by system headers (windows.h in particular) +#undef small +#undef min +#undef max +#undef abs +#undef Complex + +#if defined __cplusplus +#include +#else +#include +#endif + +#include "opencv2/core/hal/interface.h" + +#if defined __ICL +# define CV_ICC __ICL +#elif defined __ICC +# define CV_ICC __ICC +#elif defined __ECL +# define CV_ICC __ECL +#elif defined __ECC +# define CV_ICC __ECC +#elif defined __INTEL_COMPILER +# define CV_ICC __INTEL_COMPILER +#endif + +#ifndef CV_INLINE +# if defined __cplusplus +# define CV_INLINE static inline +# elif defined _MSC_VER +# define CV_INLINE __inline +# else +# define CV_INLINE static +# endif +#endif + +#ifndef CV_ALWAYS_INLINE +#if defined(__GNUC__) && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)) +#define CV_ALWAYS_INLINE inline __attribute__((always_inline)) +#elif defined(_MSC_VER) +#define CV_ALWAYS_INLINE __forceinline +#else +#define CV_ALWAYS_INLINE inline +#endif +#endif + +#if defined CV_DISABLE_OPTIMIZATION || (defined CV_ICC && !defined CV_ENABLE_UNROLLED) +# define CV_ENABLE_UNROLLED 0 +#else +# define CV_ENABLE_UNROLLED 1 +#endif + +#ifdef __GNUC__ +# define CV_DECL_ALIGNED(x) __attribute__ ((aligned (x))) +#elif defined _MSC_VER +# define CV_DECL_ALIGNED(x) __declspec(align(x)) +#else +# define CV_DECL_ALIGNED(x) +#endif + +/* CPU features and intrinsics support */ +#define CV_CPU_NONE 0 +#define CV_CPU_MMX 1 +#define CV_CPU_SSE 2 +#define CV_CPU_SSE2 3 +#define CV_CPU_SSE3 4 +#define CV_CPU_SSSE3 5 +#define CV_CPU_SSE4_1 6 +#define CV_CPU_SSE4_2 7 +#define CV_CPU_POPCNT 8 +#define CV_CPU_FP16 9 +#define CV_CPU_AVX 10 +#define CV_CPU_AVX2 11 +#define CV_CPU_FMA3 12 + +#define CV_CPU_AVX_512F 13 +#define CV_CPU_AVX_512BW 14 +#define CV_CPU_AVX_512CD 15 +#define CV_CPU_AVX_512DQ 16 +#define CV_CPU_AVX_512ER 17 +#define CV_CPU_AVX_512IFMA512 18 // deprecated +#define CV_CPU_AVX_512IFMA 18 +#define CV_CPU_AVX_512PF 19 +#define CV_CPU_AVX_512VBMI 20 +#define CV_CPU_AVX_512VL 21 +#define CV_CPU_AVX_512VBMI2 22 +#define CV_CPU_AVX_512VNNI 23 +#define CV_CPU_AVX_512BITALG 24 +#define CV_CPU_AVX_512VPOPCNTDQ 25 +#define CV_CPU_AVX_5124VNNIW 26 +#define CV_CPU_AVX_5124FMAPS 27 + +#define CV_CPU_NEON 100 + +#define CV_CPU_MSA 150 + +#define CV_CPU_VSX 200 +#define CV_CPU_VSX3 201 + +// CPU features groups +#define CV_CPU_AVX512_SKX 256 +#define CV_CPU_AVX512_COMMON 257 +#define CV_CPU_AVX512_KNL 258 +#define CV_CPU_AVX512_KNM 259 +#define CV_CPU_AVX512_CNL 260 +#define CV_CPU_AVX512_CLX 261 +#define CV_CPU_AVX512_ICL 262 + +// when adding to this list remember to update the following enum +#define CV_HARDWARE_MAX_FEATURE 512 + +/** @brief Available CPU features. 
+*/ +enum CpuFeatures { + CPU_MMX = 1, + CPU_SSE = 2, + CPU_SSE2 = 3, + CPU_SSE3 = 4, + CPU_SSSE3 = 5, + CPU_SSE4_1 = 6, + CPU_SSE4_2 = 7, + CPU_POPCNT = 8, + CPU_FP16 = 9, + CPU_AVX = 10, + CPU_AVX2 = 11, + CPU_FMA3 = 12, + + CPU_AVX_512F = 13, + CPU_AVX_512BW = 14, + CPU_AVX_512CD = 15, + CPU_AVX_512DQ = 16, + CPU_AVX_512ER = 17, + CPU_AVX_512IFMA512 = 18, // deprecated + CPU_AVX_512IFMA = 18, + CPU_AVX_512PF = 19, + CPU_AVX_512VBMI = 20, + CPU_AVX_512VL = 21, + CPU_AVX_512VBMI2 = 22, + CPU_AVX_512VNNI = 23, + CPU_AVX_512BITALG = 24, + CPU_AVX_512VPOPCNTDQ= 25, + CPU_AVX_5124VNNIW = 26, + CPU_AVX_5124FMAPS = 27, + + CPU_NEON = 100, + + CPU_MSA = 150, + + CPU_VSX = 200, + CPU_VSX3 = 201, + + CPU_AVX512_SKX = 256, //!< Skylake-X with AVX-512F/CD/BW/DQ/VL + CPU_AVX512_COMMON = 257, //!< Common instructions AVX-512F/CD for all CPUs that support AVX-512 + CPU_AVX512_KNL = 258, //!< Knights Landing with AVX-512F/CD/ER/PF + CPU_AVX512_KNM = 259, //!< Knights Mill with AVX-512F/CD/ER/PF/4FMAPS/4VNNIW/VPOPCNTDQ + CPU_AVX512_CNL = 260, //!< Cannon Lake with AVX-512F/CD/BW/DQ/VL/IFMA/VBMI + CPU_AVX512_CLX = 261, //!< Cascade Lake with AVX-512F/CD/BW/DQ/VL/VNNI + CPU_AVX512_ICL = 262, //!< Ice Lake with AVX-512F/CD/BW/DQ/VL/IFMA/VBMI/VNNI/VBMI2/BITALG/VPOPCNTDQ + + CPU_MAX_FEATURE = 512 // see CV_HARDWARE_MAX_FEATURE +}; + + +#include "cv_cpu_dispatch.h" + +#if !defined(CV_STRONG_ALIGNMENT) && defined(__arm__) && !(defined(__aarch64__) || defined(_M_ARM64)) +// int*, int64* should be propertly aligned pointers on ARMv7 +#define CV_STRONG_ALIGNMENT 1 +#endif +#if !defined(CV_STRONG_ALIGNMENT) +#define CV_STRONG_ALIGNMENT 0 +#endif + +/* fundamental constants */ +#define CV_PI 3.1415926535897932384626433832795 +#define CV_2PI 6.283185307179586476925286766559 +#define CV_LOG2 0.69314718055994530941723212145818 + +#if defined __ARM_FP16_FORMAT_IEEE \ + && !defined __CUDACC__ +# define CV_FP16_TYPE 1 +#else +# define CV_FP16_TYPE 0 +#endif + +typedef union Cv16suf +{ + short i; + ushort u; +#if CV_FP16_TYPE + __fp16 h; +#endif +} +Cv16suf; + +typedef union Cv32suf +{ + int i; + unsigned u; + float f; +} +Cv32suf; + +typedef union Cv64suf +{ + int64 i; + uint64 u; + double f; +} +Cv64suf; + +#ifndef OPENCV_ABI_COMPATIBILITY +#define OPENCV_ABI_COMPATIBILITY 300 +#endif + +#ifdef __OPENCV_BUILD +# define DISABLE_OPENCV_24_COMPATIBILITY +# define OPENCV_DISABLE_DEPRECATED_COMPATIBILITY +#endif + +#ifndef CV_EXPORTS +# if (defined _WIN32 || defined WINCE || defined __CYGWIN__) && defined(CVAPI_EXPORTS) +# define CV_EXPORTS __declspec(dllexport) +# elif defined __GNUC__ && __GNUC__ >= 4 && (defined(CVAPI_EXPORTS) || defined(__APPLE__)) +# define CV_EXPORTS __attribute__ ((visibility ("default"))) +# endif +#endif + +#ifndef CV_EXPORTS +# define CV_EXPORTS +#endif + +#ifdef _MSC_VER +# define CV_EXPORTS_TEMPLATE +#else +# define CV_EXPORTS_TEMPLATE CV_EXPORTS +#endif + +#ifndef CV_DEPRECATED +# if defined(__GNUC__) +# define CV_DEPRECATED __attribute__ ((deprecated)) +# elif defined(_MSC_VER) +# define CV_DEPRECATED __declspec(deprecated) +# else +# define CV_DEPRECATED +# endif +#endif + +#ifndef CV_DEPRECATED_EXTERNAL +# if defined(__OPENCV_BUILD) +# define CV_DEPRECATED_EXTERNAL /* nothing */ +# else +# define CV_DEPRECATED_EXTERNAL CV_DEPRECATED +# endif +#endif + + +#ifndef CV_EXTERN_C +# ifdef __cplusplus +# define CV_EXTERN_C extern "C" +# else +# define CV_EXTERN_C +# endif +#endif + +/* special informative macros for wrapper generators */ +#define CV_EXPORTS_W CV_EXPORTS +#define CV_EXPORTS_W_SIMPLE 
CV_EXPORTS +#define CV_EXPORTS_AS(synonym) CV_EXPORTS +#define CV_EXPORTS_W_MAP CV_EXPORTS +#define CV_IN_OUT +#define CV_OUT +#define CV_PROP +#define CV_PROP_RW +#define CV_WRAP +#define CV_WRAP_AS(synonym) + +/****************************************************************************************\ +* Matrix type (Mat) * +\****************************************************************************************/ + +#define CV_MAT_CN_MASK ((CV_CN_MAX - 1) << CV_CN_SHIFT) +#define CV_MAT_CN(flags) ((((flags) & CV_MAT_CN_MASK) >> CV_CN_SHIFT) + 1) +#define CV_MAT_TYPE_MASK (CV_DEPTH_MAX*CV_CN_MAX - 1) +#define CV_MAT_TYPE(flags) ((flags) & CV_MAT_TYPE_MASK) +#define CV_MAT_CONT_FLAG_SHIFT 14 +#define CV_MAT_CONT_FLAG (1 << CV_MAT_CONT_FLAG_SHIFT) +#define CV_IS_MAT_CONT(flags) ((flags) & CV_MAT_CONT_FLAG) +#define CV_IS_CONT_MAT CV_IS_MAT_CONT +#define CV_SUBMAT_FLAG_SHIFT 15 +#define CV_SUBMAT_FLAG (1 << CV_SUBMAT_FLAG_SHIFT) +#define CV_IS_SUBMAT(flags) ((flags) & CV_MAT_SUBMAT_FLAG) + +/** Size of each channel item, + 0x8442211 = 1000 0100 0100 0010 0010 0001 0001 ~ array of sizeof(arr_type_elem) */ +#define CV_ELEM_SIZE1(type) \ + ((((sizeof(size_t)<<28)|0x8442211) >> CV_MAT_DEPTH(type)*4) & 15) + +/** 0x3a50 = 11 10 10 01 01 00 00 ~ array of log2(sizeof(arr_type_elem)) */ +#define CV_ELEM_SIZE(type) \ + (CV_MAT_CN(type) << ((((sizeof(size_t)/4+1)*16384|0x3a50) >> CV_MAT_DEPTH(type)*2) & 3)) + +#ifndef MIN +# define MIN(a,b) ((a) > (b) ? (b) : (a)) +#endif + +#ifndef MAX +# define MAX(a,b) ((a) < (b) ? (b) : (a)) +#endif + +/****************************************************************************************\ +* static analysys * +\****************************************************************************************/ + +// In practice, some macro are not processed correctly (noreturn is not detected). +// We need to use simplified definition for them. 
+#ifndef CV_STATIC_ANALYSIS +# if defined(__KLOCWORK__) || defined(__clang_analyzer__) || defined(__COVERITY__) +# define CV_STATIC_ANALYSIS 1 +# endif +#else +# if defined(CV_STATIC_ANALYSIS) && !(__CV_CAT(1, CV_STATIC_ANALYSIS) == 1) // defined and not empty +# if 0 == CV_STATIC_ANALYSIS +# undef CV_STATIC_ANALYSIS +# endif +# endif +#endif + +/****************************************************************************************\ +* Thread sanitizer * +\****************************************************************************************/ +#ifndef CV_THREAD_SANITIZER +# if defined(__has_feature) +# if __has_feature(thread_sanitizer) +# define CV_THREAD_SANITIZER +# endif +# endif +#endif + +/****************************************************************************************\ +* exchange-add operation for atomic operations on reference counters * +\****************************************************************************************/ + +#ifdef CV_XADD + // allow to use user-defined macro +#elif defined __GNUC__ || defined __clang__ +# if defined __clang__ && __clang_major__ >= 3 && !defined __ANDROID__ && !defined __EMSCRIPTEN__ && !defined(__CUDACC__) && !defined __INTEL_COMPILER +# ifdef __ATOMIC_ACQ_REL +# define CV_XADD(addr, delta) __c11_atomic_fetch_add((_Atomic(int)*)(addr), delta, __ATOMIC_ACQ_REL) +# else +# define CV_XADD(addr, delta) __atomic_fetch_add((_Atomic(int)*)(addr), delta, 4) +# endif +# else +# if defined __ATOMIC_ACQ_REL && !defined __clang__ + // version for gcc >= 4.7 +# define CV_XADD(addr, delta) (int)__atomic_fetch_add((unsigned*)(addr), (unsigned)(delta), __ATOMIC_ACQ_REL) +# else +# define CV_XADD(addr, delta) (int)__sync_fetch_and_add((unsigned*)(addr), (unsigned)(delta)) +# endif +# endif +#elif defined _MSC_VER && !defined RC_INVOKED +# include +# define CV_XADD(addr, delta) (int)_InterlockedExchangeAdd((long volatile*)addr, delta) +#else + #ifdef OPENCV_FORCE_UNSAFE_XADD + CV_INLINE int CV_XADD(int* addr, int delta) { int tmp = *addr; *addr += delta; return tmp; } + #else + #error "OpenCV: can't define safe CV_XADD macro for current platform (unsupported). 
Define CV_XADD macro through custom port header (see OPENCV_INCLUDE_PORT_FILE)" + #endif +#endif + + +/****************************************************************************************\ +* CV_NORETURN attribute * +\****************************************************************************************/ + +#ifndef CV_NORETURN +# if defined(__GNUC__) +# define CV_NORETURN __attribute__((__noreturn__)) +# elif defined(_MSC_VER) && (_MSC_VER >= 1300) +# define CV_NORETURN __declspec(noreturn) +# else +# define CV_NORETURN /* nothing by default */ +# endif +#endif + +/****************************************************************************************\ +* CV_NODISCARD_STD attribute (C++17) * +* encourages the compiler to issue a warning if the return value is discarded * +\****************************************************************************************/ +#ifndef CV_NODISCARD_STD +# ifndef __has_cpp_attribute +// workaround preprocessor non-compliance https://reviews.llvm.org/D57851 +# define __has_cpp_attribute(__x) 0 +# endif +# if __has_cpp_attribute(nodiscard) +# define CV_NODISCARD_STD [[nodiscard]] +# elif __cplusplus >= 201703L +// available when compiler is C++17 compliant +# define CV_NODISCARD_STD [[nodiscard]] +# elif defined(_MSC_VER) && _MSC_VER >= 1911 && _MSVC_LANG >= 201703L +// available with VS2017 v15.3+ with /std:c++17 or higher; works on functions and classes +# define CV_NODISCARD_STD [[nodiscard]] +# elif defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 700) && (__cplusplus >= 201103L) +// available with GCC 7.0+; works on functions, works or silently fails on classes +# define CV_NODISCARD_STD [[nodiscard]] +# elif defined(__GNUC__) && (((__GNUC__ * 100) + __GNUC_MINOR__) >= 408) && (__cplusplus >= 201103L) +// available with GCC 4.8+ but it usually does nothing and can fail noisily -- therefore not used +// define CV_NODISCARD_STD [[gnu::warn_unused_result]] +# endif +#endif +#ifndef CV_NODISCARD_STD +# define CV_NODISCARD_STD /* nothing by default */ +#endif + + +/****************************************************************************************\ +* CV_NODISCARD attribute (deprecated, GCC only) * +* DONT USE: use instead the standard CV_NODISCARD_STD macro above * +* this legacy method silently fails to issue warning until some version * +* after gcc 6.3.0. Yet with gcc 7+ you can use the above standard method * +* which makes this method useless. Don't use it. 
* +* @deprecated use instead CV_NODISCARD_STD * +\****************************************************************************************/ +#ifndef CV_NODISCARD +# if defined(__GNUC__) +# define CV_NODISCARD __attribute__((__warn_unused_result__)) +# elif defined(__clang__) && defined(__has_attribute) +# if __has_attribute(__warn_unused_result__) +# define CV_NODISCARD __attribute__((__warn_unused_result__)) +# endif +# endif +#endif +#ifndef CV_NODISCARD +# define CV_NODISCARD /* nothing by default */ +#endif + + +/****************************************************************************************\ +* C++ 11 * +\****************************************************************************************/ +#ifndef CV_CXX11 +# if __cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1800) +# define CV_CXX11 1 +# endif +#else +# if CV_CXX11 == 0 +# undef CV_CXX11 +# endif +#endif + + +/****************************************************************************************\ +* C++ Move semantics * +\****************************************************************************************/ + +#ifndef CV_CXX_MOVE_SEMANTICS +# if __cplusplus >= 201103L || defined(__GXX_EXPERIMENTAL_CXX0X__) || (defined(_MSC_VER) && _MSC_VER >= 1600) +# define CV_CXX_MOVE_SEMANTICS 1 +# elif defined(__clang) +# if __has_feature(cxx_rvalue_references) +# define CV_CXX_MOVE_SEMANTICS 1 +# endif +# endif +#else +# if CV_CXX_MOVE_SEMANTICS == 0 +# undef CV_CXX_MOVE_SEMANTICS +# endif +#endif + +#ifdef CV_CXX_MOVE_SEMANTICS +#define CV_CXX_MOVE(x) std::move(x) +#else +#define CV_CXX_MOVE(x) (x) +#endif + + +/****************************************************************************************\ +* C++11 std::array * +\****************************************************************************************/ + +#ifndef CV_CXX_STD_ARRAY +# if __cplusplus >= 201103L || (defined(__cplusplus) && defined(_MSC_VER) && _MSC_VER >= 1900/*MSVS 2015*/) +# define CV_CXX_STD_ARRAY 1 +# include +# endif +#else +# if CV_CXX_STD_ARRAY == 0 +# undef CV_CXX_STD_ARRAY +# endif +#endif + + +/****************************************************************************************\ +* C++11 override / final * +\****************************************************************************************/ + +#ifndef CV_OVERRIDE +# ifdef CV_CXX11 +# define CV_OVERRIDE override +# endif +#endif +#ifndef CV_OVERRIDE +# define CV_OVERRIDE +#endif + +#ifndef CV_FINAL +# ifdef CV_CXX11 +# define CV_FINAL final +# endif +#endif +#ifndef CV_FINAL +# define CV_FINAL +#endif + +/****************************************************************************************\ +* C++11 noexcept * +\****************************************************************************************/ + +#ifndef CV_NOEXCEPT +# if __cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1900/*MSVS 2015*/) +# define CV_NOEXCEPT noexcept +# endif +#endif +#ifndef CV_NOEXCEPT +# define CV_NOEXCEPT +#endif + + + +// Integer types portatibility +#ifdef OPENCV_STDINT_HEADER +#include OPENCV_STDINT_HEADER +#elif defined(__cplusplus) +#if defined(_MSC_VER) && _MSC_VER < 1600 /* MSVS 2010 */ +namespace cv { +typedef signed char int8_t; +typedef unsigned char uint8_t; +typedef signed short int16_t; +typedef unsigned short uint16_t; +typedef signed int int32_t; +typedef unsigned int uint32_t; +typedef signed __int64 int64_t; +typedef unsigned __int64 uint64_t; +} +#elif defined(_MSC_VER) || __cplusplus >= 201103L +#include +namespace cv { +using std::int8_t; +using std::uint8_t; 
+using std::int16_t; +using std::uint16_t; +using std::int32_t; +using std::uint32_t; +using std::int64_t; +using std::uint64_t; +} +#else +#include +namespace cv { +typedef ::int8_t int8_t; +typedef ::uint8_t uint8_t; +typedef ::int16_t int16_t; +typedef ::uint16_t uint16_t; +typedef ::int32_t int32_t; +typedef ::uint32_t uint32_t; +typedef ::int64_t int64_t; +typedef ::uint64_t uint64_t; +} +#endif +#else // pure C +#include +#endif + +#ifdef __cplusplus +namespace cv +{ + +class float16_t +{ +public: +#if CV_FP16_TYPE + + float16_t() {} + explicit float16_t(float x) { h = (__fp16)x; } + operator float() const { return (float)h; } + static float16_t fromBits(ushort w) + { + Cv16suf u; + u.u = w; + float16_t result; + result.h = u.h; + return result; + } + static float16_t zero() + { + float16_t result; + result.h = (__fp16)0; + return result; + } + ushort bits() const + { + Cv16suf u; + u.h = h; + return u.u; + } +protected: + __fp16 h; + +#else + float16_t() {} + explicit float16_t(float x) + { + #if CV_FP16 + __m128 v = _mm_load_ss(&x); + w = (ushort)_mm_cvtsi128_si32(_mm_cvtps_ph(v, 0)); + #else + Cv32suf in; + in.f = x; + unsigned sign = in.u & 0x80000000; + in.u ^= sign; + + if( in.u >= 0x47800000 ) + w = (ushort)(in.u > 0x7f800000 ? 0x7e00 : 0x7c00); + else + { + if (in.u < 0x38800000) + { + in.f += 0.5f; + w = (ushort)(in.u - 0x3f000000); + } + else + { + unsigned t = in.u + 0xc8000fff; + w = (ushort)((t + ((in.u >> 13) & 1)) >> 13); + } + } + + w = (ushort)(w | (sign >> 16)); + #endif + } + + operator float() const + { + #if CV_FP16 + float f; + _mm_store_ss(&f, _mm_cvtph_ps(_mm_cvtsi32_si128(w))); + return f; + #else + Cv32suf out; + + unsigned t = ((w & 0x7fff) << 13) + 0x38000000; + unsigned sign = (w & 0x8000) << 16; + unsigned e = w & 0x7c00; + + out.u = t + (1 << 23); + out.u = (e >= 0x7c00 ? t + 0x38000000 : + e == 0 ? (out.f -= 6.103515625e-05f, out.u) : t) | sign; + return out.f; + #endif + } + + static float16_t fromBits(ushort b) + { + float16_t result; + result.w = b; + return result; + } + static float16_t zero() + { + float16_t result; + result.w = (ushort)0; + return result; + } + ushort bits() const { return w; } +protected: + ushort w; + +#endif +}; + +} +#endif + +//! @} + +#ifndef __cplusplus +#include "opencv2/core/fast_math.hpp" // define cvRound(double) +#endif + +#endif // OPENCV_CORE_CVDEF_H diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cvstd.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cvstd.hpp new file mode 100644 index 00000000..fbf6d317 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cvstd.hpp @@ -0,0 +1,1074 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_CVSTD_HPP +#define OPENCV_CORE_CVSTD_HPP + +#ifndef __cplusplus +# error cvstd.hpp header must be compiled as C++ +#endif + +#include "opencv2/core/cvdef.h" +#include +#include +#include + +#include + +// import useful primitives from stl +# include +# include +# include //for abs(int) +# include + +namespace cv +{ + static inline uchar abs(uchar a) { return a; } + static inline ushort abs(ushort a) { return a; } + static inline unsigned abs(unsigned a) { return a; } + static inline uint64 abs(uint64 a) { return a; } + + using std::min; + using std::max; + using std::abs; + using std::swap; + using std::sqrt; + using std::exp; + using std::pow; + using std::log; +} + +namespace cv { + +//! @addtogroup core_utils +//! @{ + +//////////////////////////// memory management functions //////////////////////////// + +/** @brief Allocates an aligned memory buffer. + +The function allocates the buffer of the specified size and returns it. When the buffer size is 16 +bytes or more, the returned buffer is aligned to 16 bytes. +@param bufSize Allocated buffer size. + */ +CV_EXPORTS void* fastMalloc(size_t bufSize); + +/** @brief Deallocates a memory buffer. + +The function deallocates the buffer allocated with fastMalloc . If NULL pointer is passed, the +function does nothing. C version of the function clears the pointer *pptr* to avoid problems with +double memory deallocation. +@param ptr Pointer to the allocated buffer. + */ +CV_EXPORTS void fastFree(void* ptr); + +/*! 
+ The STL-compliant memory Allocator based on cv::fastMalloc() and cv::fastFree() +*/ +template class Allocator +{ +public: + typedef _Tp value_type; + typedef value_type* pointer; + typedef const value_type* const_pointer; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef size_t size_type; + typedef ptrdiff_t difference_type; + template class rebind { typedef Allocator other; }; + + explicit Allocator() {} + ~Allocator() {} + explicit Allocator(Allocator const&) {} + template + explicit Allocator(Allocator const&) {} + + // address + pointer address(reference r) { return &r; } + const_pointer address(const_reference r) { return &r; } + + pointer allocate(size_type count, const void* =0) { return reinterpret_cast(fastMalloc(count * sizeof (_Tp))); } + void deallocate(pointer p, size_type) { fastFree(p); } + + void construct(pointer p, const _Tp& v) { new(static_cast(p)) _Tp(v); } + void destroy(pointer p) { p->~_Tp(); } + + size_type max_size() const { return cv::max(static_cast<_Tp>(-1)/sizeof(_Tp), 1); } +}; + +//! @} core_utils + +//! @cond IGNORED + +namespace detail +{ + +// Metafunction to avoid taking a reference to void. +template +struct RefOrVoid { typedef T& type; }; + +template<> +struct RefOrVoid{ typedef void type; }; + +template<> +struct RefOrVoid{ typedef const void type; }; + +template<> +struct RefOrVoid{ typedef volatile void type; }; + +template<> +struct RefOrVoid{ typedef const volatile void type; }; + +// This class would be private to Ptr, if it didn't have to be a non-template. +struct PtrOwner; + +} + +template +struct DefaultDeleter +{ + void operator () (Y* p) const; +}; + +//! @endcond + +//! @addtogroup core_basic +//! @{ + +/** @brief Template class for smart pointers with shared ownership + +A Ptr\ pretends to be a pointer to an object of type T. Unlike an ordinary pointer, however, the +object will be automatically cleaned up once all Ptr instances pointing to it are destroyed. + +Ptr is similar to boost::shared_ptr that is part of the Boost library +() and std::shared_ptr from +the [C++11](http://en.wikipedia.org/wiki/C++11) standard. + +This class provides the following advantages: +- Default constructor, copy constructor, and assignment operator for an arbitrary C++ class or C + structure. For some objects, like files, windows, mutexes, sockets, and others, a copy + constructor or an assignment operator are difficult to define. For some other objects, like + complex classifiers in OpenCV, copy constructors are absent and not easy to implement. Finally, + some of complex OpenCV and your own data structures may be written in C. However, copy + constructors and default constructors can simplify programming a lot. Besides, they are often + required (for example, by STL containers). By using a Ptr to such an object instead of the + object itself, you automatically get all of the necessary constructors and the assignment + operator. +- *O(1)* complexity of the above-mentioned operations. While some structures, like std::vector, + provide a copy constructor and an assignment operator, the operations may take a considerable + amount of time if the data structures are large. But if the structures are put into a Ptr, the + overhead is small and independent of the data size. +- Automatic and customizable cleanup, even for C structures. See the example below with FILE\*. +- Heterogeneous collections of objects. The standard STL and most other C++ and OpenCV containers + can store only objects of the same type and the same size. 
The classical solution to store + objects of different types in the same container is to store pointers to the base class (Base\*) + instead but then you lose the automatic memory management. Again, by using Ptr\ instead + of raw pointers, you can solve the problem. + +A Ptr is said to *own* a pointer - that is, for each Ptr there is a pointer that will be deleted +once all Ptr instances that own it are destroyed. The owned pointer may be null, in which case +nothing is deleted. Each Ptr also *stores* a pointer. The stored pointer is the pointer the Ptr +pretends to be; that is, the one you get when you use Ptr::get or the conversion to T\*. It's +usually the same as the owned pointer, but if you use casts or the general shared-ownership +constructor, the two may diverge: the Ptr will still own the original pointer, but will itself point +to something else. + +The owned pointer is treated as a black box. The only thing Ptr needs to know about it is how to +delete it. This knowledge is encapsulated in the *deleter* - an auxiliary object that is associated +with the owned pointer and shared between all Ptr instances that own it. The default deleter is an +instance of DefaultDeleter, which uses the standard C++ delete operator; as such it will work with +any pointer allocated with the standard new operator. + +However, if the pointer must be deleted in a different way, you must specify a custom deleter upon +Ptr construction. A deleter is simply a callable object that accepts the pointer as its sole +argument. For example, if you want to wrap FILE, you may do so as follows: +@code + Ptr f(fopen("myfile.txt", "w"), fclose); + if(!f) throw ...; + fprintf(f, ....); + ... + // the file will be closed automatically by f's destructor. +@endcode +Alternatively, if you want all pointers of a particular type to be deleted the same way, you can +specialize DefaultDeleter::operator() for that type, like this: +@code + namespace cv { + template<> void DefaultDeleter::operator ()(FILE * obj) const + { + fclose(obj); + } + } +@endcode +For convenience, the following types from the OpenCV C API already have such a specialization that +calls the appropriate release function: +- CvCapture +- CvFileStorage +- CvHaarClassifierCascade +- CvMat +- CvMatND +- CvMemStorage +- CvSparseMat +- CvVideoWriter +- IplImage +@note The shared ownership mechanism is implemented with reference counting. As such, cyclic +ownership (e.g. when object a contains a Ptr to object b, which contains a Ptr to object a) will +lead to all involved objects never being cleaned up. Avoid such situations. +@note It is safe to concurrently read (but not write) a Ptr instance from multiple threads and +therefore it is normally safe to use it in multi-threaded applications. The same is true for Mat and +other C++ OpenCV classes that use internal reference counts. +*/ +template +struct Ptr +{ + /** Generic programming support. */ + typedef T element_type; + + /** The default constructor creates a null Ptr - one that owns and stores a null pointer. + */ + Ptr(); + + /** + If p is null, these are equivalent to the default constructor. + Otherwise, these constructors assume ownership of p - that is, the created Ptr owns and stores p + and assumes it is the sole owner of it. Don't use them if p is already owned by another Ptr, or + else p will get deleted twice. + With the first constructor, DefaultDeleter\() becomes the associated deleter (so p will + eventually be deleted with the standard delete operator). 
Y must be a complete type at the point + of invocation. + With the second constructor, d becomes the associated deleter. + Y\* must be convertible to T\*. + @param p Pointer to own. + @note It is often easier to use makePtr instead. + */ + template +#ifdef DISABLE_OPENCV_24_COMPATIBILITY + explicit +#endif + Ptr(Y* p); + + /** @overload + @param d Deleter to use for the owned pointer. + @param p Pointer to own. + */ + template + Ptr(Y* p, D d); + + /** + These constructors create a Ptr that shares ownership with another Ptr - that is, own the same + pointer as o. + With the first two, the same pointer is stored, as well; for the second, Y\* must be convertible + to T\*. + With the third, p is stored, and Y may be any type. This constructor allows to have completely + unrelated owned and stored pointers, and should be used with care to avoid confusion. A relatively + benign use is to create a non-owning Ptr, like this: + @code + ptr = Ptr(Ptr(), dont_delete_me); // owns nothing; will not delete the pointer. + @endcode + @param o Ptr to share ownership with. + */ + Ptr(const Ptr& o); + + /** @overload + @param o Ptr to share ownership with. + */ + template + Ptr(const Ptr& o); + + /** @overload + @param o Ptr to share ownership with. + @param p Pointer to store. + */ + template + Ptr(const Ptr& o, T* p); + + /** The destructor is equivalent to calling Ptr::release. */ + ~Ptr(); + + /** + Assignment replaces the current Ptr instance with one that owns and stores same pointers as o and + then destroys the old instance. + @param o Ptr to share ownership with. + */ + Ptr& operator = (const Ptr& o); + + /** @overload */ + template + Ptr& operator = (const Ptr& o); + + /** If no other Ptr instance owns the owned pointer, deletes it with the associated deleter. Then sets + both the owned and the stored pointers to NULL. + */ + void release(); + + /** + `ptr.reset(...)` is equivalent to `ptr = Ptr(...)`. + @param p Pointer to own. + */ + template + void reset(Y* p); + + /** @overload + @param d Deleter to use for the owned pointer. + @param p Pointer to own. + */ + template + void reset(Y* p, D d); + + /** + Swaps the owned and stored pointers (and deleters, if any) of this and o. + @param o Ptr to swap with. + */ + void swap(Ptr& o); + + /** Returns the stored pointer. */ + T* get() const; + + /** Ordinary pointer emulation. */ + typename detail::RefOrVoid::type operator * () const; + + /** Ordinary pointer emulation. */ + T* operator -> () const; + + /** Equivalent to get(). */ + operator T* () const; + + /** ptr.empty() is equivalent to `!ptr.get()`. */ + bool empty() const; + + /** Returns a Ptr that owns the same pointer as this, and stores the same + pointer as this, except converted via static_cast to Y*. + */ + template + Ptr staticCast() const; + + /** Ditto for const_cast. */ + template + Ptr constCast() const; + + /** Ditto for dynamic_cast. */ + template + Ptr dynamicCast() const; + +#ifdef CV_CXX_MOVE_SEMANTICS + Ptr(Ptr&& o); + Ptr& operator = (Ptr&& o); +#endif + +private: + detail::PtrOwner* owner; + T* stored; + + template + friend struct Ptr; // have to do this for the cross-type copy constructor +}; + +/** Equivalent to ptr1.swap(ptr2). Provided to help write generic algorithms. */ +template +void swap(Ptr& ptr1, Ptr& ptr2); + +/** Return whether ptr1.get() and ptr2.get() are equal and not equal, respectively. 
*/ +template +bool operator == (const Ptr& ptr1, const Ptr& ptr2); +template +bool operator != (const Ptr& ptr1, const Ptr& ptr2); + +/** `makePtr(...)` is equivalent to `Ptr(new T(...))`. It is shorter than the latter, and it's +marginally safer than using a constructor or Ptr::reset, since it ensures that the owned pointer +is new and thus not owned by any other Ptr instance. +Unfortunately, perfect forwarding is impossible to implement in C++03, and so makePtr is limited +to constructors of T that have up to 10 arguments, none of which are non-const references. + */ +template +Ptr makePtr(); +/** @overload */ +template +Ptr makePtr(const A1& a1); +/** @overload */ +template +Ptr makePtr(const A1& a1, const A2& a2); +/** @overload */ +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3); +/** @overload */ +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4); +/** @overload */ +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5); +/** @overload */ +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6); +/** @overload */ +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7); +/** @overload */ +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8); +/** @overload */ +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9); +/** @overload */ +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9, const A10& a10); + +//////////////////////////////// string class //////////////////////////////// + +class CV_EXPORTS FileNode; //for string constructor from FileNode + +class CV_EXPORTS String +{ +public: + typedef char value_type; + typedef char& reference; + typedef const char& const_reference; + typedef char* pointer; + typedef const char* const_pointer; + typedef ptrdiff_t difference_type; + typedef size_t size_type; + typedef char* iterator; + typedef const char* const_iterator; + + static const size_t npos = size_t(-1); + + String(); + String(const String& str); + String(const String& str, size_t pos, size_t len = npos); + String(const char* s); + String(const char* s, size_t n); + String(size_t n, char c); + String(const char* first, const char* last); + template String(Iterator first, Iterator last); + explicit String(const FileNode& fn); + ~String(); + + String& operator=(const String& str); + String& operator=(const char* s); + String& operator=(char c); + + String& operator+=(const String& str); + String& operator+=(const char* s); + String& operator+=(char c); + + size_t size() const; + size_t length() const; + + char operator[](size_t idx) const; + char operator[](int idx) const; + + const char* begin() const; + const char* end() const; + + const char* c_str() const; + + bool empty() const; + void clear(); + + int compare(const char* s) const; + int compare(const String& str) const; + + void swap(String& str); + String substr(size_t pos = 0, size_t len = npos) const; + + size_t find(const char* s, size_t pos, size_t n) const; + size_t find(char c, size_t pos = 0) const; + size_t find(const String& str, size_t pos = 0) const; + size_t find(const char* s, size_t pos = 0) const; + + size_t rfind(const char* s, size_t pos, size_t n) 
const; + size_t rfind(char c, size_t pos = npos) const; + size_t rfind(const String& str, size_t pos = npos) const; + size_t rfind(const char* s, size_t pos = npos) const; + + size_t find_first_of(const char* s, size_t pos, size_t n) const; + size_t find_first_of(char c, size_t pos = 0) const; + size_t find_first_of(const String& str, size_t pos = 0) const; + size_t find_first_of(const char* s, size_t pos = 0) const; + + size_t find_last_of(const char* s, size_t pos, size_t n) const; + size_t find_last_of(char c, size_t pos = npos) const; + size_t find_last_of(const String& str, size_t pos = npos) const; + size_t find_last_of(const char* s, size_t pos = npos) const; + + friend String operator+ (const String& lhs, const String& rhs); + friend String operator+ (const String& lhs, const char* rhs); + friend String operator+ (const char* lhs, const String& rhs); + friend String operator+ (const String& lhs, char rhs); + friend String operator+ (char lhs, const String& rhs); + + String toLowerCase() const; + + String(const std::string& str); + String(const std::string& str, size_t pos, size_t len = npos); + String& operator=(const std::string& str); + String& operator+=(const std::string& str); + operator std::string() const; + + friend String operator+ (const String& lhs, const std::string& rhs); + friend String operator+ (const std::string& lhs, const String& rhs); + +private: + char* cstr_; + size_t len_; + + char* allocate(size_t len); // len without trailing 0 + void deallocate(); + + String(int); // disabled and invalid. Catch invalid usages like, commandLineParser.has(0) problem +}; + +//! @} core_basic + +////////////////////////// cv::String implementation ///////////////////////// + +//! @cond IGNORED + +inline +String::String() + : cstr_(0), len_(0) +{} + +inline +String::String(const String& str) + : cstr_(str.cstr_), len_(str.len_) +{ + if (cstr_) + CV_XADD(((int*)cstr_)-1, 1); +} + +inline +String::String(const String& str, size_t pos, size_t len) + : cstr_(0), len_(0) +{ + pos = min(pos, str.len_); + len = min(str.len_ - pos, len); + if (!len) return; + if (len == str.len_) + { + CV_XADD(((int*)str.cstr_)-1, 1); + cstr_ = str.cstr_; + len_ = str.len_; + return; + } + memcpy(allocate(len), str.cstr_ + pos, len); +} + +inline +String::String(const char* s) + : cstr_(0), len_(0) +{ + if (!s) return; + size_t len = strlen(s); + if (!len) return; + memcpy(allocate(len), s, len); +} + +inline +String::String(const char* s, size_t n) + : cstr_(0), len_(0) +{ + if (!n) return; + if (!s) return; + memcpy(allocate(n), s, n); +} + +inline +String::String(size_t n, char c) + : cstr_(0), len_(0) +{ + if (!n) return; + memset(allocate(n), c, n); +} + +inline +String::String(const char* first, const char* last) + : cstr_(0), len_(0) +{ + size_t len = (size_t)(last - first); + if (!len) return; + memcpy(allocate(len), first, len); +} + +template inline +String::String(Iterator first, Iterator last) + : cstr_(0), len_(0) +{ + size_t len = (size_t)(last - first); + if (!len) return; + char* str = allocate(len); + while (first != last) + { + *str++ = *first; + ++first; + } +} + +inline +String::~String() +{ + deallocate(); +} + +inline +String& String::operator=(const String& str) +{ + if (&str == this) return *this; + + deallocate(); + if (str.cstr_) CV_XADD(((int*)str.cstr_)-1, 1); + cstr_ = str.cstr_; + len_ = str.len_; + return *this; +} + +inline +String& String::operator=(const char* s) +{ + deallocate(); + if (!s) return *this; + size_t len = strlen(s); + if (len) memcpy(allocate(len), s, 
len); + return *this; +} + +inline +String& String::operator=(char c) +{ + deallocate(); + allocate(1)[0] = c; + return *this; +} + +inline +String& String::operator+=(const String& str) +{ + *this = *this + str; + return *this; +} + +inline +String& String::operator+=(const char* s) +{ + *this = *this + s; + return *this; +} + +inline +String& String::operator+=(char c) +{ + *this = *this + c; + return *this; +} + +inline +size_t String::size() const +{ + return len_; +} + +inline +size_t String::length() const +{ + return len_; +} + +inline +char String::operator[](size_t idx) const +{ + return cstr_[idx]; +} + +inline +char String::operator[](int idx) const +{ + return cstr_[idx]; +} + +inline +const char* String::begin() const +{ + return cstr_; +} + +inline +const char* String::end() const +{ + return len_ ? cstr_ + len_ : NULL; +} + +inline +bool String::empty() const +{ + return len_ == 0; +} + +inline +const char* String::c_str() const +{ + return cstr_ ? cstr_ : ""; +} + +inline +void String::swap(String& str) +{ + cv::swap(cstr_, str.cstr_); + cv::swap(len_, str.len_); +} + +inline +void String::clear() +{ + deallocate(); +} + +inline +int String::compare(const char* s) const +{ + if (cstr_ == s) return 0; + return strcmp(c_str(), s); +} + +inline +int String::compare(const String& str) const +{ + if (cstr_ == str.cstr_) return 0; + return strcmp(c_str(), str.c_str()); +} + +inline +String String::substr(size_t pos, size_t len) const +{ + return String(*this, pos, len); +} + +inline +size_t String::find(const char* s, size_t pos, size_t n) const +{ + if (n == 0 || pos + n > len_) return npos; + const char* lmax = cstr_ + len_ - n; + for (const char* i = cstr_ + pos; i <= lmax; ++i) + { + size_t j = 0; + while (j < n && s[j] == i[j]) ++j; + if (j == n) return (size_t)(i - cstr_); + } + return npos; +} + +inline +size_t String::find(char c, size_t pos) const +{ + return find(&c, pos, 1); +} + +inline +size_t String::find(const String& str, size_t pos) const +{ + return find(str.c_str(), pos, str.len_); +} + +inline +size_t String::find(const char* s, size_t pos) const +{ + if (pos >= len_ || !s[0]) return npos; + const char* lmax = cstr_ + len_; + for (const char* i = cstr_ + pos; i < lmax; ++i) + { + size_t j = 0; + while (s[j] && s[j] == i[j]) + { if(i + j >= lmax) return npos; + ++j; + } + if (!s[j]) return (size_t)(i - cstr_); + } + return npos; +} + +inline +size_t String::rfind(const char* s, size_t pos, size_t n) const +{ + if (n > len_) return npos; + if (pos > len_ - n) pos = len_ - n; + for (const char* i = cstr_ + pos; i >= cstr_; --i) + { + size_t j = 0; + while (j < n && s[j] == i[j]) ++j; + if (j == n) return (size_t)(i - cstr_); + } + return npos; +} + +inline +size_t String::rfind(char c, size_t pos) const +{ + return rfind(&c, pos, 1); +} + +inline +size_t String::rfind(const String& str, size_t pos) const +{ + return rfind(str.c_str(), pos, str.len_); +} + +inline +size_t String::rfind(const char* s, size_t pos) const +{ + return rfind(s, pos, strlen(s)); +} + +inline +size_t String::find_first_of(const char* s, size_t pos, size_t n) const +{ + if (n == 0 || pos + n > len_) return npos; + const char* lmax = cstr_ + len_; + for (const char* i = cstr_ + pos; i < lmax; ++i) + { + for (size_t j = 0; j < n; ++j) + if (s[j] == *i) + return (size_t)(i - cstr_); + } + return npos; +} + +inline +size_t String::find_first_of(char c, size_t pos) const +{ + return find_first_of(&c, pos, 1); +} + +inline +size_t String::find_first_of(const String& str, size_t pos) const +{ + 
return find_first_of(str.c_str(), pos, str.len_); +} + +inline +size_t String::find_first_of(const char* s, size_t pos) const +{ + if (len_ == 0) return npos; + if (pos >= len_ || !s[0]) return npos; + const char* lmax = cstr_ + len_; + for (const char* i = cstr_ + pos; i < lmax; ++i) + { + for (size_t j = 0; s[j]; ++j) + if (s[j] == *i) + return (size_t)(i - cstr_); + } + return npos; +} + +inline +size_t String::find_last_of(const char* s, size_t pos, size_t n) const +{ + if (len_ == 0) return npos; + if (pos >= len_) pos = len_ - 1; + for (const char* i = cstr_ + pos; i >= cstr_; --i) + { + for (size_t j = 0; j < n; ++j) + if (s[j] == *i) + return (size_t)(i - cstr_); + } + return npos; +} + +inline +size_t String::find_last_of(char c, size_t pos) const +{ + return find_last_of(&c, pos, 1); +} + +inline +size_t String::find_last_of(const String& str, size_t pos) const +{ + return find_last_of(str.c_str(), pos, str.len_); +} + +inline +size_t String::find_last_of(const char* s, size_t pos) const +{ + if (len_ == 0) return npos; + if (pos >= len_) pos = len_ - 1; + for (const char* i = cstr_ + pos; i >= cstr_; --i) + { + for (size_t j = 0; s[j]; ++j) + if (s[j] == *i) + return (size_t)(i - cstr_); + } + return npos; +} + +inline +String String::toLowerCase() const +{ + if (!cstr_) + return String(); + String res(cstr_, len_); + for (size_t i = 0; i < len_; ++i) + res.cstr_[i] = (char) ::tolower(cstr_[i]); + + return res; +} + +//! @endcond + +// ************************* cv::String non-member functions ************************* + +//! @relates cv::String +//! @{ + +inline +String operator + (const String& lhs, const String& rhs) +{ + String s; + s.allocate(lhs.len_ + rhs.len_); + if (lhs.len_) memcpy(s.cstr_, lhs.cstr_, lhs.len_); + if (rhs.len_) memcpy(s.cstr_ + lhs.len_, rhs.cstr_, rhs.len_); + return s; +} + +inline +String operator + (const String& lhs, const char* rhs) +{ + String s; + size_t rhslen = strlen(rhs); + s.allocate(lhs.len_ + rhslen); + if (lhs.len_) memcpy(s.cstr_, lhs.cstr_, lhs.len_); + if (rhslen) memcpy(s.cstr_ + lhs.len_, rhs, rhslen); + return s; +} + +inline +String operator + (const char* lhs, const String& rhs) +{ + String s; + size_t lhslen = strlen(lhs); + s.allocate(lhslen + rhs.len_); + if (lhslen) memcpy(s.cstr_, lhs, lhslen); + if (rhs.len_) memcpy(s.cstr_ + lhslen, rhs.cstr_, rhs.len_); + return s; +} + +inline +String operator + (const String& lhs, char rhs) +{ + String s; + s.allocate(lhs.len_ + 1); + if (lhs.len_) memcpy(s.cstr_, lhs.cstr_, lhs.len_); + s.cstr_[lhs.len_] = rhs; + return s; +} + +inline +String operator + (char lhs, const String& rhs) +{ + String s; + s.allocate(rhs.len_ + 1); + s.cstr_[0] = lhs; + if (rhs.len_) memcpy(s.cstr_ + 1, rhs.cstr_, rhs.len_); + return s; +} + +static inline bool operator== (const String& lhs, const String& rhs) { return 0 == lhs.compare(rhs); } +static inline bool operator== (const char* lhs, const String& rhs) { return 0 == rhs.compare(lhs); } +static inline bool operator== (const String& lhs, const char* rhs) { return 0 == lhs.compare(rhs); } +static inline bool operator!= (const String& lhs, const String& rhs) { return 0 != lhs.compare(rhs); } +static inline bool operator!= (const char* lhs, const String& rhs) { return 0 != rhs.compare(lhs); } +static inline bool operator!= (const String& lhs, const char* rhs) { return 0 != lhs.compare(rhs); } +static inline bool operator< (const String& lhs, const String& rhs) { return lhs.compare(rhs) < 0; } +static inline bool operator< (const char* lhs, const String& 
rhs) { return rhs.compare(lhs) > 0; } +static inline bool operator< (const String& lhs, const char* rhs) { return lhs.compare(rhs) < 0; } +static inline bool operator<= (const String& lhs, const String& rhs) { return lhs.compare(rhs) <= 0; } +static inline bool operator<= (const char* lhs, const String& rhs) { return rhs.compare(lhs) >= 0; } +static inline bool operator<= (const String& lhs, const char* rhs) { return lhs.compare(rhs) <= 0; } +static inline bool operator> (const String& lhs, const String& rhs) { return lhs.compare(rhs) > 0; } +static inline bool operator> (const char* lhs, const String& rhs) { return rhs.compare(lhs) < 0; } +static inline bool operator> (const String& lhs, const char* rhs) { return lhs.compare(rhs) > 0; } +static inline bool operator>= (const String& lhs, const String& rhs) { return lhs.compare(rhs) >= 0; } +static inline bool operator>= (const char* lhs, const String& rhs) { return rhs.compare(lhs) <= 0; } +static inline bool operator>= (const String& lhs, const char* rhs) { return lhs.compare(rhs) >= 0; } + + +#ifndef OPENCV_DISABLE_STRING_LOWER_UPPER_CONVERSIONS + +//! @cond IGNORED +namespace details { +// std::tolower is int->int +static inline char char_tolower(char ch) +{ + return (char)std::tolower((int)ch); +} +// std::toupper is int->int +static inline char char_toupper(char ch) +{ + return (char)std::toupper((int)ch); +} +} // namespace details +//! @endcond + +static inline std::string toLowerCase(const std::string& str) +{ + std::string result(str); + std::transform(result.begin(), result.end(), result.begin(), details::char_tolower); + return result; +} + +static inline std::string toUpperCase(const std::string& str) +{ + std::string result(str); + std::transform(result.begin(), result.end(), result.begin(), details::char_toupper); + return result; +} + +#endif // OPENCV_DISABLE_STRING_LOWER_UPPER_CONVERSIONS + +//! @} relates cv::String + +} // cv + +namespace std +{ + static inline void swap(cv::String& a, cv::String& b) { a.swap(b); } +} + +#include "opencv2/core/ptr.inl.hpp" + +#endif //OPENCV_CORE_CVSTD_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/cvstd.inl.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/cvstd.inl.hpp new file mode 100644 index 00000000..36c83e28 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/cvstd.inl.hpp @@ -0,0 +1,286 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_CVSTDINL_HPP +#define OPENCV_CORE_CVSTDINL_HPP + +#include +#include +#include + +//! @cond IGNORED + +#ifdef _MSC_VER +#pragma warning( push ) +#pragma warning( disable: 4127 ) +#endif + +namespace cv +{ + +template class DataType< std::complex<_Tp> > +{ +public: + typedef std::complex<_Tp> value_type; + typedef value_type work_type; + typedef _Tp channel_type; + + enum { generic_type = 0, + depth = DataType::depth, + channels = 2, + fmt = DataType::fmt + ((channels - 1) << 8), + type = CV_MAKETYPE(depth, channels) }; + + typedef Vec vec_type; +}; + +inline +String::String(const std::string& str) + : cstr_(0), len_(0) +{ + size_t len = str.size(); + if (len) memcpy(allocate(len), str.c_str(), len); +} + +inline +String::String(const std::string& str, size_t pos, size_t len) + : cstr_(0), len_(0) +{ + size_t strlen = str.size(); + pos = min(pos, strlen); + len = min(strlen - pos, len); + if (!len) return; + memcpy(allocate(len), str.c_str() + pos, len); +} + +inline +String& String::operator = (const std::string& str) +{ + deallocate(); + size_t len = str.size(); + if (len) memcpy(allocate(len), str.c_str(), len); + return *this; +} + +inline +String& String::operator += (const std::string& str) +{ + *this = *this + str; + return *this; +} + +inline +String::operator std::string() const +{ + return std::string(cstr_, len_); +} + +inline +String operator + (const String& lhs, const std::string& rhs) +{ + String s; + size_t rhslen = rhs.size(); + s.allocate(lhs.len_ + rhslen); + if (lhs.len_) memcpy(s.cstr_, lhs.cstr_, lhs.len_); + if (rhslen) memcpy(s.cstr_ + lhs.len_, rhs.c_str(), rhslen); + return s; +} + +inline +String operator + (const std::string& lhs, const String& rhs) +{ + String s; + size_t lhslen = lhs.size(); + s.allocate(lhslen + rhs.len_); + if (lhslen) memcpy(s.cstr_, lhs.c_str(), lhslen); + if (rhs.len_) memcpy(s.cstr_ + lhslen, rhs.cstr_, rhs.len_); + return s; +} + +inline +FileNode::operator std::string() const +{ + String value; + read(*this, value, value); + return value; +} + +template<> inline +void operator >> (const FileNode& n, std::string& value) +{ + read(n, value, std::string()); +} + +template<> inline +FileStorage& operator << (FileStorage& fs, const std::string& value) +{ + return fs << cv::String(value); +} + +static inline +std::ostream& operator << 
(std::ostream& os, const String& str) +{ + return os << str.c_str(); +} + +static inline +std::ostream& operator << (std::ostream& out, Ptr fmtd) +{ + fmtd->reset(); + for(const char* str = fmtd->next(); str; str = fmtd->next()) + out << str; + return out; +} + +static inline +std::ostream& operator << (std::ostream& out, const Mat& mtx) +{ + return out << Formatter::get()->format(mtx); +} + +static inline +std::ostream& operator << (std::ostream& out, const UMat& m) +{ + return out << m.getMat(ACCESS_READ); +} + +template static inline +std::ostream& operator << (std::ostream& out, const Complex<_Tp>& c) +{ + return out << "(" << c.re << "," << c.im << ")"; +} + +template static inline +std::ostream& operator << (std::ostream& out, const std::vector >& vec) +{ + return out << Formatter::get()->format(Mat(vec)); +} + + +template static inline +std::ostream& operator << (std::ostream& out, const std::vector >& vec) +{ + return out << Formatter::get()->format(Mat(vec)); +} + + +template static inline +std::ostream& operator << (std::ostream& out, const Matx<_Tp, m, n>& matx) +{ + return out << Formatter::get()->format(Mat(matx)); +} + +template static inline +std::ostream& operator << (std::ostream& out, const Point_<_Tp>& p) +{ + out << "[" << p.x << ", " << p.y << "]"; + return out; +} + +template static inline +std::ostream& operator << (std::ostream& out, const Point3_<_Tp>& p) +{ + out << "[" << p.x << ", " << p.y << ", " << p.z << "]"; + return out; +} + +template static inline +std::ostream& operator << (std::ostream& out, const Vec<_Tp, n>& vec) +{ + out << "["; + if (cv::traits::Depth<_Tp>::value <= CV_32S) + { + for (int i = 0; i < n - 1; ++i) { + out << (int)vec[i] << ", "; + } + out << (int)vec[n-1] << "]"; + } + else + { + for (int i = 0; i < n - 1; ++i) { + out << vec[i] << ", "; + } + out << vec[n-1] << "]"; + } + + return out; +} + +template static inline +std::ostream& operator << (std::ostream& out, const Size_<_Tp>& size) +{ + return out << "[" << size.width << " x " << size.height << "]"; +} + +template static inline +std::ostream& operator << (std::ostream& out, const Rect_<_Tp>& rect) +{ + return out << "[" << rect.width << " x " << rect.height << " from (" << rect.x << ", " << rect.y << ")]"; +} + +static inline std::ostream& operator << (std::ostream& out, const MatSize& msize) +{ + int i, dims = msize.dims(); + for( i = 0; i < dims; i++ ) + { + out << msize[i]; + if( i < dims-1 ) + out << " x "; + } + return out; +} + +static inline std::ostream &operator<< (std::ostream &s, cv::Range &r) +{ + return s << "[" << r.start << " : " << r.end << ")"; +} + +} // cv + +#ifdef _MSC_VER +#pragma warning( pop ) +#endif + +//! @endcond + +#endif // OPENCV_CORE_CVSTDINL_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/detail/async_promise.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/detail/async_promise.hpp new file mode 100644 index 00000000..6eb3fb52 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/detail/async_promise.hpp @@ -0,0 +1,71 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
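The stream insertion operators defined in cvstd.inl.hpp above make the core types printable with ordinary iostreams. A minimal usage sketch, assuming the OpenCV core headers are available and linked (the exact textual layout of the Mat output depends on the active Formatter):

#include <iostream>
#include <opencv2/core.hpp>

int main()
{
    cv::Mat m = (cv::Mat_<float>(2, 2) << 1.f, 2.f, 3.f, 4.f);
    cv::Point p(10, 20);
    cv::Size sz(640, 480);
    cv::Rect r(0, 0, 100, 50);

    std::cout << m  << std::endl;  // formatted via Formatter::get()->format(m)
    std::cout << p  << std::endl;  // prints "[10, 20]"
    std::cout << sz << std::endl;  // prints "[640 x 480]"
    std::cout << r  << std::endl;  // prints "[100 x 50 from (0, 0)]"
    return 0;
}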
+ +#ifndef OPENCV_CORE_ASYNC_PROMISE_HPP +#define OPENCV_CORE_ASYNC_PROMISE_HPP + +#include "../async.hpp" + +#include "exception_ptr.hpp" + +namespace cv { + +/** @addtogroup core_async +@{ +*/ + + +/** @brief Provides result of asynchronous operations + +*/ +class CV_EXPORTS AsyncPromise +{ +public: + ~AsyncPromise() CV_NOEXCEPT; + AsyncPromise() CV_NOEXCEPT; + explicit AsyncPromise(const AsyncPromise& o) CV_NOEXCEPT; + AsyncPromise& operator=(const AsyncPromise& o) CV_NOEXCEPT; + void release() CV_NOEXCEPT; + + /** Returns associated AsyncArray + @note Can be called once + */ + AsyncArray getArrayResult(); + + /** Stores asynchronous result. + @param[in] value result + */ + void setValue(InputArray value); + + // TODO "move" setters + +#if CV__EXCEPTION_PTR + /** Stores exception. + @param[in] exception exception to be raised in AsyncArray + */ + void setException(std::exception_ptr exception); +#endif + + /** Stores exception. + @param[in] exception exception to be raised in AsyncArray + */ + void setException(const cv::Exception& exception); + +#ifdef CV_CXX11 + explicit AsyncPromise(AsyncPromise&& o) { p = o.p; o.p = NULL; } + AsyncPromise& operator=(AsyncPromise&& o) CV_NOEXCEPT { std::swap(p, o.p); return *this; } +#endif + + + // PImpl + typedef struct AsyncArray::Impl Impl; friend struct AsyncArray::Impl; + inline void* _getImpl() const CV_NOEXCEPT { return p; } +protected: + Impl* p; +}; + + +//! @} +} // namespace +#endif // OPENCV_CORE_ASYNC_PROMISE_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/detail/exception_ptr.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/detail/exception_ptr.hpp new file mode 100644 index 00000000..d98ffc40 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/detail/exception_ptr.hpp @@ -0,0 +1,27 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#ifndef OPENCV_CORE_DETAILS_EXCEPTION_PTR_H +#define OPENCV_CORE_DETAILS_EXCEPTION_PTR_H + +#ifndef CV__EXCEPTION_PTR +# if defined(__ANDROID__) && defined(ATOMIC_INT_LOCK_FREE) && ATOMIC_INT_LOCK_FREE < 2 +# define CV__EXCEPTION_PTR 0 // Not supported, details: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58938 +# elif defined(CV_CXX11) +# define CV__EXCEPTION_PTR 1 +# elif defined(_MSC_VER) +# define CV__EXCEPTION_PTR (_MSC_VER >= 1600) +# elif defined(__clang__) +# define CV__EXCEPTION_PTR 0 // C++11 only (see above) +# elif defined(__GNUC__) && defined(__GXX_EXPERIMENTAL_CXX0X__) +# define CV__EXCEPTION_PTR (__GXX_EXPERIMENTAL_CXX0X__ > 0) +# endif +#endif +#ifndef CV__EXCEPTION_PTR +# define CV__EXCEPTION_PTR 0 +#elif CV__EXCEPTION_PTR +# include // std::exception_ptr +#endif + +#endif // OPENCV_CORE_DETAILS_EXCEPTION_PTR_H diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/directx.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/directx.hpp new file mode 100644 index 00000000..056a85a1 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/directx.hpp @@ -0,0 +1,184 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
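For reference, the intended producer/consumer flow of the AsyncPromise class declared above is roughly as follows. This is a hedged sketch assuming the OpenCV 4.x cv::AsyncArray interface (AsyncArray::get(OutputArray)); it is not part of the header itself:

#include <opencv2/core.hpp>
#include <opencv2/core/async.hpp>
#include <opencv2/core/detail/async_promise.hpp>

static cv::AsyncArray startWork()
{
    cv::AsyncPromise promise;
    cv::AsyncArray result = promise.getArrayResult();  // may only be called once

    // Producer side: publish the value (normally done from a worker thread).
    promise.setValue(cv::Mat::eye(3, 3, CV_32F));
    return result;
}

int main()
{
    cv::AsyncArray result = startWork();
    cv::Mat value;
    result.get(value);  // returns immediately here, the value was already set
    return 0;
}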
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors as is and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the copyright holders or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_DIRECTX_HPP +#define OPENCV_CORE_DIRECTX_HPP + +#include "mat.hpp" +#include "ocl.hpp" + +#if !defined(__d3d11_h__) +struct ID3D11Device; +struct ID3D11Texture2D; +#endif + +#if !defined(__d3d10_h__) +struct ID3D10Device; +struct ID3D10Texture2D; +#endif + +#if !defined(_D3D9_H_) +struct IDirect3DDevice9; +struct IDirect3DDevice9Ex; +struct IDirect3DSurface9; +#endif + + +namespace cv { namespace directx { + +namespace ocl { +using namespace cv::ocl; + +//! @addtogroup core_directx +// This section describes OpenCL and DirectX interoperability. +// +// To enable DirectX support, configure OpenCV using CMake with WITH_DIRECTX=ON . Note, DirectX is +// supported only on Windows. +// +// To use OpenCL functionality you should first initialize OpenCL context from DirectX resource. +// +//! @{ + +// TODO static functions in the Context class +//! @brief Creates OpenCL context from D3D11 device +// +//! @param pD3D11Device - pointer to D3D11 device +//! @return Returns reference to OpenCL Context +CV_EXPORTS Context& initializeContextFromD3D11Device(ID3D11Device* pD3D11Device); + +//! @brief Creates OpenCL context from D3D10 device +// +//! @param pD3D10Device - pointer to D3D10 device +//! @return Returns reference to OpenCL Context +CV_EXPORTS Context& initializeContextFromD3D10Device(ID3D10Device* pD3D10Device); + +//! @brief Creates OpenCL context from Direct3DDevice9Ex device +// +//! @param pDirect3DDevice9Ex - pointer to Direct3DDevice9Ex device +//! @return Returns reference to OpenCL Context +CV_EXPORTS Context& initializeContextFromDirect3DDevice9Ex(IDirect3DDevice9Ex* pDirect3DDevice9Ex); + +//! @brief Creates OpenCL context from Direct3DDevice9 device +// +//! @param pDirect3DDevice9 - pointer to Direct3Device9 device +//! 
@return Returns reference to OpenCL Context +CV_EXPORTS Context& initializeContextFromDirect3DDevice9(IDirect3DDevice9* pDirect3DDevice9); + +//! @} + +} // namespace cv::directx::ocl + +//! @addtogroup core_directx +//! @{ + +//! @brief Converts InputArray to ID3D11Texture2D. If destination texture format is DXGI_FORMAT_NV12 then +//! input UMat expected to be in BGR format and data will be downsampled and color-converted to NV12. +// +//! @note Note: Destination texture must be allocated by application. Function does memory copy from src to +//! pD3D11Texture2D +// +//! @param src - source InputArray +//! @param pD3D11Texture2D - destination D3D11 texture +CV_EXPORTS void convertToD3D11Texture2D(InputArray src, ID3D11Texture2D* pD3D11Texture2D); + +//! @brief Converts ID3D11Texture2D to OutputArray. If input texture format is DXGI_FORMAT_NV12 then +//! data will be upsampled and color-converted to BGR format. +// +//! @note Note: Destination matrix will be re-allocated if it has not enough memory to match texture size. +//! function does memory copy from pD3D11Texture2D to dst +// +//! @param pD3D11Texture2D - source D3D11 texture +//! @param dst - destination OutputArray +CV_EXPORTS void convertFromD3D11Texture2D(ID3D11Texture2D* pD3D11Texture2D, OutputArray dst); + +//! @brief Converts InputArray to ID3D10Texture2D +// +//! @note Note: function does memory copy from src to +//! pD3D10Texture2D +// +//! @param src - source InputArray +//! @param pD3D10Texture2D - destination D3D10 texture +CV_EXPORTS void convertToD3D10Texture2D(InputArray src, ID3D10Texture2D* pD3D10Texture2D); + +//! @brief Converts ID3D10Texture2D to OutputArray +// +//! @note Note: function does memory copy from pD3D10Texture2D +//! to dst +// +//! @param pD3D10Texture2D - source D3D10 texture +//! @param dst - destination OutputArray +CV_EXPORTS void convertFromD3D10Texture2D(ID3D10Texture2D* pD3D10Texture2D, OutputArray dst); + +//! @brief Converts InputArray to IDirect3DSurface9 +// +//! @note Note: function does memory copy from src to +//! pDirect3DSurface9 +// +//! @param src - source InputArray +//! @param pDirect3DSurface9 - destination D3D10 texture +//! @param surfaceSharedHandle - shared handle +CV_EXPORTS void convertToDirect3DSurface9(InputArray src, IDirect3DSurface9* pDirect3DSurface9, void* surfaceSharedHandle = NULL); + +//! @brief Converts IDirect3DSurface9 to OutputArray +// +//! @note Note: function does memory copy from pDirect3DSurface9 +//! to dst +// +//! @param pDirect3DSurface9 - source D3D10 texture +//! @param dst - destination OutputArray +//! @param surfaceSharedHandle - shared handle +CV_EXPORTS void convertFromDirect3DSurface9(IDirect3DSurface9* pDirect3DSurface9, OutputArray dst, void* surfaceSharedHandle = NULL); + +//! @brief Get OpenCV type from DirectX type +//! @param iDXGI_FORMAT - enum DXGI_FORMAT for D3D10/D3D11 +//! @return OpenCV type or -1 if there is no equivalent +CV_EXPORTS int getTypeFromDXGI_FORMAT(const int iDXGI_FORMAT); // enum DXGI_FORMAT for D3D10/D3D11 + +//! @brief Get OpenCV type from DirectX type +//! @param iD3DFORMAT - enum D3DTYPE for D3D9 +//! @return OpenCV type or -1 if there is no equivalent +CV_EXPORTS int getTypeFromD3DFORMAT(const int iD3DFORMAT); // enum D3DTYPE for D3D9 + +//! 
@} + +} } // namespace cv::directx + +#endif // OPENCV_CORE_DIRECTX_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/eigen.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/eigen.hpp new file mode 100644 index 00000000..51f41474 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/eigen.hpp @@ -0,0 +1,402 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + + +#ifndef OPENCV_CORE_EIGEN_HPP +#define OPENCV_CORE_EIGEN_HPP + +#ifndef EIGEN_WORLD_VERSION +#error "Wrong usage of OpenCV's Eigen utility header. Include Eigen's headers first. 
See https://github.com/opencv/opencv/issues/17366" +#endif + +#include "opencv2/core.hpp" + +#if defined _MSC_VER && _MSC_VER >= 1200 +#define NOMINMAX // fix https://github.com/opencv/opencv/issues/17548 +#pragma warning( disable: 4714 ) //__forceinline is not inlined +#pragma warning( disable: 4127 ) //conditional expression is constant +#pragma warning( disable: 4244 ) //conversion from '__int64' to 'int', possible loss of data +#endif + +#if !defined(OPENCV_DISABLE_EIGEN_TENSOR_SUPPORT) +#if EIGEN_WORLD_VERSION == 3 && EIGEN_MAJOR_VERSION >= 3 \ + && defined(CV_CXX11) && defined(CV_CXX_STD_ARRAY) +#include +#define OPENCV_EIGEN_TENSOR_SUPPORT 1 +#endif // EIGEN_WORLD_VERSION == 3 && EIGEN_MAJOR_VERSION >= 3 +#endif // !defined(OPENCV_DISABLE_EIGEN_TENSOR_SUPPORT) + +namespace cv +{ + +/** @addtogroup core_eigen +These functions are provided for OpenCV-Eigen interoperability. They convert `Mat` +objects to corresponding `Eigen::Matrix` objects and vice-versa. Consult the [Eigen +documentation](https://eigen.tuxfamily.org/dox/group__TutorialMatrixClass.html) for +information about the `Matrix` template type. + +@note Using these functions requires the `Eigen/Dense` or similar header to be +included before this header. +*/ +//! @{ + +#if defined(OPENCV_EIGEN_TENSOR_SUPPORT) || defined(CV_DOXYGEN) +/** @brief Converts an Eigen::Tensor to a cv::Mat. + +The method converts an Eigen::Tensor with shape (H x W x C) to a cv::Mat where: + H = number of rows + W = number of columns + C = number of channels + +Usage: +\code +Eigen::Tensor a_tensor(...); +// populate tensor with values +Mat a_mat; +eigen2cv(a_tensor, a_mat); +\endcode +*/ +template static inline +void eigen2cv( const Eigen::Tensor<_Tp, 3, _layout> &src, OutputArray dst ) +{ + if( !(_layout & Eigen::RowMajorBit) ) + { + const std::array shuffle{2, 1, 0}; + Eigen::Tensor<_Tp, 3, !_layout> row_major_tensor = src.swap_layout().shuffle(shuffle); + Mat _src(src.dimension(0), src.dimension(1), CV_MAKETYPE(DataType<_Tp>::type, src.dimension(2)), row_major_tensor.data()); + _src.copyTo(dst); + } + else + { + Mat _src(src.dimension(0), src.dimension(1), CV_MAKETYPE(DataType<_Tp>::type, src.dimension(2)), (void *)src.data()); + _src.copyTo(dst); + } +} + +/** @brief Converts a cv::Mat to an Eigen::Tensor. + +The method converts a cv::Mat to an Eigen Tensor with shape (H x W x C) where: + H = number of rows + W = number of columns + C = number of channels + +Usage: +\code +Mat a_mat(...); +// populate Mat with values +Eigen::Tensor a_tensor(...); +cv2eigen(a_mat, a_tensor); +\endcode +*/ +template static inline +void cv2eigen( const Mat &src, Eigen::Tensor<_Tp, 3, _layout> &dst ) +{ + if( !(_layout & Eigen::RowMajorBit) ) + { + Eigen::Tensor<_Tp, 3, !_layout> row_major_tensor(src.rows, src.cols, src.channels()); + Mat _dst(src.rows, src.cols, CV_MAKETYPE(DataType<_Tp>::type, src.channels()), row_major_tensor.data()); + if (src.type() == _dst.type()) + src.copyTo(_dst); + else + src.convertTo(_dst, _dst.type()); + const std::array shuffle{2, 1, 0}; + dst = row_major_tensor.swap_layout().shuffle(shuffle); + } + else + { + dst.resize(src.rows, src.cols, src.channels()); + Mat _dst(src.rows, src.cols, CV_MAKETYPE(DataType<_Tp>::type, src.channels()), dst.data()); + if (src.type() == _dst.type()) + src.copyTo(_dst); + else + src.convertTo(_dst, _dst.type()); + } +} + +/** @brief Maps cv::Mat data to an Eigen::TensorMap. 
+ +The method wraps an existing Mat data array with an Eigen TensorMap of shape (H x W x C) where: + H = number of rows + W = number of columns + C = number of channels + +Explicit instantiation of the return type is required. + +@note Caller should be aware of the lifetime of the cv::Mat instance and take appropriate safety measures. +The cv::Mat instance will retain ownership of the data and the Eigen::TensorMap will lose access when the cv::Mat data is deallocated. + +The example below initializes a cv::Mat and produces an Eigen::TensorMap: +\code +float arr[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; +Mat a_mat(2, 2, CV_32FC3, arr); +Eigen::TensorMap> a_tensormap = cv2eigen_tensormap(a_mat); +\endcode +*/ +template static inline +Eigen::TensorMap> cv2eigen_tensormap(InputArray src) +{ + Mat mat = src.getMat(); + CV_CheckTypeEQ(mat.type(), CV_MAKETYPE(traits::Type<_Tp>::value, mat.channels()), ""); + return Eigen::TensorMap>((_Tp *)mat.data, mat.rows, mat.cols, mat.channels()); +} +#endif // OPENCV_EIGEN_TENSOR_SUPPORT + +template static inline +void eigen2cv( const Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& src, OutputArray dst ) +{ + if( !(src.Flags & Eigen::RowMajorBit) ) + { + Mat _src(src.cols(), src.rows(), traits::Type<_Tp>::value, + (void*)src.data(), src.outerStride()*sizeof(_Tp)); + transpose(_src, dst); + } + else + { + Mat _src(src.rows(), src.cols(), traits::Type<_Tp>::value, + (void*)src.data(), src.outerStride()*sizeof(_Tp)); + _src.copyTo(dst); + } +} + +// Matx case +template static inline +void eigen2cv( const Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& src, + Matx<_Tp, _rows, _cols>& dst ) +{ + if( !(src.Flags & Eigen::RowMajorBit) ) + { + dst = Matx<_Tp, _cols, _rows>(static_cast(src.data())).t(); + } + else + { + dst = Matx<_Tp, _rows, _cols>(static_cast(src.data())); + } +} + +template static inline +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& dst ) +{ + CV_DbgAssert(src.rows == _rows && src.cols == _cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + const Mat _dst(src.cols, src.rows, traits::Type<_Tp>::value, + dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else if( src.cols == src.rows ) + { + src.convertTo(_dst, _dst.type()); + transpose(_dst, _dst); + } + else + Mat(src.t()).convertTo(_dst, _dst.type()); + } + else + { + const Mat _dst(src.rows, src.cols, traits::Type<_Tp>::value, + dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + } +} + +// Matx case +template static inline +void cv2eigen( const Matx<_Tp, _rows, _cols>& src, + Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& dst ) +{ + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + const Mat _dst(_cols, _rows, traits::Type<_Tp>::value, + dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp))); + transpose(src, _dst); + } + else + { + const Mat _dst(_rows, _cols, traits::Type<_Tp>::value, + dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp))); + Mat(src).copyTo(_dst); + } +} + +template static inline +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, Eigen::Dynamic, Eigen::Dynamic>& dst ) +{ + dst.resize(src.rows, src.cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + const Mat _dst(src.cols, src.rows, traits::Type<_Tp>::value, + dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else if( src.cols == src.rows ) + { + 
src.convertTo(_dst, _dst.type()); + transpose(_dst, _dst); + } + else + Mat(src.t()).convertTo(_dst, _dst.type()); + } + else + { + const Mat _dst(src.rows, src.cols, traits::Type<_Tp>::value, + dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + } +} + +// Matx case +template static inline +void cv2eigen( const Matx<_Tp, _rows, _cols>& src, + Eigen::Matrix<_Tp, Eigen::Dynamic, Eigen::Dynamic>& dst ) +{ + dst.resize(_rows, _cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + const Mat _dst(_cols, _rows, traits::Type<_Tp>::value, + dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp))); + transpose(src, _dst); + } + else + { + const Mat _dst(_rows, _cols, traits::Type<_Tp>::value, + dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp))); + Mat(src).copyTo(_dst); + } +} + +template static inline +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, Eigen::Dynamic, 1>& dst ) +{ + CV_Assert(src.cols == 1); + dst.resize(src.rows); + + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + const Mat _dst(src.cols, src.rows, traits::Type<_Tp>::value, + dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else + Mat(src.t()).convertTo(_dst, _dst.type()); + } + else + { + const Mat _dst(src.rows, src.cols, traits::Type<_Tp>::value, + dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + } +} + +// Matx case +template static inline +void cv2eigen( const Matx<_Tp, _rows, 1>& src, + Eigen::Matrix<_Tp, Eigen::Dynamic, 1>& dst ) +{ + dst.resize(_rows); + + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + const Mat _dst(1, _rows, traits::Type<_Tp>::value, + dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp))); + transpose(src, _dst); + } + else + { + const Mat _dst(_rows, 1, traits::Type<_Tp>::value, + dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp))); + src.copyTo(_dst); + } +} + + +template static inline +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, 1, Eigen::Dynamic>& dst ) +{ + CV_Assert(src.rows == 1); + dst.resize(src.cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + const Mat _dst(src.cols, src.rows, traits::Type<_Tp>::value, + dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else + Mat(src.t()).convertTo(_dst, _dst.type()); + } + else + { + const Mat _dst(src.rows, src.cols, traits::Type<_Tp>::value, + dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + } +} + +//Matx +template static inline +void cv2eigen( const Matx<_Tp, 1, _cols>& src, + Eigen::Matrix<_Tp, 1, Eigen::Dynamic>& dst ) +{ + dst.resize(_cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + const Mat _dst(_cols, 1, traits::Type<_Tp>::value, + dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp))); + transpose(src, _dst); + } + else + { + const Mat _dst(1, _cols, traits::Type<_Tp>::value, + dst.data(), (size_t)(dst.outerStride()*sizeof(_Tp))); + Mat(src).copyTo(_dst); + } +} + +//! @} + +} // cv + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/fast_math.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/fast_math.hpp new file mode 100644 index 00000000..eb4fbe21 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/fast_math.hpp @@ -0,0 +1,411 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
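A short usage sketch for the eigen2cv()/cv2eigen() conversions declared above, assuming both Eigen and the OpenCV core module are available (Eigen headers must be included before opencv2/core/eigen.hpp, as the #error guard above enforces):

#include <Eigen/Dense>                    // must precede opencv2/core/eigen.hpp
#include <opencv2/core.hpp>
#include <opencv2/core/eigen.hpp>

int main()
{
    cv::Mat m = (cv::Mat_<double>(2, 3) << 1, 2, 3,
                                           4, 5, 6);

    Eigen::MatrixXd e;                    // dynamic size, column-major by default
    cv::cv2eigen(m, e);                   // storage-order difference handled internally

    cv::Mat back;
    cv::eigen2cv(Eigen::MatrixXd(e.transpose()), back);  // back is 3x2, CV_64F
    return 0;
}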
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Copyright (C) 2015, Itseez Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_FAST_MATH_HPP +#define OPENCV_CORE_FAST_MATH_HPP + +#include "opencv2/core/cvdef.h" + +//! @addtogroup core_utils +//! @{ + +/****************************************************************************************\ +* fast math * +\****************************************************************************************/ + +#ifdef __cplusplus +# include +#else +# ifdef __BORLANDC__ +# include +# else +# include +# endif +#endif + +#if defined(__CUDACC__) + // nothing, intrinsics/asm code is not supported +#else + #if ((defined _MSC_VER && defined _M_X64) \ + || (defined __GNUC__ && defined __x86_64__ && defined __SSE2__)) \ + && !defined(OPENCV_SKIP_INCLUDE_EMMINTRIN_H) + #include + #endif + + #if defined __PPC64__ && defined __GNUC__ && defined _ARCH_PWR8 \ + && !defined(OPENCV_SKIP_INCLUDE_ALTIVEC_H) + #include + #undef vector + #undef bool + #undef pixel + #endif + + #if defined(CV_INLINE_ROUND_FLT) + // user-specified version + // CV_INLINE_ROUND_DBL should be defined too + #elif defined __GNUC__ && defined __arm__ && (defined __ARM_PCS_VFP || defined __ARM_VFPV3__ || defined __ARM_NEON__) && !defined __SOFTFP__ + // 1. general scheme + #define ARM_ROUND(_value, _asm_string) \ + int res; \ + float temp; \ + CV_UNUSED(temp); \ + __asm__(_asm_string : [res] "=r" (res), [temp] "=w" (temp) : [value] "w" (_value)); \ + return res + // 2. 
version for double + #ifdef __clang__ + #define CV_INLINE_ROUND_DBL(value) ARM_ROUND(value, "vcvtr.s32.f64 %[temp], %[value] \n vmov %[res], %[temp]") + #else + #define CV_INLINE_ROUND_DBL(value) ARM_ROUND(value, "vcvtr.s32.f64 %[temp], %P[value] \n vmov %[res], %[temp]") + #endif + // 3. version for float + #define CV_INLINE_ROUND_FLT(value) ARM_ROUND(value, "vcvtr.s32.f32 %[temp], %[value]\n vmov %[res], %[temp]") + #elif defined __PPC64__ && defined __GNUC__ && defined _ARCH_PWR8 + // P8 and newer machines can convert fp32/64 to int quickly. + #define CV_INLINE_ROUND_DBL(value) \ + int out; \ + double temp; \ + __asm__( "fctiw %[temp],%[in]\n\tmfvsrwz %[out],%[temp]\n\t" : [out] "=r" (out), [temp] "=d" (temp) : [in] "d" ((double)(value)) : ); \ + return out; + + // FP32 also works with FP64 routine above + #define CV_INLINE_ROUND_FLT(value) CV_INLINE_ROUND_DBL(value) + #endif + + #ifdef CV_INLINE_ISINF_FLT + // user-specified version + // CV_INLINE_ISINF_DBL should be defined too + #elif defined __PPC64__ && defined _ARCH_PWR9 && defined(scalar_test_data_class) + #define CV_INLINE_ISINF_DBL(value) return scalar_test_data_class(value, 0x30); + #define CV_INLINE_ISINF_FLT(value) CV_INLINE_ISINF_DBL(value) + #endif + + #ifdef CV_INLINE_ISNAN_FLT + // user-specified version + // CV_INLINE_ISNAN_DBL should be defined too + #elif defined __PPC64__ && defined _ARCH_PWR9 && defined(scalar_test_data_class) + #define CV_INLINE_ISNAN_DBL(value) return scalar_test_data_class(value, 0x40); + #define CV_INLINE_ISNAN_FLT(value) CV_INLINE_ISNAN_DBL(value) + #endif + + #if !defined(OPENCV_USE_FASTMATH_BUILTINS) \ + && ( \ + defined(__x86_64__) || defined(__i686__) \ + || defined(__arm__) \ + || defined(__PPC64__) \ + ) + /* Let builtin C math functions when available. Dedicated hardware is available to + round and convert FP values. */ + #define OPENCV_USE_FASTMATH_BUILTINS 1 + #endif + + /* Enable builtin math functions if possible, desired, and available. + Note, not all math functions inline equally. E.g lrint will not inline + without the -fno-math-errno option. 
*/ + #if defined(CV_ICC) + // nothing + #elif defined(OPENCV_USE_FASTMATH_BUILTINS) && OPENCV_USE_FASTMATH_BUILTINS + #if defined(__clang__) + #define CV__FASTMATH_ENABLE_CLANG_MATH_BUILTINS + #if !defined(CV_INLINE_ISNAN_DBL) && __has_builtin(__builtin_isnan) + #define CV_INLINE_ISNAN_DBL(value) return __builtin_isnan(value); + #endif + #if !defined(CV_INLINE_ISNAN_FLT) && __has_builtin(__builtin_isnan) + #define CV_INLINE_ISNAN_FLT(value) return __builtin_isnan(value); + #endif + #if !defined(CV_INLINE_ISINF_DBL) && __has_builtin(__builtin_isinf) + #define CV_INLINE_ISINF_DBL(value) return __builtin_isinf(value); + #endif + #if !defined(CV_INLINE_ISINF_FLT) && __has_builtin(__builtin_isinf) + #define CV_INLINE_ISINF_FLT(value) return __builtin_isinf(value); + #endif + #elif defined(__GNUC__) + #define CV__FASTMATH_ENABLE_GCC_MATH_BUILTINS + #if !defined(CV_INLINE_ISNAN_DBL) + #define CV_INLINE_ISNAN_DBL(value) return __builtin_isnan(value); + #endif + #if !defined(CV_INLINE_ISNAN_FLT) + #define CV_INLINE_ISNAN_FLT(value) return __builtin_isnanf(value); + #endif + #if !defined(CV_INLINE_ISINF_DBL) + #define CV_INLINE_ISINF_DBL(value) return __builtin_isinf(value); + #endif + #if !defined(CV_INLINE_ISINF_FLT) + #define CV_INLINE_ISINF_FLT(value) return __builtin_isinff(value); + #endif + #elif defined(_MSC_VER) + #if !defined(CV_INLINE_ISNAN_DBL) + #define CV_INLINE_ISNAN_DBL(value) return isnan(value); + #endif + #if !defined(CV_INLINE_ISNAN_FLT) + #define CV_INLINE_ISNAN_FLT(value) return isnan(value); + #endif + #if !defined(CV_INLINE_ISINF_DBL) + #define CV_INLINE_ISINF_DBL(value) return isinf(value); + #endif + #if !defined(CV_INLINE_ISINF_FLT) + #define CV_INLINE_ISINF_FLT(value) return isinf(value); + #endif + #endif + #endif + +#endif // defined(__CUDACC__) + +/** @brief Rounds floating-point number to the nearest integer + + @param value floating-point number. If the value is outside of INT_MIN ... INT_MAX range, the + result is not defined. + */ +CV_INLINE int +cvRound( double value ) +{ +#if defined CV_INLINE_ROUND_DBL + CV_INLINE_ROUND_DBL(value); +#elif ((defined _MSC_VER && defined _M_X64) || (defined __GNUC__ && defined __x86_64__ \ + && defined __SSE2__ && !defined __APPLE__) || CV_SSE2) \ + && !defined(__CUDACC__) + __m128d t = _mm_set_sd( value ); + return _mm_cvtsd_si32(t); +#elif defined _MSC_VER && defined _M_IX86 + int t; + __asm + { + fld value; + fistp t; + } + return t; +#elif defined CV_ICC || defined __GNUC__ + return (int)(lrint(value)); +#else + /* it's ok if round does not comply with IEEE754 standard; + the tests should allow +/-1 difference when the tested functions use round */ + return (int)(value + (value >= 0 ? 0.5 : -0.5)); +#endif +} + + +/** @brief Rounds floating-point number to the nearest integer not larger than the original. + + The function computes an integer i such that: + \f[i \le \texttt{value} < i+1\f] + @param value floating-point number. If the value is outside of INT_MIN ... INT_MAX range, the + result is not defined. + */ +CV_INLINE int cvFloor( double value ) +{ +#if (defined CV__FASTMATH_ENABLE_GCC_MATH_BUILTINS || defined CV__FASTMATH_ENABLE_CLANG_MATH_BUILTINS) \ + && ( \ + defined(__PPC64__) \ + ) + return __builtin_floor(value); +#else + int i = (int)value; + return i - (i > value); +#endif +} + +/** @brief Rounds floating-point number to the nearest integer not smaller than the original. + + The function computes an integer i such that: + \f[i \le \texttt{value} < i+1\f] + @param value floating-point number. 
If the value is outside of INT_MIN ... INT_MAX range, the + result is not defined. + */ +CV_INLINE int cvCeil( double value ) +{ +#if (defined CV__FASTMATH_ENABLE_GCC_MATH_BUILTINS || defined CV__FASTMATH_ENABLE_CLANG_MATH_BUILTINS) \ + && ( \ + defined(__PPC64__) \ + ) + return __builtin_ceil(value); +#else + int i = (int)value; + return i + (i < value); +#endif +} + +/** @brief Determines if the argument is Not A Number. + + @param value The input floating-point value + + The function returns 1 if the argument is Not A Number (as defined by IEEE754 standard), 0 + otherwise. */ +CV_INLINE int cvIsNaN( double value ) +{ +#if defined CV_INLINE_ISNAN_DBL + CV_INLINE_ISNAN_DBL(value); +#else + Cv64suf ieee754; + ieee754.f = value; + return ((unsigned)(ieee754.u >> 32) & 0x7fffffff) + + ((unsigned)ieee754.u != 0) > 0x7ff00000; +#endif +} + +/** @brief Determines if the argument is Infinity. + + @param value The input floating-point value + + The function returns 1 if the argument is a plus or minus infinity (as defined by IEEE754 standard) + and 0 otherwise. */ +CV_INLINE int cvIsInf( double value ) +{ +#if defined CV_INLINE_ISINF_DBL + CV_INLINE_ISINF_DBL(value); +#elif defined(__x86_64__) || defined(_M_X64) || defined(__aarch64__) || defined(_M_ARM64) || defined(__PPC64__) + Cv64suf ieee754; + ieee754.f = value; + return (ieee754.u & 0x7fffffff00000000) == + 0x7ff0000000000000; +#else + Cv64suf ieee754; + ieee754.f = value; + return ((unsigned)(ieee754.u >> 32) & 0x7fffffff) == 0x7ff00000 && + (unsigned)ieee754.u == 0; +#endif +} + +#ifdef __cplusplus + +/** @overload */ +CV_INLINE int cvRound(float value) +{ +#if defined CV_INLINE_ROUND_FLT + CV_INLINE_ROUND_FLT(value); +#elif ((defined _MSC_VER && defined _M_X64) || (defined __GNUC__ && defined __x86_64__ \ + && defined __SSE2__ && !defined __APPLE__) || CV_SSE2) \ + && !defined(__CUDACC__) + __m128 t = _mm_set_ss( value ); + return _mm_cvtss_si32(t); +#elif defined _MSC_VER && defined _M_IX86 + int t; + __asm + { + fld value; + fistp t; + } + return t; +#elif defined CV_ICC || defined __GNUC__ + return (int)(lrintf(value)); +#else + /* it's ok if round does not comply with IEEE754 standard; + the tests should allow +/-1 difference when the tested functions use round */ + return (int)(value + (value >= 0 ? 
0.5f : -0.5f)); +#endif +} + +/** @overload */ +CV_INLINE int cvRound( int value ) +{ + return value; +} + +/** @overload */ +CV_INLINE int cvFloor( float value ) +{ +#if (defined CV__FASTMATH_ENABLE_GCC_MATH_BUILTINS || defined CV__FASTMATH_ENABLE_CLANG_MATH_BUILTINS) \ + && ( \ + defined(__PPC64__) \ + ) + return __builtin_floorf(value); +#else + int i = (int)value; + return i - (i > value); +#endif +} + +/** @overload */ +CV_INLINE int cvFloor( int value ) +{ + return value; +} + +/** @overload */ +CV_INLINE int cvCeil( float value ) +{ +#if (defined CV__FASTMATH_ENABLE_GCC_MATH_BUILTINS || defined CV__FASTMATH_ENABLE_CLANG_MATH_BUILTINS) \ + && ( \ + defined(__PPC64__) \ + ) + return __builtin_ceilf(value); +#else + int i = (int)value; + return i + (i < value); +#endif +} + +/** @overload */ +CV_INLINE int cvCeil( int value ) +{ + return value; +} + +/** @overload */ +CV_INLINE int cvIsNaN( float value ) +{ +#if defined CV_INLINE_ISNAN_FLT + CV_INLINE_ISNAN_FLT(value); +#else + Cv32suf ieee754; + ieee754.f = value; + return (ieee754.u & 0x7fffffff) > 0x7f800000; +#endif +} + +/** @overload */ +CV_INLINE int cvIsInf( float value ) +{ +#if defined CV_INLINE_ISINF_FLT + CV_INLINE_ISINF_FLT(value); +#else + Cv32suf ieee754; + ieee754.f = value; + return (ieee754.u & 0x7fffffff) == 0x7f800000; +#endif +} + +#endif // __cplusplus + +//! @} core_utils + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/hal/hal.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/hal.hpp new file mode 100644 index 00000000..68900ec4 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/hal.hpp @@ -0,0 +1,250 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Copyright (C) 2015, Itseez Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_HAL_HPP +#define OPENCV_HAL_HPP + +#include "opencv2/core/cvdef.h" +#include "opencv2/core/cvstd.hpp" +#include "opencv2/core/hal/interface.h" + +namespace cv { namespace hal { + +//! @addtogroup core_hal_functions +//! @{ + +CV_EXPORTS int normHamming(const uchar* a, int n); +CV_EXPORTS int normHamming(const uchar* a, const uchar* b, int n); + +CV_EXPORTS int normHamming(const uchar* a, int n, int cellSize); +CV_EXPORTS int normHamming(const uchar* a, const uchar* b, int n, int cellSize); + +CV_EXPORTS int LU32f(float* A, size_t astep, int m, float* b, size_t bstep, int n); +CV_EXPORTS int LU64f(double* A, size_t astep, int m, double* b, size_t bstep, int n); +CV_EXPORTS bool Cholesky32f(float* A, size_t astep, int m, float* b, size_t bstep, int n); +CV_EXPORTS bool Cholesky64f(double* A, size_t astep, int m, double* b, size_t bstep, int n); +CV_EXPORTS void SVD32f(float* At, size_t astep, float* W, float* U, size_t ustep, float* Vt, size_t vstep, int m, int n, int flags); +CV_EXPORTS void SVD64f(double* At, size_t astep, double* W, double* U, size_t ustep, double* Vt, size_t vstep, int m, int n, int flags); +CV_EXPORTS int QR32f(float* A, size_t astep, int m, int n, int k, float* b, size_t bstep, float* hFactors); +CV_EXPORTS int QR64f(double* A, size_t astep, int m, int n, int k, double* b, size_t bstep, double* hFactors); + +CV_EXPORTS void gemm32f(const float* src1, size_t src1_step, const float* src2, size_t src2_step, + float alpha, const float* src3, size_t src3_step, float beta, float* dst, size_t dst_step, + int m_a, int n_a, int n_d, int flags); +CV_EXPORTS void gemm64f(const double* src1, size_t src1_step, const double* src2, size_t src2_step, + double alpha, const double* src3, size_t src3_step, double beta, double* dst, size_t dst_step, + int m_a, int n_a, int n_d, int flags); +CV_EXPORTS void gemm32fc(const float* src1, size_t src1_step, const float* src2, size_t src2_step, + float alpha, const float* src3, size_t src3_step, float beta, float* dst, size_t dst_step, + int m_a, int n_a, int n_d, int flags); +CV_EXPORTS void gemm64fc(const double* src1, size_t src1_step, const double* src2, size_t src2_step, + double alpha, const double* src3, size_t src3_step, double beta, double* dst, size_t dst_step, + int m_a, int n_a, int n_d, int flags); + +CV_EXPORTS int normL1_(const uchar* a, const uchar* b, int n); +CV_EXPORTS float normL1_(const float* a, const float* b, int n); +CV_EXPORTS float normL2Sqr_(const float* a, const float* b, int n); + +CV_EXPORTS void exp32f(const float* src, float* dst, int n); +CV_EXPORTS void exp64f(const double* src, double* dst, int n); +CV_EXPORTS void log32f(const float* src, float* dst, int n); +CV_EXPORTS void log64f(const double* src, double* dst, int n); + +CV_EXPORTS void fastAtan32f(const float* y, const float* x, float* dst, int n, bool angleInDegrees); +CV_EXPORTS void fastAtan64f(const double* y, const double* x, double* dst, int n, bool angleInDegrees); +CV_EXPORTS void 
magnitude32f(const float* x, const float* y, float* dst, int n); +CV_EXPORTS void magnitude64f(const double* x, const double* y, double* dst, int n); +CV_EXPORTS void sqrt32f(const float* src, float* dst, int len); +CV_EXPORTS void sqrt64f(const double* src, double* dst, int len); +CV_EXPORTS void invSqrt32f(const float* src, float* dst, int len); +CV_EXPORTS void invSqrt64f(const double* src, double* dst, int len); + +CV_EXPORTS void split8u(const uchar* src, uchar** dst, int len, int cn ); +CV_EXPORTS void split16u(const ushort* src, ushort** dst, int len, int cn ); +CV_EXPORTS void split32s(const int* src, int** dst, int len, int cn ); +CV_EXPORTS void split64s(const int64* src, int64** dst, int len, int cn ); + +CV_EXPORTS void merge8u(const uchar** src, uchar* dst, int len, int cn ); +CV_EXPORTS void merge16u(const ushort** src, ushort* dst, int len, int cn ); +CV_EXPORTS void merge32s(const int** src, int* dst, int len, int cn ); +CV_EXPORTS void merge64s(const int64** src, int64* dst, int len, int cn ); + +CV_EXPORTS void add8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void add8s( const schar* src1, size_t step1, const schar* src2, size_t step2, schar* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void add16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2, ushort* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void add16s( const short* src1, size_t step1, const short* src2, size_t step2, short* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void add32s( const int* src1, size_t step1, const int* src2, size_t step2, int* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void add32f( const float* src1, size_t step1, const float* src2, size_t step2, float* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void add64f( const double* src1, size_t step1, const double* src2, size_t step2, double* dst, size_t step, int width, int height, void* ); + +CV_EXPORTS void sub8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void sub8s( const schar* src1, size_t step1, const schar* src2, size_t step2, schar* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void sub16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2, ushort* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void sub16s( const short* src1, size_t step1, const short* src2, size_t step2, short* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void sub32s( const int* src1, size_t step1, const int* src2, size_t step2, int* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void sub32f( const float* src1, size_t step1, const float* src2, size_t step2, float* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void sub64f( const double* src1, size_t step1, const double* src2, size_t step2, double* dst, size_t step, int width, int height, void* ); + +CV_EXPORTS void max8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void max8s( const schar* src1, size_t step1, const schar* src2, size_t step2, schar* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void max16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2, ushort* dst, size_t step, int width, int height, void* ); 
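(The remaining max*/min*/absdiff* declarations of the same shape continue below.) As a reading aid for these element-wise primitives: each one walks a 2D buffer row by row, the step arguments give the distance between consecutive rows in bytes, and the trailing void* carries per-call extra data (the declarations name it scale for mul*/div*/recip* and _cmpop for cmp*; it is unused for add*/sub*/min*/max*). A minimal illustrative call, not part of the vendored header and assuming the patched headers are on the include path with OpenCV core linked, might look like:

    #include <opencv2/core/hal/hal.hpp>

    int main()
    {
        const int width = 4, height = 2;
        uchar a[2][4] = { {1, 2, 3, 4}, {5, 6, 7, 8} };
        uchar b[2][4] = { {10, 10, 10, 10}, {20, 20, 20, 20} };
        uchar sum[2][4];
        // step = bytes between the starts of consecutive rows (rows are tightly packed here),
        // and the last void* carries no data for add8u, so a null pointer is passed.
        cv::hal::add8u(&a[0][0], width, &b[0][0], width, &sum[0][0], width, width, height, 0);
        return 0; // sum[r][c] now holds a[r][c] + b[r][c], saturated to 0..255
    }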
+CV_EXPORTS void max16s( const short* src1, size_t step1, const short* src2, size_t step2, short* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void max32s( const int* src1, size_t step1, const int* src2, size_t step2, int* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void max32f( const float* src1, size_t step1, const float* src2, size_t step2, float* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void max64f( const double* src1, size_t step1, const double* src2, size_t step2, double* dst, size_t step, int width, int height, void* ); + +CV_EXPORTS void min8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void min8s( const schar* src1, size_t step1, const schar* src2, size_t step2, schar* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void min16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2, ushort* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void min16s( const short* src1, size_t step1, const short* src2, size_t step2, short* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void min32s( const int* src1, size_t step1, const int* src2, size_t step2, int* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void min32f( const float* src1, size_t step1, const float* src2, size_t step2, float* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void min64f( const double* src1, size_t step1, const double* src2, size_t step2, double* dst, size_t step, int width, int height, void* ); + +CV_EXPORTS void absdiff8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void absdiff8s( const schar* src1, size_t step1, const schar* src2, size_t step2, schar* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void absdiff16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2, ushort* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void absdiff16s( const short* src1, size_t step1, const short* src2, size_t step2, short* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void absdiff32s( const int* src1, size_t step1, const int* src2, size_t step2, int* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void absdiff32f( const float* src1, size_t step1, const float* src2, size_t step2, float* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void absdiff64f( const double* src1, size_t step1, const double* src2, size_t step2, double* dst, size_t step, int width, int height, void* ); + +CV_EXPORTS void and8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void or8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void xor8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* ); +CV_EXPORTS void not8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* ); + +CV_EXPORTS void cmp8u(const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* _cmpop); +CV_EXPORTS void cmp8s(const schar* src1, size_t step1, const schar* src2, size_t step2, uchar* dst, size_t step, int 
width, int height, void* _cmpop); +CV_EXPORTS void cmp16u(const ushort* src1, size_t step1, const ushort* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* _cmpop); +CV_EXPORTS void cmp16s(const short* src1, size_t step1, const short* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* _cmpop); +CV_EXPORTS void cmp32s(const int* src1, size_t step1, const int* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* _cmpop); +CV_EXPORTS void cmp32f(const float* src1, size_t step1, const float* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* _cmpop); +CV_EXPORTS void cmp64f(const double* src1, size_t step1, const double* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* _cmpop); + +CV_EXPORTS void mul8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* scale); +CV_EXPORTS void mul8s( const schar* src1, size_t step1, const schar* src2, size_t step2, schar* dst, size_t step, int width, int height, void* scale); +CV_EXPORTS void mul16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2, ushort* dst, size_t step, int width, int height, void* scale); +CV_EXPORTS void mul16s( const short* src1, size_t step1, const short* src2, size_t step2, short* dst, size_t step, int width, int height, void* scale); +CV_EXPORTS void mul32s( const int* src1, size_t step1, const int* src2, size_t step2, int* dst, size_t step, int width, int height, void* scale); +CV_EXPORTS void mul32f( const float* src1, size_t step1, const float* src2, size_t step2, float* dst, size_t step, int width, int height, void* scale); +CV_EXPORTS void mul64f( const double* src1, size_t step1, const double* src2, size_t step2, double* dst, size_t step, int width, int height, void* scale); + +CV_EXPORTS void div8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* scale); +CV_EXPORTS void div8s( const schar* src1, size_t step1, const schar* src2, size_t step2, schar* dst, size_t step, int width, int height, void* scale); +CV_EXPORTS void div16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2, ushort* dst, size_t step, int width, int height, void* scale); +CV_EXPORTS void div16s( const short* src1, size_t step1, const short* src2, size_t step2, short* dst, size_t step, int width, int height, void* scale); +CV_EXPORTS void div32s( const int* src1, size_t step1, const int* src2, size_t step2, int* dst, size_t step, int width, int height, void* scale); +CV_EXPORTS void div32f( const float* src1, size_t step1, const float* src2, size_t step2, float* dst, size_t step, int width, int height, void* scale); +CV_EXPORTS void div64f( const double* src1, size_t step1, const double* src2, size_t step2, double* dst, size_t step, int width, int height, void* scale); + +CV_EXPORTS void recip8u( const uchar *, size_t, const uchar * src2, size_t step2, uchar* dst, size_t step, int width, int height, void* scale); +CV_EXPORTS void recip8s( const schar *, size_t, const schar * src2, size_t step2, schar* dst, size_t step, int width, int height, void* scale); +CV_EXPORTS void recip16u( const ushort *, size_t, const ushort * src2, size_t step2, ushort* dst, size_t step, int width, int height, void* scale); +CV_EXPORTS void recip16s( const short *, size_t, const short * src2, size_t step2, short* dst, size_t step, int width, int height, void* scale); +CV_EXPORTS void recip32s( 
const int *, size_t, const int * src2, size_t step2, int* dst, size_t step, int width, int height, void* scale); +CV_EXPORTS void recip32f( const float *, size_t, const float * src2, size_t step2, float* dst, size_t step, int width, int height, void* scale); +CV_EXPORTS void recip64f( const double *, size_t, const double * src2, size_t step2, double* dst, size_t step, int width, int height, void* scale); + +CV_EXPORTS void addWeighted8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, uchar* dst, size_t step, int width, int height, void* _scalars ); +CV_EXPORTS void addWeighted8s( const schar* src1, size_t step1, const schar* src2, size_t step2, schar* dst, size_t step, int width, int height, void* scalars ); +CV_EXPORTS void addWeighted16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2, ushort* dst, size_t step, int width, int height, void* scalars ); +CV_EXPORTS void addWeighted16s( const short* src1, size_t step1, const short* src2, size_t step2, short* dst, size_t step, int width, int height, void* scalars ); +CV_EXPORTS void addWeighted32s( const int* src1, size_t step1, const int* src2, size_t step2, int* dst, size_t step, int width, int height, void* scalars ); +CV_EXPORTS void addWeighted32f( const float* src1, size_t step1, const float* src2, size_t step2, float* dst, size_t step, int width, int height, void* scalars ); +CV_EXPORTS void addWeighted64f( const double* src1, size_t step1, const double* src2, size_t step2, double* dst, size_t step, int width, int height, void* scalars ); + +struct CV_EXPORTS DFT1D +{ + static Ptr create(int len, int count, int depth, int flags, bool * useBuffer = 0); + virtual void apply(const uchar *src, uchar *dst) = 0; + virtual ~DFT1D() {} +}; + +struct CV_EXPORTS DFT2D +{ + static Ptr create(int width, int height, int depth, + int src_channels, int dst_channels, + int flags, int nonzero_rows = 0); + virtual void apply(const uchar *src_data, size_t src_step, uchar *dst_data, size_t dst_step) = 0; + virtual ~DFT2D() {} +}; + +struct CV_EXPORTS DCT2D +{ + static Ptr create(int width, int height, int depth, int flags); + virtual void apply(const uchar *src_data, size_t src_step, uchar *dst_data, size_t dst_step) = 0; + virtual ~DCT2D() {} +}; + +//! @} core_hal + +//============================================================================= +// for binary compatibility with 3.0 + +//! @cond IGNORED + +CV_EXPORTS int LU(float* A, size_t astep, int m, float* b, size_t bstep, int n); +CV_EXPORTS int LU(double* A, size_t astep, int m, double* b, size_t bstep, int n); +CV_EXPORTS bool Cholesky(float* A, size_t astep, int m, float* b, size_t bstep, int n); +CV_EXPORTS bool Cholesky(double* A, size_t astep, int m, double* b, size_t bstep, int n); + +CV_EXPORTS void exp(const float* src, float* dst, int n); +CV_EXPORTS void exp(const double* src, double* dst, int n); +CV_EXPORTS void log(const float* src, float* dst, int n); +CV_EXPORTS void log(const double* src, double* dst, int n); + +CV_EXPORTS void fastAtan2(const float* y, const float* x, float* dst, int n, bool angleInDegrees); +CV_EXPORTS void magnitude(const float* x, const float* y, float* dst, int n); +CV_EXPORTS void magnitude(const double* x, const double* y, double* dst, int n); +CV_EXPORTS void sqrt(const float* src, float* dst, int len); +CV_EXPORTS void sqrt(const double* src, double* dst, int len); +CV_EXPORTS void invSqrt(const float* src, float* dst, int len); +CV_EXPORTS void invSqrt(const double* src, double* dst, int len); + +//! 
@endcond + +}} //cv::hal + +#endif //OPENCV_HAL_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/hal/interface.h b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/interface.h new file mode 100644 index 00000000..8f640254 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/interface.h @@ -0,0 +1,182 @@ +#ifndef OPENCV_CORE_HAL_INTERFACE_H +#define OPENCV_CORE_HAL_INTERFACE_H + +//! @addtogroup core_hal_interface +//! @{ + +//! @name Return codes +//! @{ +#define CV_HAL_ERROR_OK 0 +#define CV_HAL_ERROR_NOT_IMPLEMENTED 1 +#define CV_HAL_ERROR_UNKNOWN -1 +//! @} + +#ifdef __cplusplus +#include +#else +#include +#include +#endif + +//! @name Data types +//! primitive types +//! - schar - signed 1 byte integer +//! - uchar - unsigned 1 byte integer +//! - short - signed 2 byte integer +//! - ushort - unsigned 2 byte integer +//! - int - signed 4 byte integer +//! - uint - unsigned 4 byte integer +//! - int64 - signed 8 byte integer +//! - uint64 - unsigned 8 byte integer +//! @{ +#if !defined _MSC_VER && !defined __BORLANDC__ +# if defined __cplusplus && __cplusplus >= 201103L && !defined __APPLE__ +# include +# ifdef __NEWLIB__ + typedef unsigned int uint; +# else + typedef std::uint32_t uint; +# endif +# else +# include + typedef uint32_t uint; +# endif +#else + typedef unsigned uint; +#endif + +typedef signed char schar; + +#ifndef __IPL_H__ + typedef unsigned char uchar; + typedef unsigned short ushort; +#endif + +#if defined _MSC_VER || defined __BORLANDC__ + typedef __int64 int64; + typedef unsigned __int64 uint64; +# define CV_BIG_INT(n) n##I64 +# define CV_BIG_UINT(n) n##UI64 +#else + typedef int64_t int64; + typedef uint64_t uint64; +# define CV_BIG_INT(n) n##LL +# define CV_BIG_UINT(n) n##ULL +#endif + +#define CV_CN_MAX 512 +#define CV_CN_SHIFT 3 +#define CV_DEPTH_MAX (1 << CV_CN_SHIFT) + +#define CV_8U 0 +#define CV_8S 1 +#define CV_16U 2 +#define CV_16S 3 +#define CV_32S 4 +#define CV_32F 5 +#define CV_64F 6 +#define CV_USRTYPE1 7 + +#define CV_MAT_DEPTH_MASK (CV_DEPTH_MAX - 1) +#define CV_MAT_DEPTH(flags) ((flags) & CV_MAT_DEPTH_MASK) + +#define CV_MAKETYPE(depth,cn) (CV_MAT_DEPTH(depth) + (((cn)-1) << CV_CN_SHIFT)) +#define CV_MAKE_TYPE CV_MAKETYPE + +#define CV_8UC1 CV_MAKETYPE(CV_8U,1) +#define CV_8UC2 CV_MAKETYPE(CV_8U,2) +#define CV_8UC3 CV_MAKETYPE(CV_8U,3) +#define CV_8UC4 CV_MAKETYPE(CV_8U,4) +#define CV_8UC(n) CV_MAKETYPE(CV_8U,(n)) + +#define CV_8SC1 CV_MAKETYPE(CV_8S,1) +#define CV_8SC2 CV_MAKETYPE(CV_8S,2) +#define CV_8SC3 CV_MAKETYPE(CV_8S,3) +#define CV_8SC4 CV_MAKETYPE(CV_8S,4) +#define CV_8SC(n) CV_MAKETYPE(CV_8S,(n)) + +#define CV_16UC1 CV_MAKETYPE(CV_16U,1) +#define CV_16UC2 CV_MAKETYPE(CV_16U,2) +#define CV_16UC3 CV_MAKETYPE(CV_16U,3) +#define CV_16UC4 CV_MAKETYPE(CV_16U,4) +#define CV_16UC(n) CV_MAKETYPE(CV_16U,(n)) + +#define CV_16SC1 CV_MAKETYPE(CV_16S,1) +#define CV_16SC2 CV_MAKETYPE(CV_16S,2) +#define CV_16SC3 CV_MAKETYPE(CV_16S,3) +#define CV_16SC4 CV_MAKETYPE(CV_16S,4) +#define CV_16SC(n) CV_MAKETYPE(CV_16S,(n)) + +#define CV_32SC1 CV_MAKETYPE(CV_32S,1) +#define CV_32SC2 CV_MAKETYPE(CV_32S,2) +#define CV_32SC3 CV_MAKETYPE(CV_32S,3) +#define CV_32SC4 CV_MAKETYPE(CV_32S,4) +#define CV_32SC(n) CV_MAKETYPE(CV_32S,(n)) + +#define CV_32FC1 CV_MAKETYPE(CV_32F,1) +#define CV_32FC2 CV_MAKETYPE(CV_32F,2) +#define CV_32FC3 CV_MAKETYPE(CV_32F,3) +#define CV_32FC4 CV_MAKETYPE(CV_32F,4) +#define CV_32FC(n) CV_MAKETYPE(CV_32F,(n)) + +#define CV_64FC1 CV_MAKETYPE(CV_64F,1) +#define CV_64FC2 CV_MAKETYPE(CV_64F,2) +#define 
CV_64FC3 CV_MAKETYPE(CV_64F,3) +#define CV_64FC4 CV_MAKETYPE(CV_64F,4) +#define CV_64FC(n) CV_MAKETYPE(CV_64F,(n)) +//! @} + +//! @name Comparison operation +//! @sa cv::CmpTypes +//! @{ +#define CV_HAL_CMP_EQ 0 +#define CV_HAL_CMP_GT 1 +#define CV_HAL_CMP_GE 2 +#define CV_HAL_CMP_LT 3 +#define CV_HAL_CMP_LE 4 +#define CV_HAL_CMP_NE 5 +//! @} + +//! @name Border processing modes +//! @sa cv::BorderTypes +//! @{ +#define CV_HAL_BORDER_CONSTANT 0 +#define CV_HAL_BORDER_REPLICATE 1 +#define CV_HAL_BORDER_REFLECT 2 +#define CV_HAL_BORDER_WRAP 3 +#define CV_HAL_BORDER_REFLECT_101 4 +#define CV_HAL_BORDER_TRANSPARENT 5 +#define CV_HAL_BORDER_ISOLATED 16 +//! @} + +//! @name DFT flags +//! @{ +#define CV_HAL_DFT_INVERSE 1 +#define CV_HAL_DFT_SCALE 2 +#define CV_HAL_DFT_ROWS 4 +#define CV_HAL_DFT_COMPLEX_OUTPUT 16 +#define CV_HAL_DFT_REAL_OUTPUT 32 +#define CV_HAL_DFT_TWO_STAGE 64 +#define CV_HAL_DFT_STAGE_COLS 128 +#define CV_HAL_DFT_IS_CONTINUOUS 512 +#define CV_HAL_DFT_IS_INPLACE 1024 +//! @} + +//! @name SVD flags +//! @{ +#define CV_HAL_SVD_NO_UV 1 +#define CV_HAL_SVD_SHORT_UV 2 +#define CV_HAL_SVD_MODIFY_A 4 +#define CV_HAL_SVD_FULL_UV 8 +//! @} + +//! @name Gemm flags +//! @{ +#define CV_HAL_GEMM_1_T 1 +#define CV_HAL_GEMM_2_T 2 +#define CV_HAL_GEMM_3_T 4 +//! @} + +//! @} + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin.hpp new file mode 100644 index 00000000..16d5284e --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin.hpp @@ -0,0 +1,698 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Copyright (C) 2015, Itseez Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_HAL_INTRIN_HPP +#define OPENCV_HAL_INTRIN_HPP + +#include +#include +#include +#include "opencv2/core/cvdef.h" + +#define OPENCV_HAL_ADD(a, b) ((a) + (b)) +#define OPENCV_HAL_AND(a, b) ((a) & (b)) +#define OPENCV_HAL_NOP(a) (a) +#define OPENCV_HAL_1ST(a, b) (a) + +namespace { +inline unsigned int trailingZeros32(unsigned int value) { +#if defined(_MSC_VER) +#if (_MSC_VER < 1700) || defined(_M_ARM) || defined(_M_ARM64) + unsigned long index = 0; + _BitScanForward(&index, value); + return (unsigned int)index; +#elif defined(__clang__) + // clang-cl doesn't export _tzcnt_u32 for non BMI systems + return value ? __builtin_ctz(value) : 32; +#else + return _tzcnt_u32(value); +#endif +#elif defined(__GNUC__) || defined(__GNUG__) + return __builtin_ctz(value); +#elif defined(__ICC) || defined(__INTEL_COMPILER) + return _bit_scan_forward(value); +#elif defined(__clang__) + return llvm.cttz.i32(value, true); +#else + static const int MultiplyDeBruijnBitPosition[32] = { + 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, + 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 }; + return MultiplyDeBruijnBitPosition[((uint32_t)((value & -value) * 0x077CB531U)) >> 27]; +#endif +} +} + +// unlike HAL API, which is in cv::hal, +// we put intrinsics into cv namespace to make its +// access from within opencv code more accessible +namespace cv { + +namespace hal { + +enum StoreMode +{ + STORE_UNALIGNED = 0, + STORE_ALIGNED = 1, + STORE_ALIGNED_NOCACHE = 2 +}; + +} + +// TODO FIXIT: Don't use "God" traits. Split on separate cases. 
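A side note on the portable fallback branch of trailingZeros32 above, since the constant looks opaque: (value & -value) isolates the lowest set bit, multiplying by the De Bruijn constant 0x077CB531U places a unique 5-bit pattern in the top bits, and the lookup table maps that pattern back to the bit index. A small self-contained check of that trick against the GCC/Clang builtin, illustrative only and not part of the vendored header:

    #include <cstdint>
    #include <cassert>

    // Re-derivation of the table-based fallback used by trailingZeros32.
    static unsigned debruijn_ctz(uint32_t value)
    {
        static const int tab[32] = {
            0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
            31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 };
        return tab[((uint32_t)((value & -value) * 0x077CB531U)) >> 27];
    }

    int main()
    {
        const uint32_t samples[] = { 1u, 2u, 96u, 0x80000000u };
        for (uint32_t v : samples)
            assert(debruijn_ctz(v) == (unsigned)__builtin_ctz(v)); // GCC/Clang builtin
        return 0;
    }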
+template struct V_TypeTraits +{ +}; + +#define CV_INTRIN_DEF_TYPE_TRAITS(type, int_type_, uint_type_, abs_type_, w_type_, q_type_, sum_type_) \ + template<> struct V_TypeTraits \ + { \ + typedef type value_type; \ + typedef int_type_ int_type; \ + typedef abs_type_ abs_type; \ + typedef uint_type_ uint_type; \ + typedef w_type_ w_type; \ + typedef q_type_ q_type; \ + typedef sum_type_ sum_type; \ + \ + static inline int_type reinterpret_int(type x) \ + { \ + union { type l; int_type i; } v; \ + v.l = x; \ + return v.i; \ + } \ + \ + static inline type reinterpret_from_int(int_type x) \ + { \ + union { type l; int_type i; } v; \ + v.i = x; \ + return v.l; \ + } \ + } + +#define CV_INTRIN_DEF_TYPE_TRAITS_NO_Q_TYPE(type, int_type_, uint_type_, abs_type_, w_type_, sum_type_) \ + template<> struct V_TypeTraits \ + { \ + typedef type value_type; \ + typedef int_type_ int_type; \ + typedef abs_type_ abs_type; \ + typedef uint_type_ uint_type; \ + typedef w_type_ w_type; \ + typedef sum_type_ sum_type; \ + \ + static inline int_type reinterpret_int(type x) \ + { \ + union { type l; int_type i; } v; \ + v.l = x; \ + return v.i; \ + } \ + \ + static inline type reinterpret_from_int(int_type x) \ + { \ + union { type l; int_type i; } v; \ + v.i = x; \ + return v.l; \ + } \ + } + +CV_INTRIN_DEF_TYPE_TRAITS(uchar, schar, uchar, uchar, ushort, unsigned, unsigned); +CV_INTRIN_DEF_TYPE_TRAITS(schar, schar, uchar, uchar, short, int, int); +CV_INTRIN_DEF_TYPE_TRAITS(ushort, short, ushort, ushort, unsigned, uint64, unsigned); +CV_INTRIN_DEF_TYPE_TRAITS(short, short, ushort, ushort, int, int64, int); +CV_INTRIN_DEF_TYPE_TRAITS_NO_Q_TYPE(unsigned, int, unsigned, unsigned, uint64, unsigned); +CV_INTRIN_DEF_TYPE_TRAITS_NO_Q_TYPE(int, int, unsigned, unsigned, int64, int); +CV_INTRIN_DEF_TYPE_TRAITS_NO_Q_TYPE(float, int, unsigned, float, double, float); +CV_INTRIN_DEF_TYPE_TRAITS_NO_Q_TYPE(uint64, int64, uint64, uint64, void, uint64); +CV_INTRIN_DEF_TYPE_TRAITS_NO_Q_TYPE(int64, int64, uint64, uint64, void, int64); +CV_INTRIN_DEF_TYPE_TRAITS_NO_Q_TYPE(double, int64, uint64, double, void, double); + +#ifndef CV_DOXYGEN + +#ifndef CV_CPU_OPTIMIZATION_HAL_NAMESPACE +#ifdef CV_FORCE_SIMD128_CPP + #define CV_CPU_OPTIMIZATION_HAL_NAMESPACE hal_EMULATOR_CPP + #define CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN namespace hal_EMULATOR_CPP { + #define CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END } +#elif defined(CV_CPU_DISPATCH_MODE) + #define CV_CPU_OPTIMIZATION_HAL_NAMESPACE __CV_CAT(hal_, CV_CPU_DISPATCH_MODE) + #define CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN namespace __CV_CAT(hal_, CV_CPU_DISPATCH_MODE) { + #define CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END } +#else + #define CV_CPU_OPTIMIZATION_HAL_NAMESPACE hal_baseline + #define CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN namespace hal_baseline { + #define CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END } +#endif +#endif // CV_CPU_OPTIMIZATION_HAL_NAMESPACE + +CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN +CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END +using namespace CV_CPU_OPTIMIZATION_HAL_NAMESPACE; +#endif +} + +#ifdef CV_DOXYGEN +# undef CV_AVX2 +# undef CV_SSE2 +# undef CV_NEON +# undef CV_VSX +# undef CV_FP16 +# undef CV_MSA +#endif + +#if (CV_SSE2 || CV_NEON || CV_VSX || CV_MSA || CV_WASM_SIMD) && !defined(CV_FORCE_SIMD128_CPP) +#define CV__SIMD_FORWARD 128 +#include "opencv2/core/hal/intrin_forward.hpp" +#endif + +#if CV_SSE2 && !defined(CV_FORCE_SIMD128_CPP) + +#include "opencv2/core/hal/intrin_sse_em.hpp" +#include "opencv2/core/hal/intrin_sse.hpp" + +#elif CV_NEON && !defined(CV_FORCE_SIMD128_CPP) 
+ +#include "opencv2/core/hal/intrin_neon.hpp" + +#elif CV_VSX && !defined(CV_FORCE_SIMD128_CPP) + +#include "opencv2/core/hal/intrin_vsx.hpp" + +#elif CV_MSA && !defined(CV_FORCE_SIMD128_CPP) + +#include "opencv2/core/hal/intrin_msa.hpp" + +#elif CV_WASM_SIMD && !defined(CV_FORCE_SIMD128_CPP) +#include "opencv2/core/hal/intrin_wasm.hpp" + +#else + +#include "opencv2/core/hal/intrin_cpp.hpp" + +#endif + +// AVX2 can be used together with SSE2, so +// we define those two sets of intrinsics at once. +// Most of the intrinsics do not conflict (the proper overloaded variant is +// resolved by the argument types, e.g. v_float32x4 ~ SSE2, v_float32x8 ~ AVX2), +// but some of AVX2 intrinsics get v256_ prefix instead of v_, e.g. v256_load() vs v_load(). +// Correspondingly, the wide intrinsics (which are mapped to the "widest" +// available instruction set) will get vx_ prefix +// (and will be mapped to v256_ counterparts) (e.g. vx_load() => v256_load()) +#if CV_AVX2 + +#define CV__SIMD_FORWARD 256 +#include "opencv2/core/hal/intrin_forward.hpp" +#include "opencv2/core/hal/intrin_avx.hpp" + +#endif + +// AVX512 can be used together with SSE2 and AVX2, so +// we define those sets of intrinsics at once. +// For some of AVX512 intrinsics get v512_ prefix instead of v_, e.g. v512_load() vs v_load(). +// Wide intrinsics will be mapped to v512_ counterparts in this case(e.g. vx_load() => v512_load()) +#if CV_AVX512_SKX + +#define CV__SIMD_FORWARD 512 +#include "opencv2/core/hal/intrin_forward.hpp" +#include "opencv2/core/hal/intrin_avx512.hpp" + +#endif + +//! @cond IGNORED + +namespace cv { + +#ifndef CV_DOXYGEN +CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN +#endif + +#ifndef CV_SIMD128 +#define CV_SIMD128 0 +#endif + +#ifndef CV_SIMD128_CPP +#define CV_SIMD128_CPP 0 +#endif + +#ifndef CV_SIMD128_64F +#define CV_SIMD128_64F 0 +#endif + +#ifndef CV_SIMD256 +#define CV_SIMD256 0 +#endif + +#ifndef CV_SIMD256_64F +#define CV_SIMD256_64F 0 +#endif + +#ifndef CV_SIMD512 +#define CV_SIMD512 0 +#endif + +#ifndef CV_SIMD512_64F +#define CV_SIMD512_64F 0 +#endif + +#ifndef CV_SIMD128_FP16 +#define CV_SIMD128_FP16 0 +#endif + +#ifndef CV_SIMD256_FP16 +#define CV_SIMD256_FP16 0 +#endif + +#ifndef CV_SIMD512_FP16 +#define CV_SIMD512_FP16 0 +#endif + +//================================================================================================== + +template struct V_RegTraits +{ +}; + +#define CV_DEF_REG_TRAITS(prefix, _reg, lane_type, suffix, _u_reg, _w_reg, _q_reg, _int_reg, _round_reg) \ + template<> struct V_RegTraits<_reg> \ + { \ + typedef _reg reg; \ + typedef _u_reg u_reg; \ + typedef _w_reg w_reg; \ + typedef _q_reg q_reg; \ + typedef _int_reg int_reg; \ + typedef _round_reg round_reg; \ + } + +#if CV_SIMD128 || CV_SIMD128_CPP + CV_DEF_REG_TRAITS(v, v_uint8x16, uchar, u8, v_uint8x16, v_uint16x8, v_uint32x4, v_int8x16, void); + CV_DEF_REG_TRAITS(v, v_int8x16, schar, s8, v_uint8x16, v_int16x8, v_int32x4, v_int8x16, void); + CV_DEF_REG_TRAITS(v, v_uint16x8, ushort, u16, v_uint16x8, v_uint32x4, v_uint64x2, v_int16x8, void); + CV_DEF_REG_TRAITS(v, v_int16x8, short, s16, v_uint16x8, v_int32x4, v_int64x2, v_int16x8, void); + CV_DEF_REG_TRAITS(v, v_uint32x4, unsigned, u32, v_uint32x4, v_uint64x2, void, v_int32x4, void); + CV_DEF_REG_TRAITS(v, v_int32x4, int, s32, v_uint32x4, v_int64x2, void, v_int32x4, void); +#if CV_SIMD128_64F || CV_SIMD128_CPP + CV_DEF_REG_TRAITS(v, v_float32x4, float, f32, v_float32x4, v_float64x2, void, v_int32x4, v_int32x4); +#else + CV_DEF_REG_TRAITS(v, v_float32x4, float, f32, v_float32x4, 
void, void, v_int32x4, v_int32x4); +#endif + CV_DEF_REG_TRAITS(v, v_uint64x2, uint64, u64, v_uint64x2, void, void, v_int64x2, void); + CV_DEF_REG_TRAITS(v, v_int64x2, int64, s64, v_uint64x2, void, void, v_int64x2, void); +#if CV_SIMD128_64F + CV_DEF_REG_TRAITS(v, v_float64x2, double, f64, v_float64x2, void, void, v_int64x2, v_int32x4); +#endif +#endif + +#if CV_SIMD256 + CV_DEF_REG_TRAITS(v256, v_uint8x32, uchar, u8, v_uint8x32, v_uint16x16, v_uint32x8, v_int8x32, void); + CV_DEF_REG_TRAITS(v256, v_int8x32, schar, s8, v_uint8x32, v_int16x16, v_int32x8, v_int8x32, void); + CV_DEF_REG_TRAITS(v256, v_uint16x16, ushort, u16, v_uint16x16, v_uint32x8, v_uint64x4, v_int16x16, void); + CV_DEF_REG_TRAITS(v256, v_int16x16, short, s16, v_uint16x16, v_int32x8, v_int64x4, v_int16x16, void); + CV_DEF_REG_TRAITS(v256, v_uint32x8, unsigned, u32, v_uint32x8, v_uint64x4, void, v_int32x8, void); + CV_DEF_REG_TRAITS(v256, v_int32x8, int, s32, v_uint32x8, v_int64x4, void, v_int32x8, void); + CV_DEF_REG_TRAITS(v256, v_float32x8, float, f32, v_float32x8, v_float64x4, void, v_int32x8, v_int32x8); + CV_DEF_REG_TRAITS(v256, v_uint64x4, uint64, u64, v_uint64x4, void, void, v_int64x4, void); + CV_DEF_REG_TRAITS(v256, v_int64x4, int64, s64, v_uint64x4, void, void, v_int64x4, void); + CV_DEF_REG_TRAITS(v256, v_float64x4, double, f64, v_float64x4, void, void, v_int64x4, v_int32x8); +#endif + +#if CV_SIMD512 + CV_DEF_REG_TRAITS(v512, v_uint8x64, uchar, u8, v_uint8x64, v_uint16x32, v_uint32x16, v_int8x64, void); + CV_DEF_REG_TRAITS(v512, v_int8x64, schar, s8, v_uint8x64, v_int16x32, v_int32x16, v_int8x64, void); + CV_DEF_REG_TRAITS(v512, v_uint16x32, ushort, u16, v_uint16x32, v_uint32x16, v_uint64x8, v_int16x32, void); + CV_DEF_REG_TRAITS(v512, v_int16x32, short, s16, v_uint16x32, v_int32x16, v_int64x8, v_int16x32, void); + CV_DEF_REG_TRAITS(v512, v_uint32x16, unsigned, u32, v_uint32x16, v_uint64x8, void, v_int32x16, void); + CV_DEF_REG_TRAITS(v512, v_int32x16, int, s32, v_uint32x16, v_int64x8, void, v_int32x16, void); + CV_DEF_REG_TRAITS(v512, v_float32x16, float, f32, v_float32x16, v_float64x8, void, v_int32x16, v_int32x16); + CV_DEF_REG_TRAITS(v512, v_uint64x8, uint64, u64, v_uint64x8, void, void, v_int64x8, void); + CV_DEF_REG_TRAITS(v512, v_int64x8, int64, s64, v_uint64x8, void, void, v_int64x8, void); + CV_DEF_REG_TRAITS(v512, v_float64x8, double, f64, v_float64x8, void, void, v_int64x8, v_int32x16); +#endif +//! @endcond + +#if CV_SIMD512 && (!defined(CV__SIMD_FORCE_WIDTH) || CV__SIMD_FORCE_WIDTH == 512) +#define CV__SIMD_NAMESPACE simd512 +namespace CV__SIMD_NAMESPACE { + #define CV_SIMD 1 + #define CV_SIMD_64F CV_SIMD512_64F + #define CV_SIMD_FP16 CV_SIMD512_FP16 + #define CV_SIMD_WIDTH 64 +//! @addtogroup core_hal_intrin +//! @{ + //! @brief Maximum available vector register capacity 8-bit unsigned integer values + typedef v_uint8x64 v_uint8; + //! @brief Maximum available vector register capacity 8-bit signed integer values + typedef v_int8x64 v_int8; + //! @brief Maximum available vector register capacity 16-bit unsigned integer values + typedef v_uint16x32 v_uint16; + //! @brief Maximum available vector register capacity 16-bit signed integer values + typedef v_int16x32 v_int16; + //! @brief Maximum available vector register capacity 32-bit unsigned integer values + typedef v_uint32x16 v_uint32; + //! @brief Maximum available vector register capacity 32-bit signed integer values + typedef v_int32x16 v_int32; + //! 
@brief Maximum available vector register capacity 64-bit unsigned integer values + typedef v_uint64x8 v_uint64; + //! @brief Maximum available vector register capacity 64-bit signed integer values + typedef v_int64x8 v_int64; + //! @brief Maximum available vector register capacity 32-bit floating point values (single precision) + typedef v_float32x16 v_float32; + #if CV_SIMD512_64F + //! @brief Maximum available vector register capacity 64-bit floating point values (double precision) + typedef v_float64x8 v_float64; + #endif +//! @} + + #define VXPREFIX(func) v512##func +} // namespace +using namespace CV__SIMD_NAMESPACE; +#elif CV_SIMD256 && (!defined(CV__SIMD_FORCE_WIDTH) || CV__SIMD_FORCE_WIDTH == 256) +#define CV__SIMD_NAMESPACE simd256 +namespace CV__SIMD_NAMESPACE { + #define CV_SIMD 1 + #define CV_SIMD_64F CV_SIMD256_64F + #define CV_SIMD_FP16 CV_SIMD256_FP16 + #define CV_SIMD_WIDTH 32 +//! @addtogroup core_hal_intrin +//! @{ + //! @brief Maximum available vector register capacity 8-bit unsigned integer values + typedef v_uint8x32 v_uint8; + //! @brief Maximum available vector register capacity 8-bit signed integer values + typedef v_int8x32 v_int8; + //! @brief Maximum available vector register capacity 16-bit unsigned integer values + typedef v_uint16x16 v_uint16; + //! @brief Maximum available vector register capacity 16-bit signed integer values + typedef v_int16x16 v_int16; + //! @brief Maximum available vector register capacity 32-bit unsigned integer values + typedef v_uint32x8 v_uint32; + //! @brief Maximum available vector register capacity 32-bit signed integer values + typedef v_int32x8 v_int32; + //! @brief Maximum available vector register capacity 64-bit unsigned integer values + typedef v_uint64x4 v_uint64; + //! @brief Maximum available vector register capacity 64-bit signed integer values + typedef v_int64x4 v_int64; + //! @brief Maximum available vector register capacity 32-bit floating point values (single precision) + typedef v_float32x8 v_float32; + #if CV_SIMD256_64F + //! @brief Maximum available vector register capacity 64-bit floating point values (double precision) + typedef v_float64x4 v_float64; + #endif +//! @} + + #define VXPREFIX(func) v256##func +} // namespace +using namespace CV__SIMD_NAMESPACE; +#elif (CV_SIMD128 || CV_SIMD128_CPP) && (!defined(CV__SIMD_FORCE_WIDTH) || CV__SIMD_FORCE_WIDTH == 128) +#if defined CV_SIMD128_CPP +#define CV__SIMD_NAMESPACE simd128_cpp +#else +#define CV__SIMD_NAMESPACE simd128 +#endif +namespace CV__SIMD_NAMESPACE { + #define CV_SIMD CV_SIMD128 + #define CV_SIMD_64F CV_SIMD128_64F + #define CV_SIMD_WIDTH 16 +//! @addtogroup core_hal_intrin +//! @{ + //! @brief Maximum available vector register capacity 8-bit unsigned integer values + typedef v_uint8x16 v_uint8; + //! @brief Maximum available vector register capacity 8-bit signed integer values + typedef v_int8x16 v_int8; + //! @brief Maximum available vector register capacity 16-bit unsigned integer values + typedef v_uint16x8 v_uint16; + //! @brief Maximum available vector register capacity 16-bit signed integer values + typedef v_int16x8 v_int16; + //! @brief Maximum available vector register capacity 32-bit unsigned integer values + typedef v_uint32x4 v_uint32; + //! @brief Maximum available vector register capacity 32-bit signed integer values + typedef v_int32x4 v_int32; + //! @brief Maximum available vector register capacity 64-bit unsigned integer values + typedef v_uint64x2 v_uint64; + //! 
@brief Maximum available vector register capacity 64-bit signed integer values + typedef v_int64x2 v_int64; + //! @brief Maximum available vector register capacity 32-bit floating point values (single precision) + typedef v_float32x4 v_float32; + #if CV_SIMD128_64F + //! @brief Maximum available vector register capacity 64-bit floating point values (double precision) + typedef v_float64x2 v_float64; + #endif +//! @} + + #define VXPREFIX(func) v##func +} // namespace +using namespace CV__SIMD_NAMESPACE; +#endif + +namespace CV__SIMD_NAMESPACE { +//! @addtogroup core_hal_intrin +//! @{ + //! @name Wide init with value + //! @{ + //! @brief Create maximum available capacity vector with elements set to a specific value + inline v_uint8 vx_setall_u8(uchar v) { return VXPREFIX(_setall_u8)(v); } + inline v_int8 vx_setall_s8(schar v) { return VXPREFIX(_setall_s8)(v); } + inline v_uint16 vx_setall_u16(ushort v) { return VXPREFIX(_setall_u16)(v); } + inline v_int16 vx_setall_s16(short v) { return VXPREFIX(_setall_s16)(v); } + inline v_int32 vx_setall_s32(int v) { return VXPREFIX(_setall_s32)(v); } + inline v_uint32 vx_setall_u32(unsigned v) { return VXPREFIX(_setall_u32)(v); } + inline v_float32 vx_setall_f32(float v) { return VXPREFIX(_setall_f32)(v); } + inline v_int64 vx_setall_s64(int64 v) { return VXPREFIX(_setall_s64)(v); } + inline v_uint64 vx_setall_u64(uint64 v) { return VXPREFIX(_setall_u64)(v); } +#if CV_SIMD_64F + inline v_float64 vx_setall_f64(double v) { return VXPREFIX(_setall_f64)(v); } +#endif + //! @} + + //! @name Wide init with zero + //! @{ + //! @brief Create maximum available capacity vector with elements set to zero + inline v_uint8 vx_setzero_u8() { return VXPREFIX(_setzero_u8)(); } + inline v_int8 vx_setzero_s8() { return VXPREFIX(_setzero_s8)(); } + inline v_uint16 vx_setzero_u16() { return VXPREFIX(_setzero_u16)(); } + inline v_int16 vx_setzero_s16() { return VXPREFIX(_setzero_s16)(); } + inline v_int32 vx_setzero_s32() { return VXPREFIX(_setzero_s32)(); } + inline v_uint32 vx_setzero_u32() { return VXPREFIX(_setzero_u32)(); } + inline v_float32 vx_setzero_f32() { return VXPREFIX(_setzero_f32)(); } + inline v_int64 vx_setzero_s64() { return VXPREFIX(_setzero_s64)(); } + inline v_uint64 vx_setzero_u64() { return VXPREFIX(_setzero_u64)(); } +#if CV_SIMD_64F + inline v_float64 vx_setzero_f64() { return VXPREFIX(_setzero_f64)(); } +#endif + //! @} + + //! @name Wide load from memory + //! @{ + //! @brief Load maximum available capacity register contents from memory + inline v_uint8 vx_load(const uchar * ptr) { return VXPREFIX(_load)(ptr); } + inline v_int8 vx_load(const schar * ptr) { return VXPREFIX(_load)(ptr); } + inline v_uint16 vx_load(const ushort * ptr) { return VXPREFIX(_load)(ptr); } + inline v_int16 vx_load(const short * ptr) { return VXPREFIX(_load)(ptr); } + inline v_int32 vx_load(const int * ptr) { return VXPREFIX(_load)(ptr); } + inline v_uint32 vx_load(const unsigned * ptr) { return VXPREFIX(_load)(ptr); } + inline v_float32 vx_load(const float * ptr) { return VXPREFIX(_load)(ptr); } + inline v_int64 vx_load(const int64 * ptr) { return VXPREFIX(_load)(ptr); } + inline v_uint64 vx_load(const uint64 * ptr) { return VXPREFIX(_load)(ptr); } +#if CV_SIMD_64F + inline v_float64 vx_load(const double * ptr) { return VXPREFIX(_load)(ptr); } +#endif + //! @} + + //! @name Wide load from memory(aligned) + //! @{ + //! 
@brief Load maximum available capacity register contents from memory(aligned) + inline v_uint8 vx_load_aligned(const uchar * ptr) { return VXPREFIX(_load_aligned)(ptr); } + inline v_int8 vx_load_aligned(const schar * ptr) { return VXPREFIX(_load_aligned)(ptr); } + inline v_uint16 vx_load_aligned(const ushort * ptr) { return VXPREFIX(_load_aligned)(ptr); } + inline v_int16 vx_load_aligned(const short * ptr) { return VXPREFIX(_load_aligned)(ptr); } + inline v_int32 vx_load_aligned(const int * ptr) { return VXPREFIX(_load_aligned)(ptr); } + inline v_uint32 vx_load_aligned(const unsigned * ptr) { return VXPREFIX(_load_aligned)(ptr); } + inline v_float32 vx_load_aligned(const float * ptr) { return VXPREFIX(_load_aligned)(ptr); } + inline v_int64 vx_load_aligned(const int64 * ptr) { return VXPREFIX(_load_aligned)(ptr); } + inline v_uint64 vx_load_aligned(const uint64 * ptr) { return VXPREFIX(_load_aligned)(ptr); } +#if CV_SIMD_64F + inline v_float64 vx_load_aligned(const double * ptr) { return VXPREFIX(_load_aligned)(ptr); } +#endif + //! @} + + //! @name Wide load lower half from memory + //! @{ + //! @brief Load lower half of maximum available capacity register from memory + inline v_uint8 vx_load_low(const uchar * ptr) { return VXPREFIX(_load_low)(ptr); } + inline v_int8 vx_load_low(const schar * ptr) { return VXPREFIX(_load_low)(ptr); } + inline v_uint16 vx_load_low(const ushort * ptr) { return VXPREFIX(_load_low)(ptr); } + inline v_int16 vx_load_low(const short * ptr) { return VXPREFIX(_load_low)(ptr); } + inline v_int32 vx_load_low(const int * ptr) { return VXPREFIX(_load_low)(ptr); } + inline v_uint32 vx_load_low(const unsigned * ptr) { return VXPREFIX(_load_low)(ptr); } + inline v_float32 vx_load_low(const float * ptr) { return VXPREFIX(_load_low)(ptr); } + inline v_int64 vx_load_low(const int64 * ptr) { return VXPREFIX(_load_low)(ptr); } + inline v_uint64 vx_load_low(const uint64 * ptr) { return VXPREFIX(_load_low)(ptr); } +#if CV_SIMD_64F + inline v_float64 vx_load_low(const double * ptr) { return VXPREFIX(_load_low)(ptr); } +#endif + //! @} + + //! @name Wide load halfs from memory + //! @{ + //! @brief Load maximum available capacity register contents from two memory blocks + inline v_uint8 vx_load_halves(const uchar * ptr0, const uchar * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); } + inline v_int8 vx_load_halves(const schar * ptr0, const schar * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); } + inline v_uint16 vx_load_halves(const ushort * ptr0, const ushort * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); } + inline v_int16 vx_load_halves(const short * ptr0, const short * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); } + inline v_int32 vx_load_halves(const int * ptr0, const int * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); } + inline v_uint32 vx_load_halves(const unsigned * ptr0, const unsigned * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); } + inline v_float32 vx_load_halves(const float * ptr0, const float * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); } + inline v_int64 vx_load_halves(const int64 * ptr0, const int64 * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); } + inline v_uint64 vx_load_halves(const uint64 * ptr0, const uint64 * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); } +#if CV_SIMD_64F + inline v_float64 vx_load_halves(const double * ptr0, const double * ptr1) { return VXPREFIX(_load_halves)(ptr0, ptr1); } +#endif + //! @} + + //! @name Wide LUT of elements + //! @{ + //! 
@brief Load maximum available capacity register contents with array elements by provided indexes + inline v_uint8 vx_lut(const uchar * ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); } + inline v_int8 vx_lut(const schar * ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); } + inline v_uint16 vx_lut(const ushort * ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); } + inline v_int16 vx_lut(const short* ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); } + inline v_int32 vx_lut(const int* ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); } + inline v_uint32 vx_lut(const unsigned* ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); } + inline v_float32 vx_lut(const float* ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); } + inline v_int64 vx_lut(const int64 * ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); } + inline v_uint64 vx_lut(const uint64 * ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); } +#if CV_SIMD_64F + inline v_float64 vx_lut(const double* ptr, const int* idx) { return VXPREFIX(_lut)(ptr, idx); } +#endif + //! @} + + //! @name Wide LUT of element pairs + //! @{ + //! @brief Load maximum available capacity register contents with array element pairs by provided indexes + inline v_uint8 vx_lut_pairs(const uchar * ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); } + inline v_int8 vx_lut_pairs(const schar * ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); } + inline v_uint16 vx_lut_pairs(const ushort * ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); } + inline v_int16 vx_lut_pairs(const short* ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); } + inline v_int32 vx_lut_pairs(const int* ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); } + inline v_uint32 vx_lut_pairs(const unsigned* ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); } + inline v_float32 vx_lut_pairs(const float* ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); } + inline v_int64 vx_lut_pairs(const int64 * ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); } + inline v_uint64 vx_lut_pairs(const uint64 * ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); } +#if CV_SIMD_64F + inline v_float64 vx_lut_pairs(const double* ptr, const int* idx) { return VXPREFIX(_lut_pairs)(ptr, idx); } +#endif + //! @} + + //! @name Wide LUT of element quads + //! @{ + //! @brief Load maximum available capacity register contents with array element quads by provided indexes + inline v_uint8 vx_lut_quads(const uchar* ptr, const int* idx) { return VXPREFIX(_lut_quads)(ptr, idx); } + inline v_int8 vx_lut_quads(const schar* ptr, const int* idx) { return VXPREFIX(_lut_quads)(ptr, idx); } + inline v_uint16 vx_lut_quads(const ushort* ptr, const int* idx) { return VXPREFIX(_lut_quads)(ptr, idx); } + inline v_int16 vx_lut_quads(const short* ptr, const int* idx) { return VXPREFIX(_lut_quads)(ptr, idx); } + inline v_int32 vx_lut_quads(const int* ptr, const int* idx) { return VXPREFIX(_lut_quads)(ptr, idx); } + inline v_uint32 vx_lut_quads(const unsigned* ptr, const int* idx) { return VXPREFIX(_lut_quads)(ptr, idx); } + inline v_float32 vx_lut_quads(const float* ptr, const int* idx) { return VXPREFIX(_lut_quads)(ptr, idx); } + //! @} + + //! @name Wide load with double expansion + //! @{ + //! 
@brief Load maximum available capacity register contents from memory with double expand + inline v_uint16 vx_load_expand(const uchar * ptr) { return VXPREFIX(_load_expand)(ptr); } + inline v_int16 vx_load_expand(const schar * ptr) { return VXPREFIX(_load_expand)(ptr); } + inline v_uint32 vx_load_expand(const ushort * ptr) { return VXPREFIX(_load_expand)(ptr); } + inline v_int32 vx_load_expand(const short* ptr) { return VXPREFIX(_load_expand)(ptr); } + inline v_int64 vx_load_expand(const int* ptr) { return VXPREFIX(_load_expand)(ptr); } + inline v_uint64 vx_load_expand(const unsigned* ptr) { return VXPREFIX(_load_expand)(ptr); } + inline v_float32 vx_load_expand(const float16_t * ptr) { return VXPREFIX(_load_expand)(ptr); } + //! @} + + //! @name Wide load with quad expansion + //! @{ + //! @brief Load maximum available capacity register contents from memory with quad expand + inline v_uint32 vx_load_expand_q(const uchar * ptr) { return VXPREFIX(_load_expand_q)(ptr); } + inline v_int32 vx_load_expand_q(const schar * ptr) { return VXPREFIX(_load_expand_q)(ptr); } + //! @} + + /** @brief SIMD processing state cleanup call */ + inline void vx_cleanup() { VXPREFIX(_cleanup)(); } + + +//! @cond IGNORED + + // backward compatibility + template static inline + void vx_store(_Tp* dst, const _Tvec& v) { return v_store(dst, v); } + // backward compatibility + template static inline + void vx_store_aligned(_Tp* dst, const _Tvec& v) { return v_store_aligned(dst, v); } + +//! @endcond + + +//! @} + #undef VXPREFIX +} // namespace + +//! @cond IGNORED +#ifndef CV_SIMD_64F +#define CV_SIMD_64F 0 +#endif + +#ifndef CV_SIMD_FP16 +#define CV_SIMD_FP16 0 //!< Defined to 1 on native support of operations with float16x8_t / float16x16_t (SIMD256) types +#endif + +#ifndef CV_SIMD +#define CV_SIMD 0 +#endif + +#include "simd_utils.impl.hpp" + +#ifndef CV_DOXYGEN +CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END +#endif + +} // cv:: + +//! @endcond + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_avx.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_avx.hpp new file mode 100644 index 00000000..54e89271 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_avx.hpp @@ -0,0 +1,3165 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html + +#ifndef OPENCV_HAL_INTRIN_AVX_HPP +#define OPENCV_HAL_INTRIN_AVX_HPP + +#define CV_SIMD256 1 +#define CV_SIMD256_64F 1 +#define CV_SIMD256_FP16 0 // no native operations with FP16 type. Only load/store from float32x8 are available (if CV_FP16 == 1) + +namespace cv +{ + +//! 
@cond IGNORED + +CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN + +///////// Utils //////////// + +inline __m256i _v256_combine(const __m128i& lo, const __m128i& hi) +{ return _mm256_inserti128_si256(_mm256_castsi128_si256(lo), hi, 1); } + +inline __m256 _v256_combine(const __m128& lo, const __m128& hi) +{ return _mm256_insertf128_ps(_mm256_castps128_ps256(lo), hi, 1); } + +inline __m256d _v256_combine(const __m128d& lo, const __m128d& hi) +{ return _mm256_insertf128_pd(_mm256_castpd128_pd256(lo), hi, 1); } + +inline int _v_cvtsi256_si32(const __m256i& a) +{ return _mm_cvtsi128_si32(_mm256_castsi256_si128(a)); } + +inline __m256i _v256_shuffle_odd_64(const __m256i& v) +{ return _mm256_permute4x64_epi64(v, _MM_SHUFFLE(3, 1, 2, 0)); } + +inline __m256d _v256_shuffle_odd_64(const __m256d& v) +{ return _mm256_permute4x64_pd(v, _MM_SHUFFLE(3, 1, 2, 0)); } + +template +inline __m256i _v256_permute2x128(const __m256i& a, const __m256i& b) +{ return _mm256_permute2x128_si256(a, b, imm); } + +template +inline __m256 _v256_permute2x128(const __m256& a, const __m256& b) +{ return _mm256_permute2f128_ps(a, b, imm); } + +template +inline __m256d _v256_permute2x128(const __m256d& a, const __m256d& b) +{ return _mm256_permute2f128_pd(a, b, imm); } + +template +inline _Tpvec v256_permute2x128(const _Tpvec& a, const _Tpvec& b) +{ return _Tpvec(_v256_permute2x128(a.val, b.val)); } + +template +inline __m256i _v256_permute4x64(const __m256i& a) +{ return _mm256_permute4x64_epi64(a, imm); } + +template +inline __m256d _v256_permute4x64(const __m256d& a) +{ return _mm256_permute4x64_pd(a, imm); } + +template +inline _Tpvec v256_permute4x64(const _Tpvec& a) +{ return _Tpvec(_v256_permute4x64(a.val)); } + +inline __m128i _v256_extract_high(const __m256i& v) +{ return _mm256_extracti128_si256(v, 1); } + +inline __m128 _v256_extract_high(const __m256& v) +{ return _mm256_extractf128_ps(v, 1); } + +inline __m128d _v256_extract_high(const __m256d& v) +{ return _mm256_extractf128_pd(v, 1); } + +inline __m128i _v256_extract_low(const __m256i& v) +{ return _mm256_castsi256_si128(v); } + +inline __m128 _v256_extract_low(const __m256& v) +{ return _mm256_castps256_ps128(v); } + +inline __m128d _v256_extract_low(const __m256d& v) +{ return _mm256_castpd256_pd128(v); } + +inline __m256i _v256_packs_epu32(const __m256i& a, const __m256i& b) +{ + const __m256i m = _mm256_set1_epi32(65535); + __m256i am = _mm256_min_epu32(a, m); + __m256i bm = _mm256_min_epu32(b, m); + return _mm256_packus_epi32(am, bm); +} + +template +inline int _v256_extract_epi8(const __m256i& a) +{ +#if defined(CV__SIMD_HAVE_mm256_extract_epi8) || (CV_AVX2 && (!defined(_MSC_VER) || _MSC_VER >= 1910/*MSVS 2017*/)) + return _mm256_extract_epi8(a, i); +#else + __m128i b = _mm256_extractf128_si256(a, ((i) >> 4)); + return _mm_extract_epi8(b, i & 15); // SSE4.1 +#endif +} + +template +inline int _v256_extract_epi16(const __m256i& a) +{ +#if defined(CV__SIMD_HAVE_mm256_extract_epi8) || (CV_AVX2 && (!defined(_MSC_VER) || _MSC_VER >= 1910/*MSVS 2017*/)) + return _mm256_extract_epi16(a, i); +#else + __m128i b = _mm256_extractf128_si256(a, ((i) >> 3)); + return _mm_extract_epi16(b, i & 7); // SSE2 +#endif +} + +template +inline int _v256_extract_epi32(const __m256i& a) +{ +#if defined(CV__SIMD_HAVE_mm256_extract_epi8) || (CV_AVX2 && (!defined(_MSC_VER) || _MSC_VER >= 1910/*MSVS 2017*/)) + return _mm256_extract_epi32(a, i); +#else + __m128i b = _mm256_extractf128_si256(a, ((i) >> 2)); + return _mm_extract_epi32(b, i & 3); // SSE4.1 +#endif +} + +template +inline int64 
_v256_extract_epi64(const __m256i& a) +{ +#if defined(CV__SIMD_HAVE_mm256_extract_epi8) || (CV_AVX2 && (!defined(_MSC_VER) || _MSC_VER >= 1910/*MSVS 2017*/)) + return _mm256_extract_epi64(a, i); +#else + __m128i b = _mm256_extractf128_si256(a, ((i) >> 1)); + return _mm_extract_epi64(b, i & 1); // SSE4.1 +#endif +} + +///////// Types //////////// + +struct v_uint8x32 +{ + typedef uchar lane_type; + enum { nlanes = 32 }; + __m256i val; + + explicit v_uint8x32(__m256i v) : val(v) {} + v_uint8x32(uchar v0, uchar v1, uchar v2, uchar v3, + uchar v4, uchar v5, uchar v6, uchar v7, + uchar v8, uchar v9, uchar v10, uchar v11, + uchar v12, uchar v13, uchar v14, uchar v15, + uchar v16, uchar v17, uchar v18, uchar v19, + uchar v20, uchar v21, uchar v22, uchar v23, + uchar v24, uchar v25, uchar v26, uchar v27, + uchar v28, uchar v29, uchar v30, uchar v31) + { + val = _mm256_setr_epi8((char)v0, (char)v1, (char)v2, (char)v3, + (char)v4, (char)v5, (char)v6 , (char)v7, (char)v8, (char)v9, + (char)v10, (char)v11, (char)v12, (char)v13, (char)v14, (char)v15, + (char)v16, (char)v17, (char)v18, (char)v19, (char)v20, (char)v21, + (char)v22, (char)v23, (char)v24, (char)v25, (char)v26, (char)v27, + (char)v28, (char)v29, (char)v30, (char)v31); + } + /* coverity[uninit_ctor]: suppress warning */ + v_uint8x32() {} + + uchar get0() const { return (uchar)_v_cvtsi256_si32(val); } +}; + +struct v_int8x32 +{ + typedef schar lane_type; + enum { nlanes = 32 }; + __m256i val; + + explicit v_int8x32(__m256i v) : val(v) {} + v_int8x32(schar v0, schar v1, schar v2, schar v3, + schar v4, schar v5, schar v6, schar v7, + schar v8, schar v9, schar v10, schar v11, + schar v12, schar v13, schar v14, schar v15, + schar v16, schar v17, schar v18, schar v19, + schar v20, schar v21, schar v22, schar v23, + schar v24, schar v25, schar v26, schar v27, + schar v28, schar v29, schar v30, schar v31) + { + val = _mm256_setr_epi8(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, + v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, + v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31); + } + /* coverity[uninit_ctor]: suppress warning */ + v_int8x32() {} + + schar get0() const { return (schar)_v_cvtsi256_si32(val); } +}; + +struct v_uint16x16 +{ + typedef ushort lane_type; + enum { nlanes = 16 }; + __m256i val; + + explicit v_uint16x16(__m256i v) : val(v) {} + v_uint16x16(ushort v0, ushort v1, ushort v2, ushort v3, + ushort v4, ushort v5, ushort v6, ushort v7, + ushort v8, ushort v9, ushort v10, ushort v11, + ushort v12, ushort v13, ushort v14, ushort v15) + { + val = _mm256_setr_epi16((short)v0, (short)v1, (short)v2, (short)v3, + (short)v4, (short)v5, (short)v6, (short)v7, (short)v8, (short)v9, + (short)v10, (short)v11, (short)v12, (short)v13, (short)v14, (short)v15); + } + /* coverity[uninit_ctor]: suppress warning */ + v_uint16x16() {} + + ushort get0() const { return (ushort)_v_cvtsi256_si32(val); } +}; + +struct v_int16x16 +{ + typedef short lane_type; + enum { nlanes = 16 }; + __m256i val; + + explicit v_int16x16(__m256i v) : val(v) {} + v_int16x16(short v0, short v1, short v2, short v3, + short v4, short v5, short v6, short v7, + short v8, short v9, short v10, short v11, + short v12, short v13, short v14, short v15) + { + val = _mm256_setr_epi16(v0, v1, v2, v3, v4, v5, v6, v7, + v8, v9, v10, v11, v12, v13, v14, v15); + } + /* coverity[uninit_ctor]: suppress warning */ + v_int16x16() {} + + short get0() const { return (short)_v_cvtsi256_si32(val); } +}; + +struct v_uint32x8 +{ + typedef unsigned lane_type; + enum { nlanes = 8 }; + __m256i val; 
+ + explicit v_uint32x8(__m256i v) : val(v) {} + v_uint32x8(unsigned v0, unsigned v1, unsigned v2, unsigned v3, + unsigned v4, unsigned v5, unsigned v6, unsigned v7) + { + val = _mm256_setr_epi32((unsigned)v0, (unsigned)v1, (unsigned)v2, + (unsigned)v3, (unsigned)v4, (unsigned)v5, (unsigned)v6, (unsigned)v7); + } + /* coverity[uninit_ctor]: suppress warning */ + v_uint32x8() {} + + unsigned get0() const { return (unsigned)_v_cvtsi256_si32(val); } +}; + +struct v_int32x8 +{ + typedef int lane_type; + enum { nlanes = 8 }; + __m256i val; + + explicit v_int32x8(__m256i v) : val(v) {} + v_int32x8(int v0, int v1, int v2, int v3, + int v4, int v5, int v6, int v7) + { + val = _mm256_setr_epi32(v0, v1, v2, v3, v4, v5, v6, v7); + } + /* coverity[uninit_ctor]: suppress warning */ + v_int32x8() {} + + int get0() const { return _v_cvtsi256_si32(val); } +}; + +struct v_float32x8 +{ + typedef float lane_type; + enum { nlanes = 8 }; + __m256 val; + + explicit v_float32x8(__m256 v) : val(v) {} + v_float32x8(float v0, float v1, float v2, float v3, + float v4, float v5, float v6, float v7) + { + val = _mm256_setr_ps(v0, v1, v2, v3, v4, v5, v6, v7); + } + /* coverity[uninit_ctor]: suppress warning */ + v_float32x8() {} + + float get0() const { return _mm_cvtss_f32(_mm256_castps256_ps128(val)); } +}; + +struct v_uint64x4 +{ + typedef uint64 lane_type; + enum { nlanes = 4 }; + __m256i val; + + explicit v_uint64x4(__m256i v) : val(v) {} + v_uint64x4(uint64 v0, uint64 v1, uint64 v2, uint64 v3) + { val = _mm256_setr_epi64x((int64)v0, (int64)v1, (int64)v2, (int64)v3); } + /* coverity[uninit_ctor]: suppress warning */ + v_uint64x4() {} + + uint64 get0() const + { + #if defined __x86_64__ || defined _M_X64 + return (uint64)_mm_cvtsi128_si64(_mm256_castsi256_si128(val)); + #else + int a = _mm_cvtsi128_si32(_mm256_castsi256_si128(val)); + int b = _mm_cvtsi128_si32(_mm256_castsi256_si128(_mm256_srli_epi64(val, 32))); + return (unsigned)a | ((uint64)(unsigned)b << 32); + #endif + } +}; + +struct v_int64x4 +{ + typedef int64 lane_type; + enum { nlanes = 4 }; + __m256i val; + + explicit v_int64x4(__m256i v) : val(v) {} + v_int64x4(int64 v0, int64 v1, int64 v2, int64 v3) + { val = _mm256_setr_epi64x(v0, v1, v2, v3); } + /* coverity[uninit_ctor]: suppress warning */ + v_int64x4() {} + + int64 get0() const + { + #if defined __x86_64__ || defined _M_X64 + return (int64)_mm_cvtsi128_si64(_mm256_castsi256_si128(val)); + #else + int a = _mm_cvtsi128_si32(_mm256_castsi256_si128(val)); + int b = _mm_cvtsi128_si32(_mm256_castsi256_si128(_mm256_srli_epi64(val, 32))); + return (int64)((unsigned)a | ((uint64)(unsigned)b << 32)); + #endif + } +}; + +struct v_float64x4 +{ + typedef double lane_type; + enum { nlanes = 4 }; + __m256d val; + + explicit v_float64x4(__m256d v) : val(v) {} + v_float64x4(double v0, double v1, double v2, double v3) + { val = _mm256_setr_pd(v0, v1, v2, v3); } + /* coverity[uninit_ctor]: suppress warning */ + v_float64x4() {} + + double get0() const { return _mm_cvtsd_f64(_mm256_castpd256_pd128(val)); } +}; + +//////////////// Load and store operations /////////////// + +#define OPENCV_HAL_IMPL_AVX_LOADSTORE(_Tpvec, _Tp) \ + inline _Tpvec v256_load(const _Tp* ptr) \ + { return _Tpvec(_mm256_loadu_si256((const __m256i*)ptr)); } \ + inline _Tpvec v256_load_aligned(const _Tp* ptr) \ + { return _Tpvec(_mm256_load_si256((const __m256i*)ptr)); } \ + inline _Tpvec v256_load_low(const _Tp* ptr) \ + { \ + __m128i v128 = _mm_loadu_si128((const __m128i*)ptr); \ + return _Tpvec(_mm256_castsi128_si256(v128)); \ + } \ + inline 
_Tpvec v256_load_halves(const _Tp* ptr0, const _Tp* ptr1) \ + { \ + __m128i vlo = _mm_loadu_si128((const __m128i*)ptr0); \ + __m128i vhi = _mm_loadu_si128((const __m128i*)ptr1); \ + return _Tpvec(_v256_combine(vlo, vhi)); \ + } \ + inline void v_store(_Tp* ptr, const _Tpvec& a) \ + { _mm256_storeu_si256((__m256i*)ptr, a.val); } \ + inline void v_store_aligned(_Tp* ptr, const _Tpvec& a) \ + { _mm256_store_si256((__m256i*)ptr, a.val); } \ + inline void v_store_aligned_nocache(_Tp* ptr, const _Tpvec& a) \ + { _mm256_stream_si256((__m256i*)ptr, a.val); } \ + inline void v_store(_Tp* ptr, const _Tpvec& a, hal::StoreMode mode) \ + { \ + if( mode == hal::STORE_UNALIGNED ) \ + _mm256_storeu_si256((__m256i*)ptr, a.val); \ + else if( mode == hal::STORE_ALIGNED_NOCACHE ) \ + _mm256_stream_si256((__m256i*)ptr, a.val); \ + else \ + _mm256_store_si256((__m256i*)ptr, a.val); \ + } \ + inline void v_store_low(_Tp* ptr, const _Tpvec& a) \ + { _mm_storeu_si128((__m128i*)ptr, _v256_extract_low(a.val)); } \ + inline void v_store_high(_Tp* ptr, const _Tpvec& a) \ + { _mm_storeu_si128((__m128i*)ptr, _v256_extract_high(a.val)); } + +OPENCV_HAL_IMPL_AVX_LOADSTORE(v_uint8x32, uchar) +OPENCV_HAL_IMPL_AVX_LOADSTORE(v_int8x32, schar) +OPENCV_HAL_IMPL_AVX_LOADSTORE(v_uint16x16, ushort) +OPENCV_HAL_IMPL_AVX_LOADSTORE(v_int16x16, short) +OPENCV_HAL_IMPL_AVX_LOADSTORE(v_uint32x8, unsigned) +OPENCV_HAL_IMPL_AVX_LOADSTORE(v_int32x8, int) +OPENCV_HAL_IMPL_AVX_LOADSTORE(v_uint64x4, uint64) +OPENCV_HAL_IMPL_AVX_LOADSTORE(v_int64x4, int64) + +#define OPENCV_HAL_IMPL_AVX_LOADSTORE_FLT(_Tpvec, _Tp, suffix, halfreg) \ + inline _Tpvec v256_load(const _Tp* ptr) \ + { return _Tpvec(_mm256_loadu_##suffix(ptr)); } \ + inline _Tpvec v256_load_aligned(const _Tp* ptr) \ + { return _Tpvec(_mm256_load_##suffix(ptr)); } \ + inline _Tpvec v256_load_low(const _Tp* ptr) \ + { \ + return _Tpvec(_mm256_cast##suffix##128_##suffix##256 \ + (_mm_loadu_##suffix(ptr))); \ + } \ + inline _Tpvec v256_load_halves(const _Tp* ptr0, const _Tp* ptr1) \ + { \ + halfreg vlo = _mm_loadu_##suffix(ptr0); \ + halfreg vhi = _mm_loadu_##suffix(ptr1); \ + return _Tpvec(_v256_combine(vlo, vhi)); \ + } \ + inline void v_store(_Tp* ptr, const _Tpvec& a) \ + { _mm256_storeu_##suffix(ptr, a.val); } \ + inline void v_store_aligned(_Tp* ptr, const _Tpvec& a) \ + { _mm256_store_##suffix(ptr, a.val); } \ + inline void v_store_aligned_nocache(_Tp* ptr, const _Tpvec& a) \ + { _mm256_stream_##suffix(ptr, a.val); } \ + inline void v_store(_Tp* ptr, const _Tpvec& a, hal::StoreMode mode) \ + { \ + if( mode == hal::STORE_UNALIGNED ) \ + _mm256_storeu_##suffix(ptr, a.val); \ + else if( mode == hal::STORE_ALIGNED_NOCACHE ) \ + _mm256_stream_##suffix(ptr, a.val); \ + else \ + _mm256_store_##suffix(ptr, a.val); \ + } \ + inline void v_store_low(_Tp* ptr, const _Tpvec& a) \ + { _mm_storeu_##suffix(ptr, _v256_extract_low(a.val)); } \ + inline void v_store_high(_Tp* ptr, const _Tpvec& a) \ + { _mm_storeu_##suffix(ptr, _v256_extract_high(a.val)); } + +OPENCV_HAL_IMPL_AVX_LOADSTORE_FLT(v_float32x8, float, ps, __m128) +OPENCV_HAL_IMPL_AVX_LOADSTORE_FLT(v_float64x4, double, pd, __m128d) + +#define OPENCV_HAL_IMPL_AVX_CAST(_Tpvec, _Tpvecf, suffix, cast) \ + inline _Tpvec v_reinterpret_as_##suffix(const _Tpvecf& a) \ + { return _Tpvec(cast(a.val)); } + +#define OPENCV_HAL_IMPL_AVX_INIT(_Tpvec, _Tp, suffix, ssuffix, ctype_s) \ + inline _Tpvec v256_setzero_##suffix() \ + { return _Tpvec(_mm256_setzero_si256()); } \ + inline _Tpvec v256_setall_##suffix(_Tp v) \ + { return 
_Tpvec(_mm256_set1_##ssuffix((ctype_s)v)); } \ + OPENCV_HAL_IMPL_AVX_CAST(_Tpvec, v_uint8x32, suffix, OPENCV_HAL_NOP) \ + OPENCV_HAL_IMPL_AVX_CAST(_Tpvec, v_int8x32, suffix, OPENCV_HAL_NOP) \ + OPENCV_HAL_IMPL_AVX_CAST(_Tpvec, v_uint16x16, suffix, OPENCV_HAL_NOP) \ + OPENCV_HAL_IMPL_AVX_CAST(_Tpvec, v_int16x16, suffix, OPENCV_HAL_NOP) \ + OPENCV_HAL_IMPL_AVX_CAST(_Tpvec, v_uint32x8, suffix, OPENCV_HAL_NOP) \ + OPENCV_HAL_IMPL_AVX_CAST(_Tpvec, v_int32x8, suffix, OPENCV_HAL_NOP) \ + OPENCV_HAL_IMPL_AVX_CAST(_Tpvec, v_uint64x4, suffix, OPENCV_HAL_NOP) \ + OPENCV_HAL_IMPL_AVX_CAST(_Tpvec, v_int64x4, suffix, OPENCV_HAL_NOP) \ + OPENCV_HAL_IMPL_AVX_CAST(_Tpvec, v_float32x8, suffix, _mm256_castps_si256) \ + OPENCV_HAL_IMPL_AVX_CAST(_Tpvec, v_float64x4, suffix, _mm256_castpd_si256) + +OPENCV_HAL_IMPL_AVX_INIT(v_uint8x32, uchar, u8, epi8, char) +OPENCV_HAL_IMPL_AVX_INIT(v_int8x32, schar, s8, epi8, char) +OPENCV_HAL_IMPL_AVX_INIT(v_uint16x16, ushort, u16, epi16, short) +OPENCV_HAL_IMPL_AVX_INIT(v_int16x16, short, s16, epi16, short) +OPENCV_HAL_IMPL_AVX_INIT(v_uint32x8, unsigned, u32, epi32, int) +OPENCV_HAL_IMPL_AVX_INIT(v_int32x8, int, s32, epi32, int) +OPENCV_HAL_IMPL_AVX_INIT(v_uint64x4, uint64, u64, epi64x, int64) +OPENCV_HAL_IMPL_AVX_INIT(v_int64x4, int64, s64, epi64x, int64) + +#define OPENCV_HAL_IMPL_AVX_INIT_FLT(_Tpvec, _Tp, suffix, zsuffix, cast) \ + inline _Tpvec v256_setzero_##suffix() \ + { return _Tpvec(_mm256_setzero_##zsuffix()); } \ + inline _Tpvec v256_setall_##suffix(_Tp v) \ + { return _Tpvec(_mm256_set1_##zsuffix(v)); } \ + OPENCV_HAL_IMPL_AVX_CAST(_Tpvec, v_uint8x32, suffix, cast) \ + OPENCV_HAL_IMPL_AVX_CAST(_Tpvec, v_int8x32, suffix, cast) \ + OPENCV_HAL_IMPL_AVX_CAST(_Tpvec, v_uint16x16, suffix, cast) \ + OPENCV_HAL_IMPL_AVX_CAST(_Tpvec, v_int16x16, suffix, cast) \ + OPENCV_HAL_IMPL_AVX_CAST(_Tpvec, v_uint32x8, suffix, cast) \ + OPENCV_HAL_IMPL_AVX_CAST(_Tpvec, v_int32x8, suffix, cast) \ + OPENCV_HAL_IMPL_AVX_CAST(_Tpvec, v_uint64x4, suffix, cast) \ + OPENCV_HAL_IMPL_AVX_CAST(_Tpvec, v_int64x4, suffix, cast) + +OPENCV_HAL_IMPL_AVX_INIT_FLT(v_float32x8, float, f32, ps, _mm256_castsi256_ps) +OPENCV_HAL_IMPL_AVX_INIT_FLT(v_float64x4, double, f64, pd, _mm256_castsi256_pd) + +inline v_float32x8 v_reinterpret_as_f32(const v_float32x8& a) +{ return a; } +inline v_float32x8 v_reinterpret_as_f32(const v_float64x4& a) +{ return v_float32x8(_mm256_castpd_ps(a.val)); } + +inline v_float64x4 v_reinterpret_as_f64(const v_float64x4& a) +{ return a; } +inline v_float64x4 v_reinterpret_as_f64(const v_float32x8& a) +{ return v_float64x4(_mm256_castps_pd(a.val)); } + +/* Recombine */ +/*#define OPENCV_HAL_IMPL_AVX_COMBINE(_Tpvec, perm) \ + inline _Tpvec v_combine_low(const _Tpvec& a, const _Tpvec& b) \ + { return _Tpvec(perm(a.val, b.val, 0x20)); } \ + inline _Tpvec v_combine_high(const _Tpvec& a, const _Tpvec& b) \ + { return _Tpvec(perm(a.val, b.val, 0x31)); } \ + inline void v_recombine(const _Tpvec& a, const _Tpvec& b, \ + _Tpvec& c, _Tpvec& d) \ + { c = v_combine_low(a, b); d = v_combine_high(a, b); } + +#define OPENCV_HAL_IMPL_AVX_UNPACKS(_Tpvec, suffix) \ + OPENCV_HAL_IMPL_AVX_COMBINE(_Tpvec, _mm256_permute2x128_si256) \ + inline void v_zip(const _Tpvec& a0, const _Tpvec& a1, \ + _Tpvec& b0, _Tpvec& b1) \ + { \ + __m256i v0 = _v256_shuffle_odd_64(a0.val); \ + __m256i v1 = _v256_shuffle_odd_64(a1.val); \ + b0.val = _mm256_unpacklo_##suffix(v0, v1); \ + b1.val = _mm256_unpackhi_##suffix(v0, v1); \ + } + +OPENCV_HAL_IMPL_AVX_UNPACKS(v_uint8x32, epi8) +OPENCV_HAL_IMPL_AVX_UNPACKS(v_int8x32, 
epi8) +OPENCV_HAL_IMPL_AVX_UNPACKS(v_uint16x16, epi16) +OPENCV_HAL_IMPL_AVX_UNPACKS(v_int16x16, epi16) +OPENCV_HAL_IMPL_AVX_UNPACKS(v_uint32x8, epi32) +OPENCV_HAL_IMPL_AVX_UNPACKS(v_int32x8, epi32) +OPENCV_HAL_IMPL_AVX_UNPACKS(v_uint64x4, epi64) +OPENCV_HAL_IMPL_AVX_UNPACKS(v_int64x4, epi64) +OPENCV_HAL_IMPL_AVX_COMBINE(v_float32x8, _mm256_permute2f128_ps) +OPENCV_HAL_IMPL_AVX_COMBINE(v_float64x4, _mm256_permute2f128_pd) + +inline void v_zip(const v_float32x8& a0, const v_float32x8& a1, v_float32x8& b0, v_float32x8& b1) +{ + __m256 v0 = _mm256_unpacklo_ps(a0.val, a1.val); + __m256 v1 = _mm256_unpackhi_ps(a0.val, a1.val); + v_recombine(v_float32x8(v0), v_float32x8(v1), b0, b1); +} + +inline void v_zip(const v_float64x4& a0, const v_float64x4& a1, v_float64x4& b0, v_float64x4& b1) +{ + __m256d v0 = _v_shuffle_odd_64(a0.val); + __m256d v1 = _v_shuffle_odd_64(a1.val); + b0.val = _mm256_unpacklo_pd(v0, v1); + b1.val = _mm256_unpackhi_pd(v0, v1); +}*/ + +//////////////// Variant Value reordering /////////////// + +// unpacks +#define OPENCV_HAL_IMPL_AVX_UNPACK(_Tpvec, suffix) \ + inline _Tpvec v256_unpacklo(const _Tpvec& a, const _Tpvec& b) \ + { return _Tpvec(_mm256_unpacklo_##suffix(a.val, b.val)); } \ + inline _Tpvec v256_unpackhi(const _Tpvec& a, const _Tpvec& b) \ + { return _Tpvec(_mm256_unpackhi_##suffix(a.val, b.val)); } + +OPENCV_HAL_IMPL_AVX_UNPACK(v_uint8x32, epi8) +OPENCV_HAL_IMPL_AVX_UNPACK(v_int8x32, epi8) +OPENCV_HAL_IMPL_AVX_UNPACK(v_uint16x16, epi16) +OPENCV_HAL_IMPL_AVX_UNPACK(v_int16x16, epi16) +OPENCV_HAL_IMPL_AVX_UNPACK(v_uint32x8, epi32) +OPENCV_HAL_IMPL_AVX_UNPACK(v_int32x8, epi32) +OPENCV_HAL_IMPL_AVX_UNPACK(v_uint64x4, epi64) +OPENCV_HAL_IMPL_AVX_UNPACK(v_int64x4, epi64) +OPENCV_HAL_IMPL_AVX_UNPACK(v_float32x8, ps) +OPENCV_HAL_IMPL_AVX_UNPACK(v_float64x4, pd) + +// blend +#define OPENCV_HAL_IMPL_AVX_BLEND(_Tpvec, suffix) \ + template \ + inline _Tpvec v256_blend(const _Tpvec& a, const _Tpvec& b) \ + { return _Tpvec(_mm256_blend_##suffix(a.val, b.val, m)); } + +OPENCV_HAL_IMPL_AVX_BLEND(v_uint16x16, epi16) +OPENCV_HAL_IMPL_AVX_BLEND(v_int16x16, epi16) +OPENCV_HAL_IMPL_AVX_BLEND(v_uint32x8, epi32) +OPENCV_HAL_IMPL_AVX_BLEND(v_int32x8, epi32) +OPENCV_HAL_IMPL_AVX_BLEND(v_float32x8, ps) +OPENCV_HAL_IMPL_AVX_BLEND(v_float64x4, pd) + +template +inline v_uint64x4 v256_blend(const v_uint64x4& a, const v_uint64x4& b) +{ + enum {M0 = m}; + enum {M1 = (M0 | (M0 << 2)) & 0x33}; + enum {M2 = (M1 | (M1 << 1)) & 0x55}; + enum {MM = M2 | (M2 << 1)}; + return v_uint64x4(_mm256_blend_epi32(a.val, b.val, MM)); +} +template +inline v_int64x4 v256_blend(const v_int64x4& a, const v_int64x4& b) +{ return v_int64x4(v256_blend(v_uint64x4(a.val), v_uint64x4(b.val)).val); } + +// shuffle +// todo: emulate 64bit +#define OPENCV_HAL_IMPL_AVX_SHUFFLE(_Tpvec, intrin) \ + template \ + inline _Tpvec v256_shuffle(const _Tpvec& a) \ + { return _Tpvec(_mm256_##intrin(a.val, m)); } + +OPENCV_HAL_IMPL_AVX_SHUFFLE(v_uint32x8, shuffle_epi32) +OPENCV_HAL_IMPL_AVX_SHUFFLE(v_int32x8, shuffle_epi32) +OPENCV_HAL_IMPL_AVX_SHUFFLE(v_float32x8, permute_ps) +OPENCV_HAL_IMPL_AVX_SHUFFLE(v_float64x4, permute_pd) + +template +inline void v256_zip(const _Tpvec& a, const _Tpvec& b, _Tpvec& ab0, _Tpvec& ab1) +{ + ab0 = v256_unpacklo(a, b); + ab1 = v256_unpackhi(a, b); +} + +template +inline _Tpvec v256_combine_diagonal(const _Tpvec& a, const _Tpvec& b) +{ return _Tpvec(_mm256_blend_epi32(a.val, b.val, 0xf0)); } + +inline v_float32x8 v256_combine_diagonal(const v_float32x8& a, const v_float32x8& b) +{ return 
v256_blend<0xf0>(a, b); } + +inline v_float64x4 v256_combine_diagonal(const v_float64x4& a, const v_float64x4& b) +{ return v256_blend<0xc>(a, b); } + +template +inline _Tpvec v256_alignr_128(const _Tpvec& a, const _Tpvec& b) +{ return v256_permute2x128<0x21>(a, b); } + +template +inline _Tpvec v256_alignr_64(const _Tpvec& a, const _Tpvec& b) +{ return _Tpvec(_mm256_alignr_epi8(a.val, b.val, 8)); } +inline v_float64x4 v256_alignr_64(const v_float64x4& a, const v_float64x4& b) +{ return v_float64x4(_mm256_shuffle_pd(b.val, a.val, _MM_SHUFFLE(0, 0, 1, 1))); } +// todo: emulate float32 + +template +inline _Tpvec v256_swap_halves(const _Tpvec& a) +{ return v256_permute2x128<1>(a, a); } + +template +inline _Tpvec v256_reverse_64(const _Tpvec& a) +{ return v256_permute4x64<_MM_SHUFFLE(0, 1, 2, 3)>(a); } + +// ZIP +#define OPENCV_HAL_IMPL_AVX_ZIP(_Tpvec) \ + inline _Tpvec v_combine_low(const _Tpvec& a, const _Tpvec& b) \ + { return v256_permute2x128<0x20>(a, b); } \ + inline _Tpvec v_combine_high(const _Tpvec& a, const _Tpvec& b) \ + { return v256_permute2x128<0x31>(a, b); } \ + inline void v_recombine(const _Tpvec& a, const _Tpvec& b, \ + _Tpvec& c, _Tpvec& d) \ + { \ + _Tpvec a1b0 = v256_alignr_128(a, b); \ + c = v256_combine_diagonal(a, a1b0); \ + d = v256_combine_diagonal(a1b0, b); \ + } \ + inline void v_zip(const _Tpvec& a, const _Tpvec& b, \ + _Tpvec& ab0, _Tpvec& ab1) \ + { \ + _Tpvec ab0ab2, ab1ab3; \ + v256_zip(a, b, ab0ab2, ab1ab3); \ + v_recombine(ab0ab2, ab1ab3, ab0, ab1); \ + } + +OPENCV_HAL_IMPL_AVX_ZIP(v_uint8x32) +OPENCV_HAL_IMPL_AVX_ZIP(v_int8x32) +OPENCV_HAL_IMPL_AVX_ZIP(v_uint16x16) +OPENCV_HAL_IMPL_AVX_ZIP(v_int16x16) +OPENCV_HAL_IMPL_AVX_ZIP(v_uint32x8) +OPENCV_HAL_IMPL_AVX_ZIP(v_int32x8) +OPENCV_HAL_IMPL_AVX_ZIP(v_uint64x4) +OPENCV_HAL_IMPL_AVX_ZIP(v_int64x4) +OPENCV_HAL_IMPL_AVX_ZIP(v_float32x8) +OPENCV_HAL_IMPL_AVX_ZIP(v_float64x4) + +////////// Arithmetic, bitwise and comparison operations ///////// + +/* Element-wise binary and unary operations */ + +/** Arithmetics **/ +#define OPENCV_HAL_IMPL_AVX_BIN_OP(bin_op, _Tpvec, intrin) \ + inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \ + { return _Tpvec(intrin(a.val, b.val)); } \ + inline _Tpvec& operator bin_op##= (_Tpvec& a, const _Tpvec& b) \ + { a.val = intrin(a.val, b.val); return a; } + +OPENCV_HAL_IMPL_AVX_BIN_OP(+, v_uint8x32, _mm256_adds_epu8) +OPENCV_HAL_IMPL_AVX_BIN_OP(-, v_uint8x32, _mm256_subs_epu8) +OPENCV_HAL_IMPL_AVX_BIN_OP(+, v_int8x32, _mm256_adds_epi8) +OPENCV_HAL_IMPL_AVX_BIN_OP(-, v_int8x32, _mm256_subs_epi8) +OPENCV_HAL_IMPL_AVX_BIN_OP(+, v_uint16x16, _mm256_adds_epu16) +OPENCV_HAL_IMPL_AVX_BIN_OP(-, v_uint16x16, _mm256_subs_epu16) +OPENCV_HAL_IMPL_AVX_BIN_OP(+, v_int16x16, _mm256_adds_epi16) +OPENCV_HAL_IMPL_AVX_BIN_OP(-, v_int16x16, _mm256_subs_epi16) +OPENCV_HAL_IMPL_AVX_BIN_OP(+, v_uint32x8, _mm256_add_epi32) +OPENCV_HAL_IMPL_AVX_BIN_OP(-, v_uint32x8, _mm256_sub_epi32) +OPENCV_HAL_IMPL_AVX_BIN_OP(*, v_uint32x8, _mm256_mullo_epi32) +OPENCV_HAL_IMPL_AVX_BIN_OP(+, v_int32x8, _mm256_add_epi32) +OPENCV_HAL_IMPL_AVX_BIN_OP(-, v_int32x8, _mm256_sub_epi32) +OPENCV_HAL_IMPL_AVX_BIN_OP(*, v_int32x8, _mm256_mullo_epi32) +OPENCV_HAL_IMPL_AVX_BIN_OP(+, v_uint64x4, _mm256_add_epi64) +OPENCV_HAL_IMPL_AVX_BIN_OP(-, v_uint64x4, _mm256_sub_epi64) +OPENCV_HAL_IMPL_AVX_BIN_OP(+, v_int64x4, _mm256_add_epi64) +OPENCV_HAL_IMPL_AVX_BIN_OP(-, v_int64x4, _mm256_sub_epi64) + +OPENCV_HAL_IMPL_AVX_BIN_OP(+, v_float32x8, _mm256_add_ps) +OPENCV_HAL_IMPL_AVX_BIN_OP(-, v_float32x8, _mm256_sub_ps) 
+OPENCV_HAL_IMPL_AVX_BIN_OP(*, v_float32x8, _mm256_mul_ps) +OPENCV_HAL_IMPL_AVX_BIN_OP(/, v_float32x8, _mm256_div_ps) +OPENCV_HAL_IMPL_AVX_BIN_OP(+, v_float64x4, _mm256_add_pd) +OPENCV_HAL_IMPL_AVX_BIN_OP(-, v_float64x4, _mm256_sub_pd) +OPENCV_HAL_IMPL_AVX_BIN_OP(*, v_float64x4, _mm256_mul_pd) +OPENCV_HAL_IMPL_AVX_BIN_OP(/, v_float64x4, _mm256_div_pd) + +// saturating multiply 8-bit, 16-bit +inline v_uint8x32 operator * (const v_uint8x32& a, const v_uint8x32& b) +{ + v_uint16x16 c, d; + v_mul_expand(a, b, c, d); + return v_pack(c, d); +} +inline v_int8x32 operator * (const v_int8x32& a, const v_int8x32& b) +{ + v_int16x16 c, d; + v_mul_expand(a, b, c, d); + return v_pack(c, d); +} +inline v_uint16x16 operator * (const v_uint16x16& a, const v_uint16x16& b) +{ + __m256i pl = _mm256_mullo_epi16(a.val, b.val); + __m256i ph = _mm256_mulhi_epu16(a.val, b.val); + __m256i p0 = _mm256_unpacklo_epi16(pl, ph); + __m256i p1 = _mm256_unpackhi_epi16(pl, ph); + return v_uint16x16(_v256_packs_epu32(p0, p1)); +} +inline v_int16x16 operator * (const v_int16x16& a, const v_int16x16& b) +{ + __m256i pl = _mm256_mullo_epi16(a.val, b.val); + __m256i ph = _mm256_mulhi_epi16(a.val, b.val); + __m256i p0 = _mm256_unpacklo_epi16(pl, ph); + __m256i p1 = _mm256_unpackhi_epi16(pl, ph); + return v_int16x16(_mm256_packs_epi32(p0, p1)); +} +inline v_uint8x32& operator *= (v_uint8x32& a, const v_uint8x32& b) +{ a = a * b; return a; } +inline v_int8x32& operator *= (v_int8x32& a, const v_int8x32& b) +{ a = a * b; return a; } +inline v_uint16x16& operator *= (v_uint16x16& a, const v_uint16x16& b) +{ a = a * b; return a; } +inline v_int16x16& operator *= (v_int16x16& a, const v_int16x16& b) +{ a = a * b; return a; } + +/** Non-saturating arithmetics **/ +#define OPENCV_HAL_IMPL_AVX_BIN_FUNC(func, _Tpvec, intrin) \ + inline _Tpvec func(const _Tpvec& a, const _Tpvec& b) \ + { return _Tpvec(intrin(a.val, b.val)); } + +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_add_wrap, v_uint8x32, _mm256_add_epi8) +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_add_wrap, v_int8x32, _mm256_add_epi8) +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_add_wrap, v_uint16x16, _mm256_add_epi16) +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_add_wrap, v_int16x16, _mm256_add_epi16) +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_sub_wrap, v_uint8x32, _mm256_sub_epi8) +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_sub_wrap, v_int8x32, _mm256_sub_epi8) +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_sub_wrap, v_uint16x16, _mm256_sub_epi16) +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_sub_wrap, v_int16x16, _mm256_sub_epi16) +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_mul_wrap, v_uint16x16, _mm256_mullo_epi16) +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_mul_wrap, v_int16x16, _mm256_mullo_epi16) + +inline v_uint8x32 v_mul_wrap(const v_uint8x32& a, const v_uint8x32& b) +{ + __m256i ad = _mm256_srai_epi16(a.val, 8); + __m256i bd = _mm256_srai_epi16(b.val, 8); + __m256i p0 = _mm256_mullo_epi16(a.val, b.val); // even + __m256i p1 = _mm256_slli_epi16(_mm256_mullo_epi16(ad, bd), 8); // odd + + const __m256i b01 = _mm256_set1_epi32(0xFF00FF00); + return v_uint8x32(_mm256_blendv_epi8(p0, p1, b01)); +} +inline v_int8x32 v_mul_wrap(const v_int8x32& a, const v_int8x32& b) +{ + return v_reinterpret_as_s8(v_mul_wrap(v_reinterpret_as_u8(a), v_reinterpret_as_u8(b))); +} + +// Multiply and expand +inline void v_mul_expand(const v_uint8x32& a, const v_uint8x32& b, + v_uint16x16& c, v_uint16x16& d) +{ + v_uint16x16 a0, a1, b0, b1; + v_expand(a, a0, a1); + v_expand(b, b0, b1); + c = v_mul_wrap(a0, b0); + d = v_mul_wrap(a1, b1); +} + +inline void v_mul_expand(const v_int8x32& a, const v_int8x32& b, + v_int16x16& c, 
v_int16x16& d) +{ + v_int16x16 a0, a1, b0, b1; + v_expand(a, a0, a1); + v_expand(b, b0, b1); + c = v_mul_wrap(a0, b0); + d = v_mul_wrap(a1, b1); +} + +inline void v_mul_expand(const v_int16x16& a, const v_int16x16& b, + v_int32x8& c, v_int32x8& d) +{ + v_int16x16 vhi = v_int16x16(_mm256_mulhi_epi16(a.val, b.val)); + + v_int16x16 v0, v1; + v_zip(v_mul_wrap(a, b), vhi, v0, v1); + + c = v_reinterpret_as_s32(v0); + d = v_reinterpret_as_s32(v1); +} + +inline void v_mul_expand(const v_uint16x16& a, const v_uint16x16& b, + v_uint32x8& c, v_uint32x8& d) +{ + v_uint16x16 vhi = v_uint16x16(_mm256_mulhi_epu16(a.val, b.val)); + + v_uint16x16 v0, v1; + v_zip(v_mul_wrap(a, b), vhi, v0, v1); + + c = v_reinterpret_as_u32(v0); + d = v_reinterpret_as_u32(v1); +} + +inline void v_mul_expand(const v_uint32x8& a, const v_uint32x8& b, + v_uint64x4& c, v_uint64x4& d) +{ + __m256i v0 = _mm256_mul_epu32(a.val, b.val); + __m256i v1 = _mm256_mul_epu32(_mm256_srli_epi64(a.val, 32), _mm256_srli_epi64(b.val, 32)); + v_zip(v_uint64x4(v0), v_uint64x4(v1), c, d); +} + +inline v_int16x16 v_mul_hi(const v_int16x16& a, const v_int16x16& b) { return v_int16x16(_mm256_mulhi_epi16(a.val, b.val)); } +inline v_uint16x16 v_mul_hi(const v_uint16x16& a, const v_uint16x16& b) { return v_uint16x16(_mm256_mulhi_epu16(a.val, b.val)); } + +/** Bitwise shifts **/ +#define OPENCV_HAL_IMPL_AVX_SHIFT_OP(_Tpuvec, _Tpsvec, suffix, srai) \ + inline _Tpuvec operator << (const _Tpuvec& a, int imm) \ + { return _Tpuvec(_mm256_slli_##suffix(a.val, imm)); } \ + inline _Tpsvec operator << (const _Tpsvec& a, int imm) \ + { return _Tpsvec(_mm256_slli_##suffix(a.val, imm)); } \ + inline _Tpuvec operator >> (const _Tpuvec& a, int imm) \ + { return _Tpuvec(_mm256_srli_##suffix(a.val, imm)); } \ + inline _Tpsvec operator >> (const _Tpsvec& a, int imm) \ + { return _Tpsvec(srai(a.val, imm)); } \ + template \ + inline _Tpuvec v_shl(const _Tpuvec& a) \ + { return _Tpuvec(_mm256_slli_##suffix(a.val, imm)); } \ + template \ + inline _Tpsvec v_shl(const _Tpsvec& a) \ + { return _Tpsvec(_mm256_slli_##suffix(a.val, imm)); } \ + template \ + inline _Tpuvec v_shr(const _Tpuvec& a) \ + { return _Tpuvec(_mm256_srli_##suffix(a.val, imm)); } \ + template \ + inline _Tpsvec v_shr(const _Tpsvec& a) \ + { return _Tpsvec(srai(a.val, imm)); } + +OPENCV_HAL_IMPL_AVX_SHIFT_OP(v_uint16x16, v_int16x16, epi16, _mm256_srai_epi16) +OPENCV_HAL_IMPL_AVX_SHIFT_OP(v_uint32x8, v_int32x8, epi32, _mm256_srai_epi32) + +inline __m256i _mm256_srai_epi64xx(const __m256i a, int imm) +{ + __m256i d = _mm256_set1_epi64x((int64)1 << 63); + __m256i r = _mm256_srli_epi64(_mm256_add_epi64(a, d), imm); + return _mm256_sub_epi64(r, _mm256_srli_epi64(d, imm)); +} +OPENCV_HAL_IMPL_AVX_SHIFT_OP(v_uint64x4, v_int64x4, epi64, _mm256_srai_epi64xx) + + +/** Bitwise logic **/ +#define OPENCV_HAL_IMPL_AVX_LOGIC_OP(_Tpvec, suffix, not_const) \ + OPENCV_HAL_IMPL_AVX_BIN_OP(&, _Tpvec, _mm256_and_##suffix) \ + OPENCV_HAL_IMPL_AVX_BIN_OP(|, _Tpvec, _mm256_or_##suffix) \ + OPENCV_HAL_IMPL_AVX_BIN_OP(^, _Tpvec, _mm256_xor_##suffix) \ + inline _Tpvec operator ~ (const _Tpvec& a) \ + { return _Tpvec(_mm256_xor_##suffix(a.val, not_const)); } + +OPENCV_HAL_IMPL_AVX_LOGIC_OP(v_uint8x32, si256, _mm256_set1_epi32(-1)) +OPENCV_HAL_IMPL_AVX_LOGIC_OP(v_int8x32, si256, _mm256_set1_epi32(-1)) +OPENCV_HAL_IMPL_AVX_LOGIC_OP(v_uint16x16, si256, _mm256_set1_epi32(-1)) +OPENCV_HAL_IMPL_AVX_LOGIC_OP(v_int16x16, si256, _mm256_set1_epi32(-1)) +OPENCV_HAL_IMPL_AVX_LOGIC_OP(v_uint32x8, si256, _mm256_set1_epi32(-1)) 
+OPENCV_HAL_IMPL_AVX_LOGIC_OP(v_int32x8, si256, _mm256_set1_epi32(-1)) +OPENCV_HAL_IMPL_AVX_LOGIC_OP(v_uint64x4, si256, _mm256_set1_epi64x(-1)) +OPENCV_HAL_IMPL_AVX_LOGIC_OP(v_int64x4, si256, _mm256_set1_epi64x(-1)) +OPENCV_HAL_IMPL_AVX_LOGIC_OP(v_float32x8, ps, _mm256_castsi256_ps(_mm256_set1_epi32(-1))) +OPENCV_HAL_IMPL_AVX_LOGIC_OP(v_float64x4, pd, _mm256_castsi256_pd(_mm256_set1_epi32(-1))) + +/** Select **/ +#define OPENCV_HAL_IMPL_AVX_SELECT(_Tpvec, suffix) \ + inline _Tpvec v_select(const _Tpvec& mask, const _Tpvec& a, const _Tpvec& b) \ + { return _Tpvec(_mm256_blendv_##suffix(b.val, a.val, mask.val)); } + +OPENCV_HAL_IMPL_AVX_SELECT(v_uint8x32, epi8) +OPENCV_HAL_IMPL_AVX_SELECT(v_int8x32, epi8) +OPENCV_HAL_IMPL_AVX_SELECT(v_uint16x16, epi8) +OPENCV_HAL_IMPL_AVX_SELECT(v_int16x16, epi8) +OPENCV_HAL_IMPL_AVX_SELECT(v_uint32x8, epi8) +OPENCV_HAL_IMPL_AVX_SELECT(v_int32x8, epi8) +OPENCV_HAL_IMPL_AVX_SELECT(v_float32x8, ps) +OPENCV_HAL_IMPL_AVX_SELECT(v_float64x4, pd) + +/** Comparison **/ +#define OPENCV_HAL_IMPL_AVX_CMP_OP_OV(_Tpvec) \ + inline _Tpvec operator != (const _Tpvec& a, const _Tpvec& b) \ + { return ~(a == b); } \ + inline _Tpvec operator < (const _Tpvec& a, const _Tpvec& b) \ + { return b > a; } \ + inline _Tpvec operator >= (const _Tpvec& a, const _Tpvec& b) \ + { return ~(a < b); } \ + inline _Tpvec operator <= (const _Tpvec& a, const _Tpvec& b) \ + { return b >= a; } + +#define OPENCV_HAL_IMPL_AVX_CMP_OP_INT(_Tpuvec, _Tpsvec, suffix, sbit) \ + inline _Tpuvec operator == (const _Tpuvec& a, const _Tpuvec& b) \ + { return _Tpuvec(_mm256_cmpeq_##suffix(a.val, b.val)); } \ + inline _Tpuvec operator > (const _Tpuvec& a, const _Tpuvec& b) \ + { \ + __m256i smask = _mm256_set1_##suffix(sbit); \ + return _Tpuvec(_mm256_cmpgt_##suffix( \ + _mm256_xor_si256(a.val, smask), \ + _mm256_xor_si256(b.val, smask))); \ + } \ + inline _Tpsvec operator == (const _Tpsvec& a, const _Tpsvec& b) \ + { return _Tpsvec(_mm256_cmpeq_##suffix(a.val, b.val)); } \ + inline _Tpsvec operator > (const _Tpsvec& a, const _Tpsvec& b) \ + { return _Tpsvec(_mm256_cmpgt_##suffix(a.val, b.val)); } \ + OPENCV_HAL_IMPL_AVX_CMP_OP_OV(_Tpuvec) \ + OPENCV_HAL_IMPL_AVX_CMP_OP_OV(_Tpsvec) + +OPENCV_HAL_IMPL_AVX_CMP_OP_INT(v_uint8x32, v_int8x32, epi8, (char)-128) +OPENCV_HAL_IMPL_AVX_CMP_OP_INT(v_uint16x16, v_int16x16, epi16, (short)-32768) +OPENCV_HAL_IMPL_AVX_CMP_OP_INT(v_uint32x8, v_int32x8, epi32, (int)0x80000000) + +#define OPENCV_HAL_IMPL_AVX_CMP_OP_64BIT(_Tpvec) \ + inline _Tpvec operator == (const _Tpvec& a, const _Tpvec& b) \ + { return _Tpvec(_mm256_cmpeq_epi64(a.val, b.val)); } \ + inline _Tpvec operator != (const _Tpvec& a, const _Tpvec& b) \ + { return ~(a == b); } + +OPENCV_HAL_IMPL_AVX_CMP_OP_64BIT(v_uint64x4) +OPENCV_HAL_IMPL_AVX_CMP_OP_64BIT(v_int64x4) + +#define OPENCV_HAL_IMPL_AVX_CMP_FLT(bin_op, imm8, _Tpvec, suffix) \ + inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \ + { return _Tpvec(_mm256_cmp_##suffix(a.val, b.val, imm8)); } + +#define OPENCV_HAL_IMPL_AVX_CMP_OP_FLT(_Tpvec, suffix) \ + OPENCV_HAL_IMPL_AVX_CMP_FLT(==, _CMP_EQ_OQ, _Tpvec, suffix) \ + OPENCV_HAL_IMPL_AVX_CMP_FLT(!=, _CMP_NEQ_OQ, _Tpvec, suffix) \ + OPENCV_HAL_IMPL_AVX_CMP_FLT(<, _CMP_LT_OQ, _Tpvec, suffix) \ + OPENCV_HAL_IMPL_AVX_CMP_FLT(>, _CMP_GT_OQ, _Tpvec, suffix) \ + OPENCV_HAL_IMPL_AVX_CMP_FLT(<=, _CMP_LE_OQ, _Tpvec, suffix) \ + OPENCV_HAL_IMPL_AVX_CMP_FLT(>=, _CMP_GE_OQ, _Tpvec, suffix) + +OPENCV_HAL_IMPL_AVX_CMP_OP_FLT(v_float32x8, ps) +OPENCV_HAL_IMPL_AVX_CMP_OP_FLT(v_float64x4, pd) + +inline 
v_float32x8 v_not_nan(const v_float32x8& a) +{ return v_float32x8(_mm256_cmp_ps(a.val, a.val, _CMP_ORD_Q)); } +inline v_float64x4 v_not_nan(const v_float64x4& a) +{ return v_float64x4(_mm256_cmp_pd(a.val, a.val, _CMP_ORD_Q)); } + +/** min/max **/ +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_min, v_uint8x32, _mm256_min_epu8) +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_max, v_uint8x32, _mm256_max_epu8) +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_min, v_int8x32, _mm256_min_epi8) +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_max, v_int8x32, _mm256_max_epi8) +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_min, v_uint16x16, _mm256_min_epu16) +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_max, v_uint16x16, _mm256_max_epu16) +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_min, v_int16x16, _mm256_min_epi16) +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_max, v_int16x16, _mm256_max_epi16) +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_min, v_uint32x8, _mm256_min_epu32) +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_max, v_uint32x8, _mm256_max_epu32) +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_min, v_int32x8, _mm256_min_epi32) +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_max, v_int32x8, _mm256_max_epi32) +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_min, v_float32x8, _mm256_min_ps) +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_max, v_float32x8, _mm256_max_ps) +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_min, v_float64x4, _mm256_min_pd) +OPENCV_HAL_IMPL_AVX_BIN_FUNC(v_max, v_float64x4, _mm256_max_pd) + +/** Rotate **/ +template +inline v_uint8x32 v_rotate_left(const v_uint8x32& a, const v_uint8x32& b) +{ + enum {IMM_R = (16 - imm) & 0xFF}; + enum {IMM_R2 = (32 - imm) & 0xFF}; + + if (imm == 0) return a; + if (imm == 32) return b; + if (imm > 32) return v_uint8x32(); + + __m256i swap = _mm256_permute2x128_si256(a.val, b.val, 0x03); + if (imm == 16) return v_uint8x32(swap); + if (imm < 16) return v_uint8x32(_mm256_alignr_epi8(a.val, swap, IMM_R)); + return v_uint8x32(_mm256_alignr_epi8(swap, b.val, IMM_R2)); // imm < 32 +} + +template +inline v_uint8x32 v_rotate_right(const v_uint8x32& a, const v_uint8x32& b) +{ + enum {IMM_L = (imm - 16) & 0xFF}; + + if (imm == 0) return a; + if (imm == 32) return b; + if (imm > 32) return v_uint8x32(); + + __m256i swap = _mm256_permute2x128_si256(a.val, b.val, 0x21); + if (imm == 16) return v_uint8x32(swap); + if (imm < 16) return v_uint8x32(_mm256_alignr_epi8(swap, a.val, imm)); + return v_uint8x32(_mm256_alignr_epi8(b.val, swap, IMM_L)); +} + +template +inline v_uint8x32 v_rotate_left(const v_uint8x32& a) +{ + enum {IMM_L = (imm - 16) & 0xFF}; + enum {IMM_R = (16 - imm) & 0xFF}; + + if (imm == 0) return a; + if (imm > 32) return v_uint8x32(); + + // ESAC control[3] ? [127:0] = 0 + __m256i swapz = _mm256_permute2x128_si256(a.val, a.val, _MM_SHUFFLE(0, 0, 2, 0)); + if (imm == 16) return v_uint8x32(swapz); + if (imm < 16) return v_uint8x32(_mm256_alignr_epi8(a.val, swapz, IMM_R)); + return v_uint8x32(_mm256_slli_si256(swapz, IMM_L)); +} + +template +inline v_uint8x32 v_rotate_right(const v_uint8x32& a) +{ + enum {IMM_L = (imm - 16) & 0xFF}; + + if (imm == 0) return a; + if (imm > 32) return v_uint8x32(); + + // ESAC control[3] ? 
[127:0] = 0 + __m256i swapz = _mm256_permute2x128_si256(a.val, a.val, _MM_SHUFFLE(2, 0, 0, 1)); + if (imm == 16) return v_uint8x32(swapz); + if (imm < 16) return v_uint8x32(_mm256_alignr_epi8(swapz, a.val, imm)); + return v_uint8x32(_mm256_srli_si256(swapz, IMM_L)); +} + +#define OPENCV_HAL_IMPL_AVX_ROTATE_CAST(intrin, _Tpvec, cast) \ + template \ + inline _Tpvec intrin(const _Tpvec& a, const _Tpvec& b) \ + { \ + enum {IMMxW = imm * sizeof(typename _Tpvec::lane_type)}; \ + v_uint8x32 ret = intrin(v_reinterpret_as_u8(a), \ + v_reinterpret_as_u8(b)); \ + return _Tpvec(cast(ret.val)); \ + } \ + template \ + inline _Tpvec intrin(const _Tpvec& a) \ + { \ + enum {IMMxW = imm * sizeof(typename _Tpvec::lane_type)}; \ + v_uint8x32 ret = intrin(v_reinterpret_as_u8(a)); \ + return _Tpvec(cast(ret.val)); \ + } + +#define OPENCV_HAL_IMPL_AVX_ROTATE(_Tpvec) \ + OPENCV_HAL_IMPL_AVX_ROTATE_CAST(v_rotate_left, _Tpvec, OPENCV_HAL_NOP) \ + OPENCV_HAL_IMPL_AVX_ROTATE_CAST(v_rotate_right, _Tpvec, OPENCV_HAL_NOP) + +OPENCV_HAL_IMPL_AVX_ROTATE(v_int8x32) +OPENCV_HAL_IMPL_AVX_ROTATE(v_uint16x16) +OPENCV_HAL_IMPL_AVX_ROTATE(v_int16x16) +OPENCV_HAL_IMPL_AVX_ROTATE(v_uint32x8) +OPENCV_HAL_IMPL_AVX_ROTATE(v_int32x8) +OPENCV_HAL_IMPL_AVX_ROTATE(v_uint64x4) +OPENCV_HAL_IMPL_AVX_ROTATE(v_int64x4) + +OPENCV_HAL_IMPL_AVX_ROTATE_CAST(v_rotate_left, v_float32x8, _mm256_castsi256_ps) +OPENCV_HAL_IMPL_AVX_ROTATE_CAST(v_rotate_right, v_float32x8, _mm256_castsi256_ps) +OPENCV_HAL_IMPL_AVX_ROTATE_CAST(v_rotate_left, v_float64x4, _mm256_castsi256_pd) +OPENCV_HAL_IMPL_AVX_ROTATE_CAST(v_rotate_right, v_float64x4, _mm256_castsi256_pd) + +/** Reverse **/ +inline v_uint8x32 v_reverse(const v_uint8x32 &a) +{ + static const __m256i perm = _mm256_setr_epi8( + 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, + 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __m256i vec = _mm256_shuffle_epi8(a.val, perm); + return v_uint8x32(_mm256_permute2x128_si256(vec, vec, 1)); +} + +inline v_int8x32 v_reverse(const v_int8x32 &a) +{ return v_reinterpret_as_s8(v_reverse(v_reinterpret_as_u8(a))); } + +inline v_uint16x16 v_reverse(const v_uint16x16 &a) +{ + static const __m256i perm = _mm256_setr_epi8( + 14, 15, 12, 13, 10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1, + 14, 15, 12, 13, 10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1); + __m256i vec = _mm256_shuffle_epi8(a.val, perm); + return v_uint16x16(_mm256_permute2x128_si256(vec, vec, 1)); +} + +inline v_int16x16 v_reverse(const v_int16x16 &a) +{ return v_reinterpret_as_s16(v_reverse(v_reinterpret_as_u16(a))); } + +inline v_uint32x8 v_reverse(const v_uint32x8 &a) +{ + static const __m256i perm = _mm256_setr_epi32(7, 6, 5, 4, 3, 2, 1, 0); + return v_uint32x8(_mm256_permutevar8x32_epi32(a.val, perm)); +} + +inline v_int32x8 v_reverse(const v_int32x8 &a) +{ return v_reinterpret_as_s32(v_reverse(v_reinterpret_as_u32(a))); } + +inline v_float32x8 v_reverse(const v_float32x8 &a) +{ return v_reinterpret_as_f32(v_reverse(v_reinterpret_as_u32(a))); } + +inline v_uint64x4 v_reverse(const v_uint64x4 &a) +{ + return v_uint64x4(_mm256_permute4x64_epi64(a.val, _MM_SHUFFLE(0, 1, 2, 3))); +} + +inline v_int64x4 v_reverse(const v_int64x4 &a) +{ return v_reinterpret_as_s64(v_reverse(v_reinterpret_as_u64(a))); } + +inline v_float64x4 v_reverse(const v_float64x4 &a) +{ return v_reinterpret_as_f64(v_reverse(v_reinterpret_as_u64(a))); } + +////////// Reduce and mask ///////// + +/** Reduce **/ +inline unsigned v_reduce_sum(const v_uint8x32& a) +{ + __m256i half = _mm256_sad_epu8(a.val, _mm256_setzero_si256()); + __m128i quarter = 
_mm_add_epi32(_v256_extract_low(half), _v256_extract_high(half)); + return (unsigned)_mm_cvtsi128_si32(_mm_add_epi32(quarter, _mm_unpackhi_epi64(quarter, quarter))); +} +inline int v_reduce_sum(const v_int8x32& a) +{ + __m256i half = _mm256_sad_epu8(_mm256_xor_si256(a.val, _mm256_set1_epi8((schar)-128)), _mm256_setzero_si256()); + __m128i quarter = _mm_add_epi32(_v256_extract_low(half), _v256_extract_high(half)); + return (unsigned)_mm_cvtsi128_si32(_mm_add_epi32(quarter, _mm_unpackhi_epi64(quarter, quarter))) - 4096; +} +#define OPENCV_HAL_IMPL_AVX_REDUCE_32(_Tpvec, sctype, func, intrin) \ + inline sctype v_reduce_##func(const _Tpvec& a) \ + { \ + __m128i val = intrin(_v256_extract_low(a.val), _v256_extract_high(a.val)); \ + val = intrin(val, _mm_srli_si128(val,8)); \ + val = intrin(val, _mm_srli_si128(val,4)); \ + val = intrin(val, _mm_srli_si128(val,2)); \ + val = intrin(val, _mm_srli_si128(val,1)); \ + return (sctype)_mm_cvtsi128_si32(val); \ + } + +OPENCV_HAL_IMPL_AVX_REDUCE_32(v_uint8x32, uchar, min, _mm_min_epu8) +OPENCV_HAL_IMPL_AVX_REDUCE_32(v_int8x32, schar, min, _mm_min_epi8) +OPENCV_HAL_IMPL_AVX_REDUCE_32(v_uint8x32, uchar, max, _mm_max_epu8) +OPENCV_HAL_IMPL_AVX_REDUCE_32(v_int8x32, schar, max, _mm_max_epi8) + +#define OPENCV_HAL_IMPL_AVX_REDUCE_16(_Tpvec, sctype, func, intrin) \ + inline sctype v_reduce_##func(const _Tpvec& a) \ + { \ + __m128i v0 = _v256_extract_low(a.val); \ + __m128i v1 = _v256_extract_high(a.val); \ + v0 = intrin(v0, v1); \ + v0 = intrin(v0, _mm_srli_si128(v0, 8)); \ + v0 = intrin(v0, _mm_srli_si128(v0, 4)); \ + v0 = intrin(v0, _mm_srli_si128(v0, 2)); \ + return (sctype) _mm_cvtsi128_si32(v0); \ + } + +OPENCV_HAL_IMPL_AVX_REDUCE_16(v_uint16x16, ushort, min, _mm_min_epu16) +OPENCV_HAL_IMPL_AVX_REDUCE_16(v_int16x16, short, min, _mm_min_epi16) +OPENCV_HAL_IMPL_AVX_REDUCE_16(v_uint16x16, ushort, max, _mm_max_epu16) +OPENCV_HAL_IMPL_AVX_REDUCE_16(v_int16x16, short, max, _mm_max_epi16) + +#define OPENCV_HAL_IMPL_AVX_REDUCE_8(_Tpvec, sctype, func, intrin) \ + inline sctype v_reduce_##func(const _Tpvec& a) \ + { \ + __m128i v0 = _v256_extract_low(a.val); \ + __m128i v1 = _v256_extract_high(a.val); \ + v0 = intrin(v0, v1); \ + v0 = intrin(v0, _mm_srli_si128(v0, 8)); \ + v0 = intrin(v0, _mm_srli_si128(v0, 4)); \ + return (sctype) _mm_cvtsi128_si32(v0); \ + } + +OPENCV_HAL_IMPL_AVX_REDUCE_8(v_uint32x8, unsigned, min, _mm_min_epu32) +OPENCV_HAL_IMPL_AVX_REDUCE_8(v_int32x8, int, min, _mm_min_epi32) +OPENCV_HAL_IMPL_AVX_REDUCE_8(v_uint32x8, unsigned, max, _mm_max_epu32) +OPENCV_HAL_IMPL_AVX_REDUCE_8(v_int32x8, int, max, _mm_max_epi32) + +#define OPENCV_HAL_IMPL_AVX_REDUCE_FLT(func, intrin) \ + inline float v_reduce_##func(const v_float32x8& a) \ + { \ + __m128 v0 = _v256_extract_low(a.val); \ + __m128 v1 = _v256_extract_high(a.val); \ + v0 = intrin(v0, v1); \ + v0 = intrin(v0, _mm_permute_ps(v0, _MM_SHUFFLE(0, 0, 3, 2))); \ + v0 = intrin(v0, _mm_permute_ps(v0, _MM_SHUFFLE(0, 0, 0, 1))); \ + return _mm_cvtss_f32(v0); \ + } + +OPENCV_HAL_IMPL_AVX_REDUCE_FLT(min, _mm_min_ps) +OPENCV_HAL_IMPL_AVX_REDUCE_FLT(max, _mm_max_ps) + +inline int v_reduce_sum(const v_int32x8& a) +{ + __m256i s0 = _mm256_hadd_epi32(a.val, a.val); + s0 = _mm256_hadd_epi32(s0, s0); + + __m128i s1 = _v256_extract_high(s0); + s1 = _mm_add_epi32(_v256_extract_low(s0), s1); + + return _mm_cvtsi128_si32(s1); +} + +inline unsigned v_reduce_sum(const v_uint32x8& a) +{ return v_reduce_sum(v_reinterpret_as_s32(a)); } + +inline int v_reduce_sum(const v_int16x16& a) +{ return v_reduce_sum(v_expand_low(a) + 
v_expand_high(a)); } +inline unsigned v_reduce_sum(const v_uint16x16& a) +{ return v_reduce_sum(v_expand_low(a) + v_expand_high(a)); } + +inline float v_reduce_sum(const v_float32x8& a) +{ + __m256 s0 = _mm256_hadd_ps(a.val, a.val); + s0 = _mm256_hadd_ps(s0, s0); + + __m128 s1 = _v256_extract_high(s0); + s1 = _mm_add_ps(_v256_extract_low(s0), s1); + + return _mm_cvtss_f32(s1); +} + +inline uint64 v_reduce_sum(const v_uint64x4& a) +{ + uint64 CV_DECL_ALIGNED(32) idx[2]; + _mm_store_si128((__m128i*)idx, _mm_add_epi64(_v256_extract_low(a.val), _v256_extract_high(a.val))); + return idx[0] + idx[1]; +} +inline int64 v_reduce_sum(const v_int64x4& a) +{ + int64 CV_DECL_ALIGNED(32) idx[2]; + _mm_store_si128((__m128i*)idx, _mm_add_epi64(_v256_extract_low(a.val), _v256_extract_high(a.val))); + return idx[0] + idx[1]; +} +inline double v_reduce_sum(const v_float64x4& a) +{ + __m256d s0 = _mm256_hadd_pd(a.val, a.val); + return _mm_cvtsd_f64(_mm_add_pd(_v256_extract_low(s0), _v256_extract_high(s0))); +} + +inline v_float32x8 v_reduce_sum4(const v_float32x8& a, const v_float32x8& b, + const v_float32x8& c, const v_float32x8& d) +{ + __m256 ab = _mm256_hadd_ps(a.val, b.val); + __m256 cd = _mm256_hadd_ps(c.val, d.val); + return v_float32x8(_mm256_hadd_ps(ab, cd)); +} + +inline unsigned v_reduce_sad(const v_uint8x32& a, const v_uint8x32& b) +{ + __m256i half = _mm256_sad_epu8(a.val, b.val); + __m128i quarter = _mm_add_epi32(_v256_extract_low(half), _v256_extract_high(half)); + return (unsigned)_mm_cvtsi128_si32(_mm_add_epi32(quarter, _mm_unpackhi_epi64(quarter, quarter))); +} +inline unsigned v_reduce_sad(const v_int8x32& a, const v_int8x32& b) +{ + __m256i half = _mm256_set1_epi8(0x7f); + half = _mm256_sad_epu8(_mm256_add_epi8(a.val, half), _mm256_add_epi8(b.val, half)); + __m128i quarter = _mm_add_epi32(_v256_extract_low(half), _v256_extract_high(half)); + return (unsigned)_mm_cvtsi128_si32(_mm_add_epi32(quarter, _mm_unpackhi_epi64(quarter, quarter))); +} +inline unsigned v_reduce_sad(const v_uint16x16& a, const v_uint16x16& b) +{ + v_uint32x8 l, h; + v_expand(v_add_wrap(a - b, b - a), l, h); + return v_reduce_sum(l + h); +} +inline unsigned v_reduce_sad(const v_int16x16& a, const v_int16x16& b) +{ + v_uint32x8 l, h; + v_expand(v_reinterpret_as_u16(v_sub_wrap(v_max(a, b), v_min(a, b))), l, h); + return v_reduce_sum(l + h); +} +inline unsigned v_reduce_sad(const v_uint32x8& a, const v_uint32x8& b) +{ + return v_reduce_sum(v_max(a, b) - v_min(a, b)); +} +inline unsigned v_reduce_sad(const v_int32x8& a, const v_int32x8& b) +{ + v_int32x8 m = a < b; + return v_reduce_sum(v_reinterpret_as_u32(((a - b) ^ m) - m)); +} +inline float v_reduce_sad(const v_float32x8& a, const v_float32x8& b) +{ + return v_reduce_sum((a - b) & v_float32x8(_mm256_castsi256_ps(_mm256_set1_epi32(0x7fffffff)))); +} + +/** Popcount **/ +inline v_uint8x32 v_popcount(const v_uint8x32& a) +{ + __m256i _popcnt_table = _mm256_setr_epi8(0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, + 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4); + __m256i _popcnt_mask = _mm256_set1_epi8(0x0F); + return v_uint8x32(_mm256_add_epi8(_mm256_shuffle_epi8(_popcnt_table, _mm256_and_si256( a.val , _popcnt_mask)), + _mm256_shuffle_epi8(_popcnt_table, _mm256_and_si256(_mm256_srli_epi16(a.val, 4), _popcnt_mask)))); +} +inline v_uint16x16 v_popcount(const v_uint16x16& a) +{ + v_uint8x32 p = v_popcount(v_reinterpret_as_u8(a)); + p += v_rotate_right<1>(p); + return v_reinterpret_as_u16(p) & v256_setall_u16(0x00ff); +} +inline v_uint32x8 v_popcount(const v_uint32x8& a) 
+{ + v_uint8x32 p = v_popcount(v_reinterpret_as_u8(a)); + p += v_rotate_right<1>(p); + p += v_rotate_right<2>(p); + return v_reinterpret_as_u32(p) & v256_setall_u32(0x000000ff); +} +inline v_uint64x4 v_popcount(const v_uint64x4& a) +{ + return v_uint64x4(_mm256_sad_epu8(v_popcount(v_reinterpret_as_u8(a)).val, _mm256_setzero_si256())); +} +inline v_uint8x32 v_popcount(const v_int8x32& a) +{ return v_popcount(v_reinterpret_as_u8(a)); } +inline v_uint16x16 v_popcount(const v_int16x16& a) +{ return v_popcount(v_reinterpret_as_u16(a)); } +inline v_uint32x8 v_popcount(const v_int32x8& a) +{ return v_popcount(v_reinterpret_as_u32(a)); } +inline v_uint64x4 v_popcount(const v_int64x4& a) +{ return v_popcount(v_reinterpret_as_u64(a)); } + +/** Mask **/ +inline int v_signmask(const v_int8x32& a) +{ return _mm256_movemask_epi8(a.val); } +inline int v_signmask(const v_uint8x32& a) +{ return v_signmask(v_reinterpret_as_s8(a)); } + +inline int v_signmask(const v_int16x16& a) +{ return v_signmask(v_pack(a, a)) & 0xFFFF; } +inline int v_signmask(const v_uint16x16& a) +{ return v_signmask(v_reinterpret_as_s16(a)); } + +inline int v_signmask(const v_float32x8& a) +{ return _mm256_movemask_ps(a.val); } +inline int v_signmask(const v_float64x4& a) +{ return _mm256_movemask_pd(a.val); } + +inline int v_signmask(const v_int32x8& a) +{ return v_signmask(v_reinterpret_as_f32(a)); } +inline int v_signmask(const v_uint32x8& a) +{ return v_signmask(v_reinterpret_as_f32(a)); } + +inline int v_signmask(const v_int64x4& a) +{ return v_signmask(v_reinterpret_as_f64(a)); } +inline int v_signmask(const v_uint64x4& a) +{ return v_signmask(v_reinterpret_as_f64(a)); } + +inline int v_scan_forward(const v_int8x32& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))); } +inline int v_scan_forward(const v_uint8x32& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))); } +inline int v_scan_forward(const v_int16x16& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 2; } +inline int v_scan_forward(const v_uint16x16& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 2; } +inline int v_scan_forward(const v_int32x8& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 4; } +inline int v_scan_forward(const v_uint32x8& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 4; } +inline int v_scan_forward(const v_float32x8& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 4; } +inline int v_scan_forward(const v_int64x4& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 8; } +inline int v_scan_forward(const v_uint64x4& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 8; } +inline int v_scan_forward(const v_float64x4& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 8; } + +/** Checks **/ +#define OPENCV_HAL_IMPL_AVX_CHECK(_Tpvec, allmask) \ + inline bool v_check_all(const _Tpvec& a) { return v_signmask(a) == allmask; } \ + inline bool v_check_any(const _Tpvec& a) { return v_signmask(a) != 0; } +OPENCV_HAL_IMPL_AVX_CHECK(v_uint8x32, -1) +OPENCV_HAL_IMPL_AVX_CHECK(v_int8x32, -1) +OPENCV_HAL_IMPL_AVX_CHECK(v_uint32x8, 255) +OPENCV_HAL_IMPL_AVX_CHECK(v_int32x8, 255) +OPENCV_HAL_IMPL_AVX_CHECK(v_uint64x4, 15) +OPENCV_HAL_IMPL_AVX_CHECK(v_int64x4, 15) +OPENCV_HAL_IMPL_AVX_CHECK(v_float32x8, 255) +OPENCV_HAL_IMPL_AVX_CHECK(v_float64x4, 15) + +#define OPENCV_HAL_IMPL_AVX_CHECK_SHORT(_Tpvec) \ + inline bool v_check_all(const _Tpvec& a) { return (v_signmask(v_reinterpret_as_s8(a)) & 0xaaaaaaaa) == 
0xaaaaaaaa; } \ + inline bool v_check_any(const _Tpvec& a) { return (v_signmask(v_reinterpret_as_s8(a)) & 0xaaaaaaaa) != 0; } +OPENCV_HAL_IMPL_AVX_CHECK_SHORT(v_uint16x16) +OPENCV_HAL_IMPL_AVX_CHECK_SHORT(v_int16x16) + +////////// Other math ///////// + +/** Some frequent operations **/ +#define OPENCV_HAL_IMPL_AVX_MULADD(_Tpvec, suffix) \ + inline _Tpvec v_fma(const _Tpvec& a, const _Tpvec& b, const _Tpvec& c) \ + { return _Tpvec(_mm256_fmadd_##suffix(a.val, b.val, c.val)); } \ + inline _Tpvec v_muladd(const _Tpvec& a, const _Tpvec& b, const _Tpvec& c) \ + { return _Tpvec(_mm256_fmadd_##suffix(a.val, b.val, c.val)); } \ + inline _Tpvec v_sqrt(const _Tpvec& x) \ + { return _Tpvec(_mm256_sqrt_##suffix(x.val)); } \ + inline _Tpvec v_sqr_magnitude(const _Tpvec& a, const _Tpvec& b) \ + { return v_fma(a, a, b * b); } \ + inline _Tpvec v_magnitude(const _Tpvec& a, const _Tpvec& b) \ + { return v_sqrt(v_fma(a, a, b*b)); } + +OPENCV_HAL_IMPL_AVX_MULADD(v_float32x8, ps) +OPENCV_HAL_IMPL_AVX_MULADD(v_float64x4, pd) + +inline v_int32x8 v_fma(const v_int32x8& a, const v_int32x8& b, const v_int32x8& c) +{ + return a * b + c; +} + +inline v_int32x8 v_muladd(const v_int32x8& a, const v_int32x8& b, const v_int32x8& c) +{ + return v_fma(a, b, c); +} + +inline v_float32x8 v_invsqrt(const v_float32x8& x) +{ + v_float32x8 half = x * v256_setall_f32(0.5); + v_float32x8 t = v_float32x8(_mm256_rsqrt_ps(x.val)); + // todo: _mm256_fnmsub_ps + t *= v256_setall_f32(1.5) - ((t * t) * half); + return t; +} + +inline v_float64x4 v_invsqrt(const v_float64x4& x) +{ + return v256_setall_f64(1.) / v_sqrt(x); +} + +/** Absolute values **/ +#define OPENCV_HAL_IMPL_AVX_ABS(_Tpvec, suffix) \ + inline v_u##_Tpvec v_abs(const v_##_Tpvec& x) \ + { return v_u##_Tpvec(_mm256_abs_##suffix(x.val)); } + +OPENCV_HAL_IMPL_AVX_ABS(int8x32, epi8) +OPENCV_HAL_IMPL_AVX_ABS(int16x16, epi16) +OPENCV_HAL_IMPL_AVX_ABS(int32x8, epi32) + +inline v_float32x8 v_abs(const v_float32x8& x) +{ return x & v_float32x8(_mm256_castsi256_ps(_mm256_set1_epi32(0x7fffffff))); } +inline v_float64x4 v_abs(const v_float64x4& x) +{ return x & v_float64x4(_mm256_castsi256_pd(_mm256_srli_epi64(_mm256_set1_epi64x(-1), 1))); } + +/** Absolute difference **/ +inline v_uint8x32 v_absdiff(const v_uint8x32& a, const v_uint8x32& b) +{ return v_add_wrap(a - b, b - a); } +inline v_uint16x16 v_absdiff(const v_uint16x16& a, const v_uint16x16& b) +{ return v_add_wrap(a - b, b - a); } +inline v_uint32x8 v_absdiff(const v_uint32x8& a, const v_uint32x8& b) +{ return v_max(a, b) - v_min(a, b); } + +inline v_uint8x32 v_absdiff(const v_int8x32& a, const v_int8x32& b) +{ + v_int8x32 d = v_sub_wrap(a, b); + v_int8x32 m = a < b; + return v_reinterpret_as_u8(v_sub_wrap(d ^ m, m)); +} + +inline v_uint16x16 v_absdiff(const v_int16x16& a, const v_int16x16& b) +{ return v_reinterpret_as_u16(v_sub_wrap(v_max(a, b), v_min(a, b))); } + +inline v_uint32x8 v_absdiff(const v_int32x8& a, const v_int32x8& b) +{ + v_int32x8 d = a - b; + v_int32x8 m = a < b; + return v_reinterpret_as_u32((d ^ m) - m); +} + +inline v_float32x8 v_absdiff(const v_float32x8& a, const v_float32x8& b) +{ return v_abs(a - b); } + +inline v_float64x4 v_absdiff(const v_float64x4& a, const v_float64x4& b) +{ return v_abs(a - b); } + +/** Saturating absolute difference **/ +inline v_int8x32 v_absdiffs(const v_int8x32& a, const v_int8x32& b) +{ + v_int8x32 d = a - b; + v_int8x32 m = a < b; + return (d ^ m) - m; +} +inline v_int16x16 v_absdiffs(const v_int16x16& a, const v_int16x16& b) +{ return v_max(a, b) - v_min(a, b); } + 
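The arithmetic, min/max, FMA and load/store blocks above are all generated by macros, which makes it easy to lose sight of how they compose in user code. The sketch below is an editor's illustration only, not part of the patched header: it assumes this file is reached through opencv2/core/hal/intrin.hpp in an AVX2-enabled build (CV_SIMD256), that the v256_* / v_* symbols resolve inside the cv namespace as usual, and axpy_f32 is a hypothetical helper name.

// Illustrative usage sketch (editorial, not part of intrin_avx.hpp).
// Computes y[i] += a * x[i] over n floats, 8 lanes at a time.
#include <opencv2/core/hal/intrin.hpp>

static void axpy_f32(float a, const float* x, float* y, int n)
{
    using namespace cv;
    const v_float32x8 va = v256_setall_f32(a);        // broadcast scalar (OPENCV_HAL_IMPL_AVX_INIT_FLT)
    int i = 0;
    for (; i <= n - v_float32x8::nlanes; i += v_float32x8::nlanes)
    {
        v_float32x8 vx = v256_load(x + i);            // unaligned 256-bit load defined above
        v_float32x8 vy = v256_load(y + i);
        v_store(y + i, v_fma(va, vx, vy));            // a*x + y via OPENCV_HAL_IMPL_AVX_MULADD
    }
    for (; i < n; ++i)                                // scalar tail for the remaining elements
        y[i] += a * x[i];
}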
+////////// Conversions ///////// + +/** Rounding **/ +inline v_int32x8 v_round(const v_float32x8& a) +{ return v_int32x8(_mm256_cvtps_epi32(a.val)); } + +inline v_int32x8 v_round(const v_float64x4& a) +{ return v_int32x8(_mm256_castsi128_si256(_mm256_cvtpd_epi32(a.val))); } + +inline v_int32x8 v_round(const v_float64x4& a, const v_float64x4& b) +{ + __m128i ai = _mm256_cvtpd_epi32(a.val), bi = _mm256_cvtpd_epi32(b.val); + return v_int32x8(_v256_combine(ai, bi)); +} + +inline v_int32x8 v_trunc(const v_float32x8& a) +{ return v_int32x8(_mm256_cvttps_epi32(a.val)); } + +inline v_int32x8 v_trunc(const v_float64x4& a) +{ return v_int32x8(_mm256_castsi128_si256(_mm256_cvttpd_epi32(a.val))); } + +inline v_int32x8 v_floor(const v_float32x8& a) +{ return v_int32x8(_mm256_cvttps_epi32(_mm256_floor_ps(a.val))); } + +inline v_int32x8 v_floor(const v_float64x4& a) +{ return v_trunc(v_float64x4(_mm256_floor_pd(a.val))); } + +inline v_int32x8 v_ceil(const v_float32x8& a) +{ return v_int32x8(_mm256_cvttps_epi32(_mm256_ceil_ps(a.val))); } + +inline v_int32x8 v_ceil(const v_float64x4& a) +{ return v_trunc(v_float64x4(_mm256_ceil_pd(a.val))); } + +/** To float **/ +inline v_float32x8 v_cvt_f32(const v_int32x8& a) +{ return v_float32x8(_mm256_cvtepi32_ps(a.val)); } + +inline v_float32x8 v_cvt_f32(const v_float64x4& a) +{ return v_float32x8(_mm256_castps128_ps256(_mm256_cvtpd_ps(a.val))); } + +inline v_float32x8 v_cvt_f32(const v_float64x4& a, const v_float64x4& b) +{ + __m128 af = _mm256_cvtpd_ps(a.val), bf = _mm256_cvtpd_ps(b.val); + return v_float32x8(_v256_combine(af, bf)); +} + +inline v_float64x4 v_cvt_f64(const v_int32x8& a) +{ return v_float64x4(_mm256_cvtepi32_pd(_v256_extract_low(a.val))); } + +inline v_float64x4 v_cvt_f64_high(const v_int32x8& a) +{ return v_float64x4(_mm256_cvtepi32_pd(_v256_extract_high(a.val))); } + +inline v_float64x4 v_cvt_f64(const v_float32x8& a) +{ return v_float64x4(_mm256_cvtps_pd(_v256_extract_low(a.val))); } + +inline v_float64x4 v_cvt_f64_high(const v_float32x8& a) +{ return v_float64x4(_mm256_cvtps_pd(_v256_extract_high(a.val))); } + +// from (Mysticial and wim) https://stackoverflow.com/q/41144668 +inline v_float64x4 v_cvt_f64(const v_int64x4& v) +{ + // constants encoded as floating-point + __m256i magic_i_lo = _mm256_set1_epi64x(0x4330000000000000); // 2^52 + __m256i magic_i_hi32 = _mm256_set1_epi64x(0x4530000080000000); // 2^84 + 2^63 + __m256i magic_i_all = _mm256_set1_epi64x(0x4530000080100000); // 2^84 + 2^63 + 2^52 + __m256d magic_d_all = _mm256_castsi256_pd(magic_i_all); + + // Blend the 32 lowest significant bits of v with magic_int_lo + __m256i v_lo = _mm256_blend_epi32(magic_i_lo, v.val, 0x55); + // Extract the 32 most significant bits of v + __m256i v_hi = _mm256_srli_epi64(v.val, 32); + // Flip the msb of v_hi and blend with 0x45300000 + v_hi = _mm256_xor_si256(v_hi, magic_i_hi32); + // Compute in double precision + __m256d v_hi_dbl = _mm256_sub_pd(_mm256_castsi256_pd(v_hi), magic_d_all); + // (v_hi - magic_d_all) + v_lo Do not assume associativity of floating point addition + __m256d result = _mm256_add_pd(v_hi_dbl, _mm256_castsi256_pd(v_lo)); + return v_float64x4(result); +} + +////////////// Lookup table access //////////////////// + +inline v_int8x32 v256_lut(const schar* tab, const int* idx) +{ + return v_int8x32(_mm256_setr_epi8(tab[idx[ 0]], tab[idx[ 1]], tab[idx[ 2]], tab[idx[ 3]], tab[idx[ 4]], tab[idx[ 5]], tab[idx[ 6]], tab[idx[ 7]], + tab[idx[ 8]], tab[idx[ 9]], tab[idx[10]], tab[idx[11]], tab[idx[12]], tab[idx[13]], tab[idx[14]], 
tab[idx[15]], + tab[idx[16]], tab[idx[17]], tab[idx[18]], tab[idx[19]], tab[idx[20]], tab[idx[21]], tab[idx[22]], tab[idx[23]], + tab[idx[24]], tab[idx[25]], tab[idx[26]], tab[idx[27]], tab[idx[28]], tab[idx[29]], tab[idx[30]], tab[idx[31]])); +} +inline v_int8x32 v256_lut_pairs(const schar* tab, const int* idx) +{ + return v_int8x32(_mm256_setr_epi16(*(const short*)(tab + idx[ 0]), *(const short*)(tab + idx[ 1]), *(const short*)(tab + idx[ 2]), *(const short*)(tab + idx[ 3]), + *(const short*)(tab + idx[ 4]), *(const short*)(tab + idx[ 5]), *(const short*)(tab + idx[ 6]), *(const short*)(tab + idx[ 7]), + *(const short*)(tab + idx[ 8]), *(const short*)(tab + idx[ 9]), *(const short*)(tab + idx[10]), *(const short*)(tab + idx[11]), + *(const short*)(tab + idx[12]), *(const short*)(tab + idx[13]), *(const short*)(tab + idx[14]), *(const short*)(tab + idx[15]))); +} +inline v_int8x32 v256_lut_quads(const schar* tab, const int* idx) +{ + return v_int8x32(_mm256_i32gather_epi32((const int*)tab, _mm256_loadu_si256((const __m256i*)idx), 1)); +} +inline v_uint8x32 v256_lut(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v256_lut((const schar *)tab, idx)); } +inline v_uint8x32 v256_lut_pairs(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v256_lut_pairs((const schar *)tab, idx)); } +inline v_uint8x32 v256_lut_quads(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v256_lut_quads((const schar *)tab, idx)); } + +inline v_int16x16 v256_lut(const short* tab, const int* idx) +{ + return v_int16x16(_mm256_setr_epi16(tab[idx[0]], tab[idx[1]], tab[idx[ 2]], tab[idx[ 3]], tab[idx[ 4]], tab[idx[ 5]], tab[idx[ 6]], tab[idx[ 7]], + tab[idx[8]], tab[idx[9]], tab[idx[10]], tab[idx[11]], tab[idx[12]], tab[idx[13]], tab[idx[14]], tab[idx[15]])); +} +inline v_int16x16 v256_lut_pairs(const short* tab, const int* idx) +{ + return v_int16x16(_mm256_i32gather_epi32((const int*)tab, _mm256_loadu_si256((const __m256i*)idx), 2)); +} +inline v_int16x16 v256_lut_quads(const short* tab, const int* idx) +{ +#if defined(__GNUC__) + return v_int16x16(_mm256_i32gather_epi64((const long long int*)tab, _mm_loadu_si128((const __m128i*)idx), 2));//Looks like intrinsic has wrong definition +#else + return v_int16x16(_mm256_i32gather_epi64((const int64*)tab, _mm_loadu_si128((const __m128i*)idx), 2)); +#endif +} +inline v_uint16x16 v256_lut(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v256_lut((const short *)tab, idx)); } +inline v_uint16x16 v256_lut_pairs(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v256_lut_pairs((const short *)tab, idx)); } +inline v_uint16x16 v256_lut_quads(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v256_lut_quads((const short *)tab, idx)); } + +inline v_int32x8 v256_lut(const int* tab, const int* idx) +{ + return v_int32x8(_mm256_i32gather_epi32(tab, _mm256_loadu_si256((const __m256i*)idx), 4)); +} +inline v_int32x8 v256_lut_pairs(const int* tab, const int* idx) +{ +#if defined(__GNUC__) + return v_int32x8(_mm256_i32gather_epi64((const long long int*)tab, _mm_loadu_si128((const __m128i*)idx), 4)); +#else + return v_int32x8(_mm256_i32gather_epi64((const int64*)tab, _mm_loadu_si128((const __m128i*)idx), 4)); +#endif +} +inline v_int32x8 v256_lut_quads(const int* tab, const int* idx) +{ + return v_int32x8(_v256_combine(_mm_loadu_si128((const __m128i*)(tab + idx[0])), _mm_loadu_si128((const __m128i*)(tab + idx[1])))); +} +inline v_uint32x8 v256_lut(const unsigned* tab, const int* idx) { return 
v_reinterpret_as_u32(v256_lut((const int *)tab, idx)); } +inline v_uint32x8 v256_lut_pairs(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v256_lut_pairs((const int *)tab, idx)); } +inline v_uint32x8 v256_lut_quads(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v256_lut_quads((const int *)tab, idx)); } + +inline v_int64x4 v256_lut(const int64* tab, const int* idx) +{ +#if defined(__GNUC__) + return v_int64x4(_mm256_i32gather_epi64((const long long int*)tab, _mm_loadu_si128((const __m128i*)idx), 8)); +#else + return v_int64x4(_mm256_i32gather_epi64(tab, _mm_loadu_si128((const __m128i*)idx), 8)); +#endif +} +inline v_int64x4 v256_lut_pairs(const int64* tab, const int* idx) +{ + return v_int64x4(_v256_combine(_mm_loadu_si128((const __m128i*)(tab + idx[0])), _mm_loadu_si128((const __m128i*)(tab + idx[1])))); +} +inline v_uint64x4 v256_lut(const uint64* tab, const int* idx) { return v_reinterpret_as_u64(v256_lut((const int64 *)tab, idx)); } +inline v_uint64x4 v256_lut_pairs(const uint64* tab, const int* idx) { return v_reinterpret_as_u64(v256_lut_pairs((const int64 *)tab, idx)); } + +inline v_float32x8 v256_lut(const float* tab, const int* idx) +{ + return v_float32x8(_mm256_i32gather_ps(tab, _mm256_loadu_si256((const __m256i*)idx), 4)); +} +inline v_float32x8 v256_lut_pairs(const float* tab, const int* idx) { return v_reinterpret_as_f32(v256_lut_pairs((const int *)tab, idx)); } +inline v_float32x8 v256_lut_quads(const float* tab, const int* idx) { return v_reinterpret_as_f32(v256_lut_quads((const int *)tab, idx)); } + +inline v_float64x4 v256_lut(const double* tab, const int* idx) +{ + return v_float64x4(_mm256_i32gather_pd(tab, _mm_loadu_si128((const __m128i*)idx), 8)); +} +inline v_float64x4 v256_lut_pairs(const double* tab, const int* idx) { return v_float64x4(_v256_combine(_mm_loadu_pd(tab + idx[0]), _mm_loadu_pd(tab + idx[1]))); } + +inline v_int32x8 v_lut(const int* tab, const v_int32x8& idxvec) +{ + return v_int32x8(_mm256_i32gather_epi32(tab, idxvec.val, 4)); +} + +inline v_uint32x8 v_lut(const unsigned* tab, const v_int32x8& idxvec) +{ + return v_reinterpret_as_u32(v_lut((const int *)tab, idxvec)); +} + +inline v_float32x8 v_lut(const float* tab, const v_int32x8& idxvec) +{ + return v_float32x8(_mm256_i32gather_ps(tab, idxvec.val, 4)); +} + +inline v_float64x4 v_lut(const double* tab, const v_int32x8& idxvec) +{ + return v_float64x4(_mm256_i32gather_pd(tab, _mm256_castsi256_si128(idxvec.val), 8)); +} + +inline void v_lut_deinterleave(const float* tab, const v_int32x8& idxvec, v_float32x8& x, v_float32x8& y) +{ + int CV_DECL_ALIGNED(32) idx[8]; + v_store_aligned(idx, idxvec); + __m128 z = _mm_setzero_ps(); + __m128 xy01, xy45, xy23, xy67; + xy01 = _mm_loadl_pi(z, (const __m64*)(tab + idx[0])); + xy01 = _mm_loadh_pi(xy01, (const __m64*)(tab + idx[1])); + xy45 = _mm_loadl_pi(z, (const __m64*)(tab + idx[4])); + xy45 = _mm_loadh_pi(xy45, (const __m64*)(tab + idx[5])); + __m256 xy0145 = _v256_combine(xy01, xy45); + xy23 = _mm_loadl_pi(z, (const __m64*)(tab + idx[2])); + xy23 = _mm_loadh_pi(xy23, (const __m64*)(tab + idx[3])); + xy67 = _mm_loadl_pi(z, (const __m64*)(tab + idx[6])); + xy67 = _mm_loadh_pi(xy67, (const __m64*)(tab + idx[7])); + __m256 xy2367 = _v256_combine(xy23, xy67); + + __m256 xxyy0145 = _mm256_unpacklo_ps(xy0145, xy2367); + __m256 xxyy2367 = _mm256_unpackhi_ps(xy0145, xy2367); + + x = v_float32x8(_mm256_unpacklo_ps(xxyy0145, xxyy2367)); + y = v_float32x8(_mm256_unpackhi_ps(xxyy0145, xxyy2367)); +} + +inline void 
v_lut_deinterleave(const double* tab, const v_int32x8& idxvec, v_float64x4& x, v_float64x4& y) +{ + int CV_DECL_ALIGNED(32) idx[4]; + v_store_low(idx, idxvec); + __m128d xy0 = _mm_loadu_pd(tab + idx[0]); + __m128d xy2 = _mm_loadu_pd(tab + idx[2]); + __m128d xy1 = _mm_loadu_pd(tab + idx[1]); + __m128d xy3 = _mm_loadu_pd(tab + idx[3]); + __m256d xy02 = _v256_combine(xy0, xy2); + __m256d xy13 = _v256_combine(xy1, xy3); + + x = v_float64x4(_mm256_unpacklo_pd(xy02, xy13)); + y = v_float64x4(_mm256_unpackhi_pd(xy02, xy13)); +} + +inline v_int8x32 v_interleave_pairs(const v_int8x32& vec) +{ + return v_int8x32(_mm256_shuffle_epi8(vec.val, _mm256_set_epi64x(0x0f0d0e0c0b090a08, 0x0705060403010200, 0x0f0d0e0c0b090a08, 0x0705060403010200))); +} +inline v_uint8x32 v_interleave_pairs(const v_uint8x32& vec) { return v_reinterpret_as_u8(v_interleave_pairs(v_reinterpret_as_s8(vec))); } +inline v_int8x32 v_interleave_quads(const v_int8x32& vec) +{ + return v_int8x32(_mm256_shuffle_epi8(vec.val, _mm256_set_epi64x(0x0f0b0e0a0d090c08, 0x0703060205010400, 0x0f0b0e0a0d090c08, 0x0703060205010400))); +} +inline v_uint8x32 v_interleave_quads(const v_uint8x32& vec) { return v_reinterpret_as_u8(v_interleave_quads(v_reinterpret_as_s8(vec))); } + +inline v_int16x16 v_interleave_pairs(const v_int16x16& vec) +{ + return v_int16x16(_mm256_shuffle_epi8(vec.val, _mm256_set_epi64x(0x0f0e0b0a0d0c0908, 0x0706030205040100, 0x0f0e0b0a0d0c0908, 0x0706030205040100))); +} +inline v_uint16x16 v_interleave_pairs(const v_uint16x16& vec) { return v_reinterpret_as_u16(v_interleave_pairs(v_reinterpret_as_s16(vec))); } +inline v_int16x16 v_interleave_quads(const v_int16x16& vec) +{ + return v_int16x16(_mm256_shuffle_epi8(vec.val, _mm256_set_epi64x(0x0f0e07060d0c0504, 0x0b0a030209080100, 0x0f0e07060d0c0504, 0x0b0a030209080100))); +} +inline v_uint16x16 v_interleave_quads(const v_uint16x16& vec) { return v_reinterpret_as_u16(v_interleave_quads(v_reinterpret_as_s16(vec))); } + +inline v_int32x8 v_interleave_pairs(const v_int32x8& vec) +{ + return v_int32x8(_mm256_shuffle_epi32(vec.val, _MM_SHUFFLE(3, 1, 2, 0))); +} +inline v_uint32x8 v_interleave_pairs(const v_uint32x8& vec) { return v_reinterpret_as_u32(v_interleave_pairs(v_reinterpret_as_s32(vec))); } +inline v_float32x8 v_interleave_pairs(const v_float32x8& vec) { return v_reinterpret_as_f32(v_interleave_pairs(v_reinterpret_as_s32(vec))); } + +inline v_int8x32 v_pack_triplets(const v_int8x32& vec) +{ + return v_int8x32(_mm256_permutevar8x32_epi32(_mm256_shuffle_epi8(vec.val, _mm256_broadcastsi128_si256(_mm_set_epi64x(0xffffff0f0e0d0c0a, 0x0908060504020100))), + _mm256_set_epi64x(0x0000000700000007, 0x0000000600000005, 0x0000000400000002, 0x0000000100000000))); +} +inline v_uint8x32 v_pack_triplets(const v_uint8x32& vec) { return v_reinterpret_as_u8(v_pack_triplets(v_reinterpret_as_s8(vec))); } + +inline v_int16x16 v_pack_triplets(const v_int16x16& vec) +{ + return v_int16x16(_mm256_permutevar8x32_epi32(_mm256_shuffle_epi8(vec.val, _mm256_broadcastsi128_si256(_mm_set_epi64x(0xffff0f0e0d0c0b0a, 0x0908050403020100))), + _mm256_set_epi64x(0x0000000700000007, 0x0000000600000005, 0x0000000400000002, 0x0000000100000000))); +} +inline v_uint16x16 v_pack_triplets(const v_uint16x16& vec) { return v_reinterpret_as_u16(v_pack_triplets(v_reinterpret_as_s16(vec))); } + +inline v_int32x8 v_pack_triplets(const v_int32x8& vec) +{ + return v_int32x8(_mm256_permutevar8x32_epi32(vec.val, _mm256_set_epi64x(0x0000000700000007, 0x0000000600000005, 0x0000000400000002, 0x0000000100000000))); +} +inline v_uint32x8 
v_pack_triplets(const v_uint32x8& vec) { return v_reinterpret_as_u32(v_pack_triplets(v_reinterpret_as_s32(vec))); } +inline v_float32x8 v_pack_triplets(const v_float32x8& vec) +{ + return v_float32x8(_mm256_permutevar8x32_ps(vec.val, _mm256_set_epi64x(0x0000000700000007, 0x0000000600000005, 0x0000000400000002, 0x0000000100000000))); +} + +////////// Matrix operations ///////// + +//////// Dot Product //////// + +// 16 >> 32 +inline v_int32x8 v_dotprod(const v_int16x16& a, const v_int16x16& b) +{ return v_int32x8(_mm256_madd_epi16(a.val, b.val)); } +inline v_int32x8 v_dotprod(const v_int16x16& a, const v_int16x16& b, const v_int32x8& c) +{ return v_dotprod(a, b) + c; } + +// 32 >> 64 +inline v_int64x4 v_dotprod(const v_int32x8& a, const v_int32x8& b) +{ + __m256i even = _mm256_mul_epi32(a.val, b.val); + __m256i odd = _mm256_mul_epi32(_mm256_srli_epi64(a.val, 32), _mm256_srli_epi64(b.val, 32)); + return v_int64x4(_mm256_add_epi64(even, odd)); +} +inline v_int64x4 v_dotprod(const v_int32x8& a, const v_int32x8& b, const v_int64x4& c) +{ return v_dotprod(a, b) + c; } + +// 8 >> 32 +inline v_uint32x8 v_dotprod_expand(const v_uint8x32& a, const v_uint8x32& b) +{ + __m256i even_m = _mm256_set1_epi32(0xFF00FF00); + __m256i even_a = _mm256_blendv_epi8(a.val, _mm256_setzero_si256(), even_m); + __m256i odd_a = _mm256_srli_epi16(a.val, 8); + + __m256i even_b = _mm256_blendv_epi8(b.val, _mm256_setzero_si256(), even_m); + __m256i odd_b = _mm256_srli_epi16(b.val, 8); + + __m256i prod0 = _mm256_madd_epi16(even_a, even_b); + __m256i prod1 = _mm256_madd_epi16(odd_a, odd_b); + return v_uint32x8(_mm256_add_epi32(prod0, prod1)); +} +inline v_uint32x8 v_dotprod_expand(const v_uint8x32& a, const v_uint8x32& b, const v_uint32x8& c) +{ return v_dotprod_expand(a, b) + c; } + +inline v_int32x8 v_dotprod_expand(const v_int8x32& a, const v_int8x32& b) +{ + __m256i even_a = _mm256_srai_epi16(_mm256_bslli_epi128(a.val, 1), 8); + __m256i odd_a = _mm256_srai_epi16(a.val, 8); + + __m256i even_b = _mm256_srai_epi16(_mm256_bslli_epi128(b.val, 1), 8); + __m256i odd_b = _mm256_srai_epi16(b.val, 8); + + __m256i prod0 = _mm256_madd_epi16(even_a, even_b); + __m256i prod1 = _mm256_madd_epi16(odd_a, odd_b); + return v_int32x8(_mm256_add_epi32(prod0, prod1)); +} +inline v_int32x8 v_dotprod_expand(const v_int8x32& a, const v_int8x32& b, const v_int32x8& c) +{ return v_dotprod_expand(a, b) + c; } + +// 16 >> 64 +inline v_uint64x4 v_dotprod_expand(const v_uint16x16& a, const v_uint16x16& b) +{ + __m256i mullo = _mm256_mullo_epi16(a.val, b.val); + __m256i mulhi = _mm256_mulhi_epu16(a.val, b.val); + __m256i mul0 = _mm256_unpacklo_epi16(mullo, mulhi); + __m256i mul1 = _mm256_unpackhi_epi16(mullo, mulhi); + + __m256i p02 = _mm256_blend_epi32(mul0, _mm256_setzero_si256(), 0xAA); + __m256i p13 = _mm256_srli_epi64(mul0, 32); + __m256i p46 = _mm256_blend_epi32(mul1, _mm256_setzero_si256(), 0xAA); + __m256i p57 = _mm256_srli_epi64(mul1, 32); + + __m256i p15_ = _mm256_add_epi64(p02, p13); + __m256i p9d_ = _mm256_add_epi64(p46, p57); + + return v_uint64x4(_mm256_add_epi64( + _mm256_unpacklo_epi64(p15_, p9d_), + _mm256_unpackhi_epi64(p15_, p9d_) + )); +} +inline v_uint64x4 v_dotprod_expand(const v_uint16x16& a, const v_uint16x16& b, const v_uint64x4& c) +{ return v_dotprod_expand(a, b) + c; } + +inline v_int64x4 v_dotprod_expand(const v_int16x16& a, const v_int16x16& b) +{ + __m256i prod = _mm256_madd_epi16(a.val, b.val); + __m256i sign = _mm256_srai_epi32(prod, 31); + + __m256i lo = _mm256_unpacklo_epi32(prod, sign); + __m256i hi = 
_mm256_unpackhi_epi32(prod, sign); + + return v_int64x4(_mm256_add_epi64( + _mm256_unpacklo_epi64(lo, hi), + _mm256_unpackhi_epi64(lo, hi) + )); +} +inline v_int64x4 v_dotprod_expand(const v_int16x16& a, const v_int16x16& b, const v_int64x4& c) +{ return v_dotprod_expand(a, b) + c; } + +// 32 >> 64f +inline v_float64x4 v_dotprod_expand(const v_int32x8& a, const v_int32x8& b) +{ return v_cvt_f64(v_dotprod(a, b)); } +inline v_float64x4 v_dotprod_expand(const v_int32x8& a, const v_int32x8& b, const v_float64x4& c) +{ return v_dotprod_expand(a, b) + c; } + +//////// Fast Dot Product //////// + +// 16 >> 32 +inline v_int32x8 v_dotprod_fast(const v_int16x16& a, const v_int16x16& b) +{ return v_dotprod(a, b); } +inline v_int32x8 v_dotprod_fast(const v_int16x16& a, const v_int16x16& b, const v_int32x8& c) +{ return v_dotprod(a, b, c); } + +// 32 >> 64 +inline v_int64x4 v_dotprod_fast(const v_int32x8& a, const v_int32x8& b) +{ return v_dotprod(a, b); } +inline v_int64x4 v_dotprod_fast(const v_int32x8& a, const v_int32x8& b, const v_int64x4& c) +{ return v_dotprod(a, b, c); } + +// 8 >> 32 +inline v_uint32x8 v_dotprod_expand_fast(const v_uint8x32& a, const v_uint8x32& b) +{ return v_dotprod_expand(a, b); } +inline v_uint32x8 v_dotprod_expand_fast(const v_uint8x32& a, const v_uint8x32& b, const v_uint32x8& c) +{ return v_dotprod_expand(a, b, c); } + +inline v_int32x8 v_dotprod_expand_fast(const v_int8x32& a, const v_int8x32& b) +{ return v_dotprod_expand(a, b); } +inline v_int32x8 v_dotprod_expand_fast(const v_int8x32& a, const v_int8x32& b, const v_int32x8& c) +{ return v_dotprod_expand(a, b, c); } + +// 16 >> 64 +inline v_uint64x4 v_dotprod_expand_fast(const v_uint16x16& a, const v_uint16x16& b) +{ + __m256i mullo = _mm256_mullo_epi16(a.val, b.val); + __m256i mulhi = _mm256_mulhi_epu16(a.val, b.val); + __m256i mul0 = _mm256_unpacklo_epi16(mullo, mulhi); + __m256i mul1 = _mm256_unpackhi_epi16(mullo, mulhi); + + __m256i p02 = _mm256_blend_epi32(mul0, _mm256_setzero_si256(), 0xAA); + __m256i p13 = _mm256_srli_epi64(mul0, 32); + __m256i p46 = _mm256_blend_epi32(mul1, _mm256_setzero_si256(), 0xAA); + __m256i p57 = _mm256_srli_epi64(mul1, 32); + + __m256i p15_ = _mm256_add_epi64(p02, p13); + __m256i p9d_ = _mm256_add_epi64(p46, p57); + + return v_uint64x4(_mm256_add_epi64(p15_, p9d_)); +} +inline v_uint64x4 v_dotprod_expand_fast(const v_uint16x16& a, const v_uint16x16& b, const v_uint64x4& c) +{ return v_dotprod_expand_fast(a, b) + c; } + +inline v_int64x4 v_dotprod_expand_fast(const v_int16x16& a, const v_int16x16& b) +{ + __m256i prod = _mm256_madd_epi16(a.val, b.val); + __m256i sign = _mm256_srai_epi32(prod, 31); + __m256i lo = _mm256_unpacklo_epi32(prod, sign); + __m256i hi = _mm256_unpackhi_epi32(prod, sign); + return v_int64x4(_mm256_add_epi64(lo, hi)); +} +inline v_int64x4 v_dotprod_expand_fast(const v_int16x16& a, const v_int16x16& b, const v_int64x4& c) +{ return v_dotprod_expand_fast(a, b) + c; } + +// 32 >> 64f +inline v_float64x4 v_dotprod_expand_fast(const v_int32x8& a, const v_int32x8& b) +{ return v_dotprod_expand(a, b); } +inline v_float64x4 v_dotprod_expand_fast(const v_int32x8& a, const v_int32x8& b, const v_float64x4& c) +{ return v_dotprod_expand(a, b, c); } + +#define OPENCV_HAL_AVX_SPLAT2_PS(a, im) \ + v_float32x8(_mm256_permute_ps(a.val, _MM_SHUFFLE(im, im, im, im))) + +inline v_float32x8 v_matmul(const v_float32x8& v, const v_float32x8& m0, + const v_float32x8& m1, const v_float32x8& m2, + const v_float32x8& m3) +{ + v_float32x8 v04 = OPENCV_HAL_AVX_SPLAT2_PS(v, 0); + v_float32x8 
v15 = OPENCV_HAL_AVX_SPLAT2_PS(v, 1); + v_float32x8 v26 = OPENCV_HAL_AVX_SPLAT2_PS(v, 2); + v_float32x8 v37 = OPENCV_HAL_AVX_SPLAT2_PS(v, 3); + return v_fma(v04, m0, v_fma(v15, m1, v_fma(v26, m2, v37 * m3))); +} + +inline v_float32x8 v_matmuladd(const v_float32x8& v, const v_float32x8& m0, + const v_float32x8& m1, const v_float32x8& m2, + const v_float32x8& a) +{ + v_float32x8 v04 = OPENCV_HAL_AVX_SPLAT2_PS(v, 0); + v_float32x8 v15 = OPENCV_HAL_AVX_SPLAT2_PS(v, 1); + v_float32x8 v26 = OPENCV_HAL_AVX_SPLAT2_PS(v, 2); + return v_fma(v04, m0, v_fma(v15, m1, v_fma(v26, m2, a))); +} + +#define OPENCV_HAL_IMPL_AVX_TRANSPOSE4x4(_Tpvec, suffix, cast_from, cast_to) \ + inline void v_transpose4x4(const _Tpvec& a0, const _Tpvec& a1, \ + const _Tpvec& a2, const _Tpvec& a3, \ + _Tpvec& b0, _Tpvec& b1, _Tpvec& b2, _Tpvec& b3) \ + { \ + __m256i t0 = cast_from(_mm256_unpacklo_##suffix(a0.val, a1.val)); \ + __m256i t1 = cast_from(_mm256_unpacklo_##suffix(a2.val, a3.val)); \ + __m256i t2 = cast_from(_mm256_unpackhi_##suffix(a0.val, a1.val)); \ + __m256i t3 = cast_from(_mm256_unpackhi_##suffix(a2.val, a3.val)); \ + b0.val = cast_to(_mm256_unpacklo_epi64(t0, t1)); \ + b1.val = cast_to(_mm256_unpackhi_epi64(t0, t1)); \ + b2.val = cast_to(_mm256_unpacklo_epi64(t2, t3)); \ + b3.val = cast_to(_mm256_unpackhi_epi64(t2, t3)); \ + } + +OPENCV_HAL_IMPL_AVX_TRANSPOSE4x4(v_uint32x8, epi32, OPENCV_HAL_NOP, OPENCV_HAL_NOP) +OPENCV_HAL_IMPL_AVX_TRANSPOSE4x4(v_int32x8, epi32, OPENCV_HAL_NOP, OPENCV_HAL_NOP) +OPENCV_HAL_IMPL_AVX_TRANSPOSE4x4(v_float32x8, ps, _mm256_castps_si256, _mm256_castsi256_ps) + +//////////////// Value reordering /////////////// + +/* Expand */ +#define OPENCV_HAL_IMPL_AVX_EXPAND(_Tpvec, _Tpwvec, _Tp, intrin) \ + inline void v_expand(const _Tpvec& a, _Tpwvec& b0, _Tpwvec& b1) \ + { \ + b0.val = intrin(_v256_extract_low(a.val)); \ + b1.val = intrin(_v256_extract_high(a.val)); \ + } \ + inline _Tpwvec v_expand_low(const _Tpvec& a) \ + { return _Tpwvec(intrin(_v256_extract_low(a.val))); } \ + inline _Tpwvec v_expand_high(const _Tpvec& a) \ + { return _Tpwvec(intrin(_v256_extract_high(a.val))); } \ + inline _Tpwvec v256_load_expand(const _Tp* ptr) \ + { \ + __m128i a = _mm_loadu_si128((const __m128i*)ptr); \ + return _Tpwvec(intrin(a)); \ + } + +OPENCV_HAL_IMPL_AVX_EXPAND(v_uint8x32, v_uint16x16, uchar, _mm256_cvtepu8_epi16) +OPENCV_HAL_IMPL_AVX_EXPAND(v_int8x32, v_int16x16, schar, _mm256_cvtepi8_epi16) +OPENCV_HAL_IMPL_AVX_EXPAND(v_uint16x16, v_uint32x8, ushort, _mm256_cvtepu16_epi32) +OPENCV_HAL_IMPL_AVX_EXPAND(v_int16x16, v_int32x8, short, _mm256_cvtepi16_epi32) +OPENCV_HAL_IMPL_AVX_EXPAND(v_uint32x8, v_uint64x4, unsigned, _mm256_cvtepu32_epi64) +OPENCV_HAL_IMPL_AVX_EXPAND(v_int32x8, v_int64x4, int, _mm256_cvtepi32_epi64) + +#define OPENCV_HAL_IMPL_AVX_EXPAND_Q(_Tpvec, _Tp, intrin) \ + inline _Tpvec v256_load_expand_q(const _Tp* ptr) \ + { \ + __m128i a = _mm_loadl_epi64((const __m128i*)ptr); \ + return _Tpvec(intrin(a)); \ + } + +OPENCV_HAL_IMPL_AVX_EXPAND_Q(v_uint32x8, uchar, _mm256_cvtepu8_epi32) +OPENCV_HAL_IMPL_AVX_EXPAND_Q(v_int32x8, schar, _mm256_cvtepi8_epi32) + +/* pack */ +// 16 +inline v_int8x32 v_pack(const v_int16x16& a, const v_int16x16& b) +{ return v_int8x32(_v256_shuffle_odd_64(_mm256_packs_epi16(a.val, b.val))); } + +inline v_uint8x32 v_pack(const v_uint16x16& a, const v_uint16x16& b) +{ + __m256i t = _mm256_set1_epi16(255); + __m256i a1 = _mm256_min_epu16(a.val, t); + __m256i b1 = _mm256_min_epu16(b.val, t); + return v_uint8x32(_v256_shuffle_odd_64(_mm256_packus_epi16(a1, b1))); +} + 
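/*
 * A minimal scalar sketch (illustrative only, not part of the vendored header) of the saturation
 * in the unsigned 16->8 v_pack above: _mm256_packus_epi16 treats its inputs as signed 16-bit
 * lanes, so values >= 0x8000 would saturate to 0 instead of 255; the preliminary
 * _mm256_min_epu16 clamp avoids that. Per lane the intended result is simply:
 *
 *     #include <algorithm>
 *     #include <cstdint>
 *     #include <cstdio>
 *
 *     static uint8_t pack_u16_to_u8(uint16_t v)
 *     {
 *         return (uint8_t)std::min<unsigned>(v, 255u); // saturate into the 8-bit destination range
 *     }
 *
 *     int main()
 *     {
 *         std::printf("%u %u %u\n", (unsigned)pack_u16_to_u8(42),
 *                     (unsigned)pack_u16_to_u8(300), (unsigned)pack_u16_to_u8(0xFFFF)); // 42 255 255
 *     }
 */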
+inline v_uint8x32 v_pack_u(const v_int16x16& a, const v_int16x16& b)
+{
+    return v_uint8x32(_v256_shuffle_odd_64(_mm256_packus_epi16(a.val, b.val)));
+}
+
+inline void v_pack_store(schar* ptr, const v_int16x16& a)
+{ v_store_low(ptr, v_pack(a, a)); }
+
+inline void v_pack_store(uchar* ptr, const v_uint16x16& a)
+{
+    const __m256i m = _mm256_set1_epi16(255);
+    __m256i am = _mm256_min_epu16(a.val, m);
+    am = _v256_shuffle_odd_64(_mm256_packus_epi16(am, am));
+    v_store_low(ptr, v_uint8x32(am));
+}
+
+inline void v_pack_u_store(uchar* ptr, const v_int16x16& a)
+{ v_store_low(ptr, v_pack_u(a, a)); }
+
+template<int n> inline
+v_uint8x32 v_rshr_pack(const v_uint16x16& a, const v_uint16x16& b)
+{
+    // we assume that n > 0, and so the shifted 16-bit values can be treated as signed numbers.
+    v_uint16x16 delta = v256_setall_u16((short)(1 << (n-1)));
+    return v_pack_u(v_reinterpret_as_s16((a + delta) >> n),
+                    v_reinterpret_as_s16((b + delta) >> n));
+}
+
+template<int n> inline
+void v_rshr_pack_store(uchar* ptr, const v_uint16x16& a)
+{
+    v_uint16x16 delta = v256_setall_u16((short)(1 << (n-1)));
+    v_pack_u_store(ptr, v_reinterpret_as_s16((a + delta) >> n));
+}
+
+template<int n> inline
+v_uint8x32 v_rshr_pack_u(const v_int16x16& a, const v_int16x16& b)
+{
+    v_int16x16 delta = v256_setall_s16((short)(1 << (n-1)));
+    return v_pack_u((a + delta) >> n, (b + delta) >> n);
+}
+
+template<int n> inline
+void v_rshr_pack_u_store(uchar* ptr, const v_int16x16& a)
+{
+    v_int16x16 delta = v256_setall_s16((short)(1 << (n-1)));
+    v_pack_u_store(ptr, (a + delta) >> n);
+}
+
+template<int n> inline
+v_int8x32 v_rshr_pack(const v_int16x16& a, const v_int16x16& b)
+{
+    v_int16x16 delta = v256_setall_s16((short)(1 << (n-1)));
+    return v_pack((a + delta) >> n, (b + delta) >> n);
+}
+
+template<int n> inline
+void v_rshr_pack_store(schar* ptr, const v_int16x16& a)
+{
+    v_int16x16 delta = v256_setall_s16((short)(1 << (n-1)));
+    v_pack_store(ptr, (a + delta) >> n);
+}
+
+// 32
+inline v_int16x16 v_pack(const v_int32x8& a, const v_int32x8& b)
+{ return v_int16x16(_v256_shuffle_odd_64(_mm256_packs_epi32(a.val, b.val))); }
+
+inline v_uint16x16 v_pack(const v_uint32x8& a, const v_uint32x8& b)
+{ return v_uint16x16(_v256_shuffle_odd_64(_v256_packs_epu32(a.val, b.val))); }
+
+inline v_uint16x16 v_pack_u(const v_int32x8& a, const v_int32x8& b)
+{ return v_uint16x16(_v256_shuffle_odd_64(_mm256_packus_epi32(a.val, b.val))); }
+
+inline void v_pack_store(short* ptr, const v_int32x8& a)
+{ v_store_low(ptr, v_pack(a, a)); }
+
+inline void v_pack_store(ushort* ptr, const v_uint32x8& a)
+{
+    const __m256i m = _mm256_set1_epi32(65535);
+    __m256i am = _mm256_min_epu32(a.val, m);
+    am = _v256_shuffle_odd_64(_mm256_packus_epi32(am, am));
+    v_store_low(ptr, v_uint16x16(am));
+}
+
+inline void v_pack_u_store(ushort* ptr, const v_int32x8& a)
+{ v_store_low(ptr, v_pack_u(a, a)); }
+
+
+template<int n> inline
+v_uint16x16 v_rshr_pack(const v_uint32x8& a, const v_uint32x8& b)
+{
+    // we assume that n > 0, and so the shifted 32-bit values can be treated as signed numbers.
+    v_uint32x8 delta = v256_setall_u32(1 << (n-1));
+    return v_pack_u(v_reinterpret_as_s32((a + delta) >> n),
+                    v_reinterpret_as_s32((b + delta) >> n));
+}
+
+template<int n> inline
+void v_rshr_pack_store(ushort* ptr, const v_uint32x8& a)
+{
+    v_uint32x8 delta = v256_setall_u32(1 << (n-1));
+    v_pack_u_store(ptr, v_reinterpret_as_s32((a + delta) >> n));
+}
+
+template<int n> inline
+v_uint16x16 v_rshr_pack_u(const v_int32x8& a, const v_int32x8& b)
+{
+    v_int32x8 delta = v256_setall_s32(1 << (n-1));
+    return v_pack_u((a + delta) >> n, (b + delta) >> n);
+}
+
+template<int n> inline
+void v_rshr_pack_u_store(ushort* ptr, const v_int32x8& a)
+{
+    v_int32x8 delta = v256_setall_s32(1 << (n-1));
+    v_pack_u_store(ptr, (a + delta) >> n);
+}
+
+template<int n> inline
+v_int16x16 v_rshr_pack(const v_int32x8& a, const v_int32x8& b)
+{
+    v_int32x8 delta = v256_setall_s32(1 << (n-1));
+    return v_pack((a + delta) >> n, (b + delta) >> n);
+}
+
+template<int n> inline
+void v_rshr_pack_store(short* ptr, const v_int32x8& a)
+{
+    v_int32x8 delta = v256_setall_s32(1 << (n-1));
+    v_pack_store(ptr, (a + delta) >> n);
+}
+
+// 64
+// Non-saturating pack
+inline v_uint32x8 v_pack(const v_uint64x4& a, const v_uint64x4& b)
+{
+    __m256i a0 = _mm256_shuffle_epi32(a.val, _MM_SHUFFLE(0, 0, 2, 0));
+    __m256i b0 = _mm256_shuffle_epi32(b.val, _MM_SHUFFLE(0, 0, 2, 0));
+    __m256i ab = _mm256_unpacklo_epi64(a0, b0); // a0, a1, b0, b1, a2, a3, b2, b3
+    return v_uint32x8(_v256_shuffle_odd_64(ab));
+}
+
+inline v_int32x8 v_pack(const v_int64x4& a, const v_int64x4& b)
+{ return v_reinterpret_as_s32(v_pack(v_reinterpret_as_u64(a), v_reinterpret_as_u64(b))); }
+
+inline void v_pack_store(unsigned* ptr, const v_uint64x4& a)
+{
+    __m256i a0 = _mm256_shuffle_epi32(a.val, _MM_SHUFFLE(0, 0, 2, 0));
+    v_store_low(ptr, v_uint32x8(_v256_shuffle_odd_64(a0)));
+}
+
+inline void v_pack_store(int* ptr, const v_int64x4& b)
+{ v_pack_store((unsigned*)ptr, v_reinterpret_as_u64(b)); }
+
+template<int n> inline
+v_uint32x8 v_rshr_pack(const v_uint64x4& a, const v_uint64x4& b)
+{
+    v_uint64x4 delta = v256_setall_u64((uint64)1 << (n-1));
+    return v_pack((a + delta) >> n, (b + delta) >> n);
+}
+
+template<int n> inline
+void v_rshr_pack_store(unsigned* ptr, const v_uint64x4& a)
+{
+    v_uint64x4 delta = v256_setall_u64((uint64)1 << (n-1));
+    v_pack_store(ptr, (a + delta) >> n);
+}
+
+template<int n> inline
+v_int32x8 v_rshr_pack(const v_int64x4& a, const v_int64x4& b)
+{
+    v_int64x4 delta = v256_setall_s64((int64)1 << (n-1));
+    return v_pack((a + delta) >> n, (b + delta) >> n);
+}
+
+template<int n> inline
+void v_rshr_pack_store(int* ptr, const v_int64x4& a)
+{
+    v_int64x4 delta = v256_setall_s64((int64)1 << (n-1));
+    v_pack_store(ptr, (a + delta) >> n);
+}
+
+// pack boolean
+inline v_uint8x32 v_pack_b(const v_uint16x16& a, const v_uint16x16& b)
+{
+    __m256i ab = _mm256_packs_epi16(a.val, b.val);
+    return v_uint8x32(_v256_shuffle_odd_64(ab));
+}
+
+inline v_uint8x32 v_pack_b(const v_uint32x8& a, const v_uint32x8& b,
+                           const v_uint32x8& c, const v_uint32x8& d)
+{
+    __m256i ab = _mm256_packs_epi32(a.val, b.val);
+    __m256i cd = _mm256_packs_epi32(c.val, d.val);
+
+    __m256i abcd = _v256_shuffle_odd_64(_mm256_packs_epi16(ab, cd));
+    return v_uint8x32(_mm256_shuffle_epi32(abcd, _MM_SHUFFLE(3, 1, 2, 0)));
+}
+
+inline v_uint8x32 v_pack_b(const v_uint64x4& a, const v_uint64x4& b, const v_uint64x4& c,
+                           const v_uint64x4& d, const v_uint64x4& e, const v_uint64x4& f,
+                           const v_uint64x4& g, const v_uint64x4& h)
+{
+    __m256i ab = _mm256_packs_epi32(a.val, b.val);
+    __m256i cd = _mm256_packs_epi32(c.val, d.val);
+    __m256i ef = _mm256_packs_epi32(e.val, f.val);
+    __m256i gh = _mm256_packs_epi32(g.val, h.val);
+
+    __m256i abcd = _mm256_packs_epi32(ab, cd);
+    __m256i efgh = _mm256_packs_epi32(ef, gh);
+    __m256i pkall = _v256_shuffle_odd_64(_mm256_packs_epi16(abcd, efgh));
+
+    __m256i rev = _mm256_alignr_epi8(pkall, pkall, 8);
+    return v_uint8x32(_mm256_unpacklo_epi16(pkall, rev));
+}
+
+/* Recombine */
+// its up there with load and store operations
+
+/* Extract */
+#define OPENCV_HAL_IMPL_AVX_EXTRACT(_Tpvec) \
+    template<int s> \
+    inline _Tpvec v_extract(const _Tpvec& a, const _Tpvec& b) \
+    { return v_rotate_right<s>(a, b); }
+
+OPENCV_HAL_IMPL_AVX_EXTRACT(v_uint8x32)
+OPENCV_HAL_IMPL_AVX_EXTRACT(v_int8x32)
+OPENCV_HAL_IMPL_AVX_EXTRACT(v_uint16x16)
+OPENCV_HAL_IMPL_AVX_EXTRACT(v_int16x16)
+OPENCV_HAL_IMPL_AVX_EXTRACT(v_uint32x8)
+OPENCV_HAL_IMPL_AVX_EXTRACT(v_int32x8)
+OPENCV_HAL_IMPL_AVX_EXTRACT(v_uint64x4)
+OPENCV_HAL_IMPL_AVX_EXTRACT(v_int64x4)
+OPENCV_HAL_IMPL_AVX_EXTRACT(v_float32x8)
+OPENCV_HAL_IMPL_AVX_EXTRACT(v_float64x4)
+
+template<int i>
+inline uchar v_extract_n(v_uint8x32 a)
+{
+    return (uchar)_v256_extract_epi8<i>(a.val);
+}
+
+template<int i>
+inline schar v_extract_n(v_int8x32 a)
+{
+    return (schar)v_extract_n<i>(v_reinterpret_as_u8(a));
+}
+
+template<int i>
+inline ushort v_extract_n(v_uint16x16 a)
+{
+    return (ushort)_v256_extract_epi16<i>(a.val);
+}
+
+template<int i>
+inline short v_extract_n(v_int16x16 a)
+{
+    return (short)v_extract_n<i>(v_reinterpret_as_u16(a));
+}
+
+template<int i>
+inline uint v_extract_n(v_uint32x8 a)
+{
+    return (uint)_v256_extract_epi32<i>(a.val);
+}
+
+template<int i>
+inline int v_extract_n(v_int32x8 a)
+{
+    return (int)v_extract_n<i>(v_reinterpret_as_u32(a));
+}
+
+template<int i>
+inline uint64 v_extract_n(v_uint64x4 a)
+{
+    return (uint64)_v256_extract_epi64<i>(a.val);
+}
+
+template<int i>
+inline int64 v_extract_n(v_int64x4 v)
+{
+    return (int64)v_extract_n<i>(v_reinterpret_as_u64(v));
+}
+
+template<int i>
+inline float v_extract_n(v_float32x8 v)
+{
+    union { uint iv; float fv; } d;
+    d.iv = v_extract_n<i>(v_reinterpret_as_u32(v));
+    return d.fv;
+}
+
+template<int i>
+inline double v_extract_n(v_float64x4 v)
+{
+    union { uint64 iv; double dv; } d;
+    d.iv = v_extract_n<i>(v_reinterpret_as_u64(v));
+    return d.dv;
+}
+
+template<int i>
+inline v_uint32x8 v_broadcast_element(v_uint32x8 a)
+{
+    static const __m256i perm = _mm256_set1_epi32((char)i);
+    return v_uint32x8(_mm256_permutevar8x32_epi32(a.val, perm));
+}
+
+template<int i>
+inline v_int32x8 v_broadcast_element(const v_int32x8 &a)
+{ return v_reinterpret_as_s32(v_broadcast_element<i>(v_reinterpret_as_u32(a))); }
+
+template<int i>
+inline v_float32x8 v_broadcast_element(const v_float32x8 &a)
+{ return v_reinterpret_as_f32(v_broadcast_element<i>(v_reinterpret_as_u32(a))); }
+
+
+///////////////////// load deinterleave /////////////////////////////
+
+inline void v_load_deinterleave( const uchar* ptr, v_uint8x32& a, v_uint8x32& b )
+{
+    __m256i ab0 = _mm256_loadu_si256((const __m256i*)ptr);
+    __m256i ab1 = _mm256_loadu_si256((const __m256i*)(ptr + 32));
+
+    const __m256i sh = _mm256_setr_epi8(0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15,
+                                        0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15);
+    __m256i p0 = _mm256_shuffle_epi8(ab0, sh);
+    __m256i p1 = _mm256_shuffle_epi8(ab1, sh);
+    __m256i pl = _mm256_permute2x128_si256(p0, p1, 0 + 2*16);
+    __m256i ph = _mm256_permute2x128_si256(p0, p1, 1 + 3*16);
+    __m256i a0 = _mm256_unpacklo_epi64(pl, ph);
+    __m256i b0 = _mm256_unpackhi_epi64(pl, ph);
+    a = v_uint8x32(a0);
+    b = v_uint8x32(b0);
+}
+
+inline void v_load_deinterleave( const ushort* ptr, v_uint16x16& a, v_uint16x16& b )
+{
+
__m256i ab0 = _mm256_loadu_si256((const __m256i*)ptr); + __m256i ab1 = _mm256_loadu_si256((const __m256i*)(ptr + 16)); + + const __m256i sh = _mm256_setr_epi8(0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15, + 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15); + __m256i p0 = _mm256_shuffle_epi8(ab0, sh); + __m256i p1 = _mm256_shuffle_epi8(ab1, sh); + __m256i pl = _mm256_permute2x128_si256(p0, p1, 0 + 2*16); + __m256i ph = _mm256_permute2x128_si256(p0, p1, 1 + 3*16); + __m256i a0 = _mm256_unpacklo_epi64(pl, ph); + __m256i b0 = _mm256_unpackhi_epi64(pl, ph); + a = v_uint16x16(a0); + b = v_uint16x16(b0); +} + +inline void v_load_deinterleave( const unsigned* ptr, v_uint32x8& a, v_uint32x8& b ) +{ + __m256i ab0 = _mm256_loadu_si256((const __m256i*)ptr); + __m256i ab1 = _mm256_loadu_si256((const __m256i*)(ptr + 8)); + + const int sh = 0+2*4+1*16+3*64; + __m256i p0 = _mm256_shuffle_epi32(ab0, sh); + __m256i p1 = _mm256_shuffle_epi32(ab1, sh); + __m256i pl = _mm256_permute2x128_si256(p0, p1, 0 + 2*16); + __m256i ph = _mm256_permute2x128_si256(p0, p1, 1 + 3*16); + __m256i a0 = _mm256_unpacklo_epi64(pl, ph); + __m256i b0 = _mm256_unpackhi_epi64(pl, ph); + a = v_uint32x8(a0); + b = v_uint32x8(b0); +} + +inline void v_load_deinterleave( const uint64* ptr, v_uint64x4& a, v_uint64x4& b ) +{ + __m256i ab0 = _mm256_loadu_si256((const __m256i*)ptr); + __m256i ab1 = _mm256_loadu_si256((const __m256i*)(ptr + 4)); + + __m256i pl = _mm256_permute2x128_si256(ab0, ab1, 0 + 2*16); + __m256i ph = _mm256_permute2x128_si256(ab0, ab1, 1 + 3*16); + __m256i a0 = _mm256_unpacklo_epi64(pl, ph); + __m256i b0 = _mm256_unpackhi_epi64(pl, ph); + a = v_uint64x4(a0); + b = v_uint64x4(b0); +} + +inline void v_load_deinterleave( const uchar* ptr, v_uint8x32& a, v_uint8x32& b, v_uint8x32& c ) +{ + __m256i bgr0 = _mm256_loadu_si256((const __m256i*)ptr); + __m256i bgr1 = _mm256_loadu_si256((const __m256i*)(ptr + 32)); + __m256i bgr2 = _mm256_loadu_si256((const __m256i*)(ptr + 64)); + + __m256i s02_low = _mm256_permute2x128_si256(bgr0, bgr2, 0 + 2*16); + __m256i s02_high = _mm256_permute2x128_si256(bgr0, bgr2, 1 + 3*16); + + const __m256i m0 = _mm256_setr_epi8(0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, + 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0); + const __m256i m1 = _mm256_setr_epi8(0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, + -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1); + + __m256i b0 = _mm256_blendv_epi8(_mm256_blendv_epi8(s02_low, s02_high, m0), bgr1, m1); + __m256i g0 = _mm256_blendv_epi8(_mm256_blendv_epi8(s02_high, s02_low, m1), bgr1, m0); + __m256i r0 = _mm256_blendv_epi8(_mm256_blendv_epi8(bgr1, s02_low, m0), s02_high, m1); + + const __m256i + sh_b = _mm256_setr_epi8(0, 3, 6, 9, 12, 15, 2, 5, 8, 11, 14, 1, 4, 7, 10, 13, + 0, 3, 6, 9, 12, 15, 2, 5, 8, 11, 14, 1, 4, 7, 10, 13), + sh_g = _mm256_setr_epi8(1, 4, 7, 10, 13, 0, 3, 6, 9, 12, 15, 2, 5, 8, 11, 14, + 1, 4, 7, 10, 13, 0, 3, 6, 9, 12, 15, 2, 5, 8, 11, 14), + sh_r = _mm256_setr_epi8(2, 5, 8, 11, 14, 1, 4, 7, 10, 13, 0, 3, 6, 9, 12, 15, + 2, 5, 8, 11, 14, 1, 4, 7, 10, 13, 0, 3, 6, 9, 12, 15); + b0 = _mm256_shuffle_epi8(b0, sh_b); + g0 = _mm256_shuffle_epi8(g0, sh_g); + r0 = _mm256_shuffle_epi8(r0, sh_r); + + a = v_uint8x32(b0); + b = v_uint8x32(g0); + c = v_uint8x32(r0); +} + +inline void v_load_deinterleave( const ushort* ptr, v_uint16x16& a, v_uint16x16& b, v_uint16x16& c ) +{ + __m256i bgr0 = _mm256_loadu_si256((const __m256i*)ptr); + __m256i bgr1 = _mm256_loadu_si256((const __m256i*)(ptr + 16)); + __m256i bgr2 = 
_mm256_loadu_si256((const __m256i*)(ptr + 32)); + + __m256i s02_low = _mm256_permute2x128_si256(bgr0, bgr2, 0 + 2*16); + __m256i s02_high = _mm256_permute2x128_si256(bgr0, bgr2, 1 + 3*16); + + const __m256i m0 = _mm256_setr_epi8(0, 0, -1, -1, 0, 0, 0, 0, -1, -1, 0, 0, 0, 0, -1, -1, + 0, 0, 0, 0, -1, -1, 0, 0, 0, 0, -1, -1, 0, 0, 0, 0); + const __m256i m1 = _mm256_setr_epi8(0, 0, 0, 0, -1, -1, 0, 0, 0, 0, -1, -1, 0, 0, 0, 0, + -1, -1, 0, 0, 0, 0, -1, -1, 0, 0, 0, 0, -1, -1, 0, 0); + __m256i b0 = _mm256_blendv_epi8(_mm256_blendv_epi8(s02_low, s02_high, m0), bgr1, m1); + __m256i g0 = _mm256_blendv_epi8(_mm256_blendv_epi8(bgr1, s02_low, m0), s02_high, m1); + __m256i r0 = _mm256_blendv_epi8(_mm256_blendv_epi8(s02_high, s02_low, m1), bgr1, m0); + const __m256i sh_b = _mm256_setr_epi8(0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5, 10, 11, + 0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5, 10, 11); + const __m256i sh_g = _mm256_setr_epi8(2, 3, 8, 9, 14, 15, 4, 5, 10, 11, 0, 1, 6, 7, 12, 13, + 2, 3, 8, 9, 14, 15, 4, 5, 10, 11, 0, 1, 6, 7, 12, 13); + const __m256i sh_r = _mm256_setr_epi8(4, 5, 10, 11, 0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, + 4, 5, 10, 11, 0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15); + b0 = _mm256_shuffle_epi8(b0, sh_b); + g0 = _mm256_shuffle_epi8(g0, sh_g); + r0 = _mm256_shuffle_epi8(r0, sh_r); + + a = v_uint16x16(b0); + b = v_uint16x16(g0); + c = v_uint16x16(r0); +} + +inline void v_load_deinterleave( const unsigned* ptr, v_uint32x8& a, v_uint32x8& b, v_uint32x8& c ) +{ + __m256i bgr0 = _mm256_loadu_si256((const __m256i*)ptr); + __m256i bgr1 = _mm256_loadu_si256((const __m256i*)(ptr + 8)); + __m256i bgr2 = _mm256_loadu_si256((const __m256i*)(ptr + 16)); + + __m256i s02_low = _mm256_permute2x128_si256(bgr0, bgr2, 0 + 2*16); + __m256i s02_high = _mm256_permute2x128_si256(bgr0, bgr2, 1 + 3*16); + + __m256i b0 = _mm256_blend_epi32(_mm256_blend_epi32(s02_low, s02_high, 0x24), bgr1, 0x92); + __m256i g0 = _mm256_blend_epi32(_mm256_blend_epi32(s02_high, s02_low, 0x92), bgr1, 0x24); + __m256i r0 = _mm256_blend_epi32(_mm256_blend_epi32(bgr1, s02_low, 0x24), s02_high, 0x92); + + b0 = _mm256_shuffle_epi32(b0, 0x6c); + g0 = _mm256_shuffle_epi32(g0, 0xb1); + r0 = _mm256_shuffle_epi32(r0, 0xc6); + + a = v_uint32x8(b0); + b = v_uint32x8(g0); + c = v_uint32x8(r0); +} + +inline void v_load_deinterleave( const uint64* ptr, v_uint64x4& a, v_uint64x4& b, v_uint64x4& c ) +{ + __m256i bgr0 = _mm256_loadu_si256((const __m256i*)ptr); + __m256i bgr1 = _mm256_loadu_si256((const __m256i*)(ptr + 4)); + __m256i bgr2 = _mm256_loadu_si256((const __m256i*)(ptr + 8)); + + __m256i s01 = _mm256_blend_epi32(bgr0, bgr1, 0xf0); + __m256i s12 = _mm256_blend_epi32(bgr1, bgr2, 0xf0); + __m256i s20r = _mm256_permute4x64_epi64(_mm256_blend_epi32(bgr2, bgr0, 0xf0), 0x1b); + __m256i b0 = _mm256_unpacklo_epi64(s01, s20r); + __m256i g0 = _mm256_alignr_epi8(s12, s01, 8); + __m256i r0 = _mm256_unpackhi_epi64(s20r, s12); + + a = v_uint64x4(b0); + b = v_uint64x4(g0); + c = v_uint64x4(r0); +} + +inline void v_load_deinterleave( const uchar* ptr, v_uint8x32& a, v_uint8x32& b, v_uint8x32& c, v_uint8x32& d ) +{ + __m256i bgr0 = _mm256_loadu_si256((const __m256i*)ptr); + __m256i bgr1 = _mm256_loadu_si256((const __m256i*)(ptr + 32)); + __m256i bgr2 = _mm256_loadu_si256((const __m256i*)(ptr + 64)); + __m256i bgr3 = _mm256_loadu_si256((const __m256i*)(ptr + 96)); + const __m256i sh = _mm256_setr_epi8(0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15, + 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15); + + __m256i p0 = 
_mm256_shuffle_epi8(bgr0, sh); + __m256i p1 = _mm256_shuffle_epi8(bgr1, sh); + __m256i p2 = _mm256_shuffle_epi8(bgr2, sh); + __m256i p3 = _mm256_shuffle_epi8(bgr3, sh); + + __m256i p01l = _mm256_unpacklo_epi32(p0, p1); + __m256i p01h = _mm256_unpackhi_epi32(p0, p1); + __m256i p23l = _mm256_unpacklo_epi32(p2, p3); + __m256i p23h = _mm256_unpackhi_epi32(p2, p3); + + __m256i pll = _mm256_permute2x128_si256(p01l, p23l, 0 + 2*16); + __m256i plh = _mm256_permute2x128_si256(p01l, p23l, 1 + 3*16); + __m256i phl = _mm256_permute2x128_si256(p01h, p23h, 0 + 2*16); + __m256i phh = _mm256_permute2x128_si256(p01h, p23h, 1 + 3*16); + + __m256i b0 = _mm256_unpacklo_epi32(pll, plh); + __m256i g0 = _mm256_unpackhi_epi32(pll, plh); + __m256i r0 = _mm256_unpacklo_epi32(phl, phh); + __m256i a0 = _mm256_unpackhi_epi32(phl, phh); + + a = v_uint8x32(b0); + b = v_uint8x32(g0); + c = v_uint8x32(r0); + d = v_uint8x32(a0); +} + +inline void v_load_deinterleave( const ushort* ptr, v_uint16x16& a, v_uint16x16& b, v_uint16x16& c, v_uint16x16& d ) +{ + __m256i bgr0 = _mm256_loadu_si256((const __m256i*)ptr); + __m256i bgr1 = _mm256_loadu_si256((const __m256i*)(ptr + 16)); + __m256i bgr2 = _mm256_loadu_si256((const __m256i*)(ptr + 32)); + __m256i bgr3 = _mm256_loadu_si256((const __m256i*)(ptr + 48)); + const __m256i sh = _mm256_setr_epi8(0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15, + 0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15); + __m256i p0 = _mm256_shuffle_epi8(bgr0, sh); + __m256i p1 = _mm256_shuffle_epi8(bgr1, sh); + __m256i p2 = _mm256_shuffle_epi8(bgr2, sh); + __m256i p3 = _mm256_shuffle_epi8(bgr3, sh); + + __m256i p01l = _mm256_unpacklo_epi32(p0, p1); + __m256i p01h = _mm256_unpackhi_epi32(p0, p1); + __m256i p23l = _mm256_unpacklo_epi32(p2, p3); + __m256i p23h = _mm256_unpackhi_epi32(p2, p3); + + __m256i pll = _mm256_permute2x128_si256(p01l, p23l, 0 + 2*16); + __m256i plh = _mm256_permute2x128_si256(p01l, p23l, 1 + 3*16); + __m256i phl = _mm256_permute2x128_si256(p01h, p23h, 0 + 2*16); + __m256i phh = _mm256_permute2x128_si256(p01h, p23h, 1 + 3*16); + + __m256i b0 = _mm256_unpacklo_epi32(pll, plh); + __m256i g0 = _mm256_unpackhi_epi32(pll, plh); + __m256i r0 = _mm256_unpacklo_epi32(phl, phh); + __m256i a0 = _mm256_unpackhi_epi32(phl, phh); + + a = v_uint16x16(b0); + b = v_uint16x16(g0); + c = v_uint16x16(r0); + d = v_uint16x16(a0); +} + +inline void v_load_deinterleave( const unsigned* ptr, v_uint32x8& a, v_uint32x8& b, v_uint32x8& c, v_uint32x8& d ) +{ + __m256i p0 = _mm256_loadu_si256((const __m256i*)ptr); + __m256i p1 = _mm256_loadu_si256((const __m256i*)(ptr + 8)); + __m256i p2 = _mm256_loadu_si256((const __m256i*)(ptr + 16)); + __m256i p3 = _mm256_loadu_si256((const __m256i*)(ptr + 24)); + + __m256i p01l = _mm256_unpacklo_epi32(p0, p1); + __m256i p01h = _mm256_unpackhi_epi32(p0, p1); + __m256i p23l = _mm256_unpacklo_epi32(p2, p3); + __m256i p23h = _mm256_unpackhi_epi32(p2, p3); + + __m256i pll = _mm256_permute2x128_si256(p01l, p23l, 0 + 2*16); + __m256i plh = _mm256_permute2x128_si256(p01l, p23l, 1 + 3*16); + __m256i phl = _mm256_permute2x128_si256(p01h, p23h, 0 + 2*16); + __m256i phh = _mm256_permute2x128_si256(p01h, p23h, 1 + 3*16); + + __m256i b0 = _mm256_unpacklo_epi32(pll, plh); + __m256i g0 = _mm256_unpackhi_epi32(pll, plh); + __m256i r0 = _mm256_unpacklo_epi32(phl, phh); + __m256i a0 = _mm256_unpackhi_epi32(phl, phh); + + a = v_uint32x8(b0); + b = v_uint32x8(g0); + c = v_uint32x8(r0); + d = v_uint32x8(a0); +} + +inline void v_load_deinterleave( const uint64* ptr, v_uint64x4& a, 
v_uint64x4& b, v_uint64x4& c, v_uint64x4& d ) +{ + __m256i bgra0 = _mm256_loadu_si256((const __m256i*)ptr); + __m256i bgra1 = _mm256_loadu_si256((const __m256i*)(ptr + 4)); + __m256i bgra2 = _mm256_loadu_si256((const __m256i*)(ptr + 8)); + __m256i bgra3 = _mm256_loadu_si256((const __m256i*)(ptr + 12)); + + __m256i l02 = _mm256_permute2x128_si256(bgra0, bgra2, 0 + 2*16); + __m256i h02 = _mm256_permute2x128_si256(bgra0, bgra2, 1 + 3*16); + __m256i l13 = _mm256_permute2x128_si256(bgra1, bgra3, 0 + 2*16); + __m256i h13 = _mm256_permute2x128_si256(bgra1, bgra3, 1 + 3*16); + + __m256i b0 = _mm256_unpacklo_epi64(l02, l13); + __m256i g0 = _mm256_unpackhi_epi64(l02, l13); + __m256i r0 = _mm256_unpacklo_epi64(h02, h13); + __m256i a0 = _mm256_unpackhi_epi64(h02, h13); + + a = v_uint64x4(b0); + b = v_uint64x4(g0); + c = v_uint64x4(r0); + d = v_uint64x4(a0); +} + +///////////////////////////// store interleave ///////////////////////////////////// + +inline void v_store_interleave( uchar* ptr, const v_uint8x32& x, const v_uint8x32& y, + hal::StoreMode mode=hal::STORE_UNALIGNED ) +{ + __m256i xy_l = _mm256_unpacklo_epi8(x.val, y.val); + __m256i xy_h = _mm256_unpackhi_epi8(x.val, y.val); + + __m256i xy0 = _mm256_permute2x128_si256(xy_l, xy_h, 0 + 2*16); + __m256i xy1 = _mm256_permute2x128_si256(xy_l, xy_h, 1 + 3*16); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm256_stream_si256((__m256i*)ptr, xy0); + _mm256_stream_si256((__m256i*)(ptr + 32), xy1); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm256_store_si256((__m256i*)ptr, xy0); + _mm256_store_si256((__m256i*)(ptr + 32), xy1); + } + else + { + _mm256_storeu_si256((__m256i*)ptr, xy0); + _mm256_storeu_si256((__m256i*)(ptr + 32), xy1); + } +} + +inline void v_store_interleave( ushort* ptr, const v_uint16x16& x, const v_uint16x16& y, + hal::StoreMode mode=hal::STORE_UNALIGNED ) +{ + __m256i xy_l = _mm256_unpacklo_epi16(x.val, y.val); + __m256i xy_h = _mm256_unpackhi_epi16(x.val, y.val); + + __m256i xy0 = _mm256_permute2x128_si256(xy_l, xy_h, 0 + 2*16); + __m256i xy1 = _mm256_permute2x128_si256(xy_l, xy_h, 1 + 3*16); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm256_stream_si256((__m256i*)ptr, xy0); + _mm256_stream_si256((__m256i*)(ptr + 16), xy1); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm256_store_si256((__m256i*)ptr, xy0); + _mm256_store_si256((__m256i*)(ptr + 16), xy1); + } + else + { + _mm256_storeu_si256((__m256i*)ptr, xy0); + _mm256_storeu_si256((__m256i*)(ptr + 16), xy1); + } +} + +inline void v_store_interleave( unsigned* ptr, const v_uint32x8& x, const v_uint32x8& y, + hal::StoreMode mode=hal::STORE_UNALIGNED ) +{ + __m256i xy_l = _mm256_unpacklo_epi32(x.val, y.val); + __m256i xy_h = _mm256_unpackhi_epi32(x.val, y.val); + + __m256i xy0 = _mm256_permute2x128_si256(xy_l, xy_h, 0 + 2*16); + __m256i xy1 = _mm256_permute2x128_si256(xy_l, xy_h, 1 + 3*16); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm256_stream_si256((__m256i*)ptr, xy0); + _mm256_stream_si256((__m256i*)(ptr + 8), xy1); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm256_store_si256((__m256i*)ptr, xy0); + _mm256_store_si256((__m256i*)(ptr + 8), xy1); + } + else + { + _mm256_storeu_si256((__m256i*)ptr, xy0); + _mm256_storeu_si256((__m256i*)(ptr + 8), xy1); + } +} + +inline void v_store_interleave( uint64* ptr, const v_uint64x4& x, const v_uint64x4& y, + hal::StoreMode mode=hal::STORE_UNALIGNED ) +{ + __m256i xy_l = _mm256_unpacklo_epi64(x.val, y.val); + __m256i xy_h = _mm256_unpackhi_epi64(x.val, y.val); + + __m256i xy0 = 
_mm256_permute2x128_si256(xy_l, xy_h, 0 + 2*16); + __m256i xy1 = _mm256_permute2x128_si256(xy_l, xy_h, 1 + 3*16); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm256_stream_si256((__m256i*)ptr, xy0); + _mm256_stream_si256((__m256i*)(ptr + 4), xy1); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm256_store_si256((__m256i*)ptr, xy0); + _mm256_store_si256((__m256i*)(ptr + 4), xy1); + } + else + { + _mm256_storeu_si256((__m256i*)ptr, xy0); + _mm256_storeu_si256((__m256i*)(ptr + 4), xy1); + } +} + +inline void v_store_interleave( uchar* ptr, const v_uint8x32& a, const v_uint8x32& b, const v_uint8x32& c, + hal::StoreMode mode=hal::STORE_UNALIGNED ) +{ + const __m256i sh_b = _mm256_setr_epi8( + 0, 11, 6, 1, 12, 7, 2, 13, 8, 3, 14, 9, 4, 15, 10, 5, + 0, 11, 6, 1, 12, 7, 2, 13, 8, 3, 14, 9, 4, 15, 10, 5); + const __m256i sh_g = _mm256_setr_epi8( + 5, 0, 11, 6, 1, 12, 7, 2, 13, 8, 3, 14, 9, 4, 15, 10, + 5, 0, 11, 6, 1, 12, 7, 2, 13, 8, 3, 14, 9, 4, 15, 10); + const __m256i sh_r = _mm256_setr_epi8( + 10, 5, 0, 11, 6, 1, 12, 7, 2, 13, 8, 3, 14, 9, 4, 15, + 10, 5, 0, 11, 6, 1, 12, 7, 2, 13, 8, 3, 14, 9, 4, 15); + + __m256i b0 = _mm256_shuffle_epi8(a.val, sh_b); + __m256i g0 = _mm256_shuffle_epi8(b.val, sh_g); + __m256i r0 = _mm256_shuffle_epi8(c.val, sh_r); + + const __m256i m0 = _mm256_setr_epi8(0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, + 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0); + const __m256i m1 = _mm256_setr_epi8(0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, + 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0); + + __m256i p0 = _mm256_blendv_epi8(_mm256_blendv_epi8(b0, g0, m0), r0, m1); + __m256i p1 = _mm256_blendv_epi8(_mm256_blendv_epi8(g0, r0, m0), b0, m1); + __m256i p2 = _mm256_blendv_epi8(_mm256_blendv_epi8(r0, b0, m0), g0, m1); + + __m256i bgr0 = _mm256_permute2x128_si256(p0, p1, 0 + 2*16); + __m256i bgr1 = _mm256_permute2x128_si256(p2, p0, 0 + 3*16); + __m256i bgr2 = _mm256_permute2x128_si256(p1, p2, 1 + 3*16); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm256_stream_si256((__m256i*)ptr, bgr0); + _mm256_stream_si256((__m256i*)(ptr + 32), bgr1); + _mm256_stream_si256((__m256i*)(ptr + 64), bgr2); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm256_store_si256((__m256i*)ptr, bgr0); + _mm256_store_si256((__m256i*)(ptr + 32), bgr1); + _mm256_store_si256((__m256i*)(ptr + 64), bgr2); + } + else + { + _mm256_storeu_si256((__m256i*)ptr, bgr0); + _mm256_storeu_si256((__m256i*)(ptr + 32), bgr1); + _mm256_storeu_si256((__m256i*)(ptr + 64), bgr2); + } +} + +inline void v_store_interleave( ushort* ptr, const v_uint16x16& a, const v_uint16x16& b, const v_uint16x16& c, + hal::StoreMode mode=hal::STORE_UNALIGNED ) +{ + const __m256i sh_b = _mm256_setr_epi8( + 0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5, 10, 11, + 0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5, 10, 11); + const __m256i sh_g = _mm256_setr_epi8( + 10, 11, 0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5, + 10, 11, 0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5); + const __m256i sh_r = _mm256_setr_epi8( + 4, 5, 10, 11, 0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, + 4, 5, 10, 11, 0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15); + + __m256i b0 = _mm256_shuffle_epi8(a.val, sh_b); + __m256i g0 = _mm256_shuffle_epi8(b.val, sh_g); + __m256i r0 = _mm256_shuffle_epi8(c.val, sh_r); + + const __m256i m0 = _mm256_setr_epi8(0, 0, -1, -1, 0, 0, 0, 0, -1, -1, 0, 0, 0, 0, -1, -1, + 0, 0, 0, 0, -1, -1, 0, 0, 0, 0, -1, -1, 0, 0, 0, 0); + const __m256i m1 = _mm256_setr_epi8(0, 0, 0, 0, -1, -1, 0, 0, 0, 0, -1, -1, 0, 0, 0, 0, + 
-1, -1, 0, 0, 0, 0, -1, -1, 0, 0, 0, 0, -1, -1, 0, 0); + + __m256i p0 = _mm256_blendv_epi8(_mm256_blendv_epi8(b0, g0, m0), r0, m1); + __m256i p1 = _mm256_blendv_epi8(_mm256_blendv_epi8(g0, r0, m0), b0, m1); + __m256i p2 = _mm256_blendv_epi8(_mm256_blendv_epi8(r0, b0, m0), g0, m1); + + __m256i bgr0 = _mm256_permute2x128_si256(p0, p2, 0 + 2*16); + //__m256i bgr1 = p1; + __m256i bgr2 = _mm256_permute2x128_si256(p0, p2, 1 + 3*16); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm256_stream_si256((__m256i*)ptr, bgr0); + _mm256_stream_si256((__m256i*)(ptr + 16), p1); + _mm256_stream_si256((__m256i*)(ptr + 32), bgr2); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm256_store_si256((__m256i*)ptr, bgr0); + _mm256_store_si256((__m256i*)(ptr + 16), p1); + _mm256_store_si256((__m256i*)(ptr + 32), bgr2); + } + else + { + _mm256_storeu_si256((__m256i*)ptr, bgr0); + _mm256_storeu_si256((__m256i*)(ptr + 16), p1); + _mm256_storeu_si256((__m256i*)(ptr + 32), bgr2); + } +} + +inline void v_store_interleave( unsigned* ptr, const v_uint32x8& a, const v_uint32x8& b, const v_uint32x8& c, + hal::StoreMode mode=hal::STORE_UNALIGNED ) +{ + __m256i b0 = _mm256_shuffle_epi32(a.val, 0x6c); + __m256i g0 = _mm256_shuffle_epi32(b.val, 0xb1); + __m256i r0 = _mm256_shuffle_epi32(c.val, 0xc6); + + __m256i p0 = _mm256_blend_epi32(_mm256_blend_epi32(b0, g0, 0x92), r0, 0x24); + __m256i p1 = _mm256_blend_epi32(_mm256_blend_epi32(g0, r0, 0x92), b0, 0x24); + __m256i p2 = _mm256_blend_epi32(_mm256_blend_epi32(r0, b0, 0x92), g0, 0x24); + + __m256i bgr0 = _mm256_permute2x128_si256(p0, p1, 0 + 2*16); + //__m256i bgr1 = p2; + __m256i bgr2 = _mm256_permute2x128_si256(p0, p1, 1 + 3*16); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm256_stream_si256((__m256i*)ptr, bgr0); + _mm256_stream_si256((__m256i*)(ptr + 8), p2); + _mm256_stream_si256((__m256i*)(ptr + 16), bgr2); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm256_store_si256((__m256i*)ptr, bgr0); + _mm256_store_si256((__m256i*)(ptr + 8), p2); + _mm256_store_si256((__m256i*)(ptr + 16), bgr2); + } + else + { + _mm256_storeu_si256((__m256i*)ptr, bgr0); + _mm256_storeu_si256((__m256i*)(ptr + 8), p2); + _mm256_storeu_si256((__m256i*)(ptr + 16), bgr2); + } +} + +inline void v_store_interleave( uint64* ptr, const v_uint64x4& a, const v_uint64x4& b, const v_uint64x4& c, + hal::StoreMode mode=hal::STORE_UNALIGNED ) +{ + __m256i s01 = _mm256_unpacklo_epi64(a.val, b.val); + __m256i s12 = _mm256_unpackhi_epi64(b.val, c.val); + __m256i s20 = _mm256_blend_epi32(c.val, a.val, 0xcc); + + __m256i bgr0 = _mm256_permute2x128_si256(s01, s20, 0 + 2*16); + __m256i bgr1 = _mm256_blend_epi32(s01, s12, 0x0f); + __m256i bgr2 = _mm256_permute2x128_si256(s20, s12, 1 + 3*16); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm256_stream_si256((__m256i*)ptr, bgr0); + _mm256_stream_si256((__m256i*)(ptr + 4), bgr1); + _mm256_stream_si256((__m256i*)(ptr + 8), bgr2); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm256_store_si256((__m256i*)ptr, bgr0); + _mm256_store_si256((__m256i*)(ptr + 4), bgr1); + _mm256_store_si256((__m256i*)(ptr + 8), bgr2); + } + else + { + _mm256_storeu_si256((__m256i*)ptr, bgr0); + _mm256_storeu_si256((__m256i*)(ptr + 4), bgr1); + _mm256_storeu_si256((__m256i*)(ptr + 8), bgr2); + } +} + +inline void v_store_interleave( uchar* ptr, const v_uint8x32& a, const v_uint8x32& b, + const v_uint8x32& c, const v_uint8x32& d, + hal::StoreMode mode=hal::STORE_UNALIGNED ) +{ + __m256i bg0 = _mm256_unpacklo_epi8(a.val, b.val); + __m256i bg1 = _mm256_unpackhi_epi8(a.val, 
b.val); + __m256i ra0 = _mm256_unpacklo_epi8(c.val, d.val); + __m256i ra1 = _mm256_unpackhi_epi8(c.val, d.val); + + __m256i bgra0_ = _mm256_unpacklo_epi16(bg0, ra0); + __m256i bgra1_ = _mm256_unpackhi_epi16(bg0, ra0); + __m256i bgra2_ = _mm256_unpacklo_epi16(bg1, ra1); + __m256i bgra3_ = _mm256_unpackhi_epi16(bg1, ra1); + + __m256i bgra0 = _mm256_permute2x128_si256(bgra0_, bgra1_, 0 + 2*16); + __m256i bgra2 = _mm256_permute2x128_si256(bgra0_, bgra1_, 1 + 3*16); + __m256i bgra1 = _mm256_permute2x128_si256(bgra2_, bgra3_, 0 + 2*16); + __m256i bgra3 = _mm256_permute2x128_si256(bgra2_, bgra3_, 1 + 3*16); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm256_stream_si256((__m256i*)ptr, bgra0); + _mm256_stream_si256((__m256i*)(ptr + 32), bgra1); + _mm256_stream_si256((__m256i*)(ptr + 64), bgra2); + _mm256_stream_si256((__m256i*)(ptr + 96), bgra3); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm256_store_si256((__m256i*)ptr, bgra0); + _mm256_store_si256((__m256i*)(ptr + 32), bgra1); + _mm256_store_si256((__m256i*)(ptr + 64), bgra2); + _mm256_store_si256((__m256i*)(ptr + 96), bgra3); + } + else + { + _mm256_storeu_si256((__m256i*)ptr, bgra0); + _mm256_storeu_si256((__m256i*)(ptr + 32), bgra1); + _mm256_storeu_si256((__m256i*)(ptr + 64), bgra2); + _mm256_storeu_si256((__m256i*)(ptr + 96), bgra3); + } +} + +inline void v_store_interleave( ushort* ptr, const v_uint16x16& a, const v_uint16x16& b, + const v_uint16x16& c, const v_uint16x16& d, + hal::StoreMode mode=hal::STORE_UNALIGNED ) +{ + __m256i bg0 = _mm256_unpacklo_epi16(a.val, b.val); + __m256i bg1 = _mm256_unpackhi_epi16(a.val, b.val); + __m256i ra0 = _mm256_unpacklo_epi16(c.val, d.val); + __m256i ra1 = _mm256_unpackhi_epi16(c.val, d.val); + + __m256i bgra0_ = _mm256_unpacklo_epi32(bg0, ra0); + __m256i bgra1_ = _mm256_unpackhi_epi32(bg0, ra0); + __m256i bgra2_ = _mm256_unpacklo_epi32(bg1, ra1); + __m256i bgra3_ = _mm256_unpackhi_epi32(bg1, ra1); + + __m256i bgra0 = _mm256_permute2x128_si256(bgra0_, bgra1_, 0 + 2*16); + __m256i bgra2 = _mm256_permute2x128_si256(bgra0_, bgra1_, 1 + 3*16); + __m256i bgra1 = _mm256_permute2x128_si256(bgra2_, bgra3_, 0 + 2*16); + __m256i bgra3 = _mm256_permute2x128_si256(bgra2_, bgra3_, 1 + 3*16); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm256_stream_si256((__m256i*)ptr, bgra0); + _mm256_stream_si256((__m256i*)(ptr + 16), bgra1); + _mm256_stream_si256((__m256i*)(ptr + 32), bgra2); + _mm256_stream_si256((__m256i*)(ptr + 48), bgra3); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm256_store_si256((__m256i*)ptr, bgra0); + _mm256_store_si256((__m256i*)(ptr + 16), bgra1); + _mm256_store_si256((__m256i*)(ptr + 32), bgra2); + _mm256_store_si256((__m256i*)(ptr + 48), bgra3); + } + else + { + _mm256_storeu_si256((__m256i*)ptr, bgra0); + _mm256_storeu_si256((__m256i*)(ptr + 16), bgra1); + _mm256_storeu_si256((__m256i*)(ptr + 32), bgra2); + _mm256_storeu_si256((__m256i*)(ptr + 48), bgra3); + } +} + +inline void v_store_interleave( unsigned* ptr, const v_uint32x8& a, const v_uint32x8& b, + const v_uint32x8& c, const v_uint32x8& d, + hal::StoreMode mode=hal::STORE_UNALIGNED ) +{ + __m256i bg0 = _mm256_unpacklo_epi32(a.val, b.val); + __m256i bg1 = _mm256_unpackhi_epi32(a.val, b.val); + __m256i ra0 = _mm256_unpacklo_epi32(c.val, d.val); + __m256i ra1 = _mm256_unpackhi_epi32(c.val, d.val); + + __m256i bgra0_ = _mm256_unpacklo_epi64(bg0, ra0); + __m256i bgra1_ = _mm256_unpackhi_epi64(bg0, ra0); + __m256i bgra2_ = _mm256_unpacklo_epi64(bg1, ra1); + __m256i bgra3_ = _mm256_unpackhi_epi64(bg1, ra1); + + 
__m256i bgra0 = _mm256_permute2x128_si256(bgra0_, bgra1_, 0 + 2*16); + __m256i bgra2 = _mm256_permute2x128_si256(bgra0_, bgra1_, 1 + 3*16); + __m256i bgra1 = _mm256_permute2x128_si256(bgra2_, bgra3_, 0 + 2*16); + __m256i bgra3 = _mm256_permute2x128_si256(bgra2_, bgra3_, 1 + 3*16); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm256_stream_si256((__m256i*)ptr, bgra0); + _mm256_stream_si256((__m256i*)(ptr + 8), bgra1); + _mm256_stream_si256((__m256i*)(ptr + 16), bgra2); + _mm256_stream_si256((__m256i*)(ptr + 24), bgra3); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm256_store_si256((__m256i*)ptr, bgra0); + _mm256_store_si256((__m256i*)(ptr + 8), bgra1); + _mm256_store_si256((__m256i*)(ptr + 16), bgra2); + _mm256_store_si256((__m256i*)(ptr + 24), bgra3); + } + else + { + _mm256_storeu_si256((__m256i*)ptr, bgra0); + _mm256_storeu_si256((__m256i*)(ptr + 8), bgra1); + _mm256_storeu_si256((__m256i*)(ptr + 16), bgra2); + _mm256_storeu_si256((__m256i*)(ptr + 24), bgra3); + } +} + +inline void v_store_interleave( uint64* ptr, const v_uint64x4& a, const v_uint64x4& b, + const v_uint64x4& c, const v_uint64x4& d, + hal::StoreMode mode=hal::STORE_UNALIGNED ) +{ + __m256i bg0 = _mm256_unpacklo_epi64(a.val, b.val); + __m256i bg1 = _mm256_unpackhi_epi64(a.val, b.val); + __m256i ra0 = _mm256_unpacklo_epi64(c.val, d.val); + __m256i ra1 = _mm256_unpackhi_epi64(c.val, d.val); + + __m256i bgra0 = _mm256_permute2x128_si256(bg0, ra0, 0 + 2*16); + __m256i bgra1 = _mm256_permute2x128_si256(bg1, ra1, 0 + 2*16); + __m256i bgra2 = _mm256_permute2x128_si256(bg0, ra0, 1 + 3*16); + __m256i bgra3 = _mm256_permute2x128_si256(bg1, ra1, 1 + 3*16); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm256_stream_si256((__m256i*)ptr, bgra0); + _mm256_stream_si256((__m256i*)(ptr + 4), bgra1); + _mm256_stream_si256((__m256i*)(ptr + 8), bgra2); + _mm256_stream_si256((__m256i*)(ptr + 12), bgra3); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm256_store_si256((__m256i*)ptr, bgra0); + _mm256_store_si256((__m256i*)(ptr + 4), bgra1); + _mm256_store_si256((__m256i*)(ptr + 8), bgra2); + _mm256_store_si256((__m256i*)(ptr + 12), bgra3); + } + else + { + _mm256_storeu_si256((__m256i*)ptr, bgra0); + _mm256_storeu_si256((__m256i*)(ptr + 4), bgra1); + _mm256_storeu_si256((__m256i*)(ptr + 8), bgra2); + _mm256_storeu_si256((__m256i*)(ptr + 12), bgra3); + } +} + +#define OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE(_Tpvec0, _Tp0, suffix0, _Tpvec1, _Tp1, suffix1) \ +inline void v_load_deinterleave( const _Tp0* ptr, _Tpvec0& a0, _Tpvec0& b0 ) \ +{ \ + _Tpvec1 a1, b1; \ + v_load_deinterleave((const _Tp1*)ptr, a1, b1); \ + a0 = v_reinterpret_as_##suffix0(a1); \ + b0 = v_reinterpret_as_##suffix0(b1); \ +} \ +inline void v_load_deinterleave( const _Tp0* ptr, _Tpvec0& a0, _Tpvec0& b0, _Tpvec0& c0 ) \ +{ \ + _Tpvec1 a1, b1, c1; \ + v_load_deinterleave((const _Tp1*)ptr, a1, b1, c1); \ + a0 = v_reinterpret_as_##suffix0(a1); \ + b0 = v_reinterpret_as_##suffix0(b1); \ + c0 = v_reinterpret_as_##suffix0(c1); \ +} \ +inline void v_load_deinterleave( const _Tp0* ptr, _Tpvec0& a0, _Tpvec0& b0, _Tpvec0& c0, _Tpvec0& d0 ) \ +{ \ + _Tpvec1 a1, b1, c1, d1; \ + v_load_deinterleave((const _Tp1*)ptr, a1, b1, c1, d1); \ + a0 = v_reinterpret_as_##suffix0(a1); \ + b0 = v_reinterpret_as_##suffix0(b1); \ + c0 = v_reinterpret_as_##suffix0(c1); \ + d0 = v_reinterpret_as_##suffix0(d1); \ +} \ +inline void v_store_interleave( _Tp0* ptr, const _Tpvec0& a0, const _Tpvec0& b0, \ + hal::StoreMode mode=hal::STORE_UNALIGNED ) \ +{ \ + _Tpvec1 a1 = 
v_reinterpret_as_##suffix1(a0); \ + _Tpvec1 b1 = v_reinterpret_as_##suffix1(b0); \ + v_store_interleave((_Tp1*)ptr, a1, b1, mode); \ +} \ +inline void v_store_interleave( _Tp0* ptr, const _Tpvec0& a0, const _Tpvec0& b0, const _Tpvec0& c0, \ + hal::StoreMode mode=hal::STORE_UNALIGNED ) \ +{ \ + _Tpvec1 a1 = v_reinterpret_as_##suffix1(a0); \ + _Tpvec1 b1 = v_reinterpret_as_##suffix1(b0); \ + _Tpvec1 c1 = v_reinterpret_as_##suffix1(c0); \ + v_store_interleave((_Tp1*)ptr, a1, b1, c1, mode); \ +} \ +inline void v_store_interleave( _Tp0* ptr, const _Tpvec0& a0, const _Tpvec0& b0, \ + const _Tpvec0& c0, const _Tpvec0& d0, \ + hal::StoreMode mode=hal::STORE_UNALIGNED ) \ +{ \ + _Tpvec1 a1 = v_reinterpret_as_##suffix1(a0); \ + _Tpvec1 b1 = v_reinterpret_as_##suffix1(b0); \ + _Tpvec1 c1 = v_reinterpret_as_##suffix1(c0); \ + _Tpvec1 d1 = v_reinterpret_as_##suffix1(d0); \ + v_store_interleave((_Tp1*)ptr, a1, b1, c1, d1, mode); \ +} + +OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE(v_int8x32, schar, s8, v_uint8x32, uchar, u8) +OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE(v_int16x16, short, s16, v_uint16x16, ushort, u16) +OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE(v_int32x8, int, s32, v_uint32x8, unsigned, u32) +OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE(v_float32x8, float, f32, v_uint32x8, unsigned, u32) +OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE(v_int64x4, int64, s64, v_uint64x4, uint64, u64) +OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE(v_float64x4, double, f64, v_uint64x4, uint64, u64) + +// +// FP16 +// + +inline v_float32x8 v256_load_expand(const float16_t* ptr) +{ +#if CV_FP16 + return v_float32x8(_mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)ptr))); +#else + float CV_DECL_ALIGNED(32) buf[8]; + for (int i = 0; i < 8; i++) + buf[i] = (float)ptr[i]; + return v256_load_aligned(buf); +#endif +} + +inline void v_pack_store(float16_t* ptr, const v_float32x8& a) +{ +#if CV_FP16 + __m128i ah = _mm256_cvtps_ph(a.val, 0); + _mm_storeu_si128((__m128i*)ptr, ah); +#else + float CV_DECL_ALIGNED(32) buf[8]; + v_store_aligned(buf, a); + for (int i = 0; i < 8; i++) + ptr[i] = float16_t(buf[i]); +#endif +} + +// +// end of FP16 +// + +inline void v256_cleanup() { _mm256_zeroall(); } + +CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END + +//! @endcond + +} // cv:: + +#endif // OPENCV_HAL_INTRIN_AVX_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_avx512.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_avx512.hpp new file mode 100644 index 00000000..75a3bd4b --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_avx512.hpp @@ -0,0 +1,3078 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html + +#ifndef OPENCV_HAL_INTRIN_AVX512_HPP +#define OPENCV_HAL_INTRIN_AVX512_HPP + +#if defined(_MSC_VER) && (_MSC_VER < 1920/*MSVS2019*/) +# pragma warning(disable:4146) // unary minus operator applied to unsigned type, result still unsigned +# pragma warning(disable:4309) // 'argument': truncation of constant value +# pragma warning(disable:4310) // cast truncates constant value +#endif + +#define CVT_ROUND_MODES_IMPLEMENTED 0 + +#define CV_SIMD512 1 +#define CV_SIMD512_64F 1 +#define CV_SIMD512_FP16 0 // no native operations with FP16 type. 
Only load/store from float32x8 are available (if CV_FP16 == 1) + +#define _v512_set_epu64(a7, a6, a5, a4, a3, a2, a1, a0) _mm512_set_epi64((int64)(a7),(int64)(a6),(int64)(a5),(int64)(a4),(int64)(a3),(int64)(a2),(int64)(a1),(int64)(a0)) +#define _v512_set_epu32(a15, a14, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3, a2, a1, a0) \ + _mm512_set_epi64(((int64)(a15)<<32)|(int64)(a14), ((int64)(a13)<<32)|(int64)(a12), ((int64)(a11)<<32)|(int64)(a10), ((int64)( a9)<<32)|(int64)( a8), \ + ((int64)( a7)<<32)|(int64)( a6), ((int64)( a5)<<32)|(int64)( a4), ((int64)( a3)<<32)|(int64)( a2), ((int64)( a1)<<32)|(int64)( a0)) +#define _v512_set_epu16(a31, a30, a29, a28, a27, a26, a25, a24, a23, a22, a21, a20, a19, a18, a17, a16, \ + a15, a14, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3, a2, a1, a0) \ + _v512_set_epu32(((unsigned)(a31)<<16)|(unsigned)(a30), ((unsigned)(a29)<<16)|(unsigned)(a28), ((unsigned)(a27)<<16)|(unsigned)(a26), ((unsigned)(a25)<<16)|(unsigned)(a24), \ + ((unsigned)(a23)<<16)|(unsigned)(a22), ((unsigned)(a21)<<16)|(unsigned)(a20), ((unsigned)(a19)<<16)|(unsigned)(a18), ((unsigned)(a17)<<16)|(unsigned)(a16), \ + ((unsigned)(a15)<<16)|(unsigned)(a14), ((unsigned)(a13)<<16)|(unsigned)(a12), ((unsigned)(a11)<<16)|(unsigned)(a10), ((unsigned)( a9)<<16)|(unsigned)( a8), \ + ((unsigned)( a7)<<16)|(unsigned)( a6), ((unsigned)( a5)<<16)|(unsigned)( a4), ((unsigned)( a3)<<16)|(unsigned)( a2), ((unsigned)( a1)<<16)|(unsigned)( a0)) +#define _v512_set_epu8(a63, a62, a61, a60, a59, a58, a57, a56, a55, a54, a53, a52, a51, a50, a49, a48, \ + a47, a46, a45, a44, a43, a42, a41, a40, a39, a38, a37, a36, a35, a34, a33, a32, \ + a31, a30, a29, a28, a27, a26, a25, a24, a23, a22, a21, a20, a19, a18, a17, a16, \ + a15, a14, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3, a2, a1, a0) \ + _v512_set_epu32(((unsigned)(a63)<<24)|((unsigned)(a62)<<16)|((unsigned)(a61)<<8)|(unsigned)(a60),((unsigned)(a59)<<24)|((unsigned)(a58)<<16)|((unsigned)(a57)<<8)|(unsigned)(a56), \ + ((unsigned)(a55)<<24)|((unsigned)(a54)<<16)|((unsigned)(a53)<<8)|(unsigned)(a52),((unsigned)(a51)<<24)|((unsigned)(a50)<<16)|((unsigned)(a49)<<8)|(unsigned)(a48), \ + ((unsigned)(a47)<<24)|((unsigned)(a46)<<16)|((unsigned)(a45)<<8)|(unsigned)(a44),((unsigned)(a43)<<24)|((unsigned)(a42)<<16)|((unsigned)(a41)<<8)|(unsigned)(a40), \ + ((unsigned)(a39)<<24)|((unsigned)(a38)<<16)|((unsigned)(a37)<<8)|(unsigned)(a36),((unsigned)(a35)<<24)|((unsigned)(a34)<<16)|((unsigned)(a33)<<8)|(unsigned)(a32), \ + ((unsigned)(a31)<<24)|((unsigned)(a30)<<16)|((unsigned)(a29)<<8)|(unsigned)(a28),((unsigned)(a27)<<24)|((unsigned)(a26)<<16)|((unsigned)(a25)<<8)|(unsigned)(a24), \ + ((unsigned)(a23)<<24)|((unsigned)(a22)<<16)|((unsigned)(a21)<<8)|(unsigned)(a20),((unsigned)(a19)<<24)|((unsigned)(a18)<<16)|((unsigned)(a17)<<8)|(unsigned)(a16), \ + ((unsigned)(a15)<<24)|((unsigned)(a14)<<16)|((unsigned)(a13)<<8)|(unsigned)(a12),((unsigned)(a11)<<24)|((unsigned)(a10)<<16)|((unsigned)( a9)<<8)|(unsigned)( a8), \ + ((unsigned)( a7)<<24)|((unsigned)( a6)<<16)|((unsigned)( a5)<<8)|(unsigned)( a4),((unsigned)( a3)<<24)|((unsigned)( a2)<<16)|((unsigned)( a1)<<8)|(unsigned)( a0)) +#define _v512_set_epi8(a63, a62, a61, a60, a59, a58, a57, a56, a55, a54, a53, a52, a51, a50, a49, a48, \ + a47, a46, a45, a44, a43, a42, a41, a40, a39, a38, a37, a36, a35, a34, a33, a32, \ + a31, a30, a29, a28, a27, a26, a25, a24, a23, a22, a21, a20, a19, a18, a17, a16, \ + a15, a14, a13, a12, a11, a10, a9, a8, a7, a6, a5, a4, a3, a2, a1, a0) \ + _v512_set_epu8((uchar)(a63), (uchar)(a62), 
(uchar)(a61), (uchar)(a60), (uchar)(a59), (uchar)(a58), (uchar)(a57), (uchar)(a56), \ + (uchar)(a55), (uchar)(a54), (uchar)(a53), (uchar)(a52), (uchar)(a51), (uchar)(a50), (uchar)(a49), (uchar)(a48), \ + (uchar)(a47), (uchar)(a46), (uchar)(a45), (uchar)(a44), (uchar)(a43), (uchar)(a42), (uchar)(a41), (uchar)(a40), \ + (uchar)(a39), (uchar)(a38), (uchar)(a37), (uchar)(a36), (uchar)(a35), (uchar)(a34), (uchar)(a33), (uchar)(a32), \ + (uchar)(a31), (uchar)(a30), (uchar)(a29), (uchar)(a28), (uchar)(a27), (uchar)(a26), (uchar)(a25), (uchar)(a24), \ + (uchar)(a23), (uchar)(a22), (uchar)(a21), (uchar)(a20), (uchar)(a19), (uchar)(a18), (uchar)(a17), (uchar)(a16), \ + (uchar)(a15), (uchar)(a14), (uchar)(a13), (uchar)(a12), (uchar)(a11), (uchar)(a10), (uchar)( a9), (uchar)( a8), \ + (uchar)( a7), (uchar)( a6), (uchar)( a5), (uchar)( a4), (uchar)( a3), (uchar)( a2), (uchar)( a1), (uchar)( a0)) + +#ifndef _mm512_cvtpd_pslo +#ifdef _mm512_zextsi256_si512 +#define _mm512_cvtpd_pslo(a) _mm512_zextps256_ps512(_mm512_cvtpd_ps(a)) +#else +//if preferred way to extend with zeros is unavailable +#define _mm512_cvtpd_pslo(a) _mm512_castps256_ps512(_mm512_cvtpd_ps(a)) +#endif +#endif +///////// Utils //////////// + +namespace +{ + +inline __m512i _v512_combine(const __m256i& lo, const __m256i& hi) +{ return _mm512_inserti32x8(_mm512_castsi256_si512(lo), hi, 1); } + +inline __m512 _v512_combine(const __m256& lo, const __m256& hi) +{ return _mm512_insertf32x8(_mm512_castps256_ps512(lo), hi, 1); } + +inline __m512d _v512_combine(const __m256d& lo, const __m256d& hi) +{ return _mm512_insertf64x4(_mm512_castpd256_pd512(lo), hi, 1); } + +inline int _v_cvtsi512_si32(const __m512i& a) +{ return _mm_cvtsi128_si32(_mm512_castsi512_si128(a)); } + +inline __m256i _v512_extract_high(const __m512i& v) +{ return _mm512_extracti32x8_epi32(v, 1); } + +inline __m256 _v512_extract_high(const __m512& v) +{ return _mm512_extractf32x8_ps(v, 1); } + +inline __m256d _v512_extract_high(const __m512d& v) +{ return _mm512_extractf64x4_pd(v, 1); } + +inline __m256i _v512_extract_low(const __m512i& v) +{ return _mm512_castsi512_si256(v); } + +inline __m256 _v512_extract_low(const __m512& v) +{ return _mm512_castps512_ps256(v); } + +inline __m256d _v512_extract_low(const __m512d& v) +{ return _mm512_castpd512_pd256(v); } + +inline __m512i _v512_insert(const __m512i& a, const __m256i& b) +{ return _mm512_inserti32x8(a, b, 0); } + +inline __m512 _v512_insert(const __m512& a, const __m256& b) +{ return _mm512_insertf32x8(a, b, 0); } + +inline __m512d _v512_insert(const __m512d& a, const __m256d& b) +{ return _mm512_insertf64x4(a, b, 0); } + +} + +namespace cv +{ + +//! 
@cond IGNORED + +CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN + +///////// Types //////////// + +struct v_uint8x64 +{ + typedef uchar lane_type; + enum { nlanes = 64 }; + __m512i val; + + explicit v_uint8x64(__m512i v) : val(v) {} + v_uint8x64(uchar v0, uchar v1, uchar v2, uchar v3, + uchar v4, uchar v5, uchar v6, uchar v7, + uchar v8, uchar v9, uchar v10, uchar v11, + uchar v12, uchar v13, uchar v14, uchar v15, + uchar v16, uchar v17, uchar v18, uchar v19, + uchar v20, uchar v21, uchar v22, uchar v23, + uchar v24, uchar v25, uchar v26, uchar v27, + uchar v28, uchar v29, uchar v30, uchar v31, + uchar v32, uchar v33, uchar v34, uchar v35, + uchar v36, uchar v37, uchar v38, uchar v39, + uchar v40, uchar v41, uchar v42, uchar v43, + uchar v44, uchar v45, uchar v46, uchar v47, + uchar v48, uchar v49, uchar v50, uchar v51, + uchar v52, uchar v53, uchar v54, uchar v55, + uchar v56, uchar v57, uchar v58, uchar v59, + uchar v60, uchar v61, uchar v62, uchar v63) + { + val = _v512_set_epu8(v63, v62, v61, v60, v59, v58, v57, v56, v55, v54, v53, v52, v51, v50, v49, v48, + v47, v46, v45, v44, v43, v42, v41, v40, v39, v38, v37, v36, v35, v34, v33, v32, + v31, v30, v29, v28, v27, v26, v25, v24, v23, v22, v21, v20, v19, v18, v17, v16, + v15, v14, v13, v12, v11, v10, v9, v8, v7, v6, v5, v4, v3, v2, v1, v0); + } + v_uint8x64() {} + + static inline v_uint8x64 zero() { return v_uint8x64(_mm512_setzero_si512()); } + + uchar get0() const { return (uchar)_v_cvtsi512_si32(val); } +}; + +struct v_int8x64 +{ + typedef schar lane_type; + enum { nlanes = 64 }; + __m512i val; + + explicit v_int8x64(__m512i v) : val(v) {} + v_int8x64(schar v0, schar v1, schar v2, schar v3, + schar v4, schar v5, schar v6, schar v7, + schar v8, schar v9, schar v10, schar v11, + schar v12, schar v13, schar v14, schar v15, + schar v16, schar v17, schar v18, schar v19, + schar v20, schar v21, schar v22, schar v23, + schar v24, schar v25, schar v26, schar v27, + schar v28, schar v29, schar v30, schar v31, + schar v32, schar v33, schar v34, schar v35, + schar v36, schar v37, schar v38, schar v39, + schar v40, schar v41, schar v42, schar v43, + schar v44, schar v45, schar v46, schar v47, + schar v48, schar v49, schar v50, schar v51, + schar v52, schar v53, schar v54, schar v55, + schar v56, schar v57, schar v58, schar v59, + schar v60, schar v61, schar v62, schar v63) + { + val = _v512_set_epi8(v63, v62, v61, v60, v59, v58, v57, v56, v55, v54, v53, v52, v51, v50, v49, v48, + v47, v46, v45, v44, v43, v42, v41, v40, v39, v38, v37, v36, v35, v34, v33, v32, + v31, v30, v29, v28, v27, v26, v25, v24, v23, v22, v21, v20, v19, v18, v17, v16, + v15, v14, v13, v12, v11, v10, v9, v8, v7, v6, v5, v4, v3, v2, v1, v0); + } + v_int8x64() {} + + static inline v_int8x64 zero() { return v_int8x64(_mm512_setzero_si512()); } + + schar get0() const { return (schar)_v_cvtsi512_si32(val); } +}; + +struct v_uint16x32 +{ + typedef ushort lane_type; + enum { nlanes = 32 }; + __m512i val; + + explicit v_uint16x32(__m512i v) : val(v) {} + v_uint16x32(ushort v0, ushort v1, ushort v2, ushort v3, + ushort v4, ushort v5, ushort v6, ushort v7, + ushort v8, ushort v9, ushort v10, ushort v11, + ushort v12, ushort v13, ushort v14, ushort v15, + ushort v16, ushort v17, ushort v18, ushort v19, + ushort v20, ushort v21, ushort v22, ushort v23, + ushort v24, ushort v25, ushort v26, ushort v27, + ushort v28, ushort v29, ushort v30, ushort v31) + { + val = _v512_set_epu16(v31, v30, v29, v28, v27, v26, v25, v24, v23, v22, v21, v20, v19, v18, v17, v16, + v15, v14, v13, v12, v11, v10, v9, v8, 
v7, v6, v5, v4, v3, v2, v1, v0); + } + v_uint16x32() {} + + static inline v_uint16x32 zero() { return v_uint16x32(_mm512_setzero_si512()); } + + ushort get0() const { return (ushort)_v_cvtsi512_si32(val); } +}; + +struct v_int16x32 +{ + typedef short lane_type; + enum { nlanes = 32 }; + __m512i val; + + explicit v_int16x32(__m512i v) : val(v) {} + v_int16x32(short v0, short v1, short v2, short v3, short v4, short v5, short v6, short v7, + short v8, short v9, short v10, short v11, short v12, short v13, short v14, short v15, + short v16, short v17, short v18, short v19, short v20, short v21, short v22, short v23, + short v24, short v25, short v26, short v27, short v28, short v29, short v30, short v31) + { + val = _v512_set_epu16((ushort)v31, (ushort)v30, (ushort)v29, (ushort)v28, (ushort)v27, (ushort)v26, (ushort)v25, (ushort)v24, + (ushort)v23, (ushort)v22, (ushort)v21, (ushort)v20, (ushort)v19, (ushort)v18, (ushort)v17, (ushort)v16, + (ushort)v15, (ushort)v14, (ushort)v13, (ushort)v12, (ushort)v11, (ushort)v10, (ushort)v9 , (ushort)v8, + (ushort)v7 , (ushort)v6 , (ushort)v5 , (ushort)v4 , (ushort)v3 , (ushort)v2 , (ushort)v1 , (ushort)v0); + } + v_int16x32() {} + + static inline v_int16x32 zero() { return v_int16x32(_mm512_setzero_si512()); } + + short get0() const { return (short)_v_cvtsi512_si32(val); } +}; + +struct v_uint32x16 +{ + typedef unsigned lane_type; + enum { nlanes = 16 }; + __m512i val; + + explicit v_uint32x16(__m512i v) : val(v) {} + v_uint32x16(unsigned v0, unsigned v1, unsigned v2, unsigned v3, + unsigned v4, unsigned v5, unsigned v6, unsigned v7, + unsigned v8, unsigned v9, unsigned v10, unsigned v11, + unsigned v12, unsigned v13, unsigned v14, unsigned v15) + { + val = _mm512_setr_epi32((int)v0, (int)v1, (int)v2, (int)v3, (int)v4, (int)v5, (int)v6, (int)v7, + (int)v8, (int)v9, (int)v10, (int)v11, (int)v12, (int)v13, (int)v14, (int)v15); + } + v_uint32x16() {} + + static inline v_uint32x16 zero() { return v_uint32x16(_mm512_setzero_si512()); } + + unsigned get0() const { return (unsigned)_v_cvtsi512_si32(val); } +}; + +struct v_int32x16 +{ + typedef int lane_type; + enum { nlanes = 16 }; + __m512i val; + + explicit v_int32x16(__m512i v) : val(v) {} + v_int32x16(int v0, int v1, int v2, int v3, int v4, int v5, int v6, int v7, + int v8, int v9, int v10, int v11, int v12, int v13, int v14, int v15) + { + val = _mm512_setr_epi32(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15); + } + v_int32x16() {} + + static inline v_int32x16 zero() { return v_int32x16(_mm512_setzero_si512()); } + + int get0() const { return _v_cvtsi512_si32(val); } +}; + +struct v_float32x16 +{ + typedef float lane_type; + enum { nlanes = 16 }; + __m512 val; + + explicit v_float32x16(__m512 v) : val(v) {} + v_float32x16(float v0, float v1, float v2, float v3, float v4, float v5, float v6, float v7, + float v8, float v9, float v10, float v11, float v12, float v13, float v14, float v15) + { + val = _mm512_setr_ps(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15); + } + v_float32x16() {} + + static inline v_float32x16 zero() { return v_float32x16(_mm512_setzero_ps()); } + + float get0() const { return _mm_cvtss_f32(_mm512_castps512_ps128(val)); } +}; + +struct v_uint64x8 +{ + typedef uint64 lane_type; + enum { nlanes = 8 }; + __m512i val; + + explicit v_uint64x8(__m512i v) : val(v) {} + v_uint64x8(uint64 v0, uint64 v1, uint64 v2, uint64 v3, uint64 v4, uint64 v5, uint64 v6, uint64 v7) + { val = _mm512_setr_epi64((int64)v0, (int64)v1, (int64)v2, (int64)v3, (int64)v4, 
(int64)v5, (int64)v6, (int64)v7); } + v_uint64x8() {} + + static inline v_uint64x8 zero() { return v_uint64x8(_mm512_setzero_si512()); } + + uint64 get0() const + { + #if defined __x86_64__ || defined _M_X64 + return (uint64)_mm_cvtsi128_si64(_mm512_castsi512_si128(val)); + #else + int a = _mm_cvtsi128_si32(_mm512_castsi512_si128(val)); + int b = _mm_cvtsi128_si32(_mm512_castsi512_si128(_mm512_srli_epi64(val, 32))); + return (unsigned)a | ((uint64)(unsigned)b << 32); + #endif + } +}; + +struct v_int64x8 +{ + typedef int64 lane_type; + enum { nlanes = 8 }; + __m512i val; + + explicit v_int64x8(__m512i v) : val(v) {} + v_int64x8(int64 v0, int64 v1, int64 v2, int64 v3, int64 v4, int64 v5, int64 v6, int64 v7) + { val = _mm512_setr_epi64(v0, v1, v2, v3, v4, v5, v6, v7); } + v_int64x8() {} + + static inline v_int64x8 zero() { return v_int64x8(_mm512_setzero_si512()); } + + int64 get0() const + { + #if defined __x86_64__ || defined _M_X64 + return (int64)_mm_cvtsi128_si64(_mm512_castsi512_si128(val)); + #else + int a = _mm_cvtsi128_si32(_mm512_castsi512_si128(val)); + int b = _mm_cvtsi128_si32(_mm512_castsi512_si128(_mm512_srli_epi64(val, 32))); + return (int64)((unsigned)a | ((uint64)(unsigned)b << 32)); + #endif + } +}; + +struct v_float64x8 +{ + typedef double lane_type; + enum { nlanes = 8 }; + __m512d val; + + explicit v_float64x8(__m512d v) : val(v) {} + v_float64x8(double v0, double v1, double v2, double v3, double v4, double v5, double v6, double v7) + { val = _mm512_setr_pd(v0, v1, v2, v3, v4, v5, v6, v7); } + v_float64x8() {} + + static inline v_float64x8 zero() { return v_float64x8(_mm512_setzero_pd()); } + + double get0() const { return _mm_cvtsd_f64(_mm512_castpd512_pd128(val)); } +}; + +//////////////// Load and store operations /////////////// + +#define OPENCV_HAL_IMPL_AVX512_LOADSTORE(_Tpvec, _Tp) \ + inline _Tpvec v512_load(const _Tp* ptr) \ + { return _Tpvec(_mm512_loadu_si512((const __m512i*)ptr)); } \ + inline _Tpvec v512_load_aligned(const _Tp* ptr) \ + { return _Tpvec(_mm512_load_si512((const __m512i*)ptr)); } \ + inline _Tpvec v512_load_low(const _Tp* ptr) \ + { \ + __m256i v256 = _mm256_loadu_si256((const __m256i*)ptr); \ + return _Tpvec(_mm512_castsi256_si512(v256)); \ + } \ + inline _Tpvec v512_load_halves(const _Tp* ptr0, const _Tp* ptr1) \ + { \ + __m256i vlo = _mm256_loadu_si256((const __m256i*)ptr0); \ + __m256i vhi = _mm256_loadu_si256((const __m256i*)ptr1); \ + return _Tpvec(_v512_combine(vlo, vhi)); \ + } \ + inline void v_store(_Tp* ptr, const _Tpvec& a) \ + { _mm512_storeu_si512((__m512i*)ptr, a.val); } \ + inline void v_store_aligned(_Tp* ptr, const _Tpvec& a) \ + { _mm512_store_si512((__m512i*)ptr, a.val); } \ + inline void v_store_aligned_nocache(_Tp* ptr, const _Tpvec& a) \ + { _mm512_stream_si512((__m512i*)ptr, a.val); } \ + inline void v_store(_Tp* ptr, const _Tpvec& a, hal::StoreMode mode) \ + { \ + if( mode == hal::STORE_UNALIGNED ) \ + _mm512_storeu_si512((__m512i*)ptr, a.val); \ + else if( mode == hal::STORE_ALIGNED_NOCACHE ) \ + _mm512_stream_si512((__m512i*)ptr, a.val); \ + else \ + _mm512_store_si512((__m512i*)ptr, a.val); \ + } \ + inline void v_store_low(_Tp* ptr, const _Tpvec& a) \ + { _mm256_storeu_si256((__m256i*)ptr, _v512_extract_low(a.val)); } \ + inline void v_store_high(_Tp* ptr, const _Tpvec& a) \ + { _mm256_storeu_si256((__m256i*)ptr, _v512_extract_high(a.val)); } + +OPENCV_HAL_IMPL_AVX512_LOADSTORE(v_uint8x64, uchar) +OPENCV_HAL_IMPL_AVX512_LOADSTORE(v_int8x64, schar) +OPENCV_HAL_IMPL_AVX512_LOADSTORE(v_uint16x32, ushort) 
+OPENCV_HAL_IMPL_AVX512_LOADSTORE(v_int16x32, short) +OPENCV_HAL_IMPL_AVX512_LOADSTORE(v_uint32x16, unsigned) +OPENCV_HAL_IMPL_AVX512_LOADSTORE(v_int32x16, int) +OPENCV_HAL_IMPL_AVX512_LOADSTORE(v_uint64x8, uint64) +OPENCV_HAL_IMPL_AVX512_LOADSTORE(v_int64x8, int64) + +#define OPENCV_HAL_IMPL_AVX512_LOADSTORE_FLT(_Tpvec, _Tp, suffix, halfreg) \ + inline _Tpvec v512_load(const _Tp* ptr) \ + { return _Tpvec(_mm512_loadu_##suffix(ptr)); } \ + inline _Tpvec v512_load_aligned(const _Tp* ptr) \ + { return _Tpvec(_mm512_load_##suffix(ptr)); } \ + inline _Tpvec v512_load_low(const _Tp* ptr) \ + { \ + return _Tpvec(_mm512_cast##suffix##256_##suffix##512 \ + (_mm256_loadu_##suffix(ptr))); \ + } \ + inline _Tpvec v512_load_halves(const _Tp* ptr0, const _Tp* ptr1) \ + { \ + halfreg vlo = _mm256_loadu_##suffix(ptr0); \ + halfreg vhi = _mm256_loadu_##suffix(ptr1); \ + return _Tpvec(_v512_combine(vlo, vhi)); \ + } \ + inline void v_store(_Tp* ptr, const _Tpvec& a) \ + { _mm512_storeu_##suffix(ptr, a.val); } \ + inline void v_store_aligned(_Tp* ptr, const _Tpvec& a) \ + { _mm512_store_##suffix(ptr, a.val); } \ + inline void v_store_aligned_nocache(_Tp* ptr, const _Tpvec& a) \ + { _mm512_stream_##suffix(ptr, a.val); } \ + inline void v_store(_Tp* ptr, const _Tpvec& a, hal::StoreMode mode) \ + { \ + if( mode == hal::STORE_UNALIGNED ) \ + _mm512_storeu_##suffix(ptr, a.val); \ + else if( mode == hal::STORE_ALIGNED_NOCACHE ) \ + _mm512_stream_##suffix(ptr, a.val); \ + else \ + _mm512_store_##suffix(ptr, a.val); \ + } \ + inline void v_store_low(_Tp* ptr, const _Tpvec& a) \ + { _mm256_storeu_##suffix(ptr, _v512_extract_low(a.val)); } \ + inline void v_store_high(_Tp* ptr, const _Tpvec& a) \ + { _mm256_storeu_##suffix(ptr, _v512_extract_high(a.val)); } + +OPENCV_HAL_IMPL_AVX512_LOADSTORE_FLT(v_float32x16, float, ps, __m256) +OPENCV_HAL_IMPL_AVX512_LOADSTORE_FLT(v_float64x8, double, pd, __m256d) + +#define OPENCV_HAL_IMPL_AVX512_CAST(_Tpvec, _Tpvecf, suffix, cast) \ + inline _Tpvec v_reinterpret_as_##suffix(const _Tpvecf& a) \ + { return _Tpvec(cast(a.val)); } + +#define OPENCV_HAL_IMPL_AVX512_INIT(_Tpvec, _Tp, suffix, ssuffix, ctype_s) \ + inline _Tpvec v512_setzero_##suffix() \ + { return _Tpvec(_mm512_setzero_si512()); } \ + inline _Tpvec v512_setall_##suffix(_Tp v) \ + { return _Tpvec(_mm512_set1_##ssuffix((ctype_s)v)); } \ + OPENCV_HAL_IMPL_AVX512_CAST(_Tpvec, v_uint8x64, suffix, OPENCV_HAL_NOP) \ + OPENCV_HAL_IMPL_AVX512_CAST(_Tpvec, v_int8x64, suffix, OPENCV_HAL_NOP) \ + OPENCV_HAL_IMPL_AVX512_CAST(_Tpvec, v_uint16x32, suffix, OPENCV_HAL_NOP) \ + OPENCV_HAL_IMPL_AVX512_CAST(_Tpvec, v_int16x32, suffix, OPENCV_HAL_NOP) \ + OPENCV_HAL_IMPL_AVX512_CAST(_Tpvec, v_uint32x16, suffix, OPENCV_HAL_NOP) \ + OPENCV_HAL_IMPL_AVX512_CAST(_Tpvec, v_int32x16, suffix, OPENCV_HAL_NOP) \ + OPENCV_HAL_IMPL_AVX512_CAST(_Tpvec, v_uint64x8, suffix, OPENCV_HAL_NOP) \ + OPENCV_HAL_IMPL_AVX512_CAST(_Tpvec, v_int64x8, suffix, OPENCV_HAL_NOP) \ + OPENCV_HAL_IMPL_AVX512_CAST(_Tpvec, v_float32x16, suffix, _mm512_castps_si512) \ + OPENCV_HAL_IMPL_AVX512_CAST(_Tpvec, v_float64x8, suffix, _mm512_castpd_si512) + +OPENCV_HAL_IMPL_AVX512_INIT(v_uint8x64, uchar, u8, epi8, char) +OPENCV_HAL_IMPL_AVX512_INIT(v_int8x64, schar, s8, epi8, char) +OPENCV_HAL_IMPL_AVX512_INIT(v_uint16x32, ushort, u16, epi16, short) +OPENCV_HAL_IMPL_AVX512_INIT(v_int16x32, short, s16, epi16, short) +OPENCV_HAL_IMPL_AVX512_INIT(v_uint32x16, unsigned, u32, epi32, int) +OPENCV_HAL_IMPL_AVX512_INIT(v_int32x16, int, s32, epi32, int) +OPENCV_HAL_IMPL_AVX512_INIT(v_uint64x8, 
uint64, u64, epi64, int64) +OPENCV_HAL_IMPL_AVX512_INIT(v_int64x8, int64, s64, epi64, int64) + +#define OPENCV_HAL_IMPL_AVX512_INIT_FLT(_Tpvec, _Tp, suffix, zsuffix, cast) \ + inline _Tpvec v512_setzero_##suffix() \ + { return _Tpvec(_mm512_setzero_##zsuffix()); } \ + inline _Tpvec v512_setall_##suffix(_Tp v) \ + { return _Tpvec(_mm512_set1_##zsuffix(v)); } \ + OPENCV_HAL_IMPL_AVX512_CAST(_Tpvec, v_uint8x64, suffix, cast) \ + OPENCV_HAL_IMPL_AVX512_CAST(_Tpvec, v_int8x64, suffix, cast) \ + OPENCV_HAL_IMPL_AVX512_CAST(_Tpvec, v_uint16x32, suffix, cast) \ + OPENCV_HAL_IMPL_AVX512_CAST(_Tpvec, v_int16x32, suffix, cast) \ + OPENCV_HAL_IMPL_AVX512_CAST(_Tpvec, v_uint32x16, suffix, cast) \ + OPENCV_HAL_IMPL_AVX512_CAST(_Tpvec, v_int32x16, suffix, cast) \ + OPENCV_HAL_IMPL_AVX512_CAST(_Tpvec, v_uint64x8, suffix, cast) \ + OPENCV_HAL_IMPL_AVX512_CAST(_Tpvec, v_int64x8, suffix, cast) + +OPENCV_HAL_IMPL_AVX512_INIT_FLT(v_float32x16, float, f32, ps, _mm512_castsi512_ps) +OPENCV_HAL_IMPL_AVX512_INIT_FLT(v_float64x8, double, f64, pd, _mm512_castsi512_pd) + +inline v_float32x16 v_reinterpret_as_f32(const v_float32x16& a) +{ return a; } +inline v_float32x16 v_reinterpret_as_f32(const v_float64x8& a) +{ return v_float32x16(_mm512_castpd_ps(a.val)); } + +inline v_float64x8 v_reinterpret_as_f64(const v_float64x8& a) +{ return a; } +inline v_float64x8 v_reinterpret_as_f64(const v_float32x16& a) +{ return v_float64x8(_mm512_castps_pd(a.val)); } + +// FP16 +inline v_float32x16 v512_load_expand(const float16_t* ptr) +{ + return v_float32x16(_mm512_cvtph_ps(_mm256_loadu_si256((const __m256i*)ptr))); +} + +inline void v_pack_store(float16_t* ptr, const v_float32x16& a) +{ + __m256i ah = _mm512_cvtps_ph(a.val, 0); + _mm256_storeu_si256((__m256i*)ptr, ah); +} + +/* Recombine & ZIP */ +inline void v_zip(const v_int8x64& a, const v_int8x64& b, v_int8x64& ab0, v_int8x64& ab1) +{ +#if CV_AVX_512VBMI + __m512i mask0 = _v512_set_epu8( 95, 31, 94, 30, 93, 29, 92, 28, 91, 27, 90, 26, 89, 25, 88, 24, + 87, 23, 86, 22, 85, 21, 84, 20, 83, 19, 82, 18, 81, 17, 80, 16, + 79, 15, 78, 14, 77, 13, 76, 12, 75, 11, 74, 10, 73, 9, 72, 8, + 71, 7, 70, 6, 69, 5, 68, 4, 67, 3, 66, 2, 65, 1, 64, 0); + ab0 = v_int8x64(_mm512_permutex2var_epi8(a.val, mask0, b.val)); + __m512i mask1 = _v512_set_epu8(127, 63, 126, 62, 125, 61, 124, 60, 123, 59, 122, 58, 121, 57, 120, 56, + 119, 55, 118, 54, 117, 53, 116, 52, 115, 51, 114, 50, 113, 49, 112, 48, + 111, 47, 110, 46, 109, 45, 108, 44, 107, 43, 106, 42, 105, 41, 104, 40, + 103, 39, 102, 38, 101, 37, 100, 36, 99, 35, 98, 34, 97, 33, 96, 32); + ab1 = v_int8x64(_mm512_permutex2var_epi8(a.val, mask1, b.val)); +#else + __m512i low = _mm512_unpacklo_epi8(a.val, b.val); + __m512i high = _mm512_unpackhi_epi8(a.val, b.val); + ab0 = v_int8x64(_mm512_permutex2var_epi64(low, _v512_set_epu64(11, 10, 3, 2, 9, 8, 1, 0), high)); + ab1 = v_int8x64(_mm512_permutex2var_epi64(low, _v512_set_epu64(15, 14, 7, 6, 13, 12, 5, 4), high)); +#endif +} +inline void v_zip(const v_int16x32& a, const v_int16x32& b, v_int16x32& ab0, v_int16x32& ab1) +{ + __m512i mask0 = _v512_set_epu16(47, 15, 46, 14, 45, 13, 44, 12, 43, 11, 42, 10, 41, 9, 40, 8, + 39, 7, 38, 6, 37, 5, 36, 4, 35, 3, 34, 2, 33, 1, 32, 0); + ab0 = v_int16x32(_mm512_permutex2var_epi16(a.val, mask0, b.val)); + __m512i mask1 = _v512_set_epu16(63, 31, 62, 30, 61, 29, 60, 28, 59, 27, 58, 26, 57, 25, 56, 24, + 55, 23, 54, 22, 53, 21, 52, 20, 51, 19, 50, 18, 49, 17, 48, 16); + ab1 = v_int16x32(_mm512_permutex2var_epi16(a.val, mask1, b.val)); +} +inline void v_zip(const 
v_int32x16& a, const v_int32x16& b, v_int32x16& ab0, v_int32x16& ab1) +{ + __m512i mask0 = _v512_set_epu32(23, 7, 22, 6, 21, 5, 20, 4, 19, 3, 18, 2, 17, 1, 16, 0); + ab0 = v_int32x16(_mm512_permutex2var_epi32(a.val, mask0, b.val)); + __m512i mask1 = _v512_set_epu32(31, 15, 30, 14, 29, 13, 28, 12, 27, 11, 26, 10, 25, 9, 24, 8); + ab1 = v_int32x16(_mm512_permutex2var_epi32(a.val, mask1, b.val)); +} +inline void v_zip(const v_int64x8& a, const v_int64x8& b, v_int64x8& ab0, v_int64x8& ab1) +{ + __m512i mask0 = _v512_set_epu64(11, 3, 10, 2, 9, 1, 8, 0); + ab0 = v_int64x8(_mm512_permutex2var_epi64(a.val, mask0, b.val)); + __m512i mask1 = _v512_set_epu64(15, 7, 14, 6, 13, 5, 12, 4); + ab1 = v_int64x8(_mm512_permutex2var_epi64(a.val, mask1, b.val)); +} + +inline void v_zip(const v_uint8x64& a, const v_uint8x64& b, v_uint8x64& ab0, v_uint8x64& ab1) +{ + v_int8x64 i0, i1; + v_zip(v_reinterpret_as_s8(a), v_reinterpret_as_s8(b), i0, i1); + ab0 = v_reinterpret_as_u8(i0); + ab1 = v_reinterpret_as_u8(i1); +} +inline void v_zip(const v_uint16x32& a, const v_uint16x32& b, v_uint16x32& ab0, v_uint16x32& ab1) +{ + v_int16x32 i0, i1; + v_zip(v_reinterpret_as_s16(a), v_reinterpret_as_s16(b), i0, i1); + ab0 = v_reinterpret_as_u16(i0); + ab1 = v_reinterpret_as_u16(i1); +} +inline void v_zip(const v_uint32x16& a, const v_uint32x16& b, v_uint32x16& ab0, v_uint32x16& ab1) +{ + v_int32x16 i0, i1; + v_zip(v_reinterpret_as_s32(a), v_reinterpret_as_s32(b), i0, i1); + ab0 = v_reinterpret_as_u32(i0); + ab1 = v_reinterpret_as_u32(i1); +} +inline void v_zip(const v_uint64x8& a, const v_uint64x8& b, v_uint64x8& ab0, v_uint64x8& ab1) +{ + v_int64x8 i0, i1; + v_zip(v_reinterpret_as_s64(a), v_reinterpret_as_s64(b), i0, i1); + ab0 = v_reinterpret_as_u64(i0); + ab1 = v_reinterpret_as_u64(i1); +} +inline void v_zip(const v_float32x16& a, const v_float32x16& b, v_float32x16& ab0, v_float32x16& ab1) +{ + v_int32x16 i0, i1; + v_zip(v_reinterpret_as_s32(a), v_reinterpret_as_s32(b), i0, i1); + ab0 = v_reinterpret_as_f32(i0); + ab1 = v_reinterpret_as_f32(i1); +} +inline void v_zip(const v_float64x8& a, const v_float64x8& b, v_float64x8& ab0, v_float64x8& ab1) +{ + v_int64x8 i0, i1; + v_zip(v_reinterpret_as_s64(a), v_reinterpret_as_s64(b), i0, i1); + ab0 = v_reinterpret_as_f64(i0); + ab1 = v_reinterpret_as_f64(i1); +} + +#define OPENCV_HAL_IMPL_AVX512_COMBINE(_Tpvec, suffix) \ + inline _Tpvec v_combine_low(const _Tpvec& a, const _Tpvec& b) \ + { return _Tpvec(_v512_combine(_v512_extract_low(a.val), _v512_extract_low(b.val))); } \ + inline _Tpvec v_combine_high(const _Tpvec& a, const _Tpvec& b) \ + { return _Tpvec(_v512_insert(b.val, _v512_extract_high(a.val))); } \ + inline void v_recombine(const _Tpvec& a, const _Tpvec& b, \ + _Tpvec& c, _Tpvec& d) \ + { \ + c.val = _v512_combine(_v512_extract_low(a.val),_v512_extract_low(b.val)); \ + d.val = _v512_insert(b.val,_v512_extract_high(a.val)); \ + } + + +OPENCV_HAL_IMPL_AVX512_COMBINE(v_uint8x64, epi8) +OPENCV_HAL_IMPL_AVX512_COMBINE(v_int8x64, epi8) +OPENCV_HAL_IMPL_AVX512_COMBINE(v_uint16x32, epi16) +OPENCV_HAL_IMPL_AVX512_COMBINE(v_int16x32, epi16) +OPENCV_HAL_IMPL_AVX512_COMBINE(v_uint32x16, epi32) +OPENCV_HAL_IMPL_AVX512_COMBINE(v_int32x16, epi32) +OPENCV_HAL_IMPL_AVX512_COMBINE(v_uint64x8, epi64) +OPENCV_HAL_IMPL_AVX512_COMBINE(v_int64x8, epi64) +OPENCV_HAL_IMPL_AVX512_COMBINE(v_float32x16, ps) +OPENCV_HAL_IMPL_AVX512_COMBINE(v_float64x8, pd) + +////////// Arithmetic, bitwise and comparison operations ///////// + +/* Element-wise binary and unary operations */ + +/** Non-saturating 
arithmetics **/ +#define OPENCV_HAL_IMPL_AVX512_BIN_FUNC(func, _Tpvec, intrin) \ + inline _Tpvec func(const _Tpvec& a, const _Tpvec& b) \ + { return _Tpvec(intrin(a.val, b.val)); } + +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_add_wrap, v_uint8x64, _mm512_add_epi8) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_add_wrap, v_int8x64, _mm512_add_epi8) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_add_wrap, v_uint16x32, _mm512_add_epi16) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_add_wrap, v_int16x32, _mm512_add_epi16) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_sub_wrap, v_uint8x64, _mm512_sub_epi8) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_sub_wrap, v_int8x64, _mm512_sub_epi8) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_sub_wrap, v_uint16x32, _mm512_sub_epi16) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_sub_wrap, v_int16x32, _mm512_sub_epi16) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_mul_wrap, v_uint16x32, _mm512_mullo_epi16) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_mul_wrap, v_int16x32, _mm512_mullo_epi16) + +inline v_uint8x64 v_mul_wrap(const v_uint8x64& a, const v_uint8x64& b) +{ + __m512i ad = _mm512_srai_epi16(a.val, 8); + __m512i bd = _mm512_srai_epi16(b.val, 8); + __m512i p0 = _mm512_mullo_epi16(a.val, b.val); // even + __m512i p1 = _mm512_slli_epi16(_mm512_mullo_epi16(ad, bd), 8); // odd + return v_uint8x64(_mm512_mask_blend_epi8(0xAAAAAAAAAAAAAAAA, p0, p1)); +} +inline v_int8x64 v_mul_wrap(const v_int8x64& a, const v_int8x64& b) +{ + return v_reinterpret_as_s8(v_mul_wrap(v_reinterpret_as_u8(a), v_reinterpret_as_u8(b))); +} + +#define OPENCV_HAL_IMPL_AVX512_BIN_OP(bin_op, _Tpvec, intrin) \ + inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \ + { return _Tpvec(intrin(a.val, b.val)); } \ + inline _Tpvec& operator bin_op##= (_Tpvec& a, const _Tpvec& b) \ + { a.val = intrin(a.val, b.val); return a; } + +OPENCV_HAL_IMPL_AVX512_BIN_OP(+, v_uint32x16, _mm512_add_epi32) +OPENCV_HAL_IMPL_AVX512_BIN_OP(-, v_uint32x16, _mm512_sub_epi32) +OPENCV_HAL_IMPL_AVX512_BIN_OP(+, v_int32x16, _mm512_add_epi32) +OPENCV_HAL_IMPL_AVX512_BIN_OP(-, v_int32x16, _mm512_sub_epi32) +OPENCV_HAL_IMPL_AVX512_BIN_OP(+, v_uint64x8, _mm512_add_epi64) +OPENCV_HAL_IMPL_AVX512_BIN_OP(-, v_uint64x8, _mm512_sub_epi64) +OPENCV_HAL_IMPL_AVX512_BIN_OP(+, v_int64x8, _mm512_add_epi64) +OPENCV_HAL_IMPL_AVX512_BIN_OP(-, v_int64x8, _mm512_sub_epi64) + +OPENCV_HAL_IMPL_AVX512_BIN_OP(*, v_uint32x16, _mm512_mullo_epi32) +OPENCV_HAL_IMPL_AVX512_BIN_OP(*, v_int32x16, _mm512_mullo_epi32) +OPENCV_HAL_IMPL_AVX512_BIN_OP(*, v_uint64x8, _mm512_mullo_epi64) +OPENCV_HAL_IMPL_AVX512_BIN_OP(*, v_int64x8, _mm512_mullo_epi64) + +/** Saturating arithmetics **/ +OPENCV_HAL_IMPL_AVX512_BIN_OP(+, v_uint8x64, _mm512_adds_epu8) +OPENCV_HAL_IMPL_AVX512_BIN_OP(-, v_uint8x64, _mm512_subs_epu8) +OPENCV_HAL_IMPL_AVX512_BIN_OP(+, v_int8x64, _mm512_adds_epi8) +OPENCV_HAL_IMPL_AVX512_BIN_OP(-, v_int8x64, _mm512_subs_epi8) +OPENCV_HAL_IMPL_AVX512_BIN_OP(+, v_uint16x32, _mm512_adds_epu16) +OPENCV_HAL_IMPL_AVX512_BIN_OP(-, v_uint16x32, _mm512_subs_epu16) +OPENCV_HAL_IMPL_AVX512_BIN_OP(+, v_int16x32, _mm512_adds_epi16) +OPENCV_HAL_IMPL_AVX512_BIN_OP(-, v_int16x32, _mm512_subs_epi16) + +OPENCV_HAL_IMPL_AVX512_BIN_OP(+, v_float32x16, _mm512_add_ps) +OPENCV_HAL_IMPL_AVX512_BIN_OP(-, v_float32x16, _mm512_sub_ps) +OPENCV_HAL_IMPL_AVX512_BIN_OP(*, v_float32x16, _mm512_mul_ps) +OPENCV_HAL_IMPL_AVX512_BIN_OP(/, v_float32x16, _mm512_div_ps) +OPENCV_HAL_IMPL_AVX512_BIN_OP(+, v_float64x8, _mm512_add_pd) +OPENCV_HAL_IMPL_AVX512_BIN_OP(-, v_float64x8, _mm512_sub_pd) +OPENCV_HAL_IMPL_AVX512_BIN_OP(*, v_float64x8, _mm512_mul_pd) 
+OPENCV_HAL_IMPL_AVX512_BIN_OP(/, v_float64x8, _mm512_div_pd) + +// saturating multiply +inline v_uint8x64 operator * (const v_uint8x64& a, const v_uint8x64& b) +{ + v_uint16x32 c, d; + v_mul_expand(a, b, c, d); + return v_pack(c, d); +} +inline v_int8x64 operator * (const v_int8x64& a, const v_int8x64& b) +{ + v_int16x32 c, d; + v_mul_expand(a, b, c, d); + return v_pack(c, d); +} +inline v_uint16x32 operator * (const v_uint16x32& a, const v_uint16x32& b) +{ + __m512i pl = _mm512_mullo_epi16(a.val, b.val); + __m512i ph = _mm512_mulhi_epu16(a.val, b.val); + __m512i p0 = _mm512_unpacklo_epi16(pl, ph); + __m512i p1 = _mm512_unpackhi_epi16(pl, ph); + + const __m512i m = _mm512_set1_epi32(65535); + return v_uint16x32(_mm512_packus_epi32(_mm512_min_epu32(p0, m), _mm512_min_epu32(p1, m))); +} +inline v_int16x32 operator * (const v_int16x32& a, const v_int16x32& b) +{ + __m512i pl = _mm512_mullo_epi16(a.val, b.val); + __m512i ph = _mm512_mulhi_epi16(a.val, b.val); + __m512i p0 = _mm512_unpacklo_epi16(pl, ph); + __m512i p1 = _mm512_unpackhi_epi16(pl, ph); + return v_int16x32(_mm512_packs_epi32(p0, p1)); +} + +inline v_uint8x64& operator *= (v_uint8x64& a, const v_uint8x64& b) +{ a = a * b; return a; } +inline v_int8x64& operator *= (v_int8x64& a, const v_int8x64& b) +{ a = a * b; return a; } +inline v_uint16x32& operator *= (v_uint16x32& a, const v_uint16x32& b) +{ a = a * b; return a; } +inline v_int16x32& operator *= (v_int16x32& a, const v_int16x32& b) +{ a = a * b; return a; } + +inline v_int16x32 v_mul_hi(const v_int16x32& a, const v_int16x32& b) { return v_int16x32(_mm512_mulhi_epi16(a.val, b.val)); } +inline v_uint16x32 v_mul_hi(const v_uint16x32& a, const v_uint16x32& b) { return v_uint16x32(_mm512_mulhi_epu16(a.val, b.val)); } + +// Multiply and expand +inline void v_mul_expand(const v_uint8x64& a, const v_uint8x64& b, + v_uint16x32& c, v_uint16x32& d) +{ + v_uint16x32 a0, a1, b0, b1; + v_expand(a, a0, a1); + v_expand(b, b0, b1); + c = v_mul_wrap(a0, b0); + d = v_mul_wrap(a1, b1); +} + +inline void v_mul_expand(const v_int8x64& a, const v_int8x64& b, + v_int16x32& c, v_int16x32& d) +{ + v_int16x32 a0, a1, b0, b1; + v_expand(a, a0, a1); + v_expand(b, b0, b1); + c = v_mul_wrap(a0, b0); + d = v_mul_wrap(a1, b1); +} + +inline void v_mul_expand(const v_int16x32& a, const v_int16x32& b, + v_int32x16& c, v_int32x16& d) +{ + v_int16x32 v0, v1; + v_zip(v_mul_wrap(a, b), v_mul_hi(a, b), v0, v1); + + c = v_reinterpret_as_s32(v0); + d = v_reinterpret_as_s32(v1); +} + +inline void v_mul_expand(const v_uint16x32& a, const v_uint16x32& b, + v_uint32x16& c, v_uint32x16& d) +{ + v_uint16x32 v0, v1; + v_zip(v_mul_wrap(a, b), v_mul_hi(a, b), v0, v1); + + c = v_reinterpret_as_u32(v0); + d = v_reinterpret_as_u32(v1); +} + +inline void v_mul_expand(const v_uint32x16& a, const v_uint32x16& b, + v_uint64x8& c, v_uint64x8& d) +{ + v_zip(v_uint64x8(_mm512_mul_epu32(a.val, b.val)), + v_uint64x8(_mm512_mul_epu32(_mm512_srli_epi64(a.val, 32), _mm512_srli_epi64(b.val, 32))), c, d); +} + +inline void v_mul_expand(const v_int32x16& a, const v_int32x16& b, + v_int64x8& c, v_int64x8& d) +{ + v_zip(v_int64x8(_mm512_mul_epi32(a.val, b.val)), + v_int64x8(_mm512_mul_epi32(_mm512_srli_epi64(a.val, 32), _mm512_srli_epi64(b.val, 32))), c, d); +} + +/** Bitwise shifts **/ +#define OPENCV_HAL_IMPL_AVX512_SHIFT_OP(_Tpuvec, _Tpsvec, suffix) \ + inline _Tpuvec operator << (const _Tpuvec& a, int imm) \ + { return _Tpuvec(_mm512_slli_##suffix(a.val, imm)); } \ + inline _Tpsvec operator << (const _Tpsvec& a, int imm) \ + { return 
_Tpsvec(_mm512_slli_##suffix(a.val, imm)); } \
+ inline _Tpuvec operator >> (const _Tpuvec& a, int imm) \
+ { return _Tpuvec(_mm512_srli_##suffix(a.val, imm)); } \
+ inline _Tpsvec operator >> (const _Tpsvec& a, int imm) \
+ { return _Tpsvec(_mm512_srai_##suffix(a.val, imm)); } \
+ template<int imm> \
+ inline _Tpuvec v_shl(const _Tpuvec& a) \
+ { return _Tpuvec(_mm512_slli_##suffix(a.val, imm)); } \
+ template<int imm> \
+ inline _Tpsvec v_shl(const _Tpsvec& a) \
+ { return _Tpsvec(_mm512_slli_##suffix(a.val, imm)); } \
+ template<int imm> \
+ inline _Tpuvec v_shr(const _Tpuvec& a) \
+ { return _Tpuvec(_mm512_srli_##suffix(a.val, imm)); } \
+ template<int imm> \
+ inline _Tpsvec v_shr(const _Tpsvec& a) \
+ { return _Tpsvec(_mm512_srai_##suffix(a.val, imm)); }
+
+OPENCV_HAL_IMPL_AVX512_SHIFT_OP(v_uint16x32, v_int16x32, epi16)
+OPENCV_HAL_IMPL_AVX512_SHIFT_OP(v_uint32x16, v_int32x16, epi32)
+OPENCV_HAL_IMPL_AVX512_SHIFT_OP(v_uint64x8, v_int64x8, epi64)
+
+
+/** Bitwise logic **/
+#define OPENCV_HAL_IMPL_AVX512_LOGIC_OP(_Tpvec, suffix, not_const) \
+ OPENCV_HAL_IMPL_AVX512_BIN_OP(&, _Tpvec, _mm512_and_##suffix) \
+ OPENCV_HAL_IMPL_AVX512_BIN_OP(|, _Tpvec, _mm512_or_##suffix) \
+ OPENCV_HAL_IMPL_AVX512_BIN_OP(^, _Tpvec, _mm512_xor_##suffix) \
+ inline _Tpvec operator ~ (const _Tpvec& a) \
+ { return _Tpvec(_mm512_xor_##suffix(a.val, not_const)); }
+
+OPENCV_HAL_IMPL_AVX512_LOGIC_OP(v_uint8x64, si512, _mm512_set1_epi32(-1))
+OPENCV_HAL_IMPL_AVX512_LOGIC_OP(v_int8x64, si512, _mm512_set1_epi32(-1))
+OPENCV_HAL_IMPL_AVX512_LOGIC_OP(v_uint16x32, si512, _mm512_set1_epi32(-1))
+OPENCV_HAL_IMPL_AVX512_LOGIC_OP(v_int16x32, si512, _mm512_set1_epi32(-1))
+OPENCV_HAL_IMPL_AVX512_LOGIC_OP(v_uint32x16, si512, _mm512_set1_epi32(-1))
+OPENCV_HAL_IMPL_AVX512_LOGIC_OP(v_int32x16, si512, _mm512_set1_epi32(-1))
+OPENCV_HAL_IMPL_AVX512_LOGIC_OP(v_uint64x8, si512, _mm512_set1_epi64(-1))
+OPENCV_HAL_IMPL_AVX512_LOGIC_OP(v_int64x8, si512, _mm512_set1_epi64(-1))
+OPENCV_HAL_IMPL_AVX512_LOGIC_OP(v_float32x16, ps, _mm512_castsi512_ps(_mm512_set1_epi32(-1)))
+OPENCV_HAL_IMPL_AVX512_LOGIC_OP(v_float64x8, pd, _mm512_castsi512_pd(_mm512_set1_epi32(-1)))
+
+/** Select **/
+#define OPENCV_HAL_IMPL_AVX512_SELECT(_Tpvec, suffix, zsuf) \
+ inline _Tpvec v_select(const _Tpvec& mask, const _Tpvec& a, const _Tpvec& b) \
+ { return _Tpvec(_mm512_mask_blend_##suffix(_mm512_cmp_##suffix##_mask(mask.val, _mm512_setzero_##zsuf(), _MM_CMPINT_EQ), a.val, b.val)); }
+
+OPENCV_HAL_IMPL_AVX512_SELECT(v_uint8x64, epi8, si512)
+OPENCV_HAL_IMPL_AVX512_SELECT(v_int8x64, epi8, si512)
+OPENCV_HAL_IMPL_AVX512_SELECT(v_uint16x32, epi16, si512)
+OPENCV_HAL_IMPL_AVX512_SELECT(v_int16x32, epi16, si512)
+OPENCV_HAL_IMPL_AVX512_SELECT(v_uint32x16, epi32, si512)
+OPENCV_HAL_IMPL_AVX512_SELECT(v_int32x16, epi32, si512)
+OPENCV_HAL_IMPL_AVX512_SELECT(v_uint64x8, epi64, si512)
+OPENCV_HAL_IMPL_AVX512_SELECT(v_int64x8, epi64, si512)
+OPENCV_HAL_IMPL_AVX512_SELECT(v_float32x16, ps, ps)
+OPENCV_HAL_IMPL_AVX512_SELECT(v_float64x8, pd, pd)
+
+/** Comparison **/
+#define OPENCV_HAL_IMPL_AVX512_CMP_INT(bin_op, imm8, _Tpvec, sufcmp, sufset, tval) \
+ inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \
+ { return _Tpvec(_mm512_maskz_set1_##sufset(_mm512_cmp_##sufcmp##_mask(a.val, b.val, imm8), tval)); }
+
+#define OPENCV_HAL_IMPL_AVX512_CMP_OP_INT(_Tpvec, sufcmp, sufset, tval) \
+ OPENCV_HAL_IMPL_AVX512_CMP_INT(==, _MM_CMPINT_EQ, _Tpvec, sufcmp, sufset, tval) \
+ OPENCV_HAL_IMPL_AVX512_CMP_INT(!=, _MM_CMPINT_NE, _Tpvec, sufcmp, sufset, tval) \
+ OPENCV_HAL_IMPL_AVX512_CMP_INT(<,
_MM_CMPINT_LT, _Tpvec, sufcmp, sufset, tval) \ + OPENCV_HAL_IMPL_AVX512_CMP_INT(>, _MM_CMPINT_NLE, _Tpvec, sufcmp, sufset, tval) \ + OPENCV_HAL_IMPL_AVX512_CMP_INT(<=, _MM_CMPINT_LE, _Tpvec, sufcmp, sufset, tval) \ + OPENCV_HAL_IMPL_AVX512_CMP_INT(>=, _MM_CMPINT_NLT, _Tpvec, sufcmp, sufset, tval) + +OPENCV_HAL_IMPL_AVX512_CMP_OP_INT(v_uint8x64, epu8, epi8, (char)-1) +OPENCV_HAL_IMPL_AVX512_CMP_OP_INT(v_int8x64, epi8, epi8, (char)-1) +OPENCV_HAL_IMPL_AVX512_CMP_OP_INT(v_uint16x32, epu16, epi16, (short)-1) +OPENCV_HAL_IMPL_AVX512_CMP_OP_INT(v_int16x32, epi16, epi16, (short)-1) +OPENCV_HAL_IMPL_AVX512_CMP_OP_INT(v_uint32x16, epu32, epi32, (int)-1) +OPENCV_HAL_IMPL_AVX512_CMP_OP_INT(v_int32x16, epi32, epi32, (int)-1) +OPENCV_HAL_IMPL_AVX512_CMP_OP_INT(v_uint64x8, epu64, epi64, (int64)-1) +OPENCV_HAL_IMPL_AVX512_CMP_OP_INT(v_int64x8, epi64, epi64, (int64)-1) + +#define OPENCV_HAL_IMPL_AVX512_CMP_FLT(bin_op, imm8, _Tpvec, sufcmp, sufset, tval) \ + inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \ + { return _Tpvec(_mm512_castsi512_##sufcmp(_mm512_maskz_set1_##sufset(_mm512_cmp_##sufcmp##_mask(a.val, b.val, imm8), tval))); } + +#define OPENCV_HAL_IMPL_AVX512_CMP_OP_FLT(_Tpvec, sufcmp, sufset, tval) \ + OPENCV_HAL_IMPL_AVX512_CMP_FLT(==, _CMP_EQ_OQ, _Tpvec, sufcmp, sufset, tval) \ + OPENCV_HAL_IMPL_AVX512_CMP_FLT(!=, _CMP_NEQ_OQ, _Tpvec, sufcmp, sufset, tval) \ + OPENCV_HAL_IMPL_AVX512_CMP_FLT(<, _CMP_LT_OQ, _Tpvec, sufcmp, sufset, tval) \ + OPENCV_HAL_IMPL_AVX512_CMP_FLT(>, _CMP_GT_OQ, _Tpvec, sufcmp, sufset, tval) \ + OPENCV_HAL_IMPL_AVX512_CMP_FLT(<=, _CMP_LE_OQ, _Tpvec, sufcmp, sufset, tval) \ + OPENCV_HAL_IMPL_AVX512_CMP_FLT(>=, _CMP_GE_OQ, _Tpvec, sufcmp, sufset, tval) + +OPENCV_HAL_IMPL_AVX512_CMP_OP_FLT(v_float32x16, ps, epi32, (int)-1) +OPENCV_HAL_IMPL_AVX512_CMP_OP_FLT(v_float64x8, pd, epi64, (int64)-1) + +inline v_float32x16 v_not_nan(const v_float32x16& a) +{ return v_float32x16(_mm512_castsi512_ps(_mm512_maskz_set1_epi32(_mm512_cmp_ps_mask(a.val, a.val, _CMP_ORD_Q), (int)-1))); } +inline v_float64x8 v_not_nan(const v_float64x8& a) +{ return v_float64x8(_mm512_castsi512_pd(_mm512_maskz_set1_epi64(_mm512_cmp_pd_mask(a.val, a.val, _CMP_ORD_Q), (int64)-1))); } + +/** min/max **/ +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_min, v_uint8x64, _mm512_min_epu8) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_max, v_uint8x64, _mm512_max_epu8) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_min, v_int8x64, _mm512_min_epi8) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_max, v_int8x64, _mm512_max_epi8) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_min, v_uint16x32, _mm512_min_epu16) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_max, v_uint16x32, _mm512_max_epu16) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_min, v_int16x32, _mm512_min_epi16) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_max, v_int16x32, _mm512_max_epi16) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_min, v_uint32x16, _mm512_min_epu32) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_max, v_uint32x16, _mm512_max_epu32) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_min, v_int32x16, _mm512_min_epi32) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_max, v_int32x16, _mm512_max_epi32) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_min, v_uint64x8, _mm512_min_epu64) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_max, v_uint64x8, _mm512_max_epu64) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_min, v_int64x8, _mm512_min_epi64) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_max, v_int64x8, _mm512_max_epi64) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_min, v_float32x16, _mm512_min_ps) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_max, v_float32x16, _mm512_max_ps) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_min, v_float64x8, 
_mm512_min_pd) +OPENCV_HAL_IMPL_AVX512_BIN_FUNC(v_max, v_float64x8, _mm512_max_pd) + +/** Rotate **/ +namespace { + template + struct _v_rotate_right { static inline v_int8x64 eval(const v_int8x64&, const v_int8x64&) { return v_int8x64(); }}; + template + struct _v_rotate_right { static inline v_int8x64 eval(const v_int8x64& a, const v_int8x64& b) + { + return v_int8x64(_mm512_or_si512(_mm512_srli_epi32(_mm512_alignr_epi32(b.val, a.val, imm32 ), imm4 *8), + _mm512_slli_epi32(_mm512_alignr_epi32(b.val, a.val, imm32 + 1), (4-imm4)*8))); + }}; + template + struct _v_rotate_right { static inline v_int8x64 eval(const v_int8x64& a, const v_int8x64& b) + { + return v_int8x64(_mm512_or_si512(_mm512_srli_epi32(_mm512_alignr_epi32(b.val, a.val, 15), imm4 *8), + _mm512_slli_epi32( b.val, (4-imm4)*8))); + }}; + template + struct _v_rotate_right { static inline v_int8x64 eval(const v_int8x64&, const v_int8x64& b) + { + return v_int8x64(_mm512_or_si512(_mm512_srli_epi32(_mm512_alignr_epi32(_mm512_setzero_si512(), b.val, imm32 - 16), imm4 *8), + _mm512_slli_epi32(_mm512_alignr_epi32(_mm512_setzero_si512(), b.val, imm32 - 15), (4-imm4)*8))); + }}; + template + struct _v_rotate_right { static inline v_int8x64 eval(const v_int8x64&, const v_int8x64& b) + { return v_int8x64(_mm512_srli_epi32(_mm512_alignr_epi32(_mm512_setzero_si512(), b.val, 15), imm4*8)); }}; + template + struct _v_rotate_right { static inline v_int8x64 eval(const v_int8x64& a, const v_int8x64& b) + { return v_int8x64(_mm512_alignr_epi32(b.val, a.val, imm32)); }}; + template<> + struct _v_rotate_right { static inline v_int8x64 eval(const v_int8x64& a, const v_int8x64&) { return a; }}; + template + struct _v_rotate_right { static inline v_int8x64 eval(const v_int8x64&, const v_int8x64& b) + { return v_int8x64(_mm512_alignr_epi32(_mm512_setzero_si512(), b.val, imm32 - 16)); }}; + template<> + struct _v_rotate_right { static inline v_int8x64 eval(const v_int8x64&, const v_int8x64& b) { return b; }}; + template<> + struct _v_rotate_right { static inline v_int8x64 eval(const v_int8x64&, const v_int8x64&) { return v_int8x64(); }}; +} +template inline v_int8x64 v_rotate_right(const v_int8x64& a, const v_int8x64& b) +{ + return imm >= 128 ? 
v_int8x64() : +#if CV_AVX_512VBMI + v_int8x64(_mm512_permutex2var_epi8(a.val, + _v512_set_epu8(0x3f + imm, 0x3e + imm, 0x3d + imm, 0x3c + imm, 0x3b + imm, 0x3a + imm, 0x39 + imm, 0x38 + imm, + 0x37 + imm, 0x36 + imm, 0x35 + imm, 0x34 + imm, 0x33 + imm, 0x32 + imm, 0x31 + imm, 0x30 + imm, + 0x2f + imm, 0x2e + imm, 0x2d + imm, 0x2c + imm, 0x2b + imm, 0x2a + imm, 0x29 + imm, 0x28 + imm, + 0x27 + imm, 0x26 + imm, 0x25 + imm, 0x24 + imm, 0x23 + imm, 0x22 + imm, 0x21 + imm, 0x20 + imm, + 0x1f + imm, 0x1e + imm, 0x1d + imm, 0x1c + imm, 0x1b + imm, 0x1a + imm, 0x19 + imm, 0x18 + imm, + 0x17 + imm, 0x16 + imm, 0x15 + imm, 0x14 + imm, 0x13 + imm, 0x12 + imm, 0x11 + imm, 0x10 + imm, + 0x0f + imm, 0x0e + imm, 0x0d + imm, 0x0c + imm, 0x0b + imm, 0x0a + imm, 0x09 + imm, 0x08 + imm, + 0x07 + imm, 0x06 + imm, 0x05 + imm, 0x04 + imm, 0x03 + imm, 0x02 + imm, 0x01 + imm, 0x00 + imm), b.val)); +#else + _v_rotate_right 15), imm/4>::eval(a, b); +#endif +} +template +inline v_int8x64 v_rotate_left(const v_int8x64& a, const v_int8x64& b) +{ + if (imm == 0) return a; + if (imm == 64) return b; + if (imm >= 128) return v_int8x64(); +#if CV_AVX_512VBMI + return v_int8x64(_mm512_permutex2var_epi8(b.val, + _v512_set_epi8(0x7f - imm,0x7e - imm,0x7d - imm,0x7c - imm,0x7b - imm,0x7a - imm,0x79 - imm,0x78 - imm, + 0x77 - imm,0x76 - imm,0x75 - imm,0x74 - imm,0x73 - imm,0x72 - imm,0x71 - imm,0x70 - imm, + 0x6f - imm,0x6e - imm,0x6d - imm,0x6c - imm,0x6b - imm,0x6a - imm,0x69 - imm,0x68 - imm, + 0x67 - imm,0x66 - imm,0x65 - imm,0x64 - imm,0x63 - imm,0x62 - imm,0x61 - imm,0x60 - imm, + 0x5f - imm,0x5e - imm,0x5d - imm,0x5c - imm,0x5b - imm,0x5a - imm,0x59 - imm,0x58 - imm, + 0x57 - imm,0x56 - imm,0x55 - imm,0x54 - imm,0x53 - imm,0x52 - imm,0x51 - imm,0x50 - imm, + 0x4f - imm,0x4e - imm,0x4d - imm,0x4c - imm,0x4b - imm,0x4a - imm,0x49 - imm,0x48 - imm, + 0x47 - imm,0x46 - imm,0x45 - imm,0x44 - imm,0x43 - imm,0x42 - imm,0x41 - imm,0x40 - imm), a.val)); +#else + return imm < 64 ? 
v_rotate_right<64 - imm>(b, a) : v_rotate_right<128 - imm>(v512_setzero_s8(), b);
+#endif
+}
+template<int imm>
+inline v_int8x64 v_rotate_right(const v_int8x64& a)
+{
+ if (imm == 0) return a;
+ if (imm >= 64) return v_int8x64();
+#if CV_AVX_512VBMI
+ return v_int8x64(_mm512_maskz_permutexvar_epi8(0xFFFFFFFFFFFFFFFF >> imm,
+ _v512_set_epu8(0x3f + imm,0x3e + imm,0x3d + imm,0x3c + imm,0x3b + imm,0x3a + imm,0x39 + imm,0x38 + imm,
+ 0x37 + imm,0x36 + imm,0x35 + imm,0x34 + imm,0x33 + imm,0x32 + imm,0x31 + imm,0x30 + imm,
+ 0x2f + imm,0x2e + imm,0x2d + imm,0x2c + imm,0x2b + imm,0x2a + imm,0x29 + imm,0x28 + imm,
+ 0x27 + imm,0x26 + imm,0x25 + imm,0x24 + imm,0x23 + imm,0x22 + imm,0x21 + imm,0x20 + imm,
+ 0x1f + imm,0x1e + imm,0x1d + imm,0x1c + imm,0x1b + imm,0x1a + imm,0x19 + imm,0x18 + imm,
+ 0x17 + imm,0x16 + imm,0x15 + imm,0x14 + imm,0x13 + imm,0x12 + imm,0x11 + imm,0x10 + imm,
+ 0x0f + imm,0x0e + imm,0x0d + imm,0x0c + imm,0x0b + imm,0x0a + imm,0x09 + imm,0x08 + imm,
+ 0x07 + imm,0x06 + imm,0x05 + imm,0x04 + imm,0x03 + imm,0x02 + imm,0x01 + imm,0x00 + imm), a.val));
+#else
+ return v_rotate_right<imm>(a, v512_setzero_s8());
+#endif
+}
+template<int imm>
+inline v_int8x64 v_rotate_left(const v_int8x64& a)
+{
+ if (imm == 0) return a;
+ if (imm >= 64) return v_int8x64();
+#if CV_AVX_512VBMI
+ return v_int8x64(_mm512_maskz_permutexvar_epi8(0xFFFFFFFFFFFFFFFF << imm,
+ _v512_set_epi8(0x3f - imm,0x3e - imm,0x3d - imm,0x3c - imm,0x3b - imm,0x3a - imm,0x39 - imm,0x38 - imm,
+ 0x37 - imm,0x36 - imm,0x35 - imm,0x34 - imm,0x33 - imm,0x32 - imm,0x31 - imm,0x30 - imm,
+ 0x2f - imm,0x2e - imm,0x2d - imm,0x2c - imm,0x2b - imm,0x2a - imm,0x29 - imm,0x28 - imm,
+ 0x27 - imm,0x26 - imm,0x25 - imm,0x24 - imm,0x23 - imm,0x22 - imm,0x21 - imm,0x20 - imm,
+ 0x1f - imm,0x1e - imm,0x1d - imm,0x1c - imm,0x1b - imm,0x1a - imm,0x19 - imm,0x18 - imm,
+ 0x17 - imm,0x16 - imm,0x15 - imm,0x14 - imm,0x13 - imm,0x12 - imm,0x11 - imm,0x10 - imm,
+ 0x0f - imm,0x0e - imm,0x0d - imm,0x0c - imm,0x0b - imm,0x0a - imm,0x09 - imm,0x08 - imm,
+ 0x07 - imm,0x06 - imm,0x05 - imm,0x04 - imm,0x03 - imm,0x02 - imm,0x01 - imm,0x00 - imm), a.val));
+#else
+ return v_rotate_right<64 - imm>(v512_setzero_s8(), a);
+#endif
+}
+
+#define OPENCV_HAL_IMPL_AVX512_ROTATE_PM(_Tpvec, suffix) \
+template<int imm> inline _Tpvec v_rotate_left(const _Tpvec& a, const _Tpvec& b) \
+{ return v_reinterpret_as_##suffix(v_rotate_left<imm>(v_reinterpret_as_s8(a), v_reinterpret_as_s8(b))); } \
+template<int imm> inline _Tpvec v_rotate_right(const _Tpvec& a, const _Tpvec& b) \
+{ return v_reinterpret_as_##suffix(v_rotate_right<imm>(v_reinterpret_as_s8(a), v_reinterpret_as_s8(b))); } \
+template<int imm> inline _Tpvec v_rotate_left(const _Tpvec& a) \
+{ return v_reinterpret_as_##suffix(v_rotate_left<imm>(v_reinterpret_as_s8(a))); } \
+template<int imm> inline _Tpvec v_rotate_right(const _Tpvec& a) \
+{ return v_reinterpret_as_##suffix(v_rotate_right<imm>(v_reinterpret_as_s8(a))); }
+
+#define OPENCV_HAL_IMPL_AVX512_ROTATE_EC(_Tpvec, suffix) \
+template<int imm> \
+inline _Tpvec v_rotate_left(const _Tpvec& a, const _Tpvec& b) \
+{ \
+ enum { SHIFT2 = (_Tpvec::nlanes - imm) }; \
+ enum { MASK = ((1 << _Tpvec::nlanes) - 1) }; \
+ if (imm == 0) return a; \
+ if (imm == _Tpvec::nlanes) return b; \
+ if (imm >= 2*_Tpvec::nlanes) return _Tpvec::zero(); \
+ return _Tpvec(_mm512_mask_expand_##suffix(_mm512_maskz_compress_##suffix((MASK << SHIFT2)&MASK, b.val), (MASK << (imm))&MASK, a.val)); \
+} \
+template<int imm> \
+inline _Tpvec v_rotate_right(const _Tpvec& a, const _Tpvec& b) \
+{ \
+ enum { SHIFT2 = (_Tpvec::nlanes - imm) }; \
+ enum { MASK = ((1 <<
_Tpvec::nlanes) - 1) }; \
+ if (imm == 0) return a; \
+ if (imm == _Tpvec::nlanes) return b; \
+ if (imm >= 2*_Tpvec::nlanes) return _Tpvec::zero(); \
+ return _Tpvec(_mm512_mask_expand_##suffix(_mm512_maskz_compress_##suffix((MASK << (imm))&MASK, a.val), (MASK << SHIFT2)&MASK, b.val)); \
+} \
+template<int imm> \
+inline _Tpvec v_rotate_left(const _Tpvec& a) \
+{ \
+ if (imm == 0) return a; \
+ if (imm >= _Tpvec::nlanes) return _Tpvec::zero(); \
+ return _Tpvec(_mm512_maskz_expand_##suffix((1 << _Tpvec::nlanes) - (1 << (imm)), a.val)); \
+} \
+template<int imm> \
+inline _Tpvec v_rotate_right(const _Tpvec& a) \
+{ \
+ if (imm == 0) return a; \
+ if (imm >= _Tpvec::nlanes) return _Tpvec::zero(); \
+ return _Tpvec(_mm512_maskz_compress_##suffix((1 << _Tpvec::nlanes) - (1 << (imm)), a.val)); \
+}
+
+OPENCV_HAL_IMPL_AVX512_ROTATE_PM(v_uint8x64, u8)
+OPENCV_HAL_IMPL_AVX512_ROTATE_PM(v_uint16x32, u16)
+OPENCV_HAL_IMPL_AVX512_ROTATE_PM(v_int16x32, s16)
+OPENCV_HAL_IMPL_AVX512_ROTATE_EC(v_uint32x16, epi32)
+OPENCV_HAL_IMPL_AVX512_ROTATE_EC(v_int32x16, epi32)
+OPENCV_HAL_IMPL_AVX512_ROTATE_EC(v_uint64x8, epi64)
+OPENCV_HAL_IMPL_AVX512_ROTATE_EC(v_int64x8, epi64)
+OPENCV_HAL_IMPL_AVX512_ROTATE_EC(v_float32x16, ps)
+OPENCV_HAL_IMPL_AVX512_ROTATE_EC(v_float64x8, pd)
+
+/** Reverse **/
+inline v_uint8x64 v_reverse(const v_uint8x64 &a)
+{
+#if CV_AVX_512VBMI
+ static const __m512i perm = _mm512_set_epi32(
+ 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f,
+ 0x10111213, 0x14151617, 0x18191a1b, 0x1c1d1e1f,
+ 0x20212223, 0x24252627, 0x28292a2b, 0x2c2d2e2f,
+ 0x30313233, 0x34353637, 0x38393a3b, 0x3c3d3e3f);
+ return v_uint8x64(_mm512_permutexvar_epi8(perm, a.val));
+#else
+ static const __m512i shuf = _mm512_set_epi32(
+ 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f,
+ 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f,
+ 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f,
+ 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
+ static const __m512i perm = _mm512_set_epi64(1, 0, 3, 2, 5, 4, 7, 6);
+ __m512i vec = _mm512_shuffle_epi8(a.val, shuf);
+ return v_uint8x64(_mm512_permutexvar_epi64(perm, vec));
+#endif
+}
+
+inline v_int8x64 v_reverse(const v_int8x64 &a)
+{ return v_reinterpret_as_s8(v_reverse(v_reinterpret_as_u8(a))); }
+
+inline v_uint16x32 v_reverse(const v_uint16x32 &a)
+{
+#if CV_AVX_512VBMI
+ static const __m512i perm = _mm512_set_epi32(
+ 0x00000001, 0x00020003, 0x00040005, 0x00060007,
+ 0x00080009, 0x000a000b, 0x000c000d, 0x000e000f,
+ 0x00100011, 0x00120013, 0x00140015, 0x00160017,
+ 0x00180019, 0x001a001b, 0x001c001d, 0x001e001f);
+ return v_uint16x32(_mm512_permutexvar_epi16(perm, a.val));
+#else
+ static const __m512i shuf = _mm512_set_epi32(
+ 0x01000302, 0x05040706, 0x09080b0a, 0x0d0c0f0e,
+ 0x01000302, 0x05040706, 0x09080b0a, 0x0d0c0f0e,
+ 0x01000302, 0x05040706, 0x09080b0a, 0x0d0c0f0e,
+ 0x01000302, 0x05040706, 0x09080b0a, 0x0d0c0f0e);
+ static const __m512i perm = _mm512_set_epi64(1, 0, 3, 2, 5, 4, 7, 6);
+ __m512i vec = _mm512_shuffle_epi8(a.val, shuf);
+ return v_uint16x32(_mm512_permutexvar_epi64(perm, vec));
+#endif
+}
+
+inline v_int16x32 v_reverse(const v_int16x32 &a)
+{ return v_reinterpret_as_s16(v_reverse(v_reinterpret_as_u16(a))); }
+
+inline v_uint32x16 v_reverse(const v_uint32x16 &a)
+{
+ static const __m512i perm = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,14, 15);
+ return v_uint32x16(_mm512_permutexvar_epi32(perm, a.val));
+}
+
+inline v_int32x16 v_reverse(const v_int32x16 &a)
+{ return v_reinterpret_as_s32(v_reverse(v_reinterpret_as_u32(a))); }
+
+inline v_float32x16
v_reverse(const v_float32x16 &a) +{ return v_reinterpret_as_f32(v_reverse(v_reinterpret_as_u32(a))); } + +inline v_uint64x8 v_reverse(const v_uint64x8 &a) +{ + static const __m512i perm = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7); + return v_uint64x8(_mm512_permutexvar_epi64(perm, a.val)); +} + +inline v_int64x8 v_reverse(const v_int64x8 &a) +{ return v_reinterpret_as_s64(v_reverse(v_reinterpret_as_u64(a))); } + +inline v_float64x8 v_reverse(const v_float64x8 &a) +{ return v_reinterpret_as_f64(v_reverse(v_reinterpret_as_u64(a))); } + +////////// Reduce ///////// + +/** Reduce **/ +#define OPENCV_HAL_IMPL_AVX512_REDUCE_ADD64(a, b) a + b +#define OPENCV_HAL_IMPL_AVX512_REDUCE_8(sctype, func, _Tpvec, ifunc, scop) \ + inline sctype v_reduce_##func(const _Tpvec& a) \ + { __m256i half = _mm256_##ifunc(_v512_extract_low(a.val), _v512_extract_high(a.val)); \ + sctype CV_DECL_ALIGNED(64) idx[2]; \ + _mm_store_si128((__m128i*)idx, _mm_##ifunc(_mm256_castsi256_si128(half), _mm256_extracti128_si256(half, 1))); \ + return scop(idx[0], idx[1]); } +OPENCV_HAL_IMPL_AVX512_REDUCE_8(uint64, min, v_uint64x8, min_epu64, min) +OPENCV_HAL_IMPL_AVX512_REDUCE_8(uint64, max, v_uint64x8, max_epu64, max) +OPENCV_HAL_IMPL_AVX512_REDUCE_8(uint64, sum, v_uint64x8, add_epi64, OPENCV_HAL_IMPL_AVX512_REDUCE_ADD64) +OPENCV_HAL_IMPL_AVX512_REDUCE_8(int64, min, v_int64x8, min_epi64, min) +OPENCV_HAL_IMPL_AVX512_REDUCE_8(int64, max, v_int64x8, max_epi64, max) +OPENCV_HAL_IMPL_AVX512_REDUCE_8(int64, sum, v_int64x8, add_epi64, OPENCV_HAL_IMPL_AVX512_REDUCE_ADD64) + +#define OPENCV_HAL_IMPL_AVX512_REDUCE_8F(func, ifunc, scop) \ + inline double v_reduce_##func(const v_float64x8& a) \ + { __m256d half = _mm256_##ifunc(_v512_extract_low(a.val), _v512_extract_high(a.val)); \ + double CV_DECL_ALIGNED(64) idx[2]; \ + _mm_store_pd(idx, _mm_##ifunc(_mm256_castpd256_pd128(half), _mm256_extractf128_pd(half, 1))); \ + return scop(idx[0], idx[1]); } +OPENCV_HAL_IMPL_AVX512_REDUCE_8F(min, min_pd, min) +OPENCV_HAL_IMPL_AVX512_REDUCE_8F(max, max_pd, max) +OPENCV_HAL_IMPL_AVX512_REDUCE_8F(sum, add_pd, OPENCV_HAL_IMPL_AVX512_REDUCE_ADD64) + +#define OPENCV_HAL_IMPL_AVX512_REDUCE_16(sctype, func, _Tpvec, ifunc) \ + inline sctype v_reduce_##func(const _Tpvec& a) \ + { __m256i half = _mm256_##ifunc(_v512_extract_low(a.val), _v512_extract_high(a.val)); \ + __m128i quarter = _mm_##ifunc(_mm256_castsi256_si128(half), _mm256_extracti128_si256(half, 1)); \ + quarter = _mm_##ifunc(quarter, _mm_srli_si128(quarter, 8)); \ + quarter = _mm_##ifunc(quarter, _mm_srli_si128(quarter, 4)); \ + return (sctype)_mm_cvtsi128_si32(quarter); } +OPENCV_HAL_IMPL_AVX512_REDUCE_16(uint, min, v_uint32x16, min_epu32) +OPENCV_HAL_IMPL_AVX512_REDUCE_16(uint, max, v_uint32x16, max_epu32) +OPENCV_HAL_IMPL_AVX512_REDUCE_16(int, min, v_int32x16, min_epi32) +OPENCV_HAL_IMPL_AVX512_REDUCE_16(int, max, v_int32x16, max_epi32) + +#define OPENCV_HAL_IMPL_AVX512_REDUCE_16F(func, ifunc) \ + inline float v_reduce_##func(const v_float32x16& a) \ + { __m256 half = _mm256_##ifunc(_v512_extract_low(a.val), _v512_extract_high(a.val)); \ + __m128 quarter = _mm_##ifunc(_mm256_castps256_ps128(half), _mm256_extractf128_ps(half, 1)); \ + quarter = _mm_##ifunc(quarter, _mm_permute_ps(quarter, _MM_SHUFFLE(0, 0, 3, 2))); \ + quarter = _mm_##ifunc(quarter, _mm_permute_ps(quarter, _MM_SHUFFLE(0, 0, 0, 1))); \ + return _mm_cvtss_f32(quarter); } +OPENCV_HAL_IMPL_AVX512_REDUCE_16F(min, min_ps) +OPENCV_HAL_IMPL_AVX512_REDUCE_16F(max, max_ps) + +inline float v_reduce_sum(const v_float32x16& a) +{ + __m256 half 
= _mm256_add_ps(_v512_extract_low(a.val), _v512_extract_high(a.val)); + __m128 quarter = _mm_add_ps(_mm256_castps256_ps128(half), _mm256_extractf128_ps(half, 1)); + quarter = _mm_hadd_ps(quarter, quarter); + return _mm_cvtss_f32(_mm_hadd_ps(quarter, quarter)); +} +inline int v_reduce_sum(const v_int32x16& a) +{ + __m256i half = _mm256_add_epi32(_v512_extract_low(a.val), _v512_extract_high(a.val)); + __m128i quarter = _mm_add_epi32(_mm256_castsi256_si128(half), _mm256_extracti128_si256(half, 1)); + quarter = _mm_hadd_epi32(quarter, quarter); + return _mm_cvtsi128_si32(_mm_hadd_epi32(quarter, quarter)); +} +inline uint v_reduce_sum(const v_uint32x16& a) +{ return (uint)v_reduce_sum(v_reinterpret_as_s32(a)); } + +#define OPENCV_HAL_IMPL_AVX512_REDUCE_32(sctype, func, _Tpvec, ifunc) \ + inline sctype v_reduce_##func(const _Tpvec& a) \ + { __m256i half = _mm256_##ifunc(_v512_extract_low(a.val), _v512_extract_high(a.val)); \ + __m128i quarter = _mm_##ifunc(_mm256_castsi256_si128(half), _mm256_extracti128_si256(half, 1)); \ + quarter = _mm_##ifunc(quarter, _mm_srli_si128(quarter, 8)); \ + quarter = _mm_##ifunc(quarter, _mm_srli_si128(quarter, 4)); \ + quarter = _mm_##ifunc(quarter, _mm_srli_si128(quarter, 2)); \ + return (sctype)_mm_cvtsi128_si32(quarter); } +OPENCV_HAL_IMPL_AVX512_REDUCE_32(ushort, min, v_uint16x32, min_epu16) +OPENCV_HAL_IMPL_AVX512_REDUCE_32(ushort, max, v_uint16x32, max_epu16) +OPENCV_HAL_IMPL_AVX512_REDUCE_32(short, min, v_int16x32, min_epi16) +OPENCV_HAL_IMPL_AVX512_REDUCE_32(short, max, v_int16x32, max_epi16) + +inline int v_reduce_sum(const v_int16x32& a) +{ return v_reduce_sum(v_expand_low(a) + v_expand_high(a)); } +inline uint v_reduce_sum(const v_uint16x32& a) +{ return v_reduce_sum(v_expand_low(a) + v_expand_high(a)); } + +#define OPENCV_HAL_IMPL_AVX512_REDUCE_64(sctype, func, _Tpvec, ifunc) \ + inline sctype v_reduce_##func(const _Tpvec& a) \ + { __m256i half = _mm256_##ifunc(_v512_extract_low(a.val), _v512_extract_high(a.val)); \ + __m128i quarter = _mm_##ifunc(_mm256_castsi256_si128(half), _mm256_extracti128_si256(half, 1)); \ + quarter = _mm_##ifunc(quarter, _mm_srli_si128(quarter, 8)); \ + quarter = _mm_##ifunc(quarter, _mm_srli_si128(quarter, 4)); \ + quarter = _mm_##ifunc(quarter, _mm_srli_si128(quarter, 2)); \ + quarter = _mm_##ifunc(quarter, _mm_srli_si128(quarter, 1)); \ + return (sctype)_mm_cvtsi128_si32(quarter); } +OPENCV_HAL_IMPL_AVX512_REDUCE_64(uchar, min, v_uint8x64, min_epu8) +OPENCV_HAL_IMPL_AVX512_REDUCE_64(uchar, max, v_uint8x64, max_epu8) +OPENCV_HAL_IMPL_AVX512_REDUCE_64(schar, min, v_int8x64, min_epi8) +OPENCV_HAL_IMPL_AVX512_REDUCE_64(schar, max, v_int8x64, max_epi8) + +#define OPENCV_HAL_IMPL_AVX512_REDUCE_64_SUM(sctype, _Tpvec, suffix) \ + inline sctype v_reduce_sum(const _Tpvec& a) \ + { __m512i a16 = _mm512_add_epi16(_mm512_cvt##suffix##_epi16(_v512_extract_low(a.val)), \ + _mm512_cvt##suffix##_epi16(_v512_extract_high(a.val))); \ + a16 = _mm512_cvtepi16_epi32(_mm256_add_epi16(_v512_extract_low(a16), _v512_extract_high(a16))); \ + __m256i a8 = _mm256_add_epi32(_v512_extract_low(a16), _v512_extract_high(a16)); \ + __m128i a4 = _mm_add_epi32(_mm256_castsi256_si128(a8), _mm256_extracti128_si256(a8, 1)); \ + a4 = _mm_hadd_epi32(a4, a4); \ + return (sctype)_mm_cvtsi128_si32(_mm_hadd_epi32(a4, a4)); } +OPENCV_HAL_IMPL_AVX512_REDUCE_64_SUM(uint, v_uint8x64, epu8) +OPENCV_HAL_IMPL_AVX512_REDUCE_64_SUM(int, v_int8x64, epi8) + +inline v_float32x16 v_reduce_sum4(const v_float32x16& a, const v_float32x16& b, + const v_float32x16& c, const 
v_float32x16& d) +{ + __m256 abl = _mm256_hadd_ps(_v512_extract_low(a.val), _v512_extract_low(b.val)); + __m256 abh = _mm256_hadd_ps(_v512_extract_high(a.val), _v512_extract_high(b.val)); + __m256 cdl = _mm256_hadd_ps(_v512_extract_low(c.val), _v512_extract_low(d.val)); + __m256 cdh = _mm256_hadd_ps(_v512_extract_high(c.val), _v512_extract_high(d.val)); + return v_float32x16(_v512_combine(_mm256_hadd_ps(abl, cdl), _mm256_hadd_ps(abh, cdh))); +} + +inline unsigned v_reduce_sad(const v_uint8x64& a, const v_uint8x64& b) +{ + __m512i val = _mm512_sad_epu8(a.val, b.val); + __m256i half = _mm256_add_epi32(_v512_extract_low(val), _v512_extract_high(val)); + __m128i quarter = _mm_add_epi32(_mm256_castsi256_si128(half), _mm256_extracti128_si256(half, 1)); + return (unsigned)_mm_cvtsi128_si32(_mm_add_epi32(quarter, _mm_unpackhi_epi64(quarter, quarter))); +} +inline unsigned v_reduce_sad(const v_int8x64& a, const v_int8x64& b) +{ + __m512i val = _mm512_set1_epi8(-128); + val = _mm512_sad_epu8(_mm512_add_epi8(a.val, val), _mm512_add_epi8(b.val, val)); + __m256i half = _mm256_add_epi32(_v512_extract_low(val), _v512_extract_high(val)); + __m128i quarter = _mm_add_epi32(_mm256_castsi256_si128(half), _mm256_extracti128_si256(half, 1)); + return (unsigned)_mm_cvtsi128_si32(_mm_add_epi32(quarter, _mm_unpackhi_epi64(quarter, quarter))); +} +inline unsigned v_reduce_sad(const v_uint16x32& a, const v_uint16x32& b) +{ return v_reduce_sum(v_add_wrap(a - b, b - a)); } +inline unsigned v_reduce_sad(const v_int16x32& a, const v_int16x32& b) +{ return v_reduce_sum(v_reinterpret_as_u16(v_sub_wrap(v_max(a, b), v_min(a, b)))); } +inline unsigned v_reduce_sad(const v_uint32x16& a, const v_uint32x16& b) +{ return v_reduce_sum(v_max(a, b) - v_min(a, b)); } +inline unsigned v_reduce_sad(const v_int32x16& a, const v_int32x16& b) +{ return v_reduce_sum(v_reinterpret_as_u32(v_max(a, b) - v_min(a, b))); } +inline float v_reduce_sad(const v_float32x16& a, const v_float32x16& b) +{ return v_reduce_sum((a - b) & v_float32x16(_mm512_castsi512_ps(_mm512_set1_epi32(0x7fffffff)))); } +inline double v_reduce_sad(const v_float64x8& a, const v_float64x8& b) +{ return v_reduce_sum((a - b) & v_float64x8(_mm512_castsi512_pd(_mm512_set1_epi64(0x7fffffffffffffff)))); } + +/** Popcount **/ +inline v_uint8x64 v_popcount(const v_int8x64& a) +{ +#if CV_AVX_512BITALG + return v_uint8x64(_mm512_popcnt_epi8(a.val)); +#elif CV_AVX_512VBMI + __m512i _popcnt_table0 = _v512_set_epu8(7, 6, 6, 5, 6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, + 5, 4, 4, 3, 4, 3, 3, 2, 4, 3, 3, 2, 3, 2, 2, 1, + 5, 4, 4, 3, 4, 3, 3, 2, 4, 3, 3, 2, 3, 2, 2, 1, + 4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0); + __m512i _popcnt_table1 = _v512_set_epu8(7, 6, 6, 5, 6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, + 6, 5, 5, 4, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 3, 2, + 6, 5, 5, 4, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 3, 2, + 5, 4, 4, 3, 4, 3, 3, 2, 4, 3, 3, 2, 3, 2, 2, 1); + return v_uint8x64(_mm512_sub_epi8(_mm512_permutex2var_epi8(_popcnt_table0, a.val, _popcnt_table1), _mm512_movm_epi8(_mm512_movepi8_mask(a.val)))); +#else + __m512i _popcnt_table = _mm512_set4_epi32(0x04030302, 0x03020201, 0x03020201, 0x02010100); + __m512i _popcnt_mask = _mm512_set1_epi8(0x0F); + + return v_uint8x64(_mm512_add_epi8(_mm512_shuffle_epi8(_popcnt_table, _mm512_and_si512( a.val, _popcnt_mask)), + _mm512_shuffle_epi8(_popcnt_table, _mm512_and_si512(_mm512_srli_epi16(a.val, 4), _popcnt_mask)))); +#endif +} +inline v_uint16x32 v_popcount(const v_int16x32& a) +{ +#if CV_AVX_512BITALG + return v_uint16x32(_mm512_popcnt_epi16(a.val)); 
+#elif CV_AVX_512VPOPCNTDQ + __m512i zero = _mm512_setzero_si512(); + return v_uint16x32(_mm512_packs_epi32(_mm512_popcnt_epi32(_mm512_unpacklo_epi16(a.val, zero)), + _mm512_popcnt_epi32(_mm512_unpackhi_epi16(a.val, zero)))); +#else + v_uint8x64 p = v_popcount(v_reinterpret_as_s8(a)); + p += v_rotate_right<1>(p); + return v_reinterpret_as_u16(p) & v512_setall_u16(0x00ff); +#endif +} +inline v_uint32x16 v_popcount(const v_int32x16& a) +{ +#if CV_AVX_512VPOPCNTDQ + return v_uint32x16(_mm512_popcnt_epi32(a.val)); +#else + v_uint8x64 p = v_popcount(v_reinterpret_as_s8(a)); + p += v_rotate_right<1>(p); + p += v_rotate_right<2>(p); + return v_reinterpret_as_u32(p) & v512_setall_u32(0x000000ff); +#endif +} +inline v_uint64x8 v_popcount(const v_int64x8& a) +{ +#if CV_AVX_512VPOPCNTDQ + return v_uint64x8(_mm512_popcnt_epi64(a.val)); +#else + return v_uint64x8(_mm512_sad_epu8(v_popcount(v_reinterpret_as_s8(a)).val, _mm512_setzero_si512())); +#endif +} + + +inline v_uint8x64 v_popcount(const v_uint8x64& a) { return v_popcount(v_reinterpret_as_s8 (a)); } +inline v_uint16x32 v_popcount(const v_uint16x32& a) { return v_popcount(v_reinterpret_as_s16(a)); } +inline v_uint32x16 v_popcount(const v_uint32x16& a) { return v_popcount(v_reinterpret_as_s32(a)); } +inline v_uint64x8 v_popcount(const v_uint64x8& a) { return v_popcount(v_reinterpret_as_s64(a)); } + + +////////// Other math ///////// + +/** Some frequent operations **/ +#define OPENCV_HAL_IMPL_AVX512_MULADD(_Tpvec, suffix) \ + inline _Tpvec v_fma(const _Tpvec& a, const _Tpvec& b, const _Tpvec& c) \ + { return _Tpvec(_mm512_fmadd_##suffix(a.val, b.val, c.val)); } \ + inline _Tpvec v_muladd(const _Tpvec& a, const _Tpvec& b, const _Tpvec& c) \ + { return _Tpvec(_mm512_fmadd_##suffix(a.val, b.val, c.val)); } \ + inline _Tpvec v_sqrt(const _Tpvec& x) \ + { return _Tpvec(_mm512_sqrt_##suffix(x.val)); } \ + inline _Tpvec v_sqr_magnitude(const _Tpvec& a, const _Tpvec& b) \ + { return v_fma(a, a, b * b); } \ + inline _Tpvec v_magnitude(const _Tpvec& a, const _Tpvec& b) \ + { return v_sqrt(v_fma(a, a, b * b)); } + +OPENCV_HAL_IMPL_AVX512_MULADD(v_float32x16, ps) +OPENCV_HAL_IMPL_AVX512_MULADD(v_float64x8, pd) + +inline v_int32x16 v_fma(const v_int32x16& a, const v_int32x16& b, const v_int32x16& c) +{ return a * b + c; } +inline v_int32x16 v_muladd(const v_int32x16& a, const v_int32x16& b, const v_int32x16& c) +{ return v_fma(a, b, c); } + +inline v_float32x16 v_invsqrt(const v_float32x16& x) +{ +#if CV_AVX_512ER + return v_float32x16(_mm512_rsqrt28_ps(x.val)); +#else + v_float32x16 half = x * v512_setall_f32(0.5); + v_float32x16 t = v_float32x16(_mm512_rsqrt14_ps(x.val)); + t *= v512_setall_f32(1.5) - ((t * t) * half); + return t; +#endif +} + +inline v_float64x8 v_invsqrt(const v_float64x8& x) +{ +#if CV_AVX_512ER + return v_float64x8(_mm512_rsqrt28_pd(x.val)); +#else + return v512_setall_f64(1.) 
/ v_sqrt(x); +// v_float64x8 half = x * v512_setall_f64(0.5); +// v_float64x8 t = v_float64x8(_mm512_rsqrt14_pd(x.val)); +// t *= v512_setall_f64(1.5) - ((t * t) * half); +// t *= v512_setall_f64(1.5) - ((t * t) * half); +// return t; +#endif +} + +/** Absolute values **/ +#define OPENCV_HAL_IMPL_AVX512_ABS(_Tpvec, _Tpuvec, suffix) \ + inline _Tpuvec v_abs(const _Tpvec& x) \ + { return _Tpuvec(_mm512_abs_##suffix(x.val)); } + +OPENCV_HAL_IMPL_AVX512_ABS(v_int8x64, v_uint8x64, epi8) +OPENCV_HAL_IMPL_AVX512_ABS(v_int16x32, v_uint16x32, epi16) +OPENCV_HAL_IMPL_AVX512_ABS(v_int32x16, v_uint32x16, epi32) +OPENCV_HAL_IMPL_AVX512_ABS(v_int64x8, v_uint64x8, epi64) + +inline v_float32x16 v_abs(const v_float32x16& x) +{ +#ifdef _mm512_abs_pd + return v_float32x16(_mm512_abs_ps(x.val)); +#else + return v_float32x16(_mm512_castsi512_ps(_mm512_and_si512(_mm512_castps_si512(x.val), + _v512_set_epu64(0x7FFFFFFF7FFFFFFF, 0x7FFFFFFF7FFFFFFF, 0x7FFFFFFF7FFFFFFF, 0x7FFFFFFF7FFFFFFF, + 0x7FFFFFFF7FFFFFFF, 0x7FFFFFFF7FFFFFFF, 0x7FFFFFFF7FFFFFFF, 0x7FFFFFFF7FFFFFFF)))); +#endif +} + +inline v_float64x8 v_abs(const v_float64x8& x) +{ +#ifdef _mm512_abs_pd + #if defined __GNUC__ && (__GNUC__ < 7 || (__GNUC__ == 7 && __GNUC_MINOR__ <= 3) || (__GNUC__ == 8 && __GNUC_MINOR__ <= 2)) + // Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87476 + return v_float64x8(_mm512_abs_pd(_mm512_castpd_ps(x.val))); + #else + return v_float64x8(_mm512_abs_pd(x.val)); + #endif +#else + return v_float64x8(_mm512_castsi512_pd(_mm512_and_si512(_mm512_castpd_si512(x.val), + _v512_set_epu64(0x7FFFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFFFF, + 0x7FFFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFFFF)))); +#endif +} + +/** Absolute difference **/ +inline v_uint8x64 v_absdiff(const v_uint8x64& a, const v_uint8x64& b) +{ return v_add_wrap(a - b, b - a); } +inline v_uint16x32 v_absdiff(const v_uint16x32& a, const v_uint16x32& b) +{ return v_add_wrap(a - b, b - a); } +inline v_uint32x16 v_absdiff(const v_uint32x16& a, const v_uint32x16& b) +{ return v_max(a, b) - v_min(a, b); } + +inline v_uint8x64 v_absdiff(const v_int8x64& a, const v_int8x64& b) +{ + v_int8x64 d = v_sub_wrap(a, b); + v_int8x64 m = a < b; + return v_reinterpret_as_u8(v_sub_wrap(d ^ m, m)); +} + +inline v_uint16x32 v_absdiff(const v_int16x32& a, const v_int16x32& b) +{ return v_reinterpret_as_u16(v_sub_wrap(v_max(a, b), v_min(a, b))); } + +inline v_uint32x16 v_absdiff(const v_int32x16& a, const v_int32x16& b) +{ + v_int32x16 d = a - b; + v_int32x16 m = a < b; + return v_reinterpret_as_u32((d ^ m) - m); +} + +inline v_float32x16 v_absdiff(const v_float32x16& a, const v_float32x16& b) +{ return v_abs(a - b); } + +inline v_float64x8 v_absdiff(const v_float64x8& a, const v_float64x8& b) +{ return v_abs(a - b); } + +/** Saturating absolute difference **/ +inline v_int8x64 v_absdiffs(const v_int8x64& a, const v_int8x64& b) +{ + v_int8x64 d = a - b; + v_int8x64 m = a < b; + return (d ^ m) - m; +} +inline v_int16x32 v_absdiffs(const v_int16x32& a, const v_int16x32& b) +{ return v_max(a, b) - v_min(a, b); } + +////////// Conversions ///////// + +/** Rounding **/ +inline v_int32x16 v_round(const v_float32x16& a) +{ return v_int32x16(_mm512_cvtps_epi32(a.val)); } + +inline v_int32x16 v_round(const v_float64x8& a) +{ return v_int32x16(_mm512_castsi256_si512(_mm512_cvtpd_epi32(a.val))); } + +inline v_int32x16 v_round(const v_float64x8& a, const v_float64x8& b) +{ return v_int32x16(_v512_combine(_mm512_cvtpd_epi32(a.val), 
_mm512_cvtpd_epi32(b.val))); } + +inline v_int32x16 v_trunc(const v_float32x16& a) +{ return v_int32x16(_mm512_cvttps_epi32(a.val)); } + +inline v_int32x16 v_trunc(const v_float64x8& a) +{ return v_int32x16(_mm512_castsi256_si512(_mm512_cvttpd_epi32(a.val))); } + +#if CVT_ROUND_MODES_IMPLEMENTED +inline v_int32x16 v_floor(const v_float32x16& a) +{ return v_int32x16(_mm512_cvt_roundps_epi32(a.val, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC)); } + +inline v_int32x16 v_floor(const v_float64x8& a) +{ return v_int32x16(_mm512_castsi256_si512(_mm512_cvt_roundpd_epi32(a.val, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC))); } + +inline v_int32x16 v_ceil(const v_float32x16& a) +{ return v_int32x16(_mm512_cvt_roundps_epi32(a.val, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC)); } + +inline v_int32x16 v_ceil(const v_float64x8& a) +{ return v_int32x16(_mm512_castsi256_si512(_mm512_cvt_roundpd_epi32(a.val, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC))); } +#else +inline v_int32x16 v_floor(const v_float32x16& a) +{ return v_int32x16(_mm512_cvtps_epi32(_mm512_roundscale_ps(a.val, 1))); } + +inline v_int32x16 v_floor(const v_float64x8& a) +{ return v_int32x16(_mm512_castsi256_si512(_mm512_cvtpd_epi32(_mm512_roundscale_pd(a.val, 1)))); } + +inline v_int32x16 v_ceil(const v_float32x16& a) +{ return v_int32x16(_mm512_cvtps_epi32(_mm512_roundscale_ps(a.val, 2))); } + +inline v_int32x16 v_ceil(const v_float64x8& a) +{ return v_int32x16(_mm512_castsi256_si512(_mm512_cvtpd_epi32(_mm512_roundscale_pd(a.val, 2)))); } +#endif + +/** To float **/ +inline v_float32x16 v_cvt_f32(const v_int32x16& a) +{ return v_float32x16(_mm512_cvtepi32_ps(a.val)); } + +inline v_float32x16 v_cvt_f32(const v_float64x8& a) +{ return v_float32x16(_mm512_cvtpd_pslo(a.val)); } + +inline v_float32x16 v_cvt_f32(const v_float64x8& a, const v_float64x8& b) +{ return v_float32x16(_v512_combine(_mm512_cvtpd_ps(a.val), _mm512_cvtpd_ps(b.val))); } + +inline v_float64x8 v_cvt_f64(const v_int32x16& a) +{ return v_float64x8(_mm512_cvtepi32_pd(_v512_extract_low(a.val))); } + +inline v_float64x8 v_cvt_f64_high(const v_int32x16& a) +{ return v_float64x8(_mm512_cvtepi32_pd(_v512_extract_high(a.val))); } + +inline v_float64x8 v_cvt_f64(const v_float32x16& a) +{ return v_float64x8(_mm512_cvtps_pd(_v512_extract_low(a.val))); } + +inline v_float64x8 v_cvt_f64_high(const v_float32x16& a) +{ return v_float64x8(_mm512_cvtps_pd(_v512_extract_high(a.val))); } + +// from (Mysticial and wim) https://stackoverflow.com/q/41144668 +inline v_float64x8 v_cvt_f64(const v_int64x8& v) +{ +#if CV_AVX_512DQ + return v_float64x8(_mm512_cvtepi64_pd(v.val)); +#else + // constants encoded as floating-point + __m512i magic_i_lo = _mm512_set1_epi64(0x4330000000000000); // 2^52 + __m512i magic_i_hi32 = _mm512_set1_epi64(0x4530000080000000); // 2^84 + 2^63 + __m512i magic_i_all = _mm512_set1_epi64(0x4530000080100000); // 2^84 + 2^63 + 2^52 + __m512d magic_d_all = _mm512_castsi512_pd(magic_i_all); + + // Blend the 32 lowest significant bits of v with magic_int_lo + __m512i v_lo = _mm512_mask_blend_epi32(0x5555, magic_i_lo, v.val); + // Extract the 32 most significant bits of v + __m512i v_hi = _mm512_srli_epi64(v.val, 32); + // Flip the msb of v_hi and blend with 0x45300000 + v_hi = _mm512_xor_si512(v_hi, magic_i_hi32); + // Compute in double precision + __m512d v_hi_dbl = _mm512_sub_pd(_mm512_castsi512_pd(v_hi), magic_d_all); + // (v_hi - magic_d_all) + v_lo Do not assume associativity of floating point addition + __m512d result = _mm512_add_pd(v_hi_dbl, _mm512_castsi512_pd(v_lo)); + return 
v_float64x8(result); +#endif +} + +////////////// Lookup table access //////////////////// + +inline v_int8x64 v512_lut(const schar* tab, const int* idx) +{ + __m128i p0 = _mm512_cvtepi32_epi8(_mm512_i32gather_epi32(_mm512_loadu_si512((const __m512i*)idx ), (const int *)tab, 1)); + __m128i p1 = _mm512_cvtepi32_epi8(_mm512_i32gather_epi32(_mm512_loadu_si512((const __m512i*)idx + 1), (const int *)tab, 1)); + __m128i p2 = _mm512_cvtepi32_epi8(_mm512_i32gather_epi32(_mm512_loadu_si512((const __m512i*)idx + 2), (const int *)tab, 1)); + __m128i p3 = _mm512_cvtepi32_epi8(_mm512_i32gather_epi32(_mm512_loadu_si512((const __m512i*)idx + 3), (const int *)tab, 1)); + return v_int8x64(_mm512_inserti32x4(_mm512_inserti32x4(_mm512_inserti32x4(_mm512_castsi128_si512(p0), p1, 1), p2, 2), p3, 3)); +} +inline v_int8x64 v512_lut_pairs(const schar* tab, const int* idx) +{ + __m256i p0 = _mm512_cvtepi32_epi16(_mm512_i32gather_epi32(_mm512_loadu_si512((const __m512i*)idx ), (const int *)tab, 1)); + __m256i p1 = _mm512_cvtepi32_epi16(_mm512_i32gather_epi32(_mm512_loadu_si512((const __m512i*)idx + 1), (const int *)tab, 1)); + return v_int8x64(_v512_combine(p0, p1)); +} +inline v_int8x64 v512_lut_quads(const schar* tab, const int* idx) +{ + return v_int8x64(_mm512_i32gather_epi32(_mm512_loadu_si512((const __m512i*)idx), (const int *)tab, 1)); +} +inline v_uint8x64 v512_lut(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v512_lut((const schar *)tab, idx)); } +inline v_uint8x64 v512_lut_pairs(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v512_lut_pairs((const schar *)tab, idx)); } +inline v_uint8x64 v512_lut_quads(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v512_lut_quads((const schar *)tab, idx)); } + +inline v_int16x32 v512_lut(const short* tab, const int* idx) +{ + __m256i p0 = _mm512_cvtepi32_epi16(_mm512_i32gather_epi32(_mm512_loadu_si512((const __m512i*)idx ), (const int *)tab, 2)); + __m256i p1 = _mm512_cvtepi32_epi16(_mm512_i32gather_epi32(_mm512_loadu_si512((const __m512i*)idx + 1), (const int *)tab, 2)); + return v_int16x32(_v512_combine(p0, p1)); +} +inline v_int16x32 v512_lut_pairs(const short* tab, const int* idx) +{ + return v_int16x32(_mm512_i32gather_epi32(_mm512_loadu_si512((const __m512i*)idx), (const int *)tab, 2)); +} +inline v_int16x32 v512_lut_quads(const short* tab, const int* idx) +{ +#if defined(__GNUC__) + return v_int16x32(_mm512_i32gather_epi64(_mm256_loadu_si256((const __m256i*)idx), (const long long int*)tab, 2)); +#else + return v_int16x32(_mm512_i32gather_epi64(_mm256_loadu_si256((const __m256i*)idx), (const int64*)tab, 2)); +#endif +} +inline v_uint16x32 v512_lut(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v512_lut((const short *)tab, idx)); } +inline v_uint16x32 v512_lut_pairs(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v512_lut_pairs((const short *)tab, idx)); } +inline v_uint16x32 v512_lut_quads(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v512_lut_quads((const short *)tab, idx)); } + +inline v_int32x16 v512_lut(const int* tab, const int* idx) +{ + return v_int32x16(_mm512_i32gather_epi32(_mm512_loadu_si512((const __m512i*)idx), tab, 4)); +} +inline v_int32x16 v512_lut_pairs(const int* tab, const int* idx) +{ +#if defined(__GNUC__) + return v_int32x16(_mm512_i32gather_epi64(_mm256_loadu_si256((const __m256i*)idx), (const long long int*)tab, 4)); +#else + return v_int32x16(_mm512_i32gather_epi64(_mm256_loadu_si256((const __m256i*)idx), (const int64*)tab, 4)); +#endif +} 
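+// Note: the 32-bit v512_lut_quads below loads four consecutive ints per index with
+// plain 128-bit loads plus _mm512_inserti32x4 rather than a gather, since each
+// group of four lanes comes from contiguous memory.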
+inline v_int32x16 v512_lut_quads(const int* tab, const int* idx) +{ + return v_int32x16(_mm512_inserti32x4(_mm512_inserti32x4(_mm512_inserti32x4(_mm512_castsi128_si512( + _mm_loadu_si128((const __m128i*)(tab + idx[0]))), + _mm_loadu_si128((const __m128i*)(tab + idx[1])), 1), + _mm_loadu_si128((const __m128i*)(tab + idx[2])), 2), + _mm_loadu_si128((const __m128i*)(tab + idx[3])), 3)); +} +inline v_uint32x16 v512_lut(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v512_lut((const int *)tab, idx)); } +inline v_uint32x16 v512_lut_pairs(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v512_lut_pairs((const int *)tab, idx)); } +inline v_uint32x16 v512_lut_quads(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v512_lut_quads((const int *)tab, idx)); } + +inline v_int64x8 v512_lut(const int64* tab, const int* idx) +{ +#if defined(__GNUC__) + return v_int64x8(_mm512_i32gather_epi64(_mm256_loadu_si256((const __m256i*)idx), (const long long int*)tab, 8)); +#else + return v_int64x8(_mm512_i32gather_epi64(_mm256_loadu_si256((const __m256i*)idx), tab , 8)); +#endif +} +inline v_int64x8 v512_lut_pairs(const int64* tab, const int* idx) +{ + return v_int64x8(_mm512_inserti32x4(_mm512_inserti32x4(_mm512_inserti32x4(_mm512_castsi128_si512( + _mm_loadu_si128((const __m128i*)(tab + idx[0]))), + _mm_loadu_si128((const __m128i*)(tab + idx[1])), 1), + _mm_loadu_si128((const __m128i*)(tab + idx[2])), 2), + _mm_loadu_si128((const __m128i*)(tab + idx[3])), 3)); +} +inline v_uint64x8 v512_lut(const uint64* tab, const int* idx) { return v_reinterpret_as_u64(v512_lut((const int64 *)tab, idx)); } +inline v_uint64x8 v512_lut_pairs(const uint64* tab, const int* idx) { return v_reinterpret_as_u64(v512_lut_pairs((const int64 *)tab, idx)); } + +inline v_float32x16 v512_lut(const float* tab, const int* idx) +{ + return v_float32x16(_mm512_i32gather_ps(_mm512_loadu_si512((const __m512i*)idx), tab, 4)); +} +inline v_float32x16 v512_lut_pairs(const float* tab, const int* idx) { return v_reinterpret_as_f32(v512_lut_pairs((const int *)tab, idx)); } +inline v_float32x16 v512_lut_quads(const float* tab, const int* idx) { return v_reinterpret_as_f32(v512_lut_quads((const int *)tab, idx)); } + +inline v_float64x8 v512_lut(const double* tab, const int* idx) +{ + return v_float64x8(_mm512_i32gather_pd(_mm256_loadu_si256((const __m256i*)idx), tab, 8)); +} +inline v_float64x8 v512_lut_pairs(const double* tab, const int* idx) +{ + return v_float64x8(_mm512_insertf64x2(_mm512_insertf64x2(_mm512_insertf64x2(_mm512_castpd128_pd512( + _mm_loadu_pd(tab + idx[0])), + _mm_loadu_pd(tab + idx[1]), 1), + _mm_loadu_pd(tab + idx[2]), 2), + _mm_loadu_pd(tab + idx[3]), 3)); +} + +inline v_int32x16 v_lut(const int* tab, const v_int32x16& idxvec) +{ + return v_int32x16(_mm512_i32gather_epi32(idxvec.val, tab, 4)); +} + +inline v_uint32x16 v_lut(const unsigned* tab, const v_int32x16& idxvec) +{ + return v_reinterpret_as_u32(v_lut((const int *)tab, idxvec)); +} + +inline v_float32x16 v_lut(const float* tab, const v_int32x16& idxvec) +{ + return v_float32x16(_mm512_i32gather_ps(idxvec.val, tab, 4)); +} + +inline v_float64x8 v_lut(const double* tab, const v_int32x16& idxvec) +{ + return v_float64x8(_mm512_i32gather_pd(_v512_extract_low(idxvec.val), tab, 8)); +} + +inline void v_lut_deinterleave(const float* tab, const v_int32x16& idxvec, v_float32x16& x, v_float32x16& y) +{ + x.val = _mm512_i32gather_ps(idxvec.val, tab, 4); + y.val = _mm512_i32gather_ps(idxvec.val, &tab[1], 4); +} + +inline void 
v_lut_deinterleave(const double* tab, const v_int32x16& idxvec, v_float64x8& x, v_float64x8& y) +{ + x.val = _mm512_i32gather_pd(_v512_extract_low(idxvec.val), tab, 8); + y.val = _mm512_i32gather_pd(_v512_extract_low(idxvec.val), &tab[1], 8); +} + +inline v_int8x64 v_interleave_pairs(const v_int8x64& vec) +{ + return v_int8x64(_mm512_shuffle_epi8(vec.val, _mm512_set4_epi32(0x0f0d0e0c, 0x0b090a08, 0x07050604, 0x03010200))); +} +inline v_uint8x64 v_interleave_pairs(const v_uint8x64& vec) { return v_reinterpret_as_u8(v_interleave_pairs(v_reinterpret_as_s8(vec))); } +inline v_int8x64 v_interleave_quads(const v_int8x64& vec) +{ + return v_int8x64(_mm512_shuffle_epi8(vec.val, _mm512_set4_epi32(0x0f0b0e0a, 0x0d090c08, 0x07030602, 0x05010400))); +} +inline v_uint8x64 v_interleave_quads(const v_uint8x64& vec) { return v_reinterpret_as_u8(v_interleave_quads(v_reinterpret_as_s8(vec))); } + +inline v_int16x32 v_interleave_pairs(const v_int16x32& vec) +{ + return v_int16x32(_mm512_shuffle_epi8(vec.val, _mm512_set4_epi32(0x0f0e0b0a, 0x0d0c0908, 0x07060302, 0x05040100))); +} +inline v_uint16x32 v_interleave_pairs(const v_uint16x32& vec) { return v_reinterpret_as_u16(v_interleave_pairs(v_reinterpret_as_s16(vec))); } +inline v_int16x32 v_interleave_quads(const v_int16x32& vec) +{ + return v_int16x32(_mm512_shuffle_epi8(vec.val, _mm512_set4_epi32(0x0f0e0706, 0x0d0c0504, 0x0b0a0302, 0x09080100))); +} +inline v_uint16x32 v_interleave_quads(const v_uint16x32& vec) { return v_reinterpret_as_u16(v_interleave_quads(v_reinterpret_as_s16(vec))); } + +inline v_int32x16 v_interleave_pairs(const v_int32x16& vec) +{ + return v_int32x16(_mm512_shuffle_epi32(vec.val, _MM_PERM_ACBD)); +} +inline v_uint32x16 v_interleave_pairs(const v_uint32x16& vec) { return v_reinterpret_as_u32(v_interleave_pairs(v_reinterpret_as_s32(vec))); } +inline v_float32x16 v_interleave_pairs(const v_float32x16& vec) { return v_reinterpret_as_f32(v_interleave_pairs(v_reinterpret_as_s32(vec))); } + +inline v_int8x64 v_pack_triplets(const v_int8x64& vec) +{ + return v_int8x64(_mm512_permutexvar_epi32(_v512_set_epu64(0x0000000f0000000f, 0x0000000f0000000f, 0x0000000e0000000d, 0x0000000c0000000a, + 0x0000000900000008, 0x0000000600000005, 0x0000000400000002, 0x0000000100000000), + _mm512_shuffle_epi8(vec.val, _mm512_set4_epi32(0xffffff0f, 0x0e0d0c0a, 0x09080605, 0x04020100)))); +} +inline v_uint8x64 v_pack_triplets(const v_uint8x64& vec) { return v_reinterpret_as_u8(v_pack_triplets(v_reinterpret_as_s8(vec))); } + +inline v_int16x32 v_pack_triplets(const v_int16x32& vec) +{ + return v_int16x32(_mm512_permutexvar_epi16(_v512_set_epu64(0x001f001f001f001f, 0x001f001f001f001f, 0x001e001d001c001a, 0x0019001800160015, + 0x0014001200110010, 0x000e000d000c000a, 0x0009000800060005, 0x0004000200010000), vec.val)); +} +inline v_uint16x32 v_pack_triplets(const v_uint16x32& vec) { return v_reinterpret_as_u16(v_pack_triplets(v_reinterpret_as_s16(vec))); } + +inline v_int32x16 v_pack_triplets(const v_int32x16& vec) +{ + return v_int32x16(_mm512_permutexvar_epi32(_v512_set_epu64(0x0000000f0000000f, 0x0000000f0000000f, 0x0000000e0000000d, 0x0000000c0000000a, + 0x0000000900000008, 0x0000000600000005, 0x0000000400000002, 0x0000000100000000), vec.val)); +} +inline v_uint32x16 v_pack_triplets(const v_uint32x16& vec) { return v_reinterpret_as_u32(v_pack_triplets(v_reinterpret_as_s32(vec))); } +inline v_float32x16 v_pack_triplets(const v_float32x16& vec) +{ + return v_float32x16(_mm512_permutexvar_ps(_v512_set_epu64(0x0000000f0000000f, 0x0000000f0000000f, 0x0000000e0000000d, 
0x0000000c0000000a, + 0x0000000900000008, 0x0000000600000005, 0x0000000400000002, 0x0000000100000000), vec.val)); +} + +////////// Matrix operations ///////// + +//////// Dot Product //////// + +// 16 >> 32 +inline v_int32x16 v_dotprod(const v_int16x32& a, const v_int16x32& b) +{ return v_int32x16(_mm512_madd_epi16(a.val, b.val)); } +inline v_int32x16 v_dotprod(const v_int16x32& a, const v_int16x32& b, const v_int32x16& c) +{ return v_dotprod(a, b) + c; } + +// 32 >> 64 +inline v_int64x8 v_dotprod(const v_int32x16& a, const v_int32x16& b) +{ + __m512i even = _mm512_mul_epi32(a.val, b.val); + __m512i odd = _mm512_mul_epi32(_mm512_srli_epi64(a.val, 32), _mm512_srli_epi64(b.val, 32)); + return v_int64x8(_mm512_add_epi64(even, odd)); +} +inline v_int64x8 v_dotprod(const v_int32x16& a, const v_int32x16& b, const v_int64x8& c) +{ return v_dotprod(a, b) + c; } + +// 8 >> 32 +inline v_uint32x16 v_dotprod_expand(const v_uint8x64& a, const v_uint8x64& b) +{ + __m512i even_a = _mm512_mask_blend_epi8(0xAAAAAAAAAAAAAAAA, a.val, _mm512_setzero_si512()); + __m512i odd_a = _mm512_srli_epi16(a.val, 8); + + __m512i even_b = _mm512_mask_blend_epi8(0xAAAAAAAAAAAAAAAA, b.val, _mm512_setzero_si512()); + __m512i odd_b = _mm512_srli_epi16(b.val, 8); + + __m512i prod0 = _mm512_madd_epi16(even_a, even_b); + __m512i prod1 = _mm512_madd_epi16(odd_a, odd_b); + return v_uint32x16(_mm512_add_epi32(prod0, prod1)); +} +inline v_uint32x16 v_dotprod_expand(const v_uint8x64& a, const v_uint8x64& b, const v_uint32x16& c) +{ return v_dotprod_expand(a, b) + c; } + +inline v_int32x16 v_dotprod_expand(const v_int8x64& a, const v_int8x64& b) +{ + __m512i even_a = _mm512_srai_epi16(_mm512_bslli_epi128(a.val, 1), 8); + __m512i odd_a = _mm512_srai_epi16(a.val, 8); + + __m512i even_b = _mm512_srai_epi16(_mm512_bslli_epi128(b.val, 1), 8); + __m512i odd_b = _mm512_srai_epi16(b.val, 8); + + __m512i prod0 = _mm512_madd_epi16(even_a, even_b); + __m512i prod1 = _mm512_madd_epi16(odd_a, odd_b); + return v_int32x16(_mm512_add_epi32(prod0, prod1)); +} +inline v_int32x16 v_dotprod_expand(const v_int8x64& a, const v_int8x64& b, const v_int32x16& c) +{ return v_dotprod_expand(a, b) + c; } + +// 16 >> 64 +inline v_uint64x8 v_dotprod_expand(const v_uint16x32& a, const v_uint16x32& b) +{ + __m512i mullo = _mm512_mullo_epi16(a.val, b.val); + __m512i mulhi = _mm512_mulhi_epu16(a.val, b.val); + __m512i mul0 = _mm512_unpacklo_epi16(mullo, mulhi); + __m512i mul1 = _mm512_unpackhi_epi16(mullo, mulhi); + + __m512i p02 = _mm512_mask_blend_epi32(0xAAAA, mul0, _mm512_setzero_si512()); + __m512i p13 = _mm512_srli_epi64(mul0, 32); + __m512i p46 = _mm512_mask_blend_epi32(0xAAAA, mul1, _mm512_setzero_si512()); + __m512i p57 = _mm512_srli_epi64(mul1, 32); + + __m512i p15_ = _mm512_add_epi64(p02, p13); + __m512i p9d_ = _mm512_add_epi64(p46, p57); + + return v_uint64x8(_mm512_add_epi64( + _mm512_unpacklo_epi64(p15_, p9d_), + _mm512_unpackhi_epi64(p15_, p9d_) + )); +} +inline v_uint64x8 v_dotprod_expand(const v_uint16x32& a, const v_uint16x32& b, const v_uint64x8& c) +{ return v_dotprod_expand(a, b) + c; } + +inline v_int64x8 v_dotprod_expand(const v_int16x32& a, const v_int16x32& b) +{ + __m512i prod = _mm512_madd_epi16(a.val, b.val); + __m512i even = _mm512_srai_epi64(_mm512_bslli_epi128(prod, 4), 32); + __m512i odd = _mm512_srai_epi64(prod, 32); + return v_int64x8(_mm512_add_epi64(even, odd)); +} +inline v_int64x8 v_dotprod_expand(const v_int16x32& a, const v_int16x32& b, const v_int64x8& c) +{ return v_dotprod_expand(a, b) + c; } + +// 32 >> 64f +inline 
v_float64x8 v_dotprod_expand(const v_int32x16& a, const v_int32x16& b) +{ return v_cvt_f64(v_dotprod(a, b)); } +inline v_float64x8 v_dotprod_expand(const v_int32x16& a, const v_int32x16& b, const v_float64x8& c) +{ return v_dotprod_expand(a, b) + c; } + +//////// Fast Dot Product //////// + +// 16 >> 32 +inline v_int32x16 v_dotprod_fast(const v_int16x32& a, const v_int16x32& b) +{ return v_dotprod(a, b); } +inline v_int32x16 v_dotprod_fast(const v_int16x32& a, const v_int16x32& b, const v_int32x16& c) +{ return v_dotprod(a, b, c); } + +// 32 >> 64 +inline v_int64x8 v_dotprod_fast(const v_int32x16& a, const v_int32x16& b) +{ return v_dotprod(a, b); } +inline v_int64x8 v_dotprod_fast(const v_int32x16& a, const v_int32x16& b, const v_int64x8& c) +{ return v_dotprod(a, b, c); } + +// 8 >> 32 +inline v_uint32x16 v_dotprod_expand_fast(const v_uint8x64& a, const v_uint8x64& b) +{ return v_dotprod_expand(a, b); } +inline v_uint32x16 v_dotprod_expand_fast(const v_uint8x64& a, const v_uint8x64& b, const v_uint32x16& c) +{ return v_dotprod_expand(a, b, c); } + +inline v_int32x16 v_dotprod_expand_fast(const v_int8x64& a, const v_int8x64& b) +{ return v_dotprod_expand(a, b); } +inline v_int32x16 v_dotprod_expand_fast(const v_int8x64& a, const v_int8x64& b, const v_int32x16& c) +{ return v_dotprod_expand(a, b, c); } + +// 16 >> 64 +inline v_uint64x8 v_dotprod_expand_fast(const v_uint16x32& a, const v_uint16x32& b) +{ + __m512i mullo = _mm512_mullo_epi16(a.val, b.val); + __m512i mulhi = _mm512_mulhi_epu16(a.val, b.val); + __m512i mul0 = _mm512_unpacklo_epi16(mullo, mulhi); + __m512i mul1 = _mm512_unpackhi_epi16(mullo, mulhi); + + __m512i p02 = _mm512_mask_blend_epi32(0xAAAA, mul0, _mm512_setzero_si512()); + __m512i p13 = _mm512_srli_epi64(mul0, 32); + __m512i p46 = _mm512_mask_blend_epi32(0xAAAA, mul1, _mm512_setzero_si512()); + __m512i p57 = _mm512_srli_epi64(mul1, 32); + + __m512i p15_ = _mm512_add_epi64(p02, p13); + __m512i p9d_ = _mm512_add_epi64(p46, p57); + return v_uint64x8(_mm512_add_epi64(p15_, p9d_)); +} +inline v_uint64x8 v_dotprod_expand_fast(const v_uint16x32& a, const v_uint16x32& b, const v_uint64x8& c) +{ return v_dotprod_expand_fast(a, b) + c; } + +inline v_int64x8 v_dotprod_expand_fast(const v_int16x32& a, const v_int16x32& b) +{ return v_dotprod_expand(a, b); } +inline v_int64x8 v_dotprod_expand_fast(const v_int16x32& a, const v_int16x32& b, const v_int64x8& c) +{ return v_dotprod_expand(a, b, c); } + +// 32 >> 64f +inline v_float64x8 v_dotprod_expand_fast(const v_int32x16& a, const v_int32x16& b) +{ return v_dotprod_expand(a, b); } +inline v_float64x8 v_dotprod_expand_fast(const v_int32x16& a, const v_int32x16& b, const v_float64x8& c) +{ return v_dotprod_expand(a, b) + c; } + + +#define OPENCV_HAL_AVX512_SPLAT2_PS(a, im) \ + v_float32x16(_mm512_permute_ps(a.val, _MM_SHUFFLE(im, im, im, im))) + +inline v_float32x16 v_matmul(const v_float32x16& v, + const v_float32x16& m0, const v_float32x16& m1, + const v_float32x16& m2, const v_float32x16& m3) +{ + v_float32x16 v04 = OPENCV_HAL_AVX512_SPLAT2_PS(v, 0); + v_float32x16 v15 = OPENCV_HAL_AVX512_SPLAT2_PS(v, 1); + v_float32x16 v26 = OPENCV_HAL_AVX512_SPLAT2_PS(v, 2); + v_float32x16 v37 = OPENCV_HAL_AVX512_SPLAT2_PS(v, 3); + return v_fma(v04, m0, v_fma(v15, m1, v_fma(v26, m2, v37 * m3))); +} + +inline v_float32x16 v_matmuladd(const v_float32x16& v, + const v_float32x16& m0, const v_float32x16& m1, + const v_float32x16& m2, const v_float32x16& a) +{ + v_float32x16 v04 = OPENCV_HAL_AVX512_SPLAT2_PS(v, 0); + v_float32x16 v15 = 
OPENCV_HAL_AVX512_SPLAT2_PS(v, 1); + v_float32x16 v26 = OPENCV_HAL_AVX512_SPLAT2_PS(v, 2); + return v_fma(v04, m0, v_fma(v15, m1, v_fma(v26, m2, a))); +} + +#define OPENCV_HAL_IMPL_AVX512_TRANSPOSE4x4(_Tpvec, suffix, cast_from, cast_to) \ + inline void v_transpose4x4(const _Tpvec& a0, const _Tpvec& a1, \ + const _Tpvec& a2, const _Tpvec& a3, \ + _Tpvec& b0, _Tpvec& b1, _Tpvec& b2, _Tpvec& b3) \ + { \ + __m512i t0 = cast_from(_mm512_unpacklo_##suffix(a0.val, a1.val)); \ + __m512i t1 = cast_from(_mm512_unpacklo_##suffix(a2.val, a3.val)); \ + __m512i t2 = cast_from(_mm512_unpackhi_##suffix(a0.val, a1.val)); \ + __m512i t3 = cast_from(_mm512_unpackhi_##suffix(a2.val, a3.val)); \ + b0.val = cast_to(_mm512_unpacklo_epi64(t0, t1)); \ + b1.val = cast_to(_mm512_unpackhi_epi64(t0, t1)); \ + b2.val = cast_to(_mm512_unpacklo_epi64(t2, t3)); \ + b3.val = cast_to(_mm512_unpackhi_epi64(t2, t3)); \ + } + +OPENCV_HAL_IMPL_AVX512_TRANSPOSE4x4(v_uint32x16, epi32, OPENCV_HAL_NOP, OPENCV_HAL_NOP) +OPENCV_HAL_IMPL_AVX512_TRANSPOSE4x4(v_int32x16, epi32, OPENCV_HAL_NOP, OPENCV_HAL_NOP) +OPENCV_HAL_IMPL_AVX512_TRANSPOSE4x4(v_float32x16, ps, _mm512_castps_si512, _mm512_castsi512_ps) + +//////////////// Value reordering /////////////// + +/* Expand */ +#define OPENCV_HAL_IMPL_AVX512_EXPAND(_Tpvec, _Tpwvec, _Tp, intrin) \ + inline void v_expand(const _Tpvec& a, _Tpwvec& b0, _Tpwvec& b1) \ + { \ + b0.val = intrin(_v512_extract_low(a.val)); \ + b1.val = intrin(_v512_extract_high(a.val)); \ + } \ + inline _Tpwvec v_expand_low(const _Tpvec& a) \ + { return _Tpwvec(intrin(_v512_extract_low(a.val))); } \ + inline _Tpwvec v_expand_high(const _Tpvec& a) \ + { return _Tpwvec(intrin(_v512_extract_high(a.val))); } \ + inline _Tpwvec v512_load_expand(const _Tp* ptr) \ + { \ + __m256i a = _mm256_loadu_si256((const __m256i*)ptr); \ + return _Tpwvec(intrin(a)); \ + } + +OPENCV_HAL_IMPL_AVX512_EXPAND(v_uint8x64, v_uint16x32, uchar, _mm512_cvtepu8_epi16) +OPENCV_HAL_IMPL_AVX512_EXPAND(v_int8x64, v_int16x32, schar, _mm512_cvtepi8_epi16) +OPENCV_HAL_IMPL_AVX512_EXPAND(v_uint16x32, v_uint32x16, ushort, _mm512_cvtepu16_epi32) +OPENCV_HAL_IMPL_AVX512_EXPAND(v_int16x32, v_int32x16, short, _mm512_cvtepi16_epi32) +OPENCV_HAL_IMPL_AVX512_EXPAND(v_uint32x16, v_uint64x8, unsigned, _mm512_cvtepu32_epi64) +OPENCV_HAL_IMPL_AVX512_EXPAND(v_int32x16, v_int64x8, int, _mm512_cvtepi32_epi64) + +#define OPENCV_HAL_IMPL_AVX512_EXPAND_Q(_Tpvec, _Tp, intrin) \ + inline _Tpvec v512_load_expand_q(const _Tp* ptr) \ + { \ + __m128i a = _mm_loadu_si128((const __m128i*)ptr); \ + return _Tpvec(intrin(a)); \ + } + +OPENCV_HAL_IMPL_AVX512_EXPAND_Q(v_uint32x16, uchar, _mm512_cvtepu8_epi32) +OPENCV_HAL_IMPL_AVX512_EXPAND_Q(v_int32x16, schar, _mm512_cvtepi8_epi32) + +/* pack */ +// 16 +inline v_int8x64 v_pack(const v_int16x32& a, const v_int16x32& b) +{ return v_int8x64(_mm512_permutexvar_epi64(_v512_set_epu64(7, 5, 3, 1, 6, 4, 2, 0), _mm512_packs_epi16(a.val, b.val))); } + +inline v_uint8x64 v_pack(const v_uint16x32& a, const v_uint16x32& b) +{ + const __m512i t = _mm512_set1_epi16(255); + return v_uint8x64(_v512_combine(_mm512_cvtepi16_epi8(_mm512_min_epu16(a.val, t)), _mm512_cvtepi16_epi8(_mm512_min_epu16(b.val, t)))); +} + +inline v_uint8x64 v_pack_u(const v_int16x32& a, const v_int16x32& b) +{ + return v_uint8x64(_mm512_permutexvar_epi64(_v512_set_epu64(7, 5, 3, 1, 6, 4, 2, 0), _mm512_packus_epi16(a.val, b.val))); +} + +inline void v_pack_store(schar* ptr, const v_int16x32& a) +{ v_store_low(ptr, v_pack(a, a)); } + +inline void v_pack_store(uchar* ptr, const 
v_uint16x32& a)
+{
+    const __m512i m = _mm512_set1_epi16(255);
+    _mm256_storeu_si256((__m256i*)ptr, _mm512_cvtepi16_epi8(_mm512_min_epu16(a.val, m)));
+}
+
+inline void v_pack_u_store(uchar* ptr, const v_int16x32& a)
+{ v_store_low(ptr, v_pack_u(a, a)); }
+
+template<int n> inline
+v_uint8x64 v_rshr_pack(const v_uint16x32& a, const v_uint16x32& b)
+{
+    // we assume that n > 0, and so the shifted 16-bit values can be treated as signed numbers.
+    v_uint16x32 delta = v512_setall_u16((short)(1 << (n-1)));
+    return v_pack_u(v_reinterpret_as_s16((a + delta) >> n),
+                    v_reinterpret_as_s16((b + delta) >> n));
+}
+
+template<int n> inline
+void v_rshr_pack_store(uchar* ptr, const v_uint16x32& a)
+{
+    v_uint16x32 delta = v512_setall_u16((short)(1 << (n-1)));
+    v_pack_u_store(ptr, v_reinterpret_as_s16((a + delta) >> n));
+}
+
+template<int n> inline
+v_uint8x64 v_rshr_pack_u(const v_int16x32& a, const v_int16x32& b)
+{
+    v_int16x32 delta = v512_setall_s16((short)(1 << (n-1)));
+    return v_pack_u((a + delta) >> n, (b + delta) >> n);
+}
+
+template<int n> inline
+void v_rshr_pack_u_store(uchar* ptr, const v_int16x32& a)
+{
+    v_int16x32 delta = v512_setall_s16((short)(1 << (n-1)));
+    v_pack_u_store(ptr, (a + delta) >> n);
+}
+
+template<int n> inline
+v_int8x64 v_rshr_pack(const v_int16x32& a, const v_int16x32& b)
+{
+    v_int16x32 delta = v512_setall_s16((short)(1 << (n-1)));
+    return v_pack((a + delta) >> n, (b + delta) >> n);
+}
+
+template<int n> inline
+void v_rshr_pack_store(schar* ptr, const v_int16x32& a)
+{
+    v_int16x32 delta = v512_setall_s16((short)(1 << (n-1)));
+    v_pack_store(ptr, (a + delta) >> n);
+}
+
+// 32
+inline v_int16x32 v_pack(const v_int32x16& a, const v_int32x16& b)
+{ return v_int16x32(_mm512_permutexvar_epi64(_v512_set_epu64(7, 5, 3, 1, 6, 4, 2, 0), _mm512_packs_epi32(a.val, b.val))); }
+
+inline v_uint16x32 v_pack(const v_uint32x16& a, const v_uint32x16& b)
+{
+    const __m512i m = _mm512_set1_epi32(65535);
+    return v_uint16x32(_v512_combine(_mm512_cvtepi32_epi16(_mm512_min_epu32(a.val, m)), _mm512_cvtepi32_epi16(_mm512_min_epu32(b.val, m))));
+}
+
+inline v_uint16x32 v_pack_u(const v_int32x16& a, const v_int32x16& b)
+{ return v_uint16x32(_mm512_permutexvar_epi64(_v512_set_epu64(7, 5, 3, 1, 6, 4, 2, 0), _mm512_packus_epi32(a.val, b.val))); }
+
+inline void v_pack_store(short* ptr, const v_int32x16& a)
+{ v_store_low(ptr, v_pack(a, a)); }
+
+inline void v_pack_store(ushort* ptr, const v_uint32x16& a)
+{
+    const __m512i m = _mm512_set1_epi32(65535);
+    _mm256_storeu_si256((__m256i*)ptr, _mm512_cvtepi32_epi16(_mm512_min_epu32(a.val, m)));
+}
+
+inline void v_pack_u_store(ushort* ptr, const v_int32x16& a)
+{ v_store_low(ptr, v_pack_u(a, a)); }
+
+
+template<int n> inline
+v_uint16x32 v_rshr_pack(const v_uint32x16& a, const v_uint32x16& b)
+{
+    v_uint32x16 delta = v512_setall_u32(1 << (n-1));
+    return v_pack_u(v_reinterpret_as_s32((a + delta) >> n),
+                    v_reinterpret_as_s32((b + delta) >> n));
+}
+
+template<int n> inline
+void v_rshr_pack_store(ushort* ptr, const v_uint32x16& a)
+{
+    v_uint32x16 delta = v512_setall_u32(1 << (n-1));
+    v_pack_u_store(ptr, v_reinterpret_as_s32((a + delta) >> n));
+}
+
+template<int n> inline
+v_uint16x32 v_rshr_pack_u(const v_int32x16& a, const v_int32x16& b)
+{
+    v_int32x16 delta = v512_setall_s32(1 << (n-1));
+    return v_pack_u((a + delta) >> n, (b + delta) >> n);
+}
+
+template<int n> inline
+void v_rshr_pack_u_store(ushort* ptr, const v_int32x16& a)
+{
+    v_int32x16 delta = v512_setall_s32(1 << (n-1));
+    v_pack_u_store(ptr, (a + delta) >> n);
+}
+
+template<int n> inline
+v_int16x32 v_rshr_pack(const v_int32x16& a, const
v_int32x16& b)
+{
+    v_int32x16 delta = v512_setall_s32(1 << (n-1));
+    return v_pack((a + delta) >> n, (b + delta) >> n);
+}
+
+template<int n> inline
+void v_rshr_pack_store(short* ptr, const v_int32x16& a)
+{
+    v_int32x16 delta = v512_setall_s32(1 << (n-1));
+    v_pack_store(ptr, (a + delta) >> n);
+}
+
+// 64
+// Non-saturating pack
+inline v_uint32x16 v_pack(const v_uint64x8& a, const v_uint64x8& b)
+{ return v_uint32x16(_v512_combine(_mm512_cvtepi64_epi32(a.val), _mm512_cvtepi64_epi32(b.val))); }
+
+inline v_int32x16 v_pack(const v_int64x8& a, const v_int64x8& b)
+{ return v_reinterpret_as_s32(v_pack(v_reinterpret_as_u64(a), v_reinterpret_as_u64(b))); }
+
+inline void v_pack_store(unsigned* ptr, const v_uint64x8& a)
+{ _mm256_storeu_si256((__m256i*)ptr, _mm512_cvtepi64_epi32(a.val)); }
+
+inline void v_pack_store(int* ptr, const v_int64x8& b)
+{ v_pack_store((unsigned*)ptr, v_reinterpret_as_u64(b)); }
+
+template<int n> inline
+v_uint32x16 v_rshr_pack(const v_uint64x8& a, const v_uint64x8& b)
+{
+    v_uint64x8 delta = v512_setall_u64((uint64)1 << (n-1));
+    return v_pack((a + delta) >> n, (b + delta) >> n);
+}
+
+template<int n> inline
+void v_rshr_pack_store(unsigned* ptr, const v_uint64x8& a)
+{
+    v_uint64x8 delta = v512_setall_u64((uint64)1 << (n-1));
+    v_pack_store(ptr, (a + delta) >> n);
+}
+
+template<int n> inline
+v_int32x16 v_rshr_pack(const v_int64x8& a, const v_int64x8& b)
+{
+    v_int64x8 delta = v512_setall_s64((int64)1 << (n-1));
+    return v_pack((a + delta) >> n, (b + delta) >> n);
+}
+
+template<int n> inline
+void v_rshr_pack_store(int* ptr, const v_int64x8& a)
+{
+    v_int64x8 delta = v512_setall_s64((int64)1 << (n-1));
+    v_pack_store(ptr, (a + delta) >> n);
+}
+
+// pack boolean
+inline v_uint8x64 v_pack_b(const v_uint16x32& a, const v_uint16x32& b)
+{ return v_uint8x64(_mm512_permutexvar_epi64(_v512_set_epu64(7, 5, 3, 1, 6, 4, 2, 0), _mm512_packs_epi16(a.val, b.val))); }
+
+inline v_uint8x64 v_pack_b(const v_uint32x16& a, const v_uint32x16& b,
+                           const v_uint32x16& c, const v_uint32x16& d)
+{
+    __m512i ab = _mm512_packs_epi32(a.val, b.val);
+    __m512i cd = _mm512_packs_epi32(c.val, d.val);
+
+    return v_uint8x64(_mm512_permutexvar_epi32(_v512_set_epu32(15, 11, 7, 3, 14, 10, 6, 2, 13, 9, 5, 1, 12, 8, 4, 0), _mm512_packs_epi16(ab, cd)));
+}
+
+inline v_uint8x64 v_pack_b(const v_uint64x8& a, const v_uint64x8& b, const v_uint64x8& c,
+                           const v_uint64x8& d, const v_uint64x8& e, const v_uint64x8& f,
+                           const v_uint64x8& g, const v_uint64x8& h)
+{
+    __m512i ab = _mm512_packs_epi32(a.val, b.val);
+    __m512i cd = _mm512_packs_epi32(c.val, d.val);
+    __m512i ef = _mm512_packs_epi32(e.val, f.val);
+    __m512i gh = _mm512_packs_epi32(g.val, h.val);
+
+    __m512i abcd = _mm512_packs_epi32(ab, cd);
+    __m512i efgh = _mm512_packs_epi32(ef, gh);
+
+    return v_uint8x64(_mm512_permutexvar_epi16(_v512_set_epu16(31, 23, 15, 7, 30, 22, 14, 6, 29, 21, 13, 5, 28, 20, 12, 4,
+                                                               27, 19, 11, 3, 26, 18, 10, 2, 25, 17, 9, 1, 24, 16, 8, 0), _mm512_packs_epi16(abcd, efgh)));
+}
+
+/* Recombine */
+// its up there with load and store operations
+
+/* Extract */
+#define OPENCV_HAL_IMPL_AVX512_EXTRACT(_Tpvec) \
+    template<int s> \
+    inline _Tpvec v_extract(const _Tpvec& a, const _Tpvec& b) \
+    { return v_rotate_right<s>(a, b); }
+
+OPENCV_HAL_IMPL_AVX512_EXTRACT(v_uint8x64)
+OPENCV_HAL_IMPL_AVX512_EXTRACT(v_int8x64)
+OPENCV_HAL_IMPL_AVX512_EXTRACT(v_uint16x32)
+OPENCV_HAL_IMPL_AVX512_EXTRACT(v_int16x32)
+OPENCV_HAL_IMPL_AVX512_EXTRACT(v_uint32x16)
+OPENCV_HAL_IMPL_AVX512_EXTRACT(v_int32x16)
+OPENCV_HAL_IMPL_AVX512_EXTRACT(v_uint64x8)
+OPENCV_HAL_IMPL_AVX512_EXTRACT(v_int64x8)
+OPENCV_HAL_IMPL_AVX512_EXTRACT(v_float32x16)
+OPENCV_HAL_IMPL_AVX512_EXTRACT(v_float64x8)
+
+#define OPENCV_HAL_IMPL_AVX512_EXTRACT_N(_Tpvec, _Tp) \
+template<int i> inline _Tp v_extract_n(_Tpvec v) { return v_rotate_right<i>(v).get0(); }
+
+OPENCV_HAL_IMPL_AVX512_EXTRACT_N(v_uint8x64, uchar)
+OPENCV_HAL_IMPL_AVX512_EXTRACT_N(v_int8x64, schar)
+OPENCV_HAL_IMPL_AVX512_EXTRACT_N(v_uint16x32, ushort)
+OPENCV_HAL_IMPL_AVX512_EXTRACT_N(v_int16x32, short)
+OPENCV_HAL_IMPL_AVX512_EXTRACT_N(v_uint32x16, uint)
+OPENCV_HAL_IMPL_AVX512_EXTRACT_N(v_int32x16, int)
+OPENCV_HAL_IMPL_AVX512_EXTRACT_N(v_uint64x8, uint64)
+OPENCV_HAL_IMPL_AVX512_EXTRACT_N(v_int64x8, int64)
+OPENCV_HAL_IMPL_AVX512_EXTRACT_N(v_float32x16, float)
+OPENCV_HAL_IMPL_AVX512_EXTRACT_N(v_float64x8, double)
+
+template<int i>
+inline v_uint32x16 v_broadcast_element(v_uint32x16 a)
+{
+    static const __m512i perm = _mm512_set1_epi32((char)i);
+    return v_uint32x16(_mm512_permutexvar_epi32(perm, a.val));
+}
+
+template<int i>
+inline v_int32x16 v_broadcast_element(const v_int32x16 &a)
+{ return v_reinterpret_as_s32(v_broadcast_element<i>(v_reinterpret_as_u32(a))); }
+
+template<int i>
+inline v_float32x16 v_broadcast_element(const v_float32x16 &a)
+{ return v_reinterpret_as_f32(v_broadcast_element<i>(v_reinterpret_as_u32(a))); }
+
+
+///////////////////// load deinterleave /////////////////////////////
+
+inline void v_load_deinterleave( const uchar* ptr, v_uint8x64& a, v_uint8x64& b )
+{
+    __m512i ab0 = _mm512_loadu_si512((const __m512i*)ptr);
+    __m512i ab1 = _mm512_loadu_si512((const __m512i*)(ptr + 64));
+#if CV_AVX_512VBMI
+    __m512i mask0 = _v512_set_epu8(126, 124, 122, 120, 118, 116, 114, 112, 110, 108, 106, 104, 102, 100, 98, 96,
+                                    94,  92,  90,  88,  86,  84,  82,  80,  78,  76,  74,  72,  70,  68, 66, 64,
+                                    62,  60,  58,  56,  54,  52,  50,  48,  46,  44,  42,  40,  38,  36, 34, 32,
+                                    30,  28,  26,  24,  22,  20,  18,  16,  14,  12,  10,   8,   6,   4,  2,  0);
+    __m512i mask1 = _v512_set_epu8(127, 125, 123, 121, 119, 117, 115, 113, 111, 109, 107, 105, 103, 101, 99, 97,
+                                    95,  93,  91,  89,  87,  85,  83,  81,  79,  77,  75,  73,  71,  69, 67, 65,
+                                    63,  61,  59,  57,  55,  53,  51,  49,  47,  45,  43,  41,  39,  37, 35, 33,
+                                    31,  29,  27,  25,  23,  21,  19,  17,  15,  13,  11,   9,   7,   5,  3,  1);
+    a = v_uint8x64(_mm512_permutex2var_epi8(ab0, mask0, ab1));
+    b = v_uint8x64(_mm512_permutex2var_epi8(ab0, mask1, ab1));
+#else
+    __m512i mask0 = _mm512_set4_epi32(0x0f0d0b09, 0x07050301, 0x0e0c0a08, 0x06040200);
+    __m512i a0b0 = _mm512_shuffle_epi8(ab0, mask0);
+    __m512i a1b1 = _mm512_shuffle_epi8(ab1, mask0);
+    __m512i mask1 = _v512_set_epu64(14, 12, 10, 8, 6, 4, 2, 0);
+    __m512i mask2 = _v512_set_epu64(15, 13, 11, 9, 7, 5, 3, 1);
+    a = v_uint8x64(_mm512_permutex2var_epi64(a0b0, mask1, a1b1));
+    b = v_uint8x64(_mm512_permutex2var_epi64(a0b0, mask2, a1b1));
+#endif
+}
+
+inline void v_load_deinterleave( const ushort* ptr, v_uint16x32& a, v_uint16x32& b )
+{
+    __m512i ab0 = _mm512_loadu_si512((const __m512i*)ptr);
+    __m512i ab1 = _mm512_loadu_si512((const __m512i*)(ptr + 32));
+    __m512i mask0 = _v512_set_epu16(62, 60, 58, 56, 54, 52, 50, 48, 46, 44, 42, 40, 38, 36, 34, 32,
+                                    30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10,  8,  6,  4,  2,  0);
+    __m512i mask1 = _v512_set_epu16(63, 61, 59, 57, 55, 53, 51, 49, 47, 45, 43, 41, 39, 37, 35, 33,
+                                    31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11,  9,  7,  5,  3,  1);
+    a = v_uint16x32(_mm512_permutex2var_epi16(ab0, mask0, ab1));
+    b = v_uint16x32(_mm512_permutex2var_epi16(ab0, mask1, ab1));
+}
+
+inline void v_load_deinterleave( const unsigned* ptr, v_uint32x16& a, v_uint32x16& b )
+{
+
__m512i ab0 = _mm512_loadu_si512((const __m512i*)ptr); + __m512i ab1 = _mm512_loadu_si512((const __m512i*)(ptr + 16)); + __m512i mask0 = _v512_set_epu32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0); + __m512i mask1 = _v512_set_epu32(31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1); + a = v_uint32x16(_mm512_permutex2var_epi32(ab0, mask0, ab1)); + b = v_uint32x16(_mm512_permutex2var_epi32(ab0, mask1, ab1)); +} + +inline void v_load_deinterleave( const uint64* ptr, v_uint64x8& a, v_uint64x8& b ) +{ + __m512i ab0 = _mm512_loadu_si512((const __m512i*)ptr); + __m512i ab1 = _mm512_loadu_si512((const __m512i*)(ptr + 8)); + __m512i mask0 = _v512_set_epu64(14, 12, 10, 8, 6, 4, 2, 0); + __m512i mask1 = _v512_set_epu64(15, 13, 11, 9, 7, 5, 3, 1); + a = v_uint64x8(_mm512_permutex2var_epi64(ab0, mask0, ab1)); + b = v_uint64x8(_mm512_permutex2var_epi64(ab0, mask1, ab1)); +} + +inline void v_load_deinterleave( const uchar* ptr, v_uint8x64& a, v_uint8x64& b, v_uint8x64& c ) +{ + __m512i bgr0 = _mm512_loadu_si512((const __m512i*)ptr); + __m512i bgr1 = _mm512_loadu_si512((const __m512i*)(ptr + 64)); + __m512i bgr2 = _mm512_loadu_si512((const __m512i*)(ptr + 128)); + +#if CV_AVX_512VBMI2 + __m512i mask0 = _v512_set_epu8(126, 123, 120, 117, 114, 111, 108, 105, 102, 99, 96, 93, 90, 87, 84, 81, + 78, 75, 72, 69, 66, 63, 60, 57, 54, 51, 48, 45, 42, 39, 36, 33, + 30, 27, 24, 21, 18, 15, 12, 9, 6, 3, 0, 62, 59, 56, 53, 50, + 47, 44, 41, 38, 35, 32, 29, 26, 23, 20, 17, 14, 11, 8, 5, 2); + __m512i r0b01 = _mm512_permutex2var_epi8(bgr0, mask0, bgr1); + __m512i b1g12 = _mm512_permutex2var_epi8(bgr1, mask0, bgr2); + __m512i r12b2 = _mm512_permutex2var_epi8(bgr1, + _v512_set_epu8(125, 122, 119, 116, 113, 110, 107, 104, 101, 98, 95, 92, 89, 86, 83, 80, + 77, 74, 71, 68, 65, 127, 124, 121, 118, 115, 112, 109, 106, 103, 100, 97, + 94, 91, 88, 85, 82, 79, 76, 73, 70, 67, 64, 61, 58, 55, 52, 49, + 46, 43, 40, 37, 34, 31, 28, 25, 22, 19, 16, 13, 10, 7, 4, 1), bgr2); + a = v_uint8x64(_mm512_mask_compress_epi8(r12b2, 0xffffffffffe00000, r0b01)); + b = v_uint8x64(_mm512_mask_compress_epi8(b1g12, 0x2492492492492492, bgr0)); + c = v_uint8x64(_mm512_mask_expand_epi8(r0b01, 0xffffffffffe00000, r12b2)); +#elif CV_AVX_512VBMI + __m512i b0g0b1 = _mm512_mask_blend_epi8(0xb6db6db6db6db6db, bgr1, bgr0); + __m512i g1r1g2 = _mm512_mask_blend_epi8(0xb6db6db6db6db6db, bgr2, bgr1); + __m512i r2b2r0 = _mm512_mask_blend_epi8(0xb6db6db6db6db6db, bgr0, bgr2); + a = v_uint8x64(_mm512_permutex2var_epi8(b0g0b1, _v512_set_epu8(125, 122, 119, 116, 113, 110, 107, 104, 101, 98, 95, 92, 89, 86, 83, 80, + 77, 74, 71, 68, 65, 63, 61, 60, 58, 57, 55, 54, 52, 51, 49, 48, + 46, 45, 43, 42, 40, 39, 37, 36, 34, 33, 31, 30, 28, 27, 25, 24, + 23, 21, 20, 18, 17, 15, 14, 12, 11, 9, 8, 6, 5, 3, 2, 0), bgr2)); + b = v_uint8x64(_mm512_permutex2var_epi8(g1r1g2, _v512_set_epu8( 63, 61, 60, 58, 57, 55, 54, 52, 51, 49, 48, 46, 45, 43, 42, 40, + 39, 37, 36, 34, 33, 31, 30, 28, 27, 25, 24, 23, 21, 20, 18, 17, + 15, 14, 12, 11, 9, 8, 6, 5, 3, 2, 0, 126, 123, 120, 117, 114, + 111, 108, 105, 102, 99, 96, 93, 90, 87, 84, 81, 78, 75, 72, 69, 66), bgr0)); + c = v_uint8x64(_mm512_permutex2var_epi8(r2b2r0, _v512_set_epu8( 63, 60, 57, 54, 51, 48, 45, 42, 39, 36, 33, 30, 27, 24, 21, 18, + 15, 12, 9, 6, 3, 0, 125, 122, 119, 116, 113, 110, 107, 104, 101, 98, + 95, 92, 89, 86, 83, 80, 77, 74, 71, 68, 65, 62, 59, 56, 53, 50, + 47, 44, 41, 38, 35, 32, 29, 26, 23, 20, 17, 14, 11, 8, 5, 2), bgr1)); +#else + __m512i mask0 = _v512_set_epu16(61, 58, 55, 52, 49, 46, 43, 
40, 37, 34, 63, 60, 57, 54, 51, 48, + 45, 42, 39, 36, 33, 30, 27, 24, 21, 18, 15, 12, 9, 6, 3, 0); + __m512i b01g1 = _mm512_permutex2var_epi16(bgr0, mask0, bgr1); + __m512i r12b2 = _mm512_permutex2var_epi16(bgr1, mask0, bgr2); + __m512i g20r0 = _mm512_permutex2var_epi16(bgr2, mask0, bgr0); + + __m512i b0g0 = _mm512_mask_blend_epi32(0xf800, b01g1, r12b2); + __m512i r0b1 = _mm512_permutex2var_epi16(bgr1, _v512_set_epu16(42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 29, 26, 23, 20, 17, + 14, 11, 8, 5, 2, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43), g20r0); + __m512i g1r1 = _mm512_alignr_epi32(r12b2, g20r0, 11); + a = v_uint8x64(_mm512_mask_blend_epi8(0xAAAAAAAAAAAAAAAA, b0g0, r0b1)); + c = v_uint8x64(_mm512_mask_blend_epi8(0xAAAAAAAAAAAAAAAA, r0b1, g1r1)); + b = v_uint8x64(_mm512_shuffle_epi8(_mm512_mask_blend_epi8(0xAAAAAAAAAAAAAAAA, g1r1, b0g0), _mm512_set4_epi32(0x0e0f0c0d, 0x0a0b0809, 0x06070405, 0x02030001))); +#endif +} + +inline void v_load_deinterleave( const ushort* ptr, v_uint16x32& a, v_uint16x32& b, v_uint16x32& c ) +{ + __m512i bgr0 = _mm512_loadu_si512((const __m512i*)ptr); + __m512i bgr1 = _mm512_loadu_si512((const __m512i*)(ptr + 32)); + __m512i bgr2 = _mm512_loadu_si512((const __m512i*)(ptr + 64)); + + __m512i mask0 = _v512_set_epu16(61, 58, 55, 52, 49, 46, 43, 40, 37, 34, 63, 60, 57, 54, 51, 48, + 45, 42, 39, 36, 33, 30, 27, 24, 21, 18, 15, 12, 9, 6, 3, 0); + __m512i b01g1 = _mm512_permutex2var_epi16(bgr0, mask0, bgr1); + __m512i r12b2 = _mm512_permutex2var_epi16(bgr1, mask0, bgr2); + __m512i g20r0 = _mm512_permutex2var_epi16(bgr2, mask0, bgr0); + + a = v_uint16x32(_mm512_mask_blend_epi32(0xf800, b01g1, r12b2)); + b = v_uint16x32(_mm512_permutex2var_epi16(bgr1, _v512_set_epu16(42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 29, 26, 23, 20, 17, + 14, 11, 8, 5, 2, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43), g20r0)); + c = v_uint16x32(_mm512_alignr_epi32(r12b2, g20r0, 11)); +} + +inline void v_load_deinterleave( const unsigned* ptr, v_uint32x16& a, v_uint32x16& b, v_uint32x16& c ) +{ + __m512i bgr0 = _mm512_loadu_si512((const __m512i*)ptr); + __m512i bgr1 = _mm512_loadu_si512((const __m512i*)(ptr + 16)); + __m512i bgr2 = _mm512_loadu_si512((const __m512i*)(ptr + 32)); + + __m512i mask0 = _v512_set_epu32(29, 26, 23, 20, 17, 30, 27, 24, 21, 18, 15, 12, 9, 6, 3, 0); + __m512i b01r1 = _mm512_permutex2var_epi32(bgr0, mask0, bgr1); + __m512i g12b2 = _mm512_permutex2var_epi32(bgr1, mask0, bgr2); + __m512i r20g0 = _mm512_permutex2var_epi32(bgr2, mask0, bgr0); + + a = v_uint32x16(_mm512_mask_blend_epi32(0xf800, b01r1, g12b2)); + b = v_uint32x16(_mm512_alignr_epi32(g12b2, r20g0, 11)); + c = v_uint32x16(_mm512_permutex2var_epi32(bgr1, _v512_set_epu32(21, 20, 19, 18, 17, 16, 13, 10, 7, 4, 1, 26, 25, 24, 23, 22), r20g0)); +} + +inline void v_load_deinterleave( const uint64* ptr, v_uint64x8& a, v_uint64x8& b, v_uint64x8& c ) +{ + __m512i bgr0 = _mm512_loadu_si512((const __m512i*)ptr); + __m512i bgr1 = _mm512_loadu_si512((const __m512i*)(ptr + 8)); + __m512i bgr2 = _mm512_loadu_si512((const __m512i*)(ptr + 16)); + + __m512i mask0 = _v512_set_epu64(13, 10, 15, 12, 9, 6, 3, 0); + __m512i b01g1 = _mm512_permutex2var_epi64(bgr0, mask0, bgr1); + __m512i r12b2 = _mm512_permutex2var_epi64(bgr1, mask0, bgr2); + __m512i g20r0 = _mm512_permutex2var_epi64(bgr2, mask0, bgr0); + + a = v_uint64x8(_mm512_mask_blend_epi64(0xc0, b01g1, r12b2)); + c = v_uint64x8(_mm512_alignr_epi64(r12b2, g20r0, 6)); + b = v_uint64x8(_mm512_permutex2var_epi64(bgr1, _v512_set_epu64(10, 9, 8, 5, 2, 13, 12, 11), g20r0)); +} + +inline 
void v_load_deinterleave( const uchar* ptr, v_uint8x64& a, v_uint8x64& b, v_uint8x64& c, v_uint8x64& d ) +{ + __m512i bgra0 = _mm512_loadu_si512((const __m512i*)ptr); + __m512i bgra1 = _mm512_loadu_si512((const __m512i*)(ptr + 64)); + __m512i bgra2 = _mm512_loadu_si512((const __m512i*)(ptr + 128)); + __m512i bgra3 = _mm512_loadu_si512((const __m512i*)(ptr + 192)); + +#if CV_AVX_512VBMI + __m512i mask0 = _v512_set_epu8(126, 124, 122, 120, 118, 116, 114, 112, 110, 108, 106, 104, 102, 100, 98, 96, + 94, 92, 90, 88, 86, 84, 82, 80, 78, 76, 74, 72, 70, 68, 66, 64, + 62, 60, 58, 56, 54, 52, 50, 48, 46, 44, 42, 40, 38, 36, 34, 32, + 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0); + __m512i mask1 = _v512_set_epu8(127, 125, 123, 121, 119, 117, 115, 113, 111, 109, 107, 105, 103, 101, 99, 97, + 95, 93, 91, 89, 87, 85, 83, 81, 79, 77, 75, 73, 71, 69, 67, 65, + 63, 61, 59, 57, 55, 53, 51, 49, 47, 45, 43, 41, 39, 37, 35, 33, + 31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1); + + __m512i br01 = _mm512_permutex2var_epi8(bgra0, mask0, bgra1); + __m512i ga01 = _mm512_permutex2var_epi8(bgra0, mask1, bgra1); + __m512i br23 = _mm512_permutex2var_epi8(bgra2, mask0, bgra3); + __m512i ga23 = _mm512_permutex2var_epi8(bgra2, mask1, bgra3); + + a = v_uint8x64(_mm512_permutex2var_epi8(br01, mask0, br23)); + c = v_uint8x64(_mm512_permutex2var_epi8(br01, mask1, br23)); + b = v_uint8x64(_mm512_permutex2var_epi8(ga01, mask0, ga23)); + d = v_uint8x64(_mm512_permutex2var_epi8(ga01, mask1, ga23)); +#else + __m512i mask = _mm512_set4_epi32(0x0f0b0703, 0x0e0a0602, 0x0d090501, 0x0c080400); + __m512i b0g0r0a0 = _mm512_shuffle_epi8(bgra0, mask); + __m512i b1g1r1a1 = _mm512_shuffle_epi8(bgra1, mask); + __m512i b2g2r2a2 = _mm512_shuffle_epi8(bgra2, mask); + __m512i b3g3r3a3 = _mm512_shuffle_epi8(bgra3, mask); + + __m512i mask0 = _v512_set_epu32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0); + __m512i mask1 = _v512_set_epu32(31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1); + + __m512i br01 = _mm512_permutex2var_epi32(b0g0r0a0, mask0, b1g1r1a1); + __m512i ga01 = _mm512_permutex2var_epi32(b0g0r0a0, mask1, b1g1r1a1); + __m512i br23 = _mm512_permutex2var_epi32(b2g2r2a2, mask0, b3g3r3a3); + __m512i ga23 = _mm512_permutex2var_epi32(b2g2r2a2, mask1, b3g3r3a3); + + a = v_uint8x64(_mm512_permutex2var_epi32(br01, mask0, br23)); + c = v_uint8x64(_mm512_permutex2var_epi32(br01, mask1, br23)); + b = v_uint8x64(_mm512_permutex2var_epi32(ga01, mask0, ga23)); + d = v_uint8x64(_mm512_permutex2var_epi32(ga01, mask1, ga23)); +#endif +} + +inline void v_load_deinterleave( const ushort* ptr, v_uint16x32& a, v_uint16x32& b, v_uint16x32& c, v_uint16x32& d ) +{ + __m512i bgra0 = _mm512_loadu_si512((const __m512i*)ptr); + __m512i bgra1 = _mm512_loadu_si512((const __m512i*)(ptr + 32)); + __m512i bgra2 = _mm512_loadu_si512((const __m512i*)(ptr + 64)); + __m512i bgra3 = _mm512_loadu_si512((const __m512i*)(ptr + 96)); + + __m512i mask0 = _v512_set_epu16(62, 60, 58, 56, 54, 52, 50, 48, 46, 44, 42, 40, 38, 36, 34, 32, + 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0); + __m512i mask1 = _v512_set_epu16(63, 61, 59, 57, 55, 53, 51, 49, 47, 45, 43, 41, 39, 37, 35, 33, + 31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1); + + __m512i br01 = _mm512_permutex2var_epi16(bgra0, mask0, bgra1); + __m512i ga01 = _mm512_permutex2var_epi16(bgra0, mask1, bgra1); + __m512i br23 = _mm512_permutex2var_epi16(bgra2, mask0, bgra3); + __m512i ga23 = _mm512_permutex2var_epi16(bgra2, mask1, bgra3); + + a = 
v_uint16x32(_mm512_permutex2var_epi16(br01, mask0, br23)); + c = v_uint16x32(_mm512_permutex2var_epi16(br01, mask1, br23)); + b = v_uint16x32(_mm512_permutex2var_epi16(ga01, mask0, ga23)); + d = v_uint16x32(_mm512_permutex2var_epi16(ga01, mask1, ga23)); +} + +inline void v_load_deinterleave( const unsigned* ptr, v_uint32x16& a, v_uint32x16& b, v_uint32x16& c, v_uint32x16& d ) +{ + __m512i bgra0 = _mm512_loadu_si512((const __m512i*)ptr); + __m512i bgra1 = _mm512_loadu_si512((const __m512i*)(ptr + 16)); + __m512i bgra2 = _mm512_loadu_si512((const __m512i*)(ptr + 32)); + __m512i bgra3 = _mm512_loadu_si512((const __m512i*)(ptr + 48)); + + __m512i mask0 = _v512_set_epu32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0); + __m512i mask1 = _v512_set_epu32(31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1); + + __m512i br01 = _mm512_permutex2var_epi32(bgra0, mask0, bgra1); + __m512i ga01 = _mm512_permutex2var_epi32(bgra0, mask1, bgra1); + __m512i br23 = _mm512_permutex2var_epi32(bgra2, mask0, bgra3); + __m512i ga23 = _mm512_permutex2var_epi32(bgra2, mask1, bgra3); + + a = v_uint32x16(_mm512_permutex2var_epi32(br01, mask0, br23)); + c = v_uint32x16(_mm512_permutex2var_epi32(br01, mask1, br23)); + b = v_uint32x16(_mm512_permutex2var_epi32(ga01, mask0, ga23)); + d = v_uint32x16(_mm512_permutex2var_epi32(ga01, mask1, ga23)); +} + +inline void v_load_deinterleave( const uint64* ptr, v_uint64x8& a, v_uint64x8& b, v_uint64x8& c, v_uint64x8& d ) +{ + __m512i bgra0 = _mm512_loadu_si512((const __m512i*)ptr); + __m512i bgra1 = _mm512_loadu_si512((const __m512i*)(ptr + 8)); + __m512i bgra2 = _mm512_loadu_si512((const __m512i*)(ptr + 16)); + __m512i bgra3 = _mm512_loadu_si512((const __m512i*)(ptr + 24)); + + __m512i mask0 = _v512_set_epu64(14, 12, 10, 8, 6, 4, 2, 0); + __m512i mask1 = _v512_set_epu64(15, 13, 11, 9, 7, 5, 3, 1); + + __m512i br01 = _mm512_permutex2var_epi64(bgra0, mask0, bgra1); + __m512i ga01 = _mm512_permutex2var_epi64(bgra0, mask1, bgra1); + __m512i br23 = _mm512_permutex2var_epi64(bgra2, mask0, bgra3); + __m512i ga23 = _mm512_permutex2var_epi64(bgra2, mask1, bgra3); + + a = v_uint64x8(_mm512_permutex2var_epi64(br01, mask0, br23)); + c = v_uint64x8(_mm512_permutex2var_epi64(br01, mask1, br23)); + b = v_uint64x8(_mm512_permutex2var_epi64(ga01, mask0, ga23)); + d = v_uint64x8(_mm512_permutex2var_epi64(ga01, mask1, ga23)); +} + +///////////////////////////// store interleave ///////////////////////////////////// + +inline void v_store_interleave( uchar* ptr, const v_uint8x64& x, const v_uint8x64& y, + hal::StoreMode mode=hal::STORE_UNALIGNED ) +{ + v_uint8x64 low, high; + v_zip(x, y, low, high); + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm512_stream_si512((__m512i*)ptr, low.val); + _mm512_stream_si512((__m512i*)(ptr + 64), high.val); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm512_store_si512((__m512i*)ptr, low.val); + _mm512_store_si512((__m512i*)(ptr + 64), high.val); + } + else + { + _mm512_storeu_si512((__m512i*)ptr, low.val); + _mm512_storeu_si512((__m512i*)(ptr + 64), high.val); + } +} + +inline void v_store_interleave( ushort* ptr, const v_uint16x32& x, const v_uint16x32& y, + hal::StoreMode mode=hal::STORE_UNALIGNED ) +{ + v_uint16x32 low, high; + v_zip(x, y, low, high); + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm512_stream_si512((__m512i*)ptr, low.val); + _mm512_stream_si512((__m512i*)(ptr + 32), high.val); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm512_store_si512((__m512i*)ptr, low.val); + _mm512_store_si512((__m512i*)(ptr + 32), 
high.val); + } + else + { + _mm512_storeu_si512((__m512i*)ptr, low.val); + _mm512_storeu_si512((__m512i*)(ptr + 32), high.val); + } +} + +inline void v_store_interleave( unsigned* ptr, const v_uint32x16& x, const v_uint32x16& y, + hal::StoreMode mode=hal::STORE_UNALIGNED ) +{ + v_uint32x16 low, high; + v_zip(x, y, low, high); + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm512_stream_si512((__m512i*)ptr, low.val); + _mm512_stream_si512((__m512i*)(ptr + 16), high.val); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm512_store_si512((__m512i*)ptr, low.val); + _mm512_store_si512((__m512i*)(ptr + 16), high.val); + } + else + { + _mm512_storeu_si512((__m512i*)ptr, low.val); + _mm512_storeu_si512((__m512i*)(ptr + 16), high.val); + } +} + +inline void v_store_interleave( uint64* ptr, const v_uint64x8& x, const v_uint64x8& y, + hal::StoreMode mode=hal::STORE_UNALIGNED ) +{ + v_uint64x8 low, high; + v_zip(x, y, low, high); + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm512_stream_si512((__m512i*)ptr, low.val); + _mm512_stream_si512((__m512i*)(ptr + 8), high.val); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm512_store_si512((__m512i*)ptr, low.val); + _mm512_store_si512((__m512i*)(ptr + 8), high.val); + } + else + { + _mm512_storeu_si512((__m512i*)ptr, low.val); + _mm512_storeu_si512((__m512i*)(ptr + 8), high.val); + } +} + +inline void v_store_interleave( uchar* ptr, const v_uint8x64& a, const v_uint8x64& b, const v_uint8x64& c, + hal::StoreMode mode=hal::STORE_UNALIGNED ) +{ +#if CV_AVX_512VBMI + __m512i mask0 = _v512_set_epu8(127, 84, 20, 126, 83, 19, 125, 82, 18, 124, 81, 17, 123, 80, 16, 122, + 79, 15, 121, 78, 14, 120, 77, 13, 119, 76, 12, 118, 75, 11, 117, 74, + 10, 116, 73, 9, 115, 72, 8, 114, 71, 7, 113, 70, 6, 112, 69, 5, + 111, 68, 4, 110, 67, 3, 109, 66, 2, 108, 65, 1, 107, 64, 0, 106); + __m512i mask1 = _v512_set_epu8( 21, 42, 105, 20, 41, 104, 19, 40, 103, 18, 39, 102, 17, 38, 101, 16, + 37, 100, 15, 36, 99, 14, 35, 98, 13, 34, 97, 12, 33, 96, 11, 32, + 95, 10, 31, 94, 9, 30, 93, 8, 29, 92, 7, 28, 91, 6, 27, 90, + 5, 26, 89, 4, 25, 88, 3, 24, 87, 2, 23, 86, 1, 22, 85, 0); + __m512i mask2 = _v512_set_epu8(106, 127, 63, 105, 126, 62, 104, 125, 61, 103, 124, 60, 102, 123, 59, 101, + 122, 58, 100, 121, 57, 99, 120, 56, 98, 119, 55, 97, 118, 54, 96, 117, + 53, 95, 116, 52, 94, 115, 51, 93, 114, 50, 92, 113, 49, 91, 112, 48, + 90, 111, 47, 89, 110, 46, 88, 109, 45, 87, 108, 44, 86, 107, 43, 85); + __m512i r2g0r0 = _mm512_permutex2var_epi8(b.val, mask0, c.val); + __m512i b0r1b1 = _mm512_permutex2var_epi8(a.val, mask1, c.val); + __m512i g1b2g2 = _mm512_permutex2var_epi8(a.val, mask2, b.val); + + __m512i bgr0 = _mm512_mask_blend_epi8(0x9249249249249249, r2g0r0, b0r1b1); + __m512i bgr1 = _mm512_mask_blend_epi8(0x9249249249249249, b0r1b1, g1b2g2); + __m512i bgr2 = _mm512_mask_blend_epi8(0x9249249249249249, g1b2g2, r2g0r0); +#else + __m512i g1g0 = _mm512_shuffle_epi8(b.val, _mm512_set4_epi32(0x0e0f0c0d, 0x0a0b0809, 0x06070405, 0x02030001)); + __m512i b0g0 = _mm512_mask_blend_epi8(0xAAAAAAAAAAAAAAAA, a.val, g1g0); + __m512i r0b1 = _mm512_mask_blend_epi8(0xAAAAAAAAAAAAAAAA, c.val, a.val); + __m512i g1r1 = _mm512_mask_blend_epi8(0xAAAAAAAAAAAAAAAA, g1g0, c.val); + + __m512i mask0 = _v512_set_epu16(42, 10, 31, 41, 9, 30, 40, 8, 29, 39, 7, 28, 38, 6, 27, 37, + 5, 26, 36, 4, 25, 35, 3, 24, 34, 2, 23, 33, 1, 22, 32, 0); + __m512i mask1 = _v512_set_epu16(21, 52, 41, 20, 51, 40, 19, 50, 39, 18, 49, 38, 17, 48, 37, 16, + 47, 36, 15, 46, 35, 14, 45, 34, 13, 44, 33, 12, 43, 32, 11, 42); + 
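/* The blend masks used by these 3-channel stores (0x9249249249249249 over 64 byte lanes,
   0x24924924 over 32 16-bit lanes) simply select every third lane, which is what a packed
   BGR layout needs. An illustrative, standalone way to derive them:
@code{.cpp}
#include <cstdint>
#include <cstdio>

int main()
{
    std::uint64_t byteMask = 0;
    for (int bit = 0; bit < 64; bit += 3)      // byte lanes 0, 3, 6, ...
        byteMask |= 1ULL << bit;
    std::uint32_t wordMask = 0;
    for (int bit = 2; bit < 32; bit += 3)      // 16-bit lanes 2, 5, 8, ...
        wordMask |= 1u << bit;
    std::printf("%016llx %08x\n", (unsigned long long)byteMask, (unsigned)wordMask);
    // prints: 9249249249249249 24924924
    return 0;
}
@endcode
*/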
__m512i mask2 = _v512_set_epu16(63, 31, 20, 62, 30, 19, 61, 29, 18, 60, 28, 17, 59, 27, 16, 58, + 26, 15, 57, 25, 14, 56, 24, 13, 55, 23, 12, 54, 22, 11, 53, 21); + __m512i b0g0b2 = _mm512_permutex2var_epi16(b0g0, mask0, r0b1); + __m512i r1b1r0 = _mm512_permutex2var_epi16(b0g0, mask1, g1r1); + __m512i g2r2g1 = _mm512_permutex2var_epi16(r0b1, mask2, g1r1); + + __m512i bgr0 = _mm512_mask_blend_epi16(0x24924924, b0g0b2, r1b1r0); + __m512i bgr1 = _mm512_mask_blend_epi16(0x24924924, r1b1r0, g2r2g1); + __m512i bgr2 = _mm512_mask_blend_epi16(0x24924924, g2r2g1, b0g0b2); +#endif + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm512_stream_si512((__m512i*)ptr, bgr0); + _mm512_stream_si512((__m512i*)(ptr + 64), bgr1); + _mm512_stream_si512((__m512i*)(ptr + 128), bgr2); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm512_store_si512((__m512i*)ptr, bgr0); + _mm512_store_si512((__m512i*)(ptr + 64), bgr1); + _mm512_store_si512((__m512i*)(ptr + 128), bgr2); + } + else + { + _mm512_storeu_si512((__m512i*)ptr, bgr0); + _mm512_storeu_si512((__m512i*)(ptr + 64), bgr1); + _mm512_storeu_si512((__m512i*)(ptr + 128), bgr2); + } +} + +inline void v_store_interleave( ushort* ptr, const v_uint16x32& a, const v_uint16x32& b, const v_uint16x32& c, + hal::StoreMode mode=hal::STORE_UNALIGNED ) +{ + __m512i mask0 = _v512_set_epu16(42, 10, 31, 41, 9, 30, 40, 8, 29, 39, 7, 28, 38, 6, 27, 37, + 5, 26, 36, 4, 25, 35, 3, 24, 34, 2, 23, 33, 1, 22, 32, 0); + __m512i mask1 = _v512_set_epu16(21, 52, 41, 20, 51, 40, 19, 50, 39, 18, 49, 38, 17, 48, 37, 16, + 47, 36, 15, 46, 35, 14, 45, 34, 13, 44, 33, 12, 43, 32, 11, 42); + __m512i mask2 = _v512_set_epu16(63, 31, 20, 62, 30, 19, 61, 29, 18, 60, 28, 17, 59, 27, 16, 58, + 26, 15, 57, 25, 14, 56, 24, 13, 55, 23, 12, 54, 22, 11, 53, 21); + __m512i b0g0b2 = _mm512_permutex2var_epi16(a.val, mask0, b.val); + __m512i r1b1r0 = _mm512_permutex2var_epi16(a.val, mask1, c.val); + __m512i g2r2g1 = _mm512_permutex2var_epi16(b.val, mask2, c.val); + + __m512i bgr0 = _mm512_mask_blend_epi16(0x24924924, b0g0b2, r1b1r0); + __m512i bgr1 = _mm512_mask_blend_epi16(0x24924924, r1b1r0, g2r2g1); + __m512i bgr2 = _mm512_mask_blend_epi16(0x24924924, g2r2g1, b0g0b2); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm512_stream_si512((__m512i*)ptr, bgr0); + _mm512_stream_si512((__m512i*)(ptr + 32), bgr1); + _mm512_stream_si512((__m512i*)(ptr + 64), bgr2); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm512_store_si512((__m512i*)ptr, bgr0); + _mm512_store_si512((__m512i*)(ptr + 32), bgr1); + _mm512_store_si512((__m512i*)(ptr + 64), bgr2); + } + else + { + _mm512_storeu_si512((__m512i*)ptr, bgr0); + _mm512_storeu_si512((__m512i*)(ptr + 32), bgr1); + _mm512_storeu_si512((__m512i*)(ptr + 64), bgr2); + } +} + +inline void v_store_interleave( unsigned* ptr, const v_uint32x16& a, const v_uint32x16& b, const v_uint32x16& c, + hal::StoreMode mode=hal::STORE_UNALIGNED ) +{ + __m512i mask0 = _v512_set_epu32(26, 31, 15, 25, 30, 14, 24, 29, 13, 23, 28, 12, 22, 27, 11, 21); + __m512i mask1 = _v512_set_epu32(31, 10, 25, 30, 9, 24, 29, 8, 23, 28, 7, 22, 27, 6, 21, 26); + __m512i g1b2g2 = _mm512_permutex2var_epi32(a.val, mask0, b.val); + __m512i r2r1b1 = _mm512_permutex2var_epi32(a.val, mask1, c.val); + + __m512i bgr0 = _mm512_mask_expand_epi32(_mm512_mask_expand_epi32(_mm512_maskz_expand_epi32(0x9249, a.val), 0x2492, b.val), 0x4924, c.val); + __m512i bgr1 = _mm512_mask_blend_epi32(0x9249, r2r1b1, g1b2g2); + __m512i bgr2 = _mm512_mask_blend_epi32(0x9249, g1b2g2, r2r1b1); + + if( mode == 
hal::STORE_ALIGNED_NOCACHE ) + { + _mm512_stream_si512((__m512i*)ptr, bgr0); + _mm512_stream_si512((__m512i*)(ptr + 16), bgr1); + _mm512_stream_si512((__m512i*)(ptr + 32), bgr2); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm512_store_si512((__m512i*)ptr, bgr0); + _mm512_store_si512((__m512i*)(ptr + 16), bgr1); + _mm512_store_si512((__m512i*)(ptr + 32), bgr2); + } + else + { + _mm512_storeu_si512((__m512i*)ptr, bgr0); + _mm512_storeu_si512((__m512i*)(ptr + 16), bgr1); + _mm512_storeu_si512((__m512i*)(ptr + 32), bgr2); + } +} + +inline void v_store_interleave( uint64* ptr, const v_uint64x8& a, const v_uint64x8& b, const v_uint64x8& c, + hal::StoreMode mode=hal::STORE_UNALIGNED ) +{ + __m512i mask0 = _v512_set_epu64( 5, 12, 7, 4, 11, 6, 3, 10); + __m512i mask1 = _v512_set_epu64(15, 7, 4, 14, 6, 3, 13, 5); + __m512i r1b1b2 = _mm512_permutex2var_epi64(a.val, mask0, c.val); + __m512i g2r2g1 = _mm512_permutex2var_epi64(b.val, mask1, c.val); + + __m512i bgr0 = _mm512_mask_expand_epi64(_mm512_mask_expand_epi64(_mm512_maskz_expand_epi64(0x49, a.val), 0x92, b.val), 0x24, c.val); + __m512i bgr1 = _mm512_mask_blend_epi64(0xdb, g2r2g1, r1b1b2); + __m512i bgr2 = _mm512_mask_blend_epi64(0xdb, r1b1b2, g2r2g1); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm512_stream_si512((__m512i*)ptr, bgr0); + _mm512_stream_si512((__m512i*)(ptr + 8), bgr1); + _mm512_stream_si512((__m512i*)(ptr + 16), bgr2); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm512_store_si512((__m512i*)ptr, bgr0); + _mm512_store_si512((__m512i*)(ptr + 8), bgr1); + _mm512_store_si512((__m512i*)(ptr + 16), bgr2); + } + else + { + _mm512_storeu_si512((__m512i*)ptr, bgr0); + _mm512_storeu_si512((__m512i*)(ptr + 8), bgr1); + _mm512_storeu_si512((__m512i*)(ptr + 16), bgr2); + } +} + +inline void v_store_interleave( uchar* ptr, const v_uint8x64& a, const v_uint8x64& b, + const v_uint8x64& c, const v_uint8x64& d, + hal::StoreMode mode=hal::STORE_UNALIGNED ) +{ + v_uint8x64 br01, br23, ga01, ga23; + v_zip(a, c, br01, br23); + v_zip(b, d, ga01, ga23); + v_uint8x64 bgra0, bgra1, bgra2, bgra3; + v_zip(br01, ga01, bgra0, bgra1); + v_zip(br23, ga23, bgra2, bgra3); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm512_stream_si512((__m512i*)ptr, bgra0.val); + _mm512_stream_si512((__m512i*)(ptr + 64), bgra1.val); + _mm512_stream_si512((__m512i*)(ptr + 128), bgra2.val); + _mm512_stream_si512((__m512i*)(ptr + 192), bgra3.val); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm512_store_si512((__m512i*)ptr, bgra0.val); + _mm512_store_si512((__m512i*)(ptr + 64), bgra1.val); + _mm512_store_si512((__m512i*)(ptr + 128), bgra2.val); + _mm512_store_si512((__m512i*)(ptr + 192), bgra3.val); + } + else + { + _mm512_storeu_si512((__m512i*)ptr, bgra0.val); + _mm512_storeu_si512((__m512i*)(ptr + 64), bgra1.val); + _mm512_storeu_si512((__m512i*)(ptr + 128), bgra2.val); + _mm512_storeu_si512((__m512i*)(ptr + 192), bgra3.val); + } +} + +inline void v_store_interleave( ushort* ptr, const v_uint16x32& a, const v_uint16x32& b, + const v_uint16x32& c, const v_uint16x32& d, + hal::StoreMode mode=hal::STORE_UNALIGNED ) +{ + v_uint16x32 br01, br23, ga01, ga23; + v_zip(a, c, br01, br23); + v_zip(b, d, ga01, ga23); + v_uint16x32 bgra0, bgra1, bgra2, bgra3; + v_zip(br01, ga01, bgra0, bgra1); + v_zip(br23, ga23, bgra2, bgra3); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm512_stream_si512((__m512i*)ptr, bgra0.val); + _mm512_stream_si512((__m512i*)(ptr + 32), bgra1.val); + _mm512_stream_si512((__m512i*)(ptr + 64), bgra2.val); + 
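/* Every interleaving store in this file dispatches on cv::hal::StoreMode: STORE_UNALIGNED
   (the default), STORE_ALIGNED, and STORE_ALIGNED_NOCACHE, where the last maps to
   non-temporal stores (_mm512_stream_si512 here) that bypass the cache. An illustrative
   sketch with the portable 128-bit types (assumes "opencv2/core/hal/intrin.hpp"; the
   aligned and streaming modes require a suitably aligned destination):
@code{.cpp}
#include <opencv2/core/hal/intrin.hpp>

int main()
{
    using namespace cv;
    CV_DECL_ALIGNED(64) ushort dst[8 * 4];
    v_uint16x8 b = v_setall_u16(1), g = v_setall_u16(2), r = v_setall_u16(3), a = v_setall_u16(4);
    v_store_interleave(dst, b, g, r, a);                               // unaligned (default)
    v_store_interleave(dst, b, g, r, a, hal::STORE_ALIGNED);           // aligned stores
    v_store_interleave(dst, b, g, r, a, hal::STORE_ALIGNED_NOCACHE);   // streaming stores
    return 0;
}
@endcode
*/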
_mm512_stream_si512((__m512i*)(ptr + 96), bgra3.val); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm512_store_si512((__m512i*)ptr, bgra0.val); + _mm512_store_si512((__m512i*)(ptr + 32), bgra1.val); + _mm512_store_si512((__m512i*)(ptr + 64), bgra2.val); + _mm512_store_si512((__m512i*)(ptr + 96), bgra3.val); + } + else + { + _mm512_storeu_si512((__m512i*)ptr, bgra0.val); + _mm512_storeu_si512((__m512i*)(ptr + 32), bgra1.val); + _mm512_storeu_si512((__m512i*)(ptr + 64), bgra2.val); + _mm512_storeu_si512((__m512i*)(ptr + 96), bgra3.val); + } +} + +inline void v_store_interleave( unsigned* ptr, const v_uint32x16& a, const v_uint32x16& b, + const v_uint32x16& c, const v_uint32x16& d, + hal::StoreMode mode=hal::STORE_UNALIGNED ) +{ + v_uint32x16 br01, br23, ga01, ga23; + v_zip(a, c, br01, br23); + v_zip(b, d, ga01, ga23); + v_uint32x16 bgra0, bgra1, bgra2, bgra3; + v_zip(br01, ga01, bgra0, bgra1); + v_zip(br23, ga23, bgra2, bgra3); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm512_stream_si512((__m512i*)ptr, bgra0.val); + _mm512_stream_si512((__m512i*)(ptr + 16), bgra1.val); + _mm512_stream_si512((__m512i*)(ptr + 32), bgra2.val); + _mm512_stream_si512((__m512i*)(ptr + 48), bgra3.val); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm512_store_si512((__m512i*)ptr, bgra0.val); + _mm512_store_si512((__m512i*)(ptr + 16), bgra1.val); + _mm512_store_si512((__m512i*)(ptr + 32), bgra2.val); + _mm512_store_si512((__m512i*)(ptr + 48), bgra3.val); + } + else + { + _mm512_storeu_si512((__m512i*)ptr, bgra0.val); + _mm512_storeu_si512((__m512i*)(ptr + 16), bgra1.val); + _mm512_storeu_si512((__m512i*)(ptr + 32), bgra2.val); + _mm512_storeu_si512((__m512i*)(ptr + 48), bgra3.val); + } +} + +inline void v_store_interleave( uint64* ptr, const v_uint64x8& a, const v_uint64x8& b, + const v_uint64x8& c, const v_uint64x8& d, + hal::StoreMode mode=hal::STORE_UNALIGNED ) +{ + v_uint64x8 br01, br23, ga01, ga23; + v_zip(a, c, br01, br23); + v_zip(b, d, ga01, ga23); + v_uint64x8 bgra0, bgra1, bgra2, bgra3; + v_zip(br01, ga01, bgra0, bgra1); + v_zip(br23, ga23, bgra2, bgra3); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm512_stream_si512((__m512i*)ptr, bgra0.val); + _mm512_stream_si512((__m512i*)(ptr + 8), bgra1.val); + _mm512_stream_si512((__m512i*)(ptr + 16), bgra2.val); + _mm512_stream_si512((__m512i*)(ptr + 24), bgra3.val); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm512_store_si512((__m512i*)ptr, bgra0.val); + _mm512_store_si512((__m512i*)(ptr + 8), bgra1.val); + _mm512_store_si512((__m512i*)(ptr + 16), bgra2.val); + _mm512_store_si512((__m512i*)(ptr + 24), bgra3.val); + } + else + { + _mm512_storeu_si512((__m512i*)ptr, bgra0.val); + _mm512_storeu_si512((__m512i*)(ptr + 8), bgra1.val); + _mm512_storeu_si512((__m512i*)(ptr + 16), bgra2.val); + _mm512_storeu_si512((__m512i*)(ptr + 24), bgra3.val); + } +} + +#define OPENCV_HAL_IMPL_AVX512_LOADSTORE_INTERLEAVE(_Tpvec0, _Tp0, suffix0, _Tpvec1, _Tp1, suffix1) \ +inline void v_load_deinterleave( const _Tp0* ptr, _Tpvec0& a0, _Tpvec0& b0 ) \ +{ \ + _Tpvec1 a1, b1; \ + v_load_deinterleave((const _Tp1*)ptr, a1, b1); \ + a0 = v_reinterpret_as_##suffix0(a1); \ + b0 = v_reinterpret_as_##suffix0(b1); \ +} \ +inline void v_load_deinterleave( const _Tp0* ptr, _Tpvec0& a0, _Tpvec0& b0, _Tpvec0& c0 ) \ +{ \ + _Tpvec1 a1, b1, c1; \ + v_load_deinterleave((const _Tp1*)ptr, a1, b1, c1); \ + a0 = v_reinterpret_as_##suffix0(a1); \ + b0 = v_reinterpret_as_##suffix0(b1); \ + c0 = v_reinterpret_as_##suffix0(c1); \ +} \ +inline void v_load_deinterleave( const 
_Tp0* ptr, _Tpvec0& a0, _Tpvec0& b0, _Tpvec0& c0, _Tpvec0& d0 ) \ +{ \ + _Tpvec1 a1, b1, c1, d1; \ + v_load_deinterleave((const _Tp1*)ptr, a1, b1, c1, d1); \ + a0 = v_reinterpret_as_##suffix0(a1); \ + b0 = v_reinterpret_as_##suffix0(b1); \ + c0 = v_reinterpret_as_##suffix0(c1); \ + d0 = v_reinterpret_as_##suffix0(d1); \ +} \ +inline void v_store_interleave( _Tp0* ptr, const _Tpvec0& a0, const _Tpvec0& b0, \ + hal::StoreMode mode=hal::STORE_UNALIGNED ) \ +{ \ + _Tpvec1 a1 = v_reinterpret_as_##suffix1(a0); \ + _Tpvec1 b1 = v_reinterpret_as_##suffix1(b0); \ + v_store_interleave((_Tp1*)ptr, a1, b1, mode); \ +} \ +inline void v_store_interleave( _Tp0* ptr, const _Tpvec0& a0, const _Tpvec0& b0, const _Tpvec0& c0, \ + hal::StoreMode mode=hal::STORE_UNALIGNED ) \ +{ \ + _Tpvec1 a1 = v_reinterpret_as_##suffix1(a0); \ + _Tpvec1 b1 = v_reinterpret_as_##suffix1(b0); \ + _Tpvec1 c1 = v_reinterpret_as_##suffix1(c0); \ + v_store_interleave((_Tp1*)ptr, a1, b1, c1, mode); \ +} \ +inline void v_store_interleave( _Tp0* ptr, const _Tpvec0& a0, const _Tpvec0& b0, \ + const _Tpvec0& c0, const _Tpvec0& d0, \ + hal::StoreMode mode=hal::STORE_UNALIGNED ) \ +{ \ + _Tpvec1 a1 = v_reinterpret_as_##suffix1(a0); \ + _Tpvec1 b1 = v_reinterpret_as_##suffix1(b0); \ + _Tpvec1 c1 = v_reinterpret_as_##suffix1(c0); \ + _Tpvec1 d1 = v_reinterpret_as_##suffix1(d0); \ + v_store_interleave((_Tp1*)ptr, a1, b1, c1, d1, mode); \ +} + +OPENCV_HAL_IMPL_AVX512_LOADSTORE_INTERLEAVE(v_int8x64, schar, s8, v_uint8x64, uchar, u8) +OPENCV_HAL_IMPL_AVX512_LOADSTORE_INTERLEAVE(v_int16x32, short, s16, v_uint16x32, ushort, u16) +OPENCV_HAL_IMPL_AVX512_LOADSTORE_INTERLEAVE(v_int32x16, int, s32, v_uint32x16, unsigned, u32) +OPENCV_HAL_IMPL_AVX512_LOADSTORE_INTERLEAVE(v_float32x16, float, f32, v_uint32x16, unsigned, u32) +OPENCV_HAL_IMPL_AVX512_LOADSTORE_INTERLEAVE(v_int64x8, int64, s64, v_uint64x8, uint64, u64) +OPENCV_HAL_IMPL_AVX512_LOADSTORE_INTERLEAVE(v_float64x8, double, f64, v_uint64x8, uint64, u64) + +////////// Mask and checks ///////// + +/** Mask **/ +inline int64 v_signmask(const v_int8x64& a) { return (int64)_mm512_movepi8_mask(a.val); } +inline int v_signmask(const v_int16x32& a) { return (int)_mm512_cmp_epi16_mask(a.val, _mm512_setzero_si512(), _MM_CMPINT_LT); } +inline int v_signmask(const v_int32x16& a) { return (int)_mm512_cmp_epi32_mask(a.val, _mm512_setzero_si512(), _MM_CMPINT_LT); } +inline int v_signmask(const v_int64x8& a) { return (int)_mm512_cmp_epi64_mask(a.val, _mm512_setzero_si512(), _MM_CMPINT_LT); } + +inline int64 v_signmask(const v_uint8x64& a) { return v_signmask(v_reinterpret_as_s8(a)); } +inline int v_signmask(const v_uint16x32& a) { return v_signmask(v_reinterpret_as_s16(a)); } +inline int v_signmask(const v_uint32x16& a) { return v_signmask(v_reinterpret_as_s32(a)); } +inline int v_signmask(const v_uint64x8& a) { return v_signmask(v_reinterpret_as_s64(a)); } +inline int v_signmask(const v_float32x16& a) { return v_signmask(v_reinterpret_as_s32(a)); } +inline int v_signmask(const v_float64x8& a) { return v_signmask(v_reinterpret_as_s64(a)); } + +/** Checks **/ +inline bool v_check_all(const v_int8x64& a) { return !(bool)_mm512_cmp_epi8_mask(a.val, _mm512_setzero_si512(), _MM_CMPINT_NLT); } +inline bool v_check_any(const v_int8x64& a) { return (bool)_mm512_movepi8_mask(a.val); } +inline bool v_check_all(const v_int16x32& a) { return !(bool)_mm512_cmp_epi16_mask(a.val, _mm512_setzero_si512(), _MM_CMPINT_NLT); } +inline bool v_check_any(const v_int16x32& a) { return (bool)_mm512_cmp_epi16_mask(a.val, 
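/* v_signmask packs the most significant (sign) bit of every lane into an integer, while
   v_check_all / v_check_any test whether all / any lanes have that bit set, which is how
   comparison results are usually consumed. An illustrative sketch with 128-bit types
   (assumes "opencv2/core/hal/intrin.hpp"):
@code{.cpp}
#include <opencv2/core/hal/intrin.hpp>
#include <cstdio>

int main()
{
    using namespace cv;
    v_int32x4 a(-1, 2, -3, 4);
    v_int32x4 zero = v_setzero_s32();
    std::printf("signmask = %d\n", v_signmask(a));              // lanes 0 and 2 negative -> 0b0101 = 5
    std::printf("any < 0:  %d\n", (int)v_check_any(a < zero));  // 1
    std::printf("all < 0:  %d\n", (int)v_check_all(a < zero));  // 0
    return 0;
}
@endcode
*/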
_mm512_setzero_si512(), _MM_CMPINT_LT); } +inline bool v_check_all(const v_int32x16& a) { return !(bool)_mm512_cmp_epi32_mask(a.val, _mm512_setzero_si512(), _MM_CMPINT_NLT); } +inline bool v_check_any(const v_int32x16& a) { return (bool)_mm512_cmp_epi32_mask(a.val, _mm512_setzero_si512(), _MM_CMPINT_LT); } +inline bool v_check_all(const v_int64x8& a) { return !(bool)_mm512_cmp_epi64_mask(a.val, _mm512_setzero_si512(), _MM_CMPINT_NLT); } +inline bool v_check_any(const v_int64x8& a) { return (bool)_mm512_cmp_epi64_mask(a.val, _mm512_setzero_si512(), _MM_CMPINT_LT); } + +inline bool v_check_all(const v_float32x16& a) { return v_check_all(v_reinterpret_as_s32(a)); } +inline bool v_check_any(const v_float32x16& a) { return v_check_any(v_reinterpret_as_s32(a)); } +inline bool v_check_all(const v_float64x8& a) { return v_check_all(v_reinterpret_as_s64(a)); } +inline bool v_check_any(const v_float64x8& a) { return v_check_any(v_reinterpret_as_s64(a)); } +inline bool v_check_all(const v_uint8x64& a) { return v_check_all(v_reinterpret_as_s8(a)); } +inline bool v_check_all(const v_uint16x32& a) { return v_check_all(v_reinterpret_as_s16(a)); } +inline bool v_check_all(const v_uint32x16& a) { return v_check_all(v_reinterpret_as_s32(a)); } +inline bool v_check_all(const v_uint64x8& a) { return v_check_all(v_reinterpret_as_s64(a)); } +inline bool v_check_any(const v_uint8x64& a) { return v_check_any(v_reinterpret_as_s8(a)); } +inline bool v_check_any(const v_uint16x32& a) { return v_check_any(v_reinterpret_as_s16(a)); } +inline bool v_check_any(const v_uint32x16& a) { return v_check_any(v_reinterpret_as_s32(a)); } +inline bool v_check_any(const v_uint64x8& a) { return v_check_any(v_reinterpret_as_s64(a)); } + +inline int v_scan_forward(const v_int8x64& a) +{ + int64 mask = _mm512_movepi8_mask(a.val); + int mask32 = (int)mask; + return mask != 0 ? mask32 != 0 ? trailingZeros32(mask32) : 32 + trailingZeros32((int)(mask >> 32)) : 0; +} +inline int v_scan_forward(const v_uint8x64& a) { return v_scan_forward(v_reinterpret_as_s8(a)); } +inline int v_scan_forward(const v_int16x32& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s16(a))); } +inline int v_scan_forward(const v_uint16x32& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s16(a))); } +inline int v_scan_forward(const v_int32x16& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s16(a))) / 2; } +inline int v_scan_forward(const v_uint32x16& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s16(a))) / 2; } +inline int v_scan_forward(const v_float32x16& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s16(a))) / 2; } +inline int v_scan_forward(const v_int64x8& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s16(a))) / 4; } +inline int v_scan_forward(const v_uint64x8& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s16(a))) / 4; } +inline int v_scan_forward(const v_float64x8& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s16(a))) / 4; } + +inline void v512_cleanup() { _mm256_zeroall(); } + +CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END + +//! 
@endcond + +} // cv:: + +#endif // OPENCV_HAL_INTRIN_AVX_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_cpp.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_cpp.hpp new file mode 100644 index 00000000..46222140 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_cpp.hpp @@ -0,0 +1,3320 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Copyright (C) 2015, Itseez Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_HAL_INTRIN_CPP_HPP +#define OPENCV_HAL_INTRIN_CPP_HPP + +#include +#include +#include +#include "opencv2/core/saturate.hpp" + +//! @cond IGNORED +#define CV_SIMD128_CPP 1 +#if defined(CV_FORCE_SIMD128_CPP) +#define CV_SIMD128 1 +#define CV_SIMD128_64F 1 +#endif +#if defined(CV_DOXYGEN) +#define CV_SIMD128 1 +#define CV_SIMD128_64F 1 +#define CV_SIMD256 1 +#define CV_SIMD256_64F 1 +#define CV_SIMD512 1 +#define CV_SIMD512_64F 1 +#else +#define CV_SIMD256 0 // Explicitly disable SIMD256 and SIMD512 support for scalar intrinsic implementation +#define CV_SIMD512 0 // to avoid warnings during compilation +#endif +//! @endcond + +namespace cv +{ + +#ifndef CV_DOXYGEN +CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN +#endif + +/** @addtogroup core_hal_intrin + +"Universal intrinsics" is a types and functions set intended to simplify vectorization of code on +different platforms. 
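For example, a saturating addition of two uchar arrays can be written once in this style and
compiled against whatever SIMD backend is available (an illustrative sketch using the
fixed-size 128-bit types described below):
@code{.cpp}
#include <opencv2/core/hal/intrin.hpp>
using namespace cv;

void add_saturate_u8(const uchar* a, const uchar* b, uchar* dst, int len)
{
    int i = 0;
#if CV_SIMD128
    for (; i <= len - v_uint8x16::nlanes; i += v_uint8x16::nlanes)
        v_store(dst + i, v_load(a + i) + v_load(b + i));   // per-lane saturating add
#endif
    for (; i < len; i++)                                   // scalar tail
    {
        int v = a[i] + b[i];
        dst[i] = (uchar)(v < 255 ? v : 255);
    }
}
@endcode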
Currently a few different SIMD extensions on different architectures are supported. +128 bit registers of various types support is implemented for a wide range of architectures +including x86(__SSE/SSE2/SSE4.2__), ARM(__NEON__), PowerPC(__VSX__), MIPS(__MSA__). +256 bit long registers are supported on x86(__AVX2__) and 512 bit long registers are supported on x86(__AVX512__). +In case when there is no SIMD extension available during compilation, fallback C++ implementation of intrinsics +will be chosen and code will work as expected although it could be slower. + +### Types + +There are several types representing packed values vector registers, each type is +implemented as a structure based on a one SIMD register. + +- cv::v_uint8 and cv::v_int8: 8-bit integer values (unsigned/signed) - char +- cv::v_uint16 and cv::v_int16: 16-bit integer values (unsigned/signed) - short +- cv::v_uint32 and cv::v_int32: 32-bit integer values (unsigned/signed) - int +- cv::v_uint64 and cv::v_int64: 64-bit integer values (unsigned/signed) - int64 +- cv::v_float32: 32-bit floating point values (signed) - float +- cv::v_float64: 64-bit floating point values (signed) - double + +Exact bit length(and value quantity) of listed types is compile time deduced and depends on architecture SIMD +capabilities chosen as available during compilation of the library. All the types contains __nlanes__ enumeration +to check for exact value quantity of the type. + +In case the exact bit length of the type is important it is possible to use specific fixed length register types. + +There are several types representing 128-bit registers. + +- cv::v_uint8x16 and cv::v_int8x16: sixteen 8-bit integer values (unsigned/signed) - char +- cv::v_uint16x8 and cv::v_int16x8: eight 16-bit integer values (unsigned/signed) - short +- cv::v_uint32x4 and cv::v_int32x4: four 32-bit integer values (unsigned/signed) - int +- cv::v_uint64x2 and cv::v_int64x2: two 64-bit integer values (unsigned/signed) - int64 +- cv::v_float32x4: four 32-bit floating point values (signed) - float +- cv::v_float64x2: two 64-bit floating point values (signed) - double + +There are several types representing 256-bit registers. + +- cv::v_uint8x32 and cv::v_int8x32: thirty two 8-bit integer values (unsigned/signed) - char +- cv::v_uint16x16 and cv::v_int16x16: sixteen 16-bit integer values (unsigned/signed) - short +- cv::v_uint32x8 and cv::v_int32x8: eight 32-bit integer values (unsigned/signed) - int +- cv::v_uint64x4 and cv::v_int64x4: four 64-bit integer values (unsigned/signed) - int64 +- cv::v_float32x8: eight 32-bit floating point values (signed) - float +- cv::v_float64x4: four 64-bit floating point values (signed) - double + +@note +256 bit registers at the moment implemented for AVX2 SIMD extension only, if you want to use this type directly, +don't forget to check the CV_SIMD256 preprocessor definition: +@code +#if CV_SIMD256 +//... +#endif +@endcode + +There are several types representing 512-bit registers. 
+ +- cv::v_uint8x64 and cv::v_int8x64: sixty four 8-bit integer values (unsigned/signed) - char +- cv::v_uint16x32 and cv::v_int16x32: thirty two 16-bit integer values (unsigned/signed) - short +- cv::v_uint32x16 and cv::v_int32x16: sixteen 32-bit integer values (unsigned/signed) - int +- cv::v_uint64x8 and cv::v_int64x8: eight 64-bit integer values (unsigned/signed) - int64 +- cv::v_float32x16: sixteen 32-bit floating point values (signed) - float +- cv::v_float64x8: eight 64-bit floating point values (signed) - double +@note +512 bit registers at the moment implemented for AVX512 SIMD extension only, if you want to use this type directly, +don't forget to check the CV_SIMD512 preprocessor definition. + +@note +cv::v_float64x2 is not implemented in NEON variant, if you want to use this type, don't forget to +check the CV_SIMD128_64F preprocessor definition. + +### Load and store operations + +These operations allow to set contents of the register explicitly or by loading it from some memory +block and to save contents of the register to memory block. + +There are variable size register load operations that provide result of maximum available size +depending on chosen platform capabilities. +- Constructors: +@ref v_reg::v_reg(const _Tp *ptr) "from memory", +- Other create methods: +vx_setall_s8, vx_setall_u8, ..., +vx_setzero_u8, vx_setzero_s8, ... +- Memory load operations: +vx_load, vx_load_aligned, vx_load_low, vx_load_halves, +- Memory operations with expansion of values: +vx_load_expand, vx_load_expand_q + +Also there are fixed size register load/store operations. + +For 128 bit registers +- Constructors: +@ref v_reg::v_reg(const _Tp *ptr) "from memory", +@ref v_reg::v_reg(_Tp s0, _Tp s1) "from two values", ... +- Other create methods: +@ref v_setall_s8, @ref v_setall_u8, ..., +@ref v_setzero_u8, @ref v_setzero_s8, ... +- Memory load operations: +@ref v_load, @ref v_load_aligned, @ref v_load_low, @ref v_load_halves, +- Memory operations with expansion of values: +@ref v_load_expand, @ref v_load_expand_q + +For 256 bit registers(check CV_SIMD256 preprocessor definition) +- Constructors: +@ref v_reg::v_reg(const _Tp *ptr) "from memory", +@ref v_reg::v_reg(_Tp s0, _Tp s1, _Tp s2, _Tp s3) "from four values", ... +- Other create methods: +@ref v256_setall_s8, @ref v256_setall_u8, ..., +@ref v256_setzero_u8, @ref v256_setzero_s8, ... +- Memory load operations: +@ref v256_load, @ref v256_load_aligned, @ref v256_load_low, @ref v256_load_halves, +- Memory operations with expansion of values: +@ref v256_load_expand, @ref v256_load_expand_q + +For 512 bit registers(check CV_SIMD512 preprocessor definition) +- Constructors: +@ref v_reg::v_reg(const _Tp *ptr) "from memory", +@ref v_reg::v_reg(_Tp s0, _Tp s1, _Tp s2, _Tp s3, _Tp s4, _Tp s5, _Tp s6, _Tp s7) "from eight values", ... +- Other create methods: +@ref v512_setall_s8, @ref v512_setall_u8, ..., +@ref v512_setzero_u8, @ref v512_setzero_s8, ... +- Memory load operations: +@ref v512_load, @ref v512_load_aligned, @ref v512_load_low, @ref v512_load_halves, +- Memory operations with expansion of values: +@ref v512_load_expand, @ref v512_load_expand_q + +Store to memory operations are similar across different platform capabilities: +@ref v_store, @ref v_store_aligned, +@ref v_store_high, @ref v_store_low + +### Value reordering + +These operations allow to reorder or recombine elements in one or multiple vectors. 
+ +- Interleave, deinterleave (2, 3 and 4 channels): @ref v_load_deinterleave, @ref v_store_interleave +- Expand: @ref v_expand, @ref v_expand_low, @ref v_expand_high +- Pack: @ref v_pack, @ref v_pack_u, @ref v_pack_b, @ref v_rshr_pack, @ref v_rshr_pack_u, +@ref v_pack_store, @ref v_pack_u_store, @ref v_rshr_pack_store, @ref v_rshr_pack_u_store +- Recombine: @ref v_zip, @ref v_recombine, @ref v_combine_low, @ref v_combine_high +- Reverse: @ref v_reverse +- Extract: @ref v_extract + + +### Arithmetic, bitwise and comparison operations + +Element-wise binary and unary operations. + +- Arithmetics: +@ref operator +(const v_reg &a, const v_reg &b) "+", +@ref operator -(const v_reg &a, const v_reg &b) "-", +@ref operator *(const v_reg &a, const v_reg &b) "*", +@ref operator /(const v_reg &a, const v_reg &b) "/", +@ref v_mul_expand + +- Non-saturating arithmetics: @ref v_add_wrap, @ref v_sub_wrap + +- Bitwise shifts: +@ref operator <<(const v_reg &a, int s) "<<", +@ref operator >>(const v_reg &a, int s) ">>", +@ref v_shl, @ref v_shr + +- Bitwise logic: +@ref operator &(const v_reg &a, const v_reg &b) "&", +@ref operator |(const v_reg &a, const v_reg &b) "|", +@ref operator ^(const v_reg &a, const v_reg &b) "^", +@ref operator ~(const v_reg &a) "~" + +- Comparison: +@ref operator >(const v_reg &a, const v_reg &b) ">", +@ref operator >=(const v_reg &a, const v_reg &b) ">=", +@ref operator <(const v_reg &a, const v_reg &b) "<", +@ref operator <=(const v_reg &a, const v_reg &b) "<=", +@ref operator ==(const v_reg &a, const v_reg &b) "==", +@ref operator !=(const v_reg &a, const v_reg &b) "!=" + +- min/max: @ref v_min, @ref v_max + +### Reduce and mask + +Most of these operations return only one value. + +- Reduce: @ref v_reduce_min, @ref v_reduce_max, @ref v_reduce_sum, @ref v_popcount +- Mask: @ref v_signmask, @ref v_check_all, @ref v_check_any, @ref v_select + +### Other math + +- Some frequent operations: @ref v_sqrt, @ref v_invsqrt, @ref v_magnitude, @ref v_sqr_magnitude +- Absolute values: @ref v_abs, @ref v_absdiff, @ref v_absdiffs + +### Conversions + +Different type conversions and casts: + +- Rounding: @ref v_round, @ref v_floor, @ref v_ceil, @ref v_trunc, +- To float: @ref v_cvt_f32, @ref v_cvt_f64 +- Reinterpret: @ref v_reinterpret_as_u8, @ref v_reinterpret_as_s8, ... + +### Matrix operations + +In these operations vectors represent matrix rows/columns: @ref v_dotprod, @ref v_dotprod_fast, +@ref v_dotprod_expand, @ref v_dotprod_expand_fast, @ref v_matmul, @ref v_transpose4x4 + +### Usability + +Most operations are implemented only for some subset of the available types, following matrices +shows the applicability of different operations to the types. 
+ +Regular integers: + +| Operations\\Types | uint 8 | int 8 | uint 16 | int 16 | uint 32 | int 32 | +|-------------------|:-:|:-:|:-:|:-:|:-:|:-:| +|load, store | x | x | x | x | x | x | +|interleave | x | x | x | x | x | x | +|expand | x | x | x | x | x | x | +|expand_low | x | x | x | x | x | x | +|expand_high | x | x | x | x | x | x | +|expand_q | x | x | | | | | +|add, sub | x | x | x | x | x | x | +|add_wrap, sub_wrap | x | x | x | x | | | +|mul_wrap | x | x | x | x | | | +|mul | x | x | x | x | x | x | +|mul_expand | x | x | x | x | x | | +|compare | x | x | x | x | x | x | +|shift | | | x | x | x | x | +|dotprod | | | | x | | x | +|dotprod_fast | | | | x | | x | +|dotprod_expand | x | x | x | x | | x | +|dotprod_expand_fast| x | x | x | x | | x | +|logical | x | x | x | x | x | x | +|min, max | x | x | x | x | x | x | +|absdiff | x | x | x | x | x | x | +|absdiffs | | x | | x | | | +|reduce | x | x | x | x | x | x | +|mask | x | x | x | x | x | x | +|pack | x | x | x | x | x | x | +|pack_u | x | | x | | | | +|pack_b | x | | | | | | +|unpack | x | x | x | x | x | x | +|extract | x | x | x | x | x | x | +|rotate (lanes) | x | x | x | x | x | x | +|cvt_flt32 | | | | | | x | +|cvt_flt64 | | | | | | x | +|transpose4x4 | | | | | x | x | +|reverse | x | x | x | x | x | x | +|extract_n | x | x | x | x | x | x | +|broadcast_element | | | | | x | x | + +Big integers: + +| Operations\\Types | uint 64 | int 64 | +|-------------------|:-:|:-:| +|load, store | x | x | +|add, sub | x | x | +|shift | x | x | +|logical | x | x | +|reverse | x | x | +|extract | x | x | +|rotate (lanes) | x | x | +|cvt_flt64 | | x | +|extract_n | x | x | + +Floating point: + +| Operations\\Types | float 32 | float 64 | +|-------------------|:-:|:-:| +|load, store | x | x | +|interleave | x | | +|add, sub | x | x | +|mul | x | x | +|div | x | x | +|compare | x | x | +|min, max | x | x | +|absdiff | x | x | +|reduce | x | | +|mask | x | x | +|unpack | x | x | +|cvt_flt32 | | x | +|cvt_flt64 | x | | +|sqrt, abs | x | x | +|float math | x | x | +|transpose4x4 | x | | +|extract | x | x | +|rotate (lanes) | x | x | +|reverse | x | x | +|extract_n | x | x | +|broadcast_element | x | | + + @{ */ + +template struct v_reg +{ +//! 
@cond IGNORED + typedef _Tp lane_type; + enum { nlanes = n }; +// !@endcond + + /** @brief Constructor + + Initializes register with data from memory + @param ptr pointer to memory block with data for register */ + explicit v_reg(const _Tp* ptr) { for( int i = 0; i < n; i++ ) s[i] = ptr[i]; } + + /** @brief Constructor + + Initializes register with two 64-bit values */ + v_reg(_Tp s0, _Tp s1) { s[0] = s0; s[1] = s1; } + + /** @brief Constructor + + Initializes register with four 32-bit values */ + v_reg(_Tp s0, _Tp s1, _Tp s2, _Tp s3) { s[0] = s0; s[1] = s1; s[2] = s2; s[3] = s3; } + + /** @brief Constructor + + Initializes register with eight 16-bit values */ + v_reg(_Tp s0, _Tp s1, _Tp s2, _Tp s3, + _Tp s4, _Tp s5, _Tp s6, _Tp s7) + { + s[0] = s0; s[1] = s1; s[2] = s2; s[3] = s3; + s[4] = s4; s[5] = s5; s[6] = s6; s[7] = s7; + } + + /** @brief Constructor + + Initializes register with sixteen 8-bit values */ + v_reg(_Tp s0, _Tp s1, _Tp s2, _Tp s3, + _Tp s4, _Tp s5, _Tp s6, _Tp s7, + _Tp s8, _Tp s9, _Tp s10, _Tp s11, + _Tp s12, _Tp s13, _Tp s14, _Tp s15) + { + s[0] = s0; s[1] = s1; s[2] = s2; s[3] = s3; + s[4] = s4; s[5] = s5; s[6] = s6; s[7] = s7; + s[8] = s8; s[9] = s9; s[10] = s10; s[11] = s11; + s[12] = s12; s[13] = s13; s[14] = s14; s[15] = s15; + } + + /** @brief Default constructor + + Does not initialize anything*/ + v_reg() {} + + /** @brief Copy constructor */ + v_reg(const v_reg<_Tp, n> & r) + { + for( int i = 0; i < n; i++ ) + s[i] = r.s[i]; + } + /** @brief Access first value + + Returns value of the first lane according to register type, for example: + @code{.cpp} + v_int32x4 r(1, 2, 3, 4); + int v = r.get0(); // returns 1 + v_uint64x2 r(1, 2); + uint64_t v = r.get0(); // returns 1 + @endcode + */ + _Tp get0() const { return s[0]; } + +//! @cond IGNORED + _Tp get(const int i) const { return s[i]; } + v_reg<_Tp, n> high() const + { + v_reg<_Tp, n> c; + int i; + for( i = 0; i < n/2; i++ ) + { + c.s[i] = s[i+(n/2)]; + c.s[i+(n/2)] = 0; + } + return c; + } + + static v_reg<_Tp, n> zero() + { + v_reg<_Tp, n> c; + for( int i = 0; i < n; i++ ) + c.s[i] = (_Tp)0; + return c; + } + + static v_reg<_Tp, n> all(_Tp s) + { + v_reg<_Tp, n> c; + for( int i = 0; i < n; i++ ) + c.s[i] = s; + return c; + } + + template v_reg<_Tp2, n2> reinterpret_as() const + { + size_t bytes = std::min(sizeof(_Tp2)*n2, sizeof(_Tp)*n); + v_reg<_Tp2, n2> c; + std::memcpy(&c.s[0], &s[0], bytes); + return c; + } + + v_reg& operator=(const v_reg<_Tp, n> & r) + { + for( int i = 0; i < n; i++ ) + s[i] = r.s[i]; + return *this; + } + + _Tp s[n]; +//! 
@endcond +}; + +/** @brief Sixteen 8-bit unsigned integer values */ +typedef v_reg<uchar, 16> v_uint8x16; +/** @brief Sixteen 8-bit signed integer values */ +typedef v_reg<schar, 16> v_int8x16; +/** @brief Eight 16-bit unsigned integer values */ +typedef v_reg<ushort, 8> v_uint16x8; +/** @brief Eight 16-bit signed integer values */ +typedef v_reg<short, 8> v_int16x8; +/** @brief Four 32-bit unsigned integer values */ +typedef v_reg<unsigned, 4> v_uint32x4; +/** @brief Four 32-bit signed integer values */ +typedef v_reg<int, 4> v_int32x4; +/** @brief Four 32-bit floating point values (single precision) */ +typedef v_reg<float, 4> v_float32x4; +/** @brief Two 64-bit floating point values (double precision) */ +typedef v_reg<double, 2> v_float64x2; +/** @brief Two 64-bit unsigned integer values */ +typedef v_reg<uint64, 2> v_uint64x2; +/** @brief Two 64-bit signed integer values */ +typedef v_reg<int64, 2> v_int64x2; + +#if CV_SIMD256 +/** @brief Thirty two 8-bit unsigned integer values */ +typedef v_reg<uchar, 32> v_uint8x32; +/** @brief Thirty two 8-bit signed integer values */ +typedef v_reg<schar, 32> v_int8x32; +/** @brief Sixteen 16-bit unsigned integer values */ +typedef v_reg<ushort, 16> v_uint16x16; +/** @brief Sixteen 16-bit signed integer values */ +typedef v_reg<short, 16> v_int16x16; +/** @brief Eight 32-bit unsigned integer values */ +typedef v_reg<unsigned, 8> v_uint32x8; +/** @brief Eight 32-bit signed integer values */ +typedef v_reg<int, 8> v_int32x8; +/** @brief Eight 32-bit floating point values (single precision) */ +typedef v_reg<float, 8> v_float32x8; +/** @brief Four 64-bit floating point values (double precision) */ +typedef v_reg<double, 4> v_float64x4; +/** @brief Four 64-bit unsigned integer values */ +typedef v_reg<uint64, 4> v_uint64x4; +/** @brief Four 64-bit signed integer values */ +typedef v_reg<int64, 4> v_int64x4; +#endif + +#if CV_SIMD512 +/** @brief Sixty four 8-bit unsigned integer values */ +typedef v_reg<uchar, 64> v_uint8x64; +/** @brief Sixty four 8-bit signed integer values */ +typedef v_reg<schar, 64> v_int8x64; +/** @brief Thirty two 16-bit unsigned integer values */ +typedef v_reg<ushort, 32> v_uint16x32; +/** @brief Thirty two 16-bit signed integer values */ +typedef v_reg<short, 32> v_int16x32; +/** @brief Sixteen 32-bit unsigned integer values */ +typedef v_reg<unsigned, 16> v_uint32x16; +/** @brief Sixteen 32-bit signed integer values */ +typedef v_reg<int, 16> v_int32x16; +/** @brief Sixteen 32-bit floating point values (single precision) */ +typedef v_reg<float, 16> v_float32x16; +/** @brief Eight 64-bit floating point values (double precision) */ +typedef v_reg<double, 8> v_float64x8; +/** @brief Eight 64-bit unsigned integer values */ +typedef v_reg<uint64, 8> v_uint64x8; +/** @brief Eight 64-bit signed integer values */ +typedef v_reg<int64, 8> v_int64x8; +#endif + +enum { + simd128_width = 16, +#if CV_SIMD256 + simd256_width = 32, +#endif +#if CV_SIMD512 + simd512_width = 64, + simdmax_width = simd512_width +#elif CV_SIMD256 + simdmax_width = simd256_width +#else + simdmax_width = simd128_width +#endif +}; + +/** @brief Add values + +For all types. */ +template<typename _Tp, int n> CV_INLINE v_reg<_Tp, n> operator+(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b); +template<typename _Tp, int n> CV_INLINE v_reg<_Tp, n>& operator+=(v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b); + +/** @brief Subtract values + +For all types. */ +template<typename _Tp, int n> CV_INLINE v_reg<_Tp, n> operator-(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b); +template<typename _Tp, int n> CV_INLINE v_reg<_Tp, n>& operator-=(v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b); + +/** @brief Multiply values + +For 16- and 32-bit integer types and floating types. 
*/ +template CV_INLINE v_reg<_Tp, n> operator*(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b); +template CV_INLINE v_reg<_Tp, n>& operator*=(v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b); + +/** @brief Divide values + +For floating types only. */ +template CV_INLINE v_reg<_Tp, n> operator/(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b); +template CV_INLINE v_reg<_Tp, n>& operator/=(v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b); + + +/** @brief Bitwise AND + +Only for integer types. */ +template CV_INLINE v_reg<_Tp, n> operator&(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b); +template CV_INLINE v_reg<_Tp, n>& operator&=(v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b); + +/** @brief Bitwise OR + +Only for integer types. */ +template CV_INLINE v_reg<_Tp, n> operator|(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b); +template CV_INLINE v_reg<_Tp, n>& operator|=(v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b); + +/** @brief Bitwise XOR + +Only for integer types.*/ +template CV_INLINE v_reg<_Tp, n> operator^(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b); +template CV_INLINE v_reg<_Tp, n>& operator^=(v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b); + +/** @brief Bitwise NOT + +Only for integer types.*/ +template CV_INLINE v_reg<_Tp, n> operator~(const v_reg<_Tp, n>& a); + + +#ifndef CV_DOXYGEN + +#define CV__HAL_INTRIN_EXPAND_WITH_INTEGER_TYPES(macro_name, ...) \ +__CV_EXPAND(macro_name(uchar, __VA_ARGS__)) \ +__CV_EXPAND(macro_name(schar, __VA_ARGS__)) \ +__CV_EXPAND(macro_name(ushort, __VA_ARGS__)) \ +__CV_EXPAND(macro_name(short, __VA_ARGS__)) \ +__CV_EXPAND(macro_name(unsigned, __VA_ARGS__)) \ +__CV_EXPAND(macro_name(int, __VA_ARGS__)) \ +__CV_EXPAND(macro_name(uint64, __VA_ARGS__)) \ +__CV_EXPAND(macro_name(int64, __VA_ARGS__)) \ + +#define CV__HAL_INTRIN_EXPAND_WITH_FP_TYPES(macro_name, ...) \ +__CV_EXPAND(macro_name(float, __VA_ARGS__)) \ +__CV_EXPAND(macro_name(double, __VA_ARGS__)) \ + +#define CV__HAL_INTRIN_EXPAND_WITH_ALL_TYPES(macro_name, ...) 
\ +CV__HAL_INTRIN_EXPAND_WITH_INTEGER_TYPES(macro_name, __VA_ARGS__) \ +CV__HAL_INTRIN_EXPAND_WITH_FP_TYPES(macro_name, __VA_ARGS__) \ + +#define CV__HAL_INTRIN_IMPL_BIN_OP_(_Tp, bin_op) \ +template inline \ +v_reg<_Tp, n> operator bin_op (const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \ +{ \ + v_reg<_Tp, n> c; \ + for( int i = 0; i < n; i++ ) \ + c.s[i] = saturate_cast<_Tp>(a.s[i] bin_op b.s[i]); \ + return c; \ +} \ +template inline \ +v_reg<_Tp, n>& operator bin_op##= (v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \ +{ \ + for( int i = 0; i < n; i++ ) \ + a.s[i] = saturate_cast<_Tp>(a.s[i] bin_op b.s[i]); \ + return a; \ +} + +#define CV__HAL_INTRIN_IMPL_BIN_OP(bin_op) CV__HAL_INTRIN_EXPAND_WITH_ALL_TYPES(CV__HAL_INTRIN_IMPL_BIN_OP_, bin_op) + +CV__HAL_INTRIN_IMPL_BIN_OP(+) +CV__HAL_INTRIN_IMPL_BIN_OP(-) +CV__HAL_INTRIN_IMPL_BIN_OP(*) +CV__HAL_INTRIN_EXPAND_WITH_FP_TYPES(CV__HAL_INTRIN_IMPL_BIN_OP_, /) + +#define CV__HAL_INTRIN_IMPL_BIT_OP_(_Tp, bit_op) \ +template CV_INLINE \ +v_reg<_Tp, n> operator bit_op (const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \ +{ \ + v_reg<_Tp, n> c; \ + typedef typename V_TypeTraits<_Tp>::int_type itype; \ + for( int i = 0; i < n; i++ ) \ + c.s[i] = V_TypeTraits<_Tp>::reinterpret_from_int((itype)(V_TypeTraits<_Tp>::reinterpret_int(a.s[i]) bit_op \ + V_TypeTraits<_Tp>::reinterpret_int(b.s[i]))); \ + return c; \ +} \ +template CV_INLINE \ +v_reg<_Tp, n>& operator bit_op##= (v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \ +{ \ + typedef typename V_TypeTraits<_Tp>::int_type itype; \ + for( int i = 0; i < n; i++ ) \ + a.s[i] = V_TypeTraits<_Tp>::reinterpret_from_int((itype)(V_TypeTraits<_Tp>::reinterpret_int(a.s[i]) bit_op \ + V_TypeTraits<_Tp>::reinterpret_int(b.s[i]))); \ + return a; \ +} + +#define CV__HAL_INTRIN_IMPL_BIT_OP(bit_op) \ +CV__HAL_INTRIN_EXPAND_WITH_INTEGER_TYPES(CV__HAL_INTRIN_IMPL_BIT_OP_, bit_op) \ +CV__HAL_INTRIN_EXPAND_WITH_FP_TYPES(CV__HAL_INTRIN_IMPL_BIT_OP_, bit_op) /* TODO: FIXIT remove this after masks refactoring */ + + +CV__HAL_INTRIN_IMPL_BIT_OP(&) +CV__HAL_INTRIN_IMPL_BIT_OP(|) +CV__HAL_INTRIN_IMPL_BIT_OP(^) + +#define CV__HAL_INTRIN_IMPL_BITWISE_NOT_(_Tp, dummy) \ +template CV_INLINE \ +v_reg<_Tp, n> operator ~ (const v_reg<_Tp, n>& a) \ +{ \ + v_reg<_Tp, n> c; \ + for( int i = 0; i < n; i++ ) \ + c.s[i] = V_TypeTraits<_Tp>::reinterpret_from_int(~V_TypeTraits<_Tp>::reinterpret_int(a.s[i])); \ + return c; \ +} \ + +CV__HAL_INTRIN_EXPAND_WITH_INTEGER_TYPES(CV__HAL_INTRIN_IMPL_BITWISE_NOT_, ~) + +#endif // !CV_DOXYGEN + + +//! @brief Helper macro +//! @ingroup core_hal_intrin_impl +#define OPENCV_HAL_IMPL_MATH_FUNC(func, cfunc, _Tp2) \ +template inline v_reg<_Tp2, n> func(const v_reg<_Tp, n>& a) \ +{ \ + v_reg<_Tp2, n> c; \ + for( int i = 0; i < n; i++ ) \ + c.s[i] = cfunc(a.s[i]); \ + return c; \ +} + +/** @brief Square root of elements + +Only for floating point types.*/ +OPENCV_HAL_IMPL_MATH_FUNC(v_sqrt, std::sqrt, _Tp) + +//! @cond IGNORED +OPENCV_HAL_IMPL_MATH_FUNC(v_sin, std::sin, _Tp) +OPENCV_HAL_IMPL_MATH_FUNC(v_cos, std::cos, _Tp) +OPENCV_HAL_IMPL_MATH_FUNC(v_exp, std::exp, _Tp) +OPENCV_HAL_IMPL_MATH_FUNC(v_log, std::log, _Tp) +//! @endcond + +/** @brief Absolute value of elements + +Only for floating point types.*/ +OPENCV_HAL_IMPL_MATH_FUNC(v_abs, (typename V_TypeTraits<_Tp>::abs_type)std::abs, + typename V_TypeTraits<_Tp>::abs_type) + +//! @brief Helper macro +//! 
@ingroup core_hal_intrin_impl +#define OPENCV_HAL_IMPL_MINMAX_FUNC(func, cfunc) \ +template inline v_reg<_Tp, n> func(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \ +{ \ + v_reg<_Tp, n> c; \ + for( int i = 0; i < n; i++ ) \ + c.s[i] = cfunc(a.s[i], b.s[i]); \ + return c; \ +} + +//! @brief Helper macro +//! @ingroup core_hal_intrin_impl +#define OPENCV_HAL_IMPL_REDUCE_MINMAX_FUNC(func, cfunc) \ +template inline _Tp func(const v_reg<_Tp, n>& a) \ +{ \ + _Tp c = a.s[0]; \ + for( int i = 1; i < n; i++ ) \ + c = cfunc(c, a.s[i]); \ + return c; \ +} + +/** @brief Choose min values for each pair + +Scheme: +@code +{A1 A2 ...} +{B1 B2 ...} +-------------- +{min(A1,B1) min(A2,B2) ...} +@endcode +For all types except 64-bit integer. */ +OPENCV_HAL_IMPL_MINMAX_FUNC(v_min, std::min) + +/** @brief Choose max values for each pair + +Scheme: +@code +{A1 A2 ...} +{B1 B2 ...} +-------------- +{max(A1,B1) max(A2,B2) ...} +@endcode +For all types except 64-bit integer. */ +OPENCV_HAL_IMPL_MINMAX_FUNC(v_max, std::max) + +/** @brief Find one min value + +Scheme: +@code +{A1 A2 A3 ...} => min(A1,A2,A3,...) +@endcode +For all types except 64-bit integer and 64-bit floating point types. */ +OPENCV_HAL_IMPL_REDUCE_MINMAX_FUNC(v_reduce_min, std::min) + +/** @brief Find one max value + +Scheme: +@code +{A1 A2 A3 ...} => max(A1,A2,A3,...) +@endcode +For all types except 64-bit integer and 64-bit floating point types. */ +OPENCV_HAL_IMPL_REDUCE_MINMAX_FUNC(v_reduce_max, std::max) + +static const unsigned char popCountTable[] = +{ + 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8, +}; +/** @brief Count the 1 bits in the vector lanes and return result as corresponding unsigned type + +Scheme: +@code +{A1 A2 A3 ...} => {popcount(A1), popcount(A2), popcount(A3), ...} +@endcode +For all integer types. */ +template +inline v_reg::abs_type, n> v_popcount(const v_reg<_Tp, n>& a) +{ + v_reg::abs_type, n> b = v_reg::abs_type, n>::zero(); + for (int i = 0; i < n*(int)sizeof(_Tp); i++) + b.s[i/sizeof(_Tp)] += popCountTable[v_reinterpret_as_u8(a).s[i]]; + return b; +} + + +//! @cond IGNORED +template +inline void v_minmax( const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b, + v_reg<_Tp, n>& minval, v_reg<_Tp, n>& maxval ) +{ + for( int i = 0; i < n; i++ ) + { + minval.s[i] = std::min(a.s[i], b.s[i]); + maxval.s[i] = std::max(a.s[i], b.s[i]); + } +} +//! @endcond + +//! @brief Helper macro +//! 
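/* An illustrative sketch of the reductions and per-lane popcount defined above (assumes
   "opencv2/core/hal/intrin.hpp"):
@code{.cpp}
#include <opencv2/core/hal/intrin.hpp>
#include <cstdio>

int main()
{
    using namespace cv;
    v_uint8x16 a(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 255);
    std::printf("max = %d\n", (int)v_reduce_max(a));     // 255
    uchar bits[16];
    v_store(bits, v_popcount(a));                        // per-lane number of set bits
    std::printf("popcount(255) = %d\n", (int)bits[15]);  // 8
    return 0;
}
@endcode
*/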
@ingroup core_hal_intrin_impl +#define OPENCV_HAL_IMPL_CMP_OP(cmp_op) \ +template \ +inline v_reg<_Tp, n> operator cmp_op(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \ +{ \ + typedef typename V_TypeTraits<_Tp>::int_type itype; \ + v_reg<_Tp, n> c; \ + for( int i = 0; i < n; i++ ) \ + c.s[i] = V_TypeTraits<_Tp>::reinterpret_from_int((itype)-(int)(a.s[i] cmp_op b.s[i])); \ + return c; \ +} + +/** @brief Less-than comparison + +For all types except 64-bit integer values. */ +OPENCV_HAL_IMPL_CMP_OP(<) + +/** @brief Greater-than comparison + +For all types except 64-bit integer values. */ +OPENCV_HAL_IMPL_CMP_OP(>) + +/** @brief Less-than or equal comparison + +For all types except 64-bit integer values. */ +OPENCV_HAL_IMPL_CMP_OP(<=) + +/** @brief Greater-than or equal comparison + +For all types except 64-bit integer values. */ +OPENCV_HAL_IMPL_CMP_OP(>=) + +/** @brief Equal comparison + +For all types except 64-bit integer values. */ +OPENCV_HAL_IMPL_CMP_OP(==) + +/** @brief Not equal comparison + +For all types except 64-bit integer values. */ +OPENCV_HAL_IMPL_CMP_OP(!=) + +template +inline v_reg v_not_nan(const v_reg& a) +{ + typedef typename V_TypeTraits::int_type itype; + v_reg c; + for (int i = 0; i < n; i++) + c.s[i] = V_TypeTraits::reinterpret_from_int((itype)-(int)(a.s[i] == a.s[i])); + return c; +} +template +inline v_reg v_not_nan(const v_reg& a) +{ + typedef typename V_TypeTraits::int_type itype; + v_reg c; + for (int i = 0; i < n; i++) + c.s[i] = V_TypeTraits::reinterpret_from_int((itype)-(int)(a.s[i] == a.s[i])); + return c; +} + +//! @brief Helper macro +//! @ingroup core_hal_intrin_impl +#define OPENCV_HAL_IMPL_ARITHM_OP(func, bin_op, cast_op, _Tp2) \ +template \ +inline v_reg<_Tp2, n> func(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \ +{ \ + typedef _Tp2 rtype; \ + v_reg c; \ + for( int i = 0; i < n; i++ ) \ + c.s[i] = cast_op(a.s[i] bin_op b.s[i]); \ + return c; \ +} + +/** @brief Add values without saturation + +For 8- and 16-bit integer values. */ +OPENCV_HAL_IMPL_ARITHM_OP(v_add_wrap, +, (_Tp), _Tp) + +/** @brief Subtract values without saturation + +For 8- and 16-bit integer values. */ +OPENCV_HAL_IMPL_ARITHM_OP(v_sub_wrap, -, (_Tp), _Tp) + +/** @brief Multiply values without saturation + +For 8- and 16-bit integer values. */ +OPENCV_HAL_IMPL_ARITHM_OP(v_mul_wrap, *, (_Tp), _Tp) + +//! @cond IGNORED +template inline T _absdiff(T a, T b) +{ + return a > b ? a - b : b - a; +} +//! @endcond + +/** @brief Absolute difference + +Returns \f$ |a - b| \f$ converted to corresponding unsigned type. +Example: +@code{.cpp} +v_int32x4 a, b; // {1, 2, 3, 4} and {4, 3, 2, 1} +v_uint32x4 c = v_absdiff(a, b); // result is {3, 1, 1, 3} +@endcode +For 8-, 16-, 32-bit integer source types. */ +template +inline v_reg::abs_type, n> v_absdiff(const v_reg<_Tp, n>& a, const v_reg<_Tp, n> & b) +{ + typedef typename V_TypeTraits<_Tp>::abs_type rtype; + v_reg c; + const rtype mask = (rtype)(std::numeric_limits<_Tp>::is_signed ? 
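/* The v_add_wrap / v_sub_wrap / v_mul_wrap variants above use plain modular arithmetic, in
   contrast to the saturating operators. An illustrative comparison (assumes
   "opencv2/core/hal/intrin.hpp"):
@code{.cpp}
#include <opencv2/core/hal/intrin.hpp>
#include <cstdio>

int main()
{
    using namespace cv;
    v_uint8x16 a = v_setall_u8(250), b = v_setall_u8(10);
    uchar s[16], w[16];
    v_store(s, a + b);             // saturating: every lane becomes 255
    v_store(w, v_add_wrap(a, b));  // wrapping:   every lane becomes 4 (260 mod 256)
    std::printf("%d %d\n", s[0], w[0]);
    return 0;
}
@endcode
*/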
(1 << (sizeof(rtype)*8 - 1)) : 0); + for( int i = 0; i < n; i++ ) + { + rtype ua = a.s[i] ^ mask; + rtype ub = b.s[i] ^ mask; + c.s[i] = _absdiff(ua, ub); + } + return c; +} + +/** @overload + +For 32-bit floating point values */ +template inline v_reg v_absdiff(const v_reg& a, const v_reg& b) +{ + v_reg c; + for( int i = 0; i < c.nlanes; i++ ) + c.s[i] = _absdiff(a.s[i], b.s[i]); + return c; +} + +/** @overload + +For 64-bit floating point values */ +template inline v_reg v_absdiff(const v_reg& a, const v_reg& b) +{ + v_reg c; + for( int i = 0; i < c.nlanes; i++ ) + c.s[i] = _absdiff(a.s[i], b.s[i]); + return c; +} + +/** @brief Saturating absolute difference + +Returns \f$ saturate(|a - b|) \f$ . +For 8-, 16-bit signed integer source types. */ +template +inline v_reg<_Tp, n> v_absdiffs(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) +{ + v_reg<_Tp, n> c; + for( int i = 0; i < n; i++) + c.s[i] = saturate_cast<_Tp>(std::abs(a.s[i] - b.s[i])); + return c; +} + +/** @brief Inversed square root + +Returns \f$ 1/sqrt(a) \f$ +For floating point types only. */ +template +inline v_reg<_Tp, n> v_invsqrt(const v_reg<_Tp, n>& a) +{ + v_reg<_Tp, n> c; + for( int i = 0; i < n; i++ ) + c.s[i] = 1.f/std::sqrt(a.s[i]); + return c; +} + +/** @brief Magnitude + +Returns \f$ sqrt(a^2 + b^2) \f$ +For floating point types only. */ +template +inline v_reg<_Tp, n> v_magnitude(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) +{ + v_reg<_Tp, n> c; + for( int i = 0; i < n; i++ ) + c.s[i] = std::sqrt(a.s[i]*a.s[i] + b.s[i]*b.s[i]); + return c; +} + +/** @brief Square of the magnitude + +Returns \f$ a^2 + b^2 \f$ +For floating point types only. */ +template +inline v_reg<_Tp, n> v_sqr_magnitude(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) +{ + v_reg<_Tp, n> c; + for( int i = 0; i < n; i++ ) + c.s[i] = a.s[i]*a.s[i] + b.s[i]*b.s[i]; + return c; +} + +/** @brief Multiply and add + + Returns \f$ a*b + c \f$ + For floating point types and signed 32bit int only. */ +template +inline v_reg<_Tp, n> v_fma(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b, + const v_reg<_Tp, n>& c) +{ + v_reg<_Tp, n> d; + for( int i = 0; i < n; i++ ) + d.s[i] = a.s[i]*b.s[i] + c.s[i]; + return d; +} + +/** @brief A synonym for v_fma */ +template +inline v_reg<_Tp, n> v_muladd(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b, + const v_reg<_Tp, n>& c) +{ + return v_fma(a, b, c); +} + +/** @brief Dot product of elements + +Multiply values in two registers and sum adjacent result pairs. + +Scheme: +@code + {A1 A2 ...} // 16-bit +x {B1 B2 ...} // 16-bit +------------- +{A1B1+A2B2 ...} // 32-bit + +@endcode +*/ +template inline v_reg::w_type, n/2> +v_dotprod(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) +{ + typedef typename V_TypeTraits<_Tp>::w_type w_type; + v_reg c; + for( int i = 0; i < (n/2); i++ ) + c.s[i] = (w_type)a.s[i*2]*b.s[i*2] + (w_type)a.s[i*2+1]*b.s[i*2+1]; + return c; +} + +/** @brief Dot product of elements + +Same as cv::v_dotprod, but add a third element to the sum of adjacent pairs. 
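For example (an illustrative sketch):
@code{.cpp}
v_int16x8 a = v_setall_s16(2), b = v_setall_s16(3);
v_int32x4 init = v_setall_s32(100);
v_int32x4 s = v_dotprod(a, b, init);   // every lane: 2*3 + 2*3 + 100 = 112
@endcode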
+Scheme: +@code + {A1 A2 ...} // 16-bit +x {B1 B2 ...} // 16-bit +------------- + {A1B1+A2B2+C1 ...} // 32-bit +@endcode +*/ +template inline v_reg::w_type, n/2> +v_dotprod(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b, + const v_reg::w_type, n / 2>& c) +{ + typedef typename V_TypeTraits<_Tp>::w_type w_type; + v_reg s; + for( int i = 0; i < (n/2); i++ ) + s.s[i] = (w_type)a.s[i*2]*b.s[i*2] + (w_type)a.s[i*2+1]*b.s[i*2+1] + c.s[i]; + return s; +} + +/** @brief Fast Dot product of elements + +Same as cv::v_dotprod, but it may perform unorder sum between result pairs in some platforms, +this intrinsic can be used if the sum among all lanes is only matters +and also it should be yielding better performance on the affected platforms. + +*/ +template inline v_reg::w_type, n/2> +v_dotprod_fast(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) +{ return v_dotprod(a, b); } + +/** @brief Fast Dot product of elements + +Same as cv::v_dotprod_fast, but add a third element to the sum of adjacent pairs. +*/ +template inline v_reg::w_type, n/2> +v_dotprod_fast(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b, + const v_reg::w_type, n / 2>& c) +{ return v_dotprod(a, b, c); } + +/** @brief Dot product of elements and expand + +Multiply values in two registers and expand the sum of adjacent result pairs. + +Scheme: +@code + {A1 A2 A3 A4 ...} // 8-bit +x {B1 B2 B3 B4 ...} // 8-bit +------------- + {A1B1+A2B2+A3B3+A4B4 ...} // 32-bit + +@endcode +*/ +template inline v_reg::q_type, n/4> +v_dotprod_expand(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) +{ + typedef typename V_TypeTraits<_Tp>::q_type q_type; + v_reg s; + for( int i = 0; i < (n/4); i++ ) + s.s[i] = (q_type)a.s[i*4 ]*b.s[i*4 ] + (q_type)a.s[i*4 + 1]*b.s[i*4 + 1] + + (q_type)a.s[i*4 + 2]*b.s[i*4 + 2] + (q_type)a.s[i*4 + 3]*b.s[i*4 + 3]; + return s; +} + +/** @brief Dot product of elements + +Same as cv::v_dotprod_expand, but add a third element to the sum of adjacent pairs. +Scheme: +@code + {A1 A2 A3 A4 ...} // 8-bit +x {B1 B2 B3 B4 ...} // 8-bit +------------- + {A1B1+A2B2+A3B3+A4B4+C1 ...} // 32-bit +@endcode +*/ +template inline v_reg::q_type, n/4> +v_dotprod_expand(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b, + const v_reg::q_type, n / 4>& c) +{ + typedef typename V_TypeTraits<_Tp>::q_type q_type; + v_reg s; + for( int i = 0; i < (n/4); i++ ) + s.s[i] = (q_type)a.s[i*4 ]*b.s[i*4 ] + (q_type)a.s[i*4 + 1]*b.s[i*4 + 1] + + (q_type)a.s[i*4 + 2]*b.s[i*4 + 2] + (q_type)a.s[i*4 + 3]*b.s[i*4 + 3] + c.s[i]; + return s; +} + +/** @brief Fast Dot product of elements and expand + +Multiply values in two registers and expand the sum of adjacent result pairs. + +Same as cv::v_dotprod_expand, but it may perform unorder sum between result pairs in some platforms, +this intrinsic can be used if the sum among all lanes is only matters +and also it should be yielding better performance on the affected platforms. + +*/ +template inline v_reg::q_type, n/4> +v_dotprod_expand_fast(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) +{ return v_dotprod_expand(a, b); } + +/** @brief Fast Dot product of elements + +Same as cv::v_dotprod_expand_fast, but add a third element to the sum of adjacent pairs. +*/ +template inline v_reg::q_type, n/4> +v_dotprod_expand_fast(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b, + const v_reg::q_type, n / 4>& c) +{ return v_dotprod_expand(a, b, c); } + +/** @brief Multiply and expand + +Multiply values two registers and store results in two registers with wider pack type. 
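A small sketch for the signed 16-bit case (illustrative only; `a16` and `b16` are placeholder `const short*` buffers):
@code{.cpp}
v_int16x8 x = v_load(a16), y = v_load(b16);
v_int32x4 prod_lo, prod_hi;
v_mul_expand(x, y, prod_lo, prod_hi);   // full 32-bit products, no overflow
v_int32x4 sum = prod_lo + prod_hi;
@endcode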
+Scheme: +@code + {A B C D} // 32-bit +x {E F G H} // 32-bit +--------------- +{AE BF} // 64-bit + {CG DH} // 64-bit +@endcode +Example: +@code{.cpp} +v_uint32x4 a, b; // {1,2,3,4} and {2,2,2,2} +v_uint64x2 c, d; // results +v_mul_expand(a, b, c, d); // c, d = {2,4}, {6, 8} +@endcode +Implemented only for 16- and unsigned 32-bit source types (v_int16x8, v_uint16x8, v_uint32x4). +*/ +template inline void v_mul_expand(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b, + v_reg::w_type, n/2>& c, + v_reg::w_type, n/2>& d) +{ + typedef typename V_TypeTraits<_Tp>::w_type w_type; + for( int i = 0; i < (n/2); i++ ) + { + c.s[i] = (w_type)a.s[i]*b.s[i]; + d.s[i] = (w_type)a.s[i+(n/2)]*b.s[i+(n/2)]; + } +} + +/** @brief Multiply and extract high part + +Multiply values two registers and store high part of the results. +Implemented only for 16-bit source types (v_int16x8, v_uint16x8). Returns \f$ a*b >> 16 \f$ +*/ +template inline v_reg<_Tp, n> v_mul_hi(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) +{ + typedef typename V_TypeTraits<_Tp>::w_type w_type; + v_reg<_Tp, n> c; + for (int i = 0; i < n; i++) + c.s[i] = (_Tp)(((w_type)a.s[i] * b.s[i]) >> sizeof(_Tp)*8); + return c; +} + +//! @cond IGNORED +template inline void v_hsum(const v_reg<_Tp, n>& a, + v_reg::w_type, n/2>& c) +{ + typedef typename V_TypeTraits<_Tp>::w_type w_type; + for( int i = 0; i < (n/2); i++ ) + { + c.s[i] = (w_type)a.s[i*2] + a.s[i*2+1]; + } +} +//! @endcond + +//! @brief Helper macro +//! @ingroup core_hal_intrin_impl +#define OPENCV_HAL_IMPL_SHIFT_OP(shift_op) \ +template inline v_reg<_Tp, n> operator shift_op(const v_reg<_Tp, n>& a, int imm) \ +{ \ + v_reg<_Tp, n> c; \ + for( int i = 0; i < n; i++ ) \ + c.s[i] = (_Tp)(a.s[i] shift_op imm); \ + return c; \ +} + +/** @brief Bitwise shift left + +For 16-, 32- and 64-bit integer values. */ +OPENCV_HAL_IMPL_SHIFT_OP(<< ) + +/** @brief Bitwise shift right + +For 16-, 32- and 64-bit integer values. */ +OPENCV_HAL_IMPL_SHIFT_OP(>> ) + +//! @brief Helper macro +//! 
@ingroup core_hal_intrin_impl +#define OPENCV_HAL_IMPL_ROTATE_SHIFT_OP(suffix,opA,opB) \ +template inline v_reg<_Tp, n> v_rotate_##suffix(const v_reg<_Tp, n>& a) \ +{ \ + v_reg<_Tp, n> b; \ + for (int i = 0; i < n; i++) \ + { \ + int sIndex = i opA imm; \ + if (0 <= sIndex && sIndex < n) \ + { \ + b.s[i] = a.s[sIndex]; \ + } \ + else \ + { \ + b.s[i] = 0; \ + } \ + } \ + return b; \ +} \ +template inline v_reg<_Tp, n> v_rotate_##suffix(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \ +{ \ + v_reg<_Tp, n> c; \ + for (int i = 0; i < n; i++) \ + { \ + int aIndex = i opA imm; \ + int bIndex = i opA imm opB n; \ + if (0 <= bIndex && bIndex < n) \ + { \ + c.s[i] = b.s[bIndex]; \ + } \ + else if (0 <= aIndex && aIndex < n) \ + { \ + c.s[i] = a.s[aIndex]; \ + } \ + else \ + { \ + c.s[i] = 0; \ + } \ + } \ + return c; \ +} + +/** @brief Element shift left among vector + +For all type */ +OPENCV_HAL_IMPL_ROTATE_SHIFT_OP(left, -, +) + +/** @brief Element shift right among vector + +For all type */ +OPENCV_HAL_IMPL_ROTATE_SHIFT_OP(right, +, -) + +/** @brief Sum packed values + +Scheme: +@code +{A1 A2 A3 ...} => sum{A1,A2,A3,...} +@endcode +*/ +template inline typename V_TypeTraits<_Tp>::sum_type v_reduce_sum(const v_reg<_Tp, n>& a) +{ + typename V_TypeTraits<_Tp>::sum_type c = a.s[0]; + for( int i = 1; i < n; i++ ) + c += a.s[i]; + return c; +} + +/** @brief Sums all elements of each input vector, returns the vector of sums + + Scheme: + @code + result[0] = a[0] + a[1] + a[2] + a[3] + result[1] = b[0] + b[1] + b[2] + b[3] + result[2] = c[0] + c[1] + c[2] + c[3] + result[3] = d[0] + d[1] + d[2] + d[3] + @endcode +*/ +template inline v_reg v_reduce_sum4(const v_reg& a, const v_reg& b, + const v_reg& c, const v_reg& d) +{ + v_reg r; + for(int i = 0; i < (n/4); i++) + { + r.s[i*4 + 0] = a.s[i*4 + 0] + a.s[i*4 + 1] + a.s[i*4 + 2] + a.s[i*4 + 3]; + r.s[i*4 + 1] = b.s[i*4 + 0] + b.s[i*4 + 1] + b.s[i*4 + 2] + b.s[i*4 + 3]; + r.s[i*4 + 2] = c.s[i*4 + 0] + c.s[i*4 + 1] + c.s[i*4 + 2] + c.s[i*4 + 3]; + r.s[i*4 + 3] = d.s[i*4 + 0] + d.s[i*4 + 1] + d.s[i*4 + 2] + d.s[i*4 + 3]; + } + return r; +} + +/** @brief Sum absolute differences of values + +Scheme: +@code +{A1 A2 A3 ...} {B1 B2 B3 ...} => sum{ABS(A1-B1),abs(A2-B2),abs(A3-B3),...} +@endcode +For all types except 64-bit types.*/ +template inline typename V_TypeTraits< typename V_TypeTraits<_Tp>::abs_type >::sum_type v_reduce_sad(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) +{ + typename V_TypeTraits< typename V_TypeTraits<_Tp>::abs_type >::sum_type c = _absdiff(a.s[0], b.s[0]); + for (int i = 1; i < n; i++) + c += _absdiff(a.s[i], b.s[i]); + return c; +} + +/** @brief Get negative values mask +@deprecated v_signmask depends on a lane count heavily and therefore isn't universal enough + +Returned value is a bit mask with bits set to 1 on places corresponding to negative packed values indexes. 
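In practice the argument is usually a comparison result; a minimal sketch (illustrative; `ptr` is a placeholder `const float*`):
@code{.cpp}
v_float32x4 x = v_load(ptr);
int m = v_signmask(x < v_setall_f32(0.5f));   // bit i is set iff x[i] < 0.5
if (m == 0) { /* no lane is below the threshold */ }
@endcode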
+Example: +@code{.cpp} +v_int32x4 r; // set to {-1, -1, 1, 1} +int mask = v_signmask(r); // mask = 3 <== 00000000 00000000 00000000 00000011 +@endcode +*/ +template inline int v_signmask(const v_reg<_Tp, n>& a) +{ + int mask = 0; + for( int i = 0; i < n; i++ ) + mask |= (V_TypeTraits<_Tp>::reinterpret_int(a.s[i]) < 0) << i; + return mask; +} + +/** @brief Get first negative lane index + +Returned value is an index of first negative lane (undefined for input of all positive values) +Example: +@code{.cpp} +v_int32x4 r; // set to {0, 0, -1, -1} +int idx = v_heading_zeros(r); // idx = 2 +@endcode +*/ +template inline int v_scan_forward(const v_reg<_Tp, n>& a) +{ + for (int i = 0; i < n; i++) + if(V_TypeTraits<_Tp>::reinterpret_int(a.s[i]) < 0) + return i; + return 0; +} + +/** @brief Check if all packed values are less than zero + +Unsigned values will be casted to signed: `uchar 254 => char -2`. +*/ +template inline bool v_check_all(const v_reg<_Tp, n>& a) +{ + for( int i = 0; i < n; i++ ) + if( V_TypeTraits<_Tp>::reinterpret_int(a.s[i]) >= 0 ) + return false; + return true; +} + +/** @brief Check if any of packed values is less than zero + +Unsigned values will be casted to signed: `uchar 254 => char -2`. +*/ +template inline bool v_check_any(const v_reg<_Tp, n>& a) +{ + for( int i = 0; i < n; i++ ) + if( V_TypeTraits<_Tp>::reinterpret_int(a.s[i]) < 0 ) + return true; + return false; +} + +/** @brief Per-element select (blend operation) + +Return value will be built by combining values _a_ and _b_ using the following scheme: + result[i] = mask[i] ? a[i] : b[i]; + +@note: _mask_ element values are restricted to these values: +- 0: select element from _b_ +- 0xff/0xffff/etc: select element from _a_ +(fully compatible with bitwise-based operator) +*/ +template inline v_reg<_Tp, n> v_select(const v_reg<_Tp, n>& mask, + const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) +{ + typedef V_TypeTraits<_Tp> Traits; + typedef typename Traits::int_type int_type; + v_reg<_Tp, n> c; + for( int i = 0; i < n; i++ ) + { + int_type m = Traits::reinterpret_int(mask.s[i]); + CV_DbgAssert(m == 0 || m == (~(int_type)0)); // restrict mask values: 0 or 0xff/0xffff/etc + c.s[i] = m ? a.s[i] : b.s[i]; + } + return c; +} + +/** @brief Expand values to the wider pack type + +Copy contents of register to two registers with 2x wider pack type. +Scheme: +@code + int32x4 int64x2 int64x2 +{A B C D} ==> {A B} , {C D} +@endcode */ +template inline void v_expand(const v_reg<_Tp, n>& a, + v_reg::w_type, n/2>& b0, + v_reg::w_type, n/2>& b1) +{ + for( int i = 0; i < (n/2); i++ ) + { + b0.s[i] = a.s[i]; + b1.s[i] = a.s[i+(n/2)]; + } +} + +/** @brief Expand lower values to the wider pack type + +Same as cv::v_expand, but return lower half of the vector. + +Scheme: +@code + int32x4 int64x2 +{A B C D} ==> {A B} +@endcode */ +template +inline v_reg::w_type, n/2> +v_expand_low(const v_reg<_Tp, n>& a) +{ + v_reg::w_type, n/2> b; + for( int i = 0; i < (n/2); i++ ) + b.s[i] = a.s[i]; + return b; +} + +/** @brief Expand higher values to the wider pack type + +Same as cv::v_expand_low, but expand higher half of the vector instead. + +Scheme: +@code + int32x4 int64x2 +{A B C D} ==> {C D} +@endcode */ +template +inline v_reg::w_type, n/2> +v_expand_high(const v_reg<_Tp, n>& a) +{ + v_reg::w_type, n/2> b; + for( int i = 0; i < (n/2); i++ ) + b.s[i] = a.s[i+(n/2)]; + return b; +} + +//! 
@cond IGNORED +template inline v_reg::int_type, n> + v_reinterpret_as_int(const v_reg<_Tp, n>& a) +{ + v_reg::int_type, n> c; + for( int i = 0; i < n; i++ ) + c.s[i] = V_TypeTraits<_Tp>::reinterpret_int(a.s[i]); + return c; +} + +template inline v_reg::uint_type, n> + v_reinterpret_as_uint(const v_reg<_Tp, n>& a) +{ + v_reg::uint_type, n> c; + for( int i = 0; i < n; i++ ) + c.s[i] = V_TypeTraits<_Tp>::reinterpret_uint(a.s[i]); + return c; +} +//! @endcond + +/** @brief Interleave two vectors + +Scheme: +@code + {A1 A2 A3 A4} + {B1 B2 B3 B4} +--------------- + {A1 B1 A2 B2} and {A3 B3 A4 B4} +@endcode +For all types except 64-bit. +*/ +template inline void v_zip( const v_reg<_Tp, n>& a0, const v_reg<_Tp, n>& a1, + v_reg<_Tp, n>& b0, v_reg<_Tp, n>& b1 ) +{ + int i; + for( i = 0; i < n/2; i++ ) + { + b0.s[i*2] = a0.s[i]; + b0.s[i*2+1] = a1.s[i]; + } + for( ; i < n; i++ ) + { + b1.s[i*2-n] = a0.s[i]; + b1.s[i*2-n+1] = a1.s[i]; + } +} + +/** @brief Load register contents from memory + +@param ptr pointer to memory block with data +@return register object + +@note Returned type will be detected from passed pointer type, for example uchar ==> cv::v_uint8x16, int ==> cv::v_int32x4, etc. + +@note Use vx_load version to get maximum available register length result + +@note Alignment requirement: +if CV_STRONG_ALIGNMENT=1 then passed pointer must be aligned (`sizeof(lane type)` should be enough). +Do not cast pointer types without runtime check for pointer alignment (like `uchar*` => `int*`). + */ +template +inline v_reg<_Tp, simd128_width / sizeof(_Tp)> v_load(const _Tp* ptr) +{ +#if CV_STRONG_ALIGNMENT + CV_Assert(isAligned(ptr)); +#endif + return v_reg<_Tp, simd128_width / sizeof(_Tp)>(ptr); +} + +#if CV_SIMD256 +/** @brief Load 256-bit length register contents from memory + +@param ptr pointer to memory block with data +@return register object + +@note Returned type will be detected from passed pointer type, for example uchar ==> cv::v_uint8x32, int ==> cv::v_int32x8, etc. + +@note Check CV_SIMD256 preprocessor definition prior to use. +Use vx_load version to get maximum available register length result + +@note Alignment requirement: +if CV_STRONG_ALIGNMENT=1 then passed pointer must be aligned (`sizeof(lane type)` should be enough). +Do not cast pointer types without runtime check for pointer alignment (like `uchar*` => `int*`). + */ +template +inline v_reg<_Tp, simd256_width / sizeof(_Tp)> v256_load(const _Tp* ptr) +{ +#if CV_STRONG_ALIGNMENT + CV_Assert(isAligned(ptr)); +#endif + return v_reg<_Tp, simd256_width / sizeof(_Tp)>(ptr); +} +#endif + +#if CV_SIMD512 +/** @brief Load 512-bit length register contents from memory + +@param ptr pointer to memory block with data +@return register object + +@note Returned type will be detected from passed pointer type, for example uchar ==> cv::v_uint8x64, int ==> cv::v_int32x16, etc. + +@note Check CV_SIMD512 preprocessor definition prior to use. +Use vx_load version to get maximum available register length result + +@note Alignment requirement: +if CV_STRONG_ALIGNMENT=1 then passed pointer must be aligned (`sizeof(lane type)` should be enough). +Do not cast pointer types without runtime check for pointer alignment (like `uchar*` => `int*`). 
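A minimal sketch, guarded by the CV_SIMD512 check mentioned above (illustrative only):
@code{.cpp}
#if CV_SIMD512
float buf[16] = {0};
v_float32x16 r = v512_load(buf);
#endif
@endcode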
+ */ +template +inline v_reg<_Tp, simd512_width / sizeof(_Tp)> v512_load(const _Tp* ptr) +{ +#if CV_STRONG_ALIGNMENT + CV_Assert(isAligned(ptr)); +#endif + return v_reg<_Tp, simd512_width / sizeof(_Tp)>(ptr); +} +#endif + +/** @brief Load register contents from memory (aligned) + +similar to cv::v_load, but source memory block should be aligned (to 16-byte boundary in case of SIMD128, 32-byte - SIMD256, etc) + +@note Use vx_load_aligned version to get maximum available register length result +*/ +template +inline v_reg<_Tp, simd128_width / sizeof(_Tp)> v_load_aligned(const _Tp* ptr) +{ + CV_Assert(isAligned)>(ptr)); + return v_reg<_Tp, simd128_width / sizeof(_Tp)>(ptr); +} + +#if CV_SIMD256 +/** @brief Load register contents from memory (aligned) + +similar to cv::v256_load, but source memory block should be aligned (to 32-byte boundary in case of SIMD256, 64-byte - SIMD512, etc) + +@note Check CV_SIMD256 preprocessor definition prior to use. +Use vx_load_aligned version to get maximum available register length result +*/ +template +inline v_reg<_Tp, simd256_width / sizeof(_Tp)> v256_load_aligned(const _Tp* ptr) +{ + CV_Assert(isAligned)>(ptr)); + return v_reg<_Tp, simd256_width / sizeof(_Tp)>(ptr); +} +#endif + +#if CV_SIMD512 +/** @brief Load register contents from memory (aligned) + +similar to cv::v512_load, but source memory block should be aligned (to 64-byte boundary in case of SIMD512, etc) + +@note Check CV_SIMD512 preprocessor definition prior to use. +Use vx_load_aligned version to get maximum available register length result +*/ +template +inline v_reg<_Tp, simd512_width / sizeof(_Tp)> v512_load_aligned(const _Tp* ptr) +{ + CV_Assert(isAligned)>(ptr)); + return v_reg<_Tp, simd512_width / sizeof(_Tp)>(ptr); +} +#endif + +/** @brief Load 64-bits of data to lower part (high part is undefined). + +@param ptr memory block containing data for first half (0..n/2) + +@code{.cpp} +int lo[2] = { 1, 2 }; +v_int32x4 r = v_load_low(lo); +@endcode + +@note Use vx_load_low version to get maximum available register length result +*/ +template +inline v_reg<_Tp, simd128_width / sizeof(_Tp)> v_load_low(const _Tp* ptr) +{ +#if CV_STRONG_ALIGNMENT + CV_Assert(isAligned(ptr)); +#endif + v_reg<_Tp, simd128_width / sizeof(_Tp)> c; + for( int i = 0; i < c.nlanes/2; i++ ) + { + c.s[i] = ptr[i]; + } + return c; +} + +#if CV_SIMD256 +/** @brief Load 128-bits of data to lower part (high part is undefined). + +@param ptr memory block containing data for first half (0..n/2) + +@code{.cpp} +int lo[4] = { 1, 2, 3, 4 }; +v_int32x8 r = v256_load_low(lo); +@endcode + +@note Check CV_SIMD256 preprocessor definition prior to use. +Use vx_load_low version to get maximum available register length result +*/ +template +inline v_reg<_Tp, simd256_width / sizeof(_Tp)> v256_load_low(const _Tp* ptr) +{ +#if CV_STRONG_ALIGNMENT + CV_Assert(isAligned(ptr)); +#endif + v_reg<_Tp, simd256_width / sizeof(_Tp)> c; + for (int i = 0; i < c.nlanes / 2; i++) + { + c.s[i] = ptr[i]; + } + return c; +} +#endif + +#if CV_SIMD512 +/** @brief Load 256-bits of data to lower part (high part is undefined). + +@param ptr memory block containing data for first half (0..n/2) + +@code{.cpp} +int lo[8] = { 1, 2, 3, 4, 5, 6, 7, 8 }; +v_int32x16 r = v512_load_low(lo); +@endcode + +@note Check CV_SIMD512 preprocessor definition prior to use. 
+Use vx_load_low version to get maximum available register length result +*/ +template +inline v_reg<_Tp, simd512_width / sizeof(_Tp)> v512_load_low(const _Tp* ptr) +{ +#if CV_STRONG_ALIGNMENT + CV_Assert(isAligned(ptr)); +#endif + v_reg<_Tp, simd512_width / sizeof(_Tp)> c; + for (int i = 0; i < c.nlanes / 2; i++) + { + c.s[i] = ptr[i]; + } + return c; +} +#endif + +/** @brief Load register contents from two memory blocks + +@param loptr memory block containing data for first half (0..n/2) +@param hiptr memory block containing data for second half (n/2..n) + +@code{.cpp} +int lo[2] = { 1, 2 }, hi[2] = { 3, 4 }; +v_int32x4 r = v_load_halves(lo, hi); +@endcode + +@note Use vx_load_halves version to get maximum available register length result +*/ +template +inline v_reg<_Tp, simd128_width / sizeof(_Tp)> v_load_halves(const _Tp* loptr, const _Tp* hiptr) +{ +#if CV_STRONG_ALIGNMENT + CV_Assert(isAligned(loptr)); + CV_Assert(isAligned(hiptr)); +#endif + v_reg<_Tp, simd128_width / sizeof(_Tp)> c; + for( int i = 0; i < c.nlanes/2; i++ ) + { + c.s[i] = loptr[i]; + c.s[i+c.nlanes/2] = hiptr[i]; + } + return c; +} + +#if CV_SIMD256 +/** @brief Load register contents from two memory blocks + +@param loptr memory block containing data for first half (0..n/2) +@param hiptr memory block containing data for second half (n/2..n) + +@code{.cpp} +int lo[4] = { 1, 2, 3, 4 }, hi[4] = { 5, 6, 7, 8 }; +v_int32x8 r = v256_load_halves(lo, hi); +@endcode + +@note Check CV_SIMD256 preprocessor definition prior to use. +Use vx_load_halves version to get maximum available register length result +*/ +template +inline v_reg<_Tp, simd256_width / sizeof(_Tp)> v256_load_halves(const _Tp* loptr, const _Tp* hiptr) +{ +#if CV_STRONG_ALIGNMENT + CV_Assert(isAligned(loptr)); + CV_Assert(isAligned(hiptr)); +#endif + v_reg<_Tp, simd256_width / sizeof(_Tp)> c; + for (int i = 0; i < c.nlanes / 2; i++) + { + c.s[i] = loptr[i]; + c.s[i + c.nlanes / 2] = hiptr[i]; + } + return c; +} +#endif + +#if CV_SIMD512 +/** @brief Load register contents from two memory blocks + +@param loptr memory block containing data for first half (0..n/2) +@param hiptr memory block containing data for second half (n/2..n) + +@code{.cpp} +int lo[4] = { 1, 2, 3, 4, 5, 6, 7, 8 }, hi[4] = { 9, 10, 11, 12, 13, 14, 15, 16 }; +v_int32x16 r = v512_load_halves(lo, hi); +@endcode + +@note Check CV_SIMD512 preprocessor definition prior to use. +Use vx_load_halves version to get maximum available register length result +*/ +template +inline v_reg<_Tp, simd512_width / sizeof(_Tp)> v512_load_halves(const _Tp* loptr, const _Tp* hiptr) +{ +#if CV_STRONG_ALIGNMENT + CV_Assert(isAligned(loptr)); + CV_Assert(isAligned(hiptr)); +#endif + v_reg<_Tp, simd512_width / sizeof(_Tp)> c; + for (int i = 0; i < c.nlanes / 2; i++) + { + c.s[i] = loptr[i]; + c.s[i + c.nlanes / 2] = hiptr[i]; + } + return c; +} +#endif + +/** @brief Load register contents from memory with double expand + +Same as cv::v_load, but result pack type will be 2x wider than memory type. + +@code{.cpp} +short buf[4] = {1, 2, 3, 4}; // type is int16 +v_int32x4 r = v_load_expand(buf); // r = {1, 2, 3, 4} - type is int32 +@endcode +For 8-, 16-, 32-bit integer source types. 
+ +@note Use vx_load_expand version to get maximum available register length result +*/ +template +inline v_reg::w_type, simd128_width / sizeof(typename V_TypeTraits<_Tp>::w_type)> +v_load_expand(const _Tp* ptr) +{ +#if CV_STRONG_ALIGNMENT + CV_Assert(isAligned(ptr)); +#endif + typedef typename V_TypeTraits<_Tp>::w_type w_type; + v_reg c; + for( int i = 0; i < c.nlanes; i++ ) + { + c.s[i] = ptr[i]; + } + return c; +} + +#if CV_SIMD256 +/** @brief Load register contents from memory with double expand + +Same as cv::v256_load, but result pack type will be 2x wider than memory type. + +@code{.cpp} +short buf[8] = {1, 2, 3, 4, 5, 6, 7, 8}; // type is int16 +v_int32x8 r = v256_load_expand(buf); // r = {1, 2, 3, 4, 5, 6, 7, 8} - type is int32 +@endcode +For 8-, 16-, 32-bit integer source types. + +@note Check CV_SIMD256 preprocessor definition prior to use. +Use vx_load_expand version to get maximum available register length result +*/ +template +inline v_reg::w_type, simd256_width / sizeof(typename V_TypeTraits<_Tp>::w_type)> +v256_load_expand(const _Tp* ptr) +{ +#if CV_STRONG_ALIGNMENT + CV_Assert(isAligned(ptr)); +#endif + typedef typename V_TypeTraits<_Tp>::w_type w_type; + v_reg c; + for (int i = 0; i < c.nlanes; i++) + { + c.s[i] = ptr[i]; + } + return c; +} +#endif + +#if CV_SIMD512 +/** @brief Load register contents from memory with double expand + +Same as cv::v512_load, but result pack type will be 2x wider than memory type. + +@code{.cpp} +short buf[8] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; // type is int16 +v_int32x16 r = v512_load_expand(buf); // r = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} - type is int32 +@endcode +For 8-, 16-, 32-bit integer source types. + +@note Check CV_SIMD512 preprocessor definition prior to use. +Use vx_load_expand version to get maximum available register length result +*/ +template +inline v_reg::w_type, simd512_width / sizeof(typename V_TypeTraits<_Tp>::w_type)> +v512_load_expand(const _Tp* ptr) +{ +#if CV_STRONG_ALIGNMENT + CV_Assert(isAligned(ptr)); +#endif + typedef typename V_TypeTraits<_Tp>::w_type w_type; + v_reg c; + for (int i = 0; i < c.nlanes; i++) + { + c.s[i] = ptr[i]; + } + return c; +} +#endif + +/** @brief Load register contents from memory with quad expand + +Same as cv::v_load_expand, but result type is 4 times wider than source. +@code{.cpp} +char buf[4] = {1, 2, 3, 4}; // type is int8 +v_int32x4 r = v_load_expand_q(buf); // r = {1, 2, 3, 4} - type is int32 +@endcode +For 8-bit integer source types. + +@note Use vx_load_expand_q version to get maximum available register length result +*/ +template +inline v_reg::q_type, simd128_width / sizeof(typename V_TypeTraits<_Tp>::q_type)> +v_load_expand_q(const _Tp* ptr) +{ +#if CV_STRONG_ALIGNMENT + CV_Assert(isAligned(ptr)); +#endif + typedef typename V_TypeTraits<_Tp>::q_type q_type; + v_reg c; + for( int i = 0; i < c.nlanes; i++ ) + { + c.s[i] = ptr[i]; + } + return c; +} + +#if CV_SIMD256 +/** @brief Load register contents from memory with quad expand + +Same as cv::v256_load_expand, but result type is 4 times wider than source. +@code{.cpp} +char buf[8] = {1, 2, 3, 4, 5, 6, 7, 8}; // type is int8 +v_int32x8 r = v256_load_expand_q(buf); // r = {1, 2, 3, 4, 5, 6, 7, 8} - type is int32 +@endcode +For 8-bit integer source types. + +@note Check CV_SIMD256 preprocessor definition prior to use. 
+Use vx_load_expand_q version to get maximum available register length result +*/ +template +inline v_reg::q_type, simd256_width / sizeof(typename V_TypeTraits<_Tp>::q_type)> +v256_load_expand_q(const _Tp* ptr) +{ +#if CV_STRONG_ALIGNMENT + CV_Assert(isAligned(ptr)); +#endif + typedef typename V_TypeTraits<_Tp>::q_type q_type; + v_reg c; + for (int i = 0; i < c.nlanes; i++) + { + c.s[i] = ptr[i]; + } + return c; +} +#endif + +#if CV_SIMD512 +/** @brief Load register contents from memory with quad expand + +Same as cv::v512_load_expand, but result type is 4 times wider than source. +@code{.cpp} +char buf[16] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; // type is int8 +v_int32x16 r = v512_load_expand_q(buf); // r = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} - type is int32 +@endcode +For 8-bit integer source types. + +@note Check CV_SIMD512 preprocessor definition prior to use. +Use vx_load_expand_q version to get maximum available register length result +*/ +template +inline v_reg::q_type, simd512_width / sizeof(typename V_TypeTraits<_Tp>::q_type)> +v512_load_expand_q(const _Tp* ptr) +{ +#if CV_STRONG_ALIGNMENT + CV_Assert(isAligned(ptr)); +#endif + typedef typename V_TypeTraits<_Tp>::q_type q_type; + v_reg c; + for (int i = 0; i < c.nlanes; i++) + { + c.s[i] = ptr[i]; + } + return c; +} +#endif + +/** @brief Load and deinterleave (2 channels) + +Load data from memory deinterleave and store to 2 registers. +Scheme: +@code +{A1 B1 A2 B2 ...} ==> {A1 A2 ...}, {B1 B2 ...} +@endcode +For all types except 64-bit. */ +template inline void v_load_deinterleave(const _Tp* ptr, v_reg<_Tp, n>& a, + v_reg<_Tp, n>& b) +{ +#if CV_STRONG_ALIGNMENT + CV_Assert(isAligned(ptr)); +#endif + int i, i2; + for( i = i2 = 0; i < n; i++, i2 += 2 ) + { + a.s[i] = ptr[i2]; + b.s[i] = ptr[i2+1]; + } +} + +/** @brief Load and deinterleave (3 channels) + +Load data from memory deinterleave and store to 3 registers. +Scheme: +@code +{A1 B1 C1 A2 B2 C2 ...} ==> {A1 A2 ...}, {B1 B2 ...}, {C1 C2 ...} +@endcode +For all types except 64-bit. */ +template inline void v_load_deinterleave(const _Tp* ptr, v_reg<_Tp, n>& a, + v_reg<_Tp, n>& b, v_reg<_Tp, n>& c) +{ +#if CV_STRONG_ALIGNMENT + CV_Assert(isAligned(ptr)); +#endif + int i, i3; + for( i = i3 = 0; i < n; i++, i3 += 3 ) + { + a.s[i] = ptr[i3]; + b.s[i] = ptr[i3+1]; + c.s[i] = ptr[i3+2]; + } +} + +/** @brief Load and deinterleave (4 channels) + +Load data from memory deinterleave and store to 4 registers. +Scheme: +@code +{A1 B1 C1 D1 A2 B2 C2 D2 ...} ==> {A1 A2 ...}, {B1 B2 ...}, {C1 C2 ...}, {D1 D2 ...} +@endcode +For all types except 64-bit. */ +template +inline void v_load_deinterleave(const _Tp* ptr, v_reg<_Tp, n>& a, + v_reg<_Tp, n>& b, v_reg<_Tp, n>& c, + v_reg<_Tp, n>& d) +{ +#if CV_STRONG_ALIGNMENT + CV_Assert(isAligned(ptr)); +#endif + int i, i4; + for( i = i4 = 0; i < n; i++, i4 += 4 ) + { + a.s[i] = ptr[i4]; + b.s[i] = ptr[i4+1]; + c.s[i] = ptr[i4+2]; + d.s[i] = ptr[i4+3]; + } +} + +/** @brief Interleave and store (2 channels) + +Interleave and store data from 2 registers to memory. +Scheme: +@code +{A1 A2 ...}, {B1 B2 ...} ==> {A1 B1 A2 B2 ...} +@endcode +For all types except 64-bit. 
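A typical round trip with the matching deinterleaving load (illustrative; `data` is a placeholder `float*` holding at least 8 interleaved values):
@code{.cpp}
v_float32x4 re, im;
v_load_deinterleave(data, re, im);      // {re0 im0 re1 im1 ...} -> {re...}, {im...}
re = re * v_setall_f32(2.0f);
im = im * v_setall_f32(2.0f);
v_store_interleave(data, re, im);       // write the pairs back interleaved
@endcode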
*/ +template +inline void v_store_interleave( _Tp* ptr, const v_reg<_Tp, n>& a, + const v_reg<_Tp, n>& b, + hal::StoreMode /*mode*/=hal::STORE_UNALIGNED) +{ +#if CV_STRONG_ALIGNMENT + CV_Assert(isAligned(ptr)); +#endif + int i, i2; + for( i = i2 = 0; i < n; i++, i2 += 2 ) + { + ptr[i2] = a.s[i]; + ptr[i2+1] = b.s[i]; + } +} + +/** @brief Interleave and store (3 channels) + +Interleave and store data from 3 registers to memory. +Scheme: +@code +{A1 A2 ...}, {B1 B2 ...}, {C1 C2 ...} ==> {A1 B1 C1 A2 B2 C2 ...} +@endcode +For all types except 64-bit. */ +template +inline void v_store_interleave( _Tp* ptr, const v_reg<_Tp, n>& a, + const v_reg<_Tp, n>& b, const v_reg<_Tp, n>& c, + hal::StoreMode /*mode*/=hal::STORE_UNALIGNED) +{ +#if CV_STRONG_ALIGNMENT + CV_Assert(isAligned(ptr)); +#endif + int i, i3; + for( i = i3 = 0; i < n; i++, i3 += 3 ) + { + ptr[i3] = a.s[i]; + ptr[i3+1] = b.s[i]; + ptr[i3+2] = c.s[i]; + } +} + +/** @brief Interleave and store (4 channels) + +Interleave and store data from 4 registers to memory. +Scheme: +@code +{A1 A2 ...}, {B1 B2 ...}, {C1 C2 ...}, {D1 D2 ...} ==> {A1 B1 C1 D1 A2 B2 C2 D2 ...} +@endcode +For all types except 64-bit. */ +template inline void v_store_interleave( _Tp* ptr, const v_reg<_Tp, n>& a, + const v_reg<_Tp, n>& b, const v_reg<_Tp, n>& c, + const v_reg<_Tp, n>& d, + hal::StoreMode /*mode*/=hal::STORE_UNALIGNED) +{ +#if CV_STRONG_ALIGNMENT + CV_Assert(isAligned(ptr)); +#endif + int i, i4; + for( i = i4 = 0; i < n; i++, i4 += 4 ) + { + ptr[i4] = a.s[i]; + ptr[i4+1] = b.s[i]; + ptr[i4+2] = c.s[i]; + ptr[i4+3] = d.s[i]; + } +} + +/** @brief Store data to memory + +Store register contents to memory. +Scheme: +@code + REG {A B C D} ==> MEM {A B C D} +@endcode +Pointer can be unaligned. */ +template +inline void v_store(_Tp* ptr, const v_reg<_Tp, n>& a) +{ +#if CV_STRONG_ALIGNMENT + CV_Assert(isAligned(ptr)); +#endif + for( int i = 0; i < n; i++ ) + ptr[i] = a.s[i]; +} + +template +inline void v_store(_Tp* ptr, const v_reg<_Tp, n>& a, hal::StoreMode /*mode*/) +{ +#if CV_STRONG_ALIGNMENT + CV_Assert(isAligned(ptr)); +#endif + v_store(ptr, a); +} + +/** @brief Store data to memory (lower half) + +Store lower half of register contents to memory. +Scheme: +@code + REG {A B C D} ==> MEM {A B} +@endcode */ +template +inline void v_store_low(_Tp* ptr, const v_reg<_Tp, n>& a) +{ +#if CV_STRONG_ALIGNMENT + CV_Assert(isAligned(ptr)); +#endif + for( int i = 0; i < (n/2); i++ ) + ptr[i] = a.s[i]; +} + +/** @brief Store data to memory (higher half) + +Store higher half of register contents to memory. +Scheme: +@code + REG {A B C D} ==> MEM {C D} +@endcode */ +template +inline void v_store_high(_Tp* ptr, const v_reg<_Tp, n>& a) +{ +#if CV_STRONG_ALIGNMENT + CV_Assert(isAligned(ptr)); +#endif + for( int i = 0; i < (n/2); i++ ) + ptr[i] = a.s[i+(n/2)]; +} + +/** @brief Store data to memory (aligned) + +Store register contents to memory. +Scheme: +@code + REG {A B C D} ==> MEM {A B C D} +@endcode +Pointer __should__ be aligned by 16-byte boundary. 
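A minimal sketch using an explicitly aligned buffer (illustrative; CV_DECL_ALIGNED comes from core/cvdef.h):
@code{.cpp}
float CV_DECL_ALIGNED(16) buf[4];
v_store_aligned(buf, v_setall_f32(1.0f));
@endcode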
*/ +template +inline void v_store_aligned(_Tp* ptr, const v_reg<_Tp, n>& a) +{ + CV_Assert(isAligned)>(ptr)); + v_store(ptr, a); +} + +template +inline void v_store_aligned_nocache(_Tp* ptr, const v_reg<_Tp, n>& a) +{ + CV_Assert(isAligned)>(ptr)); + v_store(ptr, a); +} + +template +inline void v_store_aligned(_Tp* ptr, const v_reg<_Tp, n>& a, hal::StoreMode /*mode*/) +{ + CV_Assert(isAligned)>(ptr)); + v_store(ptr, a); +} + +/** @brief Combine vector from first elements of two vectors + +Scheme: +@code + {A1 A2 A3 A4} + {B1 B2 B3 B4} +--------------- + {A1 A2 B1 B2} +@endcode +For all types except 64-bit. */ +template +inline v_reg<_Tp, n> v_combine_low(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) +{ + v_reg<_Tp, n> c; + for( int i = 0; i < (n/2); i++ ) + { + c.s[i] = a.s[i]; + c.s[i+(n/2)] = b.s[i]; + } + return c; +} + +/** @brief Combine vector from last elements of two vectors + +Scheme: +@code + {A1 A2 A3 A4} + {B1 B2 B3 B4} +--------------- + {A3 A4 B3 B4} +@endcode +For all types except 64-bit. */ +template +inline v_reg<_Tp, n> v_combine_high(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) +{ + v_reg<_Tp, n> c; + for( int i = 0; i < (n/2); i++ ) + { + c.s[i] = a.s[i+(n/2)]; + c.s[i+(n/2)] = b.s[i+(n/2)]; + } + return c; +} + +/** @brief Combine two vectors from lower and higher parts of two other vectors + +@code{.cpp} +low = cv::v_combine_low(a, b); +high = cv::v_combine_high(a, b); +@endcode */ +template +inline void v_recombine(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b, + v_reg<_Tp, n>& low, v_reg<_Tp, n>& high) +{ + for( int i = 0; i < (n/2); i++ ) + { + low.s[i] = a.s[i]; + low.s[i+(n/2)] = b.s[i]; + high.s[i] = a.s[i+(n/2)]; + high.s[i+(n/2)] = b.s[i+(n/2)]; + } +} + +/** @brief Vector reverse order + +Reverse the order of the vector +Scheme: +@code + REG {A1 ... An} ==> REG {An ... A1} +@endcode +For all types. */ +template +inline v_reg<_Tp, n> v_reverse(const v_reg<_Tp, n>& a) +{ + v_reg<_Tp, n> c; + for( int i = 0; i < n; i++ ) + c.s[i] = a.s[n-i-1]; + return c; +} + +/** @brief Vector extract + +Scheme: +@code + {A1 A2 A3 A4} + {B1 B2 B3 B4} +======================== +shift = 1 {A2 A3 A4 B1} +shift = 2 {A3 A4 B1 B2} +shift = 3 {A4 B1 B2 B3} +@endcode +Restriction: 0 <= shift < nlanes + +Usage: +@code +v_int32x4 a, b, c; +c = v_extract<2>(a, b); +@endcode +For all types. */ +template +inline v_reg<_Tp, n> v_extract(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) +{ + v_reg<_Tp, n> r; + const int shift = n - s; + int i = 0; + for (; i < shift; ++i) + r.s[i] = a.s[i+s]; + for (; i < n; ++i) + r.s[i] = b.s[i-shift]; + return r; +} + +/** @brief Vector extract + +Scheme: +Return the s-th element of v. +Restriction: 0 <= s < nlanes + +Usage: +@code +v_int32x4 a; +int r; +r = v_extract_n<2>(a); +@endcode +For all types. */ +template +inline _Tp v_extract_n(const v_reg<_Tp, n>& v) +{ + CV_DbgAssert(s >= 0 && s < n); + return v.s[s]; +} + +/** @brief Broadcast i-th element of vector + +Scheme: +@code +{ v[0] v[1] v[2] ... v[SZ] } => { v[i], v[i], v[i] ... v[i] } +@endcode +Restriction: 0 <= i < nlanes +Supported types: 32-bit integers and floats (s32/u32/f32) + */ +template +inline v_reg<_Tp, n> v_broadcast_element(const v_reg<_Tp, n>& a) +{ + CV_DbgAssert(i >= 0 && i < n); + return v_reg<_Tp, n>::all(a.s[i]); +} + +/** @brief Round elements + +Rounds each value. Input type is float vector ==> output type is int vector. +@note Only for floating point types. 
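A minimal sketch (illustrative only):
@code{.cpp}
v_float32x4 a(1.2f, 1.8f, -1.2f, -1.8f);
v_int32x4 r = v_round(a);   // {1, 2, -1, -2}
@endcode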
+*/ +template inline v_reg v_round(const v_reg& a) +{ + v_reg c; + for( int i = 0; i < n; i++ ) + c.s[i] = cvRound(a.s[i]); + return c; +} + +/** @overload */ +template inline v_reg v_round(const v_reg& a, const v_reg& b) +{ + v_reg c; + for( int i = 0; i < n; i++ ) + { + c.s[i] = cvRound(a.s[i]); + c.s[i+n] = cvRound(b.s[i]); + } + return c; +} + +/** @brief Floor elements + +Floor each value. Input type is float vector ==> output type is int vector. +@note Only for floating point types. +*/ +template inline v_reg v_floor(const v_reg& a) +{ + v_reg c; + for( int i = 0; i < n; i++ ) + c.s[i] = cvFloor(a.s[i]); + return c; +} + +/** @brief Ceil elements + +Ceil each value. Input type is float vector ==> output type is int vector. +@note Only for floating point types. +*/ +template inline v_reg v_ceil(const v_reg& a) +{ + v_reg c; + for( int i = 0; i < n; i++ ) + c.s[i] = cvCeil(a.s[i]); + return c; +} + +/** @brief Truncate elements + +Truncate each value. Input type is float vector ==> output type is int vector. +@note Only for floating point types. +*/ +template inline v_reg v_trunc(const v_reg& a) +{ + v_reg c; + for( int i = 0; i < n; i++ ) + c.s[i] = (int)(a.s[i]); + return c; +} + +/** @overload */ +template inline v_reg v_round(const v_reg& a) +{ + v_reg c; + for( int i = 0; i < n; i++ ) + { + c.s[i] = cvRound(a.s[i]); + c.s[i+n] = 0; + } + return c; +} + +/** @overload */ +template inline v_reg v_floor(const v_reg& a) +{ + v_reg c; + for( int i = 0; i < n; i++ ) + { + c.s[i] = cvFloor(a.s[i]); + c.s[i+n] = 0; + } + return c; +} + +/** @overload */ +template inline v_reg v_ceil(const v_reg& a) +{ + v_reg c; + for( int i = 0; i < n; i++ ) + { + c.s[i] = cvCeil(a.s[i]); + c.s[i+n] = 0; + } + return c; +} + +/** @overload */ +template inline v_reg v_trunc(const v_reg& a) +{ + v_reg c; + for( int i = 0; i < n; i++ ) + { + c.s[i] = (int)(a.s[i]); + c.s[i+n] = 0; + } + return c; +} + +/** @brief Convert to float + +Supported input type is cv::v_int32. */ +template inline v_reg v_cvt_f32(const v_reg& a) +{ + v_reg c; + for( int i = 0; i < n; i++ ) + c.s[i] = (float)a.s[i]; + return c; +} + +/** @brief Convert lower half to float + +Supported input type is cv::v_float64. */ +template inline v_reg v_cvt_f32(const v_reg& a) +{ + v_reg c; + for( int i = 0; i < n; i++ ) + { + c.s[i] = (float)a.s[i]; + c.s[i+n] = 0; + } + return c; +} + +/** @brief Convert to float + +Supported input type is cv::v_float64. */ +template inline v_reg v_cvt_f32(const v_reg& a, const v_reg& b) +{ + v_reg c; + for( int i = 0; i < n; i++ ) + { + c.s[i] = (float)a.s[i]; + c.s[i+n] = (float)b.s[i]; + } + return c; +} + +/** @brief Convert lower half to double + +Supported input type is cv::v_int32. */ +template CV_INLINE v_reg v_cvt_f64(const v_reg& a) +{ + v_reg c; + for( int i = 0; i < (n/2); i++ ) + c.s[i] = (double)a.s[i]; + return c; +} + +/** @brief Convert to double high part of vector + +Supported input type is cv::v_int32. */ +template CV_INLINE v_reg v_cvt_f64_high(const v_reg& a) +{ + v_reg c; + for( int i = 0; i < (n/2); i++ ) + c.s[i] = (double)a.s[i + (n/2)]; + return c; +} + +/** @brief Convert lower half to double + +Supported input type is cv::v_float32. */ +template CV_INLINE v_reg v_cvt_f64(const v_reg& a) +{ + v_reg c; + for( int i = 0; i < (n/2); i++ ) + c.s[i] = (double)a.s[i]; + return c; +} + +/** @brief Convert to double high part of vector + +Supported input type is cv::v_float32. 
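Together with cv::v_cvt_f64 this converts a full float32 vector (illustrative; `src` is a placeholder `const float*`):
@code{.cpp}
v_float32x4 a = v_load(src);
v_float64x2 lo = v_cvt_f64(a);        // a[0], a[1]
v_float64x2 hi = v_cvt_f64_high(a);   // a[2], a[3]
@endcode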
*/ +template CV_INLINE v_reg v_cvt_f64_high(const v_reg& a) +{ + v_reg c; + for( int i = 0; i < (n/2); i++ ) + c.s[i] = (double)a.s[i + (n/2)]; + return c; +} + +/** @brief Convert to double + +Supported input type is cv::v_int64. */ +template CV_INLINE v_reg v_cvt_f64(const v_reg& a) +{ + v_reg c; + for( int i = 0; i < n; i++ ) + c.s[i] = (double)a.s[i]; + return c; +} + + +template inline v_reg<_Tp, simd128_width / sizeof(_Tp)> v_lut(const _Tp* tab, const int* idx) +{ + v_reg<_Tp, simd128_width / sizeof(_Tp)> c; + for (int i = 0; i < c.nlanes; i++) + c.s[i] = tab[idx[i]]; + return c; +} +template inline v_reg<_Tp, simd128_width / sizeof(_Tp)> v_lut_pairs(const _Tp* tab, const int* idx) +{ + v_reg<_Tp, simd128_width / sizeof(_Tp)> c; + for (int i = 0; i < c.nlanes; i++) + c.s[i] = tab[idx[i / 2] + i % 2]; + return c; +} +template inline v_reg<_Tp, simd128_width / sizeof(_Tp)> v_lut_quads(const _Tp* tab, const int* idx) +{ + v_reg<_Tp, simd128_width / sizeof(_Tp)> c; + for (int i = 0; i < c.nlanes; i++) + c.s[i] = tab[idx[i / 4] + i % 4]; + return c; +} + +template inline v_reg v_lut(const int* tab, const v_reg& idx) +{ + v_reg c; + for( int i = 0; i < n; i++ ) + c.s[i] = tab[idx.s[i]]; + return c; +} + +template inline v_reg v_lut(const unsigned* tab, const v_reg& idx) +{ + v_reg c; + for (int i = 0; i < n; i++) + c.s[i] = tab[idx.s[i]]; + return c; +} + +template inline v_reg v_lut(const float* tab, const v_reg& idx) +{ + v_reg c; + for( int i = 0; i < n; i++ ) + c.s[i] = tab[idx.s[i]]; + return c; +} + +template inline v_reg v_lut(const double* tab, const v_reg& idx) +{ + v_reg c; + for( int i = 0; i < n/2; i++ ) + c.s[i] = tab[idx.s[i]]; + return c; +} + + +template inline void v_lut_deinterleave(const float* tab, const v_reg& idx, + v_reg& x, v_reg& y) +{ + for( int i = 0; i < n; i++ ) + { + int j = idx.s[i]; + x.s[i] = tab[j]; + y.s[i] = tab[j+1]; + } +} + +template inline void v_lut_deinterleave(const double* tab, const v_reg& idx, + v_reg& x, v_reg& y) +{ + for( int i = 0; i < n; i++ ) + { + int j = idx.s[i]; + x.s[i] = tab[j]; + y.s[i] = tab[j+1]; + } +} + +template inline v_reg<_Tp, n> v_interleave_pairs(const v_reg<_Tp, n>& vec) +{ + v_reg<_Tp, n> c; + for (int i = 0; i < n/4; i++) + { + c.s[4*i ] = vec.s[4*i ]; + c.s[4*i+1] = vec.s[4*i+2]; + c.s[4*i+2] = vec.s[4*i+1]; + c.s[4*i+3] = vec.s[4*i+3]; + } + return c; +} + +template inline v_reg<_Tp, n> v_interleave_quads(const v_reg<_Tp, n>& vec) +{ + v_reg<_Tp, n> c; + for (int i = 0; i < n/8; i++) + { + c.s[8*i ] = vec.s[8*i ]; + c.s[8*i+1] = vec.s[8*i+4]; + c.s[8*i+2] = vec.s[8*i+1]; + c.s[8*i+3] = vec.s[8*i+5]; + c.s[8*i+4] = vec.s[8*i+2]; + c.s[8*i+5] = vec.s[8*i+6]; + c.s[8*i+6] = vec.s[8*i+3]; + c.s[8*i+7] = vec.s[8*i+7]; + } + return c; +} + +template inline v_reg<_Tp, n> v_pack_triplets(const v_reg<_Tp, n>& vec) +{ + v_reg<_Tp, n> c; + for (int i = 0; i < n/4; i++) + { + c.s[3*i ] = vec.s[4*i ]; + c.s[3*i+1] = vec.s[4*i+1]; + c.s[3*i+2] = vec.s[4*i+2]; + } + return c; +} + +/** @brief Transpose 4x4 matrix + +Scheme: +@code +a0 {A1 A2 A3 A4} +a1 {B1 B2 B3 B4} +a2 {C1 C2 C3 C4} +a3 {D1 D2 D3 D4} +=============== +b0 {A1 B1 C1 D1} +b1 {A2 B2 C2 D2} +b2 {A3 B3 C3 D3} +b3 {A4 B4 C4 D4} +@endcode +*/ +template +inline void v_transpose4x4( v_reg<_Tp, n>& a0, const v_reg<_Tp, n>& a1, + const v_reg<_Tp, n>& a2, const v_reg<_Tp, n>& a3, + v_reg<_Tp, n>& b0, v_reg<_Tp, n>& b1, + v_reg<_Tp, n>& b2, v_reg<_Tp, n>& b3 ) +{ + for (int i = 0; i < n / 4; i++) + { + b0.s[0 + i*4] = a0.s[0 + i*4]; b0.s[1 + i*4] = a1.s[0 + i*4]; + b0.s[2 + 
i*4] = a2.s[0 + i*4]; b0.s[3 + i*4] = a3.s[0 + i*4]; + b1.s[0 + i*4] = a0.s[1 + i*4]; b1.s[1 + i*4] = a1.s[1 + i*4]; + b1.s[2 + i*4] = a2.s[1 + i*4]; b1.s[3 + i*4] = a3.s[1 + i*4]; + b2.s[0 + i*4] = a0.s[2 + i*4]; b2.s[1 + i*4] = a1.s[2 + i*4]; + b2.s[2 + i*4] = a2.s[2 + i*4]; b2.s[3 + i*4] = a3.s[2 + i*4]; + b3.s[0 + i*4] = a0.s[3 + i*4]; b3.s[1 + i*4] = a1.s[3 + i*4]; + b3.s[2 + i*4] = a2.s[3 + i*4]; b3.s[3 + i*4] = a3.s[3 + i*4]; + } +} + +//! @brief Helper macro +//! @ingroup core_hal_intrin_impl +#define OPENCV_HAL_IMPL_C_INIT_ZERO(_Tpvec, prefix, suffix) \ +inline _Tpvec prefix##_setzero_##suffix() { return _Tpvec::zero(); } + +//! @name Init with zero +//! @{ +//! @brief Create new vector with zero elements +OPENCV_HAL_IMPL_C_INIT_ZERO(v_uint8x16, v, u8) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_int8x16, v, s8) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_uint16x8, v, u16) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_int16x8, v, s16) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_uint32x4, v, u32) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_int32x4, v, s32) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_float32x4, v, f32) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_float64x2, v, f64) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_uint64x2, v, u64) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_int64x2, v, s64) + +#if CV_SIMD256 +OPENCV_HAL_IMPL_C_INIT_ZERO(v_uint8x32, v256, u8) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_int8x32, v256, s8) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_uint16x16, v256, u16) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_int16x16, v256, s16) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_uint32x8, v256, u32) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_int32x8, v256, s32) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_float32x8, v256, f32) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_float64x4, v256, f64) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_uint64x4, v256, u64) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_int64x4, v256, s64) +#endif + +#if CV_SIMD512 +OPENCV_HAL_IMPL_C_INIT_ZERO(v_uint8x64, v512, u8) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_int8x64, v512, s8) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_uint16x32, v512, u16) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_int16x32, v512, s16) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_uint32x16, v512, u32) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_int32x16, v512, s32) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_float32x16, v512, f32) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_float64x8, v512, f64) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_uint64x8, v512, u64) +OPENCV_HAL_IMPL_C_INIT_ZERO(v_int64x8, v512, s64) +#endif +//! @} + +//! @brief Helper macro +//! @ingroup core_hal_intrin_impl +#define OPENCV_HAL_IMPL_C_INIT_VAL(_Tpvec, _Tp, prefix, suffix) \ +inline _Tpvec prefix##_setall_##suffix(_Tp val) { return _Tpvec::all(val); } + +//! @name Init with value +//! @{ +//! 
@brief Create new vector with elements set to a specific value +OPENCV_HAL_IMPL_C_INIT_VAL(v_uint8x16, uchar, v, u8) +OPENCV_HAL_IMPL_C_INIT_VAL(v_int8x16, schar, v, s8) +OPENCV_HAL_IMPL_C_INIT_VAL(v_uint16x8, ushort, v, u16) +OPENCV_HAL_IMPL_C_INIT_VAL(v_int16x8, short, v, s16) +OPENCV_HAL_IMPL_C_INIT_VAL(v_uint32x4, unsigned, v, u32) +OPENCV_HAL_IMPL_C_INIT_VAL(v_int32x4, int, v, s32) +OPENCV_HAL_IMPL_C_INIT_VAL(v_float32x4, float, v, f32) +OPENCV_HAL_IMPL_C_INIT_VAL(v_float64x2, double, v, f64) +OPENCV_HAL_IMPL_C_INIT_VAL(v_uint64x2, uint64, v, u64) +OPENCV_HAL_IMPL_C_INIT_VAL(v_int64x2, int64, v, s64) + +#if CV_SIMD256 +OPENCV_HAL_IMPL_C_INIT_VAL(v_uint8x32, uchar, v256, u8) +OPENCV_HAL_IMPL_C_INIT_VAL(v_int8x32, schar, v256, s8) +OPENCV_HAL_IMPL_C_INIT_VAL(v_uint16x16, ushort, v256, u16) +OPENCV_HAL_IMPL_C_INIT_VAL(v_int16x16, short, v256, s16) +OPENCV_HAL_IMPL_C_INIT_VAL(v_uint32x8, unsigned, v256, u32) +OPENCV_HAL_IMPL_C_INIT_VAL(v_int32x8, int, v256, s32) +OPENCV_HAL_IMPL_C_INIT_VAL(v_float32x8, float, v256, f32) +OPENCV_HAL_IMPL_C_INIT_VAL(v_float64x4, double, v256, f64) +OPENCV_HAL_IMPL_C_INIT_VAL(v_uint64x4, uint64, v256, u64) +OPENCV_HAL_IMPL_C_INIT_VAL(v_int64x4, int64, v256, s64) +#endif + +#if CV_SIMD512 +OPENCV_HAL_IMPL_C_INIT_VAL(v_uint8x64, uchar, v512, u8) +OPENCV_HAL_IMPL_C_INIT_VAL(v_int8x64, schar, v512, s8) +OPENCV_HAL_IMPL_C_INIT_VAL(v_uint16x32, ushort, v512, u16) +OPENCV_HAL_IMPL_C_INIT_VAL(v_int16x32, short, v512, s16) +OPENCV_HAL_IMPL_C_INIT_VAL(v_uint32x16, unsigned, v512, u32) +OPENCV_HAL_IMPL_C_INIT_VAL(v_int32x16, int, v512, s32) +OPENCV_HAL_IMPL_C_INIT_VAL(v_float32x16, float, v512, f32) +OPENCV_HAL_IMPL_C_INIT_VAL(v_float64x8, double, v512, f64) +OPENCV_HAL_IMPL_C_INIT_VAL(v_uint64x8, uint64, v512, u64) +OPENCV_HAL_IMPL_C_INIT_VAL(v_int64x8, int64, v512, s64) +#endif +//! @} + +//! @brief Helper macro +//! @ingroup core_hal_intrin_impl +#define OPENCV_HAL_IMPL_C_REINTERPRET(_Tp, suffix) \ +template inline v_reg<_Tp, n0*sizeof(_Tp0)/sizeof(_Tp)> \ + v_reinterpret_as_##suffix(const v_reg<_Tp0, n0>& a) \ +{ return a.template reinterpret_as<_Tp, n0*sizeof(_Tp0)/sizeof(_Tp)>(); } + +//! @name Reinterpret +//! @{ +//! @brief Convert vector to different type without modifying underlying data. +OPENCV_HAL_IMPL_C_REINTERPRET(uchar, u8) +OPENCV_HAL_IMPL_C_REINTERPRET(schar, s8) +OPENCV_HAL_IMPL_C_REINTERPRET(ushort, u16) +OPENCV_HAL_IMPL_C_REINTERPRET(short, s16) +OPENCV_HAL_IMPL_C_REINTERPRET(unsigned, u32) +OPENCV_HAL_IMPL_C_REINTERPRET(int, s32) +OPENCV_HAL_IMPL_C_REINTERPRET(float, f32) +OPENCV_HAL_IMPL_C_REINTERPRET(double, f64) +OPENCV_HAL_IMPL_C_REINTERPRET(uint64, u64) +OPENCV_HAL_IMPL_C_REINTERPRET(int64, s64) +//! @} + +//! @brief Helper macro +//! @ingroup core_hal_intrin_impl +#define OPENCV_HAL_IMPL_C_SHIFTL(_Tp) \ +template inline v_reg<_Tp, n> v_shl(const v_reg<_Tp, n>& a) \ +{ return a << shift; } + +//! @name Left shift +//! @{ +//! @brief Shift left +OPENCV_HAL_IMPL_C_SHIFTL(ushort) +OPENCV_HAL_IMPL_C_SHIFTL(short) +OPENCV_HAL_IMPL_C_SHIFTL(unsigned) +OPENCV_HAL_IMPL_C_SHIFTL(int) +OPENCV_HAL_IMPL_C_SHIFTL(uint64) +OPENCV_HAL_IMPL_C_SHIFTL(int64) +//! @} + +//! @brief Helper macro +//! @ingroup core_hal_intrin_impl +#define OPENCV_HAL_IMPL_C_SHIFTR(_Tp) \ +template inline v_reg<_Tp, n> v_shr(const v_reg<_Tp, n>& a) \ +{ return a >> shift; } + +//! @name Right shift +//! @{ +//! 
@brief Shift right +OPENCV_HAL_IMPL_C_SHIFTR(ushort) +OPENCV_HAL_IMPL_C_SHIFTR(short) +OPENCV_HAL_IMPL_C_SHIFTR(unsigned) +OPENCV_HAL_IMPL_C_SHIFTR(int) +OPENCV_HAL_IMPL_C_SHIFTR(uint64) +OPENCV_HAL_IMPL_C_SHIFTR(int64) +//! @} + +//! @brief Helper macro +//! @ingroup core_hal_intrin_impl +#define OPENCV_HAL_IMPL_C_RSHIFTR(_Tp) \ +template inline v_reg<_Tp, n> v_rshr(const v_reg<_Tp, n>& a) \ +{ \ + v_reg<_Tp, n> c; \ + for( int i = 0; i < n; i++ ) \ + c.s[i] = (_Tp)((a.s[i] + ((_Tp)1 << (shift - 1))) >> shift); \ + return c; \ +} + +//! @name Rounding shift +//! @{ +//! @brief Rounding shift right +OPENCV_HAL_IMPL_C_RSHIFTR(ushort) +OPENCV_HAL_IMPL_C_RSHIFTR(short) +OPENCV_HAL_IMPL_C_RSHIFTR(unsigned) +OPENCV_HAL_IMPL_C_RSHIFTR(int) +OPENCV_HAL_IMPL_C_RSHIFTR(uint64) +OPENCV_HAL_IMPL_C_RSHIFTR(int64) +//! @} + +//! @brief Helper macro +//! @ingroup core_hal_intrin_impl +#define OPENCV_HAL_IMPL_C_PACK(_Tp, _Tpn, pack_suffix, cast) \ +template inline v_reg<_Tpn, 2*n> v_##pack_suffix(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \ +{ \ + v_reg<_Tpn, 2*n> c; \ + for( int i = 0; i < n; i++ ) \ + { \ + c.s[i] = cast<_Tpn>(a.s[i]); \ + c.s[i+n] = cast<_Tpn>(b.s[i]); \ + } \ + return c; \ +} + +//! @name Pack +//! @{ +//! @brief Pack values from two vectors to one +//! +//! Return vector type have twice more elements than input vector types. Variant with _u_ suffix also +//! converts to corresponding unsigned type. +//! +//! - pack: for 16-, 32- and 64-bit integer input types +//! - pack_u: for 16- and 32-bit signed integer input types +//! +//! @note All variants except 64-bit use saturation. +OPENCV_HAL_IMPL_C_PACK(ushort, uchar, pack, saturate_cast) +OPENCV_HAL_IMPL_C_PACK(short, schar, pack, saturate_cast) +OPENCV_HAL_IMPL_C_PACK(unsigned, ushort, pack, saturate_cast) +OPENCV_HAL_IMPL_C_PACK(int, short, pack, saturate_cast) +OPENCV_HAL_IMPL_C_PACK(uint64, unsigned, pack, static_cast) +OPENCV_HAL_IMPL_C_PACK(int64, int, pack, static_cast) +OPENCV_HAL_IMPL_C_PACK(short, uchar, pack_u, saturate_cast) +OPENCV_HAL_IMPL_C_PACK(int, ushort, pack_u, saturate_cast) +//! @} + +//! @brief Helper macro +//! @ingroup core_hal_intrin_impl +#define OPENCV_HAL_IMPL_C_RSHR_PACK(_Tp, _Tpn, pack_suffix, cast) \ +template inline v_reg<_Tpn, 2*n> v_rshr_##pack_suffix(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) \ +{ \ + v_reg<_Tpn, 2*n> c; \ + for( int i = 0; i < n; i++ ) \ + { \ + c.s[i] = cast<_Tpn>((a.s[i] + ((_Tp)1 << (shift - 1))) >> shift); \ + c.s[i+n] = cast<_Tpn>((b.s[i] + ((_Tp)1 << (shift - 1))) >> shift); \ + } \ + return c; \ +} + +//! @name Pack with rounding shift +//! @{ +//! @brief Pack values from two vectors to one with rounding shift +//! +//! Values from the input vectors will be shifted right by _n_ bits with rounding, converted to narrower +//! type and returned in the result vector. Variant with _u_ suffix converts to unsigned type. +//! +//! - pack: for 16-, 32- and 64-bit integer input types +//! - pack_u: for 16- and 32-bit signed integer input types +//! +//! @note All variants except 64-bit use saturation. 
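//!
//! A minimal sketch (illustrative only): converting 16-bit values with 4 fractional bits back to 8 bits
//! @code{.cpp}
//! v_uint16x8 a = v_setall_u16(36), b = v_setall_u16(200);
//! v_uint8x16 c = v_rshr_pack<4>(a, b);   // (36+8)>>4 = 2 in the first half, (200+8)>>4 = 13 in the second
//! @endcode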
+OPENCV_HAL_IMPL_C_RSHR_PACK(ushort, uchar, pack, saturate_cast) +OPENCV_HAL_IMPL_C_RSHR_PACK(short, schar, pack, saturate_cast) +OPENCV_HAL_IMPL_C_RSHR_PACK(unsigned, ushort, pack, saturate_cast) +OPENCV_HAL_IMPL_C_RSHR_PACK(int, short, pack, saturate_cast) +OPENCV_HAL_IMPL_C_RSHR_PACK(uint64, unsigned, pack, static_cast) +OPENCV_HAL_IMPL_C_RSHR_PACK(int64, int, pack, static_cast) +OPENCV_HAL_IMPL_C_RSHR_PACK(short, uchar, pack_u, saturate_cast) +OPENCV_HAL_IMPL_C_RSHR_PACK(int, ushort, pack_u, saturate_cast) +//! @} + +//! @brief Helper macro +//! @ingroup core_hal_intrin_impl +#define OPENCV_HAL_IMPL_C_PACK_STORE(_Tp, _Tpn, pack_suffix, cast) \ +template inline void v_##pack_suffix##_store(_Tpn* ptr, const v_reg<_Tp, n>& a) \ +{ \ + for( int i = 0; i < n; i++ ) \ + ptr[i] = cast<_Tpn>(a.s[i]); \ +} + +//! @name Pack and store +//! @{ +//! @brief Store values from the input vector into memory with pack +//! +//! Values will be stored into memory with conversion to narrower type. +//! Variant with _u_ suffix converts to corresponding unsigned type. +//! +//! - pack: for 16-, 32- and 64-bit integer input types +//! - pack_u: for 16- and 32-bit signed integer input types +//! +//! @note All variants except 64-bit use saturation. +OPENCV_HAL_IMPL_C_PACK_STORE(ushort, uchar, pack, saturate_cast) +OPENCV_HAL_IMPL_C_PACK_STORE(short, schar, pack, saturate_cast) +OPENCV_HAL_IMPL_C_PACK_STORE(unsigned, ushort, pack, saturate_cast) +OPENCV_HAL_IMPL_C_PACK_STORE(int, short, pack, saturate_cast) +OPENCV_HAL_IMPL_C_PACK_STORE(uint64, unsigned, pack, static_cast) +OPENCV_HAL_IMPL_C_PACK_STORE(int64, int, pack, static_cast) +OPENCV_HAL_IMPL_C_PACK_STORE(short, uchar, pack_u, saturate_cast) +OPENCV_HAL_IMPL_C_PACK_STORE(int, ushort, pack_u, saturate_cast) +//! @} + +//! @brief Helper macro +//! @ingroup core_hal_intrin_impl +#define OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(_Tp, _Tpn, pack_suffix, cast) \ +template inline void v_rshr_##pack_suffix##_store(_Tpn* ptr, const v_reg<_Tp, n>& a) \ +{ \ + for( int i = 0; i < n; i++ ) \ + ptr[i] = cast<_Tpn>((a.s[i] + ((_Tp)1 << (shift - 1))) >> shift); \ +} + +//! @name Pack and store with rounding shift +//! @{ +//! @brief Store values from the input vector into memory with pack +//! +//! Values will be shifted _n_ bits right with rounding, converted to narrower type and stored into +//! memory. Variant with _u_ suffix converts to unsigned type. +//! +//! - pack: for 16-, 32- and 64-bit integer input types +//! - pack_u: for 16- and 32-bit signed integer input types +//! +//! @note All variants except 64-bit use saturation. +OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(ushort, uchar, pack, saturate_cast) +OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(short, schar, pack, saturate_cast) +OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(unsigned, ushort, pack, saturate_cast) +OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(int, short, pack, saturate_cast) +OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(uint64, unsigned, pack, static_cast) +OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(int64, int, pack, static_cast) +OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(short, uchar, pack_u, saturate_cast) +OPENCV_HAL_IMPL_C_RSHR_PACK_STORE(int, ushort, pack_u, saturate_cast) +//! @} + +//! @cond IGNORED +template +inline void _pack_b(_Tpm* mptr, const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) +{ + for (int i = 0; i < n; ++i) + { + mptr[i] = (_Tpm)a.s[i]; + mptr[i + n] = (_Tpm)b.s[i]; + } +} +//! @endcond + +//! @name Pack boolean values +//! @{ +//! @brief Pack boolean values from multiple vectors to one unsigned 8-bit integer vector +//! +//! 
@note Must provide valid boolean values to guarantee same result for all architectures. + +/** @brief +//! For 16-bit boolean values + +Scheme: +@code +a {0xFFFF 0 0 0xFFFF 0 0xFFFF 0xFFFF 0} +b {0xFFFF 0 0xFFFF 0 0 0xFFFF 0 0xFFFF} +=============== +{ + 0xFF 0 0 0xFF 0 0xFF 0xFF 0 + 0xFF 0 0xFF 0 0 0xFF 0 0xFF +} +@endcode */ + +template inline v_reg v_pack_b(const v_reg& a, const v_reg& b) +{ + v_reg mask; + _pack_b(mask.s, a, b); + return mask; +} + +/** @overload +For 32-bit boolean values + +Scheme: +@code +a {0xFFFF.. 0 0 0xFFFF..} +b {0 0xFFFF.. 0xFFFF.. 0} +c {0xFFFF.. 0 0xFFFF.. 0} +d {0 0xFFFF.. 0 0xFFFF..} +=============== +{ + 0xFF 0 0 0xFF 0 0xFF 0xFF 0 + 0xFF 0 0xFF 0 0 0xFF 0 0xFF +} +@endcode */ + +template inline v_reg v_pack_b(const v_reg& a, const v_reg& b, + const v_reg& c, const v_reg& d) +{ + v_reg mask; + _pack_b(mask.s, a, b); + _pack_b(mask.s + 2*n, c, d); + return mask; +} + +/** @overload +For 64-bit boolean values + +Scheme: +@code +a {0xFFFF.. 0} +b {0 0xFFFF..} +c {0xFFFF.. 0} +d {0 0xFFFF..} + +e {0xFFFF.. 0} +f {0xFFFF.. 0} +g {0 0xFFFF..} +h {0 0xFFFF..} +=============== +{ + 0xFF 0 0 0xFF 0xFF 0 0 0xFF + 0xFF 0 0xFF 0 0 0xFF 0 0xFF +} +@endcode */ +template inline v_reg v_pack_b(const v_reg& a, const v_reg& b, + const v_reg& c, const v_reg& d, + const v_reg& e, const v_reg& f, + const v_reg& g, const v_reg& h) +{ + v_reg mask; + _pack_b(mask.s, a, b); + _pack_b(mask.s + 2*n, c, d); + _pack_b(mask.s + 4*n, e, f); + _pack_b(mask.s + 6*n, g, h); + return mask; +} +//! @} + +/** @brief Matrix multiplication + +Scheme: +@code +{A0 A1 A2 A3} |V0| +{B0 B1 B2 B3} |V1| +{C0 C1 C2 C3} |V2| +{D0 D1 D2 D3} x |V3| +==================== +{R0 R1 R2 R3}, where: +R0 = A0V0 + B0V1 + C0V2 + D0V3, +R1 = A1V0 + B1V1 + C1V2 + D1V3 +... +@endcode +*/ +template +inline v_reg v_matmul(const v_reg& v, + const v_reg& a, const v_reg& b, + const v_reg& c, const v_reg& d) +{ + v_reg res; + for (int i = 0; i < n / 4; i++) + { + res.s[0 + i*4] = v.s[0 + i*4] * a.s[0 + i*4] + v.s[1 + i*4] * b.s[0 + i*4] + v.s[2 + i*4] * c.s[0 + i*4] + v.s[3 + i*4] * d.s[0 + i*4]; + res.s[1 + i*4] = v.s[0 + i*4] * a.s[1 + i*4] + v.s[1 + i*4] * b.s[1 + i*4] + v.s[2 + i*4] * c.s[1 + i*4] + v.s[3 + i*4] * d.s[1 + i*4]; + res.s[2 + i*4] = v.s[0 + i*4] * a.s[2 + i*4] + v.s[1 + i*4] * b.s[2 + i*4] + v.s[2 + i*4] * c.s[2 + i*4] + v.s[3 + i*4] * d.s[2 + i*4]; + res.s[3 + i*4] = v.s[0 + i*4] * a.s[3 + i*4] + v.s[1 + i*4] * b.s[3 + i*4] + v.s[2 + i*4] * c.s[3 + i*4] + v.s[3 + i*4] * d.s[3 + i*4]; + } + return res; +} + +/** @brief Matrix multiplication and add + +Scheme: +@code +{A0 A1 A2 A3} |V0| |D0| +{B0 B1 B2 B3} |V1| |D1| +{C0 C1 C2 C3} x |V2| + |D2| +==================== |D3| +{R0 R1 R2 R3}, where: +R0 = A0V0 + B0V1 + C0V2 + D0, +R1 = A1V0 + B1V1 + C1V2 + D1 +... 
+@endcode
+*/
+template<int n>
+inline v_reg<float, n> v_matmuladd(const v_reg<float, n>& v,
+                                   const v_reg<float, n>& a, const v_reg<float, n>& b,
+                                   const v_reg<float, n>& c, const v_reg<float, n>& d)
+{
+    v_reg<float, n> res;
+    for (int i = 0; i < n / 4; i++)
+    {
+        res.s[0 + i * 4] = v.s[0 + i * 4] * a.s[0 + i * 4] + v.s[1 + i * 4] * b.s[0 + i * 4] + v.s[2 + i * 4] * c.s[0 + i * 4] + d.s[0 + i * 4];
+        res.s[1 + i * 4] = v.s[0 + i * 4] * a.s[1 + i * 4] + v.s[1 + i * 4] * b.s[1 + i * 4] + v.s[2 + i * 4] * c.s[1 + i * 4] + d.s[1 + i * 4];
+        res.s[2 + i * 4] = v.s[0 + i * 4] * a.s[2 + i * 4] + v.s[1 + i * 4] * b.s[2 + i * 4] + v.s[2 + i * 4] * c.s[2 + i * 4] + d.s[2 + i * 4];
+        res.s[3 + i * 4] = v.s[0 + i * 4] * a.s[3 + i * 4] + v.s[1 + i * 4] * b.s[3 + i * 4] + v.s[2 + i * 4] * c.s[3 + i * 4] + d.s[3 + i * 4];
+    }
+    return res;
+}
+
+
+template<int n> inline v_reg<double, n/2> v_dotprod_expand(const v_reg<int, n>& a, const v_reg<int, n>& b)
+{ return v_fma(v_cvt_f64(a), v_cvt_f64(b), v_cvt_f64_high(a) * v_cvt_f64_high(b)); }
+template<int n> inline v_reg<double, n/2> v_dotprod_expand(const v_reg<int, n>& a, const v_reg<int, n>& b,
+                                                           const v_reg<double, n/2>& c)
+{ return v_fma(v_cvt_f64(a), v_cvt_f64(b), v_fma(v_cvt_f64_high(a), v_cvt_f64_high(b), c)); }
+
+template<int n> inline v_reg<double, n/2> v_dotprod_expand_fast(const v_reg<int, n>& a, const v_reg<int, n>& b)
+{ return v_dotprod_expand(a, b); }
+template<int n> inline v_reg<double, n/2> v_dotprod_expand_fast(const v_reg<int, n>& a, const v_reg<int, n>& b,
+                                                                const v_reg<double, n/2>& c)
+{ return v_dotprod_expand(a, b, c); }
+
+////// FP16 support ///////
+
+inline v_reg<float, V_TypeTraits<float>::nlanes128>
+v_load_expand(const float16_t* ptr)
+{
+    v_reg<float, V_TypeTraits<float>::nlanes128> v;
+    for( int i = 0; i < v.nlanes; i++ )
+    {
+        v.s[i] = ptr[i];
+    }
+    return v;
+}
+#if CV_SIMD256
+inline v_reg<float, V_TypeTraits<float>::nlanes128 * 2>
+v256_load_expand(const float16_t* ptr)
+{
+    v_reg<float, V_TypeTraits<float>::nlanes128 * 2> v;
+    for (int i = 0; i < v.nlanes; i++)
+    {
+        v.s[i] = ptr[i];
+    }
+    return v;
+}
+#endif
+#if CV_SIMD512
+inline v_reg<float, V_TypeTraits<float>::nlanes128 * 4>
+v512_load_expand(const float16_t* ptr)
+{
+    v_reg<float, V_TypeTraits<float>::nlanes128 * 4> v;
+    for (int i = 0; i < v.nlanes; i++)
+    {
+        v.s[i] = ptr[i];
+    }
+    return v;
+}
+#endif
+
+template<int n> inline void
+v_pack_store(float16_t* ptr, const v_reg<float, n>& v)
+{
+    for( int i = 0; i < v.nlanes; i++ )
+    {
+        ptr[i] = float16_t(v.s[i]);
+    }
+}
+
+inline void v_cleanup() {}
+#if CV_SIMD256
+inline void v256_cleanup() {}
+#endif
+#if CV_SIMD512
+inline void v512_cleanup() {}
+#endif
+
+//! @}
+
+#ifndef CV_DOXYGEN
+CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
+#endif
+}
+
+#if !defined(CV_DOXYGEN)
+#undef CV_SIMD256
+#undef CV_SIMD512
+#endif
+
+#endif
diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_forward.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_forward.hpp
new file mode 100644
index 00000000..979f15a2
--- /dev/null
+++ b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_forward.hpp
@@ -0,0 +1,191 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html
+
+#ifndef CV__SIMD_FORWARD
+#error "Need to pre-define forward width"
+#endif
+
+namespace cv
+{
+
+//!
@cond IGNORED + +CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN + +/** Types **/ +#if CV__SIMD_FORWARD == 1024 +// [todo] 1024 +#error "1024-long ops not implemented yet" +#elif CV__SIMD_FORWARD == 512 +// 512 +#define __CV_VX(fun) v512_##fun +#define __CV_V_UINT8 v_uint8x64 +#define __CV_V_INT8 v_int8x64 +#define __CV_V_UINT16 v_uint16x32 +#define __CV_V_INT16 v_int16x32 +#define __CV_V_UINT32 v_uint32x16 +#define __CV_V_INT32 v_int32x16 +#define __CV_V_UINT64 v_uint64x8 +#define __CV_V_INT64 v_int64x8 +#define __CV_V_FLOAT32 v_float32x16 +#define __CV_V_FLOAT64 v_float64x8 +struct v_uint8x64; +struct v_int8x64; +struct v_uint16x32; +struct v_int16x32; +struct v_uint32x16; +struct v_int32x16; +struct v_uint64x8; +struct v_int64x8; +struct v_float32x16; +struct v_float64x8; +#elif CV__SIMD_FORWARD == 256 +// 256 +#define __CV_VX(fun) v256_##fun +#define __CV_V_UINT8 v_uint8x32 +#define __CV_V_INT8 v_int8x32 +#define __CV_V_UINT16 v_uint16x16 +#define __CV_V_INT16 v_int16x16 +#define __CV_V_UINT32 v_uint32x8 +#define __CV_V_INT32 v_int32x8 +#define __CV_V_UINT64 v_uint64x4 +#define __CV_V_INT64 v_int64x4 +#define __CV_V_FLOAT32 v_float32x8 +#define __CV_V_FLOAT64 v_float64x4 +struct v_uint8x32; +struct v_int8x32; +struct v_uint16x16; +struct v_int16x16; +struct v_uint32x8; +struct v_int32x8; +struct v_uint64x4; +struct v_int64x4; +struct v_float32x8; +struct v_float64x4; +#else +// 128 +#define __CV_VX(fun) v_##fun +#define __CV_V_UINT8 v_uint8x16 +#define __CV_V_INT8 v_int8x16 +#define __CV_V_UINT16 v_uint16x8 +#define __CV_V_INT16 v_int16x8 +#define __CV_V_UINT32 v_uint32x4 +#define __CV_V_INT32 v_int32x4 +#define __CV_V_UINT64 v_uint64x2 +#define __CV_V_INT64 v_int64x2 +#define __CV_V_FLOAT32 v_float32x4 +#define __CV_V_FLOAT64 v_float64x2 +struct v_uint8x16; +struct v_int8x16; +struct v_uint16x8; +struct v_int16x8; +struct v_uint32x4; +struct v_int32x4; +struct v_uint64x2; +struct v_int64x2; +struct v_float32x4; +struct v_float64x2; +#endif + +/** Value reordering **/ + +// Expansion +void v_expand(const __CV_V_UINT8&, __CV_V_UINT16&, __CV_V_UINT16&); +void v_expand(const __CV_V_INT8&, __CV_V_INT16&, __CV_V_INT16&); +void v_expand(const __CV_V_UINT16&, __CV_V_UINT32&, __CV_V_UINT32&); +void v_expand(const __CV_V_INT16&, __CV_V_INT32&, __CV_V_INT32&); +void v_expand(const __CV_V_UINT32&, __CV_V_UINT64&, __CV_V_UINT64&); +void v_expand(const __CV_V_INT32&, __CV_V_INT64&, __CV_V_INT64&); +// Low Expansion +__CV_V_UINT16 v_expand_low(const __CV_V_UINT8&); +__CV_V_INT16 v_expand_low(const __CV_V_INT8&); +__CV_V_UINT32 v_expand_low(const __CV_V_UINT16&); +__CV_V_INT32 v_expand_low(const __CV_V_INT16&); +__CV_V_UINT64 v_expand_low(const __CV_V_UINT32&); +__CV_V_INT64 v_expand_low(const __CV_V_INT32&); +// High Expansion +__CV_V_UINT16 v_expand_high(const __CV_V_UINT8&); +__CV_V_INT16 v_expand_high(const __CV_V_INT8&); +__CV_V_UINT32 v_expand_high(const __CV_V_UINT16&); +__CV_V_INT32 v_expand_high(const __CV_V_INT16&); +__CV_V_UINT64 v_expand_high(const __CV_V_UINT32&); +__CV_V_INT64 v_expand_high(const __CV_V_INT32&); +// Load & Low Expansion +__CV_V_UINT16 __CV_VX(load_expand)(const uchar*); +__CV_V_INT16 __CV_VX(load_expand)(const schar*); +__CV_V_UINT32 __CV_VX(load_expand)(const ushort*); +__CV_V_INT32 __CV_VX(load_expand)(const short*); +__CV_V_UINT64 __CV_VX(load_expand)(const uint*); +__CV_V_INT64 __CV_VX(load_expand)(const int*); +// Load lower 8-bit and expand into 32-bit +__CV_V_UINT32 __CV_VX(load_expand_q)(const uchar*); +__CV_V_INT32 __CV_VX(load_expand_q)(const schar*); + +// Saturating 
Pack
+__CV_V_UINT8 v_pack(const __CV_V_UINT16&, const __CV_V_UINT16&);
+__CV_V_INT8 v_pack(const __CV_V_INT16&, const __CV_V_INT16&);
+__CV_V_UINT16 v_pack(const __CV_V_UINT32&, const __CV_V_UINT32&);
+__CV_V_INT16 v_pack(const __CV_V_INT32&, const __CV_V_INT32&);
+// Non-saturating Pack
+__CV_V_UINT32 v_pack(const __CV_V_UINT64&, const __CV_V_UINT64&);
+__CV_V_INT32 v_pack(const __CV_V_INT64&, const __CV_V_INT64&);
+// Pack signed integers with unsigned saturation
+__CV_V_UINT8 v_pack_u(const __CV_V_INT16&, const __CV_V_INT16&);
+__CV_V_UINT16 v_pack_u(const __CV_V_INT32&, const __CV_V_INT32&);
+
+/** Arithmetic, bitwise and comparison operations **/
+
+// Non-saturating multiply
+#if CV_VSX
+template<typename Tvec>
+Tvec v_mul_wrap(const Tvec& a, const Tvec& b);
+#else
+__CV_V_UINT8 v_mul_wrap(const __CV_V_UINT8&, const __CV_V_UINT8&);
+__CV_V_INT8 v_mul_wrap(const __CV_V_INT8&, const __CV_V_INT8&);
+__CV_V_UINT16 v_mul_wrap(const __CV_V_UINT16&, const __CV_V_UINT16&);
+__CV_V_INT16 v_mul_wrap(const __CV_V_INT16&, const __CV_V_INT16&);
+#endif
+
+// Multiply and expand
+#if CV_VSX
+template<typename Tvec, typename Twvec>
+void v_mul_expand(const Tvec& a, const Tvec& b, Twvec& c, Twvec& d);
+#else
+void v_mul_expand(const __CV_V_UINT8&, const __CV_V_UINT8&, __CV_V_UINT16&, __CV_V_UINT16&);
+void v_mul_expand(const __CV_V_INT8&, const __CV_V_INT8&, __CV_V_INT16&, __CV_V_INT16&);
+void v_mul_expand(const __CV_V_UINT16&, const __CV_V_UINT16&, __CV_V_UINT32&, __CV_V_UINT32&);
+void v_mul_expand(const __CV_V_INT16&, const __CV_V_INT16&, __CV_V_INT32&, __CV_V_INT32&);
+void v_mul_expand(const __CV_V_UINT32&, const __CV_V_UINT32&, __CV_V_UINT64&, __CV_V_UINT64&);
+void v_mul_expand(const __CV_V_INT32&, const __CV_V_INT32&, __CV_V_INT64&, __CV_V_INT64&);
+#endif
+
+// Conversions
+__CV_V_FLOAT32 v_cvt_f32(const __CV_V_INT32& a);
+__CV_V_FLOAT32 v_cvt_f32(const __CV_V_FLOAT64& a);
+__CV_V_FLOAT32 v_cvt_f32(const __CV_V_FLOAT64& a, const __CV_V_FLOAT64& b);
+__CV_V_FLOAT64 v_cvt_f64(const __CV_V_INT32& a);
+__CV_V_FLOAT64 v_cvt_f64_high(const __CV_V_INT32& a);
+__CV_V_FLOAT64 v_cvt_f64(const __CV_V_FLOAT32& a);
+__CV_V_FLOAT64 v_cvt_f64_high(const __CV_V_FLOAT32& a);
+__CV_V_FLOAT64 v_cvt_f64(const __CV_V_INT64& a);
+
+/** Cleanup **/
+#undef CV__SIMD_FORWARD
+#undef __CV_VX
+#undef __CV_V_UINT8
+#undef __CV_V_INT8
+#undef __CV_V_UINT16
+#undef __CV_V_INT16
+#undef __CV_V_UINT32
+#undef __CV_V_INT32
+#undef __CV_V_UINT64
+#undef __CV_V_INT64
+#undef __CV_V_FLOAT32
+#undef __CV_V_FLOAT64
+
+CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
+
+//! @endcond
+
+} // cv::
\ No newline at end of file
diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_msa.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_msa.hpp
new file mode 100644
index 00000000..a1fbb093
--- /dev/null
+++ b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_msa.hpp
@@ -0,0 +1,1887 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#ifndef OPENCV_HAL_INTRIN_MSA_HPP
+#define OPENCV_HAL_INTRIN_MSA_HPP
+
+#include <algorithm>
+#include "opencv2/core/utility.hpp"
+
+namespace cv
+{
+
+//! @cond IGNORED
+CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN
+
+#define CV_SIMD128 1
+
+//MSA implements 128-bit wide vector registers shared with the 64-bit wide floating-point unit registers.
+//MSA and FPU can not be both present, unless the FPU has 64-bit floating-point registers.
+#define CV_SIMD128_64F 1 + +struct v_uint8x16 +{ + typedef uchar lane_type; + enum { nlanes = 16 }; + + v_uint8x16() {} + explicit v_uint8x16(v16u8 v) : val(v) {} + v_uint8x16(uchar v0, uchar v1, uchar v2, uchar v3, uchar v4, uchar v5, uchar v6, uchar v7, + uchar v8, uchar v9, uchar v10, uchar v11, uchar v12, uchar v13, uchar v14, uchar v15) + { + uchar v[] = {v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15}; + val = msa_ld1q_u8(v); + } + + uchar get0() const + { + return msa_getq_lane_u8(val, 0); + } + + v16u8 val; +}; + +struct v_int8x16 +{ + typedef schar lane_type; + enum { nlanes = 16 }; + + v_int8x16() {} + explicit v_int8x16(v16i8 v) : val(v) {} + v_int8x16(schar v0, schar v1, schar v2, schar v3, schar v4, schar v5, schar v6, schar v7, + schar v8, schar v9, schar v10, schar v11, schar v12, schar v13, schar v14, schar v15) + { + schar v[] = {v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15}; + val = msa_ld1q_s8(v); + } + + schar get0() const + { + return msa_getq_lane_s8(val, 0); + } + + v16i8 val; +}; + +struct v_uint16x8 +{ + typedef ushort lane_type; + enum { nlanes = 8 }; + + v_uint16x8() {} + explicit v_uint16x8(v8u16 v) : val(v) {} + v_uint16x8(ushort v0, ushort v1, ushort v2, ushort v3, ushort v4, ushort v5, ushort v6, ushort v7) + { + ushort v[] = {v0, v1, v2, v3, v4, v5, v6, v7}; + val = msa_ld1q_u16(v); + } + + ushort get0() const + { + return msa_getq_lane_u16(val, 0); + } + + v8u16 val; +}; + +struct v_int16x8 +{ + typedef short lane_type; + enum { nlanes = 8 }; + + v_int16x8() {} + explicit v_int16x8(v8i16 v) : val(v) {} + v_int16x8(short v0, short v1, short v2, short v3, short v4, short v5, short v6, short v7) + { + short v[] = {v0, v1, v2, v3, v4, v5, v6, v7}; + val = msa_ld1q_s16(v); + } + + short get0() const + { + return msa_getq_lane_s16(val, 0); + } + + v8i16 val; +}; + +struct v_uint32x4 +{ + typedef unsigned int lane_type; + enum { nlanes = 4 }; + + v_uint32x4() {} + explicit v_uint32x4(v4u32 v) : val(v) {} + v_uint32x4(unsigned int v0, unsigned int v1, unsigned int v2, unsigned int v3) + { + unsigned int v[] = {v0, v1, v2, v3}; + val = msa_ld1q_u32(v); + } + + unsigned int get0() const + { + return msa_getq_lane_u32(val, 0); + } + + v4u32 val; +}; + +struct v_int32x4 +{ + typedef int lane_type; + enum { nlanes = 4 }; + + v_int32x4() {} + explicit v_int32x4(v4i32 v) : val(v) {} + v_int32x4(int v0, int v1, int v2, int v3) + { + int v[] = {v0, v1, v2, v3}; + val = msa_ld1q_s32(v); + } + + int get0() const + { + return msa_getq_lane_s32(val, 0); + } + + v4i32 val; +}; + +struct v_float32x4 +{ + typedef float lane_type; + enum { nlanes = 4 }; + + v_float32x4() {} + explicit v_float32x4(v4f32 v) : val(v) {} + v_float32x4(float v0, float v1, float v2, float v3) + { + float v[] = {v0, v1, v2, v3}; + val = msa_ld1q_f32(v); + } + + float get0() const + { + return msa_getq_lane_f32(val, 0); + } + + v4f32 val; +}; + +struct v_uint64x2 +{ + typedef uint64 lane_type; + enum { nlanes = 2 }; + + v_uint64x2() {} + explicit v_uint64x2(v2u64 v) : val(v) {} + v_uint64x2(uint64 v0, uint64 v1) + { + uint64 v[] = {v0, v1}; + val = msa_ld1q_u64(v); + } + + uint64 get0() const + { + return msa_getq_lane_u64(val, 0); + } + + v2u64 val; +}; + +struct v_int64x2 +{ + typedef int64 lane_type; + enum { nlanes = 2 }; + + v_int64x2() {} + explicit v_int64x2(v2i64 v) : val(v) {} + v_int64x2(int64 v0, int64 v1) + { + int64 v[] = {v0, v1}; + val = msa_ld1q_s64(v); + } + + int64 get0() const + { + return msa_getq_lane_s64(val, 0); + } + + v2i64 val; +}; + 
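The structs above wrap each native MSA register type with a lane type, a lane count (nlanes), a lane-wise constructor and get0(), which is what lets generic OpenCV code drive MSA through the architecture-neutral universal-intrinsics API. A minimal sketch of how these wrappers are typically consumed follows; it is an editor's illustration rather than part of the vendored header, it assumes an OpenCV build where CV_SIMD128 is enabled, and the function name scale_add is made up for the example.

#include <opencv2/core/hal/intrin.hpp>

// y[i] = a * x[i] + b, four float lanes per iteration when a 128-bit backend is available.
void scale_add(const float* x, float* y, int len, float a, float b)
{
    using namespace cv;
    int i = 0;
#if CV_SIMD128
    const v_float32x4 va = v_setall_f32(a), vb = v_setall_f32(b);
    for (; i + v_float32x4::nlanes <= len; i += v_float32x4::nlanes)
    {
        v_float32x4 vx = v_load(x + i);      // one 128-bit register = 4 floats
        v_store(y + i, v_fma(vx, va, vb));   // fused multiply-add from this API
    }
#endif
    for (; i < len; i++)                     // scalar tail for the remainder
        y[i] = x[i] * a + b;
}

The same source also compiles against the plain C++ fallback in intrin_cpp.hpp when no SIMD backend is present, which is the portability property these wrapper structs exist to provide.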
+struct v_float64x2
+{
+    typedef double lane_type;
+    enum { nlanes = 2 };
+
+    v_float64x2() {}
+    explicit v_float64x2(v2f64 v) : val(v) {}
+    v_float64x2(double v0, double v1)
+    {
+        double v[] = {v0, v1};
+        val = msa_ld1q_f64(v);
+    }
+
+    double get0() const
+    {
+        return msa_getq_lane_f64(val, 0);
+    }
+
+    v2f64 val;
+};
+
+#define OPENCV_HAL_IMPL_MSA_INIT(_Tpv, _Tp, suffix) \
+inline v_##_Tpv v_setzero_##suffix() { return v_##_Tpv(msa_dupq_n_##suffix((_Tp)0)); } \
+inline v_##_Tpv v_setall_##suffix(_Tp v) { return v_##_Tpv(msa_dupq_n_##suffix(v)); } \
+inline v_uint8x16 v_reinterpret_as_u8(const v_##_Tpv& v) { return v_uint8x16(MSA_TPV_REINTERPRET(v16u8, v.val)); } \
+inline v_int8x16 v_reinterpret_as_s8(const v_##_Tpv& v) { return v_int8x16(MSA_TPV_REINTERPRET(v16i8, v.val)); } \
+inline v_uint16x8 v_reinterpret_as_u16(const v_##_Tpv& v) { return v_uint16x8(MSA_TPV_REINTERPRET(v8u16, v.val)); } \
+inline v_int16x8 v_reinterpret_as_s16(const v_##_Tpv& v) { return v_int16x8(MSA_TPV_REINTERPRET(v8i16, v.val)); } \
+inline v_uint32x4 v_reinterpret_as_u32(const v_##_Tpv& v) { return v_uint32x4(MSA_TPV_REINTERPRET(v4u32, v.val)); } \
+inline v_int32x4 v_reinterpret_as_s32(const v_##_Tpv& v) { return v_int32x4(MSA_TPV_REINTERPRET(v4i32, v.val)); } \
+inline v_uint64x2 v_reinterpret_as_u64(const v_##_Tpv& v) { return v_uint64x2(MSA_TPV_REINTERPRET(v2u64, v.val)); } \
+inline v_int64x2 v_reinterpret_as_s64(const v_##_Tpv& v) { return v_int64x2(MSA_TPV_REINTERPRET(v2i64, v.val)); } \
+inline v_float32x4 v_reinterpret_as_f32(const v_##_Tpv& v) { return v_float32x4(MSA_TPV_REINTERPRET(v4f32, v.val)); } \
+inline v_float64x2 v_reinterpret_as_f64(const v_##_Tpv& v) { return v_float64x2(MSA_TPV_REINTERPRET(v2f64, v.val)); }
+
+OPENCV_HAL_IMPL_MSA_INIT(uint8x16, uchar, u8)
+OPENCV_HAL_IMPL_MSA_INIT(int8x16, schar, s8)
+OPENCV_HAL_IMPL_MSA_INIT(uint16x8, ushort, u16)
+OPENCV_HAL_IMPL_MSA_INIT(int16x8, short, s16)
+OPENCV_HAL_IMPL_MSA_INIT(uint32x4, unsigned int, u32)
+OPENCV_HAL_IMPL_MSA_INIT(int32x4, int, s32)
+OPENCV_HAL_IMPL_MSA_INIT(uint64x2, uint64, u64)
+OPENCV_HAL_IMPL_MSA_INIT(int64x2, int64, s64)
+OPENCV_HAL_IMPL_MSA_INIT(float32x4, float, f32)
+OPENCV_HAL_IMPL_MSA_INIT(float64x2, double, f64)
+
+#define OPENCV_HAL_IMPL_MSA_PACK(_Tpvec, _Tpwvec, pack, mov, rshr) \
+inline _Tpvec v_##pack(const _Tpwvec& a, const _Tpwvec& b) \
+{ \
+    return _Tpvec(mov(a.val, b.val)); \
+} \
+template<int n> inline \
+_Tpvec v_rshr_##pack(const _Tpwvec& a, const _Tpwvec& b) \
+{ \
+    return _Tpvec(rshr(a.val, b.val, n)); \
+}
+
+OPENCV_HAL_IMPL_MSA_PACK(v_uint8x16, v_uint16x8, pack, msa_qpack_u16, msa_qrpackr_u16)
+OPENCV_HAL_IMPL_MSA_PACK(v_int8x16, v_int16x8, pack, msa_qpack_s16, msa_qrpackr_s16)
+OPENCV_HAL_IMPL_MSA_PACK(v_uint16x8, v_uint32x4, pack, msa_qpack_u32, msa_qrpackr_u32)
+OPENCV_HAL_IMPL_MSA_PACK(v_int16x8, v_int32x4, pack, msa_qpack_s32, msa_qrpackr_s32)
+OPENCV_HAL_IMPL_MSA_PACK(v_uint32x4, v_uint64x2, pack, msa_pack_u64, msa_rpackr_u64)
+OPENCV_HAL_IMPL_MSA_PACK(v_int32x4, v_int64x2, pack, msa_pack_s64, msa_rpackr_s64)
+OPENCV_HAL_IMPL_MSA_PACK(v_uint8x16, v_int16x8, pack_u, msa_qpacku_s16, msa_qrpackru_s16)
+OPENCV_HAL_IMPL_MSA_PACK(v_uint16x8, v_int32x4, pack_u, msa_qpacku_s32, msa_qrpackru_s32)
+
+#define OPENCV_HAL_IMPL_MSA_PACK_STORE(_Tpvec, _Tp, hreg, suffix, _Tpwvec, pack, mov, rshr) \
+inline void v_##pack##_store(_Tp* ptr, const _Tpwvec& a) \
+{ \
+    hreg a1 = mov(a.val); \
+    msa_st1_##suffix(ptr, a1); \
+} \
+template<int n> inline \
+void v_rshr_##pack##_store(_Tp* ptr, const _Tpwvec& a) \
+{ \
+    hreg a1 =
rshr(a.val, n); \ + msa_st1_##suffix(ptr, a1); \ +} + +OPENCV_HAL_IMPL_MSA_PACK_STORE(v_uint8x16, uchar, v8u8, u8, v_uint16x8, pack, msa_qmovn_u16, msa_qrshrn_n_u16) +OPENCV_HAL_IMPL_MSA_PACK_STORE(v_int8x16, schar, v8i8, s8, v_int16x8, pack, msa_qmovn_s16, msa_qrshrn_n_s16) +OPENCV_HAL_IMPL_MSA_PACK_STORE(v_uint16x8, ushort, v4u16, u16, v_uint32x4, pack, msa_qmovn_u32, msa_qrshrn_n_u32) +OPENCV_HAL_IMPL_MSA_PACK_STORE(v_int16x8, short, v4i16, s16, v_int32x4, pack, msa_qmovn_s32, msa_qrshrn_n_s32) +OPENCV_HAL_IMPL_MSA_PACK_STORE(v_uint32x4, unsigned, v2u32, u32, v_uint64x2, pack, msa_movn_u64, msa_rshrn_n_u64) +OPENCV_HAL_IMPL_MSA_PACK_STORE(v_int32x4, int, v2i32, s32, v_int64x2, pack, msa_movn_s64, msa_rshrn_n_s64) +OPENCV_HAL_IMPL_MSA_PACK_STORE(v_uint8x16, uchar, v8u8, u8, v_int16x8, pack_u, msa_qmovun_s16, msa_qrshrun_n_s16) +OPENCV_HAL_IMPL_MSA_PACK_STORE(v_uint16x8, ushort, v4u16, u16, v_int32x4, pack_u, msa_qmovun_s32, msa_qrshrun_n_s32) + +// pack boolean +inline v_uint8x16 v_pack_b(const v_uint16x8& a, const v_uint16x8& b) +{ + return v_uint8x16(msa_pack_u16(a.val, b.val)); +} + +inline v_uint8x16 v_pack_b(const v_uint32x4& a, const v_uint32x4& b, + const v_uint32x4& c, const v_uint32x4& d) +{ + return v_uint8x16(msa_pack_u16(msa_pack_u32(a.val, b.val), msa_pack_u32(c.val, d.val))); +} + +inline v_uint8x16 v_pack_b(const v_uint64x2& a, const v_uint64x2& b, const v_uint64x2& c, + const v_uint64x2& d, const v_uint64x2& e, const v_uint64x2& f, + const v_uint64x2& g, const v_uint64x2& h) +{ + v8u16 abcd = msa_pack_u32(msa_pack_u64(a.val, b.val), msa_pack_u64(c.val, d.val)); + v8u16 efgh = msa_pack_u32(msa_pack_u64(e.val, f.val), msa_pack_u64(g.val, h.val)); + return v_uint8x16(msa_pack_u16(abcd, efgh)); +} + +inline v_float32x4 v_matmul(const v_float32x4& v, const v_float32x4& m0, + const v_float32x4& m1, const v_float32x4& m2, + const v_float32x4& m3) +{ + v4f32 v0 = v.val; + v4f32 res = msa_mulq_lane_f32(m0.val, v0, 0); + res = msa_mlaq_lane_f32(res, m1.val, v0, 1); + res = msa_mlaq_lane_f32(res, m2.val, v0, 2); + res = msa_mlaq_lane_f32(res, m3.val, v0, 3); + return v_float32x4(res); +} + +inline v_float32x4 v_matmuladd(const v_float32x4& v, const v_float32x4& m0, + const v_float32x4& m1, const v_float32x4& m2, + const v_float32x4& a) +{ + v4f32 v0 = v.val; + v4f32 res = msa_mulq_lane_f32(m0.val, v0, 0); + res = msa_mlaq_lane_f32(res, m1.val, v0, 1); + res = msa_mlaq_lane_f32(res, m2.val, v0, 2); + res = msa_addq_f32(res, a.val); + return v_float32x4(res); +} + +#define OPENCV_HAL_IMPL_MSA_BIN_OP(bin_op, _Tpvec, intrin) \ +inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \ +{ \ + return _Tpvec(intrin(a.val, b.val)); \ +} \ +inline _Tpvec& operator bin_op##= (_Tpvec& a, const _Tpvec& b) \ +{ \ + a.val = intrin(a.val, b.val); \ + return a; \ +} + +OPENCV_HAL_IMPL_MSA_BIN_OP(+, v_uint8x16, msa_qaddq_u8) +OPENCV_HAL_IMPL_MSA_BIN_OP(-, v_uint8x16, msa_qsubq_u8) +OPENCV_HAL_IMPL_MSA_BIN_OP(+, v_int8x16, msa_qaddq_s8) +OPENCV_HAL_IMPL_MSA_BIN_OP(-, v_int8x16, msa_qsubq_s8) +OPENCV_HAL_IMPL_MSA_BIN_OP(+, v_uint16x8, msa_qaddq_u16) +OPENCV_HAL_IMPL_MSA_BIN_OP(-, v_uint16x8, msa_qsubq_u16) +OPENCV_HAL_IMPL_MSA_BIN_OP(+, v_int16x8, msa_qaddq_s16) +OPENCV_HAL_IMPL_MSA_BIN_OP(-, v_int16x8, msa_qsubq_s16) +OPENCV_HAL_IMPL_MSA_BIN_OP(+, v_int32x4, msa_addq_s32) +OPENCV_HAL_IMPL_MSA_BIN_OP(-, v_int32x4, msa_subq_s32) +OPENCV_HAL_IMPL_MSA_BIN_OP(*, v_int32x4, msa_mulq_s32) +OPENCV_HAL_IMPL_MSA_BIN_OP(+, v_uint32x4, msa_addq_u32) +OPENCV_HAL_IMPL_MSA_BIN_OP(-, v_uint32x4, 
msa_subq_u32) +OPENCV_HAL_IMPL_MSA_BIN_OP(*, v_uint32x4, msa_mulq_u32) +OPENCV_HAL_IMPL_MSA_BIN_OP(+, v_float32x4, msa_addq_f32) +OPENCV_HAL_IMPL_MSA_BIN_OP(-, v_float32x4, msa_subq_f32) +OPENCV_HAL_IMPL_MSA_BIN_OP(*, v_float32x4, msa_mulq_f32) +OPENCV_HAL_IMPL_MSA_BIN_OP(+, v_int64x2, msa_addq_s64) +OPENCV_HAL_IMPL_MSA_BIN_OP(-, v_int64x2, msa_subq_s64) +OPENCV_HAL_IMPL_MSA_BIN_OP(+, v_uint64x2, msa_addq_u64) +OPENCV_HAL_IMPL_MSA_BIN_OP(-, v_uint64x2, msa_subq_u64) +OPENCV_HAL_IMPL_MSA_BIN_OP(/, v_float32x4, msa_divq_f32) +OPENCV_HAL_IMPL_MSA_BIN_OP(+, v_float64x2, msa_addq_f64) +OPENCV_HAL_IMPL_MSA_BIN_OP(-, v_float64x2, msa_subq_f64) +OPENCV_HAL_IMPL_MSA_BIN_OP(*, v_float64x2, msa_mulq_f64) +OPENCV_HAL_IMPL_MSA_BIN_OP(/, v_float64x2, msa_divq_f64) + +// saturating multiply 8-bit, 16-bit +#define OPENCV_HAL_IMPL_MSA_MUL_SAT(_Tpvec, _Tpwvec) \ +inline _Tpvec operator * (const _Tpvec& a, const _Tpvec& b) \ +{ \ + _Tpwvec c, d; \ + v_mul_expand(a, b, c, d); \ + return v_pack(c, d); \ +} \ +inline _Tpvec& operator *= (_Tpvec& a, const _Tpvec& b) \ +{a = a * b; return a; } + +OPENCV_HAL_IMPL_MSA_MUL_SAT(v_int8x16, v_int16x8) +OPENCV_HAL_IMPL_MSA_MUL_SAT(v_uint8x16, v_uint16x8) +OPENCV_HAL_IMPL_MSA_MUL_SAT(v_int16x8, v_int32x4) +OPENCV_HAL_IMPL_MSA_MUL_SAT(v_uint16x8, v_uint32x4) + +// Multiply and expand +inline void v_mul_expand(const v_int8x16& a, const v_int8x16& b, + v_int16x8& c, v_int16x8& d) +{ + v16i8 a_lo, a_hi, b_lo, b_hi; + + ILVRL_B2_SB(a.val, msa_dupq_n_s8(0), a_lo, a_hi); + ILVRL_B2_SB(b.val, msa_dupq_n_s8(0), b_lo, b_hi); + c.val = msa_mulq_s16(msa_paddlq_s8(a_lo), msa_paddlq_s8(b_lo)); + d.val = msa_mulq_s16(msa_paddlq_s8(a_hi), msa_paddlq_s8(b_hi)); +} + +inline void v_mul_expand(const v_uint8x16& a, const v_uint8x16& b, + v_uint16x8& c, v_uint16x8& d) +{ + v16u8 a_lo, a_hi, b_lo, b_hi; + + ILVRL_B2_UB(a.val, msa_dupq_n_u8(0), a_lo, a_hi); + ILVRL_B2_UB(b.val, msa_dupq_n_u8(0), b_lo, b_hi); + c.val = msa_mulq_u16(msa_paddlq_u8(a_lo), msa_paddlq_u8(b_lo)); + d.val = msa_mulq_u16(msa_paddlq_u8(a_hi), msa_paddlq_u8(b_hi)); +} + +inline void v_mul_expand(const v_int16x8& a, const v_int16x8& b, + v_int32x4& c, v_int32x4& d) +{ + v8i16 a_lo, a_hi, b_lo, b_hi; + + ILVRL_H2_SH(a.val, msa_dupq_n_s16(0), a_lo, a_hi); + ILVRL_H2_SH(b.val, msa_dupq_n_s16(0), b_lo, b_hi); + c.val = msa_mulq_s32(msa_paddlq_s16(a_lo), msa_paddlq_s16(b_lo)); + d.val = msa_mulq_s32(msa_paddlq_s16(a_hi), msa_paddlq_s16(b_hi)); +} + +inline void v_mul_expand(const v_uint16x8& a, const v_uint16x8& b, + v_uint32x4& c, v_uint32x4& d) +{ + v8u16 a_lo, a_hi, b_lo, b_hi; + + ILVRL_H2_UH(a.val, msa_dupq_n_u16(0), a_lo, a_hi); + ILVRL_H2_UH(b.val, msa_dupq_n_u16(0), b_lo, b_hi); + c.val = msa_mulq_u32(msa_paddlq_u16(a_lo), msa_paddlq_u16(b_lo)); + d.val = msa_mulq_u32(msa_paddlq_u16(a_hi), msa_paddlq_u16(b_hi)); +} + +inline void v_mul_expand(const v_uint32x4& a, const v_uint32x4& b, + v_uint64x2& c, v_uint64x2& d) +{ + v4u32 a_lo, a_hi, b_lo, b_hi; + + ILVRL_W2_UW(a.val, msa_dupq_n_u32(0), a_lo, a_hi); + ILVRL_W2_UW(b.val, msa_dupq_n_u32(0), b_lo, b_hi); + c.val = msa_mulq_u64(msa_paddlq_u32(a_lo), msa_paddlq_u32(b_lo)); + d.val = msa_mulq_u64(msa_paddlq_u32(a_hi), msa_paddlq_u32(b_hi)); +} + +inline v_int16x8 v_mul_hi(const v_int16x8& a, const v_int16x8& b) +{ + v8i16 a_lo, a_hi, b_lo, b_hi; + + ILVRL_H2_SH(a.val, msa_dupq_n_s16(0), a_lo, a_hi); + ILVRL_H2_SH(b.val, msa_dupq_n_s16(0), b_lo, b_hi); + + return v_int16x8(msa_packr_s32(msa_mulq_s32(msa_paddlq_s16(a_lo), msa_paddlq_s16(b_lo)), + 
msa_mulq_s32(msa_paddlq_s16(a_hi), msa_paddlq_s16(b_hi)), 16)); +} + +inline v_uint16x8 v_mul_hi(const v_uint16x8& a, const v_uint16x8& b) +{ + v8u16 a_lo, a_hi, b_lo, b_hi; + + ILVRL_H2_UH(a.val, msa_dupq_n_u16(0), a_lo, a_hi); + ILVRL_H2_UH(b.val, msa_dupq_n_u16(0), b_lo, b_hi); + + return v_uint16x8(msa_packr_u32(msa_mulq_u32(msa_paddlq_u16(a_lo), msa_paddlq_u16(b_lo)), + msa_mulq_u32(msa_paddlq_u16(a_hi), msa_paddlq_u16(b_hi)), 16)); +} + +//////// Dot Product //////// + +// 16 >> 32 +inline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b) +{ return v_int32x4(msa_dotp_s_w(a.val, b.val)); } +inline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b, const v_int32x4& c) +{ return v_int32x4(msa_dpadd_s_w(c.val , a.val, b.val)); } + +// 32 >> 64 +inline v_int64x2 v_dotprod(const v_int32x4& a, const v_int32x4& b) +{ return v_int64x2(msa_dotp_s_d(a.val, b.val)); } +inline v_int64x2 v_dotprod(const v_int32x4& a, const v_int32x4& b, const v_int64x2& c) +{ return v_int64x2(msa_dpadd_s_d(c.val , a.val, b.val)); } + +// 8 >> 32 +inline v_uint32x4 v_dotprod_expand(const v_uint8x16& a, const v_uint8x16& b) +{ + v8u16 even_a = msa_shrq_n_u16(msa_shlq_n_u16(MSA_TPV_REINTERPRET(v8u16, a.val), 8), 8); + v8u16 odd_a = msa_shrq_n_u16(MSA_TPV_REINTERPRET(v8u16, a.val), 8); + v8u16 even_b = msa_shrq_n_u16(msa_shlq_n_u16(MSA_TPV_REINTERPRET(v8u16, b.val), 8), 8); + v8u16 odd_b = msa_shrq_n_u16(MSA_TPV_REINTERPRET(v8u16, b.val), 8); + v4u32 prod = msa_dotp_u_w(even_a, even_b); + return v_uint32x4(msa_dpadd_u_w(prod, odd_a, odd_b)); +} +inline v_uint32x4 v_dotprod_expand(const v_uint8x16& a, const v_uint8x16& b, const v_uint32x4& c) +{ + v8u16 even_a = msa_shrq_n_u16(msa_shlq_n_u16(MSA_TPV_REINTERPRET(v8u16, a.val), 8), 8); + v8u16 odd_a = msa_shrq_n_u16(MSA_TPV_REINTERPRET(v8u16, a.val), 8); + v8u16 even_b = msa_shrq_n_u16(msa_shlq_n_u16(MSA_TPV_REINTERPRET(v8u16, b.val), 8), 8); + v8u16 odd_b = msa_shrq_n_u16(MSA_TPV_REINTERPRET(v8u16, b.val), 8); + v4u32 prod = msa_dpadd_u_w(c.val, even_a, even_b); + return v_uint32x4(msa_dpadd_u_w(prod, odd_a, odd_b)); +} + +inline v_int32x4 v_dotprod_expand(const v_int8x16& a, const v_int8x16& b) +{ + v8i16 prod = msa_dotp_s_h(a.val, b.val); + return v_int32x4(msa_hadd_s32(prod, prod)); +} +inline v_int32x4 v_dotprod_expand(const v_int8x16& a, const v_int8x16& b, + const v_int32x4& c) +{ return v_dotprod_expand(a, b) + c; } + +// 16 >> 64 +inline v_uint64x2 v_dotprod_expand(const v_uint16x8& a, const v_uint16x8& b) +{ + v4u32 even_a = msa_shrq_n_u32(msa_shlq_n_u32(MSA_TPV_REINTERPRET(v4u32, a.val), 16), 16); + v4u32 odd_a = msa_shrq_n_u32(MSA_TPV_REINTERPRET(v4u32, a.val), 16); + v4u32 even_b = msa_shrq_n_u32(msa_shlq_n_u32(MSA_TPV_REINTERPRET(v4u32, b.val), 16), 16); + v4u32 odd_b = msa_shrq_n_u32(MSA_TPV_REINTERPRET(v4u32, b.val), 16); + v2u64 prod = msa_dotp_u_d(even_a, even_b); + return v_uint64x2(msa_dpadd_u_d(prod, odd_a, odd_b)); +} +inline v_uint64x2 v_dotprod_expand(const v_uint16x8& a, const v_uint16x8& b, + const v_uint64x2& c) +{ + v4u32 even_a = msa_shrq_n_u32(msa_shlq_n_u32(MSA_TPV_REINTERPRET(v4u32, a.val), 16), 16); + v4u32 odd_a = msa_shrq_n_u32(MSA_TPV_REINTERPRET(v4u32, a.val), 16); + v4u32 even_b = msa_shrq_n_u32(msa_shlq_n_u32(MSA_TPV_REINTERPRET(v4u32, b.val), 16), 16); + v4u32 odd_b = msa_shrq_n_u32(MSA_TPV_REINTERPRET(v4u32, b.val), 16); + v2u64 prod = msa_dpadd_u_d(c.val, even_a, even_b); + return v_uint64x2(msa_dpadd_u_d(prod, odd_a, odd_b)); +} + +inline v_int64x2 v_dotprod_expand(const v_int16x8& a, const v_int16x8& b) +{ + 
v4i32 prod = msa_dotp_s_w(a.val, b.val); + return v_int64x2(msa_hadd_s64(prod, prod)); +} +inline v_int64x2 v_dotprod_expand(const v_int16x8& a, const v_int16x8& b, const v_int64x2& c) +{ return v_dotprod_expand(a, b) + c; } + +// 32 >> 64f +inline v_float64x2 v_dotprod_expand(const v_int32x4& a, const v_int32x4& b) +{ return v_cvt_f64(v_dotprod(a, b)); } +inline v_float64x2 v_dotprod_expand(const v_int32x4& a, const v_int32x4& b, const v_float64x2& c) +{ return v_dotprod_expand(a, b) + c; } + + +//////// Fast Dot Product //////// + +// 16 >> 32 +inline v_int32x4 v_dotprod_fast(const v_int16x8& a, const v_int16x8& b) +{ return v_dotprod(a, b); } +inline v_int32x4 v_dotprod_fast(const v_int16x8& a, const v_int16x8& b, const v_int32x4& c) +{ return v_dotprod(a, b, c); } + +// 32 >> 64 +inline v_int64x2 v_dotprod_fast(const v_int32x4& a, const v_int32x4& b) +{ return v_dotprod(a, b); } +inline v_int64x2 v_dotprod_fast(const v_int32x4& a, const v_int32x4& b, const v_int64x2& c) +{ return v_dotprod(a, b, c); } + +// 8 >> 32 +inline v_uint32x4 v_dotprod_expand_fast(const v_uint8x16& a, const v_uint8x16& b) +{ return v_dotprod_expand(a, b); } +inline v_uint32x4 v_dotprod_expand_fast(const v_uint8x16& a, const v_uint8x16& b, const v_uint32x4& c) +{ return v_dotprod_expand(a, b, c); } +inline v_int32x4 v_dotprod_expand_fast(const v_int8x16& a, const v_int8x16& b) +{ return v_dotprod_expand(a, b); } +inline v_int32x4 v_dotprod_expand_fast(const v_int8x16& a, const v_int8x16& b, const v_int32x4& c) +{ return v_dotprod_expand(a, b, c); } + +// 16 >> 64 +inline v_uint64x2 v_dotprod_expand_fast(const v_uint16x8& a, const v_uint16x8& b) +{ return v_dotprod_expand(a, b); } +inline v_uint64x2 v_dotprod_expand_fast(const v_uint16x8& a, const v_uint16x8& b, const v_uint64x2& c) +{ return v_dotprod_expand(a, b, c); } +inline v_int64x2 v_dotprod_expand_fast(const v_int16x8& a, const v_int16x8& b) +{ return v_dotprod_expand(a, b); } +inline v_int64x2 v_dotprod_expand_fast(const v_int16x8& a, const v_int16x8& b, const v_int64x2& c) +{ return v_dotprod_expand(a, b, c); } + +// 32 >> 64f +inline v_float64x2 v_dotprod_expand_fast(const v_int32x4& a, const v_int32x4& b) +{ return v_dotprod_expand(a, b); } +inline v_float64x2 v_dotprod_expand_fast(const v_int32x4& a, const v_int32x4& b, const v_float64x2& c) +{ return v_dotprod_expand(a, b, c); } + +#define OPENCV_HAL_IMPL_MSA_LOGIC_OP(_Tpvec, _Tpv, suffix) \ +OPENCV_HAL_IMPL_MSA_BIN_OP(&, _Tpvec, msa_andq_##suffix) \ +OPENCV_HAL_IMPL_MSA_BIN_OP(|, _Tpvec, msa_orrq_##suffix) \ +OPENCV_HAL_IMPL_MSA_BIN_OP(^, _Tpvec, msa_eorq_##suffix) \ +inline _Tpvec operator ~ (const _Tpvec& a) \ +{ \ + return _Tpvec(MSA_TPV_REINTERPRET(_Tpv, msa_mvnq_u8(MSA_TPV_REINTERPRET(v16u8, a.val)))); \ +} + +OPENCV_HAL_IMPL_MSA_LOGIC_OP(v_uint8x16, v16u8, u8) +OPENCV_HAL_IMPL_MSA_LOGIC_OP(v_int8x16, v16i8, s8) +OPENCV_HAL_IMPL_MSA_LOGIC_OP(v_uint16x8, v8u16, u16) +OPENCV_HAL_IMPL_MSA_LOGIC_OP(v_int16x8, v8i16, s16) +OPENCV_HAL_IMPL_MSA_LOGIC_OP(v_uint32x4, v4u32, u32) +OPENCV_HAL_IMPL_MSA_LOGIC_OP(v_int32x4, v4i32, s32) +OPENCV_HAL_IMPL_MSA_LOGIC_OP(v_uint64x2, v2u64, u64) +OPENCV_HAL_IMPL_MSA_LOGIC_OP(v_int64x2, v2i64, s64) + +#define OPENCV_HAL_IMPL_MSA_FLT_BIT_OP(bin_op, intrin) \ +inline v_float32x4 operator bin_op (const v_float32x4& a, const v_float32x4& b) \ +{ \ + return v_float32x4(MSA_TPV_REINTERPRET(v4f32, intrin(MSA_TPV_REINTERPRET(v4i32, a.val), MSA_TPV_REINTERPRET(v4i32, b.val)))); \ +} \ +inline v_float32x4& operator bin_op##= (v_float32x4& a, const v_float32x4& b) \ +{ \ + 
a.val = MSA_TPV_REINTERPRET(v4f32, intrin(MSA_TPV_REINTERPRET(v4i32, a.val), MSA_TPV_REINTERPRET(v4i32, b.val))); \ + return a; \ +} + +OPENCV_HAL_IMPL_MSA_FLT_BIT_OP(&, msa_andq_s32) +OPENCV_HAL_IMPL_MSA_FLT_BIT_OP(|, msa_orrq_s32) +OPENCV_HAL_IMPL_MSA_FLT_BIT_OP(^, msa_eorq_s32) + +inline v_float32x4 operator ~ (const v_float32x4& a) +{ + return v_float32x4(MSA_TPV_REINTERPRET(v4f32, msa_mvnq_s32(MSA_TPV_REINTERPRET(v4i32, a.val)))); +} + +/* v_abs */ +#define OPENCV_HAL_IMPL_MSA_ABS(_Tpuvec, _Tpsvec, usuffix, ssuffix) \ +inline _Tpuvec v_abs(const _Tpsvec& a) \ +{ \ + return v_reinterpret_as_##usuffix(_Tpsvec(msa_absq_##ssuffix(a.val))); \ +} + +OPENCV_HAL_IMPL_MSA_ABS(v_uint8x16, v_int8x16, u8, s8) +OPENCV_HAL_IMPL_MSA_ABS(v_uint16x8, v_int16x8, u16, s16) +OPENCV_HAL_IMPL_MSA_ABS(v_uint32x4, v_int32x4, u32, s32) + +/* v_abs(float), v_sqrt, v_invsqrt */ +#define OPENCV_HAL_IMPL_MSA_BASIC_FUNC(_Tpvec, func, intrin) \ +inline _Tpvec func(const _Tpvec& a) \ +{ \ + return _Tpvec(intrin(a.val)); \ +} + +OPENCV_HAL_IMPL_MSA_BASIC_FUNC(v_float32x4, v_abs, msa_absq_f32) +OPENCV_HAL_IMPL_MSA_BASIC_FUNC(v_float64x2, v_abs, msa_absq_f64) +OPENCV_HAL_IMPL_MSA_BASIC_FUNC(v_float32x4, v_sqrt, msa_sqrtq_f32) +OPENCV_HAL_IMPL_MSA_BASIC_FUNC(v_float32x4, v_invsqrt, msa_rsqrtq_f32) +OPENCV_HAL_IMPL_MSA_BASIC_FUNC(v_float64x2, v_sqrt, msa_sqrtq_f64) +OPENCV_HAL_IMPL_MSA_BASIC_FUNC(v_float64x2, v_invsqrt, msa_rsqrtq_f64) + +#define OPENCV_HAL_IMPL_MSA_DBL_BIT_OP(bin_op, intrin) \ +inline v_float64x2 operator bin_op (const v_float64x2& a, const v_float64x2& b) \ +{ \ + return v_float64x2(MSA_TPV_REINTERPRET(v2f64, intrin(MSA_TPV_REINTERPRET(v2i64, a.val), MSA_TPV_REINTERPRET(v2i64, b.val)))); \ +} \ +inline v_float64x2& operator bin_op##= (v_float64x2& a, const v_float64x2& b) \ +{ \ + a.val = MSA_TPV_REINTERPRET(v2f64, intrin(MSA_TPV_REINTERPRET(v2i64, a.val), MSA_TPV_REINTERPRET(v2i64, b.val))); \ + return a; \ +} + +OPENCV_HAL_IMPL_MSA_DBL_BIT_OP(&, msa_andq_s64) +OPENCV_HAL_IMPL_MSA_DBL_BIT_OP(|, msa_orrq_s64) +OPENCV_HAL_IMPL_MSA_DBL_BIT_OP(^, msa_eorq_s64) + +inline v_float64x2 operator ~ (const v_float64x2& a) +{ + return v_float64x2(MSA_TPV_REINTERPRET(v2f64, msa_mvnq_s32(MSA_TPV_REINTERPRET(v4i32, a.val)))); +} + +// TODO: exp, log, sin, cos + +#define OPENCV_HAL_IMPL_MSA_BIN_FUNC(_Tpvec, func, intrin) \ +inline _Tpvec func(const _Tpvec& a, const _Tpvec& b) \ +{ \ + return _Tpvec(intrin(a.val, b.val)); \ +} + +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_uint8x16, v_min, msa_minq_u8) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_uint8x16, v_max, msa_maxq_u8) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_int8x16, v_min, msa_minq_s8) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_int8x16, v_max, msa_maxq_s8) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_uint16x8, v_min, msa_minq_u16) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_uint16x8, v_max, msa_maxq_u16) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_int16x8, v_min, msa_minq_s16) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_int16x8, v_max, msa_maxq_s16) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_uint32x4, v_min, msa_minq_u32) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_uint32x4, v_max, msa_maxq_u32) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_int32x4, v_min, msa_minq_s32) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_int32x4, v_max, msa_maxq_s32) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_float32x4, v_min, msa_minq_f32) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_float32x4, v_max, msa_maxq_f32) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_float64x2, v_min, msa_minq_f64) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_float64x2, v_max, msa_maxq_f64) + +#define OPENCV_HAL_IMPL_MSA_INT_CMP_OP(_Tpvec, _Tpv, suffix, not_suffix) \ +inline _Tpvec operator == 
(const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(MSA_TPV_REINTERPRET(_Tpv, msa_ceqq_##suffix(a.val, b.val))); } \ +inline _Tpvec operator != (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(MSA_TPV_REINTERPRET(_Tpv, msa_mvnq_##not_suffix(msa_ceqq_##suffix(a.val, b.val)))); } \ +inline _Tpvec operator < (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(MSA_TPV_REINTERPRET(_Tpv, msa_cltq_##suffix(a.val, b.val))); } \ +inline _Tpvec operator > (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(MSA_TPV_REINTERPRET(_Tpv, msa_cgtq_##suffix(a.val, b.val))); } \ +inline _Tpvec operator <= (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(MSA_TPV_REINTERPRET(_Tpv, msa_cleq_##suffix(a.val, b.val))); } \ +inline _Tpvec operator >= (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(MSA_TPV_REINTERPRET(_Tpv, msa_cgeq_##suffix(a.val, b.val))); } + +OPENCV_HAL_IMPL_MSA_INT_CMP_OP(v_uint8x16, v16u8, u8, u8) +OPENCV_HAL_IMPL_MSA_INT_CMP_OP(v_int8x16, v16i8, s8, u8) +OPENCV_HAL_IMPL_MSA_INT_CMP_OP(v_uint16x8, v8u16, u16, u16) +OPENCV_HAL_IMPL_MSA_INT_CMP_OP(v_int16x8, v8i16, s16, u16) +OPENCV_HAL_IMPL_MSA_INT_CMP_OP(v_uint32x4, v4u32, u32, u32) +OPENCV_HAL_IMPL_MSA_INT_CMP_OP(v_int32x4, v4i32, s32, u32) +OPENCV_HAL_IMPL_MSA_INT_CMP_OP(v_float32x4, v4f32, f32, u32) +OPENCV_HAL_IMPL_MSA_INT_CMP_OP(v_uint64x2, v2u64, u64, u64) +OPENCV_HAL_IMPL_MSA_INT_CMP_OP(v_int64x2, v2i64, s64, u64) +OPENCV_HAL_IMPL_MSA_INT_CMP_OP(v_float64x2, v2f64, f64, u64) + +inline v_float32x4 v_not_nan(const v_float32x4& a) +{ return v_float32x4(MSA_TPV_REINTERPRET(v4f32, msa_ceqq_f32(a.val, a.val))); } +inline v_float64x2 v_not_nan(const v_float64x2& a) +{ return v_float64x2(MSA_TPV_REINTERPRET(v2f64, msa_ceqq_f64(a.val, a.val))); } + +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_uint8x16, v_add_wrap, msa_addq_u8) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_int8x16, v_add_wrap, msa_addq_s8) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_uint16x8, v_add_wrap, msa_addq_u16) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_int16x8, v_add_wrap, msa_addq_s16) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_uint8x16, v_sub_wrap, msa_subq_u8) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_int8x16, v_sub_wrap, msa_subq_s8) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_uint16x8, v_sub_wrap, msa_subq_u16) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_int16x8, v_sub_wrap, msa_subq_s16) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_uint8x16, v_mul_wrap, msa_mulq_u8) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_int8x16, v_mul_wrap, msa_mulq_s8) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_uint16x8, v_mul_wrap, msa_mulq_u16) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_int16x8, v_mul_wrap, msa_mulq_s16) + +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_uint8x16, v_absdiff, msa_abdq_u8) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_uint16x8, v_absdiff, msa_abdq_u16) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_uint32x4, v_absdiff, msa_abdq_u32) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_float32x4, v_absdiff, msa_abdq_f32) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_float64x2, v_absdiff, msa_abdq_f64) + +/** Saturating absolute difference **/ +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_int8x16, v_absdiffs, msa_qabdq_s8) +OPENCV_HAL_IMPL_MSA_BIN_FUNC(v_int16x8, v_absdiffs, msa_qabdq_s16) + +#define OPENCV_HAL_IMPL_MSA_BIN_FUNC2(_Tpvec, _Tpvec2, _Tpv, func, intrin) \ +inline _Tpvec2 func(const _Tpvec& a, const _Tpvec& b) \ +{ \ + return _Tpvec2(MSA_TPV_REINTERPRET(_Tpv, intrin(a.val, b.val))); \ +} + +OPENCV_HAL_IMPL_MSA_BIN_FUNC2(v_int8x16, v_uint8x16, v16u8, v_absdiff, msa_abdq_s8) +OPENCV_HAL_IMPL_MSA_BIN_FUNC2(v_int16x8, v_uint16x8, v8u16, v_absdiff, msa_abdq_s16) +OPENCV_HAL_IMPL_MSA_BIN_FUNC2(v_int32x4, v_uint32x4, v4u32, v_absdiff, msa_abdq_s32) + +/* v_magnitude, 
v_sqr_magnitude, v_fma, v_muladd */
+inline v_float32x4 v_magnitude(const v_float32x4& a, const v_float32x4& b)
+{
+    v_float32x4 x(msa_mlaq_f32(msa_mulq_f32(a.val, a.val), b.val, b.val));
+    return v_sqrt(x);
+}
+
+inline v_float32x4 v_sqr_magnitude(const v_float32x4& a, const v_float32x4& b)
+{
+    return v_float32x4(msa_mlaq_f32(msa_mulq_f32(a.val, a.val), b.val, b.val));
+}
+
+inline v_float32x4 v_fma(const v_float32x4& a, const v_float32x4& b, const v_float32x4& c)
+{
+    return v_float32x4(msa_mlaq_f32(c.val, a.val, b.val));
+}
+
+inline v_int32x4 v_fma(const v_int32x4& a, const v_int32x4& b, const v_int32x4& c)
+{
+    return v_int32x4(msa_mlaq_s32(c.val, a.val, b.val));
+}
+
+inline v_float32x4 v_muladd(const v_float32x4& a, const v_float32x4& b, const v_float32x4& c)
+{
+    return v_fma(a, b, c);
+}
+
+inline v_int32x4 v_muladd(const v_int32x4& a, const v_int32x4& b, const v_int32x4& c)
+{
+    return v_fma(a, b, c);
+}
+
+inline v_float64x2 v_magnitude(const v_float64x2& a, const v_float64x2& b)
+{
+    v_float64x2 x(msa_mlaq_f64(msa_mulq_f64(a.val, a.val), b.val, b.val));
+    return v_sqrt(x);
+}
+
+inline v_float64x2 v_sqr_magnitude(const v_float64x2& a, const v_float64x2& b)
+{
+    return v_float64x2(msa_mlaq_f64(msa_mulq_f64(a.val, a.val), b.val, b.val));
+}
+
+inline v_float64x2 v_fma(const v_float64x2& a, const v_float64x2& b, const v_float64x2& c)
+{
+    return v_float64x2(msa_mlaq_f64(c.val, a.val, b.val));
+}
+
+inline v_float64x2 v_muladd(const v_float64x2& a, const v_float64x2& b, const v_float64x2& c)
+{
+    return v_fma(a, b, c);
+}
+
+// trade efficiency for convenience
+#define OPENCV_HAL_IMPL_MSA_SHIFT_OP(_Tpvec, suffix, _Tps, ssuffix) \
+inline _Tpvec operator << (const _Tpvec& a, int n) \
+{ return _Tpvec(msa_shlq_##suffix(a.val, msa_dupq_n_##ssuffix((_Tps)n))); } \
+inline _Tpvec operator >> (const _Tpvec& a, int n) \
+{ return _Tpvec(msa_shrq_##suffix(a.val, msa_dupq_n_##ssuffix((_Tps)n))); } \
+template<int n> inline _Tpvec v_shl(const _Tpvec& a) \
+{ return _Tpvec(msa_shlq_n_##suffix(a.val, n)); } \
+template<int n> inline _Tpvec v_shr(const _Tpvec& a) \
+{ return _Tpvec(msa_shrq_n_##suffix(a.val, n)); } \
+template<int n> inline _Tpvec v_rshr(const _Tpvec& a) \
+{ return _Tpvec(msa_rshrq_n_##suffix(a.val, n)); }
+
+OPENCV_HAL_IMPL_MSA_SHIFT_OP(v_uint8x16, u8, schar, s8)
+OPENCV_HAL_IMPL_MSA_SHIFT_OP(v_int8x16, s8, schar, s8)
+OPENCV_HAL_IMPL_MSA_SHIFT_OP(v_uint16x8, u16, short, s16)
+OPENCV_HAL_IMPL_MSA_SHIFT_OP(v_int16x8, s16, short, s16)
+OPENCV_HAL_IMPL_MSA_SHIFT_OP(v_uint32x4, u32, int, s32)
+OPENCV_HAL_IMPL_MSA_SHIFT_OP(v_int32x4, s32, int, s32)
+OPENCV_HAL_IMPL_MSA_SHIFT_OP(v_uint64x2, u64, int64, s64)
+OPENCV_HAL_IMPL_MSA_SHIFT_OP(v_int64x2, s64, int64, s64)
+
+/* v_rotate_right, v_rotate_left */
+#define OPENCV_HAL_IMPL_MSA_ROTATE_OP(_Tpvec, _Tpv, _Tpvs, suffix) \
+template<int n> inline _Tpvec v_rotate_right(const _Tpvec& a) \
+{ \
+    return _Tpvec(MSA_TPV_REINTERPRET(_Tpv, msa_extq_##suffix(MSA_TPV_REINTERPRET(_Tpvs, a.val), msa_dupq_n_##suffix(0), n))); \
+} \
+template<int n> inline _Tpvec v_rotate_left(const _Tpvec& a) \
+{ \
+    return _Tpvec(MSA_TPV_REINTERPRET(_Tpv, msa_extq_##suffix(msa_dupq_n_##suffix(0), MSA_TPV_REINTERPRET(_Tpvs, a.val), _Tpvec::nlanes - n))); \
+} \
+template<> inline _Tpvec v_rotate_left<0>(const _Tpvec& a) \
+{ \
+    return a; \
+} \
+template<int n> inline _Tpvec v_rotate_right(const _Tpvec& a, const _Tpvec& b) \
+{ \
+    return _Tpvec(MSA_TPV_REINTERPRET(_Tpv, msa_extq_##suffix(MSA_TPV_REINTERPRET(_Tpvs, a.val), MSA_TPV_REINTERPRET(_Tpvs, b.val), n))); \
+} \
+template<int n> inline _Tpvec
v_rotate_left(const _Tpvec& a, const _Tpvec& b) \ +{ \ + return _Tpvec(MSA_TPV_REINTERPRET(_Tpv, msa_extq_##suffix(MSA_TPV_REINTERPRET(_Tpvs, b.val), MSA_TPV_REINTERPRET(_Tpvs, a.val), _Tpvec::nlanes - n))); \ +} \ +template<> inline _Tpvec v_rotate_left<0>(const _Tpvec& a, const _Tpvec& b) \ +{ \ + CV_UNUSED(b); \ + return a; \ +} + +OPENCV_HAL_IMPL_MSA_ROTATE_OP(v_uint8x16, v16u8, v16i8, s8) +OPENCV_HAL_IMPL_MSA_ROTATE_OP(v_int8x16, v16i8, v16i8, s8) +OPENCV_HAL_IMPL_MSA_ROTATE_OP(v_uint16x8, v8u16, v8i16, s16) +OPENCV_HAL_IMPL_MSA_ROTATE_OP(v_int16x8, v8i16, v8i16, s16) +OPENCV_HAL_IMPL_MSA_ROTATE_OP(v_uint32x4, v4u32, v4i32, s32) +OPENCV_HAL_IMPL_MSA_ROTATE_OP(v_int32x4, v4i32, v4i32, s32) +OPENCV_HAL_IMPL_MSA_ROTATE_OP(v_float32x4, v4f32, v4i32, s32) +OPENCV_HAL_IMPL_MSA_ROTATE_OP(v_uint64x2, v2u64, v2i64, s64) +OPENCV_HAL_IMPL_MSA_ROTATE_OP(v_int64x2, v2i64, v2i64, s64) +OPENCV_HAL_IMPL_MSA_ROTATE_OP(v_float64x2, v2f64, v2i64, s64) + +#define OPENCV_HAL_IMPL_MSA_LOADSTORE_OP(_Tpvec, _Tp, suffix) \ +inline _Tpvec v_load(const _Tp* ptr) \ +{ return _Tpvec(msa_ld1q_##suffix(ptr)); } \ +inline _Tpvec v_load_aligned(const _Tp* ptr) \ +{ return _Tpvec(msa_ld1q_##suffix(ptr)); } \ +inline _Tpvec v_load_low(const _Tp* ptr) \ +{ return _Tpvec(msa_combine_##suffix(msa_ld1_##suffix(ptr), msa_dup_n_##suffix((_Tp)0))); } \ +inline _Tpvec v_load_halves(const _Tp* ptr0, const _Tp* ptr1) \ +{ return _Tpvec(msa_combine_##suffix(msa_ld1_##suffix(ptr0), msa_ld1_##suffix(ptr1))); } \ +inline void v_store(_Tp* ptr, const _Tpvec& a) \ +{ msa_st1q_##suffix(ptr, a.val); } \ +inline void v_store_aligned(_Tp* ptr, const _Tpvec& a) \ +{ msa_st1q_##suffix(ptr, a.val); } \ +inline void v_store_aligned_nocache(_Tp* ptr, const _Tpvec& a) \ +{ msa_st1q_##suffix(ptr, a.val); } \ +inline void v_store(_Tp* ptr, const _Tpvec& a, hal::StoreMode /*mode*/) \ +{ msa_st1q_##suffix(ptr, a.val); } \ +inline void v_store_low(_Tp* ptr, const _Tpvec& a) \ +{ \ + int n = _Tpvec::nlanes; \ + for( int i = 0; i < (n/2); i++ ) \ + ptr[i] = a.val[i]; \ +} \ +inline void v_store_high(_Tp* ptr, const _Tpvec& a) \ +{ \ + int n = _Tpvec::nlanes; \ + for( int i = 0; i < (n/2); i++ ) \ + ptr[i] = a.val[i+(n/2)]; \ +} + +OPENCV_HAL_IMPL_MSA_LOADSTORE_OP(v_uint8x16, uchar, u8) +OPENCV_HAL_IMPL_MSA_LOADSTORE_OP(v_int8x16, schar, s8) +OPENCV_HAL_IMPL_MSA_LOADSTORE_OP(v_uint16x8, ushort, u16) +OPENCV_HAL_IMPL_MSA_LOADSTORE_OP(v_int16x8, short, s16) +OPENCV_HAL_IMPL_MSA_LOADSTORE_OP(v_uint32x4, unsigned, u32) +OPENCV_HAL_IMPL_MSA_LOADSTORE_OP(v_int32x4, int, s32) +OPENCV_HAL_IMPL_MSA_LOADSTORE_OP(v_uint64x2, uint64, u64) +OPENCV_HAL_IMPL_MSA_LOADSTORE_OP(v_int64x2, int64, s64) +OPENCV_HAL_IMPL_MSA_LOADSTORE_OP(v_float32x4, float, f32) +OPENCV_HAL_IMPL_MSA_LOADSTORE_OP(v_float64x2, double, f64) + + +/** Reverse **/ +inline v_uint8x16 v_reverse(const v_uint8x16 &a) +{ + v_uint8x16 c = v_uint8x16((v16u8)__builtin_msa_vshf_b((v16i8)((v2i64){0x08090A0B0C0D0E0F, 0x0001020304050607}), msa_dupq_n_s8(0), (v16i8)a.val)); + return c; +} + +inline v_int8x16 v_reverse(const v_int8x16 &a) +{ return v_reinterpret_as_s8(v_reverse(v_reinterpret_as_u8(a))); } + +inline v_uint16x8 v_reverse(const v_uint16x8 &a) +{ + v_uint16x8 c = v_uint16x8((v8u16)__builtin_msa_vshf_h((v8i16)((v2i64){0x0004000500060007, 0x0000000100020003}), msa_dupq_n_s16(0), (v8i16)a.val)); + return c; +} + +inline v_int16x8 v_reverse(const v_int16x8 &a) +{ return v_reinterpret_as_s16(v_reverse(v_reinterpret_as_u16(a))); } + +inline v_uint32x4 v_reverse(const v_uint32x4 &a) +{ + v_uint32x4 c; + 
c.val[0] = a.val[3]; + c.val[1] = a.val[2]; + c.val[2] = a.val[1]; + c.val[3] = a.val[0]; + return c; +} + +inline v_int32x4 v_reverse(const v_int32x4 &a) +{ return v_reinterpret_as_s32(v_reverse(v_reinterpret_as_u32(a))); } + +inline v_float32x4 v_reverse(const v_float32x4 &a) +{ return v_reinterpret_as_f32(v_reverse(v_reinterpret_as_u32(a))); } + +inline v_uint64x2 v_reverse(const v_uint64x2 &a) +{ + v_uint64x2 c; + c.val[0] = a.val[1]; + c.val[1] = a.val[0]; + return c; +} + +inline v_int64x2 v_reverse(const v_int64x2 &a) +{ return v_reinterpret_as_s64(v_reverse(v_reinterpret_as_u64(a))); } + +inline v_float64x2 v_reverse(const v_float64x2 &a) +{ return v_reinterpret_as_f64(v_reverse(v_reinterpret_as_u64(a))); } + + +#define OPENCV_HAL_IMPL_MSA_REDUCE_OP_8U(func, cfunc) \ +inline unsigned short v_reduce_##func(const v_uint16x8& a) \ +{ \ + v8u16 a_lo, a_hi; \ + ILVRL_H2_UH(a.val, msa_dupq_n_u16(0), a_lo, a_hi); \ + v4u32 b = msa_##func##q_u32(msa_paddlq_u16(a_lo), msa_paddlq_u16(a_hi)); \ + v4u32 b_lo, b_hi; \ + ILVRL_W2_UW(b, msa_dupq_n_u32(0), b_lo, b_hi); \ + v2u64 c = msa_##func##q_u64(msa_paddlq_u32(b_lo), msa_paddlq_u32(b_hi)); \ + return (unsigned short)cfunc(c[0], c[1]); \ +} + +OPENCV_HAL_IMPL_MSA_REDUCE_OP_8U(max, std::max) +OPENCV_HAL_IMPL_MSA_REDUCE_OP_8U(min, std::min) + +#define OPENCV_HAL_IMPL_MSA_REDUCE_OP_8S(func, cfunc) \ +inline short v_reduce_##func(const v_int16x8& a) \ +{ \ + v8i16 a_lo, a_hi; \ + ILVRL_H2_SH(a.val, msa_dupq_n_s16(0), a_lo, a_hi); \ + v4i32 b = msa_##func##q_s32(msa_paddlq_s16(a_lo), msa_paddlq_s16(a_hi)); \ + v4i32 b_lo, b_hi; \ + ILVRL_W2_SW(b, msa_dupq_n_s32(0), b_lo, b_hi); \ + v2i64 c = msa_##func##q_s64(msa_paddlq_s32(b_lo), msa_paddlq_s32(b_hi)); \ + return (short)cfunc(c[0], c[1]); \ +} + +OPENCV_HAL_IMPL_MSA_REDUCE_OP_8S(max, std::max) +OPENCV_HAL_IMPL_MSA_REDUCE_OP_8S(min, std::min) + +#define OPENCV_HAL_IMPL_MSA_REDUCE_OP_4(_Tpvec, scalartype, func, cfunc) \ +inline scalartype v_reduce_##func(const _Tpvec& a) \ +{ \ + return (scalartype)cfunc(cfunc(a.val[0], a.val[1]), cfunc(a.val[2], a.val[3])); \ +} + +OPENCV_HAL_IMPL_MSA_REDUCE_OP_4(v_uint32x4, unsigned, max, std::max) +OPENCV_HAL_IMPL_MSA_REDUCE_OP_4(v_uint32x4, unsigned, min, std::min) +OPENCV_HAL_IMPL_MSA_REDUCE_OP_4(v_int32x4, int, max, std::max) +OPENCV_HAL_IMPL_MSA_REDUCE_OP_4(v_int32x4, int, min, std::min) +OPENCV_HAL_IMPL_MSA_REDUCE_OP_4(v_float32x4, float, max, std::max) +OPENCV_HAL_IMPL_MSA_REDUCE_OP_4(v_float32x4, float, min, std::min) + + +#define OPENCV_HAL_IMPL_MSA_REDUCE_OP_16(_Tpvec, scalartype, _Tpvec2, func) \ +inline scalartype v_reduce_##func(const _Tpvec& a) \ +{ \ + _Tpvec2 a1, a2; \ + v_expand(a, a1, a2); \ + return (scalartype)v_reduce_##func(v_##func(a1, a2)); \ +} + +OPENCV_HAL_IMPL_MSA_REDUCE_OP_16(v_uint8x16, uchar, v_uint16x8, min) +OPENCV_HAL_IMPL_MSA_REDUCE_OP_16(v_uint8x16, uchar, v_uint16x8, max) +OPENCV_HAL_IMPL_MSA_REDUCE_OP_16(v_int8x16, char, v_int16x8, min) +OPENCV_HAL_IMPL_MSA_REDUCE_OP_16(v_int8x16, char, v_int16x8, max) + + + +#define OPENCV_HAL_IMPL_MSA_REDUCE_SUM(_Tpvec, scalartype, suffix) \ +inline scalartype v_reduce_sum(const _Tpvec& a) \ +{ \ + return (scalartype)msa_sum_##suffix(a.val); \ +} + +OPENCV_HAL_IMPL_MSA_REDUCE_SUM(v_uint8x16, unsigned char, u8) +OPENCV_HAL_IMPL_MSA_REDUCE_SUM(v_int8x16, char, s8) +OPENCV_HAL_IMPL_MSA_REDUCE_SUM(v_uint16x8, unsigned short, u16) +OPENCV_HAL_IMPL_MSA_REDUCE_SUM(v_int16x8, short, s16) +OPENCV_HAL_IMPL_MSA_REDUCE_SUM(v_uint32x4, unsigned, u32) +OPENCV_HAL_IMPL_MSA_REDUCE_SUM(v_int32x4, int, s32) 
+OPENCV_HAL_IMPL_MSA_REDUCE_SUM(v_float32x4, float, f32) + +inline uint64 v_reduce_sum(const v_uint64x2& a) +{ return (uint64)(msa_getq_lane_u64(a.val, 0) + msa_getq_lane_u64(a.val, 1)); } +inline int64 v_reduce_sum(const v_int64x2& a) +{ return (int64)(msa_getq_lane_s64(a.val, 0) + msa_getq_lane_s64(a.val, 1)); } +inline double v_reduce_sum(const v_float64x2& a) +{ + return msa_getq_lane_f64(a.val, 0) + msa_getq_lane_f64(a.val, 1); +} + +/* v_reduce_sum4, v_reduce_sad */ +inline v_float32x4 v_reduce_sum4(const v_float32x4& a, const v_float32x4& b, + const v_float32x4& c, const v_float32x4& d) +{ + v4f32 u0 = msa_addq_f32(MSA_TPV_REINTERPRET(v4f32, msa_ilvevq_s32(MSA_TPV_REINTERPRET(v4i32, b.val), MSA_TPV_REINTERPRET(v4i32, a.val))), + MSA_TPV_REINTERPRET(v4f32, msa_ilvodq_s32(MSA_TPV_REINTERPRET(v4i32, b.val), MSA_TPV_REINTERPRET(v4i32, a.val)))); // a0+a1 b0+b1 a2+a3 b2+b3 + v4f32 u1 = msa_addq_f32(MSA_TPV_REINTERPRET(v4f32, msa_ilvevq_s32(MSA_TPV_REINTERPRET(v4i32, d.val), MSA_TPV_REINTERPRET(v4i32, c.val))), + MSA_TPV_REINTERPRET(v4f32, msa_ilvodq_s32(MSA_TPV_REINTERPRET(v4i32, d.val), MSA_TPV_REINTERPRET(v4i32, c.val)))); // c0+c1 d0+d1 c2+c3 d2+d3 + + return v_float32x4(msa_addq_f32(MSA_TPV_REINTERPRET(v4f32, msa_ilvrq_s64(MSA_TPV_REINTERPRET(v2i64, u1), MSA_TPV_REINTERPRET(v2i64, u0))), + MSA_TPV_REINTERPRET(v4f32, msa_ilvlq_s64(MSA_TPV_REINTERPRET(v2i64, u1), MSA_TPV_REINTERPRET(v2i64, u0))))); +} + +inline unsigned v_reduce_sad(const v_uint8x16& a, const v_uint8x16& b) +{ + v16u8 t0 = msa_abdq_u8(a.val, b.val); + v8u16 t1 = msa_paddlq_u8(t0); + v4u32 t2 = msa_paddlq_u16(t1); + return msa_sum_u32(t2); +} +inline unsigned v_reduce_sad(const v_int8x16& a, const v_int8x16& b) +{ + v16u8 t0 = MSA_TPV_REINTERPRET(v16u8, msa_abdq_s8(a.val, b.val)); + v8u16 t1 = msa_paddlq_u8(t0); + v4u32 t2 = msa_paddlq_u16(t1); + return msa_sum_u32(t2); +} +inline unsigned v_reduce_sad(const v_uint16x8& a, const v_uint16x8& b) +{ + v8u16 t0 = msa_abdq_u16(a.val, b.val); + v4u32 t1 = msa_paddlq_u16(t0); + return msa_sum_u32(t1); +} +inline unsigned v_reduce_sad(const v_int16x8& a, const v_int16x8& b) +{ + v8u16 t0 = MSA_TPV_REINTERPRET(v8u16, msa_abdq_s16(a.val, b.val)); + v4u32 t1 = msa_paddlq_u16(t0); + return msa_sum_u32(t1); +} +inline unsigned v_reduce_sad(const v_uint32x4& a, const v_uint32x4& b) +{ + v4u32 t0 = msa_abdq_u32(a.val, b.val); + return msa_sum_u32(t0); +} +inline unsigned v_reduce_sad(const v_int32x4& a, const v_int32x4& b) +{ + v4u32 t0 = MSA_TPV_REINTERPRET(v4u32, msa_abdq_s32(a.val, b.val)); + return msa_sum_u32(t0); +} +inline float v_reduce_sad(const v_float32x4& a, const v_float32x4& b) +{ + v4f32 t0 = msa_abdq_f32(a.val, b.val); + return msa_sum_f32(t0); +} + +/* v_popcount */ +#define OPENCV_HAL_IMPL_MSA_POPCOUNT_SIZE8(_Tpvec) \ +inline v_uint8x16 v_popcount(const _Tpvec& a) \ +{ \ + v16u8 t = MSA_TPV_REINTERPRET(v16u8, msa_cntq_s8(MSA_TPV_REINTERPRET(v16i8, a.val))); \ + return v_uint8x16(t); \ +} +OPENCV_HAL_IMPL_MSA_POPCOUNT_SIZE8(v_uint8x16) +OPENCV_HAL_IMPL_MSA_POPCOUNT_SIZE8(v_int8x16) + +#define OPENCV_HAL_IMPL_MSA_POPCOUNT_SIZE16(_Tpvec) \ +inline v_uint16x8 v_popcount(const _Tpvec& a) \ +{ \ + v8u16 t = MSA_TPV_REINTERPRET(v8u16, msa_cntq_s16(MSA_TPV_REINTERPRET(v8i16, a.val))); \ + return v_uint16x8(t); \ +} +OPENCV_HAL_IMPL_MSA_POPCOUNT_SIZE16(v_uint16x8) +OPENCV_HAL_IMPL_MSA_POPCOUNT_SIZE16(v_int16x8) + +#define OPENCV_HAL_IMPL_MSA_POPCOUNT_SIZE32(_Tpvec) \ +inline v_uint32x4 v_popcount(const _Tpvec& a) \ +{ \ + v4u32 t = MSA_TPV_REINTERPRET(v4u32, 
msa_cntq_s32(MSA_TPV_REINTERPRET(v4i32, a.val))); \ + return v_uint32x4(t); \ +} +OPENCV_HAL_IMPL_MSA_POPCOUNT_SIZE32(v_uint32x4) +OPENCV_HAL_IMPL_MSA_POPCOUNT_SIZE32(v_int32x4) + +#define OPENCV_HAL_IMPL_MSA_POPCOUNT_SIZE64(_Tpvec) \ +inline v_uint64x2 v_popcount(const _Tpvec& a) \ +{ \ + v2u64 t = MSA_TPV_REINTERPRET(v2u64, msa_cntq_s64(MSA_TPV_REINTERPRET(v2i64, a.val))); \ + return v_uint64x2(t); \ +} +OPENCV_HAL_IMPL_MSA_POPCOUNT_SIZE64(v_uint64x2) +OPENCV_HAL_IMPL_MSA_POPCOUNT_SIZE64(v_int64x2) + +inline int v_signmask(const v_uint8x16& a) +{ + v8i8 m0 = msa_create_s8(CV_BIG_UINT(0x0706050403020100)); + v16u8 v0 = msa_shlq_u8(msa_shrq_n_u8(a.val, 7), msa_combine_s8(m0, m0)); + v8u16 v1 = msa_paddlq_u8(v0); + v4u32 v2 = msa_paddlq_u16(v1); + v2u64 v3 = msa_paddlq_u32(v2); + return (int)msa_getq_lane_u64(v3, 0) + ((int)msa_getq_lane_u64(v3, 1) << 8); +} +inline int v_signmask(const v_int8x16& a) +{ return v_signmask(v_reinterpret_as_u8(a)); } + +inline int v_signmask(const v_uint16x8& a) +{ + v4i16 m0 = msa_create_s16(CV_BIG_UINT(0x0003000200010000)); + v8u16 v0 = msa_shlq_u16(msa_shrq_n_u16(a.val, 15), msa_combine_s16(m0, m0)); + v4u32 v1 = msa_paddlq_u16(v0); + v2u64 v2 = msa_paddlq_u32(v1); + return (int)msa_getq_lane_u64(v2, 0) + ((int)msa_getq_lane_u64(v2, 1) << 4); +} +inline int v_signmask(const v_int16x8& a) +{ return v_signmask(v_reinterpret_as_u16(a)); } + +inline int v_signmask(const v_uint32x4& a) +{ + v2i32 m0 = msa_create_s32(CV_BIG_UINT(0x0000000100000000)); + v4u32 v0 = msa_shlq_u32(msa_shrq_n_u32(a.val, 31), msa_combine_s32(m0, m0)); + v2u64 v1 = msa_paddlq_u32(v0); + return (int)msa_getq_lane_u64(v1, 0) + ((int)msa_getq_lane_u64(v1, 1) << 2); +} +inline int v_signmask(const v_int32x4& a) +{ return v_signmask(v_reinterpret_as_u32(a)); } +inline int v_signmask(const v_float32x4& a) +{ return v_signmask(v_reinterpret_as_u32(a)); } + +inline int v_signmask(const v_uint64x2& a) +{ + v2u64 v0 = msa_shrq_n_u64(a.val, 63); + return (int)msa_getq_lane_u64(v0, 0) + ((int)msa_getq_lane_u64(v0, 1) << 1); +} +inline int v_signmask(const v_int64x2& a) +{ return v_signmask(v_reinterpret_as_u64(a)); } +inline int v_signmask(const v_float64x2& a) +{ return v_signmask(v_reinterpret_as_u64(a)); } + +inline int v_scan_forward(const v_int8x16& a) { return trailingZeros32(v_signmask(a)); } +inline int v_scan_forward(const v_uint8x16& a) { return trailingZeros32(v_signmask(a)); } +inline int v_scan_forward(const v_int16x8& a) { return trailingZeros32(v_signmask(a)); } +inline int v_scan_forward(const v_uint16x8& a) { return trailingZeros32(v_signmask(a)); } +inline int v_scan_forward(const v_int32x4& a) { return trailingZeros32(v_signmask(a)); } +inline int v_scan_forward(const v_uint32x4& a) { return trailingZeros32(v_signmask(a)); } +inline int v_scan_forward(const v_float32x4& a) { return trailingZeros32(v_signmask(a)); } +inline int v_scan_forward(const v_int64x2& a) { return trailingZeros32(v_signmask(a)); } +inline int v_scan_forward(const v_uint64x2& a) { return trailingZeros32(v_signmask(a)); } +inline int v_scan_forward(const v_float64x2& a) { return trailingZeros32(v_signmask(a)); } + +#define OPENCV_HAL_IMPL_MSA_CHECK_ALLANY(_Tpvec, _Tpvec2, suffix, shift) \ +inline bool v_check_all(const v_##_Tpvec& a) \ +{ \ + _Tpvec2 v0 = msa_shrq_n_##suffix(msa_mvnq_##suffix(a.val), shift); \ + v2u64 v1 = MSA_TPV_REINTERPRET(v2u64, v0); \ + return (msa_getq_lane_u64(v1, 0) | msa_getq_lane_u64(v1, 1)) == 0; \ +} \ +inline bool v_check_any(const v_##_Tpvec& a) \ +{ \ + _Tpvec2 v0 = 
msa_shrq_n_##suffix(a.val, shift); \ + v2u64 v1 = MSA_TPV_REINTERPRET(v2u64, v0); \ + return (msa_getq_lane_u64(v1, 0) | msa_getq_lane_u64(v1, 1)) != 0; \ +} + +OPENCV_HAL_IMPL_MSA_CHECK_ALLANY(uint8x16, v16u8, u8, 7) +OPENCV_HAL_IMPL_MSA_CHECK_ALLANY(uint16x8, v8u16, u16, 15) +OPENCV_HAL_IMPL_MSA_CHECK_ALLANY(uint32x4, v4u32, u32, 31) +OPENCV_HAL_IMPL_MSA_CHECK_ALLANY(uint64x2, v2u64, u64, 63) + +inline bool v_check_all(const v_int8x16& a) +{ return v_check_all(v_reinterpret_as_u8(a)); } +inline bool v_check_all(const v_int16x8& a) +{ return v_check_all(v_reinterpret_as_u16(a)); } +inline bool v_check_all(const v_int32x4& a) +{ return v_check_all(v_reinterpret_as_u32(a)); } +inline bool v_check_all(const v_float32x4& a) +{ return v_check_all(v_reinterpret_as_u32(a)); } + +inline bool v_check_any(const v_int8x16& a) +{ return v_check_any(v_reinterpret_as_u8(a)); } +inline bool v_check_any(const v_int16x8& a) +{ return v_check_any(v_reinterpret_as_u16(a)); } +inline bool v_check_any(const v_int32x4& a) +{ return v_check_any(v_reinterpret_as_u32(a)); } +inline bool v_check_any(const v_float32x4& a) +{ return v_check_any(v_reinterpret_as_u32(a)); } + +inline bool v_check_all(const v_int64x2& a) +{ return v_check_all(v_reinterpret_as_u64(a)); } +inline bool v_check_all(const v_float64x2& a) +{ return v_check_all(v_reinterpret_as_u64(a)); } +inline bool v_check_any(const v_int64x2& a) +{ return v_check_any(v_reinterpret_as_u64(a)); } +inline bool v_check_any(const v_float64x2& a) +{ return v_check_any(v_reinterpret_as_u64(a)); } + +/* v_select */ +#define OPENCV_HAL_IMPL_MSA_SELECT(_Tpvec, _Tpv, _Tpvu) \ +inline _Tpvec v_select(const _Tpvec& mask, const _Tpvec& a, const _Tpvec& b) \ +{ \ + return _Tpvec(MSA_TPV_REINTERPRET(_Tpv, msa_bslq_u8(MSA_TPV_REINTERPRET(_Tpvu, mask.val), \ + MSA_TPV_REINTERPRET(_Tpvu, b.val), MSA_TPV_REINTERPRET(_Tpvu, a.val)))); \ +} + +OPENCV_HAL_IMPL_MSA_SELECT(v_uint8x16, v16u8, v16u8) +OPENCV_HAL_IMPL_MSA_SELECT(v_int8x16, v16i8, v16u8) +OPENCV_HAL_IMPL_MSA_SELECT(v_uint16x8, v8u16, v16u8) +OPENCV_HAL_IMPL_MSA_SELECT(v_int16x8, v8i16, v16u8) +OPENCV_HAL_IMPL_MSA_SELECT(v_uint32x4, v4u32, v16u8) +OPENCV_HAL_IMPL_MSA_SELECT(v_int32x4, v4i32, v16u8) +OPENCV_HAL_IMPL_MSA_SELECT(v_float32x4, v4f32, v16u8) +OPENCV_HAL_IMPL_MSA_SELECT(v_float64x2, v2f64, v16u8) + +#define OPENCV_HAL_IMPL_MSA_EXPAND(_Tpvec, _Tpwvec, _Tp, suffix, ssuffix, _Tpv, _Tpvs) \ +inline void v_expand(const _Tpvec& a, _Tpwvec& b0, _Tpwvec& b1) \ +{ \ + _Tpv a_lo = MSA_TPV_REINTERPRET(_Tpv, msa_ilvrq_##ssuffix(MSA_TPV_REINTERPRET(_Tpvs, a.val), msa_dupq_n_##ssuffix(0))); \ + _Tpv a_hi = MSA_TPV_REINTERPRET(_Tpv, msa_ilvlq_##ssuffix(MSA_TPV_REINTERPRET(_Tpvs, a.val), msa_dupq_n_##ssuffix(0))); \ + b0.val = msa_paddlq_##suffix(a_lo); \ + b1.val = msa_paddlq_##suffix(a_hi); \ +} \ +inline _Tpwvec v_expand_low(const _Tpvec& a) \ +{ \ + _Tpv a_lo = MSA_TPV_REINTERPRET(_Tpv, msa_ilvrq_##ssuffix(MSA_TPV_REINTERPRET(_Tpvs, a.val), msa_dupq_n_##ssuffix(0))); \ + return _Tpwvec(msa_paddlq_##suffix(a_lo)); \ +} \ +inline _Tpwvec v_expand_high(const _Tpvec& a) \ +{ \ + _Tpv a_hi = MSA_TPV_REINTERPRET(_Tpv, msa_ilvlq_##ssuffix(MSA_TPV_REINTERPRET(_Tpvs, a.val), msa_dupq_n_##ssuffix(0))); \ + return _Tpwvec(msa_paddlq_##suffix(a_hi)); \ +} \ +inline _Tpwvec v_load_expand(const _Tp* ptr) \ +{ \ + return _Tpwvec(msa_movl_##suffix(msa_ld1_##suffix(ptr))); \ +} + +OPENCV_HAL_IMPL_MSA_EXPAND(v_uint8x16, v_uint16x8, uchar, u8, s8, v16u8, v16i8) +OPENCV_HAL_IMPL_MSA_EXPAND(v_int8x16, v_int16x8, schar, s8, s8, v16i8, v16i8) 
+OPENCV_HAL_IMPL_MSA_EXPAND(v_uint16x8, v_uint32x4, ushort, u16, s16, v8u16, v8i16)
+OPENCV_HAL_IMPL_MSA_EXPAND(v_int16x8, v_int32x4, short, s16, s16, v8i16, v8i16)
+OPENCV_HAL_IMPL_MSA_EXPAND(v_uint32x4, v_uint64x2, uint, u32, s32, v4u32, v4i32)
+OPENCV_HAL_IMPL_MSA_EXPAND(v_int32x4, v_int64x2, int, s32, s32, v4i32, v4i32)
+
+inline v_uint32x4 v_load_expand_q(const uchar* ptr)
+{
+    return v_uint32x4((v4u32){ptr[0], ptr[1], ptr[2], ptr[3]});
+}
+
+inline v_int32x4 v_load_expand_q(const schar* ptr)
+{
+    return v_int32x4((v4i32){ptr[0], ptr[1], ptr[2], ptr[3]});
+}
+
+/* v_zip, v_combine_low, v_combine_high, v_recombine */
+#define OPENCV_HAL_IMPL_MSA_UNPACKS(_Tpvec, _Tpv, _Tpvs, ssuffix) \
+inline void v_zip(const _Tpvec& a0, const _Tpvec& a1, _Tpvec& b0, _Tpvec& b1) \
+{ \
+    b0.val = MSA_TPV_REINTERPRET(_Tpv, msa_ilvrq_##ssuffix(MSA_TPV_REINTERPRET(_Tpvs, a1.val), MSA_TPV_REINTERPRET(_Tpvs, a0.val))); \
+    b1.val = MSA_TPV_REINTERPRET(_Tpv, msa_ilvlq_##ssuffix(MSA_TPV_REINTERPRET(_Tpvs, a1.val), MSA_TPV_REINTERPRET(_Tpvs, a0.val))); \
+} \
+inline _Tpvec v_combine_low(const _Tpvec& a, const _Tpvec& b) \
+{ \
+    return _Tpvec(MSA_TPV_REINTERPRET(_Tpv, msa_ilvrq_s64(MSA_TPV_REINTERPRET(v2i64, b.val), MSA_TPV_REINTERPRET(v2i64, a.val)))); \
+} \
+inline _Tpvec v_combine_high(const _Tpvec& a, const _Tpvec& b) \
+{ \
+    return _Tpvec(MSA_TPV_REINTERPRET(_Tpv, msa_ilvlq_s64(MSA_TPV_REINTERPRET(v2i64, b.val), MSA_TPV_REINTERPRET(v2i64, a.val)))); \
+} \
+inline void v_recombine(const _Tpvec& a, const _Tpvec& b, _Tpvec& c, _Tpvec& d) \
+{ \
+    c.val = MSA_TPV_REINTERPRET(_Tpv, msa_ilvrq_s64(MSA_TPV_REINTERPRET(v2i64, b.val), MSA_TPV_REINTERPRET(v2i64, a.val))); \
+    d.val = MSA_TPV_REINTERPRET(_Tpv, msa_ilvlq_s64(MSA_TPV_REINTERPRET(v2i64, b.val), MSA_TPV_REINTERPRET(v2i64, a.val))); \
+}
+
+OPENCV_HAL_IMPL_MSA_UNPACKS(v_uint8x16, v16u8, v16i8, s8)
+OPENCV_HAL_IMPL_MSA_UNPACKS(v_int8x16, v16i8, v16i8, s8)
+OPENCV_HAL_IMPL_MSA_UNPACKS(v_uint16x8, v8u16, v8i16, s16)
+OPENCV_HAL_IMPL_MSA_UNPACKS(v_int16x8, v8i16, v8i16, s16)
+OPENCV_HAL_IMPL_MSA_UNPACKS(v_uint32x4, v4u32, v4i32, s32)
+OPENCV_HAL_IMPL_MSA_UNPACKS(v_int32x4, v4i32, v4i32, s32)
+OPENCV_HAL_IMPL_MSA_UNPACKS(v_float32x4, v4f32, v4i32, s32)
+OPENCV_HAL_IMPL_MSA_UNPACKS(v_float64x2, v2f64, v2i64, s64)
+
+/* v_extract */
+#define OPENCV_HAL_IMPL_MSA_EXTRACT(_Tpvec, _Tpv, _Tpvs, suffix) \
+template <int s> \
+inline _Tpvec v_extract(const _Tpvec& a, const _Tpvec& b) \
+{ \
+    return _Tpvec(MSA_TPV_REINTERPRET(_Tpv, msa_extq_##suffix(MSA_TPV_REINTERPRET(_Tpvs, a.val), MSA_TPV_REINTERPRET(_Tpvs, b.val), s))); \
+}
+
+OPENCV_HAL_IMPL_MSA_EXTRACT(v_uint8x16, v16u8, v16i8, s8)
+OPENCV_HAL_IMPL_MSA_EXTRACT(v_int8x16, v16i8, v16i8, s8)
+OPENCV_HAL_IMPL_MSA_EXTRACT(v_uint16x8, v8u16, v8i16, s16)
+OPENCV_HAL_IMPL_MSA_EXTRACT(v_int16x8, v8i16, v8i16, s16)
+OPENCV_HAL_IMPL_MSA_EXTRACT(v_uint32x4, v4u32, v4i32, s32)
+OPENCV_HAL_IMPL_MSA_EXTRACT(v_int32x4, v4i32, v4i32, s32)
+OPENCV_HAL_IMPL_MSA_EXTRACT(v_uint64x2, v2u64, v2i64, s64)
+OPENCV_HAL_IMPL_MSA_EXTRACT(v_int64x2, v2i64, v2i64, s64)
+OPENCV_HAL_IMPL_MSA_EXTRACT(v_float32x4, v4f32, v4i32, s32)
+OPENCV_HAL_IMPL_MSA_EXTRACT(v_float64x2, v2f64, v2i64, s64)
+
+/* v_round, v_floor, v_ceil, v_trunc */
+inline v_int32x4 v_round(const v_float32x4& a)
+{
+    return v_int32x4(msa_cvttintq_s32_f32(a.val));
+}
+
+inline v_int32x4 v_floor(const v_float32x4& a)
+{
+    v4i32 a1 = msa_cvttintq_s32_f32(a.val);
+    return v_int32x4(msa_addq_s32(a1, MSA_TPV_REINTERPRET(v4i32, msa_cgtq_f32(msa_cvtfintq_f32_s32(a1), a.val))));
+}
+
+inline v_int32x4 v_ceil(const v_float32x4& a) +{ + v4i32 a1 = msa_cvttintq_s32_f32(a.val); + return v_int32x4(msa_subq_s32(a1, MSA_TPV_REINTERPRET(v4i32, msa_cgtq_f32(a.val, msa_cvtfintq_f32_s32(a1))))); +} + +inline v_int32x4 v_trunc(const v_float32x4& a) +{ + return v_int32x4(msa_cvttruncq_s32_f32(a.val)); +} + +inline v_int32x4 v_round(const v_float64x2& a) +{ + return v_int32x4(msa_pack_s64(msa_cvttintq_s64_f64(a.val), msa_dupq_n_s64(0))); +} + +inline v_int32x4 v_round(const v_float64x2& a, const v_float64x2& b) +{ + return v_int32x4(msa_pack_s64(msa_cvttintq_s64_f64(a.val), msa_cvttintq_s64_f64(b.val))); +} + +inline v_int32x4 v_floor(const v_float64x2& a) +{ + v2f64 a1 = msa_cvtrintq_f64(a.val); + return v_int32x4(msa_pack_s64(msa_addq_s64(msa_cvttruncq_s64_f64(a1), MSA_TPV_REINTERPRET(v2i64, msa_cgtq_f64(a1, a.val))), msa_dupq_n_s64(0))); +} + +inline v_int32x4 v_ceil(const v_float64x2& a) +{ + v2f64 a1 = msa_cvtrintq_f64(a.val); + return v_int32x4(msa_pack_s64(msa_subq_s64(msa_cvttruncq_s64_f64(a1), MSA_TPV_REINTERPRET(v2i64, msa_cgtq_f64(a.val, a1))), msa_dupq_n_s64(0))); +} + +inline v_int32x4 v_trunc(const v_float64x2& a) +{ + return v_int32x4(msa_pack_s64(msa_cvttruncq_s64_f64(a.val), msa_dupq_n_s64(0))); +} + +#define OPENCV_HAL_IMPL_MSA_TRANSPOSE4x4(_Tpvec, _Tpv, _Tpvs, ssuffix) \ +inline void v_transpose4x4(const _Tpvec& a0, const _Tpvec& a1, \ + const _Tpvec& a2, const _Tpvec& a3, \ + _Tpvec& b0, _Tpvec& b1, \ + _Tpvec& b2, _Tpvec& b3) \ +{ \ + _Tpv t00 = MSA_TPV_REINTERPRET(_Tpv, msa_ilvrq_##ssuffix(MSA_TPV_REINTERPRET(_Tpvs, a1.val), MSA_TPV_REINTERPRET(_Tpvs, a0.val))); \ + _Tpv t01 = MSA_TPV_REINTERPRET(_Tpv, msa_ilvlq_##ssuffix(MSA_TPV_REINTERPRET(_Tpvs, a1.val), MSA_TPV_REINTERPRET(_Tpvs, a0.val))); \ + _Tpv t10 = MSA_TPV_REINTERPRET(_Tpv, msa_ilvrq_##ssuffix(MSA_TPV_REINTERPRET(_Tpvs, a3.val), MSA_TPV_REINTERPRET(_Tpvs, a2.val))); \ + _Tpv t11 = MSA_TPV_REINTERPRET(_Tpv, msa_ilvlq_##ssuffix(MSA_TPV_REINTERPRET(_Tpvs, a3.val), MSA_TPV_REINTERPRET(_Tpvs, a2.val))); \ + b0.val = MSA_TPV_REINTERPRET(_Tpv, msa_ilvrq_s64(MSA_TPV_REINTERPRET(v2i64, t10), MSA_TPV_REINTERPRET(v2i64, t00))); \ + b1.val = MSA_TPV_REINTERPRET(_Tpv, msa_ilvlq_s64(MSA_TPV_REINTERPRET(v2i64, t10), MSA_TPV_REINTERPRET(v2i64, t00))); \ + b2.val = MSA_TPV_REINTERPRET(_Tpv, msa_ilvrq_s64(MSA_TPV_REINTERPRET(v2i64, t11), MSA_TPV_REINTERPRET(v2i64, t01))); \ + b3.val = MSA_TPV_REINTERPRET(_Tpv, msa_ilvlq_s64(MSA_TPV_REINTERPRET(v2i64, t11), MSA_TPV_REINTERPRET(v2i64, t01))); \ +} + +OPENCV_HAL_IMPL_MSA_TRANSPOSE4x4(v_uint32x4, v4u32, v4i32, s32) +OPENCV_HAL_IMPL_MSA_TRANSPOSE4x4(v_int32x4, v4i32, v4i32, s32) +OPENCV_HAL_IMPL_MSA_TRANSPOSE4x4(v_float32x4, v4f32, v4i32, s32) + +#define OPENCV_HAL_IMPL_MSA_INTERLEAVED(_Tpvec, _Tp, suffix) \ +inline void v_load_deinterleave(const _Tp* ptr, v_##_Tpvec& a, v_##_Tpvec& b) \ +{ \ + msa_ld2q_##suffix(ptr, &a.val, &b.val); \ +} \ +inline void v_load_deinterleave(const _Tp* ptr, v_##_Tpvec& a, v_##_Tpvec& b, v_##_Tpvec& c) \ +{ \ + msa_ld3q_##suffix(ptr, &a.val, &b.val, &c.val); \ +} \ +inline void v_load_deinterleave(const _Tp* ptr, v_##_Tpvec& a, v_##_Tpvec& b, \ + v_##_Tpvec& c, v_##_Tpvec& d) \ +{ \ + msa_ld4q_##suffix(ptr, &a.val, &b.val, &c.val, &d.val); \ +} \ +inline void v_store_interleave( _Tp* ptr, const v_##_Tpvec& a, const v_##_Tpvec& b, \ + hal::StoreMode /*mode*/=hal::STORE_UNALIGNED) \ +{ \ + msa_st2q_##suffix(ptr, a.val, b.val); \ +} \ +inline void v_store_interleave( _Tp* ptr, const v_##_Tpvec& a, const v_##_Tpvec& b, \ + const v_##_Tpvec& c, 
hal::StoreMode /*mode*/=hal::STORE_UNALIGNED) \ +{ \ + msa_st3q_##suffix(ptr, a.val, b.val, c.val); \ +} \ +inline void v_store_interleave( _Tp* ptr, const v_##_Tpvec& a, const v_##_Tpvec& b, \ + const v_##_Tpvec& c, const v_##_Tpvec& d, \ + hal::StoreMode /*mode*/=hal::STORE_UNALIGNED ) \ +{ \ + msa_st4q_##suffix(ptr, a.val, b.val, c.val, d.val); \ +} + +OPENCV_HAL_IMPL_MSA_INTERLEAVED(uint8x16, uchar, u8) +OPENCV_HAL_IMPL_MSA_INTERLEAVED(int8x16, schar, s8) +OPENCV_HAL_IMPL_MSA_INTERLEAVED(uint16x8, ushort, u16) +OPENCV_HAL_IMPL_MSA_INTERLEAVED(int16x8, short, s16) +OPENCV_HAL_IMPL_MSA_INTERLEAVED(uint32x4, unsigned, u32) +OPENCV_HAL_IMPL_MSA_INTERLEAVED(int32x4, int, s32) +OPENCV_HAL_IMPL_MSA_INTERLEAVED(float32x4, float, f32) +OPENCV_HAL_IMPL_MSA_INTERLEAVED(uint64x2, uint64, u64) +OPENCV_HAL_IMPL_MSA_INTERLEAVED(int64x2, int64, s64) +OPENCV_HAL_IMPL_MSA_INTERLEAVED(float64x2, double, f64) + +/* v_cvt_f32, v_cvt_f64, v_cvt_f64_high */ +inline v_float32x4 v_cvt_f32(const v_int32x4& a) +{ + return v_float32x4(msa_cvtfintq_f32_s32(a.val)); +} + +inline v_float32x4 v_cvt_f32(const v_float64x2& a) +{ + return v_float32x4(msa_cvtfq_f32_f64(a.val, msa_dupq_n_f64(0.0f))); +} + +inline v_float32x4 v_cvt_f32(const v_float64x2& a, const v_float64x2& b) +{ + return v_float32x4(msa_cvtfq_f32_f64(a.val, b.val)); +} + +inline v_float64x2 v_cvt_f64(const v_int32x4& a) +{ + return v_float64x2(msa_cvtflq_f64_f32(msa_cvtfintq_f32_s32(a.val))); +} + +inline v_float64x2 v_cvt_f64_high(const v_int32x4& a) +{ + return v_float64x2(msa_cvtfhq_f64_f32(msa_cvtfintq_f32_s32(a.val))); +} + +inline v_float64x2 v_cvt_f64(const v_float32x4& a) +{ + return v_float64x2(msa_cvtflq_f64_f32(a.val)); +} + +inline v_float64x2 v_cvt_f64_high(const v_float32x4& a) +{ + return v_float64x2(msa_cvtfhq_f64_f32(a.val)); +} + +inline v_float64x2 v_cvt_f64(const v_int64x2& a) +{ + return v_float64x2(msa_cvtfintq_f64_s64(a.val)); +} + +////////////// Lookup table access //////////////////// +inline v_int8x16 v_lut(const schar* tab, const int* idx) +{ + schar CV_DECL_ALIGNED(32) elems[16] = + { + tab[idx[ 0]], + tab[idx[ 1]], + tab[idx[ 2]], + tab[idx[ 3]], + tab[idx[ 4]], + tab[idx[ 5]], + tab[idx[ 6]], + tab[idx[ 7]], + tab[idx[ 8]], + tab[idx[ 9]], + tab[idx[10]], + tab[idx[11]], + tab[idx[12]], + tab[idx[13]], + tab[idx[14]], + tab[idx[15]] + }; + return v_int8x16(msa_ld1q_s8(elems)); +} +inline v_int8x16 v_lut_pairs(const schar* tab, const int* idx) +{ + schar CV_DECL_ALIGNED(32) elems[16] = + { + tab[idx[0]], + tab[idx[0] + 1], + tab[idx[1]], + tab[idx[1] + 1], + tab[idx[2]], + tab[idx[2] + 1], + tab[idx[3]], + tab[idx[3] + 1], + tab[idx[4]], + tab[idx[4] + 1], + tab[idx[5]], + tab[idx[5] + 1], + tab[idx[6]], + tab[idx[6] + 1], + tab[idx[7]], + tab[idx[7] + 1] + }; + return v_int8x16(msa_ld1q_s8(elems)); +} +inline v_int8x16 v_lut_quads(const schar* tab, const int* idx) +{ + schar CV_DECL_ALIGNED(32) elems[16] = + { + tab[idx[0]], + tab[idx[0] + 1], + tab[idx[0] + 2], + tab[idx[0] + 3], + tab[idx[1]], + tab[idx[1] + 1], + tab[idx[1] + 2], + tab[idx[1] + 3], + tab[idx[2]], + tab[idx[2] + 1], + tab[idx[2] + 2], + tab[idx[2] + 3], + tab[idx[3]], + tab[idx[3] + 1], + tab[idx[3] + 2], + tab[idx[3] + 3] + }; + return v_int8x16(msa_ld1q_s8(elems)); +} +inline v_uint8x16 v_lut(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v_lut((schar*)tab, idx)); } +inline v_uint8x16 v_lut_pairs(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v_lut_pairs((schar*)tab, idx)); } +inline v_uint8x16 v_lut_quads(const uchar* 
tab, const int* idx) { return v_reinterpret_as_u8(v_lut_quads((schar*)tab, idx)); } + + +inline v_int16x8 v_lut(const short* tab, const int* idx) +{ + short CV_DECL_ALIGNED(32) elems[8] = + { + tab[idx[0]], + tab[idx[1]], + tab[idx[2]], + tab[idx[3]], + tab[idx[4]], + tab[idx[5]], + tab[idx[6]], + tab[idx[7]] + }; + return v_int16x8(msa_ld1q_s16(elems)); +} +inline v_int16x8 v_lut_pairs(const short* tab, const int* idx) +{ + short CV_DECL_ALIGNED(32) elems[8] = + { + tab[idx[0]], + tab[idx[0] + 1], + tab[idx[1]], + tab[idx[1] + 1], + tab[idx[2]], + tab[idx[2] + 1], + tab[idx[3]], + tab[idx[3] + 1] + }; + return v_int16x8(msa_ld1q_s16(elems)); +} +inline v_int16x8 v_lut_quads(const short* tab, const int* idx) +{ + return v_int16x8(msa_combine_s16(msa_ld1_s16(tab + idx[0]), msa_ld1_s16(tab + idx[1]))); +} +inline v_uint16x8 v_lut(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v_lut((short*)tab, idx)); } +inline v_uint16x8 v_lut_pairs(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v_lut_pairs((short*)tab, idx)); } +inline v_uint16x8 v_lut_quads(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v_lut_quads((short*)tab, idx)); } + +inline v_int32x4 v_lut(const int* tab, const int* idx) +{ + int CV_DECL_ALIGNED(32) elems[4] = + { + tab[idx[0]], + tab[idx[1]], + tab[idx[2]], + tab[idx[3]] + }; + return v_int32x4(msa_ld1q_s32(elems)); +} +inline v_int32x4 v_lut_pairs(const int* tab, const int* idx) +{ + return v_int32x4(msa_combine_s32(msa_ld1_s32(tab + idx[0]), msa_ld1_s32(tab + idx[1]))); +} +inline v_int32x4 v_lut_quads(const int* tab, const int* idx) +{ + return v_int32x4(msa_ld1q_s32(tab + idx[0])); +} +inline v_uint32x4 v_lut(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v_lut((int*)tab, idx)); } +inline v_uint32x4 v_lut_pairs(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v_lut_pairs((int*)tab, idx)); } +inline v_uint32x4 v_lut_quads(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v_lut_quads((int*)tab, idx)); } + +inline v_int64x2 v_lut(const int64_t* tab, const int* idx) +{ + return v_int64x2(msa_combine_s64(msa_create_s64(tab[idx[0]]), msa_create_s64(tab[idx[1]]))); +} +inline v_int64x2 v_lut_pairs(const int64_t* tab, const int* idx) +{ + return v_int64x2(msa_ld1q_s64(tab + idx[0])); +} +inline v_uint64x2 v_lut(const uint64_t* tab, const int* idx) { return v_reinterpret_as_u64(v_lut((const int64_t *)tab, idx)); } +inline v_uint64x2 v_lut_pairs(const uint64_t* tab, const int* idx) { return v_reinterpret_as_u64(v_lut_pairs((const int64_t *)tab, idx)); } + +inline v_float32x4 v_lut(const float* tab, const int* idx) +{ + float CV_DECL_ALIGNED(32) elems[4] = + { + tab[idx[0]], + tab[idx[1]], + tab[idx[2]], + tab[idx[3]] + }; + return v_float32x4(msa_ld1q_f32(elems)); +} +inline v_float32x4 v_lut_pairs(const float* tab, const int* idx) +{ + uint64 CV_DECL_ALIGNED(32) elems[2] = + { + *(uint64*)(tab + idx[0]), + *(uint64*)(tab + idx[1]) + }; + return v_float32x4(MSA_TPV_REINTERPRET(v4f32, msa_ld1q_u64(elems))); +} +inline v_float32x4 v_lut_quads(const float* tab, const int* idx) +{ + return v_float32x4(msa_ld1q_f32(tab + idx[0])); +} + +inline v_int32x4 v_lut(const int* tab, const v_int32x4& idxvec) +{ + int CV_DECL_ALIGNED(32) idx[4]; + v_store_aligned(idx, idxvec); + + return v_int32x4(tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]]); +} + +inline v_uint32x4 v_lut(const unsigned* tab, const v_int32x4& idxvec) +{ + unsigned CV_DECL_ALIGNED(32) elems[4] = + { + 
tab[msa_getq_lane_s32(idxvec.val, 0)], + tab[msa_getq_lane_s32(idxvec.val, 1)], + tab[msa_getq_lane_s32(idxvec.val, 2)], + tab[msa_getq_lane_s32(idxvec.val, 3)] + }; + return v_uint32x4(msa_ld1q_u32(elems)); +} + +inline v_float32x4 v_lut(const float* tab, const v_int32x4& idxvec) +{ + int CV_DECL_ALIGNED(32) idx[4]; + v_store_aligned(idx, idxvec); + + return v_float32x4(tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]]); +} + +inline void v_lut_deinterleave(const float* tab, const v_int32x4& idxvec, v_float32x4& x, v_float32x4& y) +{ + int CV_DECL_ALIGNED(32) idx[4]; + v_store_aligned(idx, idxvec); + + v4f32 xy02 = msa_combine_f32(msa_ld1_f32(tab + idx[0]), msa_ld1_f32(tab + idx[2])); + v4f32 xy13 = msa_combine_f32(msa_ld1_f32(tab + idx[1]), msa_ld1_f32(tab + idx[3])); + x = v_float32x4(MSA_TPV_REINTERPRET(v4f32, msa_ilvevq_s32(MSA_TPV_REINTERPRET(v4i32, xy13), MSA_TPV_REINTERPRET(v4i32, xy02)))); + y = v_float32x4(MSA_TPV_REINTERPRET(v4f32, msa_ilvodq_s32(MSA_TPV_REINTERPRET(v4i32, xy13), MSA_TPV_REINTERPRET(v4i32, xy02)))); +} + +inline v_int8x16 v_interleave_pairs(const v_int8x16& vec) +{ + v_int8x16 c = v_int8x16(__builtin_msa_vshf_b((v16i8)((v2i64){0x0705060403010200, 0x0F0D0E0C0B090A08}), msa_dupq_n_s8(0), vec.val)); + return c; +} +inline v_uint8x16 v_interleave_pairs(const v_uint8x16& vec) +{ return v_reinterpret_as_u8(v_interleave_pairs(v_reinterpret_as_s8(vec))); } +inline v_int8x16 v_interleave_quads(const v_int8x16& vec) +{ + v_int8x16 c = v_int8x16(__builtin_msa_vshf_b((v16i8)((v2i64){0x0703060205010400, 0x0F0B0E0A0D090C08}), msa_dupq_n_s8(0), vec.val)); + return c; +} +inline v_uint8x16 v_interleave_quads(const v_uint8x16& vec) { return v_reinterpret_as_u8(v_interleave_quads(v_reinterpret_as_s8(vec))); } + +inline v_int16x8 v_interleave_pairs(const v_int16x8& vec) +{ + v_int16x8 c = v_int16x8(__builtin_msa_vshf_h((v8i16)((v2i64){0x0003000100020000, 0x0007000500060004}), msa_dupq_n_s16(0), vec.val)); + return c; +} + +inline v_uint16x8 v_interleave_pairs(const v_uint16x8& vec) { return v_reinterpret_as_u16(v_interleave_pairs(v_reinterpret_as_s16(vec))); } + +inline v_int16x8 v_interleave_quads(const v_int16x8& vec) +{ + v_int16x8 c = v_int16x8(__builtin_msa_vshf_h((v8i16)((v2i64){0x0005000100040000, 0x0007000300060002}), msa_dupq_n_s16(0), vec.val)); + return c; +} + +inline v_uint16x8 v_interleave_quads(const v_uint16x8& vec) { return v_reinterpret_as_u16(v_interleave_quads(v_reinterpret_as_s16(vec))); } + +inline v_int32x4 v_interleave_pairs(const v_int32x4& vec) +{ + v_int32x4 c; + c.val[0] = vec.val[0]; + c.val[1] = vec.val[2]; + c.val[2] = vec.val[1]; + c.val[3] = vec.val[3]; + return c; +} + +inline v_uint32x4 v_interleave_pairs(const v_uint32x4& vec) { return v_reinterpret_as_u32(v_interleave_pairs(v_reinterpret_as_s32(vec))); } +inline v_float32x4 v_interleave_pairs(const v_float32x4& vec) { return v_reinterpret_as_f32(v_interleave_pairs(v_reinterpret_as_s32(vec))); } + +inline v_int8x16 v_pack_triplets(const v_int8x16& vec) +{ + v_int8x16 c = v_int8x16(__builtin_msa_vshf_b((v16i8)((v2i64){0x0908060504020100, 0x131211100E0D0C0A}), msa_dupq_n_s8(0), vec.val)); + return c; +} + +inline v_uint8x16 v_pack_triplets(const v_uint8x16& vec) { return v_reinterpret_as_u8(v_pack_triplets(v_reinterpret_as_s8(vec))); } + +inline v_int16x8 v_pack_triplets(const v_int16x8& vec) +{ + v_int16x8 c = v_int16x8(__builtin_msa_vshf_h((v8i16)((v2i64){0x0004000200010000, 0x0009000800060005}), msa_dupq_n_s16(0), vec.val)); + return c; +} + +inline v_uint16x8 v_pack_triplets(const v_uint16x8& 
vec) { return v_reinterpret_as_u16(v_pack_triplets(v_reinterpret_as_s16(vec))); }
+inline v_int32x4 v_pack_triplets(const v_int32x4& vec) { return vec; }
+inline v_uint32x4 v_pack_triplets(const v_uint32x4& vec) { return vec; }
+inline v_float32x4 v_pack_triplets(const v_float32x4& vec) { return vec; }
+
+inline v_float64x2 v_lut(const double* tab, const int* idx)
+{
+    double CV_DECL_ALIGNED(32) elems[2] =
+    {
+        tab[idx[0]],
+        tab[idx[1]]
+    };
+    return v_float64x2(msa_ld1q_f64(elems));
+}
+
+inline v_float64x2 v_lut_pairs(const double* tab, const int* idx)
+{
+    return v_float64x2(msa_ld1q_f64(tab + idx[0]));
+}
+
+inline v_float64x2 v_lut(const double* tab, const v_int32x4& idxvec)
+{
+    int CV_DECL_ALIGNED(32) idx[4];
+    v_store_aligned(idx, idxvec);
+
+    return v_float64x2(tab[idx[0]], tab[idx[1]]);
+}
+
+inline void v_lut_deinterleave(const double* tab, const v_int32x4& idxvec, v_float64x2& x, v_float64x2& y)
+{
+    int CV_DECL_ALIGNED(32) idx[4];
+    v_store_aligned(idx, idxvec);
+
+    v2f64 xy0 = msa_ld1q_f64(tab + idx[0]);
+    v2f64 xy1 = msa_ld1q_f64(tab + idx[1]);
+    x = v_float64x2(MSA_TPV_REINTERPRET(v2f64, msa_ilvevq_s64(MSA_TPV_REINTERPRET(v2i64, xy1), MSA_TPV_REINTERPRET(v2i64, xy0))));
+    y = v_float64x2(MSA_TPV_REINTERPRET(v2f64, msa_ilvodq_s64(MSA_TPV_REINTERPRET(v2i64, xy1), MSA_TPV_REINTERPRET(v2i64, xy0))));
+}
+
+template<int i, typename _Tp>
+inline typename _Tp::lane_type v_extract_n(const _Tp& a)
+{
+    return v_rotate_right<i>(a).get0();
+}
+
+template<int i>
+inline v_uint32x4 v_broadcast_element(const v_uint32x4& a)
+{
+    return v_setall_u32(v_extract_n<i>(a));
+}
+template<int i>
+inline v_int32x4 v_broadcast_element(const v_int32x4& a)
+{
+    return v_setall_s32(v_extract_n<i>(a));
+}
+template<int i>
+inline v_float32x4 v_broadcast_element(const v_float32x4& a)
+{
+    return v_setall_f32(v_extract_n<i>(a));
+}
+
+////// FP16 support ///////
+#if CV_FP16
+inline v_float32x4 v_load_expand(const float16_t* ptr)
+{
+#ifndef msa_ld1_f16
+    v4f16 v = (v4f16)msa_ld1_s16((const short*)ptr);
+#else
+    v4f16 v = msa_ld1_f16((const __fp16*)ptr);
+#endif
+    return v_float32x4(msa_cvt_f32_f16(v));
+}
+
+inline void v_pack_store(float16_t* ptr, const v_float32x4& v)
+{
+    v4f16 hv = msa_cvt_f16_f32(v.val);
+
+#ifndef msa_st1_f16
+    msa_st1_s16((short*)ptr, (int16x4_t)hv);
+#else
+    msa_st1_f16((__fp16*)ptr, hv);
+#endif
+}
+#else
+inline v_float32x4 v_load_expand(const float16_t* ptr)
+{
+    float buf[4];
+    for( int i = 0; i < 4; i++ )
+        buf[i] = (float)ptr[i];
+    return v_load(buf);
+}
+
+inline void v_pack_store(float16_t* ptr, const v_float32x4& v)
+{
+    float buf[4];
+    v_store(buf, v);
+    for( int i = 0; i < 4; i++ )
+        ptr[i] = (float16_t)buf[i];
+}
+#endif
+
+inline void v_cleanup() {}
+
+CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
+
+//! @endcond
+
+}
+
+#endif
diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_neon.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_neon.hpp
new file mode 100644
index 00000000..e17972a3
--- /dev/null
+++ b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_neon.hpp
@@ -0,0 +1,2613 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Copyright (C) 2015, Itseez Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef OPENCV_HAL_INTRIN_NEON_HPP
+#define OPENCV_HAL_INTRIN_NEON_HPP
+
+#include <algorithm>
+#include "opencv2/core/utility.hpp"
+
+namespace cv
+{
+
+//! @cond IGNORED
+
+CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN
+
+#define CV_SIMD128 1
+#if defined(__aarch64__) || defined(_M_ARM64)
+#define CV_SIMD128_64F 1
+#else
+#define CV_SIMD128_64F 0
+#endif
+
+// The following macro checks if the code is being compiled for the
+// AArch64 execution state of Armv8, to enable the 128-bit
+// intrinsics. The macro `__ARM_64BIT_STATE` is the one recommended by
+// the Arm C Language Extension (ACLE) specifications [1] to check the
+// availability of 128-bit intrinsics, and it is supported by clang
+// and gcc. The macro `_M_ARM64` is the equivalent one for Microsoft
+// Visual Studio [2].
+//
+// [1] https://developer.arm.com/documentation/101028/0012/13--Advanced-SIMD--Neon--intrinsics
+// [2] https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros
+#if defined(__ARM_64BIT_STATE) || defined(_M_ARM64)
+#define CV_NEON_AARCH64 1
+#else
+#define CV_NEON_AARCH64 0
+#endif
+
+// TODO
+#define CV_NEON_DOT 0
+
+//////////// Utils ////////////
+
+#if CV_SIMD128_64F
+#define OPENCV_HAL_IMPL_NEON_UNZIP(_Tpv, _Tpvx2, suffix) \
+    inline void _v128_unzip(const _Tpv& a, const _Tpv& b, _Tpv& c, _Tpv& d) \
+    { c = vuzp1q_##suffix(a, b); d = vuzp2q_##suffix(a, b); }
+#define OPENCV_HAL_IMPL_NEON_UNZIP_L(_Tpv, _Tpvx2, suffix) \
+    inline void _v128_unzip(const _Tpv&a, const _Tpv&b, _Tpv& c, _Tpv& d) \
+    { c = vuzp1_##suffix(a, b); d = vuzp2_##suffix(a, b); }
+#else
+#define OPENCV_HAL_IMPL_NEON_UNZIP(_Tpv, _Tpvx2, suffix) \
+    inline void _v128_unzip(const _Tpv& a, const _Tpv& b, _Tpv& c, _Tpv& d) \
+    { _Tpvx2 ab = vuzpq_##suffix(a, b); c = ab.val[0]; d = ab.val[1]; }
+#define OPENCV_HAL_IMPL_NEON_UNZIP_L(_Tpv, _Tpvx2, suffix) \
+    inline void _v128_unzip(const _Tpv& a, const _Tpv& b, _Tpv& c, _Tpv& d) \
+    { _Tpvx2 ab = vuzp_##suffix(a, b); c = ab.val[0]; d = ab.val[1]; }
+#endif
+
+#if CV_SIMD128_64F
+#define OPENCV_HAL_IMPL_NEON_REINTERPRET(_Tpv, suffix) \
+    template <typename T> static inline \
+    _Tpv vreinterpretq_##suffix##_f64(T a) { return (_Tpv) a; } \
+    template <typename T> static inline \
+    float64x2_t vreinterpretq_f64_##suffix(T a) { return (float64x2_t) a; }
+#else
+#define OPENCV_HAL_IMPL_NEON_REINTERPRET(_Tpv, suffix)
+#endif
+
+#define OPENCV_HAL_IMPL_NEON_UTILS_SUFFIX(_Tpv, _Tpvl, suffix) \
+    OPENCV_HAL_IMPL_NEON_UNZIP(_Tpv##_t, _Tpv##x2_t, suffix) \
+    OPENCV_HAL_IMPL_NEON_UNZIP_L(_Tpvl##_t, _Tpvl##x2_t, suffix) \
+    OPENCV_HAL_IMPL_NEON_REINTERPRET(_Tpv##_t, suffix)
+
+#define OPENCV_HAL_IMPL_NEON_UTILS_SUFFIX_I64(_Tpv, _Tpvl, suffix) \
+    OPENCV_HAL_IMPL_NEON_REINTERPRET(_Tpv##_t, suffix)
+
+#define OPENCV_HAL_IMPL_NEON_UTILS_SUFFIX_F64(_Tpv, _Tpvl, suffix) \
+    OPENCV_HAL_IMPL_NEON_UNZIP(_Tpv##_t, _Tpv##x2_t, suffix)
+
+OPENCV_HAL_IMPL_NEON_UTILS_SUFFIX(uint8x16, uint8x8, u8)
+OPENCV_HAL_IMPL_NEON_UTILS_SUFFIX(int8x16, int8x8, s8)
+OPENCV_HAL_IMPL_NEON_UTILS_SUFFIX(uint16x8, uint16x4, u16)
+OPENCV_HAL_IMPL_NEON_UTILS_SUFFIX(int16x8, int16x4, s16)
+OPENCV_HAL_IMPL_NEON_UTILS_SUFFIX(uint32x4, uint32x2, u32)
+OPENCV_HAL_IMPL_NEON_UTILS_SUFFIX(int32x4, int32x2, s32)
+OPENCV_HAL_IMPL_NEON_UTILS_SUFFIX(float32x4, float32x2, f32)
+OPENCV_HAL_IMPL_NEON_UTILS_SUFFIX_I64(uint64x2, uint64x1, u64)
+OPENCV_HAL_IMPL_NEON_UTILS_SUFFIX_I64(int64x2, int64x1, s64)
+#if CV_SIMD128_64F
+OPENCV_HAL_IMPL_NEON_UTILS_SUFFIX_F64(float64x2, float64x1,f64)
+#endif
+
+//////////// Types ////////////
+
+struct v_uint8x16
+{
+    typedef uchar lane_type;
+    enum { nlanes = 16 };
+
+    v_uint8x16() {}
+    explicit v_uint8x16(uint8x16_t v) : val(v) {}
+    v_uint8x16(uchar v0, uchar v1, uchar v2, uchar v3, uchar v4, uchar v5, uchar v6, uchar v7,
+               uchar v8, uchar v9, uchar v10, uchar v11, uchar v12, uchar v13, uchar v14, uchar v15)
+    {
+        uchar v[] = {v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15};
+        val = vld1q_u8(v);
+    }
+    uchar get0() const
+    {
+        return vgetq_lane_u8(val, 0);
+    }
+
+    uint8x16_t val;
+};
+
+struct v_int8x16
+{
+    typedef schar lane_type;
+    enum { nlanes = 16 };
+
+    v_int8x16() {}
+    explicit v_int8x16(int8x16_t v) : val(v) {}
+    v_int8x16(schar v0, schar v1, schar v2, schar v3, schar v4, schar v5, schar v6, schar v7,
+              schar v8, schar v9, schar v10, schar v11, schar v12, schar v13, schar v14,
schar v15) + { + schar v[] = {v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15}; + val = vld1q_s8(v); + } + schar get0() const + { + return vgetq_lane_s8(val, 0); + } + + int8x16_t val; +}; + +struct v_uint16x8 +{ + typedef ushort lane_type; + enum { nlanes = 8 }; + + v_uint16x8() {} + explicit v_uint16x8(uint16x8_t v) : val(v) {} + v_uint16x8(ushort v0, ushort v1, ushort v2, ushort v3, ushort v4, ushort v5, ushort v6, ushort v7) + { + ushort v[] = {v0, v1, v2, v3, v4, v5, v6, v7}; + val = vld1q_u16(v); + } + ushort get0() const + { + return vgetq_lane_u16(val, 0); + } + + uint16x8_t val; +}; + +struct v_int16x8 +{ + typedef short lane_type; + enum { nlanes = 8 }; + + v_int16x8() {} + explicit v_int16x8(int16x8_t v) : val(v) {} + v_int16x8(short v0, short v1, short v2, short v3, short v4, short v5, short v6, short v7) + { + short v[] = {v0, v1, v2, v3, v4, v5, v6, v7}; + val = vld1q_s16(v); + } + short get0() const + { + return vgetq_lane_s16(val, 0); + } + + int16x8_t val; +}; + +struct v_uint32x4 +{ + typedef unsigned lane_type; + enum { nlanes = 4 }; + + v_uint32x4() {} + explicit v_uint32x4(uint32x4_t v) : val(v) {} + v_uint32x4(unsigned v0, unsigned v1, unsigned v2, unsigned v3) + { + unsigned v[] = {v0, v1, v2, v3}; + val = vld1q_u32(v); + } + unsigned get0() const + { + return vgetq_lane_u32(val, 0); + } + + uint32x4_t val; +}; + +struct v_int32x4 +{ + typedef int lane_type; + enum { nlanes = 4 }; + + v_int32x4() {} + explicit v_int32x4(int32x4_t v) : val(v) {} + v_int32x4(int v0, int v1, int v2, int v3) + { + int v[] = {v0, v1, v2, v3}; + val = vld1q_s32(v); + } + int get0() const + { + return vgetq_lane_s32(val, 0); + } + int32x4_t val; +}; + +struct v_float32x4 +{ + typedef float lane_type; + enum { nlanes = 4 }; + + v_float32x4() {} + explicit v_float32x4(float32x4_t v) : val(v) {} + v_float32x4(float v0, float v1, float v2, float v3) + { + float v[] = {v0, v1, v2, v3}; + val = vld1q_f32(v); + } + float get0() const + { + return vgetq_lane_f32(val, 0); + } + float32x4_t val; +}; + +struct v_uint64x2 +{ + typedef uint64 lane_type; + enum { nlanes = 2 }; + + v_uint64x2() {} + explicit v_uint64x2(uint64x2_t v) : val(v) {} + v_uint64x2(uint64 v0, uint64 v1) + { + uint64 v[] = {v0, v1}; + val = vld1q_u64(v); + } + uint64 get0() const + { + return vgetq_lane_u64(val, 0); + } + uint64x2_t val; +}; + +struct v_int64x2 +{ + typedef int64 lane_type; + enum { nlanes = 2 }; + + v_int64x2() {} + explicit v_int64x2(int64x2_t v) : val(v) {} + v_int64x2(int64 v0, int64 v1) + { + int64 v[] = {v0, v1}; + val = vld1q_s64(v); + } + int64 get0() const + { + return vgetq_lane_s64(val, 0); + } + int64x2_t val; +}; + +#if CV_SIMD128_64F +struct v_float64x2 +{ + typedef double lane_type; + enum { nlanes = 2 }; + + v_float64x2() {} + explicit v_float64x2(float64x2_t v) : val(v) {} + v_float64x2(double v0, double v1) + { + double v[] = {v0, v1}; + val = vld1q_f64(v); + } + double get0() const + { + return vgetq_lane_f64(val, 0); + } + float64x2_t val; +}; +#endif + +#define OPENCV_HAL_IMPL_NEON_INIT(_Tpv, _Tp, suffix) \ +inline v_##_Tpv v_setzero_##suffix() { return v_##_Tpv(vdupq_n_##suffix((_Tp)0)); } \ +inline v_##_Tpv v_setall_##suffix(_Tp v) { return v_##_Tpv(vdupq_n_##suffix(v)); } \ +inline _Tpv##_t vreinterpretq_##suffix##_##suffix(_Tpv##_t v) { return v; } \ +inline v_uint8x16 v_reinterpret_as_u8(const v_##_Tpv& v) { return v_uint8x16(vreinterpretq_u8_##suffix(v.val)); } \ +inline v_int8x16 v_reinterpret_as_s8(const v_##_Tpv& v) { return 
v_int8x16(vreinterpretq_s8_##suffix(v.val)); } \
+inline v_uint16x8 v_reinterpret_as_u16(const v_##_Tpv& v) { return v_uint16x8(vreinterpretq_u16_##suffix(v.val)); } \
+inline v_int16x8 v_reinterpret_as_s16(const v_##_Tpv& v) { return v_int16x8(vreinterpretq_s16_##suffix(v.val)); } \
+inline v_uint32x4 v_reinterpret_as_u32(const v_##_Tpv& v) { return v_uint32x4(vreinterpretq_u32_##suffix(v.val)); } \
+inline v_int32x4 v_reinterpret_as_s32(const v_##_Tpv& v) { return v_int32x4(vreinterpretq_s32_##suffix(v.val)); } \
+inline v_uint64x2 v_reinterpret_as_u64(const v_##_Tpv& v) { return v_uint64x2(vreinterpretq_u64_##suffix(v.val)); } \
+inline v_int64x2 v_reinterpret_as_s64(const v_##_Tpv& v) { return v_int64x2(vreinterpretq_s64_##suffix(v.val)); } \
+inline v_float32x4 v_reinterpret_as_f32(const v_##_Tpv& v) { return v_float32x4(vreinterpretq_f32_##suffix(v.val)); }
+
+OPENCV_HAL_IMPL_NEON_INIT(uint8x16, uchar, u8)
+OPENCV_HAL_IMPL_NEON_INIT(int8x16, schar, s8)
+OPENCV_HAL_IMPL_NEON_INIT(uint16x8, ushort, u16)
+OPENCV_HAL_IMPL_NEON_INIT(int16x8, short, s16)
+OPENCV_HAL_IMPL_NEON_INIT(uint32x4, unsigned, u32)
+OPENCV_HAL_IMPL_NEON_INIT(int32x4, int, s32)
+OPENCV_HAL_IMPL_NEON_INIT(uint64x2, uint64, u64)
+OPENCV_HAL_IMPL_NEON_INIT(int64x2, int64, s64)
+OPENCV_HAL_IMPL_NEON_INIT(float32x4, float, f32)
+#if CV_SIMD128_64F
+#define OPENCV_HAL_IMPL_NEON_INIT_64(_Tpv, suffix) \
+inline v_float64x2 v_reinterpret_as_f64(const v_##_Tpv& v) { return v_float64x2(vreinterpretq_f64_##suffix(v.val)); }
+OPENCV_HAL_IMPL_NEON_INIT(float64x2, double, f64)
+OPENCV_HAL_IMPL_NEON_INIT_64(uint8x16, u8)
+OPENCV_HAL_IMPL_NEON_INIT_64(int8x16, s8)
+OPENCV_HAL_IMPL_NEON_INIT_64(uint16x8, u16)
+OPENCV_HAL_IMPL_NEON_INIT_64(int16x8, s16)
+OPENCV_HAL_IMPL_NEON_INIT_64(uint32x4, u32)
+OPENCV_HAL_IMPL_NEON_INIT_64(int32x4, s32)
+OPENCV_HAL_IMPL_NEON_INIT_64(uint64x2, u64)
+OPENCV_HAL_IMPL_NEON_INIT_64(int64x2, s64)
+OPENCV_HAL_IMPL_NEON_INIT_64(float32x4, f32)
+OPENCV_HAL_IMPL_NEON_INIT_64(float64x2, f64)
+#endif
+
+#define OPENCV_HAL_IMPL_NEON_PACK(_Tpvec, _Tp, hreg, suffix, _Tpwvec, pack, mov, rshr) \
+inline _Tpvec v_##pack(const _Tpwvec& a, const _Tpwvec& b) \
+{ \
+    hreg a1 = mov(a.val), b1 = mov(b.val); \
+    return _Tpvec(vcombine_##suffix(a1, b1)); \
+} \
+inline void v_##pack##_store(_Tp* ptr, const _Tpwvec& a) \
+{ \
+    hreg a1 = mov(a.val); \
+    vst1_##suffix(ptr, a1); \
+} \
+template<int n> inline \
+_Tpvec v_rshr_##pack(const _Tpwvec& a, const _Tpwvec& b) \
+{ \
+    hreg a1 = rshr(a.val, n); \
+    hreg b1 = rshr(b.val, n); \
+    return _Tpvec(vcombine_##suffix(a1, b1)); \
+} \
+template<int n> inline \
+void v_rshr_##pack##_store(_Tp* ptr, const _Tpwvec& a) \
+{ \
+    hreg a1 = rshr(a.val, n); \
+    vst1_##suffix(ptr, a1); \
+}
+
+OPENCV_HAL_IMPL_NEON_PACK(v_uint8x16, uchar, uint8x8_t, u8, v_uint16x8, pack, vqmovn_u16, vqrshrn_n_u16)
+OPENCV_HAL_IMPL_NEON_PACK(v_int8x16, schar, int8x8_t, s8, v_int16x8, pack, vqmovn_s16, vqrshrn_n_s16)
+OPENCV_HAL_IMPL_NEON_PACK(v_uint16x8, ushort, uint16x4_t, u16, v_uint32x4, pack, vqmovn_u32, vqrshrn_n_u32)
+OPENCV_HAL_IMPL_NEON_PACK(v_int16x8, short, int16x4_t, s16, v_int32x4, pack, vqmovn_s32, vqrshrn_n_s32)
+OPENCV_HAL_IMPL_NEON_PACK(v_uint32x4, unsigned, uint32x2_t, u32, v_uint64x2, pack, vmovn_u64, vrshrn_n_u64)
+OPENCV_HAL_IMPL_NEON_PACK(v_int32x4, int, int32x2_t, s32, v_int64x2, pack, vmovn_s64, vrshrn_n_s64)
+
+OPENCV_HAL_IMPL_NEON_PACK(v_uint8x16, uchar, uint8x8_t, u8, v_int16x8, pack_u, vqmovun_s16, vqrshrun_n_s16)
+OPENCV_HAL_IMPL_NEON_PACK(v_uint16x8, ushort, uint16x4_t, u16, v_int32x4,
pack_u, vqmovun_s32, vqrshrun_n_s32) + +// pack boolean +inline v_uint8x16 v_pack_b(const v_uint16x8& a, const v_uint16x8& b) +{ + uint8x16_t ab = vcombine_u8(vmovn_u16(a.val), vmovn_u16(b.val)); + return v_uint8x16(ab); +} + +inline v_uint8x16 v_pack_b(const v_uint32x4& a, const v_uint32x4& b, + const v_uint32x4& c, const v_uint32x4& d) +{ + uint16x8_t nab = vcombine_u16(vmovn_u32(a.val), vmovn_u32(b.val)); + uint16x8_t ncd = vcombine_u16(vmovn_u32(c.val), vmovn_u32(d.val)); + return v_uint8x16(vcombine_u8(vmovn_u16(nab), vmovn_u16(ncd))); +} + +inline v_uint8x16 v_pack_b(const v_uint64x2& a, const v_uint64x2& b, const v_uint64x2& c, + const v_uint64x2& d, const v_uint64x2& e, const v_uint64x2& f, + const v_uint64x2& g, const v_uint64x2& h) +{ + uint32x4_t ab = vcombine_u32(vmovn_u64(a.val), vmovn_u64(b.val)); + uint32x4_t cd = vcombine_u32(vmovn_u64(c.val), vmovn_u64(d.val)); + uint32x4_t ef = vcombine_u32(vmovn_u64(e.val), vmovn_u64(f.val)); + uint32x4_t gh = vcombine_u32(vmovn_u64(g.val), vmovn_u64(h.val)); + + uint16x8_t abcd = vcombine_u16(vmovn_u32(ab), vmovn_u32(cd)); + uint16x8_t efgh = vcombine_u16(vmovn_u32(ef), vmovn_u32(gh)); + return v_uint8x16(vcombine_u8(vmovn_u16(abcd), vmovn_u16(efgh))); +} + +inline v_float32x4 v_matmul(const v_float32x4& v, const v_float32x4& m0, + const v_float32x4& m1, const v_float32x4& m2, + const v_float32x4& m3) +{ + float32x2_t vl = vget_low_f32(v.val), vh = vget_high_f32(v.val); + float32x4_t res = vmulq_lane_f32(m0.val, vl, 0); + res = vmlaq_lane_f32(res, m1.val, vl, 1); + res = vmlaq_lane_f32(res, m2.val, vh, 0); + res = vmlaq_lane_f32(res, m3.val, vh, 1); + return v_float32x4(res); +} + +inline v_float32x4 v_matmuladd(const v_float32x4& v, const v_float32x4& m0, + const v_float32x4& m1, const v_float32x4& m2, + const v_float32x4& a) +{ + float32x2_t vl = vget_low_f32(v.val), vh = vget_high_f32(v.val); + float32x4_t res = vmulq_lane_f32(m0.val, vl, 0); + res = vmlaq_lane_f32(res, m1.val, vl, 1); + res = vmlaq_lane_f32(res, m2.val, vh, 0); + res = vaddq_f32(res, a.val); + return v_float32x4(res); +} + +#define OPENCV_HAL_IMPL_NEON_BIN_OP(bin_op, _Tpvec, intrin) \ +inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \ +{ \ + return _Tpvec(intrin(a.val, b.val)); \ +} \ +inline _Tpvec& operator bin_op##= (_Tpvec& a, const _Tpvec& b) \ +{ \ + a.val = intrin(a.val, b.val); \ + return a; \ +} + +OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_uint8x16, vqaddq_u8) +OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_uint8x16, vqsubq_u8) +OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_int8x16, vqaddq_s8) +OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_int8x16, vqsubq_s8) +OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_uint16x8, vqaddq_u16) +OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_uint16x8, vqsubq_u16) +OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_int16x8, vqaddq_s16) +OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_int16x8, vqsubq_s16) +OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_int32x4, vaddq_s32) +OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_int32x4, vsubq_s32) +OPENCV_HAL_IMPL_NEON_BIN_OP(*, v_int32x4, vmulq_s32) +OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_uint32x4, vaddq_u32) +OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_uint32x4, vsubq_u32) +OPENCV_HAL_IMPL_NEON_BIN_OP(*, v_uint32x4, vmulq_u32) +OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_float32x4, vaddq_f32) +OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_float32x4, vsubq_f32) +OPENCV_HAL_IMPL_NEON_BIN_OP(*, v_float32x4, vmulq_f32) +OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_int64x2, vaddq_s64) +OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_int64x2, vsubq_s64) +OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_uint64x2, vaddq_u64) +OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_uint64x2, vsubq_u64) +#if 
CV_SIMD128_64F +OPENCV_HAL_IMPL_NEON_BIN_OP(/, v_float32x4, vdivq_f32) +OPENCV_HAL_IMPL_NEON_BIN_OP(+, v_float64x2, vaddq_f64) +OPENCV_HAL_IMPL_NEON_BIN_OP(-, v_float64x2, vsubq_f64) +OPENCV_HAL_IMPL_NEON_BIN_OP(*, v_float64x2, vmulq_f64) +OPENCV_HAL_IMPL_NEON_BIN_OP(/, v_float64x2, vdivq_f64) +#else +inline v_float32x4 operator / (const v_float32x4& a, const v_float32x4& b) +{ + float32x4_t reciprocal = vrecpeq_f32(b.val); + reciprocal = vmulq_f32(vrecpsq_f32(b.val, reciprocal), reciprocal); + reciprocal = vmulq_f32(vrecpsq_f32(b.val, reciprocal), reciprocal); + return v_float32x4(vmulq_f32(a.val, reciprocal)); +} +inline v_float32x4& operator /= (v_float32x4& a, const v_float32x4& b) +{ + float32x4_t reciprocal = vrecpeq_f32(b.val); + reciprocal = vmulq_f32(vrecpsq_f32(b.val, reciprocal), reciprocal); + reciprocal = vmulq_f32(vrecpsq_f32(b.val, reciprocal), reciprocal); + a.val = vmulq_f32(a.val, reciprocal); + return a; +} +#endif + +// saturating multiply 8-bit, 16-bit +#define OPENCV_HAL_IMPL_NEON_MUL_SAT(_Tpvec, _Tpwvec) \ + inline _Tpvec operator * (const _Tpvec& a, const _Tpvec& b) \ + { \ + _Tpwvec c, d; \ + v_mul_expand(a, b, c, d); \ + return v_pack(c, d); \ + } \ + inline _Tpvec& operator *= (_Tpvec& a, const _Tpvec& b) \ + { a = a * b; return a; } + +OPENCV_HAL_IMPL_NEON_MUL_SAT(v_int8x16, v_int16x8) +OPENCV_HAL_IMPL_NEON_MUL_SAT(v_uint8x16, v_uint16x8) +OPENCV_HAL_IMPL_NEON_MUL_SAT(v_int16x8, v_int32x4) +OPENCV_HAL_IMPL_NEON_MUL_SAT(v_uint16x8, v_uint32x4) + +// Multiply and expand +inline void v_mul_expand(const v_int8x16& a, const v_int8x16& b, + v_int16x8& c, v_int16x8& d) +{ + c.val = vmull_s8(vget_low_s8(a.val), vget_low_s8(b.val)); +#if CV_NEON_AARCH64 + d.val = vmull_high_s8(a.val, b.val); +#else // #if CV_NEON_AARCH64 + d.val = vmull_s8(vget_high_s8(a.val), vget_high_s8(b.val)); +#endif // #if CV_NEON_AARCH64 +} + +inline void v_mul_expand(const v_uint8x16& a, const v_uint8x16& b, + v_uint16x8& c, v_uint16x8& d) +{ + c.val = vmull_u8(vget_low_u8(a.val), vget_low_u8(b.val)); +#if CV_NEON_AARCH64 + d.val = vmull_high_u8(a.val, b.val); +#else // #if CV_NEON_AARCH64 + d.val = vmull_u8(vget_high_u8(a.val), vget_high_u8(b.val)); +#endif // #if CV_NEON_AARCH64 +} + +inline void v_mul_expand(const v_int16x8& a, const v_int16x8& b, + v_int32x4& c, v_int32x4& d) +{ + c.val = vmull_s16(vget_low_s16(a.val), vget_low_s16(b.val)); +#if CV_NEON_AARCH64 + d.val = vmull_high_s16(a.val, b.val); +#else // #if CV_NEON_AARCH64 + d.val = vmull_s16(vget_high_s16(a.val), vget_high_s16(b.val)); +#endif // #if CV_NEON_AARCH64 +} + +inline void v_mul_expand(const v_uint16x8& a, const v_uint16x8& b, + v_uint32x4& c, v_uint32x4& d) +{ + c.val = vmull_u16(vget_low_u16(a.val), vget_low_u16(b.val)); +#if CV_NEON_AARCH64 + d.val = vmull_high_u16(a.val, b.val); +#else // #if CV_NEON_AARCH64 + d.val = vmull_u16(vget_high_u16(a.val), vget_high_u16(b.val)); +#endif // #if CV_NEON_AARCH64 +} + +inline void v_mul_expand(const v_uint32x4& a, const v_uint32x4& b, + v_uint64x2& c, v_uint64x2& d) +{ + c.val = vmull_u32(vget_low_u32(a.val), vget_low_u32(b.val)); +#if CV_NEON_AARCH64 + d.val = vmull_high_u32(a.val, b.val); +#else // #if CV_NEON_AARCH64 + d.val = vmull_u32(vget_high_u32(a.val), vget_high_u32(b.val)); +#endif // #if CV_NEON_AARCH64 +} + +inline v_int16x8 v_mul_hi(const v_int16x8& a, const v_int16x8& b) +{ + return v_int16x8(vcombine_s16( + vshrn_n_s32(vmull_s16( vget_low_s16(a.val), vget_low_s16(b.val)), 16), + vshrn_n_s32( +#if CV_NEON_AARCH64 + vmull_high_s16(a.val, b.val) +#else // #if 
CV_NEON_AARCH64 + vmull_s16(vget_high_s16(a.val), vget_high_s16(b.val)) +#endif // #if CV_NEON_AARCH64 + , 16) + )); +} +inline v_uint16x8 v_mul_hi(const v_uint16x8& a, const v_uint16x8& b) +{ + return v_uint16x8(vcombine_u16( + vshrn_n_u32(vmull_u16( vget_low_u16(a.val), vget_low_u16(b.val)), 16), + vshrn_n_u32( +#if CV_NEON_AARCH64 + vmull_high_u16(a.val, b.val) +#else // #if CV_NEON_AARCH64 + vmull_u16(vget_high_u16(a.val), vget_high_u16(b.val)) +#endif // #if CV_NEON_AARCH64 + , 16) + )); +} + +//////// Dot Product //////// + +// 16 >> 32 +inline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b) +{ + int16x8_t uzp1, uzp2; + _v128_unzip(a.val, b.val, uzp1, uzp2); + int16x4_t a0 = vget_low_s16(uzp1); + int16x4_t b0 = vget_high_s16(uzp1); + int16x4_t a1 = vget_low_s16(uzp2); + int16x4_t b1 = vget_high_s16(uzp2); + int32x4_t p = vmull_s16(a0, b0); + return v_int32x4(vmlal_s16(p, a1, b1)); +} +inline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b, const v_int32x4& c) +{ + int16x8_t uzp1, uzp2; + _v128_unzip(a.val, b.val, uzp1, uzp2); + int16x4_t a0 = vget_low_s16(uzp1); + int16x4_t b0 = vget_high_s16(uzp1); + int16x4_t a1 = vget_low_s16(uzp2); + int16x4_t b1 = vget_high_s16(uzp2); + int32x4_t p = vmlal_s16(c.val, a0, b0); + return v_int32x4(vmlal_s16(p, a1, b1)); +} + +// 32 >> 64 +inline v_int64x2 v_dotprod(const v_int32x4& a, const v_int32x4& b) +{ + int32x4_t uzp1, uzp2; + _v128_unzip(a.val, b.val, uzp1, uzp2); + int32x2_t a0 = vget_low_s32(uzp1); + int32x2_t b0 = vget_high_s32(uzp1); + int32x2_t a1 = vget_low_s32(uzp2); + int32x2_t b1 = vget_high_s32(uzp2); + int64x2_t p = vmull_s32(a0, b0); + return v_int64x2(vmlal_s32(p, a1, b1)); +} +inline v_int64x2 v_dotprod(const v_int32x4& a, const v_int32x4& b, const v_int64x2& c) +{ + int32x4_t uzp1, uzp2; + _v128_unzip(a.val, b.val, uzp1, uzp2); + int32x2_t a0 = vget_low_s32(uzp1); + int32x2_t b0 = vget_high_s32(uzp1); + int32x2_t a1 = vget_low_s32(uzp2); + int32x2_t b1 = vget_high_s32(uzp2); + int64x2_t p = vmlal_s32(c.val, a0, b0); + return v_int64x2(vmlal_s32(p, a1, b1)); +} + +// 8 >> 32 +inline v_uint32x4 v_dotprod_expand(const v_uint8x16& a, const v_uint8x16& b) +{ +#if CV_NEON_DOT + return v_uint32x4(vdotq_u32(vdupq_n_u32(0), a.val, b.val)); +#else + const uint8x16_t zero = vreinterpretq_u8_u32(vdupq_n_u32(0)); + const uint8x16_t mask = vreinterpretq_u8_u32(vdupq_n_u32(0x00FF00FF)); + const uint16x8_t zero32 = vreinterpretq_u16_u32(vdupq_n_u32(0)); + const uint16x8_t mask32 = vreinterpretq_u16_u32(vdupq_n_u32(0x0000FFFF)); + + uint16x8_t even = vmulq_u16(vreinterpretq_u16_u8(vbslq_u8(mask, a.val, zero)), + vreinterpretq_u16_u8(vbslq_u8(mask, b.val, zero))); + uint16x8_t odd = vmulq_u16(vshrq_n_u16(vreinterpretq_u16_u8(a.val), 8), + vshrq_n_u16(vreinterpretq_u16_u8(b.val), 8)); + + uint32x4_t s0 = vaddq_u32(vreinterpretq_u32_u16(vbslq_u16(mask32, even, zero32)), + vreinterpretq_u32_u16(vbslq_u16(mask32, odd, zero32))); + uint32x4_t s1 = vaddq_u32(vshrq_n_u32(vreinterpretq_u32_u16(even), 16), + vshrq_n_u32(vreinterpretq_u32_u16(odd), 16)); + return v_uint32x4(vaddq_u32(s0, s1)); +#endif +} +inline v_uint32x4 v_dotprod_expand(const v_uint8x16& a, const v_uint8x16& b, + const v_uint32x4& c) +{ +#if CV_NEON_DOT + return v_uint32x4(vdotq_u32(c.val, a.val, b.val)); +#else + return v_dotprod_expand(a, b) + c; +#endif +} + +inline v_int32x4 v_dotprod_expand(const v_int8x16& a, const v_int8x16& b) +{ +#if CV_NEON_DOT + return v_int32x4(vdotq_s32(vdupq_n_s32(0), a.val, b.val)); +#else + int16x8_t p0 = 
vmull_s8(vget_low_s8(a.val), vget_low_s8(b.val)); + int16x8_t p1 = vmull_s8(vget_high_s8(a.val), vget_high_s8(b.val)); + int16x8_t uzp1, uzp2; + _v128_unzip(p0, p1, uzp1, uzp2); + int16x8_t sum = vaddq_s16(uzp1, uzp2); + int16x4_t uzpl1, uzpl2; + _v128_unzip(vget_low_s16(sum), vget_high_s16(sum), uzpl1, uzpl2); + return v_int32x4(vaddl_s16(uzpl1, uzpl2)); +#endif +} +inline v_int32x4 v_dotprod_expand(const v_int8x16& a, const v_int8x16& b, + const v_int32x4& c) +{ +#if CV_NEON_DOT + return v_int32x4(vdotq_s32(c.val, a.val, b.val)); +#else + return v_dotprod_expand(a, b) + c; +#endif +} + +// 16 >> 64 +inline v_uint64x2 v_dotprod_expand(const v_uint16x8& a, const v_uint16x8& b) +{ + const uint16x8_t zero = vreinterpretq_u16_u32(vdupq_n_u32(0)); + const uint16x8_t mask = vreinterpretq_u16_u32(vdupq_n_u32(0x0000FFFF)); + + uint32x4_t even = vmulq_u32(vreinterpretq_u32_u16(vbslq_u16(mask, a.val, zero)), + vreinterpretq_u32_u16(vbslq_u16(mask, b.val, zero))); + uint32x4_t odd = vmulq_u32(vshrq_n_u32(vreinterpretq_u32_u16(a.val), 16), + vshrq_n_u32(vreinterpretq_u32_u16(b.val), 16)); + uint32x4_t uzp1, uzp2; + _v128_unzip(even, odd, uzp1, uzp2); + uint64x2_t s0 = vaddl_u32(vget_low_u32(uzp1), vget_high_u32(uzp1)); + uint64x2_t s1 = vaddl_u32(vget_low_u32(uzp2), vget_high_u32(uzp2)); + return v_uint64x2(vaddq_u64(s0, s1)); +} +inline v_uint64x2 v_dotprod_expand(const v_uint16x8& a, const v_uint16x8& b, const v_uint64x2& c) +{ return v_dotprod_expand(a, b) + c; } + +inline v_int64x2 v_dotprod_expand(const v_int16x8& a, const v_int16x8& b) +{ + int32x4_t p0 = vmull_s16(vget_low_s16(a.val), vget_low_s16(b.val)); + int32x4_t p1 = vmull_s16(vget_high_s16(a.val), vget_high_s16(b.val)); + + int32x4_t uzp1, uzp2; + _v128_unzip(p0, p1, uzp1, uzp2); + int32x4_t sum = vaddq_s32(uzp1, uzp2); + + int32x2_t uzpl1, uzpl2; + _v128_unzip(vget_low_s32(sum), vget_high_s32(sum), uzpl1, uzpl2); + return v_int64x2(vaddl_s32(uzpl1, uzpl2)); +} +inline v_int64x2 v_dotprod_expand(const v_int16x8& a, const v_int16x8& b, + const v_int64x2& c) +{ return v_dotprod_expand(a, b) + c; } + +// 32 >> 64f +#if CV_SIMD128_64F +inline v_float64x2 v_dotprod_expand(const v_int32x4& a, const v_int32x4& b) +{ return v_cvt_f64(v_dotprod(a, b)); } +inline v_float64x2 v_dotprod_expand(const v_int32x4& a, const v_int32x4& b, + const v_float64x2& c) +{ return v_dotprod_expand(a, b) + c; } +#endif + +//////// Fast Dot Product //////// + +// 16 >> 32 +inline v_int32x4 v_dotprod_fast(const v_int16x8& a, const v_int16x8& b) +{ +#if CV_NEON_AARCH64 + int32x4_t p = vmull_s16(vget_low_s16(a.val), vget_low_s16(b.val)); + return v_int32x4(vmlal_high_s16(p, a.val, b.val)); +#else + int16x4_t a0 = vget_low_s16(a.val); + int16x4_t a1 = vget_high_s16(a.val); + int16x4_t b0 = vget_low_s16(b.val); + int16x4_t b1 = vget_high_s16(b.val); + int32x4_t p = vmull_s16(a0, b0); + return v_int32x4(vmlal_s16(p, a1, b1)); +#endif +} +inline v_int32x4 v_dotprod_fast(const v_int16x8& a, const v_int16x8& b, const v_int32x4& c) +{ +#if CV_NEON_AARCH64 + int32x4_t p = vmlal_s16(c.val, vget_low_s16(a.val), vget_low_s16(b.val)); + return v_int32x4(vmlal_high_s16(p, a.val, b.val)); +#else + int16x4_t a0 = vget_low_s16(a.val); + int16x4_t a1 = vget_high_s16(a.val); + int16x4_t b0 = vget_low_s16(b.val); + int16x4_t b1 = vget_high_s16(b.val); + int32x4_t p = vmlal_s16(c.val, a0, b0); + return v_int32x4(vmlal_s16(p, a1, b1)); +#endif +} + +// 32 >> 64 +inline v_int64x2 v_dotprod_fast(const v_int32x4& a, const v_int32x4& b) +{ +#if CV_NEON_AARCH64 + int64x2_t p = 
vmull_s32(vget_low_s32(a.val), vget_low_s32(b.val)); + return v_int64x2(vmlal_high_s32(p, a.val, b.val)); +#else + int32x2_t a0 = vget_low_s32(a.val); + int32x2_t a1 = vget_high_s32(a.val); + int32x2_t b0 = vget_low_s32(b.val); + int32x2_t b1 = vget_high_s32(b.val); + int64x2_t p = vmull_s32(a0, b0); + return v_int64x2(vmlal_s32(p, a1, b1)); +#endif +} +inline v_int64x2 v_dotprod_fast(const v_int32x4& a, const v_int32x4& b, const v_int64x2& c) +{ +#if CV_NEON_AARCH64 + int64x2_t p = vmlal_s32(c.val, vget_low_s32(a.val), vget_low_s32(b.val)); + return v_int64x2(vmlal_high_s32(p, a.val, b.val)); +#else + int32x2_t a0 = vget_low_s32(a.val); + int32x2_t a1 = vget_high_s32(a.val); + int32x2_t b0 = vget_low_s32(b.val); + int32x2_t b1 = vget_high_s32(b.val); + int64x2_t p = vmlal_s32(c.val, a0, b0); + return v_int64x2(vmlal_s32(p, a1, b1)); +#endif +} + +// 8 >> 32 +inline v_uint32x4 v_dotprod_expand_fast(const v_uint8x16& a, const v_uint8x16& b) +{ +#if CV_NEON_DOT + return v_uint32x4(vdotq_u32(vdupq_n_u32(0), a.val, b.val)); +#else + uint16x8_t p0 = vmull_u8(vget_low_u8(a.val), vget_low_u8(b.val)); + uint16x8_t p1 = vmull_u8(vget_high_u8(a.val), vget_high_u8(b.val)); + uint32x4_t s0 = vaddl_u16(vget_low_u16(p0), vget_low_u16(p1)); + uint32x4_t s1 = vaddl_u16(vget_high_u16(p0), vget_high_u16(p1)); + return v_uint32x4(vaddq_u32(s0, s1)); +#endif +} +inline v_uint32x4 v_dotprod_expand_fast(const v_uint8x16& a, const v_uint8x16& b, const v_uint32x4& c) +{ +#if CV_NEON_DOT + return v_uint32x4(vdotq_u32(c.val, a.val, b.val)); +#else + return v_dotprod_expand_fast(a, b) + c; +#endif +} + +inline v_int32x4 v_dotprod_expand_fast(const v_int8x16& a, const v_int8x16& b) +{ +#if CV_NEON_DOT + return v_int32x4(vdotq_s32(vdupq_n_s32(0), a.val, b.val)); +#else + int16x8_t prod = vmull_s8(vget_low_s8(a.val), vget_low_s8(b.val)); + prod = vmlal_s8(prod, vget_high_s8(a.val), vget_high_s8(b.val)); + return v_int32x4(vaddl_s16(vget_low_s16(prod), vget_high_s16(prod))); +#endif +} +inline v_int32x4 v_dotprod_expand_fast(const v_int8x16& a, const v_int8x16& b, const v_int32x4& c) +{ +#if CV_NEON_DOT + return v_int32x4(vdotq_s32(c.val, a.val, b.val)); +#else + return v_dotprod_expand_fast(a, b) + c; +#endif +} + +// 16 >> 64 +inline v_uint64x2 v_dotprod_expand_fast(const v_uint16x8& a, const v_uint16x8& b) +{ + uint32x4_t p0 = vmull_u16(vget_low_u16(a.val), vget_low_u16(b.val)); + uint32x4_t p1 = vmull_u16(vget_high_u16(a.val), vget_high_u16(b.val)); + uint64x2_t s0 = vaddl_u32(vget_low_u32(p0), vget_high_u32(p0)); + uint64x2_t s1 = vaddl_u32(vget_low_u32(p1), vget_high_u32(p1)); + return v_uint64x2(vaddq_u64(s0, s1)); +} +inline v_uint64x2 v_dotprod_expand_fast(const v_uint16x8& a, const v_uint16x8& b, const v_uint64x2& c) +{ return v_dotprod_expand_fast(a, b) + c; } + +inline v_int64x2 v_dotprod_expand_fast(const v_int16x8& a, const v_int16x8& b) +{ + int32x4_t prod = vmull_s16(vget_low_s16(a.val), vget_low_s16(b.val)); + prod = vmlal_s16(prod, vget_high_s16(a.val), vget_high_s16(b.val)); + return v_int64x2(vaddl_s32(vget_low_s32(prod), vget_high_s32(prod))); +} +inline v_int64x2 v_dotprod_expand_fast(const v_int16x8& a, const v_int16x8& b, const v_int64x2& c) +{ return v_dotprod_expand_fast(a, b) + c; } + +// 32 >> 64f +#if CV_SIMD128_64F +inline v_float64x2 v_dotprod_expand_fast(const v_int32x4& a, const v_int32x4& b) +{ return v_cvt_f64(v_dotprod_fast(a, b)); } +inline v_float64x2 v_dotprod_expand_fast(const v_int32x4& a, const v_int32x4& b, const v_float64x2& c) +{ return v_dotprod_expand_fast(a, b) + c; } 
+#endif + + +#define OPENCV_HAL_IMPL_NEON_LOGIC_OP(_Tpvec, suffix) \ + OPENCV_HAL_IMPL_NEON_BIN_OP(&, _Tpvec, vandq_##suffix) \ + OPENCV_HAL_IMPL_NEON_BIN_OP(|, _Tpvec, vorrq_##suffix) \ + OPENCV_HAL_IMPL_NEON_BIN_OP(^, _Tpvec, veorq_##suffix) \ + inline _Tpvec operator ~ (const _Tpvec& a) \ + { \ + return _Tpvec(vreinterpretq_##suffix##_u8(vmvnq_u8(vreinterpretq_u8_##suffix(a.val)))); \ + } + +OPENCV_HAL_IMPL_NEON_LOGIC_OP(v_uint8x16, u8) +OPENCV_HAL_IMPL_NEON_LOGIC_OP(v_int8x16, s8) +OPENCV_HAL_IMPL_NEON_LOGIC_OP(v_uint16x8, u16) +OPENCV_HAL_IMPL_NEON_LOGIC_OP(v_int16x8, s16) +OPENCV_HAL_IMPL_NEON_LOGIC_OP(v_uint32x4, u32) +OPENCV_HAL_IMPL_NEON_LOGIC_OP(v_int32x4, s32) +OPENCV_HAL_IMPL_NEON_LOGIC_OP(v_uint64x2, u64) +OPENCV_HAL_IMPL_NEON_LOGIC_OP(v_int64x2, s64) + +#define OPENCV_HAL_IMPL_NEON_FLT_BIT_OP(bin_op, intrin) \ +inline v_float32x4 operator bin_op (const v_float32x4& a, const v_float32x4& b) \ +{ \ + return v_float32x4(vreinterpretq_f32_s32(intrin(vreinterpretq_s32_f32(a.val), vreinterpretq_s32_f32(b.val)))); \ +} \ +inline v_float32x4& operator bin_op##= (v_float32x4& a, const v_float32x4& b) \ +{ \ + a.val = vreinterpretq_f32_s32(intrin(vreinterpretq_s32_f32(a.val), vreinterpretq_s32_f32(b.val))); \ + return a; \ +} + +OPENCV_HAL_IMPL_NEON_FLT_BIT_OP(&, vandq_s32) +OPENCV_HAL_IMPL_NEON_FLT_BIT_OP(|, vorrq_s32) +OPENCV_HAL_IMPL_NEON_FLT_BIT_OP(^, veorq_s32) + +inline v_float32x4 operator ~ (const v_float32x4& a) +{ + return v_float32x4(vreinterpretq_f32_s32(vmvnq_s32(vreinterpretq_s32_f32(a.val)))); +} + +#if CV_SIMD128_64F +inline v_float32x4 v_sqrt(const v_float32x4& x) +{ + return v_float32x4(vsqrtq_f32(x.val)); +} + +inline v_float32x4 v_invsqrt(const v_float32x4& x) +{ + v_float32x4 one = v_setall_f32(1.0f); + return one / v_sqrt(x); +} +#else +inline v_float32x4 v_sqrt(const v_float32x4& x) +{ + float32x4_t x1 = vmaxq_f32(x.val, vdupq_n_f32(FLT_MIN)); + float32x4_t e = vrsqrteq_f32(x1); + e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x1, e), e), e); + e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x1, e), e), e); + return v_float32x4(vmulq_f32(x.val, e)); +} + +inline v_float32x4 v_invsqrt(const v_float32x4& x) +{ + float32x4_t e = vrsqrteq_f32(x.val); + e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x.val, e), e), e); + e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x.val, e), e), e); + return v_float32x4(e); +} +#endif + +#define OPENCV_HAL_IMPL_NEON_ABS(_Tpuvec, _Tpsvec, usuffix, ssuffix) \ +inline _Tpuvec v_abs(const _Tpsvec& a) { return v_reinterpret_as_##usuffix(_Tpsvec(vabsq_##ssuffix(a.val))); } + +OPENCV_HAL_IMPL_NEON_ABS(v_uint8x16, v_int8x16, u8, s8) +OPENCV_HAL_IMPL_NEON_ABS(v_uint16x8, v_int16x8, u16, s16) +OPENCV_HAL_IMPL_NEON_ABS(v_uint32x4, v_int32x4, u32, s32) + +inline v_float32x4 v_abs(v_float32x4 x) +{ return v_float32x4(vabsq_f32(x.val)); } + +#if CV_SIMD128_64F +#define OPENCV_HAL_IMPL_NEON_DBL_BIT_OP(bin_op, intrin) \ +inline v_float64x2 operator bin_op (const v_float64x2& a, const v_float64x2& b) \ +{ \ + return v_float64x2(vreinterpretq_f64_s64(intrin(vreinterpretq_s64_f64(a.val), vreinterpretq_s64_f64(b.val)))); \ +} \ +inline v_float64x2& operator bin_op##= (v_float64x2& a, const v_float64x2& b) \ +{ \ + a.val = vreinterpretq_f64_s64(intrin(vreinterpretq_s64_f64(a.val), vreinterpretq_s64_f64(b.val))); \ + return a; \ +} + +OPENCV_HAL_IMPL_NEON_DBL_BIT_OP(&, vandq_s64) +OPENCV_HAL_IMPL_NEON_DBL_BIT_OP(|, vorrq_s64) +OPENCV_HAL_IMPL_NEON_DBL_BIT_OP(^, veorq_s64) + +inline v_float64x2 operator ~ (const v_float64x2& a) +{ + return 
v_float64x2(vreinterpretq_f64_s32(vmvnq_s32(vreinterpretq_s32_f64(a.val)))); +} + +inline v_float64x2 v_sqrt(const v_float64x2& x) +{ + return v_float64x2(vsqrtq_f64(x.val)); +} + +inline v_float64x2 v_invsqrt(const v_float64x2& x) +{ + v_float64x2 one = v_setall_f64(1.0f); + return one / v_sqrt(x); +} + +inline v_float64x2 v_abs(v_float64x2 x) +{ return v_float64x2(vabsq_f64(x.val)); } +#endif + +// TODO: exp, log, sin, cos + +#define OPENCV_HAL_IMPL_NEON_BIN_FUNC(_Tpvec, func, intrin) \ +inline _Tpvec func(const _Tpvec& a, const _Tpvec& b) \ +{ \ + return _Tpvec(intrin(a.val, b.val)); \ +} + +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint8x16, v_min, vminq_u8) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint8x16, v_max, vmaxq_u8) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int8x16, v_min, vminq_s8) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int8x16, v_max, vmaxq_s8) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint16x8, v_min, vminq_u16) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint16x8, v_max, vmaxq_u16) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int16x8, v_min, vminq_s16) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int16x8, v_max, vmaxq_s16) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint32x4, v_min, vminq_u32) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint32x4, v_max, vmaxq_u32) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int32x4, v_min, vminq_s32) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int32x4, v_max, vmaxq_s32) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float32x4, v_min, vminq_f32) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float32x4, v_max, vmaxq_f32) +#if CV_SIMD128_64F +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float64x2, v_min, vminq_f64) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float64x2, v_max, vmaxq_f64) +#endif + +#if CV_SIMD128_64F +inline int64x2_t vmvnq_s64(int64x2_t a) +{ + int64x2_t vx = vreinterpretq_s64_u32(vdupq_n_u32(0xFFFFFFFF)); + return veorq_s64(a, vx); +} +inline uint64x2_t vmvnq_u64(uint64x2_t a) +{ + uint64x2_t vx = vreinterpretq_u64_u32(vdupq_n_u32(0xFFFFFFFF)); + return veorq_u64(a, vx); +} +#endif +#define OPENCV_HAL_IMPL_NEON_INT_CMP_OP(_Tpvec, cast, suffix, not_suffix) \ +inline _Tpvec operator == (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(cast(vceqq_##suffix(a.val, b.val))); } \ +inline _Tpvec operator != (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(cast(vmvnq_##not_suffix(vceqq_##suffix(a.val, b.val)))); } \ +inline _Tpvec operator < (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(cast(vcltq_##suffix(a.val, b.val))); } \ +inline _Tpvec operator > (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(cast(vcgtq_##suffix(a.val, b.val))); } \ +inline _Tpvec operator <= (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(cast(vcleq_##suffix(a.val, b.val))); } \ +inline _Tpvec operator >= (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(cast(vcgeq_##suffix(a.val, b.val))); } + +OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_uint8x16, OPENCV_HAL_NOP, u8, u8) +OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_int8x16, vreinterpretq_s8_u8, s8, u8) +OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_uint16x8, OPENCV_HAL_NOP, u16, u16) +OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_int16x8, vreinterpretq_s16_u16, s16, u16) +OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_uint32x4, OPENCV_HAL_NOP, u32, u32) +OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_int32x4, vreinterpretq_s32_u32, s32, u32) +OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_float32x4, vreinterpretq_f32_u32, f32, u32) +#if CV_SIMD128_64F +OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_uint64x2, OPENCV_HAL_NOP, u64, u64) +OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_int64x2, vreinterpretq_s64_u64, s64, u64) +OPENCV_HAL_IMPL_NEON_INT_CMP_OP(v_float64x2, vreinterpretq_f64_u64, f64, u64) +#endif + +inline v_float32x4 v_not_nan(const 
v_float32x4& a) +{ return v_float32x4(vreinterpretq_f32_u32(vceqq_f32(a.val, a.val))); } +#if CV_SIMD128_64F +inline v_float64x2 v_not_nan(const v_float64x2& a) +{ return v_float64x2(vreinterpretq_f64_u64(vceqq_f64(a.val, a.val))); } +#endif + +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint8x16, v_add_wrap, vaddq_u8) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int8x16, v_add_wrap, vaddq_s8) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint16x8, v_add_wrap, vaddq_u16) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int16x8, v_add_wrap, vaddq_s16) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint8x16, v_sub_wrap, vsubq_u8) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int8x16, v_sub_wrap, vsubq_s8) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint16x8, v_sub_wrap, vsubq_u16) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int16x8, v_sub_wrap, vsubq_s16) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint8x16, v_mul_wrap, vmulq_u8) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int8x16, v_mul_wrap, vmulq_s8) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint16x8, v_mul_wrap, vmulq_u16) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_int16x8, v_mul_wrap, vmulq_s16) + +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint8x16, v_absdiff, vabdq_u8) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint16x8, v_absdiff, vabdq_u16) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_uint32x4, v_absdiff, vabdq_u32) +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float32x4, v_absdiff, vabdq_f32) +#if CV_SIMD128_64F +OPENCV_HAL_IMPL_NEON_BIN_FUNC(v_float64x2, v_absdiff, vabdq_f64) +#endif + +/** Saturating absolute difference **/ +inline v_int8x16 v_absdiffs(const v_int8x16& a, const v_int8x16& b) +{ return v_int8x16(vqabsq_s8(vqsubq_s8(a.val, b.val))); } +inline v_int16x8 v_absdiffs(const v_int16x8& a, const v_int16x8& b) +{ return v_int16x8(vqabsq_s16(vqsubq_s16(a.val, b.val))); } + +#define OPENCV_HAL_IMPL_NEON_BIN_FUNC2(_Tpvec, _Tpvec2, cast, func, intrin) \ +inline _Tpvec2 func(const _Tpvec& a, const _Tpvec& b) \ +{ \ + return _Tpvec2(cast(intrin(a.val, b.val))); \ +} + +OPENCV_HAL_IMPL_NEON_BIN_FUNC2(v_int8x16, v_uint8x16, vreinterpretq_u8_s8, v_absdiff, vabdq_s8) +OPENCV_HAL_IMPL_NEON_BIN_FUNC2(v_int16x8, v_uint16x8, vreinterpretq_u16_s16, v_absdiff, vabdq_s16) +OPENCV_HAL_IMPL_NEON_BIN_FUNC2(v_int32x4, v_uint32x4, vreinterpretq_u32_s32, v_absdiff, vabdq_s32) + +inline v_float32x4 v_magnitude(const v_float32x4& a, const v_float32x4& b) +{ + v_float32x4 x(vmlaq_f32(vmulq_f32(a.val, a.val), b.val, b.val)); + return v_sqrt(x); +} + +inline v_float32x4 v_sqr_magnitude(const v_float32x4& a, const v_float32x4& b) +{ + return v_float32x4(vmlaq_f32(vmulq_f32(a.val, a.val), b.val, b.val)); +} + +inline v_float32x4 v_fma(const v_float32x4& a, const v_float32x4& b, const v_float32x4& c) +{ +#if CV_SIMD128_64F + // ARMv8, which adds support for 64-bit floating-point (so CV_SIMD128_64F is defined), + // also adds FMA support both for single- and double-precision floating-point vectors + return v_float32x4(vfmaq_f32(c.val, a.val, b.val)); +#else + return v_float32x4(vmlaq_f32(c.val, a.val, b.val)); +#endif +} + +inline v_int32x4 v_fma(const v_int32x4& a, const v_int32x4& b, const v_int32x4& c) +{ + return v_int32x4(vmlaq_s32(c.val, a.val, b.val)); +} + +inline v_float32x4 v_muladd(const v_float32x4& a, const v_float32x4& b, const v_float32x4& c) +{ + return v_fma(a, b, c); +} + +inline v_int32x4 v_muladd(const v_int32x4& a, const v_int32x4& b, const v_int32x4& c) +{ + return v_fma(a, b, c); +} + +#if CV_SIMD128_64F +inline v_float64x2 v_magnitude(const v_float64x2& a, const v_float64x2& b) +{ + v_float64x2 x(vaddq_f64(vmulq_f64(a.val, a.val), vmulq_f64(b.val, b.val))); + return v_sqrt(x); +} + +inline v_float64x2 
v_sqr_magnitude(const v_float64x2& a, const v_float64x2& b) +{ + return v_float64x2(vaddq_f64(vmulq_f64(a.val, a.val), vmulq_f64(b.val, b.val))); +} + +inline v_float64x2 v_fma(const v_float64x2& a, const v_float64x2& b, const v_float64x2& c) +{ + return v_float64x2(vfmaq_f64(c.val, a.val, b.val)); +} + +inline v_float64x2 v_muladd(const v_float64x2& a, const v_float64x2& b, const v_float64x2& c) +{ + return v_fma(a, b, c); +} +#endif + +// trade efficiency for convenience +#define OPENCV_HAL_IMPL_NEON_SHIFT_OP(_Tpvec, suffix, _Tps, ssuffix) \ +inline _Tpvec operator << (const _Tpvec& a, int n) \ +{ return _Tpvec(vshlq_##suffix(a.val, vdupq_n_##ssuffix((_Tps)n))); } \ +inline _Tpvec operator >> (const _Tpvec& a, int n) \ +{ return _Tpvec(vshlq_##suffix(a.val, vdupq_n_##ssuffix((_Tps)-n))); } \ +template<int n> inline _Tpvec v_shl(const _Tpvec& a) \ +{ return _Tpvec(vshlq_n_##suffix(a.val, n)); } \ +template<int n> inline _Tpvec v_shr(const _Tpvec& a) \ +{ return _Tpvec(vshrq_n_##suffix(a.val, n)); } \ +template<int n> inline _Tpvec v_rshr(const _Tpvec& a) \ +{ return _Tpvec(vrshrq_n_##suffix(a.val, n)); } + +OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_uint8x16, u8, schar, s8) +OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_int8x16, s8, schar, s8) +OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_uint16x8, u16, short, s16) +OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_int16x8, s16, short, s16) +OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_uint32x4, u32, int, s32) +OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_int32x4, s32, int, s32) +OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_uint64x2, u64, int64, s64) +OPENCV_HAL_IMPL_NEON_SHIFT_OP(v_int64x2, s64, int64, s64) + +#define OPENCV_HAL_IMPL_NEON_ROTATE_OP(_Tpvec, suffix) \ +template<int n> inline _Tpvec v_rotate_right(const _Tpvec& a) \ +{ return _Tpvec(vextq_##suffix(a.val, vdupq_n_##suffix(0), n)); } \ +template<int n> inline _Tpvec v_rotate_left(const _Tpvec& a) \ +{ return _Tpvec(vextq_##suffix(vdupq_n_##suffix(0), a.val, _Tpvec::nlanes - n)); } \ +template<> inline _Tpvec v_rotate_left<0>(const _Tpvec& a) \ +{ return a; } \ +template<int n> inline _Tpvec v_rotate_right(const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(vextq_##suffix(a.val, b.val, n)); } \ +template<int n> inline _Tpvec v_rotate_left(const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(vextq_##suffix(b.val, a.val, _Tpvec::nlanes - n)); } \ +template<> inline _Tpvec v_rotate_left<0>(const _Tpvec& a, const _Tpvec& b) \ +{ CV_UNUSED(b); return a; } + +OPENCV_HAL_IMPL_NEON_ROTATE_OP(v_uint8x16, u8) +OPENCV_HAL_IMPL_NEON_ROTATE_OP(v_int8x16, s8) +OPENCV_HAL_IMPL_NEON_ROTATE_OP(v_uint16x8, u16) +OPENCV_HAL_IMPL_NEON_ROTATE_OP(v_int16x8, s16) +OPENCV_HAL_IMPL_NEON_ROTATE_OP(v_uint32x4, u32) +OPENCV_HAL_IMPL_NEON_ROTATE_OP(v_int32x4, s32) +OPENCV_HAL_IMPL_NEON_ROTATE_OP(v_float32x4, f32) +OPENCV_HAL_IMPL_NEON_ROTATE_OP(v_uint64x2, u64) +OPENCV_HAL_IMPL_NEON_ROTATE_OP(v_int64x2, s64) +#if CV_SIMD128_64F +OPENCV_HAL_IMPL_NEON_ROTATE_OP(v_float64x2, f64) +#endif + +#if defined(__clang__) && defined(__aarch64__) +// avoid LD2 instruction.
details: https://github.com/opencv/opencv/issues/14863 +#define OPENCV_HAL_IMPL_NEON_LOAD_LOW_OP(_Tpvec, _Tp, suffix) \ +inline _Tpvec v_load_low(const _Tp* ptr) \ +{ \ +typedef uint64 CV_DECL_ALIGNED(1) unaligned_uint64; \ +uint64 v = *(unaligned_uint64*)ptr; \ +return _Tpvec(v_reinterpret_as_##suffix(v_uint64x2(v, (uint64)123456))); \ +} +#else +#define OPENCV_HAL_IMPL_NEON_LOAD_LOW_OP(_Tpvec, _Tp, suffix) \ +inline _Tpvec v_load_low(const _Tp* ptr) \ +{ return _Tpvec(vcombine_##suffix(vld1_##suffix(ptr), vdup_n_##suffix((_Tp)0))); } +#endif + +#define OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(_Tpvec, _Tp, suffix) \ +inline _Tpvec v_load(const _Tp* ptr) \ +{ return _Tpvec(vld1q_##suffix(ptr)); } \ +inline _Tpvec v_load_aligned(const _Tp* ptr) \ +{ return _Tpvec(vld1q_##suffix(ptr)); } \ +OPENCV_HAL_IMPL_NEON_LOAD_LOW_OP(_Tpvec, _Tp, suffix) \ +inline _Tpvec v_load_halves(const _Tp* ptr0, const _Tp* ptr1) \ +{ return _Tpvec(vcombine_##suffix(vld1_##suffix(ptr0), vld1_##suffix(ptr1))); } \ +inline void v_store(_Tp* ptr, const _Tpvec& a) \ +{ vst1q_##suffix(ptr, a.val); } \ +inline void v_store_aligned(_Tp* ptr, const _Tpvec& a) \ +{ vst1q_##suffix(ptr, a.val); } \ +inline void v_store_aligned_nocache(_Tp* ptr, const _Tpvec& a) \ +{ vst1q_##suffix(ptr, a.val); } \ +inline void v_store(_Tp* ptr, const _Tpvec& a, hal::StoreMode /*mode*/) \ +{ vst1q_##suffix(ptr, a.val); } \ +inline void v_store_low(_Tp* ptr, const _Tpvec& a) \ +{ vst1_##suffix(ptr, vget_low_##suffix(a.val)); } \ +inline void v_store_high(_Tp* ptr, const _Tpvec& a) \ +{ vst1_##suffix(ptr, vget_high_##suffix(a.val)); } + +OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_uint8x16, uchar, u8) +OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_int8x16, schar, s8) +OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_uint16x8, ushort, u16) +OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_int16x8, short, s16) +OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_uint32x4, unsigned, u32) +OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_int32x4, int, s32) +OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_uint64x2, uint64, u64) +OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_int64x2, int64, s64) +OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_float32x4, float, f32) +#if CV_SIMD128_64F +OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_float64x2, double, f64) +#endif + +inline unsigned v_reduce_sum(const v_uint8x16& a) +{ +#if CV_NEON_AARCH64 + uint16_t t0 = vaddlvq_u8(a.val); + return t0; +#else // #if CV_NEON_AARCH64 + uint32x4_t t0 = vpaddlq_u16(vpaddlq_u8(a.val)); + uint32x2_t t1 = vpadd_u32(vget_low_u32(t0), vget_high_u32(t0)); + return vget_lane_u32(vpadd_u32(t1, t1), 0); +#endif // #if CV_NEON_AARCH64 +} +inline int v_reduce_sum(const v_int8x16& a) +{ +#if CV_NEON_AARCH64 + int16_t t0 = vaddlvq_s8(a.val); + return t0; +#else // #if CV_NEON_AARCH64 + int32x4_t t0 = vpaddlq_s16(vpaddlq_s8(a.val)); + int32x2_t t1 = vpadd_s32(vget_low_s32(t0), vget_high_s32(t0)); + return vget_lane_s32(vpadd_s32(t1, t1), 0); +#endif // #if CV_NEON_AARCH64 +} +inline unsigned v_reduce_sum(const v_uint16x8& a) +{ +#if CV_NEON_AARCH64 + uint32_t t0 = vaddlvq_u16(a.val); + return t0; +#else // #if CV_NEON_AARCH64 + uint32x4_t t0 = vpaddlq_u16(a.val); + uint32x2_t t1 = vpadd_u32(vget_low_u32(t0), vget_high_u32(t0)); + return vget_lane_u32(vpadd_u32(t1, t1), 0); +#endif // #if CV_NEON_AARCH64 +} +inline int v_reduce_sum(const v_int16x8& a) +{ +#if CV_NEON_AARCH64 + int32_t t0 = vaddlvq_s16(a.val); + return t0; +#else // #if CV_NEON_AARCH64 + int32x4_t t0 = vpaddlq_s16(a.val); + int32x2_t t1 = vpadd_s32(vget_low_s32(t0), vget_high_s32(t0)); + return vget_lane_s32(vpadd_s32(t1, t1), 0); +#endif // 
#if CV_NEON_AARCH64 +} + +#if CV_NEON_AARCH64 +#define OPENCV_HAL_IMPL_NEON_REDUCE_OP_16(_Tpvec, _Tpnvec, scalartype, func, vectorfunc, suffix) \ +inline scalartype v_reduce_##func(const _Tpvec& a) \ +{ \ + return v##vectorfunc##vq_##suffix(a.val); \ +} +#else // #if CV_NEON_AARCH64 +#define OPENCV_HAL_IMPL_NEON_REDUCE_OP_16(_Tpvec, _Tpnvec, scalartype, func, vectorfunc, suffix) \ +inline scalartype v_reduce_##func(const _Tpvec& a) \ +{ \ + _Tpnvec##_t a0 = vp##vectorfunc##_##suffix(vget_low_##suffix(a.val), vget_high_##suffix(a.val)); \ + a0 = vp##vectorfunc##_##suffix(a0, a0); \ + a0 = vp##vectorfunc##_##suffix(a0, a0); \ + return (scalartype)vget_lane_##suffix(vp##vectorfunc##_##suffix(a0, a0),0); \ +} +#endif // #if CV_NEON_AARCH64 + +OPENCV_HAL_IMPL_NEON_REDUCE_OP_16(v_uint8x16, uint8x8, uchar, max, max, u8) +OPENCV_HAL_IMPL_NEON_REDUCE_OP_16(v_uint8x16, uint8x8, uchar, min, min, u8) +OPENCV_HAL_IMPL_NEON_REDUCE_OP_16(v_int8x16, int8x8, schar, max, max, s8) +OPENCV_HAL_IMPL_NEON_REDUCE_OP_16(v_int8x16, int8x8, schar, min, min, s8) + +#if CV_NEON_AARCH64 +#define OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(_Tpvec, _Tpnvec, scalartype, func, vectorfunc, suffix) \ +inline scalartype v_reduce_##func(const _Tpvec& a) \ +{ \ + return v##vectorfunc##vq_##suffix(a.val); \ +} +#else // #if CV_NEON_AARCH64 +#define OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(_Tpvec, _Tpnvec, scalartype, func, vectorfunc, suffix) \ +inline scalartype v_reduce_##func(const _Tpvec& a) \ +{ \ + _Tpnvec##_t a0 = vp##vectorfunc##_##suffix(vget_low_##suffix(a.val), vget_high_##suffix(a.val)); \ + a0 = vp##vectorfunc##_##suffix(a0, a0); \ + return (scalartype)vget_lane_##suffix(vp##vectorfunc##_##suffix(a0, a0),0); \ +} +#endif // #if CV_NEON_AARCH64 + +OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(v_uint16x8, uint16x4, ushort, max, max, u16) +OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(v_uint16x8, uint16x4, ushort, min, min, u16) +OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(v_int16x8, int16x4, short, max, max, s16) +OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(v_int16x8, int16x4, short, min, min, s16) + +#if CV_NEON_AARCH64 +#define OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(_Tpvec, _Tpnvec, scalartype, func, vectorfunc, suffix) \ +inline scalartype v_reduce_##func(const _Tpvec& a) \ +{ \ + return v##vectorfunc##vq_##suffix(a.val); \ +} +#else // #if CV_NEON_AARCH64 +#define OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(_Tpvec, _Tpnvec, scalartype, func, vectorfunc, suffix) \ +inline scalartype v_reduce_##func(const _Tpvec& a) \ +{ \ + _Tpnvec##_t a0 = vp##vectorfunc##_##suffix(vget_low_##suffix(a.val), vget_high_##suffix(a.val)); \ + return (scalartype)vget_lane_##suffix(vp##vectorfunc##_##suffix(a0, vget_high_##suffix(a.val)),0); \ +} +#endif // #if CV_NEON_AARCH64 + +OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_uint32x4, uint32x2, unsigned, sum, add, u32) +OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_uint32x4, uint32x2, unsigned, max, max, u32) +OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_uint32x4, uint32x2, unsigned, min, min, u32) +OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_int32x4, int32x2, int, sum, add, s32) +OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_int32x4, int32x2, int, max, max, s32) +OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_int32x4, int32x2, int, min, min, s32) +OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_float32x4, float32x2, float, sum, add, f32) +OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_float32x4, float32x2, float, max, max, f32) +OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(v_float32x4, float32x2, float, min, min, f32) + +inline uint64 v_reduce_sum(const v_uint64x2& a) +{ +#if CV_NEON_AARCH64 + return vaddvq_u64(a.val); +#else // #if CV_NEON_AARCH64 + return 
vget_lane_u64(vadd_u64(vget_low_u64(a.val), vget_high_u64(a.val)),0); +#endif // #if CV_NEON_AARCH64 +} +inline int64 v_reduce_sum(const v_int64x2& a) +{ +#if CV_NEON_AARCH64 + return vaddvq_s64(a.val); +#else // #if CV_NEON_AARCH64 + return vget_lane_s64(vadd_s64(vget_low_s64(a.val), vget_high_s64(a.val)),0); +#endif // #if CV_NEON_AARCH64 +} +#if CV_SIMD128_64F +inline double v_reduce_sum(const v_float64x2& a) +{ + return vaddvq_f64(a.val); +} +#endif + +inline v_float32x4 v_reduce_sum4(const v_float32x4& a, const v_float32x4& b, + const v_float32x4& c, const v_float32x4& d) +{ +#if CV_NEON_AARCH64 + float32x4_t ab = vpaddq_f32(a.val, b.val); // a0+a1 a2+a3 b0+b1 b2+b3 + float32x4_t cd = vpaddq_f32(c.val, d.val); // c0+c1 d0+d1 c2+c3 d2+d3 + return v_float32x4(vpaddq_f32(ab, cd)); // sumA sumB sumC sumD +#else // #if CV_NEON_AARCH64 + float32x4x2_t ab = vtrnq_f32(a.val, b.val); + float32x4x2_t cd = vtrnq_f32(c.val, d.val); + + float32x4_t u0 = vaddq_f32(ab.val[0], ab.val[1]); // a0+a1 b0+b1 a2+a3 b2+b3 + float32x4_t u1 = vaddq_f32(cd.val[0], cd.val[1]); // c0+c1 d0+d1 c2+c3 d2+d3 + + float32x4_t v0 = vcombine_f32(vget_low_f32(u0), vget_low_f32(u1)); + float32x4_t v1 = vcombine_f32(vget_high_f32(u0), vget_high_f32(u1)); + + return v_float32x4(vaddq_f32(v0, v1)); +#endif // #if CV_NEON_AARCH64 +} + +inline unsigned v_reduce_sad(const v_uint8x16& a, const v_uint8x16& b) +{ +#if CV_NEON_AARCH64 + uint8x16_t t0 = vabdq_u8(a.val, b.val); + uint16_t t1 = vaddlvq_u8(t0); + return t1; +#else // #if CV_NEON_AARCH64 + uint32x4_t t0 = vpaddlq_u16(vpaddlq_u8(vabdq_u8(a.val, b.val))); + uint32x2_t t1 = vpadd_u32(vget_low_u32(t0), vget_high_u32(t0)); + return vget_lane_u32(vpadd_u32(t1, t1), 0); +#endif // #if CV_NEON_AARCH64 +} +inline unsigned v_reduce_sad(const v_int8x16& a, const v_int8x16& b) +{ +#if CV_NEON_AARCH64 + uint8x16_t t0 = vreinterpretq_u8_s8(vabdq_s8(a.val, b.val)); + uint16_t t1 = vaddlvq_u8(t0); + return t1; +#else // #if CV_NEON_AARCH64 + uint32x4_t t0 = vpaddlq_u16(vpaddlq_u8(vreinterpretq_u8_s8(vabdq_s8(a.val, b.val)))); + uint32x2_t t1 = vpadd_u32(vget_low_u32(t0), vget_high_u32(t0)); + return vget_lane_u32(vpadd_u32(t1, t1), 0); +#endif // #if CV_NEON_AARCH64 +} +inline unsigned v_reduce_sad(const v_uint16x8& a, const v_uint16x8& b) +{ +#if CV_NEON_AARCH64 + uint16x8_t t0 = vabdq_u16(a.val, b.val); + uint32_t t1 = vaddlvq_u16(t0); + return t1; +#else // #if CV_NEON_AARCH64 + uint32x4_t t0 = vpaddlq_u16(vabdq_u16(a.val, b.val)); + uint32x2_t t1 = vpadd_u32(vget_low_u32(t0), vget_high_u32(t0)); + return vget_lane_u32(vpadd_u32(t1, t1), 0); +#endif // #if CV_NEON_AARCH64 +} +inline unsigned v_reduce_sad(const v_int16x8& a, const v_int16x8& b) +{ +#if CV_NEON_AARCH64 + uint16x8_t t0 = vreinterpretq_u16_s16(vabdq_s16(a.val, b.val)); + uint32_t t1 = vaddlvq_u16(t0); + return t1; +#else // #if CV_NEON_AARCH64 + uint32x4_t t0 = vpaddlq_u16(vreinterpretq_u16_s16(vabdq_s16(a.val, b.val))); + uint32x2_t t1 = vpadd_u32(vget_low_u32(t0), vget_high_u32(t0)); + return vget_lane_u32(vpadd_u32(t1, t1), 0); +#endif // #if CV_NEON_AARCH64 +} +inline unsigned v_reduce_sad(const v_uint32x4& a, const v_uint32x4& b) +{ +#if CV_NEON_AARCH64 + uint32x4_t t0 = vabdq_u32(a.val, b.val); + uint32_t t1 = vaddvq_u32(t0); + return t1; +#else // #if CV_NEON_AARCH64 + uint32x4_t t0 = vabdq_u32(a.val, b.val); + uint32x2_t t1 = vpadd_u32(vget_low_u32(t0), vget_high_u32(t0)); + return vget_lane_u32(vpadd_u32(t1, t1), 0); +#endif // #if CV_NEON_AARCH64 +} +inline unsigned v_reduce_sad(const v_int32x4& a, const 
v_int32x4& b) +{ +#if CV_NEON_AARCH64 + uint32x4_t t0 = vreinterpretq_u32_s32(vabdq_s32(a.val, b.val)); + uint32_t t1 = vaddvq_u32(t0); + return t1; +#else // #if CV_NEON_AARCH64 + uint32x4_t t0 = vreinterpretq_u32_s32(vabdq_s32(a.val, b.val)); + uint32x2_t t1 = vpadd_u32(vget_low_u32(t0), vget_high_u32(t0)); + return vget_lane_u32(vpadd_u32(t1, t1), 0); +#endif // #if CV_NEON_AARCH64 +} +inline float v_reduce_sad(const v_float32x4& a, const v_float32x4& b) +{ +#if CV_NEON_AARCH64 + float32x4_t t0 = vabdq_f32(a.val, b.val); + return vaddvq_f32(t0); +#else // #if CV_NEON_AARCH64 + float32x4_t t0 = vabdq_f32(a.val, b.val); + float32x2_t t1 = vpadd_f32(vget_low_f32(t0), vget_high_f32(t0)); + return vget_lane_f32(vpadd_f32(t1, t1), 0); +#endif // #if CV_NEON_AARCH64 +} + +inline v_uint8x16 v_popcount(const v_uint8x16& a) +{ return v_uint8x16(vcntq_u8(a.val)); } +inline v_uint8x16 v_popcount(const v_int8x16& a) +{ return v_uint8x16(vcntq_u8(vreinterpretq_u8_s8(a.val))); } +inline v_uint16x8 v_popcount(const v_uint16x8& a) +{ return v_uint16x8(vpaddlq_u8(vcntq_u8(vreinterpretq_u8_u16(a.val)))); } +inline v_uint16x8 v_popcount(const v_int16x8& a) +{ return v_uint16x8(vpaddlq_u8(vcntq_u8(vreinterpretq_u8_s16(a.val)))); } +inline v_uint32x4 v_popcount(const v_uint32x4& a) +{ return v_uint32x4(vpaddlq_u16(vpaddlq_u8(vcntq_u8(vreinterpretq_u8_u32(a.val))))); } +inline v_uint32x4 v_popcount(const v_int32x4& a) +{ return v_uint32x4(vpaddlq_u16(vpaddlq_u8(vcntq_u8(vreinterpretq_u8_s32(a.val))))); } +inline v_uint64x2 v_popcount(const v_uint64x2& a) +{ return v_uint64x2(vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(vcntq_u8(vreinterpretq_u8_u64(a.val)))))); } +inline v_uint64x2 v_popcount(const v_int64x2& a) +{ return v_uint64x2(vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(vcntq_u8(vreinterpretq_u8_s64(a.val)))))); } + +inline int v_signmask(const v_uint8x16& a) +{ +#if CV_NEON_AARCH64 + const int8x16_t signPosition = {0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7}; + const uint8x16_t byteOrder = {0,8,1,9,2,10,3,11,4,12,5,13,6,14,7,15}; + uint8x16_t v0 = vshlq_u8(vshrq_n_u8(a.val, 7), signPosition); + uint8x16_t v1 = vqtbl1q_u8(v0, byteOrder); + uint32_t t0 = vaddlvq_u16(vreinterpretq_u16_u8(v1)); + return t0; +#else // #if CV_NEON_AARCH64 + int8x8_t m0 = vcreate_s8(CV_BIG_UINT(0x0706050403020100)); + uint8x16_t v0 = vshlq_u8(vshrq_n_u8(a.val, 7), vcombine_s8(m0, m0)); + uint64x2_t v1 = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(v0))); + return (int)vgetq_lane_u64(v1, 0) + ((int)vgetq_lane_u64(v1, 1) << 8); +#endif // #if CV_NEON_AARCH64 +} + +inline int v_signmask(const v_int8x16& a) +{ return v_signmask(v_reinterpret_as_u8(a)); } + +inline int v_signmask(const v_uint16x8& a) +{ +#if CV_NEON_AARCH64 + const int16x8_t signPosition = {0,1,2,3,4,5,6,7}; + uint16x8_t v0 = vshlq_u16(vshrq_n_u16(a.val, 15), signPosition); + uint32_t t0 = vaddlvq_u16(v0); + return t0; +#else // #if CV_NEON_AARCH64 + int16x4_t m0 = vcreate_s16(CV_BIG_UINT(0x0003000200010000)); + uint16x8_t v0 = vshlq_u16(vshrq_n_u16(a.val, 15), vcombine_s16(m0, m0)); + uint64x2_t v1 = vpaddlq_u32(vpaddlq_u16(v0)); + return (int)vgetq_lane_u64(v1, 0) + ((int)vgetq_lane_u64(v1, 1) << 4); +#endif // #if CV_NEON_AARCH64 +} +inline int v_signmask(const v_int16x8& a) +{ return v_signmask(v_reinterpret_as_u16(a)); } + +inline int v_signmask(const v_uint32x4& a) +{ +#if CV_NEON_AARCH64 + const int32x4_t signPosition = {0,1,2,3}; + uint32x4_t v0 = vshlq_u32(vshrq_n_u32(a.val, 31), signPosition); + uint32_t t0 = vaddvq_u32(v0); + return t0; +#else // #if CV_NEON_AARCH64 + int32x2_t m0 = 
vcreate_s32(CV_BIG_UINT(0x0000000100000000)); + uint32x4_t v0 = vshlq_u32(vshrq_n_u32(a.val, 31), vcombine_s32(m0, m0)); + uint64x2_t v1 = vpaddlq_u32(v0); + return (int)vgetq_lane_u64(v1, 0) + ((int)vgetq_lane_u64(v1, 1) << 2); +#endif // #if CV_NEON_AARCH64 +} +inline int v_signmask(const v_int32x4& a) +{ return v_signmask(v_reinterpret_as_u32(a)); } +inline int v_signmask(const v_float32x4& a) +{ return v_signmask(v_reinterpret_as_u32(a)); } +inline int v_signmask(const v_uint64x2& a) +{ +#if CV_NEON_AARCH64 + const int64x2_t signPosition = {0,1}; + uint64x2_t v0 = vshlq_u64(vshrq_n_u64(a.val, 63), signPosition); + uint64_t t0 = vaddvq_u64(v0); + return t0; +#else // #if CV_NEON_AARCH64 + int64x1_t m0 = vdup_n_s64(0); + uint64x2_t v0 = vshlq_u64(vshrq_n_u64(a.val, 63), vcombine_s64(m0, m0)); + return (int)vgetq_lane_u64(v0, 0) + ((int)vgetq_lane_u64(v0, 1) << 1); +#endif // #if CV_NEON_AARCH64 +} +inline int v_signmask(const v_int64x2& a) +{ return v_signmask(v_reinterpret_as_u64(a)); } +#if CV_SIMD128_64F +inline int v_signmask(const v_float64x2& a) +{ return v_signmask(v_reinterpret_as_u64(a)); } +#endif + +inline int v_scan_forward(const v_int8x16& a) { return trailingZeros32(v_signmask(a)); } +inline int v_scan_forward(const v_uint8x16& a) { return trailingZeros32(v_signmask(a)); } +inline int v_scan_forward(const v_int16x8& a) { return trailingZeros32(v_signmask(a)); } +inline int v_scan_forward(const v_uint16x8& a) { return trailingZeros32(v_signmask(a)); } +inline int v_scan_forward(const v_int32x4& a) { return trailingZeros32(v_signmask(a)); } +inline int v_scan_forward(const v_uint32x4& a) { return trailingZeros32(v_signmask(a)); } +inline int v_scan_forward(const v_float32x4& a) { return trailingZeros32(v_signmask(a)); } +inline int v_scan_forward(const v_int64x2& a) { return trailingZeros32(v_signmask(a)); } +inline int v_scan_forward(const v_uint64x2& a) { return trailingZeros32(v_signmask(a)); } +#if CV_SIMD128_64F +inline int v_scan_forward(const v_float64x2& a) { return trailingZeros32(v_signmask(a)); } +#endif + +#if CV_NEON_AARCH64 + #define OPENCV_HAL_IMPL_NEON_CHECK_ALLANY(_Tpvec, suffix, shift) \ + inline bool v_check_all(const v_##_Tpvec& a) \ + { \ + return (vminvq_##suffix(a.val) >> shift) != 0; \ + } \ + inline bool v_check_any(const v_##_Tpvec& a) \ + { \ + return (vmaxvq_##suffix(a.val) >> shift) != 0; \ + } +#else // #if CV_NEON_AARCH64 + #define OPENCV_HAL_IMPL_NEON_CHECK_ALLANY(_Tpvec, suffix, shift) \ + inline bool v_check_all(const v_##_Tpvec& a) \ + { \ + _Tpvec##_t v0 = vshrq_n_##suffix(vmvnq_##suffix(a.val), shift); \ + uint64x2_t v1 = vreinterpretq_u64_##suffix(v0); \ + return (vgetq_lane_u64(v1, 0) | vgetq_lane_u64(v1, 1)) == 0; \ + } \ + inline bool v_check_any(const v_##_Tpvec& a) \ + { \ + _Tpvec##_t v0 = vshrq_n_##suffix(a.val, shift); \ + uint64x2_t v1 = vreinterpretq_u64_##suffix(v0); \ + return (vgetq_lane_u64(v1, 0) | vgetq_lane_u64(v1, 1)) != 0; \ + } +#endif // #if CV_NEON_AARCH64 + +OPENCV_HAL_IMPL_NEON_CHECK_ALLANY(uint8x16, u8, 7) +OPENCV_HAL_IMPL_NEON_CHECK_ALLANY(uint16x8, u16, 15) +OPENCV_HAL_IMPL_NEON_CHECK_ALLANY(uint32x4, u32, 31) + +inline bool v_check_all(const v_uint64x2& a) +{ + uint64x2_t v0 = vshrq_n_u64(a.val, 63); + return (vgetq_lane_u64(v0, 0) & vgetq_lane_u64(v0, 1)) == 1; +} +inline bool v_check_any(const v_uint64x2& a) +{ + uint64x2_t v0 = vshrq_n_u64(a.val, 63); + return (vgetq_lane_u64(v0, 0) | vgetq_lane_u64(v0, 1)) != 0; +} + +inline bool v_check_all(const v_int8x16& a) +{ return v_check_all(v_reinterpret_as_u8(a)); 
} +inline bool v_check_all(const v_int16x8& a) +{ return v_check_all(v_reinterpret_as_u16(a)); } +inline bool v_check_all(const v_int32x4& a) +{ return v_check_all(v_reinterpret_as_u32(a)); } +inline bool v_check_all(const v_float32x4& a) +{ return v_check_all(v_reinterpret_as_u32(a)); } + +inline bool v_check_any(const v_int8x16& a) +{ return v_check_any(v_reinterpret_as_u8(a)); } +inline bool v_check_any(const v_int16x8& a) +{ return v_check_any(v_reinterpret_as_u16(a)); } +inline bool v_check_any(const v_int32x4& a) +{ return v_check_any(v_reinterpret_as_u32(a)); } +inline bool v_check_any(const v_float32x4& a) +{ return v_check_any(v_reinterpret_as_u32(a)); } + +inline bool v_check_all(const v_int64x2& a) +{ return v_check_all(v_reinterpret_as_u64(a)); } +inline bool v_check_any(const v_int64x2& a) +{ return v_check_any(v_reinterpret_as_u64(a)); } +#if CV_SIMD128_64F +inline bool v_check_all(const v_float64x2& a) +{ return v_check_all(v_reinterpret_as_u64(a)); } +inline bool v_check_any(const v_float64x2& a) +{ return v_check_any(v_reinterpret_as_u64(a)); } +#endif + +#define OPENCV_HAL_IMPL_NEON_SELECT(_Tpvec, suffix, usuffix) \ +inline _Tpvec v_select(const _Tpvec& mask, const _Tpvec& a, const _Tpvec& b) \ +{ \ + return _Tpvec(vbslq_##suffix(vreinterpretq_##usuffix##_##suffix(mask.val), a.val, b.val)); \ +} + +OPENCV_HAL_IMPL_NEON_SELECT(v_uint8x16, u8, u8) +OPENCV_HAL_IMPL_NEON_SELECT(v_int8x16, s8, u8) +OPENCV_HAL_IMPL_NEON_SELECT(v_uint16x8, u16, u16) +OPENCV_HAL_IMPL_NEON_SELECT(v_int16x8, s16, u16) +OPENCV_HAL_IMPL_NEON_SELECT(v_uint32x4, u32, u32) +OPENCV_HAL_IMPL_NEON_SELECT(v_int32x4, s32, u32) +OPENCV_HAL_IMPL_NEON_SELECT(v_float32x4, f32, u32) +#if CV_SIMD128_64F +OPENCV_HAL_IMPL_NEON_SELECT(v_float64x2, f64, u64) +#endif + +#if CV_NEON_AARCH64 +#define OPENCV_HAL_IMPL_NEON_EXPAND(_Tpvec, _Tpwvec, _Tp, suffix) \ +inline void v_expand(const _Tpvec& a, _Tpwvec& b0, _Tpwvec& b1) \ +{ \ + b0.val = vmovl_##suffix(vget_low_##suffix(a.val)); \ + b1.val = vmovl_high_##suffix(a.val); \ +} \ +inline _Tpwvec v_expand_low(const _Tpvec& a) \ +{ \ + return _Tpwvec(vmovl_##suffix(vget_low_##suffix(a.val))); \ +} \ +inline _Tpwvec v_expand_high(const _Tpvec& a) \ +{ \ + return _Tpwvec(vmovl_high_##suffix(a.val)); \ +} \ +inline _Tpwvec v_load_expand(const _Tp* ptr) \ +{ \ + return _Tpwvec(vmovl_##suffix(vld1_##suffix(ptr))); \ +} +#else +#define OPENCV_HAL_IMPL_NEON_EXPAND(_Tpvec, _Tpwvec, _Tp, suffix) \ +inline void v_expand(const _Tpvec& a, _Tpwvec& b0, _Tpwvec& b1) \ +{ \ + b0.val = vmovl_##suffix(vget_low_##suffix(a.val)); \ + b1.val = vmovl_##suffix(vget_high_##suffix(a.val)); \ +} \ +inline _Tpwvec v_expand_low(const _Tpvec& a) \ +{ \ + return _Tpwvec(vmovl_##suffix(vget_low_##suffix(a.val))); \ +} \ +inline _Tpwvec v_expand_high(const _Tpvec& a) \ +{ \ + return _Tpwvec(vmovl_##suffix(vget_high_##suffix(a.val))); \ +} \ +inline _Tpwvec v_load_expand(const _Tp* ptr) \ +{ \ + return _Tpwvec(vmovl_##suffix(vld1_##suffix(ptr))); \ +} +#endif + +OPENCV_HAL_IMPL_NEON_EXPAND(v_uint8x16, v_uint16x8, uchar, u8) +OPENCV_HAL_IMPL_NEON_EXPAND(v_int8x16, v_int16x8, schar, s8) +OPENCV_HAL_IMPL_NEON_EXPAND(v_uint16x8, v_uint32x4, ushort, u16) +OPENCV_HAL_IMPL_NEON_EXPAND(v_int16x8, v_int32x4, short, s16) +OPENCV_HAL_IMPL_NEON_EXPAND(v_uint32x4, v_uint64x2, uint, u32) +OPENCV_HAL_IMPL_NEON_EXPAND(v_int32x4, v_int64x2, int, s32) + +inline v_uint32x4 v_load_expand_q(const uchar* ptr) +{ + typedef unsigned int CV_DECL_ALIGNED(1) unaligned_uint; + uint8x8_t v0 = vcreate_u8(*(unaligned_uint*)ptr); + 
uint16x4_t v1 = vget_low_u16(vmovl_u8(v0)); + return v_uint32x4(vmovl_u16(v1)); +} + +inline v_int32x4 v_load_expand_q(const schar* ptr) +{ + typedef unsigned int CV_DECL_ALIGNED(1) unaligned_uint; + int8x8_t v0 = vcreate_s8(*(unaligned_uint*)ptr); + int16x4_t v1 = vget_low_s16(vmovl_s8(v0)); + return v_int32x4(vmovl_s16(v1)); +} + +#if defined(__aarch64__) || defined(_M_ARM64) +#define OPENCV_HAL_IMPL_NEON_UNPACKS(_Tpvec, suffix) \ +inline void v_zip(const v_##_Tpvec& a0, const v_##_Tpvec& a1, v_##_Tpvec& b0, v_##_Tpvec& b1) \ +{ \ + b0.val = vzip1q_##suffix(a0.val, a1.val); \ + b1.val = vzip2q_##suffix(a0.val, a1.val); \ +} \ +inline v_##_Tpvec v_combine_low(const v_##_Tpvec& a, const v_##_Tpvec& b) \ +{ \ + return v_##_Tpvec(vcombine_##suffix(vget_low_##suffix(a.val), vget_low_##suffix(b.val))); \ +} \ +inline v_##_Tpvec v_combine_high(const v_##_Tpvec& a, const v_##_Tpvec& b) \ +{ \ + return v_##_Tpvec(vcombine_##suffix(vget_high_##suffix(a.val), vget_high_##suffix(b.val))); \ +} \ +inline void v_recombine(const v_##_Tpvec& a, const v_##_Tpvec& b, v_##_Tpvec& c, v_##_Tpvec& d) \ +{ \ + c.val = vcombine_##suffix(vget_low_##suffix(a.val), vget_low_##suffix(b.val)); \ + d.val = vcombine_##suffix(vget_high_##suffix(a.val), vget_high_##suffix(b.val)); \ +} +#else +#define OPENCV_HAL_IMPL_NEON_UNPACKS(_Tpvec, suffix) \ +inline void v_zip(const v_##_Tpvec& a0, const v_##_Tpvec& a1, v_##_Tpvec& b0, v_##_Tpvec& b1) \ +{ \ + _Tpvec##x2_t p = vzipq_##suffix(a0.val, a1.val); \ + b0.val = p.val[0]; \ + b1.val = p.val[1]; \ +} \ +inline v_##_Tpvec v_combine_low(const v_##_Tpvec& a, const v_##_Tpvec& b) \ +{ \ + return v_##_Tpvec(vcombine_##suffix(vget_low_##suffix(a.val), vget_low_##suffix(b.val))); \ +} \ +inline v_##_Tpvec v_combine_high(const v_##_Tpvec& a, const v_##_Tpvec& b) \ +{ \ + return v_##_Tpvec(vcombine_##suffix(vget_high_##suffix(a.val), vget_high_##suffix(b.val))); \ +} \ +inline void v_recombine(const v_##_Tpvec& a, const v_##_Tpvec& b, v_##_Tpvec& c, v_##_Tpvec& d) \ +{ \ + c.val = vcombine_##suffix(vget_low_##suffix(a.val), vget_low_##suffix(b.val)); \ + d.val = vcombine_##suffix(vget_high_##suffix(a.val), vget_high_##suffix(b.val)); \ +} +#endif + +OPENCV_HAL_IMPL_NEON_UNPACKS(uint8x16, u8) +OPENCV_HAL_IMPL_NEON_UNPACKS(int8x16, s8) +OPENCV_HAL_IMPL_NEON_UNPACKS(uint16x8, u16) +OPENCV_HAL_IMPL_NEON_UNPACKS(int16x8, s16) +OPENCV_HAL_IMPL_NEON_UNPACKS(uint32x4, u32) +OPENCV_HAL_IMPL_NEON_UNPACKS(int32x4, s32) +OPENCV_HAL_IMPL_NEON_UNPACKS(float32x4, f32) +#if CV_SIMD128_64F +OPENCV_HAL_IMPL_NEON_UNPACKS(float64x2, f64) +#endif + +inline v_uint8x16 v_reverse(const v_uint8x16 &a) +{ + uint8x16_t vec = vrev64q_u8(a.val); + return v_uint8x16(vextq_u8(vec, vec, 8)); +} + +inline v_int8x16 v_reverse(const v_int8x16 &a) +{ return v_reinterpret_as_s8(v_reverse(v_reinterpret_as_u8(a))); } + +inline v_uint16x8 v_reverse(const v_uint16x8 &a) +{ + uint16x8_t vec = vrev64q_u16(a.val); + return v_uint16x8(vextq_u16(vec, vec, 4)); +} + +inline v_int16x8 v_reverse(const v_int16x8 &a) +{ return v_reinterpret_as_s16(v_reverse(v_reinterpret_as_u16(a))); } + +inline v_uint32x4 v_reverse(const v_uint32x4 &a) +{ + uint32x4_t vec = vrev64q_u32(a.val); + return v_uint32x4(vextq_u32(vec, vec, 2)); +} + +inline v_int32x4 v_reverse(const v_int32x4 &a) +{ return v_reinterpret_as_s32(v_reverse(v_reinterpret_as_u32(a))); } + +inline v_float32x4 v_reverse(const v_float32x4 &a) +{ return v_reinterpret_as_f32(v_reverse(v_reinterpret_as_u32(a))); } + +inline v_uint64x2 v_reverse(const v_uint64x2 &a) +{ + uint64x2_t 
vec = a.val; + uint64x1_t vec_lo = vget_low_u64(vec); + uint64x1_t vec_hi = vget_high_u64(vec); + return v_uint64x2(vcombine_u64(vec_hi, vec_lo)); +} + +inline v_int64x2 v_reverse(const v_int64x2 &a) +{ return v_reinterpret_as_s64(v_reverse(v_reinterpret_as_u64(a))); } + +#if CV_SIMD128_64F +inline v_float64x2 v_reverse(const v_float64x2 &a) +{ return v_reinterpret_as_f64(v_reverse(v_reinterpret_as_u64(a))); } +#endif + +#define OPENCV_HAL_IMPL_NEON_EXTRACT(_Tpvec, suffix) \ +template<int s> \ +inline v_##_Tpvec v_extract(const v_##_Tpvec& a, const v_##_Tpvec& b) \ +{ \ + return v_##_Tpvec(vextq_##suffix(a.val, b.val, s)); \ +} + +OPENCV_HAL_IMPL_NEON_EXTRACT(uint8x16, u8) +OPENCV_HAL_IMPL_NEON_EXTRACT(int8x16, s8) +OPENCV_HAL_IMPL_NEON_EXTRACT(uint16x8, u16) +OPENCV_HAL_IMPL_NEON_EXTRACT(int16x8, s16) +OPENCV_HAL_IMPL_NEON_EXTRACT(uint32x4, u32) +OPENCV_HAL_IMPL_NEON_EXTRACT(int32x4, s32) +OPENCV_HAL_IMPL_NEON_EXTRACT(uint64x2, u64) +OPENCV_HAL_IMPL_NEON_EXTRACT(int64x2, s64) +OPENCV_HAL_IMPL_NEON_EXTRACT(float32x4, f32) +#if CV_SIMD128_64F +OPENCV_HAL_IMPL_NEON_EXTRACT(float64x2, f64) +#endif + +#define OPENCV_HAL_IMPL_NEON_EXTRACT_N(_Tpvec, _Tp, suffix) \ +template<int i> inline _Tp v_extract_n(_Tpvec v) { return vgetq_lane_##suffix(v.val, i); } + +OPENCV_HAL_IMPL_NEON_EXTRACT_N(v_uint8x16, uchar, u8) +OPENCV_HAL_IMPL_NEON_EXTRACT_N(v_int8x16, schar, s8) +OPENCV_HAL_IMPL_NEON_EXTRACT_N(v_uint16x8, ushort, u16) +OPENCV_HAL_IMPL_NEON_EXTRACT_N(v_int16x8, short, s16) +OPENCV_HAL_IMPL_NEON_EXTRACT_N(v_uint32x4, uint, u32) +OPENCV_HAL_IMPL_NEON_EXTRACT_N(v_int32x4, int, s32) +OPENCV_HAL_IMPL_NEON_EXTRACT_N(v_uint64x2, uint64, u64) +OPENCV_HAL_IMPL_NEON_EXTRACT_N(v_int64x2, int64, s64) +OPENCV_HAL_IMPL_NEON_EXTRACT_N(v_float32x4, float, f32) +#if CV_SIMD128_64F +OPENCV_HAL_IMPL_NEON_EXTRACT_N(v_float64x2, double, f64) +#endif + +#define OPENCV_HAL_IMPL_NEON_BROADCAST(_Tpvec, _Tp, suffix) \ +template<int i> inline _Tpvec v_broadcast_element(_Tpvec v) { _Tp t = v_extract_n<i>(v); return v_setall_##suffix(t); } + +OPENCV_HAL_IMPL_NEON_BROADCAST(v_uint8x16, uchar, u8) +OPENCV_HAL_IMPL_NEON_BROADCAST(v_int8x16, schar, s8) +OPENCV_HAL_IMPL_NEON_BROADCAST(v_uint16x8, ushort, u16) +OPENCV_HAL_IMPL_NEON_BROADCAST(v_int16x8, short, s16) +OPENCV_HAL_IMPL_NEON_BROADCAST(v_uint32x4, uint, u32) +OPENCV_HAL_IMPL_NEON_BROADCAST(v_int32x4, int, s32) +OPENCV_HAL_IMPL_NEON_BROADCAST(v_uint64x2, uint64, u64) +OPENCV_HAL_IMPL_NEON_BROADCAST(v_int64x2, int64, s64) +OPENCV_HAL_IMPL_NEON_BROADCAST(v_float32x4, float, f32) +#if CV_SIMD128_64F +OPENCV_HAL_IMPL_NEON_BROADCAST(v_float64x2, double, f64) +#endif + +#if CV_SIMD128_64F +inline v_int32x4 v_round(const v_float32x4& a) +{ + float32x4_t a_ = a.val; + int32x4_t result; + __asm__ ("fcvtns %0.4s, %1.4s" + : "=w"(result) + : "w"(a_) + : /* No clobbers */); + return v_int32x4(result); +} +#else +inline v_int32x4 v_round(const v_float32x4& a) +{ + static const int32x4_t v_sign = vdupq_n_s32(1 << 31), + v_05 = vreinterpretq_s32_f32(vdupq_n_f32(0.5f)); + + int32x4_t v_addition = vorrq_s32(v_05, vandq_s32(v_sign, vreinterpretq_s32_f32(a.val))); + return v_int32x4(vcvtq_s32_f32(vaddq_f32(a.val, vreinterpretq_f32_s32(v_addition)))); +} +#endif +inline v_int32x4 v_floor(const v_float32x4& a) +{ + int32x4_t a1 = vcvtq_s32_f32(a.val); + uint32x4_t mask = vcgtq_f32(vcvtq_f32_s32(a1), a.val); + return v_int32x4(vaddq_s32(a1, vreinterpretq_s32_u32(mask))); +} + +inline v_int32x4 v_ceil(const v_float32x4& a) +{ + int32x4_t a1 = vcvtq_s32_f32(a.val); + uint32x4_t mask = vcgtq_f32(a.val,
vcvtq_f32_s32(a1)); + return v_int32x4(vsubq_s32(a1, vreinterpretq_s32_u32(mask))); +} + +inline v_int32x4 v_trunc(const v_float32x4& a) +{ return v_int32x4(vcvtq_s32_f32(a.val)); } + +#if CV_SIMD128_64F +inline v_int32x4 v_round(const v_float64x2& a) +{ + static const int32x2_t zero = vdup_n_s32(0); + return v_int32x4(vcombine_s32(vmovn_s64(vcvtaq_s64_f64(a.val)), zero)); +} + +inline v_int32x4 v_round(const v_float64x2& a, const v_float64x2& b) +{ + return v_int32x4(vcombine_s32(vmovn_s64(vcvtaq_s64_f64(a.val)), vmovn_s64(vcvtaq_s64_f64(b.val)))); +} + +inline v_int32x4 v_floor(const v_float64x2& a) +{ + static const int32x2_t zero = vdup_n_s32(0); + int64x2_t a1 = vcvtq_s64_f64(a.val); + uint64x2_t mask = vcgtq_f64(vcvtq_f64_s64(a1), a.val); + a1 = vaddq_s64(a1, vreinterpretq_s64_u64(mask)); + return v_int32x4(vcombine_s32(vmovn_s64(a1), zero)); +} + +inline v_int32x4 v_ceil(const v_float64x2& a) +{ + static const int32x2_t zero = vdup_n_s32(0); + int64x2_t a1 = vcvtq_s64_f64(a.val); + uint64x2_t mask = vcgtq_f64(a.val, vcvtq_f64_s64(a1)); + a1 = vsubq_s64(a1, vreinterpretq_s64_u64(mask)); + return v_int32x4(vcombine_s32(vmovn_s64(a1), zero)); +} + +inline v_int32x4 v_trunc(const v_float64x2& a) +{ + static const int32x2_t zero = vdup_n_s32(0); + return v_int32x4(vcombine_s32(vmovn_s64(vcvtaq_s64_f64(a.val)), zero)); +} +#endif + +#if CV_NEON_AARCH64 +#define OPENCV_HAL_IMPL_NEON_TRANSPOSE4x4(_Tpvec, suffix) \ +inline void v_transpose4x4(const v_##_Tpvec& a0, const v_##_Tpvec& a1, \ + const v_##_Tpvec& a2, const v_##_Tpvec& a3, \ + v_##_Tpvec& b0, v_##_Tpvec& b1, \ + v_##_Tpvec& b2, v_##_Tpvec& b3) \ +{ \ + /* -- Pass 1: 64b transpose */ \ + _Tpvec##_t t0 = vreinterpretq_##suffix##32_##suffix##64( \ + vtrn1q_##suffix##64(vreinterpretq_##suffix##64_##suffix##32(a0.val), \ + vreinterpretq_##suffix##64_##suffix##32(a2.val))); \ + _Tpvec##_t t1 = vreinterpretq_##suffix##32_##suffix##64( \ + vtrn1q_##suffix##64(vreinterpretq_##suffix##64_##suffix##32(a1.val), \ + vreinterpretq_##suffix##64_##suffix##32(a3.val))); \ + _Tpvec##_t t2 = vreinterpretq_##suffix##32_##suffix##64( \ + vtrn2q_##suffix##64(vreinterpretq_##suffix##64_##suffix##32(a0.val), \ + vreinterpretq_##suffix##64_##suffix##32(a2.val))); \ + _Tpvec##_t t3 = vreinterpretq_##suffix##32_##suffix##64( \ + vtrn2q_##suffix##64(vreinterpretq_##suffix##64_##suffix##32(a1.val), \ + vreinterpretq_##suffix##64_##suffix##32(a3.val))); \ + /* -- Pass 2: 32b transpose */ \ + b0.val = vtrn1q_##suffix##32(t0, t1); \ + b1.val = vtrn2q_##suffix##32(t0, t1); \ + b2.val = vtrn1q_##suffix##32(t2, t3); \ + b3.val = vtrn2q_##suffix##32(t2, t3); \ +} + +OPENCV_HAL_IMPL_NEON_TRANSPOSE4x4(uint32x4, u) +OPENCV_HAL_IMPL_NEON_TRANSPOSE4x4(int32x4, s) +OPENCV_HAL_IMPL_NEON_TRANSPOSE4x4(float32x4, f) +#else // #if CV_NEON_AARCH64 +#define OPENCV_HAL_IMPL_NEON_TRANSPOSE4x4(_Tpvec, suffix) \ +inline void v_transpose4x4(const v_##_Tpvec& a0, const v_##_Tpvec& a1, \ + const v_##_Tpvec& a2, const v_##_Tpvec& a3, \ + v_##_Tpvec& b0, v_##_Tpvec& b1, \ + v_##_Tpvec& b2, v_##_Tpvec& b3) \ +{ \ + /* m00 m01 m02 m03 */ \ + /* m10 m11 m12 m13 */ \ + /* m20 m21 m22 m23 */ \ + /* m30 m31 m32 m33 */ \ + _Tpvec##x2_t t0 = vtrnq_##suffix(a0.val, a1.val); \ + _Tpvec##x2_t t1 = vtrnq_##suffix(a2.val, a3.val); \ + /* m00 m10 m02 m12 */ \ + /* m01 m11 m03 m13 */ \ + /* m20 m30 m22 m32 */ \ + /* m21 m31 m23 m33 */ \ + b0.val = vcombine_##suffix(vget_low_##suffix(t0.val[0]), vget_low_##suffix(t1.val[0])); \ + b1.val = vcombine_##suffix(vget_low_##suffix(t0.val[1]), 
vget_low_##suffix(t1.val[1])); \ + b2.val = vcombine_##suffix(vget_high_##suffix(t0.val[0]), vget_high_##suffix(t1.val[0])); \ + b3.val = vcombine_##suffix(vget_high_##suffix(t0.val[1]), vget_high_##suffix(t1.val[1])); \ +} + +OPENCV_HAL_IMPL_NEON_TRANSPOSE4x4(uint32x4, u32) +OPENCV_HAL_IMPL_NEON_TRANSPOSE4x4(int32x4, s32) +OPENCV_HAL_IMPL_NEON_TRANSPOSE4x4(float32x4, f32) +#endif // #if CV_NEON_AARCH64 + +#define OPENCV_HAL_IMPL_NEON_INTERLEAVED(_Tpvec, _Tp, suffix) \ +inline void v_load_deinterleave(const _Tp* ptr, v_##_Tpvec& a, v_##_Tpvec& b) \ +{ \ + _Tpvec##x2_t v = vld2q_##suffix(ptr); \ + a.val = v.val[0]; \ + b.val = v.val[1]; \ +} \ +inline void v_load_deinterleave(const _Tp* ptr, v_##_Tpvec& a, v_##_Tpvec& b, v_##_Tpvec& c) \ +{ \ + _Tpvec##x3_t v = vld3q_##suffix(ptr); \ + a.val = v.val[0]; \ + b.val = v.val[1]; \ + c.val = v.val[2]; \ +} \ +inline void v_load_deinterleave(const _Tp* ptr, v_##_Tpvec& a, v_##_Tpvec& b, \ + v_##_Tpvec& c, v_##_Tpvec& d) \ +{ \ + _Tpvec##x4_t v = vld4q_##suffix(ptr); \ + a.val = v.val[0]; \ + b.val = v.val[1]; \ + c.val = v.val[2]; \ + d.val = v.val[3]; \ +} \ +inline void v_store_interleave( _Tp* ptr, const v_##_Tpvec& a, const v_##_Tpvec& b, \ + hal::StoreMode /*mode*/=hal::STORE_UNALIGNED) \ +{ \ + _Tpvec##x2_t v; \ + v.val[0] = a.val; \ + v.val[1] = b.val; \ + vst2q_##suffix(ptr, v); \ +} \ +inline void v_store_interleave( _Tp* ptr, const v_##_Tpvec& a, const v_##_Tpvec& b, \ + const v_##_Tpvec& c, hal::StoreMode /*mode*/=hal::STORE_UNALIGNED) \ +{ \ + _Tpvec##x3_t v; \ + v.val[0] = a.val; \ + v.val[1] = b.val; \ + v.val[2] = c.val; \ + vst3q_##suffix(ptr, v); \ +} \ +inline void v_store_interleave( _Tp* ptr, const v_##_Tpvec& a, const v_##_Tpvec& b, \ + const v_##_Tpvec& c, const v_##_Tpvec& d, \ + hal::StoreMode /*mode*/=hal::STORE_UNALIGNED ) \ +{ \ + _Tpvec##x4_t v; \ + v.val[0] = a.val; \ + v.val[1] = b.val; \ + v.val[2] = c.val; \ + v.val[3] = d.val; \ + vst4q_##suffix(ptr, v); \ +} + +#define OPENCV_HAL_IMPL_NEON_INTERLEAVED_INT64(tp, suffix) \ +inline void v_load_deinterleave( const tp* ptr, v_##tp##x2& a, v_##tp##x2& b ) \ +{ \ + tp##x1_t a0 = vld1_##suffix(ptr); \ + tp##x1_t b0 = vld1_##suffix(ptr + 1); \ + tp##x1_t a1 = vld1_##suffix(ptr + 2); \ + tp##x1_t b1 = vld1_##suffix(ptr + 3); \ + a = v_##tp##x2(vcombine_##suffix(a0, a1)); \ + b = v_##tp##x2(vcombine_##suffix(b0, b1)); \ +} \ + \ +inline void v_load_deinterleave( const tp* ptr, v_##tp##x2& a, \ + v_##tp##x2& b, v_##tp##x2& c ) \ +{ \ + tp##x1_t a0 = vld1_##suffix(ptr); \ + tp##x1_t b0 = vld1_##suffix(ptr + 1); \ + tp##x1_t c0 = vld1_##suffix(ptr + 2); \ + tp##x1_t a1 = vld1_##suffix(ptr + 3); \ + tp##x1_t b1 = vld1_##suffix(ptr + 4); \ + tp##x1_t c1 = vld1_##suffix(ptr + 5); \ + a = v_##tp##x2(vcombine_##suffix(a0, a1)); \ + b = v_##tp##x2(vcombine_##suffix(b0, b1)); \ + c = v_##tp##x2(vcombine_##suffix(c0, c1)); \ +} \ + \ +inline void v_load_deinterleave( const tp* ptr, v_##tp##x2& a, v_##tp##x2& b, \ + v_##tp##x2& c, v_##tp##x2& d ) \ +{ \ + tp##x1_t a0 = vld1_##suffix(ptr); \ + tp##x1_t b0 = vld1_##suffix(ptr + 1); \ + tp##x1_t c0 = vld1_##suffix(ptr + 2); \ + tp##x1_t d0 = vld1_##suffix(ptr + 3); \ + tp##x1_t a1 = vld1_##suffix(ptr + 4); \ + tp##x1_t b1 = vld1_##suffix(ptr + 5); \ + tp##x1_t c1 = vld1_##suffix(ptr + 6); \ + tp##x1_t d1 = vld1_##suffix(ptr + 7); \ + a = v_##tp##x2(vcombine_##suffix(a0, a1)); \ + b = v_##tp##x2(vcombine_##suffix(b0, b1)); \ + c = v_##tp##x2(vcombine_##suffix(c0, c1)); \ + d = v_##tp##x2(vcombine_##suffix(d0, d1)); \ +} \ + \ +inline void 
v_store_interleave( tp* ptr, const v_##tp##x2& a, const v_##tp##x2& b, \ + hal::StoreMode /*mode*/=hal::STORE_UNALIGNED) \ +{ \ + vst1_##suffix(ptr, vget_low_##suffix(a.val)); \ + vst1_##suffix(ptr + 1, vget_low_##suffix(b.val)); \ + vst1_##suffix(ptr + 2, vget_high_##suffix(a.val)); \ + vst1_##suffix(ptr + 3, vget_high_##suffix(b.val)); \ +} \ + \ +inline void v_store_interleave( tp* ptr, const v_##tp##x2& a, \ + const v_##tp##x2& b, const v_##tp##x2& c, \ + hal::StoreMode /*mode*/=hal::STORE_UNALIGNED) \ +{ \ + vst1_##suffix(ptr, vget_low_##suffix(a.val)); \ + vst1_##suffix(ptr + 1, vget_low_##suffix(b.val)); \ + vst1_##suffix(ptr + 2, vget_low_##suffix(c.val)); \ + vst1_##suffix(ptr + 3, vget_high_##suffix(a.val)); \ + vst1_##suffix(ptr + 4, vget_high_##suffix(b.val)); \ + vst1_##suffix(ptr + 5, vget_high_##suffix(c.val)); \ +} \ + \ +inline void v_store_interleave( tp* ptr, const v_##tp##x2& a, const v_##tp##x2& b, \ + const v_##tp##x2& c, const v_##tp##x2& d, \ + hal::StoreMode /*mode*/=hal::STORE_UNALIGNED) \ +{ \ + vst1_##suffix(ptr, vget_low_##suffix(a.val)); \ + vst1_##suffix(ptr + 1, vget_low_##suffix(b.val)); \ + vst1_##suffix(ptr + 2, vget_low_##suffix(c.val)); \ + vst1_##suffix(ptr + 3, vget_low_##suffix(d.val)); \ + vst1_##suffix(ptr + 4, vget_high_##suffix(a.val)); \ + vst1_##suffix(ptr + 5, vget_high_##suffix(b.val)); \ + vst1_##suffix(ptr + 6, vget_high_##suffix(c.val)); \ + vst1_##suffix(ptr + 7, vget_high_##suffix(d.val)); \ +} + +OPENCV_HAL_IMPL_NEON_INTERLEAVED(uint8x16, uchar, u8) +OPENCV_HAL_IMPL_NEON_INTERLEAVED(int8x16, schar, s8) +OPENCV_HAL_IMPL_NEON_INTERLEAVED(uint16x8, ushort, u16) +OPENCV_HAL_IMPL_NEON_INTERLEAVED(int16x8, short, s16) +OPENCV_HAL_IMPL_NEON_INTERLEAVED(uint32x4, unsigned, u32) +OPENCV_HAL_IMPL_NEON_INTERLEAVED(int32x4, int, s32) +OPENCV_HAL_IMPL_NEON_INTERLEAVED(float32x4, float, f32) +#if CV_SIMD128_64F +OPENCV_HAL_IMPL_NEON_INTERLEAVED(float64x2, double, f64) +#endif + +OPENCV_HAL_IMPL_NEON_INTERLEAVED_INT64(int64, s64) +OPENCV_HAL_IMPL_NEON_INTERLEAVED_INT64(uint64, u64) + +inline v_float32x4 v_cvt_f32(const v_int32x4& a) +{ + return v_float32x4(vcvtq_f32_s32(a.val)); +} + +#if CV_SIMD128_64F +inline v_float32x4 v_cvt_f32(const v_float64x2& a) +{ + float32x2_t zero = vdup_n_f32(0.0f); + return v_float32x4(vcombine_f32(vcvt_f32_f64(a.val), zero)); +} + +inline v_float32x4 v_cvt_f32(const v_float64x2& a, const v_float64x2& b) +{ + return v_float32x4(vcombine_f32(vcvt_f32_f64(a.val), vcvt_f32_f64(b.val))); +} + +inline v_float64x2 v_cvt_f64(const v_int32x4& a) +{ + return v_float64x2(vcvt_f64_f32(vcvt_f32_s32(vget_low_s32(a.val)))); +} + +inline v_float64x2 v_cvt_f64_high(const v_int32x4& a) +{ + return v_float64x2(vcvt_f64_f32(vcvt_f32_s32(vget_high_s32(a.val)))); +} + +inline v_float64x2 v_cvt_f64(const v_float32x4& a) +{ + return v_float64x2(vcvt_f64_f32(vget_low_f32(a.val))); +} + +inline v_float64x2 v_cvt_f64_high(const v_float32x4& a) +{ + return v_float64x2(vcvt_f64_f32(vget_high_f32(a.val))); +} + +inline v_float64x2 v_cvt_f64(const v_int64x2& a) +{ return v_float64x2(vcvtq_f64_s64(a.val)); } + +#endif + +////////////// Lookup table access //////////////////// + +inline v_int8x16 v_lut(const schar* tab, const int* idx) +{ + schar CV_DECL_ALIGNED(32) elems[16] = + { + tab[idx[ 0]], + tab[idx[ 1]], + tab[idx[ 2]], + tab[idx[ 3]], + tab[idx[ 4]], + tab[idx[ 5]], + tab[idx[ 6]], + tab[idx[ 7]], + tab[idx[ 8]], + tab[idx[ 9]], + tab[idx[10]], + tab[idx[11]], + tab[idx[12]], + tab[idx[13]], + tab[idx[14]], + tab[idx[15]] + }; + return 
v_int8x16(vld1q_s8(elems)); +} +inline v_int8x16 v_lut_pairs(const schar* tab, const int* idx) +{ + schar CV_DECL_ALIGNED(32) elems[16] = + { + tab[idx[0]], + tab[idx[0] + 1], + tab[idx[1]], + tab[idx[1] + 1], + tab[idx[2]], + tab[idx[2] + 1], + tab[idx[3]], + tab[idx[3] + 1], + tab[idx[4]], + tab[idx[4] + 1], + tab[idx[5]], + tab[idx[5] + 1], + tab[idx[6]], + tab[idx[6] + 1], + tab[idx[7]], + tab[idx[7] + 1] + }; + return v_int8x16(vld1q_s8(elems)); +} +inline v_int8x16 v_lut_quads(const schar* tab, const int* idx) +{ + schar CV_DECL_ALIGNED(32) elems[16] = + { + tab[idx[0]], + tab[idx[0] + 1], + tab[idx[0] + 2], + tab[idx[0] + 3], + tab[idx[1]], + tab[idx[1] + 1], + tab[idx[1] + 2], + tab[idx[1] + 3], + tab[idx[2]], + tab[idx[2] + 1], + tab[idx[2] + 2], + tab[idx[2] + 3], + tab[idx[3]], + tab[idx[3] + 1], + tab[idx[3] + 2], + tab[idx[3] + 3] + }; + return v_int8x16(vld1q_s8(elems)); +} +inline v_uint8x16 v_lut(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v_lut((schar*)tab, idx)); } +inline v_uint8x16 v_lut_pairs(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v_lut_pairs((schar*)tab, idx)); } +inline v_uint8x16 v_lut_quads(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v_lut_quads((schar*)tab, idx)); } + +inline v_int16x8 v_lut(const short* tab, const int* idx) +{ + short CV_DECL_ALIGNED(32) elems[8] = + { + tab[idx[0]], + tab[idx[1]], + tab[idx[2]], + tab[idx[3]], + tab[idx[4]], + tab[idx[5]], + tab[idx[6]], + tab[idx[7]] + }; + return v_int16x8(vld1q_s16(elems)); +} +inline v_int16x8 v_lut_pairs(const short* tab, const int* idx) +{ + short CV_DECL_ALIGNED(32) elems[8] = + { + tab[idx[0]], + tab[idx[0] + 1], + tab[idx[1]], + tab[idx[1] + 1], + tab[idx[2]], + tab[idx[2] + 1], + tab[idx[3]], + tab[idx[3] + 1] + }; + return v_int16x8(vld1q_s16(elems)); +} +inline v_int16x8 v_lut_quads(const short* tab, const int* idx) +{ + return v_int16x8(vcombine_s16(vld1_s16(tab + idx[0]), vld1_s16(tab + idx[1]))); +} +inline v_uint16x8 v_lut(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v_lut((short*)tab, idx)); } +inline v_uint16x8 v_lut_pairs(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v_lut_pairs((short*)tab, idx)); } +inline v_uint16x8 v_lut_quads(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v_lut_quads((short*)tab, idx)); } + +inline v_int32x4 v_lut(const int* tab, const int* idx) +{ + int CV_DECL_ALIGNED(32) elems[4] = + { + tab[idx[0]], + tab[idx[1]], + tab[idx[2]], + tab[idx[3]] + }; + return v_int32x4(vld1q_s32(elems)); +} +inline v_int32x4 v_lut_pairs(const int* tab, const int* idx) +{ + return v_int32x4(vcombine_s32(vld1_s32(tab + idx[0]), vld1_s32(tab + idx[1]))); +} +inline v_int32x4 v_lut_quads(const int* tab, const int* idx) +{ + return v_int32x4(vld1q_s32(tab + idx[0])); +} +inline v_uint32x4 v_lut(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v_lut((int*)tab, idx)); } +inline v_uint32x4 v_lut_pairs(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v_lut_pairs((int*)tab, idx)); } +inline v_uint32x4 v_lut_quads(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v_lut_quads((int*)tab, idx)); } + +inline v_int64x2 v_lut(const int64_t* tab, const int* idx) +{ + return v_int64x2(vcombine_s64(vcreate_s64(tab[idx[0]]), vcreate_s64(tab[idx[1]]))); +} +inline v_int64x2 v_lut_pairs(const int64_t* tab, const int* idx) +{ + return v_int64x2(vld1q_s64(tab + idx[0])); +} +inline v_uint64x2 v_lut(const uint64_t* tab, const int* idx) { return 
v_reinterpret_as_u64(v_lut((const int64_t *)tab, idx)); } +inline v_uint64x2 v_lut_pairs(const uint64_t* tab, const int* idx) { return v_reinterpret_as_u64(v_lut_pairs((const int64_t *)tab, idx)); } + +inline v_float32x4 v_lut(const float* tab, const int* idx) +{ + float CV_DECL_ALIGNED(32) elems[4] = + { + tab[idx[0]], + tab[idx[1]], + tab[idx[2]], + tab[idx[3]] + }; + return v_float32x4(vld1q_f32(elems)); +} +inline v_float32x4 v_lut_pairs(const float* tab, const int* idx) +{ + typedef uint64 CV_DECL_ALIGNED(1) unaligned_uint64; + + uint64 CV_DECL_ALIGNED(32) elems[2] = + { + *(unaligned_uint64*)(tab + idx[0]), + *(unaligned_uint64*)(tab + idx[1]) + }; + return v_float32x4(vreinterpretq_f32_u64(vld1q_u64(elems))); +} +inline v_float32x4 v_lut_quads(const float* tab, const int* idx) +{ + return v_float32x4(vld1q_f32(tab + idx[0])); +} + +inline v_int32x4 v_lut(const int* tab, const v_int32x4& idxvec) +{ + int CV_DECL_ALIGNED(32) elems[4] = + { + tab[vgetq_lane_s32(idxvec.val, 0)], + tab[vgetq_lane_s32(idxvec.val, 1)], + tab[vgetq_lane_s32(idxvec.val, 2)], + tab[vgetq_lane_s32(idxvec.val, 3)] + }; + return v_int32x4(vld1q_s32(elems)); +} + +inline v_uint32x4 v_lut(const unsigned* tab, const v_int32x4& idxvec) +{ + unsigned CV_DECL_ALIGNED(32) elems[4] = + { + tab[vgetq_lane_s32(idxvec.val, 0)], + tab[vgetq_lane_s32(idxvec.val, 1)], + tab[vgetq_lane_s32(idxvec.val, 2)], + tab[vgetq_lane_s32(idxvec.val, 3)] + }; + return v_uint32x4(vld1q_u32(elems)); +} + +inline v_float32x4 v_lut(const float* tab, const v_int32x4& idxvec) +{ + float CV_DECL_ALIGNED(32) elems[4] = + { + tab[vgetq_lane_s32(idxvec.val, 0)], + tab[vgetq_lane_s32(idxvec.val, 1)], + tab[vgetq_lane_s32(idxvec.val, 2)], + tab[vgetq_lane_s32(idxvec.val, 3)] + }; + return v_float32x4(vld1q_f32(elems)); +} + +inline void v_lut_deinterleave(const float* tab, const v_int32x4& idxvec, v_float32x4& x, v_float32x4& y) +{ + /*int CV_DECL_ALIGNED(32) idx[4]; + v_store(idx, idxvec); + + float32x4_t xy02 = vcombine_f32(vld1_f32(tab + idx[0]), vld1_f32(tab + idx[2])); + float32x4_t xy13 = vcombine_f32(vld1_f32(tab + idx[1]), vld1_f32(tab + idx[3])); + + float32x4x2_t xxyy = vuzpq_f32(xy02, xy13); + x = v_float32x4(xxyy.val[0]); + y = v_float32x4(xxyy.val[1]);*/ + int CV_DECL_ALIGNED(32) idx[4]; + v_store_aligned(idx, idxvec); + + x = v_float32x4(tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]]); + y = v_float32x4(tab[idx[0]+1], tab[idx[1]+1], tab[idx[2]+1], tab[idx[3]+1]); +} + +inline v_int8x16 v_interleave_pairs(const v_int8x16& vec) +{ + return v_int8x16(vcombine_s8(vtbl1_s8(vget_low_s8(vec.val), vcreate_s8(0x0705060403010200)), vtbl1_s8(vget_high_s8(vec.val), vcreate_s8(0x0705060403010200)))); +} +inline v_uint8x16 v_interleave_pairs(const v_uint8x16& vec) { return v_reinterpret_as_u8(v_interleave_pairs(v_reinterpret_as_s8(vec))); } +inline v_int8x16 v_interleave_quads(const v_int8x16& vec) +{ + return v_int8x16(vcombine_s8(vtbl1_s8(vget_low_s8(vec.val), vcreate_s8(0x0703060205010400)), vtbl1_s8(vget_high_s8(vec.val), vcreate_s8(0x0703060205010400)))); +} +inline v_uint8x16 v_interleave_quads(const v_uint8x16& vec) { return v_reinterpret_as_u8(v_interleave_quads(v_reinterpret_as_s8(vec))); } + +inline v_int16x8 v_interleave_pairs(const v_int16x8& vec) +{ + return v_int16x8(vreinterpretq_s16_s8(vcombine_s8(vtbl1_s8(vget_low_s8(vreinterpretq_s8_s16(vec.val)), vcreate_s8(0x0706030205040100)), vtbl1_s8(vget_high_s8(vreinterpretq_s8_s16(vec.val)), vcreate_s8(0x0706030205040100))))); +} +inline v_uint16x8 v_interleave_pairs(const v_uint16x8& 
vec) { return v_reinterpret_as_u16(v_interleave_pairs(v_reinterpret_as_s16(vec))); } +inline v_int16x8 v_interleave_quads(const v_int16x8& vec) +{ + int16x4x2_t res = vzip_s16(vget_low_s16(vec.val), vget_high_s16(vec.val)); + return v_int16x8(vcombine_s16(res.val[0], res.val[1])); +} +inline v_uint16x8 v_interleave_quads(const v_uint16x8& vec) { return v_reinterpret_as_u16(v_interleave_quads(v_reinterpret_as_s16(vec))); } + +inline v_int32x4 v_interleave_pairs(const v_int32x4& vec) +{ + int32x2x2_t res = vzip_s32(vget_low_s32(vec.val), vget_high_s32(vec.val)); + return v_int32x4(vcombine_s32(res.val[0], res.val[1])); +} +inline v_uint32x4 v_interleave_pairs(const v_uint32x4& vec) { return v_reinterpret_as_u32(v_interleave_pairs(v_reinterpret_as_s32(vec))); } +inline v_float32x4 v_interleave_pairs(const v_float32x4& vec) { return v_reinterpret_as_f32(v_interleave_pairs(v_reinterpret_as_s32(vec))); } + +inline v_int8x16 v_pack_triplets(const v_int8x16& vec) +{ + return v_int8x16(vextq_s8(vcombine_s8(vtbl1_s8(vget_low_s8(vec.val), vcreate_s8(0x0605040201000000)), vtbl1_s8(vget_high_s8(vec.val), vcreate_s8(0x0807060504020100))), vdupq_n_s8(0), 2)); +} +inline v_uint8x16 v_pack_triplets(const v_uint8x16& vec) { return v_reinterpret_as_u8(v_pack_triplets(v_reinterpret_as_s8(vec))); } + +inline v_int16x8 v_pack_triplets(const v_int16x8& vec) +{ + return v_int16x8(vreinterpretq_s16_s8(vextq_s8(vcombine_s8(vtbl1_s8(vget_low_s8(vreinterpretq_s8_s16(vec.val)), vcreate_s8(0x0504030201000000)), vget_high_s8(vreinterpretq_s8_s16(vec.val))), vdupq_n_s8(0), 2))); +} +inline v_uint16x8 v_pack_triplets(const v_uint16x8& vec) { return v_reinterpret_as_u16(v_pack_triplets(v_reinterpret_as_s16(vec))); } + +inline v_int32x4 v_pack_triplets(const v_int32x4& vec) { return vec; } +inline v_uint32x4 v_pack_triplets(const v_uint32x4& vec) { return vec; } +inline v_float32x4 v_pack_triplets(const v_float32x4& vec) { return vec; } + +#if CV_SIMD128_64F +inline v_float64x2 v_lut(const double* tab, const int* idx) +{ + double CV_DECL_ALIGNED(32) elems[2] = + { + tab[idx[0]], + tab[idx[1]] + }; + return v_float64x2(vld1q_f64(elems)); +} + +inline v_float64x2 v_lut_pairs(const double* tab, const int* idx) +{ + return v_float64x2(vld1q_f64(tab + idx[0])); +} + +inline v_float64x2 v_lut(const double* tab, const v_int32x4& idxvec) +{ + double CV_DECL_ALIGNED(32) elems[2] = + { + tab[vgetq_lane_s32(idxvec.val, 0)], + tab[vgetq_lane_s32(idxvec.val, 1)], + }; + return v_float64x2(vld1q_f64(elems)); +} + +inline void v_lut_deinterleave(const double* tab, const v_int32x4& idxvec, v_float64x2& x, v_float64x2& y) +{ + int CV_DECL_ALIGNED(32) idx[4]; + v_store_aligned(idx, idxvec); + + x = v_float64x2(tab[idx[0]], tab[idx[1]]); + y = v_float64x2(tab[idx[0]+1], tab[idx[1]+1]); +} +#endif + +////// FP16 support /////// +#if CV_FP16 +inline v_float32x4 v_load_expand(const float16_t* ptr) +{ + float16x4_t v = + #ifndef vld1_f16 // APPLE compiler defines vld1_f16 as macro + (float16x4_t)vld1_s16((const short*)ptr); + #else + vld1_f16((const __fp16*)ptr); + #endif + return v_float32x4(vcvt_f32_f16(v)); +} + +inline void v_pack_store(float16_t* ptr, const v_float32x4& v) +{ + float16x4_t hv = vcvt_f16_f32(v.val); + + #ifndef vst1_f16 // APPLE compiler defines vst1_f16 as macro + vst1_s16((short*)ptr, (int16x4_t)hv); + #else + vst1_f16((__fp16*)ptr, hv); + #endif +} +#else +inline v_float32x4 v_load_expand(const float16_t* ptr) +{ + const int N = 4; + float buf[N]; + for( int i = 0; i < N; i++ ) buf[i] = (float)ptr[i]; + return v_load(buf); 
+} + +inline void v_pack_store(float16_t* ptr, const v_float32x4& v) +{ + const int N = 4; + float buf[N]; + v_store(buf, v); + for( int i = 0; i < N; i++ ) ptr[i] = float16_t(buf[i]); +} +#endif + +inline void v_cleanup() {} + +CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END + +//! @endcond + +} + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_sse.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_sse.hpp new file mode 100644 index 00000000..f4b43a2d --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_sse.hpp @@ -0,0 +1,3455 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Copyright (C) 2015, Itseez Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_HAL_SSE_HPP +#define OPENCV_HAL_SSE_HPP + +#include +#include "opencv2/core/utility.hpp" + +#define CV_SIMD128 1 +#define CV_SIMD128_64F 1 +#define CV_SIMD128_FP16 0 // no native operations with FP16 type. + +namespace cv +{ + +//! 
@cond IGNORED + +// +// Compilation troubleshooting: +// - MSVC: error C2719: 'a': formal parameter with requested alignment of 16 won't be aligned +// Replace parameter declaration to const reference: +// -v_int32x4 a +// +const v_int32x4& a +// + +CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN + +///////// Types //////////// + +struct v_uint8x16 +{ + typedef uchar lane_type; + typedef __m128i vector_type; + enum { nlanes = 16 }; + + /* coverity[uninit_ctor]: suppress warning */ + v_uint8x16() {} + explicit v_uint8x16(__m128i v) : val(v) {} + v_uint8x16(uchar v0, uchar v1, uchar v2, uchar v3, uchar v4, uchar v5, uchar v6, uchar v7, + uchar v8, uchar v9, uchar v10, uchar v11, uchar v12, uchar v13, uchar v14, uchar v15) + { + val = _mm_setr_epi8((char)v0, (char)v1, (char)v2, (char)v3, + (char)v4, (char)v5, (char)v6, (char)v7, + (char)v8, (char)v9, (char)v10, (char)v11, + (char)v12, (char)v13, (char)v14, (char)v15); + } + + uchar get0() const + { + return (uchar)_mm_cvtsi128_si32(val); + } + + __m128i val; +}; + +struct v_int8x16 +{ + typedef schar lane_type; + typedef __m128i vector_type; + enum { nlanes = 16 }; + + /* coverity[uninit_ctor]: suppress warning */ + v_int8x16() {} + explicit v_int8x16(__m128i v) : val(v) {} + v_int8x16(schar v0, schar v1, schar v2, schar v3, schar v4, schar v5, schar v6, schar v7, + schar v8, schar v9, schar v10, schar v11, schar v12, schar v13, schar v14, schar v15) + { + val = _mm_setr_epi8((char)v0, (char)v1, (char)v2, (char)v3, + (char)v4, (char)v5, (char)v6, (char)v7, + (char)v8, (char)v9, (char)v10, (char)v11, + (char)v12, (char)v13, (char)v14, (char)v15); + } + + schar get0() const + { + return (schar)_mm_cvtsi128_si32(val); + } + + __m128i val; +}; + +struct v_uint16x8 +{ + typedef ushort lane_type; + typedef __m128i vector_type; + enum { nlanes = 8 }; + + /* coverity[uninit_ctor]: suppress warning */ + v_uint16x8() {} + explicit v_uint16x8(__m128i v) : val(v) {} + v_uint16x8(ushort v0, ushort v1, ushort v2, ushort v3, ushort v4, ushort v5, ushort v6, ushort v7) + { + val = _mm_setr_epi16((short)v0, (short)v1, (short)v2, (short)v3, + (short)v4, (short)v5, (short)v6, (short)v7); + } + + ushort get0() const + { + return (ushort)_mm_cvtsi128_si32(val); + } + + __m128i val; +}; + +struct v_int16x8 +{ + typedef short lane_type; + typedef __m128i vector_type; + enum { nlanes = 8 }; + + /* coverity[uninit_ctor]: suppress warning */ + v_int16x8() {} + explicit v_int16x8(__m128i v) : val(v) {} + v_int16x8(short v0, short v1, short v2, short v3, short v4, short v5, short v6, short v7) + { + val = _mm_setr_epi16((short)v0, (short)v1, (short)v2, (short)v3, + (short)v4, (short)v5, (short)v6, (short)v7); + } + + short get0() const + { + return (short)_mm_cvtsi128_si32(val); + } + + __m128i val; +}; + +struct v_uint32x4 +{ + typedef unsigned lane_type; + typedef __m128i vector_type; + enum { nlanes = 4 }; + + /* coverity[uninit_ctor]: suppress warning */ + v_uint32x4() {} + explicit v_uint32x4(__m128i v) : val(v) {} + v_uint32x4(unsigned v0, unsigned v1, unsigned v2, unsigned v3) + { + val = _mm_setr_epi32((int)v0, (int)v1, (int)v2, (int)v3); + } + + unsigned get0() const + { + return (unsigned)_mm_cvtsi128_si32(val); + } + + __m128i val; +}; + +struct v_int32x4 +{ + typedef int lane_type; + typedef __m128i vector_type; + enum { nlanes = 4 }; + + /* coverity[uninit_ctor]: suppress warning */ + v_int32x4() {} + explicit v_int32x4(__m128i v) : val(v) {} + v_int32x4(int v0, int v1, int v2, int v3) + { + val = _mm_setr_epi32(v0, v1, v2, v3); + } + + int get0() const + { + 
return _mm_cvtsi128_si32(val); + } + + __m128i val; +}; + +struct v_float32x4 +{ + typedef float lane_type; + typedef __m128 vector_type; + enum { nlanes = 4 }; + + /* coverity[uninit_ctor]: suppress warning */ + v_float32x4() {} + explicit v_float32x4(__m128 v) : val(v) {} + v_float32x4(float v0, float v1, float v2, float v3) + { + val = _mm_setr_ps(v0, v1, v2, v3); + } + + float get0() const + { + return _mm_cvtss_f32(val); + } + + __m128 val; +}; + +struct v_uint64x2 +{ + typedef uint64 lane_type; + typedef __m128i vector_type; + enum { nlanes = 2 }; + + /* coverity[uninit_ctor]: suppress warning */ + v_uint64x2() {} + explicit v_uint64x2(__m128i v) : val(v) {} + v_uint64x2(uint64 v0, uint64 v1) + { + val = _mm_setr_epi32((int)v0, (int)(v0 >> 32), (int)v1, (int)(v1 >> 32)); + } + + uint64 get0() const + { + #if !defined(__x86_64__) && !defined(_M_X64) + int a = _mm_cvtsi128_si32(val); + int b = _mm_cvtsi128_si32(_mm_srli_epi64(val, 32)); + return (unsigned)a | ((uint64)(unsigned)b << 32); + #else + return (uint64)_mm_cvtsi128_si64(val); + #endif + } + + __m128i val; +}; + +struct v_int64x2 +{ + typedef int64 lane_type; + typedef __m128i vector_type; + enum { nlanes = 2 }; + + /* coverity[uninit_ctor]: suppress warning */ + v_int64x2() {} + explicit v_int64x2(__m128i v) : val(v) {} + v_int64x2(int64 v0, int64 v1) + { + val = _mm_setr_epi32((int)v0, (int)(v0 >> 32), (int)v1, (int)(v1 >> 32)); + } + + int64 get0() const + { + #if !defined(__x86_64__) && !defined(_M_X64) + int a = _mm_cvtsi128_si32(val); + int b = _mm_cvtsi128_si32(_mm_srli_epi64(val, 32)); + return (int64)((unsigned)a | ((uint64)(unsigned)b << 32)); + #else + return _mm_cvtsi128_si64(val); + #endif + } + + __m128i val; +}; + +struct v_float64x2 +{ + typedef double lane_type; + typedef __m128d vector_type; + enum { nlanes = 2 }; + + /* coverity[uninit_ctor]: suppress warning */ + v_float64x2() {} + explicit v_float64x2(__m128d v) : val(v) {} + v_float64x2(double v0, double v1) + { + val = _mm_setr_pd(v0, v1); + } + + double get0() const + { + return _mm_cvtsd_f64(val); + } + + __m128d val; +}; + +namespace hal_sse_internal +{ + template + to_sse_type v_sse_reinterpret_as(const from_sse_type& val); + +#define OPENCV_HAL_IMPL_SSE_REINTERPRET_RAW(to_sse_type, from_sse_type, sse_cast_intrin) \ + template<> inline \ + to_sse_type v_sse_reinterpret_as(const from_sse_type& a) \ + { return sse_cast_intrin(a); } + + OPENCV_HAL_IMPL_SSE_REINTERPRET_RAW(__m128i, __m128i, OPENCV_HAL_NOP) + OPENCV_HAL_IMPL_SSE_REINTERPRET_RAW(__m128i, __m128, _mm_castps_si128) + OPENCV_HAL_IMPL_SSE_REINTERPRET_RAW(__m128i, __m128d, _mm_castpd_si128) + OPENCV_HAL_IMPL_SSE_REINTERPRET_RAW(__m128, __m128i, _mm_castsi128_ps) + OPENCV_HAL_IMPL_SSE_REINTERPRET_RAW(__m128, __m128, OPENCV_HAL_NOP) + OPENCV_HAL_IMPL_SSE_REINTERPRET_RAW(__m128, __m128d, _mm_castpd_ps) + OPENCV_HAL_IMPL_SSE_REINTERPRET_RAW(__m128d, __m128i, _mm_castsi128_pd) + OPENCV_HAL_IMPL_SSE_REINTERPRET_RAW(__m128d, __m128, _mm_castps_pd) + OPENCV_HAL_IMPL_SSE_REINTERPRET_RAW(__m128d, __m128d, OPENCV_HAL_NOP) +} + +#define OPENCV_HAL_IMPL_SSE_INITVEC(_Tpvec, _Tp, suffix, zsuffix, ssuffix, _Tps, cast) \ +inline _Tpvec v_setzero_##suffix() { return _Tpvec(_mm_setzero_##zsuffix()); } \ +inline _Tpvec v_setall_##suffix(_Tp v) { return _Tpvec(_mm_set1_##ssuffix((_Tps)v)); } \ +template inline _Tpvec v_reinterpret_as_##suffix(const _Tpvec0& a) \ +{ return _Tpvec(cast(a.val)); } + +OPENCV_HAL_IMPL_SSE_INITVEC(v_uint8x16, uchar, u8, si128, epi8, schar, OPENCV_HAL_NOP) 
+OPENCV_HAL_IMPL_SSE_INITVEC(v_int8x16, schar, s8, si128, epi8, schar, OPENCV_HAL_NOP) +OPENCV_HAL_IMPL_SSE_INITVEC(v_uint16x8, ushort, u16, si128, epi16, short, OPENCV_HAL_NOP) +OPENCV_HAL_IMPL_SSE_INITVEC(v_int16x8, short, s16, si128, epi16, short, OPENCV_HAL_NOP) +OPENCV_HAL_IMPL_SSE_INITVEC(v_uint32x4, unsigned, u32, si128, epi32, int, OPENCV_HAL_NOP) +OPENCV_HAL_IMPL_SSE_INITVEC(v_int32x4, int, s32, si128, epi32, int, OPENCV_HAL_NOP) +OPENCV_HAL_IMPL_SSE_INITVEC(v_float32x4, float, f32, ps, ps, float, _mm_castsi128_ps) +OPENCV_HAL_IMPL_SSE_INITVEC(v_float64x2, double, f64, pd, pd, double, _mm_castsi128_pd) + +inline v_uint64x2 v_setzero_u64() { return v_uint64x2(_mm_setzero_si128()); } +inline v_int64x2 v_setzero_s64() { return v_int64x2(_mm_setzero_si128()); } +inline v_uint64x2 v_setall_u64(uint64 val) { return v_uint64x2(val, val); } +inline v_int64x2 v_setall_s64(int64 val) { return v_int64x2(val, val); } + +template inline +v_uint64x2 v_reinterpret_as_u64(const _Tpvec& a) { return v_uint64x2(a.val); } +template inline +v_int64x2 v_reinterpret_as_s64(const _Tpvec& a) { return v_int64x2(a.val); } +inline v_float32x4 v_reinterpret_as_f32(const v_uint64x2& a) +{ return v_float32x4(_mm_castsi128_ps(a.val)); } +inline v_float32x4 v_reinterpret_as_f32(const v_int64x2& a) +{ return v_float32x4(_mm_castsi128_ps(a.val)); } +inline v_float64x2 v_reinterpret_as_f64(const v_uint64x2& a) +{ return v_float64x2(_mm_castsi128_pd(a.val)); } +inline v_float64x2 v_reinterpret_as_f64(const v_int64x2& a) +{ return v_float64x2(_mm_castsi128_pd(a.val)); } + +#define OPENCV_HAL_IMPL_SSE_INIT_FROM_FLT(_Tpvec, suffix) \ +inline _Tpvec v_reinterpret_as_##suffix(const v_float32x4& a) \ +{ return _Tpvec(_mm_castps_si128(a.val)); } \ +inline _Tpvec v_reinterpret_as_##suffix(const v_float64x2& a) \ +{ return _Tpvec(_mm_castpd_si128(a.val)); } + +OPENCV_HAL_IMPL_SSE_INIT_FROM_FLT(v_uint8x16, u8) +OPENCV_HAL_IMPL_SSE_INIT_FROM_FLT(v_int8x16, s8) +OPENCV_HAL_IMPL_SSE_INIT_FROM_FLT(v_uint16x8, u16) +OPENCV_HAL_IMPL_SSE_INIT_FROM_FLT(v_int16x8, s16) +OPENCV_HAL_IMPL_SSE_INIT_FROM_FLT(v_uint32x4, u32) +OPENCV_HAL_IMPL_SSE_INIT_FROM_FLT(v_int32x4, s32) +OPENCV_HAL_IMPL_SSE_INIT_FROM_FLT(v_uint64x2, u64) +OPENCV_HAL_IMPL_SSE_INIT_FROM_FLT(v_int64x2, s64) + +inline v_float32x4 v_reinterpret_as_f32(const v_float32x4& a) {return a; } +inline v_float64x2 v_reinterpret_as_f64(const v_float64x2& a) {return a; } +inline v_float32x4 v_reinterpret_as_f32(const v_float64x2& a) {return v_float32x4(_mm_castpd_ps(a.val)); } +inline v_float64x2 v_reinterpret_as_f64(const v_float32x4& a) {return v_float64x2(_mm_castps_pd(a.val)); } + +//////////////// PACK /////////////// +inline v_uint8x16 v_pack(const v_uint16x8& a, const v_uint16x8& b) +{ + __m128i delta = _mm_set1_epi16(255); + return v_uint8x16(_mm_packus_epi16(_mm_subs_epu16(a.val, _mm_subs_epu16(a.val, delta)), + _mm_subs_epu16(b.val, _mm_subs_epu16(b.val, delta)))); +} + +inline void v_pack_store(uchar* ptr, const v_uint16x8& a) +{ + __m128i delta = _mm_set1_epi16(255); + __m128i a1 = _mm_subs_epu16(a.val, _mm_subs_epu16(a.val, delta)); + _mm_storel_epi64((__m128i*)ptr, _mm_packus_epi16(a1, a1)); +} + +inline v_uint8x16 v_pack_u(const v_int16x8& a, const v_int16x8& b) +{ return v_uint8x16(_mm_packus_epi16(a.val, b.val)); } + +inline void v_pack_u_store(uchar* ptr, const v_int16x8& a) +{ _mm_storel_epi64((__m128i*)ptr, _mm_packus_epi16(a.val, a.val)); } + +template inline +v_uint8x16 v_rshr_pack(const v_uint16x8& a, const v_uint16x8& b) +{ + // we assume that n > 0, and so the 
shifted 16-bit values can be treated as signed numbers. + __m128i delta = _mm_set1_epi16((short)(1 << (n-1))); + return v_uint8x16(_mm_packus_epi16(_mm_srli_epi16(_mm_adds_epu16(a.val, delta), n), + _mm_srli_epi16(_mm_adds_epu16(b.val, delta), n))); +} + +template inline +void v_rshr_pack_store(uchar* ptr, const v_uint16x8& a) +{ + __m128i delta = _mm_set1_epi16((short)(1 << (n-1))); + __m128i a1 = _mm_srli_epi16(_mm_adds_epu16(a.val, delta), n); + _mm_storel_epi64((__m128i*)ptr, _mm_packus_epi16(a1, a1)); +} + +template inline +v_uint8x16 v_rshr_pack_u(const v_int16x8& a, const v_int16x8& b) +{ + __m128i delta = _mm_set1_epi16((short)(1 << (n-1))); + return v_uint8x16(_mm_packus_epi16(_mm_srai_epi16(_mm_adds_epi16(a.val, delta), n), + _mm_srai_epi16(_mm_adds_epi16(b.val, delta), n))); +} + +template inline +void v_rshr_pack_u_store(uchar* ptr, const v_int16x8& a) +{ + __m128i delta = _mm_set1_epi16((short)(1 << (n-1))); + __m128i a1 = _mm_srai_epi16(_mm_adds_epi16(a.val, delta), n); + _mm_storel_epi64((__m128i*)ptr, _mm_packus_epi16(a1, a1)); +} + +inline v_int8x16 v_pack(const v_int16x8& a, const v_int16x8& b) +{ return v_int8x16(_mm_packs_epi16(a.val, b.val)); } + +inline void v_pack_store(schar* ptr, const v_int16x8& a) +{ _mm_storel_epi64((__m128i*)ptr, _mm_packs_epi16(a.val, a.val)); } + +template inline +v_int8x16 v_rshr_pack(const v_int16x8& a, const v_int16x8& b) +{ + // we assume that n > 0, and so the shifted 16-bit values can be treated as signed numbers. + __m128i delta = _mm_set1_epi16((short)(1 << (n-1))); + return v_int8x16(_mm_packs_epi16(_mm_srai_epi16(_mm_adds_epi16(a.val, delta), n), + _mm_srai_epi16(_mm_adds_epi16(b.val, delta), n))); +} +template inline +void v_rshr_pack_store(schar* ptr, const v_int16x8& a) +{ + // we assume that n > 0, and so the shifted 16-bit values can be treated as signed numbers. + __m128i delta = _mm_set1_epi16((short)(1 << (n-1))); + __m128i a1 = _mm_srai_epi16(_mm_adds_epi16(a.val, delta), n); + _mm_storel_epi64((__m128i*)ptr, _mm_packs_epi16(a1, a1)); +} + + +// byte-wise "mask ? 
a : b" +inline __m128i v_select_si128(__m128i mask, __m128i a, __m128i b) +{ +#if CV_SSE4_1 + return _mm_blendv_epi8(b, a, mask); +#else + return _mm_xor_si128(b, _mm_and_si128(_mm_xor_si128(a, b), mask)); +#endif +} + +inline v_uint16x8 v_pack(const v_uint32x4& a, const v_uint32x4& b) +{ return v_uint16x8(_v128_packs_epu32(a.val, b.val)); } + +inline void v_pack_store(ushort* ptr, const v_uint32x4& a) +{ + __m128i z = _mm_setzero_si128(), maxval32 = _mm_set1_epi32(65535), delta32 = _mm_set1_epi32(32768); + __m128i a1 = _mm_sub_epi32(v_select_si128(_mm_cmpgt_epi32(z, a.val), maxval32, a.val), delta32); + __m128i r = _mm_packs_epi32(a1, a1); + _mm_storel_epi64((__m128i*)ptr, _mm_sub_epi16(r, _mm_set1_epi16(-32768))); +} + +template inline +v_uint16x8 v_rshr_pack(const v_uint32x4& a, const v_uint32x4& b) +{ + __m128i delta = _mm_set1_epi32(1 << (n-1)), delta32 = _mm_set1_epi32(32768); + __m128i a1 = _mm_sub_epi32(_mm_srli_epi32(_mm_add_epi32(a.val, delta), n), delta32); + __m128i b1 = _mm_sub_epi32(_mm_srli_epi32(_mm_add_epi32(b.val, delta), n), delta32); + return v_uint16x8(_mm_sub_epi16(_mm_packs_epi32(a1, b1), _mm_set1_epi16(-32768))); +} + +template inline +void v_rshr_pack_store(ushort* ptr, const v_uint32x4& a) +{ + __m128i delta = _mm_set1_epi32(1 << (n-1)), delta32 = _mm_set1_epi32(32768); + __m128i a1 = _mm_sub_epi32(_mm_srli_epi32(_mm_add_epi32(a.val, delta), n), delta32); + __m128i a2 = _mm_sub_epi16(_mm_packs_epi32(a1, a1), _mm_set1_epi16(-32768)); + _mm_storel_epi64((__m128i*)ptr, a2); +} + +inline v_uint16x8 v_pack_u(const v_int32x4& a, const v_int32x4& b) +{ +#if CV_SSE4_1 + return v_uint16x8(_mm_packus_epi32(a.val, b.val)); +#else + __m128i delta32 = _mm_set1_epi32(32768); + + // preliminary saturate negative values to zero + __m128i a1 = _mm_and_si128(a.val, _mm_cmpgt_epi32(a.val, _mm_set1_epi32(0))); + __m128i b1 = _mm_and_si128(b.val, _mm_cmpgt_epi32(b.val, _mm_set1_epi32(0))); + + __m128i r = _mm_packs_epi32(_mm_sub_epi32(a1, delta32), _mm_sub_epi32(b1, delta32)); + return v_uint16x8(_mm_sub_epi16(r, _mm_set1_epi16(-32768))); +#endif +} + +inline void v_pack_u_store(ushort* ptr, const v_int32x4& a) +{ +#if CV_SSE4_1 + _mm_storel_epi64((__m128i*)ptr, _mm_packus_epi32(a.val, a.val)); +#else + __m128i delta32 = _mm_set1_epi32(32768); + __m128i a1 = _mm_sub_epi32(a.val, delta32); + __m128i r = _mm_sub_epi16(_mm_packs_epi32(a1, a1), _mm_set1_epi16(-32768)); + _mm_storel_epi64((__m128i*)ptr, r); +#endif +} + +template inline +v_uint16x8 v_rshr_pack_u(const v_int32x4& a, const v_int32x4& b) +{ +#if CV_SSE4_1 + __m128i delta = _mm_set1_epi32(1 << (n - 1)); + return v_uint16x8(_mm_packus_epi32(_mm_srai_epi32(_mm_add_epi32(a.val, delta), n), + _mm_srai_epi32(_mm_add_epi32(b.val, delta), n))); +#else + __m128i delta = _mm_set1_epi32(1 << (n-1)), delta32 = _mm_set1_epi32(32768); + __m128i a1 = _mm_sub_epi32(_mm_srai_epi32(_mm_add_epi32(a.val, delta), n), delta32); + __m128i a2 = _mm_sub_epi16(_mm_packs_epi32(a1, a1), _mm_set1_epi16(-32768)); + __m128i b1 = _mm_sub_epi32(_mm_srai_epi32(_mm_add_epi32(b.val, delta), n), delta32); + __m128i b2 = _mm_sub_epi16(_mm_packs_epi32(b1, b1), _mm_set1_epi16(-32768)); + return v_uint16x8(_mm_unpacklo_epi64(a2, b2)); +#endif +} + +template inline +void v_rshr_pack_u_store(ushort* ptr, const v_int32x4& a) +{ +#if CV_SSE4_1 + __m128i delta = _mm_set1_epi32(1 << (n - 1)); + __m128i a1 = _mm_srai_epi32(_mm_add_epi32(a.val, delta), n); + _mm_storel_epi64((__m128i*)ptr, _mm_packus_epi32(a1, a1)); +#else + __m128i delta = _mm_set1_epi32(1 << (n-1)), 
delta32 = _mm_set1_epi32(32768); + __m128i a1 = _mm_sub_epi32(_mm_srai_epi32(_mm_add_epi32(a.val, delta), n), delta32); + __m128i a2 = _mm_sub_epi16(_mm_packs_epi32(a1, a1), _mm_set1_epi16(-32768)); + _mm_storel_epi64((__m128i*)ptr, a2); +#endif +} + +inline v_int16x8 v_pack(const v_int32x4& a, const v_int32x4& b) +{ return v_int16x8(_mm_packs_epi32(a.val, b.val)); } + +inline void v_pack_store(short* ptr, const v_int32x4& a) +{ + _mm_storel_epi64((__m128i*)ptr, _mm_packs_epi32(a.val, a.val)); +} + +template inline +v_int16x8 v_rshr_pack(const v_int32x4& a, const v_int32x4& b) +{ + __m128i delta = _mm_set1_epi32(1 << (n-1)); + return v_int16x8(_mm_packs_epi32(_mm_srai_epi32(_mm_add_epi32(a.val, delta), n), + _mm_srai_epi32(_mm_add_epi32(b.val, delta), n))); +} + +template inline +void v_rshr_pack_store(short* ptr, const v_int32x4& a) +{ + __m128i delta = _mm_set1_epi32(1 << (n-1)); + __m128i a1 = _mm_srai_epi32(_mm_add_epi32(a.val, delta), n); + _mm_storel_epi64((__m128i*)ptr, _mm_packs_epi32(a1, a1)); +} + + +// [a0 0 | b0 0] [a1 0 | b1 0] +inline v_uint32x4 v_pack(const v_uint64x2& a, const v_uint64x2& b) +{ + __m128i v0 = _mm_unpacklo_epi32(a.val, b.val); // a0 a1 0 0 + __m128i v1 = _mm_unpackhi_epi32(a.val, b.val); // b0 b1 0 0 + return v_uint32x4(_mm_unpacklo_epi32(v0, v1)); +} + +inline void v_pack_store(unsigned* ptr, const v_uint64x2& a) +{ + __m128i a1 = _mm_shuffle_epi32(a.val, _MM_SHUFFLE(0, 2, 2, 0)); + _mm_storel_epi64((__m128i*)ptr, a1); +} + +// [a0 0 | b0 0] [a1 0 | b1 0] +inline v_int32x4 v_pack(const v_int64x2& a, const v_int64x2& b) +{ + __m128i v0 = _mm_unpacklo_epi32(a.val, b.val); // a0 a1 0 0 + __m128i v1 = _mm_unpackhi_epi32(a.val, b.val); // b0 b1 0 0 + return v_int32x4(_mm_unpacklo_epi32(v0, v1)); +} + +inline void v_pack_store(int* ptr, const v_int64x2& a) +{ + __m128i a1 = _mm_shuffle_epi32(a.val, _MM_SHUFFLE(0, 2, 2, 0)); + _mm_storel_epi64((__m128i*)ptr, a1); +} + +template inline +v_uint32x4 v_rshr_pack(const v_uint64x2& a, const v_uint64x2& b) +{ + uint64 delta = (uint64)1 << (n-1); + v_uint64x2 delta2(delta, delta); + __m128i a1 = _mm_srli_epi64(_mm_add_epi64(a.val, delta2.val), n); + __m128i b1 = _mm_srli_epi64(_mm_add_epi64(b.val, delta2.val), n); + __m128i v0 = _mm_unpacklo_epi32(a1, b1); // a0 a1 0 0 + __m128i v1 = _mm_unpackhi_epi32(a1, b1); // b0 b1 0 0 + return v_uint32x4(_mm_unpacklo_epi32(v0, v1)); +} + +template inline +void v_rshr_pack_store(unsigned* ptr, const v_uint64x2& a) +{ + uint64 delta = (uint64)1 << (n-1); + v_uint64x2 delta2(delta, delta); + __m128i a1 = _mm_srli_epi64(_mm_add_epi64(a.val, delta2.val), n); + __m128i a2 = _mm_shuffle_epi32(a1, _MM_SHUFFLE(0, 2, 2, 0)); + _mm_storel_epi64((__m128i*)ptr, a2); +} + +inline __m128i v_sign_epi64(__m128i a) +{ + return _mm_shuffle_epi32(_mm_srai_epi32(a, 31), _MM_SHUFFLE(3, 3, 1, 1)); // x m0 | x m1 +} + +inline __m128i v_srai_epi64(__m128i a, int imm) +{ + __m128i smask = v_sign_epi64(a); + return _mm_xor_si128(_mm_srli_epi64(_mm_xor_si128(a, smask), imm), smask); +} + +template inline +v_int32x4 v_rshr_pack(const v_int64x2& a, const v_int64x2& b) +{ + int64 delta = (int64)1 << (n-1); + v_int64x2 delta2(delta, delta); + __m128i a1 = v_srai_epi64(_mm_add_epi64(a.val, delta2.val), n); + __m128i b1 = v_srai_epi64(_mm_add_epi64(b.val, delta2.val), n); + __m128i v0 = _mm_unpacklo_epi32(a1, b1); // a0 a1 0 0 + __m128i v1 = _mm_unpackhi_epi32(a1, b1); // b0 b1 0 0 + return v_int32x4(_mm_unpacklo_epi32(v0, v1)); +} + +template inline +void v_rshr_pack_store(int* ptr, const v_int64x2& a) +{ + int64 
delta = (int64)1 << (n-1); + v_int64x2 delta2(delta, delta); + __m128i a1 = v_srai_epi64(_mm_add_epi64(a.val, delta2.val), n); + __m128i a2 = _mm_shuffle_epi32(a1, _MM_SHUFFLE(0, 2, 2, 0)); + _mm_storel_epi64((__m128i*)ptr, a2); +} + +// pack boolean +inline v_uint8x16 v_pack_b(const v_uint16x8& a, const v_uint16x8& b) +{ + __m128i ab = _mm_packs_epi16(a.val, b.val); + return v_uint8x16(ab); +} + +inline v_uint8x16 v_pack_b(const v_uint32x4& a, const v_uint32x4& b, + const v_uint32x4& c, const v_uint32x4& d) +{ + __m128i ab = _mm_packs_epi32(a.val, b.val); + __m128i cd = _mm_packs_epi32(c.val, d.val); + return v_uint8x16(_mm_packs_epi16(ab, cd)); +} + +inline v_uint8x16 v_pack_b(const v_uint64x2& a, const v_uint64x2& b, const v_uint64x2& c, + const v_uint64x2& d, const v_uint64x2& e, const v_uint64x2& f, + const v_uint64x2& g, const v_uint64x2& h) +{ + __m128i ab = _mm_packs_epi32(a.val, b.val); + __m128i cd = _mm_packs_epi32(c.val, d.val); + __m128i ef = _mm_packs_epi32(e.val, f.val); + __m128i gh = _mm_packs_epi32(g.val, h.val); + + __m128i abcd = _mm_packs_epi32(ab, cd); + __m128i efgh = _mm_packs_epi32(ef, gh); + return v_uint8x16(_mm_packs_epi16(abcd, efgh)); +} + +inline v_float32x4 v_matmul(const v_float32x4& v, const v_float32x4& m0, + const v_float32x4& m1, const v_float32x4& m2, + const v_float32x4& m3) +{ + __m128 v0 = _mm_mul_ps(_mm_shuffle_ps(v.val, v.val, _MM_SHUFFLE(0, 0, 0, 0)), m0.val); + __m128 v1 = _mm_mul_ps(_mm_shuffle_ps(v.val, v.val, _MM_SHUFFLE(1, 1, 1, 1)), m1.val); + __m128 v2 = _mm_mul_ps(_mm_shuffle_ps(v.val, v.val, _MM_SHUFFLE(2, 2, 2, 2)), m2.val); + __m128 v3 = _mm_mul_ps(_mm_shuffle_ps(v.val, v.val, _MM_SHUFFLE(3, 3, 3, 3)), m3.val); + + return v_float32x4(_mm_add_ps(_mm_add_ps(v0, v1), _mm_add_ps(v2, v3))); +} + +inline v_float32x4 v_matmuladd(const v_float32x4& v, const v_float32x4& m0, + const v_float32x4& m1, const v_float32x4& m2, + const v_float32x4& a) +{ + __m128 v0 = _mm_mul_ps(_mm_shuffle_ps(v.val, v.val, _MM_SHUFFLE(0, 0, 0, 0)), m0.val); + __m128 v1 = _mm_mul_ps(_mm_shuffle_ps(v.val, v.val, _MM_SHUFFLE(1, 1, 1, 1)), m1.val); + __m128 v2 = _mm_mul_ps(_mm_shuffle_ps(v.val, v.val, _MM_SHUFFLE(2, 2, 2, 2)), m2.val); + + return v_float32x4(_mm_add_ps(_mm_add_ps(v0, v1), _mm_add_ps(v2, a.val))); +} + +#define OPENCV_HAL_IMPL_SSE_BIN_OP(bin_op, _Tpvec, intrin) \ + inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \ + { \ + return _Tpvec(intrin(a.val, b.val)); \ + } \ + inline _Tpvec& operator bin_op##= (_Tpvec& a, const _Tpvec& b) \ + { \ + a.val = intrin(a.val, b.val); \ + return a; \ + } + +OPENCV_HAL_IMPL_SSE_BIN_OP(+, v_uint8x16, _mm_adds_epu8) +OPENCV_HAL_IMPL_SSE_BIN_OP(-, v_uint8x16, _mm_subs_epu8) +OPENCV_HAL_IMPL_SSE_BIN_OP(+, v_int8x16, _mm_adds_epi8) +OPENCV_HAL_IMPL_SSE_BIN_OP(-, v_int8x16, _mm_subs_epi8) +OPENCV_HAL_IMPL_SSE_BIN_OP(+, v_uint16x8, _mm_adds_epu16) +OPENCV_HAL_IMPL_SSE_BIN_OP(-, v_uint16x8, _mm_subs_epu16) +OPENCV_HAL_IMPL_SSE_BIN_OP(+, v_int16x8, _mm_adds_epi16) +OPENCV_HAL_IMPL_SSE_BIN_OP(-, v_int16x8, _mm_subs_epi16) +OPENCV_HAL_IMPL_SSE_BIN_OP(+, v_uint32x4, _mm_add_epi32) +OPENCV_HAL_IMPL_SSE_BIN_OP(-, v_uint32x4, _mm_sub_epi32) +OPENCV_HAL_IMPL_SSE_BIN_OP(*, v_uint32x4, _v128_mullo_epi32) +OPENCV_HAL_IMPL_SSE_BIN_OP(+, v_int32x4, _mm_add_epi32) +OPENCV_HAL_IMPL_SSE_BIN_OP(-, v_int32x4, _mm_sub_epi32) +OPENCV_HAL_IMPL_SSE_BIN_OP(*, v_int32x4, _v128_mullo_epi32) +OPENCV_HAL_IMPL_SSE_BIN_OP(+, v_float32x4, _mm_add_ps) +OPENCV_HAL_IMPL_SSE_BIN_OP(-, v_float32x4, _mm_sub_ps) +OPENCV_HAL_IMPL_SSE_BIN_OP(*, 
v_float32x4, _mm_mul_ps) +OPENCV_HAL_IMPL_SSE_BIN_OP(/, v_float32x4, _mm_div_ps) +OPENCV_HAL_IMPL_SSE_BIN_OP(+, v_float64x2, _mm_add_pd) +OPENCV_HAL_IMPL_SSE_BIN_OP(-, v_float64x2, _mm_sub_pd) +OPENCV_HAL_IMPL_SSE_BIN_OP(*, v_float64x2, _mm_mul_pd) +OPENCV_HAL_IMPL_SSE_BIN_OP(/, v_float64x2, _mm_div_pd) +OPENCV_HAL_IMPL_SSE_BIN_OP(+, v_uint64x2, _mm_add_epi64) +OPENCV_HAL_IMPL_SSE_BIN_OP(-, v_uint64x2, _mm_sub_epi64) +OPENCV_HAL_IMPL_SSE_BIN_OP(+, v_int64x2, _mm_add_epi64) +OPENCV_HAL_IMPL_SSE_BIN_OP(-, v_int64x2, _mm_sub_epi64) + +// saturating multiply 8-bit, 16-bit +#define OPENCV_HAL_IMPL_SSE_MUL_SAT(_Tpvec, _Tpwvec) \ + inline _Tpvec operator * (const _Tpvec& a, const _Tpvec& b) \ + { \ + _Tpwvec c, d; \ + v_mul_expand(a, b, c, d); \ + return v_pack(c, d); \ + } \ + inline _Tpvec& operator *= (_Tpvec& a, const _Tpvec& b) \ + { a = a * b; return a; } + +OPENCV_HAL_IMPL_SSE_MUL_SAT(v_uint8x16, v_uint16x8) +OPENCV_HAL_IMPL_SSE_MUL_SAT(v_int8x16, v_int16x8) +OPENCV_HAL_IMPL_SSE_MUL_SAT(v_uint16x8, v_uint32x4) +OPENCV_HAL_IMPL_SSE_MUL_SAT(v_int16x8, v_int32x4) + +// Multiply and expand +inline void v_mul_expand(const v_uint8x16& a, const v_uint8x16& b, + v_uint16x8& c, v_uint16x8& d) +{ + v_uint16x8 a0, a1, b0, b1; + v_expand(a, a0, a1); + v_expand(b, b0, b1); + c = v_mul_wrap(a0, b0); + d = v_mul_wrap(a1, b1); +} + +inline void v_mul_expand(const v_int8x16& a, const v_int8x16& b, + v_int16x8& c, v_int16x8& d) +{ + v_int16x8 a0, a1, b0, b1; + v_expand(a, a0, a1); + v_expand(b, b0, b1); + c = v_mul_wrap(a0, b0); + d = v_mul_wrap(a1, b1); +} + +inline void v_mul_expand(const v_int16x8& a, const v_int16x8& b, + v_int32x4& c, v_int32x4& d) +{ + __m128i v0 = _mm_mullo_epi16(a.val, b.val); + __m128i v1 = _mm_mulhi_epi16(a.val, b.val); + c.val = _mm_unpacklo_epi16(v0, v1); + d.val = _mm_unpackhi_epi16(v0, v1); +} + +inline void v_mul_expand(const v_uint16x8& a, const v_uint16x8& b, + v_uint32x4& c, v_uint32x4& d) +{ + __m128i v0 = _mm_mullo_epi16(a.val, b.val); + __m128i v1 = _mm_mulhi_epu16(a.val, b.val); + c.val = _mm_unpacklo_epi16(v0, v1); + d.val = _mm_unpackhi_epi16(v0, v1); +} + +inline void v_mul_expand(const v_uint32x4& a, const v_uint32x4& b, + v_uint64x2& c, v_uint64x2& d) +{ + __m128i c0 = _mm_mul_epu32(a.val, b.val); + __m128i c1 = _mm_mul_epu32(_mm_srli_epi64(a.val, 32), _mm_srli_epi64(b.val, 32)); + c.val = _mm_unpacklo_epi64(c0, c1); + d.val = _mm_unpackhi_epi64(c0, c1); +} + +inline v_int16x8 v_mul_hi(const v_int16x8& a, const v_int16x8& b) { return v_int16x8(_mm_mulhi_epi16(a.val, b.val)); } +inline v_uint16x8 v_mul_hi(const v_uint16x8& a, const v_uint16x8& b) { return v_uint16x8(_mm_mulhi_epu16(a.val, b.val)); } + +//////// Dot Product //////// + +// 16 >> 32 +inline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b) +{ return v_int32x4(_mm_madd_epi16(a.val, b.val)); } +inline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b, const v_int32x4& c) +{ return v_dotprod(a, b) + c; } + +// 32 >> 64 +inline v_int64x2 v_dotprod(const v_int32x4& a, const v_int32x4& b) +{ +#if CV_SSE4_1 + __m128i even = _mm_mul_epi32(a.val, b.val); + __m128i odd = _mm_mul_epi32(_mm_srli_epi64(a.val, 32), _mm_srli_epi64(b.val, 32)); + return v_int64x2(_mm_add_epi64(even, odd)); +#else + __m128i even_u = _mm_mul_epu32(a.val, b.val); + __m128i odd_u = _mm_mul_epu32(_mm_srli_epi64(a.val, 32), _mm_srli_epi64(b.val, 32)); + // convert unsigned to signed high multiplication (from: Agner Fog(veclib) and H S Warren: Hacker's delight, 2003, p. 
132) + __m128i a_sign = _mm_srai_epi32(a.val, 31); + __m128i b_sign = _mm_srai_epi32(b.val, 31); + // |x * sign of x + __m128i axb = _mm_and_si128(a.val, b_sign); + __m128i bxa = _mm_and_si128(b.val, a_sign); + // sum of sign corrections + __m128i ssum = _mm_add_epi32(bxa, axb); + __m128i even_ssum = _mm_slli_epi64(ssum, 32); + __m128i odd_ssum = _mm_and_si128(ssum, _mm_set_epi32(-1, 0, -1, 0)); + // convert to signed and prod + return v_int64x2(_mm_add_epi64(_mm_sub_epi64(even_u, even_ssum), _mm_sub_epi64(odd_u, odd_ssum))); +#endif +} +inline v_int64x2 v_dotprod(const v_int32x4& a, const v_int32x4& b, const v_int64x2& c) +{ return v_dotprod(a, b) + c; } + +// 8 >> 32 +inline v_uint32x4 v_dotprod_expand(const v_uint8x16& a, const v_uint8x16& b) +{ + __m128i a0 = _mm_srli_epi16(_mm_slli_si128(a.val, 1), 8); // even + __m128i a1 = _mm_srli_epi16(a.val, 8); // odd + __m128i b0 = _mm_srli_epi16(_mm_slli_si128(b.val, 1), 8); + __m128i b1 = _mm_srli_epi16(b.val, 8); + __m128i p0 = _mm_madd_epi16(a0, b0); + __m128i p1 = _mm_madd_epi16(a1, b1); + return v_uint32x4(_mm_add_epi32(p0, p1)); +} +inline v_uint32x4 v_dotprod_expand(const v_uint8x16& a, const v_uint8x16& b, const v_uint32x4& c) +{ return v_dotprod_expand(a, b) + c; } + +inline v_int32x4 v_dotprod_expand(const v_int8x16& a, const v_int8x16& b) +{ + __m128i a0 = _mm_srai_epi16(_mm_slli_si128(a.val, 1), 8); // even + __m128i a1 = _mm_srai_epi16(a.val, 8); // odd + __m128i b0 = _mm_srai_epi16(_mm_slli_si128(b.val, 1), 8); + __m128i b1 = _mm_srai_epi16(b.val, 8); + __m128i p0 = _mm_madd_epi16(a0, b0); + __m128i p1 = _mm_madd_epi16(a1, b1); + return v_int32x4(_mm_add_epi32(p0, p1)); +} +inline v_int32x4 v_dotprod_expand(const v_int8x16& a, const v_int8x16& b, const v_int32x4& c) +{ return v_dotprod_expand(a, b) + c; } + +// 16 >> 64 +inline v_uint64x2 v_dotprod_expand(const v_uint16x8& a, const v_uint16x8& b) +{ + v_uint32x4 c, d; + v_mul_expand(a, b, c, d); + + v_uint64x2 c0, c1, d0, d1; + v_expand(c, c0, c1); + v_expand(d, d0, d1); + + c0 += c1; d0 += d1; + return v_uint64x2(_mm_add_epi64( + _mm_unpacklo_epi64(c0.val, d0.val), + _mm_unpackhi_epi64(c0.val, d0.val) + )); +} +inline v_uint64x2 v_dotprod_expand(const v_uint16x8& a, const v_uint16x8& b, const v_uint64x2& c) +{ return v_dotprod_expand(a, b) + c; } + +inline v_int64x2 v_dotprod_expand(const v_int16x8& a, const v_int16x8& b) +{ + v_int32x4 prod = v_dotprod(a, b); + v_int64x2 c, d; + v_expand(prod, c, d); + return v_int64x2(_mm_add_epi64( + _mm_unpacklo_epi64(c.val, d.val), + _mm_unpackhi_epi64(c.val, d.val) + )); +} +inline v_int64x2 v_dotprod_expand(const v_int16x8& a, const v_int16x8& b, const v_int64x2& c) +{ return v_dotprod_expand(a, b) + c; } + +// 32 >> 64f +inline v_float64x2 v_dotprod_expand(const v_int32x4& a, const v_int32x4& b) +{ +#if CV_SSE4_1 + return v_cvt_f64(v_dotprod(a, b)); +#else + v_float64x2 c = v_cvt_f64(a) * v_cvt_f64(b); + v_float64x2 d = v_cvt_f64_high(a) * v_cvt_f64_high(b); + + return v_float64x2(_mm_add_pd( + _mm_unpacklo_pd(c.val, d.val), + _mm_unpackhi_pd(c.val, d.val) + )); +#endif +} +inline v_float64x2 v_dotprod_expand(const v_int32x4& a, const v_int32x4& b, const v_float64x2& c) +{ return v_dotprod_expand(a, b) + c; } + +//////// Fast Dot Product //////// + +// 16 >> 32 +inline v_int32x4 v_dotprod_fast(const v_int16x8& a, const v_int16x8& b) +{ return v_dotprod(a, b); } +inline v_int32x4 v_dotprod_fast(const v_int16x8& a, const v_int16x8& b, const v_int32x4& c) +{ return v_dotprod(a, b) + c; } + +// 32 >> 64 +inline v_int64x2 v_dotprod_fast(const 
v_int32x4& a, const v_int32x4& b) +{ return v_dotprod(a, b); } +inline v_int64x2 v_dotprod_fast(const v_int32x4& a, const v_int32x4& b, const v_int64x2& c) +{ return v_dotprod_fast(a, b) + c; } + +// 8 >> 32 +inline v_uint32x4 v_dotprod_expand_fast(const v_uint8x16& a, const v_uint8x16& b) +{ + __m128i a0 = v_expand_low(a).val; + __m128i a1 = v_expand_high(a).val; + __m128i b0 = v_expand_low(b).val; + __m128i b1 = v_expand_high(b).val; + __m128i p0 = _mm_madd_epi16(a0, b0); + __m128i p1 = _mm_madd_epi16(a1, b1); + return v_uint32x4(_mm_add_epi32(p0, p1)); +} +inline v_uint32x4 v_dotprod_expand_fast(const v_uint8x16& a, const v_uint8x16& b, const v_uint32x4& c) +{ return v_dotprod_expand_fast(a, b) + c; } + +inline v_int32x4 v_dotprod_expand_fast(const v_int8x16& a, const v_int8x16& b) +{ +#if CV_SSE4_1 + __m128i a0 = _mm_cvtepi8_epi16(a.val); + __m128i a1 = v_expand_high(a).val; + __m128i b0 = _mm_cvtepi8_epi16(b.val); + __m128i b1 = v_expand_high(b).val; + __m128i p0 = _mm_madd_epi16(a0, b0); + __m128i p1 = _mm_madd_epi16(a1, b1); + return v_int32x4(_mm_add_epi32(p0, p1)); +#else + return v_dotprod_expand(a, b); +#endif +} +inline v_int32x4 v_dotprod_expand_fast(const v_int8x16& a, const v_int8x16& b, const v_int32x4& c) +{ return v_dotprod_expand_fast(a, b) + c; } + +// 16 >> 64 +inline v_uint64x2 v_dotprod_expand_fast(const v_uint16x8& a, const v_uint16x8& b) +{ + v_uint32x4 c, d; + v_mul_expand(a, b, c, d); + + v_uint64x2 c0, c1, d0, d1; + v_expand(c, c0, c1); + v_expand(d, d0, d1); + + c0 += c1; d0 += d1; + return c0 + d0; +} +inline v_uint64x2 v_dotprod_expand_fast(const v_uint16x8& a, const v_uint16x8& b, const v_uint64x2& c) +{ return v_dotprod_expand_fast(a, b) + c; } + +inline v_int64x2 v_dotprod_expand_fast(const v_int16x8& a, const v_int16x8& b) +{ + v_int32x4 prod = v_dotprod(a, b); + v_int64x2 c, d; + v_expand(prod, c, d); + return c + d; +} +inline v_int64x2 v_dotprod_expand_fast(const v_int16x8& a, const v_int16x8& b, const v_int64x2& c) +{ return v_dotprod_expand_fast(a, b) + c; } + +// 32 >> 64f +v_float64x2 v_fma(const v_float64x2& a, const v_float64x2& b, const v_float64x2& c); +inline v_float64x2 v_dotprod_expand_fast(const v_int32x4& a, const v_int32x4& b) +{ return v_fma(v_cvt_f64(a), v_cvt_f64(b), v_cvt_f64_high(a) * v_cvt_f64_high(b)); } +inline v_float64x2 v_dotprod_expand_fast(const v_int32x4& a, const v_int32x4& b, const v_float64x2& c) +{ return v_fma(v_cvt_f64(a), v_cvt_f64(b), v_fma(v_cvt_f64_high(a), v_cvt_f64_high(b), c)); } + +#define OPENCV_HAL_IMPL_SSE_LOGIC_OP(_Tpvec, suffix, not_const) \ + OPENCV_HAL_IMPL_SSE_BIN_OP(&, _Tpvec, _mm_and_##suffix) \ + OPENCV_HAL_IMPL_SSE_BIN_OP(|, _Tpvec, _mm_or_##suffix) \ + OPENCV_HAL_IMPL_SSE_BIN_OP(^, _Tpvec, _mm_xor_##suffix) \ + inline _Tpvec operator ~ (const _Tpvec& a) \ + { \ + return _Tpvec(_mm_xor_##suffix(a.val, not_const)); \ + } + +OPENCV_HAL_IMPL_SSE_LOGIC_OP(v_uint8x16, si128, _mm_set1_epi32(-1)) +OPENCV_HAL_IMPL_SSE_LOGIC_OP(v_int8x16, si128, _mm_set1_epi32(-1)) +OPENCV_HAL_IMPL_SSE_LOGIC_OP(v_uint16x8, si128, _mm_set1_epi32(-1)) +OPENCV_HAL_IMPL_SSE_LOGIC_OP(v_int16x8, si128, _mm_set1_epi32(-1)) +OPENCV_HAL_IMPL_SSE_LOGIC_OP(v_uint32x4, si128, _mm_set1_epi32(-1)) +OPENCV_HAL_IMPL_SSE_LOGIC_OP(v_int32x4, si128, _mm_set1_epi32(-1)) +OPENCV_HAL_IMPL_SSE_LOGIC_OP(v_uint64x2, si128, _mm_set1_epi32(-1)) +OPENCV_HAL_IMPL_SSE_LOGIC_OP(v_int64x2, si128, _mm_set1_epi32(-1)) +OPENCV_HAL_IMPL_SSE_LOGIC_OP(v_float32x4, ps, _mm_castsi128_ps(_mm_set1_epi32(-1))) +OPENCV_HAL_IMPL_SSE_LOGIC_OP(v_float64x2, pd, 
_mm_castsi128_pd(_mm_set1_epi32(-1))) + +inline v_float32x4 v_sqrt(const v_float32x4& x) +{ return v_float32x4(_mm_sqrt_ps(x.val)); } + +inline v_float32x4 v_invsqrt(const v_float32x4& x) +{ + const __m128 _0_5 = _mm_set1_ps(0.5f), _1_5 = _mm_set1_ps(1.5f); + __m128 t = x.val; + __m128 h = _mm_mul_ps(t, _0_5); + t = _mm_rsqrt_ps(t); + t = _mm_mul_ps(t, _mm_sub_ps(_1_5, _mm_mul_ps(_mm_mul_ps(t, t), h))); + return v_float32x4(t); +} + +inline v_float64x2 v_sqrt(const v_float64x2& x) +{ return v_float64x2(_mm_sqrt_pd(x.val)); } + +inline v_float64x2 v_invsqrt(const v_float64x2& x) +{ + const __m128d v_1 = _mm_set1_pd(1.); + return v_float64x2(_mm_div_pd(v_1, _mm_sqrt_pd(x.val))); +} + +#define OPENCV_HAL_IMPL_SSE_ABS_INT_FUNC(_Tpuvec, _Tpsvec, func, suffix, subWidth) \ +inline _Tpuvec v_abs(const _Tpsvec& x) \ +{ return _Tpuvec(_mm_##func##_ep##suffix(x.val, _mm_sub_ep##subWidth(_mm_setzero_si128(), x.val))); } + +OPENCV_HAL_IMPL_SSE_ABS_INT_FUNC(v_uint8x16, v_int8x16, min, u8, i8) +OPENCV_HAL_IMPL_SSE_ABS_INT_FUNC(v_uint16x8, v_int16x8, max, i16, i16) +inline v_uint32x4 v_abs(const v_int32x4& x) +{ + __m128i s = _mm_srli_epi32(x.val, 31); + __m128i f = _mm_srai_epi32(x.val, 31); + return v_uint32x4(_mm_add_epi32(_mm_xor_si128(x.val, f), s)); +} +inline v_float32x4 v_abs(const v_float32x4& x) +{ return v_float32x4(_mm_and_ps(x.val, _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff)))); } +inline v_float64x2 v_abs(const v_float64x2& x) +{ + return v_float64x2(_mm_and_pd(x.val, + _mm_castsi128_pd(_mm_srli_epi64(_mm_set1_epi32(-1), 1)))); +} + +// TODO: exp, log, sin, cos + +#define OPENCV_HAL_IMPL_SSE_BIN_FUNC(_Tpvec, func, intrin) \ +inline _Tpvec func(const _Tpvec& a, const _Tpvec& b) \ +{ \ + return _Tpvec(intrin(a.val, b.val)); \ +} + +OPENCV_HAL_IMPL_SSE_BIN_FUNC(v_uint8x16, v_min, _mm_min_epu8) +OPENCV_HAL_IMPL_SSE_BIN_FUNC(v_uint8x16, v_max, _mm_max_epu8) +OPENCV_HAL_IMPL_SSE_BIN_FUNC(v_int16x8, v_min, _mm_min_epi16) +OPENCV_HAL_IMPL_SSE_BIN_FUNC(v_int16x8, v_max, _mm_max_epi16) +OPENCV_HAL_IMPL_SSE_BIN_FUNC(v_float32x4, v_min, _mm_min_ps) +OPENCV_HAL_IMPL_SSE_BIN_FUNC(v_float32x4, v_max, _mm_max_ps) +OPENCV_HAL_IMPL_SSE_BIN_FUNC(v_float64x2, v_min, _mm_min_pd) +OPENCV_HAL_IMPL_SSE_BIN_FUNC(v_float64x2, v_max, _mm_max_pd) + +inline v_int8x16 v_min(const v_int8x16& a, const v_int8x16& b) +{ +#if CV_SSE4_1 + return v_int8x16(_mm_min_epi8(a.val, b.val)); +#else + __m128i delta = _mm_set1_epi8((char)-128); + return v_int8x16(_mm_xor_si128(delta, _mm_min_epu8(_mm_xor_si128(a.val, delta), + _mm_xor_si128(b.val, delta)))); +#endif +} +inline v_int8x16 v_max(const v_int8x16& a, const v_int8x16& b) +{ +#if CV_SSE4_1 + return v_int8x16(_mm_max_epi8(a.val, b.val)); +#else + __m128i delta = _mm_set1_epi8((char)-128); + return v_int8x16(_mm_xor_si128(delta, _mm_max_epu8(_mm_xor_si128(a.val, delta), + _mm_xor_si128(b.val, delta)))); +#endif +} +inline v_uint16x8 v_min(const v_uint16x8& a, const v_uint16x8& b) +{ +#if CV_SSE4_1 + return v_uint16x8(_mm_min_epu16(a.val, b.val)); +#else + return v_uint16x8(_mm_subs_epu16(a.val, _mm_subs_epu16(a.val, b.val))); +#endif +} +inline v_uint16x8 v_max(const v_uint16x8& a, const v_uint16x8& b) +{ +#if CV_SSE4_1 + return v_uint16x8(_mm_max_epu16(a.val, b.val)); +#else + return v_uint16x8(_mm_adds_epu16(_mm_subs_epu16(a.val, b.val), b.val)); +#endif +} +inline v_uint32x4 v_min(const v_uint32x4& a, const v_uint32x4& b) +{ +#if CV_SSE4_1 + return v_uint32x4(_mm_min_epu32(a.val, b.val)); +#else + __m128i delta = _mm_set1_epi32((int)0x80000000); + __m128i mask = 
_mm_cmpgt_epi32(_mm_xor_si128(a.val, delta), _mm_xor_si128(b.val, delta)); + return v_uint32x4(v_select_si128(mask, b.val, a.val)); +#endif +} +inline v_uint32x4 v_max(const v_uint32x4& a, const v_uint32x4& b) +{ +#if CV_SSE4_1 + return v_uint32x4(_mm_max_epu32(a.val, b.val)); +#else + __m128i delta = _mm_set1_epi32((int)0x80000000); + __m128i mask = _mm_cmpgt_epi32(_mm_xor_si128(a.val, delta), _mm_xor_si128(b.val, delta)); + return v_uint32x4(v_select_si128(mask, a.val, b.val)); +#endif +} +inline v_int32x4 v_min(const v_int32x4& a, const v_int32x4& b) +{ +#if CV_SSE4_1 + return v_int32x4(_mm_min_epi32(a.val, b.val)); +#else + return v_int32x4(v_select_si128(_mm_cmpgt_epi32(a.val, b.val), b.val, a.val)); +#endif +} +inline v_int32x4 v_max(const v_int32x4& a, const v_int32x4& b) +{ +#if CV_SSE4_1 + return v_int32x4(_mm_max_epi32(a.val, b.val)); +#else + return v_int32x4(v_select_si128(_mm_cmpgt_epi32(a.val, b.val), a.val, b.val)); +#endif +} + +#define OPENCV_HAL_IMPL_SSE_INT_CMP_OP(_Tpuvec, _Tpsvec, suffix, sbit) \ +inline _Tpuvec operator == (const _Tpuvec& a, const _Tpuvec& b) \ +{ return _Tpuvec(_mm_cmpeq_##suffix(a.val, b.val)); } \ +inline _Tpuvec operator != (const _Tpuvec& a, const _Tpuvec& b) \ +{ \ + __m128i not_mask = _mm_set1_epi32(-1); \ + return _Tpuvec(_mm_xor_si128(_mm_cmpeq_##suffix(a.val, b.val), not_mask)); \ +} \ +inline _Tpsvec operator == (const _Tpsvec& a, const _Tpsvec& b) \ +{ return _Tpsvec(_mm_cmpeq_##suffix(a.val, b.val)); } \ +inline _Tpsvec operator != (const _Tpsvec& a, const _Tpsvec& b) \ +{ \ + __m128i not_mask = _mm_set1_epi32(-1); \ + return _Tpsvec(_mm_xor_si128(_mm_cmpeq_##suffix(a.val, b.val), not_mask)); \ +} \ +inline _Tpuvec operator < (const _Tpuvec& a, const _Tpuvec& b) \ +{ \ + __m128i smask = _mm_set1_##suffix(sbit); \ + return _Tpuvec(_mm_cmpgt_##suffix(_mm_xor_si128(b.val, smask), _mm_xor_si128(a.val, smask))); \ +} \ +inline _Tpuvec operator > (const _Tpuvec& a, const _Tpuvec& b) \ +{ \ + __m128i smask = _mm_set1_##suffix(sbit); \ + return _Tpuvec(_mm_cmpgt_##suffix(_mm_xor_si128(a.val, smask), _mm_xor_si128(b.val, smask))); \ +} \ +inline _Tpuvec operator <= (const _Tpuvec& a, const _Tpuvec& b) \ +{ \ + __m128i smask = _mm_set1_##suffix(sbit); \ + __m128i not_mask = _mm_set1_epi32(-1); \ + __m128i res = _mm_cmpgt_##suffix(_mm_xor_si128(a.val, smask), _mm_xor_si128(b.val, smask)); \ + return _Tpuvec(_mm_xor_si128(res, not_mask)); \ +} \ +inline _Tpuvec operator >= (const _Tpuvec& a, const _Tpuvec& b) \ +{ \ + __m128i smask = _mm_set1_##suffix(sbit); \ + __m128i not_mask = _mm_set1_epi32(-1); \ + __m128i res = _mm_cmpgt_##suffix(_mm_xor_si128(b.val, smask), _mm_xor_si128(a.val, smask)); \ + return _Tpuvec(_mm_xor_si128(res, not_mask)); \ +} \ +inline _Tpsvec operator < (const _Tpsvec& a, const _Tpsvec& b) \ +{ \ + return _Tpsvec(_mm_cmpgt_##suffix(b.val, a.val)); \ +} \ +inline _Tpsvec operator > (const _Tpsvec& a, const _Tpsvec& b) \ +{ \ + return _Tpsvec(_mm_cmpgt_##suffix(a.val, b.val)); \ +} \ +inline _Tpsvec operator <= (const _Tpsvec& a, const _Tpsvec& b) \ +{ \ + __m128i not_mask = _mm_set1_epi32(-1); \ + return _Tpsvec(_mm_xor_si128(_mm_cmpgt_##suffix(a.val, b.val), not_mask)); \ +} \ +inline _Tpsvec operator >= (const _Tpsvec& a, const _Tpsvec& b) \ +{ \ + __m128i not_mask = _mm_set1_epi32(-1); \ + return _Tpsvec(_mm_xor_si128(_mm_cmpgt_##suffix(b.val, a.val), not_mask)); \ +} + +OPENCV_HAL_IMPL_SSE_INT_CMP_OP(v_uint8x16, v_int8x16, epi8, (char)-128) +OPENCV_HAL_IMPL_SSE_INT_CMP_OP(v_uint16x8, v_int16x8, epi16, (short)-32768) 
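The OPENCV_HAL_IMPL_SSE_INT_CMP_OP macro above works around the fact that SSE2 only provides signed compares (_mm_cmpgt_epi8/epi16/epi32): for the unsigned types it XORs both operands with the sign bit ("smask") and then runs the signed compare. A minimal scalar sketch of that identity, added for illustration only (the helper name ugt_via_signed is invented and is not part of the vendored header):

#include <cassert>
#include <cstdint>

// a > b in unsigned order, computed with a signed compare after flipping the sign bit,
// which is what the per-lane "smask" trick in the macro above does.
inline bool ugt_via_signed(uint32_t a, uint32_t b)
{
    int32_t sa = (int32_t)(a ^ 0x80000000u);  // two's complement reinterpretation, as in the vector lanes
    int32_t sb = (int32_t)(b ^ 0x80000000u);
    return sa > sb;                           // signed compare now reproduces the unsigned ordering
}

int main()
{
    assert(ugt_via_signed(0xFFFFFFFFu, 1u));   // UINT32_MAX > 1
    assert(!ugt_via_signed(0u, 0x80000000u));  // 0 is not > 2^31
    return 0;
}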
+OPENCV_HAL_IMPL_SSE_INT_CMP_OP(v_uint32x4, v_int32x4, epi32, (int)0x80000000) + +#define OPENCV_HAL_IMPL_SSE_FLT_CMP_OP(_Tpvec, suffix) \ +inline _Tpvec operator == (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(_mm_cmpeq_##suffix(a.val, b.val)); } \ +inline _Tpvec operator != (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(_mm_cmpneq_##suffix(a.val, b.val)); } \ +inline _Tpvec operator < (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(_mm_cmplt_##suffix(a.val, b.val)); } \ +inline _Tpvec operator > (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(_mm_cmpgt_##suffix(a.val, b.val)); } \ +inline _Tpvec operator <= (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(_mm_cmple_##suffix(a.val, b.val)); } \ +inline _Tpvec operator >= (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(_mm_cmpge_##suffix(a.val, b.val)); } + +OPENCV_HAL_IMPL_SSE_FLT_CMP_OP(v_float32x4, ps) +OPENCV_HAL_IMPL_SSE_FLT_CMP_OP(v_float64x2, pd) + +#if CV_SSE4_1 +#define OPENCV_HAL_IMPL_SSE_64BIT_CMP_OP(_Tpvec) \ +inline _Tpvec operator == (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(_mm_cmpeq_epi64(a.val, b.val)); } \ +inline _Tpvec operator != (const _Tpvec& a, const _Tpvec& b) \ +{ return ~(a == b); } +#else +#define OPENCV_HAL_IMPL_SSE_64BIT_CMP_OP(_Tpvec) \ +inline _Tpvec operator == (const _Tpvec& a, const _Tpvec& b) \ +{ __m128i cmp = _mm_cmpeq_epi32(a.val, b.val); \ + return _Tpvec(_mm_and_si128(cmp, _mm_shuffle_epi32(cmp, _MM_SHUFFLE(2, 3, 0, 1)))); } \ +inline _Tpvec operator != (const _Tpvec& a, const _Tpvec& b) \ +{ return ~(a == b); } +#endif + +OPENCV_HAL_IMPL_SSE_64BIT_CMP_OP(v_uint64x2) +OPENCV_HAL_IMPL_SSE_64BIT_CMP_OP(v_int64x2) + +inline v_float32x4 v_not_nan(const v_float32x4& a) +{ return v_float32x4(_mm_cmpord_ps(a.val, a.val)); } +inline v_float64x2 v_not_nan(const v_float64x2& a) +{ return v_float64x2(_mm_cmpord_pd(a.val, a.val)); } + +OPENCV_HAL_IMPL_SSE_BIN_FUNC(v_uint8x16, v_add_wrap, _mm_add_epi8) +OPENCV_HAL_IMPL_SSE_BIN_FUNC(v_int8x16, v_add_wrap, _mm_add_epi8) +OPENCV_HAL_IMPL_SSE_BIN_FUNC(v_uint16x8, v_add_wrap, _mm_add_epi16) +OPENCV_HAL_IMPL_SSE_BIN_FUNC(v_int16x8, v_add_wrap, _mm_add_epi16) +OPENCV_HAL_IMPL_SSE_BIN_FUNC(v_uint8x16, v_sub_wrap, _mm_sub_epi8) +OPENCV_HAL_IMPL_SSE_BIN_FUNC(v_int8x16, v_sub_wrap, _mm_sub_epi8) +OPENCV_HAL_IMPL_SSE_BIN_FUNC(v_uint16x8, v_sub_wrap, _mm_sub_epi16) +OPENCV_HAL_IMPL_SSE_BIN_FUNC(v_int16x8, v_sub_wrap, _mm_sub_epi16) +OPENCV_HAL_IMPL_SSE_BIN_FUNC(v_uint16x8, v_mul_wrap, _mm_mullo_epi16) +OPENCV_HAL_IMPL_SSE_BIN_FUNC(v_int16x8, v_mul_wrap, _mm_mullo_epi16) + +inline v_uint8x16 v_mul_wrap(const v_uint8x16& a, const v_uint8x16& b) +{ + __m128i ad = _mm_srai_epi16(a.val, 8); + __m128i bd = _mm_srai_epi16(b.val, 8); + __m128i p0 = _mm_mullo_epi16(a.val, b.val); // even + __m128i p1 = _mm_slli_epi16(_mm_mullo_epi16(ad, bd), 8); // odd + const __m128i b01 = _mm_set1_epi32(0xFF00FF00); + return v_uint8x16(_v128_blendv_epi8(p0, p1, b01)); +} +inline v_int8x16 v_mul_wrap(const v_int8x16& a, const v_int8x16& b) +{ + return v_reinterpret_as_s8(v_mul_wrap(v_reinterpret_as_u8(a), v_reinterpret_as_u8(b))); +} + +/** Absolute difference **/ + +inline v_uint8x16 v_absdiff(const v_uint8x16& a, const v_uint8x16& b) +{ return v_add_wrap(a - b, b - a); } +inline v_uint16x8 v_absdiff(const v_uint16x8& a, const v_uint16x8& b) +{ return v_add_wrap(a - b, b - a); } +inline v_uint32x4 v_absdiff(const v_uint32x4& a, const v_uint32x4& b) +{ return v_max(a, b) - v_min(a, b); } + +inline v_uint8x16 v_absdiff(const v_int8x16& a, const 
v_int8x16& b) +{ + v_int8x16 d = v_sub_wrap(a, b); + v_int8x16 m = a < b; + return v_reinterpret_as_u8(v_sub_wrap(d ^ m, m)); +} +inline v_uint16x8 v_absdiff(const v_int16x8& a, const v_int16x8& b) +{ + return v_reinterpret_as_u16(v_sub_wrap(v_max(a, b), v_min(a, b))); +} +inline v_uint32x4 v_absdiff(const v_int32x4& a, const v_int32x4& b) +{ + v_int32x4 d = a - b; + v_int32x4 m = a < b; + return v_reinterpret_as_u32((d ^ m) - m); +} + +/** Saturating absolute difference **/ +inline v_int8x16 v_absdiffs(const v_int8x16& a, const v_int8x16& b) +{ + v_int8x16 d = a - b; + v_int8x16 m = a < b; + return (d ^ m) - m; + } +inline v_int16x8 v_absdiffs(const v_int16x8& a, const v_int16x8& b) +{ return v_max(a, b) - v_min(a, b); } + + +inline v_int32x4 v_fma(const v_int32x4& a, const v_int32x4& b, const v_int32x4& c) +{ + return a * b + c; +} + +inline v_int32x4 v_muladd(const v_int32x4& a, const v_int32x4& b, const v_int32x4& c) +{ + return v_fma(a, b, c); +} + +inline v_float32x4 v_fma(const v_float32x4& a, const v_float32x4& b, const v_float32x4& c) +{ +#if CV_FMA3 + return v_float32x4(_mm_fmadd_ps(a.val, b.val, c.val)); +#else + return v_float32x4(_mm_add_ps(_mm_mul_ps(a.val, b.val), c.val)); +#endif +} + +inline v_float64x2 v_fma(const v_float64x2& a, const v_float64x2& b, const v_float64x2& c) +{ +#if CV_FMA3 + return v_float64x2(_mm_fmadd_pd(a.val, b.val, c.val)); +#else + return v_float64x2(_mm_add_pd(_mm_mul_pd(a.val, b.val), c.val)); +#endif +} + +#define OPENCV_HAL_IMPL_SSE_MISC_FLT_OP(_Tpvec, _Tp, _Tpreg, suffix, absmask_vec) \ +inline _Tpvec v_absdiff(const _Tpvec& a, const _Tpvec& b) \ +{ \ + _Tpreg absmask = _mm_castsi128_##suffix(absmask_vec); \ + return _Tpvec(_mm_and_##suffix(_mm_sub_##suffix(a.val, b.val), absmask)); \ +} \ +inline _Tpvec v_magnitude(const _Tpvec& a, const _Tpvec& b) \ +{ \ + _Tpvec res = v_fma(a, a, b*b); \ + return _Tpvec(_mm_sqrt_##suffix(res.val)); \ +} \ +inline _Tpvec v_sqr_magnitude(const _Tpvec& a, const _Tpvec& b) \ +{ \ + return v_fma(a, a, b*b); \ +} \ +inline _Tpvec v_muladd(const _Tpvec& a, const _Tpvec& b, const _Tpvec& c) \ +{ \ + return v_fma(a, b, c); \ +} + +OPENCV_HAL_IMPL_SSE_MISC_FLT_OP(v_float32x4, float, __m128, ps, _mm_set1_epi32((int)0x7fffffff)) +OPENCV_HAL_IMPL_SSE_MISC_FLT_OP(v_float64x2, double, __m128d, pd, _mm_srli_epi64(_mm_set1_epi32(-1), 1)) + +#define OPENCV_HAL_IMPL_SSE_SHIFT_OP(_Tpuvec, _Tpsvec, suffix, srai) \ +inline _Tpuvec operator << (const _Tpuvec& a, int imm) \ +{ \ + return _Tpuvec(_mm_slli_##suffix(a.val, imm)); \ +} \ +inline _Tpsvec operator << (const _Tpsvec& a, int imm) \ +{ \ + return _Tpsvec(_mm_slli_##suffix(a.val, imm)); \ +} \ +inline _Tpuvec operator >> (const _Tpuvec& a, int imm) \ +{ \ + return _Tpuvec(_mm_srli_##suffix(a.val, imm)); \ +} \ +inline _Tpsvec operator >> (const _Tpsvec& a, int imm) \ +{ \ + return _Tpsvec(srai(a.val, imm)); \ +} \ +template \ +inline _Tpuvec v_shl(const _Tpuvec& a) \ +{ \ + return _Tpuvec(_mm_slli_##suffix(a.val, imm)); \ +} \ +template \ +inline _Tpsvec v_shl(const _Tpsvec& a) \ +{ \ + return _Tpsvec(_mm_slli_##suffix(a.val, imm)); \ +} \ +template \ +inline _Tpuvec v_shr(const _Tpuvec& a) \ +{ \ + return _Tpuvec(_mm_srli_##suffix(a.val, imm)); \ +} \ +template \ +inline _Tpsvec v_shr(const _Tpsvec& a) \ +{ \ + return _Tpsvec(srai(a.val, imm)); \ +} + +OPENCV_HAL_IMPL_SSE_SHIFT_OP(v_uint16x8, v_int16x8, epi16, _mm_srai_epi16) +OPENCV_HAL_IMPL_SSE_SHIFT_OP(v_uint32x4, v_int32x4, epi32, _mm_srai_epi32) +OPENCV_HAL_IMPL_SSE_SHIFT_OP(v_uint64x2, v_int64x2, epi64, v_srai_epi64) + 
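SSE2 has no 64-bit arithmetic right shift, so the epi64 instantiation of the shift macro above routes operator>> through v_srai_epi64, which rebuilds the arithmetic shift from a logical shift and the lane's sign mask. A scalar sketch of the same identity, added for illustration only (srai64_emulated is an invented name, not part of the vendored header):

#include <cassert>
#include <cstdint>

// Arithmetic right shift built from a logical shift plus a sign fix-up,
// mirroring v_srai_epi64 above: ((x ^ m) >> n) ^ m with m = all-ones iff x < 0.
inline int64_t srai64_emulated(int64_t x, int n)
{
    uint64_t m = (x < 0) ? ~UINT64_C(0) : 0;   // sign mask, the "smask" of v_srai_epi64
    uint64_t u = (uint64_t)x;
    return (int64_t)(((u ^ m) >> n) ^ m);      // logical shift, then restore the sign bits
}

int main()
{
    assert(srai64_emulated(-16, 2) == -4);
    assert(srai64_emulated(-1, 1) == -1);      // sign bits shift in from the left
    assert(srai64_emulated(1024, 3) == 128);   // non-negative values behave like a plain shift
    return 0;
}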
+namespace hal_sse_internal
+{
+    template<int imm,
+             bool is_invalid = ((imm < 0) || (imm > 16)),
+             bool is_first = (imm == 0),
+             bool is_half = (imm == 8),
+             bool is_second = (imm == 16),
+             bool is_other = (((imm > 0) && (imm < 8)) || ((imm > 8) && (imm < 16)))>
+    class v_sse_palignr_u8_class;
+
+    template<int imm>
+    class v_sse_palignr_u8_class<imm, true, false, false, false, false>;
+
+    template<int imm>
+    class v_sse_palignr_u8_class<imm, false, true, false, false, false>
+    {
+    public:
+        inline __m128i operator()(const __m128i& a, const __m128i&) const
+        {
+            return a;
+        }
+    };
+
+    template<int imm>
+    class v_sse_palignr_u8_class<imm, false, false, true, false, false>
+    {
+    public:
+        inline __m128i operator()(const __m128i& a, const __m128i& b) const
+        {
+            return _mm_unpacklo_epi64(_mm_unpackhi_epi64(a, a), b);
+        }
+    };
+
+    template<int imm>
+    class v_sse_palignr_u8_class<imm, false, false, false, true, false>
+    {
+    public:
+        inline __m128i operator()(const __m128i&, const __m128i& b) const
+        {
+            return b;
+        }
+    };
+
+    template<int imm>
+    class v_sse_palignr_u8_class<imm, false, false, false, false, true>
+    {
+#if CV_SSSE3
+    public:
+        inline __m128i operator()(const __m128i& a, const __m128i& b) const
+        {
+            return _mm_alignr_epi8(b, a, imm);
+        }
+#else
+    public:
+        inline __m128i operator()(const __m128i& a, const __m128i& b) const
+        {
+            enum { imm2 = (sizeof(__m128i) - imm) };
+            return _mm_or_si128(_mm_srli_si128(a, imm), _mm_slli_si128(b, imm2));
+        }
+#endif
+    };
+
+    template<int imm>
+    inline __m128i v_sse_palignr_u8(const __m128i& a, const __m128i& b)
+    {
+        CV_StaticAssert((imm >= 0) && (imm <= 16), "Invalid imm for v_sse_palignr_u8.");
+        return v_sse_palignr_u8_class<imm>()(a, b);
+    }
+}
+
+template<int imm, typename _Tpvec>
+inline _Tpvec v_rotate_right(const _Tpvec &a)
+{
+    using namespace hal_sse_internal;
+    enum { imm2 = (imm * sizeof(typename _Tpvec::lane_type)) };
+    return _Tpvec(v_sse_reinterpret_as<typename _Tpvec::vector_type>(
+        _mm_srli_si128(
+            v_sse_reinterpret_as<__m128i>(a.val), imm2)));
+}
+
+template<int imm, typename _Tpvec>
+inline _Tpvec v_rotate_left(const _Tpvec &a)
+{
+    using namespace hal_sse_internal;
+    enum { imm2 = (imm * sizeof(typename _Tpvec::lane_type)) };
+    return _Tpvec(v_sse_reinterpret_as<typename _Tpvec::vector_type>(
+        _mm_slli_si128(
+            v_sse_reinterpret_as<__m128i>(a.val), imm2)));
+}
+
+template<int imm, typename _Tpvec>
+inline _Tpvec v_rotate_right(const _Tpvec &a, const _Tpvec &b)
+{
+    using namespace hal_sse_internal;
+    enum { imm2 = (imm * sizeof(typename _Tpvec::lane_type)) };
+    return _Tpvec(v_sse_reinterpret_as<typename _Tpvec::vector_type>(
+        v_sse_palignr_u8<imm2>(
+            v_sse_reinterpret_as<__m128i>(a.val),
+            v_sse_reinterpret_as<__m128i>(b.val))));
+}
+
+template<int imm, typename _Tpvec>
+inline _Tpvec v_rotate_left(const _Tpvec &a, const _Tpvec &b)
+{
+    using namespace hal_sse_internal;
+    enum { imm2 = ((_Tpvec::nlanes - imm) * sizeof(typename _Tpvec::lane_type)) };
+    return _Tpvec(v_sse_reinterpret_as<typename _Tpvec::vector_type>(
+        v_sse_palignr_u8<imm2>(
+            v_sse_reinterpret_as<__m128i>(b.val),
+            v_sse_reinterpret_as<__m128i>(a.val))));
+}
+
+#define OPENCV_HAL_IMPL_SSE_LOADSTORE_INT_OP(_Tpvec, _Tp) \
+inline _Tpvec v_load(const _Tp* ptr) \
+{ return _Tpvec(_mm_loadu_si128((const __m128i*)ptr)); } \
+inline _Tpvec v_load_aligned(const _Tp* ptr) \
+{ return _Tpvec(_mm_load_si128((const __m128i*)ptr)); } \
+inline _Tpvec v_load_low(const _Tp* ptr) \
+{ return _Tpvec(_mm_loadl_epi64((const __m128i*)ptr)); } \
+inline _Tpvec v_load_halves(const _Tp* ptr0, const _Tp* ptr1) \
+{ \
+    return _Tpvec(_mm_unpacklo_epi64(_mm_loadl_epi64((const __m128i*)ptr0), \
+                                     _mm_loadl_epi64((const __m128i*)ptr1))); \
+} \
+inline void v_store(_Tp* ptr, const _Tpvec& a) \
+{ _mm_storeu_si128((__m128i*)ptr, a.val); } \
+inline void v_store_aligned(_Tp* ptr, const _Tpvec& a) \
+{ _mm_store_si128((__m128i*)ptr, a.val); } \
+inline void v_store_aligned_nocache(_Tp* ptr, const _Tpvec& a) \
+{ _mm_stream_si128((__m128i*)ptr, a.val); } \
+inline void v_store(_Tp* ptr, const _Tpvec& a,
hal::StoreMode mode) \ +{ \ + if( mode == hal::STORE_UNALIGNED ) \ + _mm_storeu_si128((__m128i*)ptr, a.val); \ + else if( mode == hal::STORE_ALIGNED_NOCACHE ) \ + _mm_stream_si128((__m128i*)ptr, a.val); \ + else \ + _mm_store_si128((__m128i*)ptr, a.val); \ +} \ +inline void v_store_low(_Tp* ptr, const _Tpvec& a) \ +{ _mm_storel_epi64((__m128i*)ptr, a.val); } \ +inline void v_store_high(_Tp* ptr, const _Tpvec& a) \ +{ _mm_storel_epi64((__m128i*)ptr, _mm_unpackhi_epi64(a.val, a.val)); } + +OPENCV_HAL_IMPL_SSE_LOADSTORE_INT_OP(v_uint8x16, uchar) +OPENCV_HAL_IMPL_SSE_LOADSTORE_INT_OP(v_int8x16, schar) +OPENCV_HAL_IMPL_SSE_LOADSTORE_INT_OP(v_uint16x8, ushort) +OPENCV_HAL_IMPL_SSE_LOADSTORE_INT_OP(v_int16x8, short) +OPENCV_HAL_IMPL_SSE_LOADSTORE_INT_OP(v_uint32x4, unsigned) +OPENCV_HAL_IMPL_SSE_LOADSTORE_INT_OP(v_int32x4, int) +OPENCV_HAL_IMPL_SSE_LOADSTORE_INT_OP(v_uint64x2, uint64) +OPENCV_HAL_IMPL_SSE_LOADSTORE_INT_OP(v_int64x2, int64) + +#define OPENCV_HAL_IMPL_SSE_LOADSTORE_FLT_OP(_Tpvec, _Tp, suffix) \ +inline _Tpvec v_load(const _Tp* ptr) \ +{ return _Tpvec(_mm_loadu_##suffix(ptr)); } \ +inline _Tpvec v_load_aligned(const _Tp* ptr) \ +{ return _Tpvec(_mm_load_##suffix(ptr)); } \ +inline _Tpvec v_load_low(const _Tp* ptr) \ +{ return _Tpvec(_mm_castsi128_##suffix(_mm_loadl_epi64((const __m128i*)ptr))); } \ +inline _Tpvec v_load_halves(const _Tp* ptr0, const _Tp* ptr1) \ +{ \ + return _Tpvec(_mm_castsi128_##suffix( \ + _mm_unpacklo_epi64(_mm_loadl_epi64((const __m128i*)ptr0), \ + _mm_loadl_epi64((const __m128i*)ptr1)))); \ +} \ +inline void v_store(_Tp* ptr, const _Tpvec& a) \ +{ _mm_storeu_##suffix(ptr, a.val); } \ +inline void v_store_aligned(_Tp* ptr, const _Tpvec& a) \ +{ _mm_store_##suffix(ptr, a.val); } \ +inline void v_store_aligned_nocache(_Tp* ptr, const _Tpvec& a) \ +{ _mm_stream_##suffix(ptr, a.val); } \ +inline void v_store(_Tp* ptr, const _Tpvec& a, hal::StoreMode mode) \ +{ \ + if( mode == hal::STORE_UNALIGNED ) \ + _mm_storeu_##suffix(ptr, a.val); \ + else if( mode == hal::STORE_ALIGNED_NOCACHE ) \ + _mm_stream_##suffix(ptr, a.val); \ + else \ + _mm_store_##suffix(ptr, a.val); \ +} \ +inline void v_store_low(_Tp* ptr, const _Tpvec& a) \ +{ _mm_storel_epi64((__m128i*)ptr, _mm_cast##suffix##_si128(a.val)); } \ +inline void v_store_high(_Tp* ptr, const _Tpvec& a) \ +{ \ + __m128i a1 = _mm_cast##suffix##_si128(a.val); \ + _mm_storel_epi64((__m128i*)ptr, _mm_unpackhi_epi64(a1, a1)); \ +} + +OPENCV_HAL_IMPL_SSE_LOADSTORE_FLT_OP(v_float32x4, float, ps) +OPENCV_HAL_IMPL_SSE_LOADSTORE_FLT_OP(v_float64x2, double, pd) + +inline unsigned v_reduce_sum(const v_uint8x16& a) +{ + __m128i half = _mm_sad_epu8(a.val, _mm_setzero_si128()); + return (unsigned)_mm_cvtsi128_si32(_mm_add_epi32(half, _mm_unpackhi_epi64(half, half))); +} +inline int v_reduce_sum(const v_int8x16& a) +{ + __m128i half = _mm_set1_epi8((schar)-128); + half = _mm_sad_epu8(_mm_xor_si128(a.val, half), _mm_setzero_si128()); + return _mm_cvtsi128_si32(_mm_add_epi32(half, _mm_unpackhi_epi64(half, half))) - 2048; +} +#define OPENCV_HAL_IMPL_SSE_REDUCE_OP_16(func) \ +inline schar v_reduce_##func(const v_int8x16& a) \ +{ \ + __m128i val = a.val; \ + __m128i smask = _mm_set1_epi8((schar)-128); \ + val = _mm_xor_si128(val, smask); \ + val = _mm_##func##_epu8(val, _mm_srli_si128(val,8)); \ + val = _mm_##func##_epu8(val, _mm_srli_si128(val,4)); \ + val = _mm_##func##_epu8(val, _mm_srli_si128(val,2)); \ + val = _mm_##func##_epu8(val, _mm_srli_si128(val,1)); \ + return (schar)_mm_cvtsi128_si32(val) ^ (schar)-128; \ +} \ +inline uchar 
v_reduce_##func(const v_uint8x16& a) \ +{ \ + __m128i val = a.val; \ + val = _mm_##func##_epu8(val, _mm_srli_si128(val,8)); \ + val = _mm_##func##_epu8(val, _mm_srli_si128(val,4)); \ + val = _mm_##func##_epu8(val, _mm_srli_si128(val,2)); \ + val = _mm_##func##_epu8(val, _mm_srli_si128(val,1)); \ + return (uchar)_mm_cvtsi128_si32(val); \ +} +OPENCV_HAL_IMPL_SSE_REDUCE_OP_16(max) +OPENCV_HAL_IMPL_SSE_REDUCE_OP_16(min) + +#define OPENCV_HAL_IMPL_SSE_REDUCE_OP_8(_Tpvec, scalartype, func, suffix, sbit) \ +inline scalartype v_reduce_##func(const v_##_Tpvec& a) \ +{ \ + __m128i val = a.val; \ + val = _mm_##func##_##suffix(val, _mm_srli_si128(val,8)); \ + val = _mm_##func##_##suffix(val, _mm_srli_si128(val,4)); \ + val = _mm_##func##_##suffix(val, _mm_srli_si128(val,2)); \ + return (scalartype)_mm_cvtsi128_si32(val); \ +} \ +inline unsigned scalartype v_reduce_##func(const v_u##_Tpvec& a) \ +{ \ + __m128i val = a.val; \ + __m128i smask = _mm_set1_epi16(sbit); \ + val = _mm_xor_si128(val, smask); \ + val = _mm_##func##_##suffix(val, _mm_srli_si128(val,8)); \ + val = _mm_##func##_##suffix(val, _mm_srli_si128(val,4)); \ + val = _mm_##func##_##suffix(val, _mm_srli_si128(val,2)); \ + return (unsigned scalartype)(_mm_cvtsi128_si32(val) ^ sbit); \ +} +OPENCV_HAL_IMPL_SSE_REDUCE_OP_8(int16x8, short, max, epi16, (short)-32768) +OPENCV_HAL_IMPL_SSE_REDUCE_OP_8(int16x8, short, min, epi16, (short)-32768) + +#define OPENCV_HAL_IMPL_SSE_REDUCE_OP_4_SUM(_Tpvec, scalartype, regtype, suffix, cast_from, cast_to, extract) \ +inline scalartype v_reduce_sum(const _Tpvec& a) \ +{ \ + regtype val = a.val; \ + val = _mm_add_##suffix(val, cast_to(_mm_srli_si128(cast_from(val), 8))); \ + val = _mm_add_##suffix(val, cast_to(_mm_srli_si128(cast_from(val), 4))); \ + return (scalartype)_mm_cvt##extract(val); \ +} + +#define OPENCV_HAL_IMPL_SSE_REDUCE_OP_4(_Tpvec, scalartype, func, scalar_func) \ +inline scalartype v_reduce_##func(const _Tpvec& a) \ +{ \ + scalartype CV_DECL_ALIGNED(16) buf[4]; \ + v_store_aligned(buf, a); \ + scalartype s0 = scalar_func(buf[0], buf[1]); \ + scalartype s1 = scalar_func(buf[2], buf[3]); \ + return scalar_func(s0, s1); \ +} + +OPENCV_HAL_IMPL_SSE_REDUCE_OP_4_SUM(v_uint32x4, unsigned, __m128i, epi32, OPENCV_HAL_NOP, OPENCV_HAL_NOP, si128_si32) +OPENCV_HAL_IMPL_SSE_REDUCE_OP_4_SUM(v_int32x4, int, __m128i, epi32, OPENCV_HAL_NOP, OPENCV_HAL_NOP, si128_si32) +OPENCV_HAL_IMPL_SSE_REDUCE_OP_4_SUM(v_float32x4, float, __m128, ps, _mm_castps_si128, _mm_castsi128_ps, ss_f32) + +inline int v_reduce_sum(const v_int16x8& a) +{ return v_reduce_sum(v_expand_low(a) + v_expand_high(a)); } +inline unsigned v_reduce_sum(const v_uint16x8& a) +{ return v_reduce_sum(v_expand_low(a) + v_expand_high(a)); } + +inline uint64 v_reduce_sum(const v_uint64x2& a) +{ + uint64 CV_DECL_ALIGNED(32) idx[2]; + v_store_aligned(idx, a); + return idx[0] + idx[1]; +} +inline int64 v_reduce_sum(const v_int64x2& a) +{ + int64 CV_DECL_ALIGNED(32) idx[2]; + v_store_aligned(idx, a); + return idx[0] + idx[1]; +} +inline double v_reduce_sum(const v_float64x2& a) +{ + double CV_DECL_ALIGNED(32) idx[2]; + v_store_aligned(idx, a); + return idx[0] + idx[1]; +} + +inline v_float32x4 v_reduce_sum4(const v_float32x4& a, const v_float32x4& b, + const v_float32x4& c, const v_float32x4& d) +{ +#if CV_SSE3 + __m128 ab = _mm_hadd_ps(a.val, b.val); + __m128 cd = _mm_hadd_ps(c.val, d.val); + return v_float32x4(_mm_hadd_ps(ab, cd)); +#else + __m128 ac = _mm_add_ps(_mm_unpacklo_ps(a.val, c.val), _mm_unpackhi_ps(a.val, c.val)); + __m128 bd = 
_mm_add_ps(_mm_unpacklo_ps(b.val, d.val), _mm_unpackhi_ps(b.val, d.val)); + return v_float32x4(_mm_add_ps(_mm_unpacklo_ps(ac, bd), _mm_unpackhi_ps(ac, bd))); +#endif +} + +OPENCV_HAL_IMPL_SSE_REDUCE_OP_4(v_uint32x4, unsigned, max, std::max) +OPENCV_HAL_IMPL_SSE_REDUCE_OP_4(v_uint32x4, unsigned, min, std::min) +OPENCV_HAL_IMPL_SSE_REDUCE_OP_4(v_int32x4, int, max, std::max) +OPENCV_HAL_IMPL_SSE_REDUCE_OP_4(v_int32x4, int, min, std::min) +OPENCV_HAL_IMPL_SSE_REDUCE_OP_4(v_float32x4, float, max, std::max) +OPENCV_HAL_IMPL_SSE_REDUCE_OP_4(v_float32x4, float, min, std::min) + +inline unsigned v_reduce_sad(const v_uint8x16& a, const v_uint8x16& b) +{ + __m128i half = _mm_sad_epu8(a.val, b.val); + return (unsigned)_mm_cvtsi128_si32(_mm_add_epi32(half, _mm_unpackhi_epi64(half, half))); +} +inline unsigned v_reduce_sad(const v_int8x16& a, const v_int8x16& b) +{ + __m128i half = _mm_set1_epi8(0x7f); + half = _mm_sad_epu8(_mm_add_epi8(a.val, half), _mm_add_epi8(b.val, half)); + return (unsigned)_mm_cvtsi128_si32(_mm_add_epi32(half, _mm_unpackhi_epi64(half, half))); +} +inline unsigned v_reduce_sad(const v_uint16x8& a, const v_uint16x8& b) +{ + v_uint32x4 l, h; + v_expand(v_absdiff(a, b), l, h); + return v_reduce_sum(l + h); +} +inline unsigned v_reduce_sad(const v_int16x8& a, const v_int16x8& b) +{ + v_uint32x4 l, h; + v_expand(v_absdiff(a, b), l, h); + return v_reduce_sum(l + h); +} +inline unsigned v_reduce_sad(const v_uint32x4& a, const v_uint32x4& b) +{ + return v_reduce_sum(v_absdiff(a, b)); +} +inline unsigned v_reduce_sad(const v_int32x4& a, const v_int32x4& b) +{ + return v_reduce_sum(v_absdiff(a, b)); +} +inline float v_reduce_sad(const v_float32x4& a, const v_float32x4& b) +{ + return v_reduce_sum(v_absdiff(a, b)); +} + +inline v_uint8x16 v_popcount(const v_uint8x16& a) +{ + __m128i m1 = _mm_set1_epi32(0x55555555); + __m128i m2 = _mm_set1_epi32(0x33333333); + __m128i m4 = _mm_set1_epi32(0x0f0f0f0f); + __m128i p = a.val; + p = _mm_add_epi32(_mm_and_si128(_mm_srli_epi32(p, 1), m1), _mm_and_si128(p, m1)); + p = _mm_add_epi32(_mm_and_si128(_mm_srli_epi32(p, 2), m2), _mm_and_si128(p, m2)); + p = _mm_add_epi32(_mm_and_si128(_mm_srli_epi32(p, 4), m4), _mm_and_si128(p, m4)); + return v_uint8x16(p); +} +inline v_uint16x8 v_popcount(const v_uint16x8& a) +{ + v_uint8x16 p = v_popcount(v_reinterpret_as_u8(a)); + p += v_rotate_right<1>(p); + return v_reinterpret_as_u16(p) & v_setall_u16(0x00ff); +} +inline v_uint32x4 v_popcount(const v_uint32x4& a) +{ + v_uint8x16 p = v_popcount(v_reinterpret_as_u8(a)); + p += v_rotate_right<1>(p); + p += v_rotate_right<2>(p); + return v_reinterpret_as_u32(p) & v_setall_u32(0x000000ff); +} +inline v_uint64x2 v_popcount(const v_uint64x2& a) +{ + return v_uint64x2(_mm_sad_epu8(v_popcount(v_reinterpret_as_u8(a)).val, _mm_setzero_si128())); +} +inline v_uint8x16 v_popcount(const v_int8x16& a) +{ return v_popcount(v_reinterpret_as_u8(a)); } +inline v_uint16x8 v_popcount(const v_int16x8& a) +{ return v_popcount(v_reinterpret_as_u16(a)); } +inline v_uint32x4 v_popcount(const v_int32x4& a) +{ return v_popcount(v_reinterpret_as_u32(a)); } +inline v_uint64x2 v_popcount(const v_int64x2& a) +{ return v_popcount(v_reinterpret_as_u64(a)); } + +#define OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(_Tpvec, suffix, cast_op, allmask) \ +inline int v_signmask(const _Tpvec& a) { return _mm_movemask_##suffix(cast_op(a.val)); } \ +inline bool v_check_all(const _Tpvec& a) { return _mm_movemask_##suffix(cast_op(a.val)) == allmask; } \ +inline bool v_check_any(const _Tpvec& a) { return 
_mm_movemask_##suffix(cast_op(a.val)) != 0; } +OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_uint8x16, epi8, OPENCV_HAL_NOP, 65535) +OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_int8x16, epi8, OPENCV_HAL_NOP, 65535) +OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_uint32x4, ps, _mm_castsi128_ps, 15) +OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_int32x4, ps, _mm_castsi128_ps, 15) +OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_uint64x2, pd, _mm_castsi128_pd, 3) +OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_int64x2, pd, _mm_castsi128_pd, 3) +OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_float32x4, ps, OPENCV_HAL_NOP, 15) +OPENCV_HAL_IMPL_SSE_CHECK_SIGNS(v_float64x2, pd, OPENCV_HAL_NOP, 3) + +#define OPENCV_HAL_IMPL_SSE_CHECK_SIGNS_SHORT(_Tpvec) \ +inline int v_signmask(const _Tpvec& a) { return _mm_movemask_epi8(_mm_packs_epi16(a.val, a.val)) & 255; } \ +inline bool v_check_all(const _Tpvec& a) { return (_mm_movemask_epi8(a.val) & 0xaaaa) == 0xaaaa; } \ +inline bool v_check_any(const _Tpvec& a) { return (_mm_movemask_epi8(a.val) & 0xaaaa) != 0; } +OPENCV_HAL_IMPL_SSE_CHECK_SIGNS_SHORT(v_uint16x8) +OPENCV_HAL_IMPL_SSE_CHECK_SIGNS_SHORT(v_int16x8) + +inline int v_scan_forward(const v_int8x16& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))); } +inline int v_scan_forward(const v_uint8x16& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))); } +inline int v_scan_forward(const v_int16x8& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 2; } +inline int v_scan_forward(const v_uint16x8& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 2; } +inline int v_scan_forward(const v_int32x4& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 4; } +inline int v_scan_forward(const v_uint32x4& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 4; } +inline int v_scan_forward(const v_float32x4& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 4; } +inline int v_scan_forward(const v_int64x2& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 8; } +inline int v_scan_forward(const v_uint64x2& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 8; } +inline int v_scan_forward(const v_float64x2& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 8; } + +#if CV_SSE4_1 +#define OPENCV_HAL_IMPL_SSE_SELECT(_Tpvec, cast_ret, cast, suffix) \ +inline _Tpvec v_select(const _Tpvec& mask, const _Tpvec& a, const _Tpvec& b) \ +{ \ + return _Tpvec(cast_ret(_mm_blendv_##suffix(cast(b.val), cast(a.val), cast(mask.val)))); \ +} + +OPENCV_HAL_IMPL_SSE_SELECT(v_uint8x16, OPENCV_HAL_NOP, OPENCV_HAL_NOP, epi8) +OPENCV_HAL_IMPL_SSE_SELECT(v_int8x16, OPENCV_HAL_NOP, OPENCV_HAL_NOP, epi8) +OPENCV_HAL_IMPL_SSE_SELECT(v_uint16x8, OPENCV_HAL_NOP, OPENCV_HAL_NOP, epi8) +OPENCV_HAL_IMPL_SSE_SELECT(v_int16x8, OPENCV_HAL_NOP, OPENCV_HAL_NOP, epi8) +OPENCV_HAL_IMPL_SSE_SELECT(v_uint32x4, _mm_castps_si128, _mm_castsi128_ps, ps) +OPENCV_HAL_IMPL_SSE_SELECT(v_int32x4, _mm_castps_si128, _mm_castsi128_ps, ps) +// OPENCV_HAL_IMPL_SSE_SELECT(v_uint64x2, TBD, TBD, pd) +// OPENCV_HAL_IMPL_SSE_SELECT(v_int64x2, TBD, TBD, ps) +OPENCV_HAL_IMPL_SSE_SELECT(v_float32x4, OPENCV_HAL_NOP, OPENCV_HAL_NOP, ps) +OPENCV_HAL_IMPL_SSE_SELECT(v_float64x2, OPENCV_HAL_NOP, OPENCV_HAL_NOP, pd) + +#else // CV_SSE4_1 + +#define OPENCV_HAL_IMPL_SSE_SELECT(_Tpvec, suffix) \ +inline _Tpvec v_select(const _Tpvec& mask, const _Tpvec& a, const _Tpvec& b) \ +{ \ + return _Tpvec(_mm_xor_##suffix(b.val, _mm_and_##suffix(_mm_xor_##suffix(b.val, a.val), mask.val))); \ +} + +OPENCV_HAL_IMPL_SSE_SELECT(v_uint8x16, si128) 
+OPENCV_HAL_IMPL_SSE_SELECT(v_int8x16, si128) +OPENCV_HAL_IMPL_SSE_SELECT(v_uint16x8, si128) +OPENCV_HAL_IMPL_SSE_SELECT(v_int16x8, si128) +OPENCV_HAL_IMPL_SSE_SELECT(v_uint32x4, si128) +OPENCV_HAL_IMPL_SSE_SELECT(v_int32x4, si128) +// OPENCV_HAL_IMPL_SSE_SELECT(v_uint64x2, si128) +// OPENCV_HAL_IMPL_SSE_SELECT(v_int64x2, si128) +OPENCV_HAL_IMPL_SSE_SELECT(v_float32x4, ps) +OPENCV_HAL_IMPL_SSE_SELECT(v_float64x2, pd) +#endif + +/* Expand */ +#define OPENCV_HAL_IMPL_SSE_EXPAND(_Tpvec, _Tpwvec, _Tp, intrin) \ + inline void v_expand(const _Tpvec& a, _Tpwvec& b0, _Tpwvec& b1) \ + { \ + b0.val = intrin(a.val); \ + b1.val = __CV_CAT(intrin, _high)(a.val); \ + } \ + inline _Tpwvec v_expand_low(const _Tpvec& a) \ + { return _Tpwvec(intrin(a.val)); } \ + inline _Tpwvec v_expand_high(const _Tpvec& a) \ + { return _Tpwvec(__CV_CAT(intrin, _high)(a.val)); } \ + inline _Tpwvec v_load_expand(const _Tp* ptr) \ + { \ + __m128i a = _mm_loadl_epi64((const __m128i*)ptr); \ + return _Tpwvec(intrin(a)); \ + } + +OPENCV_HAL_IMPL_SSE_EXPAND(v_uint8x16, v_uint16x8, uchar, _v128_cvtepu8_epi16) +OPENCV_HAL_IMPL_SSE_EXPAND(v_int8x16, v_int16x8, schar, _v128_cvtepi8_epi16) +OPENCV_HAL_IMPL_SSE_EXPAND(v_uint16x8, v_uint32x4, ushort, _v128_cvtepu16_epi32) +OPENCV_HAL_IMPL_SSE_EXPAND(v_int16x8, v_int32x4, short, _v128_cvtepi16_epi32) +OPENCV_HAL_IMPL_SSE_EXPAND(v_uint32x4, v_uint64x2, unsigned, _v128_cvtepu32_epi64) +OPENCV_HAL_IMPL_SSE_EXPAND(v_int32x4, v_int64x2, int, _v128_cvtepi32_epi64) + +#define OPENCV_HAL_IMPL_SSE_EXPAND_Q(_Tpvec, _Tp, intrin) \ + inline _Tpvec v_load_expand_q(const _Tp* ptr) \ + { \ + __m128i a = _mm_cvtsi32_si128(*(const int*)ptr); \ + return _Tpvec(intrin(a)); \ + } + +OPENCV_HAL_IMPL_SSE_EXPAND_Q(v_uint32x4, uchar, _v128_cvtepu8_epi32) +OPENCV_HAL_IMPL_SSE_EXPAND_Q(v_int32x4, schar, _v128_cvtepi8_epi32) + +#define OPENCV_HAL_IMPL_SSE_UNPACKS(_Tpvec, suffix, cast_from, cast_to) \ +inline void v_zip(const _Tpvec& a0, const _Tpvec& a1, _Tpvec& b0, _Tpvec& b1) \ +{ \ + b0.val = _mm_unpacklo_##suffix(a0.val, a1.val); \ + b1.val = _mm_unpackhi_##suffix(a0.val, a1.val); \ +} \ +inline _Tpvec v_combine_low(const _Tpvec& a, const _Tpvec& b) \ +{ \ + __m128i a1 = cast_from(a.val), b1 = cast_from(b.val); \ + return _Tpvec(cast_to(_mm_unpacklo_epi64(a1, b1))); \ +} \ +inline _Tpvec v_combine_high(const _Tpvec& a, const _Tpvec& b) \ +{ \ + __m128i a1 = cast_from(a.val), b1 = cast_from(b.val); \ + return _Tpvec(cast_to(_mm_unpackhi_epi64(a1, b1))); \ +} \ +inline void v_recombine(const _Tpvec& a, const _Tpvec& b, _Tpvec& c, _Tpvec& d) \ +{ \ + __m128i a1 = cast_from(a.val), b1 = cast_from(b.val); \ + c.val = cast_to(_mm_unpacklo_epi64(a1, b1)); \ + d.val = cast_to(_mm_unpackhi_epi64(a1, b1)); \ +} + +OPENCV_HAL_IMPL_SSE_UNPACKS(v_uint8x16, epi8, OPENCV_HAL_NOP, OPENCV_HAL_NOP) +OPENCV_HAL_IMPL_SSE_UNPACKS(v_int8x16, epi8, OPENCV_HAL_NOP, OPENCV_HAL_NOP) +OPENCV_HAL_IMPL_SSE_UNPACKS(v_uint16x8, epi16, OPENCV_HAL_NOP, OPENCV_HAL_NOP) +OPENCV_HAL_IMPL_SSE_UNPACKS(v_int16x8, epi16, OPENCV_HAL_NOP, OPENCV_HAL_NOP) +OPENCV_HAL_IMPL_SSE_UNPACKS(v_uint32x4, epi32, OPENCV_HAL_NOP, OPENCV_HAL_NOP) +OPENCV_HAL_IMPL_SSE_UNPACKS(v_int32x4, epi32, OPENCV_HAL_NOP, OPENCV_HAL_NOP) +OPENCV_HAL_IMPL_SSE_UNPACKS(v_float32x4, ps, _mm_castps_si128, _mm_castsi128_ps) +OPENCV_HAL_IMPL_SSE_UNPACKS(v_float64x2, pd, _mm_castpd_si128, _mm_castsi128_pd) + +inline v_uint8x16 v_reverse(const v_uint8x16 &a) +{ +#if CV_SSSE3 + static const __m128i perm = _mm_setr_epi8(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return 
v_uint8x16(_mm_shuffle_epi8(a.val, perm)); +#else + uchar CV_DECL_ALIGNED(32) d[16]; + v_store_aligned(d, a); + return v_uint8x16(d[15], d[14], d[13], d[12], d[11], d[10], d[9], d[8], d[7], d[6], d[5], d[4], d[3], d[2], d[1], d[0]); +#endif +} + +inline v_int8x16 v_reverse(const v_int8x16 &a) +{ return v_reinterpret_as_s8(v_reverse(v_reinterpret_as_u8(a))); } + +inline v_uint16x8 v_reverse(const v_uint16x8 &a) +{ +#if CV_SSSE3 + static const __m128i perm = _mm_setr_epi8(14, 15, 12, 13, 10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1); + return v_uint16x8(_mm_shuffle_epi8(a.val, perm)); +#else + __m128i r = _mm_shuffle_epi32(a.val, _MM_SHUFFLE(0, 1, 2, 3)); + r = _mm_shufflelo_epi16(r, _MM_SHUFFLE(2, 3, 0, 1)); + r = _mm_shufflehi_epi16(r, _MM_SHUFFLE(2, 3, 0, 1)); + return v_uint16x8(r); +#endif +} + +inline v_int16x8 v_reverse(const v_int16x8 &a) +{ return v_reinterpret_as_s16(v_reverse(v_reinterpret_as_u16(a))); } + +inline v_uint32x4 v_reverse(const v_uint32x4 &a) +{ + return v_uint32x4(_mm_shuffle_epi32(a.val, _MM_SHUFFLE(0, 1, 2, 3))); +} + +inline v_int32x4 v_reverse(const v_int32x4 &a) +{ return v_reinterpret_as_s32(v_reverse(v_reinterpret_as_u32(a))); } + +inline v_float32x4 v_reverse(const v_float32x4 &a) +{ return v_reinterpret_as_f32(v_reverse(v_reinterpret_as_u32(a))); } + +inline v_uint64x2 v_reverse(const v_uint64x2 &a) +{ + return v_uint64x2(_mm_shuffle_epi32(a.val, _MM_SHUFFLE(1, 0, 3, 2))); +} + +inline v_int64x2 v_reverse(const v_int64x2 &a) +{ return v_reinterpret_as_s64(v_reverse(v_reinterpret_as_u64(a))); } + +inline v_float64x2 v_reverse(const v_float64x2 &a) +{ return v_reinterpret_as_f64(v_reverse(v_reinterpret_as_u64(a))); } + +template<int s, typename _Tpvec> +inline _Tpvec v_extract(const _Tpvec& a, const _Tpvec& b) +{ + return v_rotate_right<s>(a, b); +} + +inline v_int32x4 v_round(const v_float32x4& a) +{ return v_int32x4(_mm_cvtps_epi32(a.val)); } + +inline v_int32x4 v_floor(const v_float32x4& a) +{ + __m128i a1 = _mm_cvtps_epi32(a.val); + __m128i mask = _mm_castps_si128(_mm_cmpgt_ps(_mm_cvtepi32_ps(a1), a.val)); + return v_int32x4(_mm_add_epi32(a1, mask)); +} + +inline v_int32x4 v_ceil(const v_float32x4& a) +{ + __m128i a1 = _mm_cvtps_epi32(a.val); + __m128i mask = _mm_castps_si128(_mm_cmpgt_ps(a.val, _mm_cvtepi32_ps(a1))); + return v_int32x4(_mm_sub_epi32(a1, mask)); +} + +inline v_int32x4 v_trunc(const v_float32x4& a) +{ return v_int32x4(_mm_cvttps_epi32(a.val)); } + +inline v_int32x4 v_round(const v_float64x2& a) +{ return v_int32x4(_mm_cvtpd_epi32(a.val)); } + +inline v_int32x4 v_round(const v_float64x2& a, const v_float64x2& b) +{ + __m128i ai = _mm_cvtpd_epi32(a.val), bi = _mm_cvtpd_epi32(b.val); + return v_int32x4(_mm_unpacklo_epi64(ai, bi)); +} + +inline v_int32x4 v_floor(const v_float64x2& a) +{ + __m128i a1 = _mm_cvtpd_epi32(a.val); + __m128i mask = _mm_castpd_si128(_mm_cmpgt_pd(_mm_cvtepi32_pd(a1), a.val)); + mask = _mm_srli_si128(_mm_slli_si128(mask, 4), 8); // m0 m0 m1 m1 => m0 m1 0 0 + return v_int32x4(_mm_add_epi32(a1, mask)); +} + +inline v_int32x4 v_ceil(const v_float64x2& a) +{ + __m128i a1 = _mm_cvtpd_epi32(a.val); + __m128i mask = _mm_castpd_si128(_mm_cmpgt_pd(a.val, _mm_cvtepi32_pd(a1))); + mask = _mm_srli_si128(_mm_slli_si128(mask, 4), 8); // m0 m0 m1 m1 => m0 m1 0 0 + return v_int32x4(_mm_sub_epi32(a1, mask)); +} + +inline v_int32x4 v_trunc(const v_float64x2& a) +{ return v_int32x4(_mm_cvttpd_epi32(a.val)); } + +#define OPENCV_HAL_IMPL_SSE_TRANSPOSE4x4(_Tpvec, suffix, cast_from, cast_to) \ +inline void v_transpose4x4(const _Tpvec& a0, const _Tpvec& a1, \ + const _Tpvec& a2, 
const _Tpvec& a3, \ + _Tpvec& b0, _Tpvec& b1, \ + _Tpvec& b2, _Tpvec& b3) \ +{ \ + __m128i t0 = cast_from(_mm_unpacklo_##suffix(a0.val, a1.val)); \ + __m128i t1 = cast_from(_mm_unpacklo_##suffix(a2.val, a3.val)); \ + __m128i t2 = cast_from(_mm_unpackhi_##suffix(a0.val, a1.val)); \ + __m128i t3 = cast_from(_mm_unpackhi_##suffix(a2.val, a3.val)); \ +\ + b0.val = cast_to(_mm_unpacklo_epi64(t0, t1)); \ + b1.val = cast_to(_mm_unpackhi_epi64(t0, t1)); \ + b2.val = cast_to(_mm_unpacklo_epi64(t2, t3)); \ + b3.val = cast_to(_mm_unpackhi_epi64(t2, t3)); \ +} + +OPENCV_HAL_IMPL_SSE_TRANSPOSE4x4(v_uint32x4, epi32, OPENCV_HAL_NOP, OPENCV_HAL_NOP) +OPENCV_HAL_IMPL_SSE_TRANSPOSE4x4(v_int32x4, epi32, OPENCV_HAL_NOP, OPENCV_HAL_NOP) +OPENCV_HAL_IMPL_SSE_TRANSPOSE4x4(v_float32x4, ps, _mm_castps_si128, _mm_castsi128_ps) + +// load deinterleave +inline void v_load_deinterleave(const uchar* ptr, v_uint8x16& a, v_uint8x16& b) +{ + __m128i t00 = _mm_loadu_si128((const __m128i*)ptr); + __m128i t01 = _mm_loadu_si128((const __m128i*)(ptr + 16)); + + __m128i t10 = _mm_unpacklo_epi8(t00, t01); + __m128i t11 = _mm_unpackhi_epi8(t00, t01); + + __m128i t20 = _mm_unpacklo_epi8(t10, t11); + __m128i t21 = _mm_unpackhi_epi8(t10, t11); + + __m128i t30 = _mm_unpacklo_epi8(t20, t21); + __m128i t31 = _mm_unpackhi_epi8(t20, t21); + + a.val = _mm_unpacklo_epi8(t30, t31); + b.val = _mm_unpackhi_epi8(t30, t31); +} + +inline void v_load_deinterleave(const uchar* ptr, v_uint8x16& a, v_uint8x16& b, v_uint8x16& c) +{ +#if CV_SSE4_1 + const __m128i m0 = _mm_setr_epi8(0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0); + const __m128i m1 = _mm_setr_epi8(0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0); + __m128i s0 = _mm_loadu_si128((const __m128i*)ptr); + __m128i s1 = _mm_loadu_si128((const __m128i*)(ptr + 16)); + __m128i s2 = _mm_loadu_si128((const __m128i*)(ptr + 32)); + __m128i a0 = _mm_blendv_epi8(_mm_blendv_epi8(s0, s1, m0), s2, m1); + __m128i b0 = _mm_blendv_epi8(_mm_blendv_epi8(s1, s2, m0), s0, m1); + __m128i c0 = _mm_blendv_epi8(_mm_blendv_epi8(s2, s0, m0), s1, m1); + const __m128i sh_b = _mm_setr_epi8(0, 3, 6, 9, 12, 15, 2, 5, 8, 11, 14, 1, 4, 7, 10, 13); + const __m128i sh_g = _mm_setr_epi8(1, 4, 7, 10, 13, 0, 3, 6, 9, 12, 15, 2, 5, 8, 11, 14); + const __m128i sh_r = _mm_setr_epi8(2, 5, 8, 11, 14, 1, 4, 7, 10, 13, 0, 3, 6, 9, 12, 15); + a0 = _mm_shuffle_epi8(a0, sh_b); + b0 = _mm_shuffle_epi8(b0, sh_g); + c0 = _mm_shuffle_epi8(c0, sh_r); + a.val = a0; + b.val = b0; + c.val = c0; +#elif CV_SSSE3 + const __m128i m0 = _mm_setr_epi8(0, 3, 6, 9, 12, 15, 1, 4, 7, 10, 13, 2, 5, 8, 11, 14); + const __m128i m1 = _mm_alignr_epi8(m0, m0, 11); + const __m128i m2 = _mm_alignr_epi8(m0, m0, 6); + + __m128i t0 = _mm_loadu_si128((const __m128i*)ptr); + __m128i t1 = _mm_loadu_si128((const __m128i*)(ptr + 16)); + __m128i t2 = _mm_loadu_si128((const __m128i*)(ptr + 32)); + + __m128i s0 = _mm_shuffle_epi8(t0, m0); + __m128i s1 = _mm_shuffle_epi8(t1, m1); + __m128i s2 = _mm_shuffle_epi8(t2, m2); + + t0 = _mm_alignr_epi8(s1, _mm_slli_si128(s0, 10), 5); + a.val = _mm_alignr_epi8(s2, t0, 5); + + t1 = _mm_alignr_epi8(_mm_srli_si128(s1, 5), _mm_slli_si128(s0, 5), 6); + b.val = _mm_alignr_epi8(_mm_srli_si128(s2, 5), t1, 5); + + t2 = _mm_alignr_epi8(_mm_srli_si128(s2, 10), s1, 11); + c.val = _mm_alignr_epi8(t2, s0, 11); +#else + __m128i t00 = _mm_loadu_si128((const __m128i*)ptr); + __m128i t01 = _mm_loadu_si128((const __m128i*)(ptr + 16)); + __m128i t02 = _mm_loadu_si128((const __m128i*)(ptr + 32)); + + __m128i t10 = _mm_unpacklo_epi8(t00, 
_mm_unpackhi_epi64(t01, t01)); + __m128i t11 = _mm_unpacklo_epi8(_mm_unpackhi_epi64(t00, t00), t02); + __m128i t12 = _mm_unpacklo_epi8(t01, _mm_unpackhi_epi64(t02, t02)); + + __m128i t20 = _mm_unpacklo_epi8(t10, _mm_unpackhi_epi64(t11, t11)); + __m128i t21 = _mm_unpacklo_epi8(_mm_unpackhi_epi64(t10, t10), t12); + __m128i t22 = _mm_unpacklo_epi8(t11, _mm_unpackhi_epi64(t12, t12)); + + __m128i t30 = _mm_unpacklo_epi8(t20, _mm_unpackhi_epi64(t21, t21)); + __m128i t31 = _mm_unpacklo_epi8(_mm_unpackhi_epi64(t20, t20), t22); + __m128i t32 = _mm_unpacklo_epi8(t21, _mm_unpackhi_epi64(t22, t22)); + + a.val = _mm_unpacklo_epi8(t30, _mm_unpackhi_epi64(t31, t31)); + b.val = _mm_unpacklo_epi8(_mm_unpackhi_epi64(t30, t30), t32); + c.val = _mm_unpacklo_epi8(t31, _mm_unpackhi_epi64(t32, t32)); +#endif +} + +inline void v_load_deinterleave(const uchar* ptr, v_uint8x16& a, v_uint8x16& b, v_uint8x16& c, v_uint8x16& d) +{ + __m128i u0 = _mm_loadu_si128((const __m128i*)ptr); // a0 b0 c0 d0 a1 b1 c1 d1 ... + __m128i u1 = _mm_loadu_si128((const __m128i*)(ptr + 16)); // a4 b4 c4 d4 ... + __m128i u2 = _mm_loadu_si128((const __m128i*)(ptr + 32)); // a8 b8 c8 d8 ... + __m128i u3 = _mm_loadu_si128((const __m128i*)(ptr + 48)); // a12 b12 c12 d12 ... + + __m128i v0 = _mm_unpacklo_epi8(u0, u2); // a0 a8 b0 b8 ... + __m128i v1 = _mm_unpackhi_epi8(u0, u2); // a2 a10 b2 b10 ... + __m128i v2 = _mm_unpacklo_epi8(u1, u3); // a4 a12 b4 b12 ... + __m128i v3 = _mm_unpackhi_epi8(u1, u3); // a6 a14 b6 b14 ... + + u0 = _mm_unpacklo_epi8(v0, v2); // a0 a4 a8 a12 ... + u1 = _mm_unpacklo_epi8(v1, v3); // a2 a6 a10 a14 ... + u2 = _mm_unpackhi_epi8(v0, v2); // a1 a5 a9 a13 ... + u3 = _mm_unpackhi_epi8(v1, v3); // a3 a7 a11 a15 ... + + v0 = _mm_unpacklo_epi8(u0, u1); // a0 a2 a4 a6 ... + v1 = _mm_unpacklo_epi8(u2, u3); // a1 a3 a5 a7 ... + v2 = _mm_unpackhi_epi8(u0, u1); // c0 c2 c4 c6 ... + v3 = _mm_unpackhi_epi8(u2, u3); // c1 c3 c5 c7 ... 
+ + a.val = _mm_unpacklo_epi8(v0, v1); + b.val = _mm_unpackhi_epi8(v0, v1); + c.val = _mm_unpacklo_epi8(v2, v3); + d.val = _mm_unpackhi_epi8(v2, v3); +} + +inline void v_load_deinterleave(const ushort* ptr, v_uint16x8& a, v_uint16x8& b) +{ + __m128i v0 = _mm_loadu_si128((__m128i*)(ptr)); // a0 b0 a1 b1 a2 b2 a3 b3 + __m128i v1 = _mm_loadu_si128((__m128i*)(ptr + 8)); // a4 b4 a5 b5 a6 b6 a7 b7 + + __m128i v2 = _mm_unpacklo_epi16(v0, v1); // a0 a4 b0 b4 a1 a5 b1 b5 + __m128i v3 = _mm_unpackhi_epi16(v0, v1); // a2 a6 b2 b6 a3 a7 b3 b7 + __m128i v4 = _mm_unpacklo_epi16(v2, v3); // a0 a2 a4 a6 b0 b2 b4 b6 + __m128i v5 = _mm_unpackhi_epi16(v2, v3); // a1 a3 a5 a7 b1 b3 b5 b7 + + a.val = _mm_unpacklo_epi16(v4, v5); // a0 a1 a2 a3 a4 a5 a6 a7 + b.val = _mm_unpackhi_epi16(v4, v5); // b0 b1 ab b3 b4 b5 b6 b7 +} + +inline void v_load_deinterleave(const ushort* ptr, v_uint16x8& a, v_uint16x8& b, v_uint16x8& c) +{ +#if CV_SSE4_1 + __m128i v0 = _mm_loadu_si128((__m128i*)(ptr)); + __m128i v1 = _mm_loadu_si128((__m128i*)(ptr + 8)); + __m128i v2 = _mm_loadu_si128((__m128i*)(ptr + 16)); + __m128i a0 = _mm_blend_epi16(_mm_blend_epi16(v0, v1, 0x92), v2, 0x24); + __m128i b0 = _mm_blend_epi16(_mm_blend_epi16(v2, v0, 0x92), v1, 0x24); + __m128i c0 = _mm_blend_epi16(_mm_blend_epi16(v1, v2, 0x92), v0, 0x24); + + const __m128i sh_a = _mm_setr_epi8(0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5, 10, 11); + const __m128i sh_b = _mm_setr_epi8(2, 3, 8, 9, 14, 15, 4, 5, 10, 11, 0, 1, 6, 7, 12, 13); + const __m128i sh_c = _mm_setr_epi8(4, 5, 10, 11, 0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15); + a0 = _mm_shuffle_epi8(a0, sh_a); + b0 = _mm_shuffle_epi8(b0, sh_b); + c0 = _mm_shuffle_epi8(c0, sh_c); + + a.val = a0; + b.val = b0; + c.val = c0; +#else + __m128i t00 = _mm_loadu_si128((const __m128i*)ptr); + __m128i t01 = _mm_loadu_si128((const __m128i*)(ptr + 8)); + __m128i t02 = _mm_loadu_si128((const __m128i*)(ptr + 16)); + + __m128i t10 = _mm_unpacklo_epi16(t00, _mm_unpackhi_epi64(t01, t01)); + __m128i t11 = _mm_unpacklo_epi16(_mm_unpackhi_epi64(t00, t00), t02); + __m128i t12 = _mm_unpacklo_epi16(t01, _mm_unpackhi_epi64(t02, t02)); + + __m128i t20 = _mm_unpacklo_epi16(t10, _mm_unpackhi_epi64(t11, t11)); + __m128i t21 = _mm_unpacklo_epi16(_mm_unpackhi_epi64(t10, t10), t12); + __m128i t22 = _mm_unpacklo_epi16(t11, _mm_unpackhi_epi64(t12, t12)); + + a.val = _mm_unpacklo_epi16(t20, _mm_unpackhi_epi64(t21, t21)); + b.val = _mm_unpacklo_epi16(_mm_unpackhi_epi64(t20, t20), t22); + c.val = _mm_unpacklo_epi16(t21, _mm_unpackhi_epi64(t22, t22)); +#endif +} + +inline void v_load_deinterleave(const ushort* ptr, v_uint16x8& a, v_uint16x8& b, v_uint16x8& c, v_uint16x8& d) +{ + __m128i u0 = _mm_loadu_si128((const __m128i*)ptr); // a0 b0 c0 d0 a1 b1 c1 d1 + __m128i u1 = _mm_loadu_si128((const __m128i*)(ptr + 8)); // a2 b2 c2 d2 ... + __m128i u2 = _mm_loadu_si128((const __m128i*)(ptr + 16)); // a4 b4 c4 d4 ... + __m128i u3 = _mm_loadu_si128((const __m128i*)(ptr + 24)); // a6 b6 c6 d6 ... + + __m128i v0 = _mm_unpacklo_epi16(u0, u2); // a0 a4 b0 b4 ... + __m128i v1 = _mm_unpackhi_epi16(u0, u2); // a1 a5 b1 b5 ... + __m128i v2 = _mm_unpacklo_epi16(u1, u3); // a2 a6 b2 b6 ... + __m128i v3 = _mm_unpackhi_epi16(u1, u3); // a3 a7 b3 b7 ... + + u0 = _mm_unpacklo_epi16(v0, v2); // a0 a2 a4 a6 ... + u1 = _mm_unpacklo_epi16(v1, v3); // a1 a3 a5 a7 ... + u2 = _mm_unpackhi_epi16(v0, v2); // c0 c2 c4 c6 ... + u3 = _mm_unpackhi_epi16(v1, v3); // c1 c3 c5 c7 ... 
+ + a.val = _mm_unpacklo_epi16(u0, u1); + b.val = _mm_unpackhi_epi16(u0, u1); + c.val = _mm_unpacklo_epi16(u2, u3); + d.val = _mm_unpackhi_epi16(u2, u3); +} + +inline void v_load_deinterleave(const unsigned* ptr, v_uint32x4& a, v_uint32x4& b) +{ + __m128i v0 = _mm_loadu_si128((__m128i*)(ptr)); // a0 b0 a1 b1 + __m128i v1 = _mm_loadu_si128((__m128i*)(ptr + 4)); // a2 b2 a3 b3 + + __m128i v2 = _mm_unpacklo_epi32(v0, v1); // a0 a2 b0 b2 + __m128i v3 = _mm_unpackhi_epi32(v0, v1); // a1 a3 b1 b3 + + a.val = _mm_unpacklo_epi32(v2, v3); // a0 a1 a2 a3 + b.val = _mm_unpackhi_epi32(v2, v3); // b0 b1 ab b3 +} + +inline void v_load_deinterleave(const unsigned* ptr, v_uint32x4& a, v_uint32x4& b, v_uint32x4& c) +{ + __m128i t00 = _mm_loadu_si128((const __m128i*)ptr); + __m128i t01 = _mm_loadu_si128((const __m128i*)(ptr + 4)); + __m128i t02 = _mm_loadu_si128((const __m128i*)(ptr + 8)); + + __m128i t10 = _mm_unpacklo_epi32(t00, _mm_unpackhi_epi64(t01, t01)); + __m128i t11 = _mm_unpacklo_epi32(_mm_unpackhi_epi64(t00, t00), t02); + __m128i t12 = _mm_unpacklo_epi32(t01, _mm_unpackhi_epi64(t02, t02)); + + a.val = _mm_unpacklo_epi32(t10, _mm_unpackhi_epi64(t11, t11)); + b.val = _mm_unpacklo_epi32(_mm_unpackhi_epi64(t10, t10), t12); + c.val = _mm_unpacklo_epi32(t11, _mm_unpackhi_epi64(t12, t12)); +} + +inline void v_load_deinterleave(const unsigned* ptr, v_uint32x4& a, v_uint32x4& b, v_uint32x4& c, v_uint32x4& d) +{ + v_uint32x4 s0(_mm_loadu_si128((const __m128i*)ptr)); // a0 b0 c0 d0 + v_uint32x4 s1(_mm_loadu_si128((const __m128i*)(ptr + 4))); // a1 b1 c1 d1 + v_uint32x4 s2(_mm_loadu_si128((const __m128i*)(ptr + 8))); // a2 b2 c2 d2 + v_uint32x4 s3(_mm_loadu_si128((const __m128i*)(ptr + 12))); // a3 b3 c3 d3 + + v_transpose4x4(s0, s1, s2, s3, a, b, c, d); +} + +inline void v_load_deinterleave(const float* ptr, v_float32x4& a, v_float32x4& b) +{ + __m128 u0 = _mm_loadu_ps(ptr); // a0 b0 a1 b1 + __m128 u1 = _mm_loadu_ps((ptr + 4)); // a2 b2 a3 b3 + + a.val = _mm_shuffle_ps(u0, u1, _MM_SHUFFLE(2, 0, 2, 0)); // a0 a1 a2 a3 + b.val = _mm_shuffle_ps(u0, u1, _MM_SHUFFLE(3, 1, 3, 1)); // b0 b1 ab b3 +} + +inline void v_load_deinterleave(const float* ptr, v_float32x4& a, v_float32x4& b, v_float32x4& c) +{ + __m128 t0 = _mm_loadu_ps(ptr + 0); + __m128 t1 = _mm_loadu_ps(ptr + 4); + __m128 t2 = _mm_loadu_ps(ptr + 8); + + __m128 at12 = _mm_shuffle_ps(t1, t2, _MM_SHUFFLE(0, 1, 0, 2)); + a.val = _mm_shuffle_ps(t0, at12, _MM_SHUFFLE(2, 0, 3, 0)); + + __m128 bt01 = _mm_shuffle_ps(t0, t1, _MM_SHUFFLE(0, 0, 0, 1)); + __m128 bt12 = _mm_shuffle_ps(t1, t2, _MM_SHUFFLE(0, 2, 0, 3)); + b.val = _mm_shuffle_ps(bt01, bt12, _MM_SHUFFLE(2, 0, 2, 0)); + + __m128 ct01 = _mm_shuffle_ps(t0, t1, _MM_SHUFFLE(0, 1, 0, 2)); + c.val = _mm_shuffle_ps(ct01, t2, _MM_SHUFFLE(3, 0, 2, 0)); +} + +inline void v_load_deinterleave(const float* ptr, v_float32x4& a, v_float32x4& b, v_float32x4& c, v_float32x4& d) +{ + __m128 t0 = _mm_loadu_ps(ptr + 0); + __m128 t1 = _mm_loadu_ps(ptr + 4); + __m128 t2 = _mm_loadu_ps(ptr + 8); + __m128 t3 = _mm_loadu_ps(ptr + 12); + __m128 t02lo = _mm_unpacklo_ps(t0, t2); + __m128 t13lo = _mm_unpacklo_ps(t1, t3); + __m128 t02hi = _mm_unpackhi_ps(t0, t2); + __m128 t13hi = _mm_unpackhi_ps(t1, t3); + a.val = _mm_unpacklo_ps(t02lo, t13lo); + b.val = _mm_unpackhi_ps(t02lo, t13lo); + c.val = _mm_unpacklo_ps(t02hi, t13hi); + d.val = _mm_unpackhi_ps(t02hi, t13hi); +} + +inline void v_load_deinterleave(const uint64 *ptr, v_uint64x2& a, v_uint64x2& b) +{ + __m128i t0 = _mm_loadu_si128((const __m128i*)ptr); + __m128i t1 = 
_mm_loadu_si128((const __m128i*)(ptr + 2)); + + a = v_uint64x2(_mm_unpacklo_epi64(t0, t1)); + b = v_uint64x2(_mm_unpackhi_epi64(t0, t1)); +} + +inline void v_load_deinterleave(const uint64 *ptr, v_uint64x2& a, v_uint64x2& b, v_uint64x2& c) +{ + __m128i t0 = _mm_loadu_si128((const __m128i*)ptr); // a0, b0 + __m128i t1 = _mm_loadu_si128((const __m128i*)(ptr + 2)); // c0, a1 + __m128i t2 = _mm_loadu_si128((const __m128i*)(ptr + 4)); // b1, c1 + + t1 = _mm_shuffle_epi32(t1, 0x4e); // a1, c0 + + a = v_uint64x2(_mm_unpacklo_epi64(t0, t1)); + b = v_uint64x2(_mm_unpacklo_epi64(_mm_unpackhi_epi64(t0, t0), t2)); + c = v_uint64x2(_mm_unpackhi_epi64(t1, t2)); +} + +inline void v_load_deinterleave(const uint64 *ptr, v_uint64x2& a, + v_uint64x2& b, v_uint64x2& c, v_uint64x2& d) +{ + __m128i t0 = _mm_loadu_si128((const __m128i*)ptr); // a0 b0 + __m128i t1 = _mm_loadu_si128((const __m128i*)(ptr + 2)); // c0 d0 + __m128i t2 = _mm_loadu_si128((const __m128i*)(ptr + 4)); // a1 b1 + __m128i t3 = _mm_loadu_si128((const __m128i*)(ptr + 6)); // c1 d1 + + a = v_uint64x2(_mm_unpacklo_epi64(t0, t2)); + b = v_uint64x2(_mm_unpackhi_epi64(t0, t2)); + c = v_uint64x2(_mm_unpacklo_epi64(t1, t3)); + d = v_uint64x2(_mm_unpackhi_epi64(t1, t3)); +} + +// store interleave + +inline void v_store_interleave( uchar* ptr, const v_uint8x16& a, const v_uint8x16& b, + hal::StoreMode mode = hal::STORE_UNALIGNED) +{ + __m128i v0 = _mm_unpacklo_epi8(a.val, b.val); + __m128i v1 = _mm_unpackhi_epi8(a.val, b.val); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm_stream_si128((__m128i*)(ptr), v0); + _mm_stream_si128((__m128i*)(ptr + 16), v1); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm_store_si128((__m128i*)(ptr), v0); + _mm_store_si128((__m128i*)(ptr + 16), v1); + } + else + { + _mm_storeu_si128((__m128i*)(ptr), v0); + _mm_storeu_si128((__m128i*)(ptr + 16), v1); + } +} + +inline void v_store_interleave( uchar* ptr, const v_uint8x16& a, const v_uint8x16& b, + const v_uint8x16& c, hal::StoreMode mode = hal::STORE_UNALIGNED) +{ +#if CV_SSE4_1 + const __m128i sh_a = _mm_setr_epi8(0, 11, 6, 1, 12, 7, 2, 13, 8, 3, 14, 9, 4, 15, 10, 5); + const __m128i sh_b = _mm_setr_epi8(5, 0, 11, 6, 1, 12, 7, 2, 13, 8, 3, 14, 9, 4, 15, 10); + const __m128i sh_c = _mm_setr_epi8(10, 5, 0, 11, 6, 1, 12, 7, 2, 13, 8, 3, 14, 9, 4, 15); + __m128i a0 = _mm_shuffle_epi8(a.val, sh_a); + __m128i b0 = _mm_shuffle_epi8(b.val, sh_b); + __m128i c0 = _mm_shuffle_epi8(c.val, sh_c); + + const __m128i m0 = _mm_setr_epi8(0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0); + const __m128i m1 = _mm_setr_epi8(0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0); + __m128i v0 = _mm_blendv_epi8(_mm_blendv_epi8(a0, b0, m1), c0, m0); + __m128i v1 = _mm_blendv_epi8(_mm_blendv_epi8(b0, c0, m1), a0, m0); + __m128i v2 = _mm_blendv_epi8(_mm_blendv_epi8(c0, a0, m1), b0, m0); +#elif CV_SSSE3 + const __m128i m0 = _mm_setr_epi8(0, 6, 11, 1, 7, 12, 2, 8, 13, 3, 9, 14, 4, 10, 15, 5); + const __m128i m1 = _mm_setr_epi8(5, 11, 0, 6, 12, 1, 7, 13, 2, 8, 14, 3, 9, 15, 4, 10); + const __m128i m2 = _mm_setr_epi8(10, 0, 5, 11, 1, 6, 12, 2, 7, 13, 3, 8, 14, 4, 9, 15); + + __m128i t0 = _mm_alignr_epi8(b.val, _mm_slli_si128(a.val, 10), 5); + t0 = _mm_alignr_epi8(c.val, t0, 5); + __m128i v0 = _mm_shuffle_epi8(t0, m0); + + __m128i t1 = _mm_alignr_epi8(_mm_srli_si128(b.val, 5), _mm_slli_si128(a.val, 5), 6); + t1 = _mm_alignr_epi8(_mm_srli_si128(c.val, 5), t1, 5); + __m128i v1 = _mm_shuffle_epi8(t1, m1); + + __m128i t2 = _mm_alignr_epi8(_mm_srli_si128(c.val, 10), b.val, 11); + t2 = 
_mm_alignr_epi8(t2, a.val, 11); + __m128i v2 = _mm_shuffle_epi8(t2, m2); +#else + __m128i z = _mm_setzero_si128(); + __m128i ab0 = _mm_unpacklo_epi8(a.val, b.val); + __m128i ab1 = _mm_unpackhi_epi8(a.val, b.val); + __m128i c0 = _mm_unpacklo_epi8(c.val, z); + __m128i c1 = _mm_unpackhi_epi8(c.val, z); + + __m128i p00 = _mm_unpacklo_epi16(ab0, c0); + __m128i p01 = _mm_unpackhi_epi16(ab0, c0); + __m128i p02 = _mm_unpacklo_epi16(ab1, c1); + __m128i p03 = _mm_unpackhi_epi16(ab1, c1); + + __m128i p10 = _mm_unpacklo_epi32(p00, p01); + __m128i p11 = _mm_unpackhi_epi32(p00, p01); + __m128i p12 = _mm_unpacklo_epi32(p02, p03); + __m128i p13 = _mm_unpackhi_epi32(p02, p03); + + __m128i p20 = _mm_unpacklo_epi64(p10, p11); + __m128i p21 = _mm_unpackhi_epi64(p10, p11); + __m128i p22 = _mm_unpacklo_epi64(p12, p13); + __m128i p23 = _mm_unpackhi_epi64(p12, p13); + + p20 = _mm_slli_si128(p20, 1); + p22 = _mm_slli_si128(p22, 1); + + __m128i p30 = _mm_slli_epi64(_mm_unpacklo_epi32(p20, p21), 8); + __m128i p31 = _mm_srli_epi64(_mm_unpackhi_epi32(p20, p21), 8); + __m128i p32 = _mm_slli_epi64(_mm_unpacklo_epi32(p22, p23), 8); + __m128i p33 = _mm_srli_epi64(_mm_unpackhi_epi32(p22, p23), 8); + + __m128i p40 = _mm_unpacklo_epi64(p30, p31); + __m128i p41 = _mm_unpackhi_epi64(p30, p31); + __m128i p42 = _mm_unpacklo_epi64(p32, p33); + __m128i p43 = _mm_unpackhi_epi64(p32, p33); + + __m128i v0 = _mm_or_si128(_mm_srli_si128(p40, 2), _mm_slli_si128(p41, 10)); + __m128i v1 = _mm_or_si128(_mm_srli_si128(p41, 6), _mm_slli_si128(p42, 6)); + __m128i v2 = _mm_or_si128(_mm_srli_si128(p42, 10), _mm_slli_si128(p43, 2)); +#endif + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm_stream_si128((__m128i*)(ptr), v0); + _mm_stream_si128((__m128i*)(ptr + 16), v1); + _mm_stream_si128((__m128i*)(ptr + 32), v2); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm_store_si128((__m128i*)(ptr), v0); + _mm_store_si128((__m128i*)(ptr + 16), v1); + _mm_store_si128((__m128i*)(ptr + 32), v2); + } + else + { + _mm_storeu_si128((__m128i*)(ptr), v0); + _mm_storeu_si128((__m128i*)(ptr + 16), v1); + _mm_storeu_si128((__m128i*)(ptr + 32), v2); + } +} + +inline void v_store_interleave( uchar* ptr, const v_uint8x16& a, const v_uint8x16& b, + const v_uint8x16& c, const v_uint8x16& d, + hal::StoreMode mode = hal::STORE_UNALIGNED) +{ + // a0 a1 a2 a3 .... + // b0 b1 b2 b3 .... + // c0 c1 c2 c3 .... + // d0 d1 d2 d3 .... + __m128i u0 = _mm_unpacklo_epi8(a.val, c.val); // a0 c0 a1 c1 ... + __m128i u1 = _mm_unpackhi_epi8(a.val, c.val); // a8 c8 a9 c9 ... + __m128i u2 = _mm_unpacklo_epi8(b.val, d.val); // b0 d0 b1 d1 ... + __m128i u3 = _mm_unpackhi_epi8(b.val, d.val); // b8 d8 b9 d9 ... + + __m128i v0 = _mm_unpacklo_epi8(u0, u2); // a0 b0 c0 d0 ... + __m128i v1 = _mm_unpackhi_epi8(u0, u2); // a4 b4 c4 d4 ... + __m128i v2 = _mm_unpacklo_epi8(u1, u3); // a8 b8 c8 d8 ... + __m128i v3 = _mm_unpackhi_epi8(u1, u3); // a12 b12 c12 d12 ... 
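+ // v0..v3 now hold consecutive a,b,c,d quadruples (a0 b0 c0 d0 a1 b1 c1 d1 ...); the stores below honour the requested mode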
+ + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm_stream_si128((__m128i*)(ptr), v0); + _mm_stream_si128((__m128i*)(ptr + 16), v1); + _mm_stream_si128((__m128i*)(ptr + 32), v2); + _mm_stream_si128((__m128i*)(ptr + 48), v3); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm_store_si128((__m128i*)(ptr), v0); + _mm_store_si128((__m128i*)(ptr + 16), v1); + _mm_store_si128((__m128i*)(ptr + 32), v2); + _mm_store_si128((__m128i*)(ptr + 48), v3); + } + else + { + _mm_storeu_si128((__m128i*)(ptr), v0); + _mm_storeu_si128((__m128i*)(ptr + 16), v1); + _mm_storeu_si128((__m128i*)(ptr + 32), v2); + _mm_storeu_si128((__m128i*)(ptr + 48), v3); + } +} + +inline void v_store_interleave( ushort* ptr, const v_uint16x8& a, const v_uint16x8& b, + hal::StoreMode mode = hal::STORE_UNALIGNED) +{ + __m128i v0 = _mm_unpacklo_epi16(a.val, b.val); + __m128i v1 = _mm_unpackhi_epi16(a.val, b.val); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm_stream_si128((__m128i*)(ptr), v0); + _mm_stream_si128((__m128i*)(ptr + 8), v1); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm_store_si128((__m128i*)(ptr), v0); + _mm_store_si128((__m128i*)(ptr + 8), v1); + } + else + { + _mm_storeu_si128((__m128i*)(ptr), v0); + _mm_storeu_si128((__m128i*)(ptr + 8), v1); + } +} + +inline void v_store_interleave( ushort* ptr, const v_uint16x8& a, + const v_uint16x8& b, const v_uint16x8& c, + hal::StoreMode mode = hal::STORE_UNALIGNED) +{ +#if CV_SSE4_1 + const __m128i sh_a = _mm_setr_epi8(0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5, 10, 11); + const __m128i sh_b = _mm_setr_epi8(10, 11, 0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5); + const __m128i sh_c = _mm_setr_epi8(4, 5, 10, 11, 0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15); + __m128i a0 = _mm_shuffle_epi8(a.val, sh_a); + __m128i b0 = _mm_shuffle_epi8(b.val, sh_b); + __m128i c0 = _mm_shuffle_epi8(c.val, sh_c); + + __m128i v0 = _mm_blend_epi16(_mm_blend_epi16(a0, b0, 0x92), c0, 0x24); + __m128i v1 = _mm_blend_epi16(_mm_blend_epi16(c0, a0, 0x92), b0, 0x24); + __m128i v2 = _mm_blend_epi16(_mm_blend_epi16(b0, c0, 0x92), a0, 0x24); +#else + __m128i z = _mm_setzero_si128(); + __m128i ab0 = _mm_unpacklo_epi16(a.val, b.val); + __m128i ab1 = _mm_unpackhi_epi16(a.val, b.val); + __m128i c0 = _mm_unpacklo_epi16(c.val, z); + __m128i c1 = _mm_unpackhi_epi16(c.val, z); + + __m128i p10 = _mm_unpacklo_epi32(ab0, c0); + __m128i p11 = _mm_unpackhi_epi32(ab0, c0); + __m128i p12 = _mm_unpacklo_epi32(ab1, c1); + __m128i p13 = _mm_unpackhi_epi32(ab1, c1); + + __m128i p20 = _mm_unpacklo_epi64(p10, p11); + __m128i p21 = _mm_unpackhi_epi64(p10, p11); + __m128i p22 = _mm_unpacklo_epi64(p12, p13); + __m128i p23 = _mm_unpackhi_epi64(p12, p13); + + p20 = _mm_slli_si128(p20, 2); + p22 = _mm_slli_si128(p22, 2); + + __m128i p30 = _mm_unpacklo_epi64(p20, p21); + __m128i p31 = _mm_unpackhi_epi64(p20, p21); + __m128i p32 = _mm_unpacklo_epi64(p22, p23); + __m128i p33 = _mm_unpackhi_epi64(p22, p23); + + __m128i v0 = _mm_or_si128(_mm_srli_si128(p30, 2), _mm_slli_si128(p31, 10)); + __m128i v1 = _mm_or_si128(_mm_srli_si128(p31, 6), _mm_slli_si128(p32, 6)); + __m128i v2 = _mm_or_si128(_mm_srli_si128(p32, 10), _mm_slli_si128(p33, 2)); +#endif + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm_stream_si128((__m128i*)(ptr), v0); + _mm_stream_si128((__m128i*)(ptr + 8), v1); + _mm_stream_si128((__m128i*)(ptr + 16), v2); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm_store_si128((__m128i*)(ptr), v0); + _mm_store_si128((__m128i*)(ptr + 8), v1); + _mm_store_si128((__m128i*)(ptr + 16), v2); + } + else + { + 
_mm_storeu_si128((__m128i*)(ptr), v0); + _mm_storeu_si128((__m128i*)(ptr + 8), v1); + _mm_storeu_si128((__m128i*)(ptr + 16), v2); + } +} + +inline void v_store_interleave( ushort* ptr, const v_uint16x8& a, const v_uint16x8& b, + const v_uint16x8& c, const v_uint16x8& d, + hal::StoreMode mode = hal::STORE_UNALIGNED) +{ + // a0 a1 a2 a3 .... + // b0 b1 b2 b3 .... + // c0 c1 c2 c3 .... + // d0 d1 d2 d3 .... + __m128i u0 = _mm_unpacklo_epi16(a.val, c.val); // a0 c0 a1 c1 ... + __m128i u1 = _mm_unpackhi_epi16(a.val, c.val); // a4 c4 a5 c5 ... + __m128i u2 = _mm_unpacklo_epi16(b.val, d.val); // b0 d0 b1 d1 ... + __m128i u3 = _mm_unpackhi_epi16(b.val, d.val); // b4 d4 b5 d5 ... + + __m128i v0 = _mm_unpacklo_epi16(u0, u2); // a0 b0 c0 d0 ... + __m128i v1 = _mm_unpackhi_epi16(u0, u2); // a2 b2 c2 d2 ... + __m128i v2 = _mm_unpacklo_epi16(u1, u3); // a4 b4 c4 d4 ... + __m128i v3 = _mm_unpackhi_epi16(u1, u3); // a6 b6 c6 d6 ... + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm_stream_si128((__m128i*)(ptr), v0); + _mm_stream_si128((__m128i*)(ptr + 8), v1); + _mm_stream_si128((__m128i*)(ptr + 16), v2); + _mm_stream_si128((__m128i*)(ptr + 24), v3); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm_store_si128((__m128i*)(ptr), v0); + _mm_store_si128((__m128i*)(ptr + 8), v1); + _mm_store_si128((__m128i*)(ptr + 16), v2); + _mm_store_si128((__m128i*)(ptr + 24), v3); + } + else + { + _mm_storeu_si128((__m128i*)(ptr), v0); + _mm_storeu_si128((__m128i*)(ptr + 8), v1); + _mm_storeu_si128((__m128i*)(ptr + 16), v2); + _mm_storeu_si128((__m128i*)(ptr + 24), v3); + } +} + +inline void v_store_interleave( unsigned* ptr, const v_uint32x4& a, const v_uint32x4& b, + hal::StoreMode mode = hal::STORE_UNALIGNED) +{ + __m128i v0 = _mm_unpacklo_epi32(a.val, b.val); + __m128i v1 = _mm_unpackhi_epi32(a.val, b.val); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm_stream_si128((__m128i*)(ptr), v0); + _mm_stream_si128((__m128i*)(ptr + 4), v1); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm_store_si128((__m128i*)(ptr), v0); + _mm_store_si128((__m128i*)(ptr + 4), v1); + } + else + { + _mm_storeu_si128((__m128i*)(ptr), v0); + _mm_storeu_si128((__m128i*)(ptr + 4), v1); + } +} + +inline void v_store_interleave( unsigned* ptr, const v_uint32x4& a, const v_uint32x4& b, + const v_uint32x4& c, hal::StoreMode mode = hal::STORE_UNALIGNED) +{ + v_uint32x4 z = v_setzero_u32(), u0, u1, u2, u3; + v_transpose4x4(a, b, c, z, u0, u1, u2, u3); + + __m128i v0 = _mm_or_si128(u0.val, _mm_slli_si128(u1.val, 12)); + __m128i v1 = _mm_or_si128(_mm_srli_si128(u1.val, 4), _mm_slli_si128(u2.val, 8)); + __m128i v2 = _mm_or_si128(_mm_srli_si128(u2.val, 8), _mm_slli_si128(u3.val, 4)); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm_stream_si128((__m128i*)(ptr), v0); + _mm_stream_si128((__m128i*)(ptr + 4), v1); + _mm_stream_si128((__m128i*)(ptr + 8), v2); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm_store_si128((__m128i*)(ptr), v0); + _mm_store_si128((__m128i*)(ptr + 4), v1); + _mm_store_si128((__m128i*)(ptr + 8), v2); + } + else + { + _mm_storeu_si128((__m128i*)(ptr), v0); + _mm_storeu_si128((__m128i*)(ptr + 4), v1); + _mm_storeu_si128((__m128i*)(ptr + 8), v2); + } +} + +inline void v_store_interleave(unsigned* ptr, const v_uint32x4& a, const v_uint32x4& b, + const v_uint32x4& c, const v_uint32x4& d, + hal::StoreMode mode = hal::STORE_UNALIGNED) +{ + v_uint32x4 v0, v1, v2, v3; + v_transpose4x4(a, b, c, d, v0, v1, v2, v3); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm_stream_si128((__m128i*)(ptr), v0.val); + 
_mm_stream_si128((__m128i*)(ptr + 4), v1.val); + _mm_stream_si128((__m128i*)(ptr + 8), v2.val); + _mm_stream_si128((__m128i*)(ptr + 12), v3.val); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm_store_si128((__m128i*)(ptr), v0.val); + _mm_store_si128((__m128i*)(ptr + 4), v1.val); + _mm_store_si128((__m128i*)(ptr + 8), v2.val); + _mm_store_si128((__m128i*)(ptr + 12), v3.val); + } + else + { + _mm_storeu_si128((__m128i*)(ptr), v0.val); + _mm_storeu_si128((__m128i*)(ptr + 4), v1.val); + _mm_storeu_si128((__m128i*)(ptr + 8), v2.val); + _mm_storeu_si128((__m128i*)(ptr + 12), v3.val); + } +} + +// 2-channel, float only +inline void v_store_interleave(float* ptr, const v_float32x4& a, const v_float32x4& b, + hal::StoreMode mode = hal::STORE_UNALIGNED) +{ + __m128 v0 = _mm_unpacklo_ps(a.val, b.val); // a0 b0 a1 b1 + __m128 v1 = _mm_unpackhi_ps(a.val, b.val); // a2 b2 a3 b3 + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm_stream_ps(ptr, v0); + _mm_stream_ps(ptr + 4, v1); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm_store_ps(ptr, v0); + _mm_store_ps(ptr + 4, v1); + } + else + { + _mm_storeu_ps(ptr, v0); + _mm_storeu_ps(ptr + 4, v1); + } +} + +inline void v_store_interleave(float* ptr, const v_float32x4& a, const v_float32x4& b, + const v_float32x4& c, hal::StoreMode mode = hal::STORE_UNALIGNED) +{ + __m128 u0 = _mm_shuffle_ps(a.val, b.val, _MM_SHUFFLE(0, 0, 0, 0)); + __m128 u1 = _mm_shuffle_ps(c.val, a.val, _MM_SHUFFLE(1, 1, 0, 0)); + __m128 v0 = _mm_shuffle_ps(u0, u1, _MM_SHUFFLE(2, 0, 2, 0)); + __m128 u2 = _mm_shuffle_ps(b.val, c.val, _MM_SHUFFLE(1, 1, 1, 1)); + __m128 u3 = _mm_shuffle_ps(a.val, b.val, _MM_SHUFFLE(2, 2, 2, 2)); + __m128 v1 = _mm_shuffle_ps(u2, u3, _MM_SHUFFLE(2, 0, 2, 0)); + __m128 u4 = _mm_shuffle_ps(c.val, a.val, _MM_SHUFFLE(3, 3, 2, 2)); + __m128 u5 = _mm_shuffle_ps(b.val, c.val, _MM_SHUFFLE(3, 3, 3, 3)); + __m128 v2 = _mm_shuffle_ps(u4, u5, _MM_SHUFFLE(2, 0, 2, 0)); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm_stream_ps(ptr, v0); + _mm_stream_ps(ptr + 4, v1); + _mm_stream_ps(ptr + 8, v2); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm_store_ps(ptr, v0); + _mm_store_ps(ptr + 4, v1); + _mm_store_ps(ptr + 8, v2); + } + else + { + _mm_storeu_ps(ptr, v0); + _mm_storeu_ps(ptr + 4, v1); + _mm_storeu_ps(ptr + 8, v2); + } +} + +inline void v_store_interleave(float* ptr, const v_float32x4& a, const v_float32x4& b, + const v_float32x4& c, const v_float32x4& d, + hal::StoreMode mode = hal::STORE_UNALIGNED) +{ + __m128 u0 = _mm_unpacklo_ps(a.val, c.val); + __m128 u1 = _mm_unpacklo_ps(b.val, d.val); + __m128 u2 = _mm_unpackhi_ps(a.val, c.val); + __m128 u3 = _mm_unpackhi_ps(b.val, d.val); + __m128 v0 = _mm_unpacklo_ps(u0, u1); + __m128 v2 = _mm_unpacklo_ps(u2, u3); + __m128 v1 = _mm_unpackhi_ps(u0, u1); + __m128 v3 = _mm_unpackhi_ps(u2, u3); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm_stream_ps(ptr, v0); + _mm_stream_ps(ptr + 4, v1); + _mm_stream_ps(ptr + 8, v2); + _mm_stream_ps(ptr + 12, v3); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm_store_ps(ptr, v0); + _mm_store_ps(ptr + 4, v1); + _mm_store_ps(ptr + 8, v2); + _mm_store_ps(ptr + 12, v3); + } + else + { + _mm_storeu_ps(ptr, v0); + _mm_storeu_ps(ptr + 4, v1); + _mm_storeu_ps(ptr + 8, v2); + _mm_storeu_ps(ptr + 12, v3); + } +} + +inline void v_store_interleave(uint64 *ptr, const v_uint64x2& a, const v_uint64x2& b, + hal::StoreMode mode = hal::STORE_UNALIGNED) +{ + __m128i v0 = _mm_unpacklo_epi64(a.val, b.val); + __m128i v1 = _mm_unpackhi_epi64(a.val, b.val); + + if( mode == 
hal::STORE_ALIGNED_NOCACHE ) + { + _mm_stream_si128((__m128i*)(ptr), v0); + _mm_stream_si128((__m128i*)(ptr + 2), v1); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm_store_si128((__m128i*)(ptr), v0); + _mm_store_si128((__m128i*)(ptr + 2), v1); + } + else + { + _mm_storeu_si128((__m128i*)(ptr), v0); + _mm_storeu_si128((__m128i*)(ptr + 2), v1); + } +} + +inline void v_store_interleave(uint64 *ptr, const v_uint64x2& a, const v_uint64x2& b, + const v_uint64x2& c, hal::StoreMode mode = hal::STORE_UNALIGNED) +{ + __m128i v0 = _mm_unpacklo_epi64(a.val, b.val); + __m128i v1 = _mm_unpacklo_epi64(c.val, _mm_unpackhi_epi64(a.val, a.val)); + __m128i v2 = _mm_unpackhi_epi64(b.val, c.val); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm_stream_si128((__m128i*)(ptr), v0); + _mm_stream_si128((__m128i*)(ptr + 2), v1); + _mm_stream_si128((__m128i*)(ptr + 4), v2); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm_store_si128((__m128i*)(ptr), v0); + _mm_store_si128((__m128i*)(ptr + 2), v1); + _mm_store_si128((__m128i*)(ptr + 4), v2); + } + else + { + _mm_storeu_si128((__m128i*)(ptr), v0); + _mm_storeu_si128((__m128i*)(ptr + 2), v1); + _mm_storeu_si128((__m128i*)(ptr + 4), v2); + } +} + +inline void v_store_interleave(uint64 *ptr, const v_uint64x2& a, const v_uint64x2& b, + const v_uint64x2& c, const v_uint64x2& d, + hal::StoreMode mode = hal::STORE_UNALIGNED) +{ + __m128i v0 = _mm_unpacklo_epi64(a.val, b.val); + __m128i v1 = _mm_unpacklo_epi64(c.val, d.val); + __m128i v2 = _mm_unpackhi_epi64(a.val, b.val); + __m128i v3 = _mm_unpackhi_epi64(c.val, d.val); + + if( mode == hal::STORE_ALIGNED_NOCACHE ) + { + _mm_stream_si128((__m128i*)(ptr), v0); + _mm_stream_si128((__m128i*)(ptr + 2), v1); + _mm_stream_si128((__m128i*)(ptr + 4), v2); + _mm_stream_si128((__m128i*)(ptr + 6), v3); + } + else if( mode == hal::STORE_ALIGNED ) + { + _mm_store_si128((__m128i*)(ptr), v0); + _mm_store_si128((__m128i*)(ptr + 2), v1); + _mm_store_si128((__m128i*)(ptr + 4), v2); + _mm_store_si128((__m128i*)(ptr + 6), v3); + } + else + { + _mm_storeu_si128((__m128i*)(ptr), v0); + _mm_storeu_si128((__m128i*)(ptr + 2), v1); + _mm_storeu_si128((__m128i*)(ptr + 4), v2); + _mm_storeu_si128((__m128i*)(ptr + 6), v3); + } +} + +#define OPENCV_HAL_IMPL_SSE_LOADSTORE_INTERLEAVE(_Tpvec0, _Tp0, suffix0, _Tpvec1, _Tp1, suffix1) \ +inline void v_load_deinterleave( const _Tp0* ptr, _Tpvec0& a0, _Tpvec0& b0 ) \ +{ \ + _Tpvec1 a1, b1; \ + v_load_deinterleave((const _Tp1*)ptr, a1, b1); \ + a0 = v_reinterpret_as_##suffix0(a1); \ + b0 = v_reinterpret_as_##suffix0(b1); \ +} \ +inline void v_load_deinterleave( const _Tp0* ptr, _Tpvec0& a0, _Tpvec0& b0, _Tpvec0& c0 ) \ +{ \ + _Tpvec1 a1, b1, c1; \ + v_load_deinterleave((const _Tp1*)ptr, a1, b1, c1); \ + a0 = v_reinterpret_as_##suffix0(a1); \ + b0 = v_reinterpret_as_##suffix0(b1); \ + c0 = v_reinterpret_as_##suffix0(c1); \ +} \ +inline void v_load_deinterleave( const _Tp0* ptr, _Tpvec0& a0, _Tpvec0& b0, _Tpvec0& c0, _Tpvec0& d0 ) \ +{ \ + _Tpvec1 a1, b1, c1, d1; \ + v_load_deinterleave((const _Tp1*)ptr, a1, b1, c1, d1); \ + a0 = v_reinterpret_as_##suffix0(a1); \ + b0 = v_reinterpret_as_##suffix0(b1); \ + c0 = v_reinterpret_as_##suffix0(c1); \ + d0 = v_reinterpret_as_##suffix0(d1); \ +} \ +inline void v_store_interleave( _Tp0* ptr, const _Tpvec0& a0, const _Tpvec0& b0, \ + hal::StoreMode mode = hal::STORE_UNALIGNED ) \ +{ \ + _Tpvec1 a1 = v_reinterpret_as_##suffix1(a0); \ + _Tpvec1 b1 = v_reinterpret_as_##suffix1(b0); \ + v_store_interleave((_Tp1*)ptr, a1, b1, mode); \ +} \ +inline void 
v_store_interleave( _Tp0* ptr, const _Tpvec0& a0, const _Tpvec0& b0, \ + const _Tpvec0& c0, hal::StoreMode mode = hal::STORE_UNALIGNED ) \ +{ \ + _Tpvec1 a1 = v_reinterpret_as_##suffix1(a0); \ + _Tpvec1 b1 = v_reinterpret_as_##suffix1(b0); \ + _Tpvec1 c1 = v_reinterpret_as_##suffix1(c0); \ + v_store_interleave((_Tp1*)ptr, a1, b1, c1, mode); \ +} \ +inline void v_store_interleave( _Tp0* ptr, const _Tpvec0& a0, const _Tpvec0& b0, \ + const _Tpvec0& c0, const _Tpvec0& d0, \ + hal::StoreMode mode = hal::STORE_UNALIGNED ) \ +{ \ + _Tpvec1 a1 = v_reinterpret_as_##suffix1(a0); \ + _Tpvec1 b1 = v_reinterpret_as_##suffix1(b0); \ + _Tpvec1 c1 = v_reinterpret_as_##suffix1(c0); \ + _Tpvec1 d1 = v_reinterpret_as_##suffix1(d0); \ + v_store_interleave((_Tp1*)ptr, a1, b1, c1, d1, mode); \ +} + +OPENCV_HAL_IMPL_SSE_LOADSTORE_INTERLEAVE(v_int8x16, schar, s8, v_uint8x16, uchar, u8) +OPENCV_HAL_IMPL_SSE_LOADSTORE_INTERLEAVE(v_int16x8, short, s16, v_uint16x8, ushort, u16) +OPENCV_HAL_IMPL_SSE_LOADSTORE_INTERLEAVE(v_int32x4, int, s32, v_uint32x4, unsigned, u32) +OPENCV_HAL_IMPL_SSE_LOADSTORE_INTERLEAVE(v_int64x2, int64, s64, v_uint64x2, uint64, u64) +OPENCV_HAL_IMPL_SSE_LOADSTORE_INTERLEAVE(v_float64x2, double, f64, v_uint64x2, uint64, u64) + +inline v_float32x4 v_cvt_f32(const v_int32x4& a) +{ + return v_float32x4(_mm_cvtepi32_ps(a.val)); +} + +inline v_float32x4 v_cvt_f32(const v_float64x2& a) +{ + return v_float32x4(_mm_cvtpd_ps(a.val)); +} + +inline v_float32x4 v_cvt_f32(const v_float64x2& a, const v_float64x2& b) +{ + return v_float32x4(_mm_movelh_ps(_mm_cvtpd_ps(a.val), _mm_cvtpd_ps(b.val))); +} + +inline v_float64x2 v_cvt_f64(const v_int32x4& a) +{ + return v_float64x2(_mm_cvtepi32_pd(a.val)); +} + +inline v_float64x2 v_cvt_f64_high(const v_int32x4& a) +{ + return v_float64x2(_mm_cvtepi32_pd(_mm_srli_si128(a.val,8))); +} + +inline v_float64x2 v_cvt_f64(const v_float32x4& a) +{ + return v_float64x2(_mm_cvtps_pd(a.val)); +} + +inline v_float64x2 v_cvt_f64_high(const v_float32x4& a) +{ + return v_float64x2(_mm_cvtps_pd(_mm_movehl_ps(a.val, a.val))); +} + +// from (Mysticial and wim) https://stackoverflow.com/q/41144668 +inline v_float64x2 v_cvt_f64(const v_int64x2& v) +{ + // constants encoded as floating-point + __m128i magic_i_hi32 = _mm_set1_epi64x(0x4530000080000000); // 2^84 + 2^63 + __m128i magic_i_all = _mm_set1_epi64x(0x4530000080100000); // 2^84 + 2^63 + 2^52 + __m128d magic_d_all = _mm_castsi128_pd(magic_i_all); + // Blend the 32 lowest significant bits of v with magic_int_lo +#if CV_SSE4_1 + __m128i magic_i_lo = _mm_set1_epi64x(0x4330000000000000); // 2^52 + __m128i v_lo = _mm_blend_epi16(v.val, magic_i_lo, 0xcc); +#else + __m128i magic_i_lo = _mm_set1_epi32(0x43300000); // 2^52 + __m128i v_lo = _mm_unpacklo_epi32(_mm_shuffle_epi32(v.val, _MM_SHUFFLE(0, 0, 2, 0)), magic_i_lo); +#endif + // Extract the 32 most significant bits of v + __m128i v_hi = _mm_srli_epi64(v.val, 32); + // Flip the msb of v_hi and blend with 0x45300000 + v_hi = _mm_xor_si128(v_hi, magic_i_hi32); + // Compute in double precision + __m128d v_hi_dbl = _mm_sub_pd(_mm_castsi128_pd(v_hi), magic_d_all); + // (v_hi - magic_d_all) + v_lo Do not assume associativity of floating point addition + __m128d result = _mm_add_pd(v_hi_dbl, _mm_castsi128_pd(v_lo)); + return v_float64x2(result); +} + +////////////// Lookup table access //////////////////// + +inline v_int8x16 v_lut(const schar* tab, const int* idx) +{ +#if defined(_MSC_VER) + return v_int8x16(_mm_setr_epi8(tab[idx[0]], tab[idx[1]], tab[idx[ 2]], tab[idx[ 3]], tab[idx[ 4]], 
tab[idx[ 5]], tab[idx[ 6]], tab[idx[ 7]], + tab[idx[8]], tab[idx[9]], tab[idx[10]], tab[idx[11]], tab[idx[12]], tab[idx[13]], tab[idx[14]], tab[idx[15]])); +#else + return v_int8x16(_mm_setr_epi64( + _mm_setr_pi8(tab[idx[0]], tab[idx[1]], tab[idx[ 2]], tab[idx[ 3]], tab[idx[ 4]], tab[idx[ 5]], tab[idx[ 6]], tab[idx[ 7]]), + _mm_setr_pi8(tab[idx[8]], tab[idx[9]], tab[idx[10]], tab[idx[11]], tab[idx[12]], tab[idx[13]], tab[idx[14]], tab[idx[15]]) + )); +#endif +} +inline v_int8x16 v_lut_pairs(const schar* tab, const int* idx) +{ +#if defined(_MSC_VER) + return v_int8x16(_mm_setr_epi16(*(const short*)(tab + idx[0]), *(const short*)(tab + idx[1]), *(const short*)(tab + idx[2]), *(const short*)(tab + idx[3]), + *(const short*)(tab + idx[4]), *(const short*)(tab + idx[5]), *(const short*)(tab + idx[6]), *(const short*)(tab + idx[7]))); +#else + return v_int8x16(_mm_setr_epi64( + _mm_setr_pi16(*(const short*)(tab + idx[0]), *(const short*)(tab + idx[1]), *(const short*)(tab + idx[2]), *(const short*)(tab + idx[3])), + _mm_setr_pi16(*(const short*)(tab + idx[4]), *(const short*)(tab + idx[5]), *(const short*)(tab + idx[6]), *(const short*)(tab + idx[7])) + )); +#endif +} +inline v_int8x16 v_lut_quads(const schar* tab, const int* idx) +{ +#if defined(_MSC_VER) + return v_int8x16(_mm_setr_epi32(*(const int*)(tab + idx[0]), *(const int*)(tab + idx[1]), + *(const int*)(tab + idx[2]), *(const int*)(tab + idx[3]))); +#else + return v_int8x16(_mm_setr_epi64( + _mm_setr_pi32(*(const int*)(tab + idx[0]), *(const int*)(tab + idx[1])), + _mm_setr_pi32(*(const int*)(tab + idx[2]), *(const int*)(tab + idx[3])) + )); +#endif +} +inline v_uint8x16 v_lut(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v_lut((const schar *)tab, idx)); } +inline v_uint8x16 v_lut_pairs(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v_lut_pairs((const schar *)tab, idx)); } +inline v_uint8x16 v_lut_quads(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v_lut_quads((const schar *)tab, idx)); } + +inline v_int16x8 v_lut(const short* tab, const int* idx) +{ +#if defined(_MSC_VER) + return v_int16x8(_mm_setr_epi16(tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]], + tab[idx[4]], tab[idx[5]], tab[idx[6]], tab[idx[7]])); +#else + return v_int16x8(_mm_setr_epi64( + _mm_setr_pi16(tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]]), + _mm_setr_pi16(tab[idx[4]], tab[idx[5]], tab[idx[6]], tab[idx[7]]) + )); +#endif +} +inline v_int16x8 v_lut_pairs(const short* tab, const int* idx) +{ +#if defined(_MSC_VER) + return v_int16x8(_mm_setr_epi32(*(const int*)(tab + idx[0]), *(const int*)(tab + idx[1]), + *(const int*)(tab + idx[2]), *(const int*)(tab + idx[3]))); +#else + return v_int16x8(_mm_setr_epi64( + _mm_setr_pi32(*(const int*)(tab + idx[0]), *(const int*)(tab + idx[1])), + _mm_setr_pi32(*(const int*)(tab + idx[2]), *(const int*)(tab + idx[3])) + )); +#endif +} +inline v_int16x8 v_lut_quads(const short* tab, const int* idx) +{ + return v_int16x8(_mm_set_epi64x(*(const int64_t*)(tab + idx[1]), *(const int64_t*)(tab + idx[0]))); +} +inline v_uint16x8 v_lut(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v_lut((const short *)tab, idx)); } +inline v_uint16x8 v_lut_pairs(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v_lut_pairs((const short *)tab, idx)); } +inline v_uint16x8 v_lut_quads(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v_lut_quads((const short *)tab, idx)); } + +inline v_int32x4 v_lut(const int* tab, const int* idx) +{ +#if 
defined(_MSC_VER) + return v_int32x4(_mm_setr_epi32(tab[idx[0]], tab[idx[1]], + tab[idx[2]], tab[idx[3]])); +#else + return v_int32x4(_mm_setr_epi64( + _mm_setr_pi32(tab[idx[0]], tab[idx[1]]), + _mm_setr_pi32(tab[idx[2]], tab[idx[3]]) + )); +#endif +} +inline v_int32x4 v_lut_pairs(const int* tab, const int* idx) +{ + return v_int32x4(_mm_set_epi64x(*(const int64_t*)(tab + idx[1]), *(const int64_t*)(tab + idx[0]))); +} +inline v_int32x4 v_lut_quads(const int* tab, const int* idx) +{ + return v_int32x4(_mm_loadu_si128((const __m128i*)(tab + idx[0]))); +} +inline v_uint32x4 v_lut(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v_lut((const int *)tab, idx)); } +inline v_uint32x4 v_lut_pairs(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v_lut_pairs((const int *)tab, idx)); } +inline v_uint32x4 v_lut_quads(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v_lut_quads((const int *)tab, idx)); } + +inline v_int64x2 v_lut(const int64_t* tab, const int* idx) +{ + return v_int64x2(_mm_set_epi64x(tab[idx[1]], tab[idx[0]])); +} +inline v_int64x2 v_lut_pairs(const int64_t* tab, const int* idx) +{ + return v_int64x2(_mm_loadu_si128((const __m128i*)(tab + idx[0]))); +} +inline v_uint64x2 v_lut(const uint64_t* tab, const int* idx) { return v_reinterpret_as_u64(v_lut((const int64_t *)tab, idx)); } +inline v_uint64x2 v_lut_pairs(const uint64_t* tab, const int* idx) { return v_reinterpret_as_u64(v_lut_pairs((const int64_t *)tab, idx)); } + +inline v_float32x4 v_lut(const float* tab, const int* idx) +{ + return v_float32x4(_mm_setr_ps(tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]])); +} +inline v_float32x4 v_lut_pairs(const float* tab, const int* idx) { return v_reinterpret_as_f32(v_lut_pairs((const int *)tab, idx)); } +inline v_float32x4 v_lut_quads(const float* tab, const int* idx) { return v_reinterpret_as_f32(v_lut_quads((const int *)tab, idx)); } + +inline v_float64x2 v_lut(const double* tab, const int* idx) +{ + return v_float64x2(_mm_setr_pd(tab[idx[0]], tab[idx[1]])); +} +inline v_float64x2 v_lut_pairs(const double* tab, const int* idx) { return v_float64x2(_mm_castsi128_pd(_mm_loadu_si128((const __m128i*)(tab + idx[0])))); } + +inline v_int32x4 v_lut(const int* tab, const v_int32x4& idxvec) +{ + int CV_DECL_ALIGNED(32) idx[4]; + v_store_aligned(idx, idxvec); + return v_int32x4(_mm_setr_epi32(tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]])); +} + +inline v_uint32x4 v_lut(const unsigned* tab, const v_int32x4& idxvec) +{ + return v_reinterpret_as_u32(v_lut((const int *)tab, idxvec)); +} + +inline v_float32x4 v_lut(const float* tab, const v_int32x4& idxvec) +{ + int CV_DECL_ALIGNED(32) idx[4]; + v_store_aligned(idx, idxvec); + return v_float32x4(_mm_setr_ps(tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]])); +} + +inline v_float64x2 v_lut(const double* tab, const v_int32x4& idxvec) +{ + int idx[2]; + v_store_low(idx, idxvec); + return v_float64x2(_mm_setr_pd(tab[idx[0]], tab[idx[1]])); +} + +// loads pairs from the table and deinterleaves them, e.g. returns: +// x = (tab[idxvec[0], tab[idxvec[1]], tab[idxvec[2]], tab[idxvec[3]]), +// y = (tab[idxvec[0]+1], tab[idxvec[1]+1], tab[idxvec[2]+1], tab[idxvec[3]+1]) +// note that the indices are float's indices, not the float-pair indices. +// in theory, this function can be used to implement bilinear interpolation, +// when idxvec are the offsets within the image. 
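+// for example, with tab pointing at packed (x, y) float pairs and idxvec = (2*i0, 2*i1, 2*i2, 2*i3),
+// x becomes (tab[2*i0], tab[2*i1], tab[2*i2], tab[2*i3]) and y the matching tab[2*i+1] values,
+// i.e. four pairs are gathered and split into two registers in a single call.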
+inline void v_lut_deinterleave(const float* tab, const v_int32x4& idxvec, v_float32x4& x, v_float32x4& y) +{ + int CV_DECL_ALIGNED(32) idx[4]; + v_store_aligned(idx, idxvec); + __m128 z = _mm_setzero_ps(); + __m128 xy01 = _mm_loadl_pi(z, (__m64*)(tab + idx[0])); + __m128 xy23 = _mm_loadl_pi(z, (__m64*)(tab + idx[2])); + xy01 = _mm_loadh_pi(xy01, (__m64*)(tab + idx[1])); + xy23 = _mm_loadh_pi(xy23, (__m64*)(tab + idx[3])); + __m128 xxyy02 = _mm_unpacklo_ps(xy01, xy23); + __m128 xxyy13 = _mm_unpackhi_ps(xy01, xy23); + x = v_float32x4(_mm_unpacklo_ps(xxyy02, xxyy13)); + y = v_float32x4(_mm_unpackhi_ps(xxyy02, xxyy13)); +} + +inline void v_lut_deinterleave(const double* tab, const v_int32x4& idxvec, v_float64x2& x, v_float64x2& y) +{ + int idx[2]; + v_store_low(idx, idxvec); + __m128d xy0 = _mm_loadu_pd(tab + idx[0]); + __m128d xy1 = _mm_loadu_pd(tab + idx[1]); + x = v_float64x2(_mm_unpacklo_pd(xy0, xy1)); + y = v_float64x2(_mm_unpackhi_pd(xy0, xy1)); +} + +inline v_int8x16 v_interleave_pairs(const v_int8x16& vec) +{ +#if CV_SSSE3 + return v_int8x16(_mm_shuffle_epi8(vec.val, _mm_set_epi64x(0x0f0d0e0c0b090a08, 0x0705060403010200))); +#else + __m128i a = _mm_shufflelo_epi16(vec.val, _MM_SHUFFLE(3, 1, 2, 0)); + a = _mm_shufflehi_epi16(a, _MM_SHUFFLE(3, 1, 2, 0)); + a = _mm_shuffle_epi32(a, _MM_SHUFFLE(3, 1, 2, 0)); + return v_int8x16(_mm_unpacklo_epi8(a, _mm_unpackhi_epi64(a, a))); +#endif +} +inline v_uint8x16 v_interleave_pairs(const v_uint8x16& vec) { return v_reinterpret_as_u8(v_interleave_pairs(v_reinterpret_as_s8(vec))); } +inline v_int8x16 v_interleave_quads(const v_int8x16& vec) +{ +#if CV_SSSE3 + return v_int8x16(_mm_shuffle_epi8(vec.val, _mm_set_epi64x(0x0f0b0e0a0d090c08, 0x0703060205010400))); +#else + __m128i a = _mm_shuffle_epi32(vec.val, _MM_SHUFFLE(3, 1, 2, 0)); + return v_int8x16(_mm_unpacklo_epi8(a, _mm_unpackhi_epi64(a, a))); +#endif +} +inline v_uint8x16 v_interleave_quads(const v_uint8x16& vec) { return v_reinterpret_as_u8(v_interleave_quads(v_reinterpret_as_s8(vec))); } + +inline v_int16x8 v_interleave_pairs(const v_int16x8& vec) +{ +#if CV_SSSE3 + return v_int16x8(_mm_shuffle_epi8(vec.val, _mm_set_epi64x(0x0f0e0b0a0d0c0908, 0x0706030205040100))); +#else + __m128i a = _mm_shufflelo_epi16(vec.val, _MM_SHUFFLE(3, 1, 2, 0)); + return v_int16x8(_mm_shufflehi_epi16(a, _MM_SHUFFLE(3, 1, 2, 0))); +#endif +} +inline v_uint16x8 v_interleave_pairs(const v_uint16x8& vec) { return v_reinterpret_as_u16(v_interleave_pairs(v_reinterpret_as_s16(vec))); } +inline v_int16x8 v_interleave_quads(const v_int16x8& vec) +{ +#if CV_SSSE3 + return v_int16x8(_mm_shuffle_epi8(vec.val, _mm_set_epi64x(0x0f0e07060d0c0504, 0x0b0a030209080100))); +#else + return v_int16x8(_mm_unpacklo_epi16(vec.val, _mm_unpackhi_epi64(vec.val, vec.val))); +#endif +} +inline v_uint16x8 v_interleave_quads(const v_uint16x8& vec) { return v_reinterpret_as_u16(v_interleave_quads(v_reinterpret_as_s16(vec))); } + +inline v_int32x4 v_interleave_pairs(const v_int32x4& vec) +{ + return v_int32x4(_mm_shuffle_epi32(vec.val, _MM_SHUFFLE(3, 1, 2, 0))); +} +inline v_uint32x4 v_interleave_pairs(const v_uint32x4& vec) { return v_reinterpret_as_u32(v_interleave_pairs(v_reinterpret_as_s32(vec))); } +inline v_float32x4 v_interleave_pairs(const v_float32x4& vec) { return v_reinterpret_as_f32(v_interleave_pairs(v_reinterpret_as_s32(vec))); } + +inline v_int8x16 v_pack_triplets(const v_int8x16& vec) +{ +#if CV_SSSE3 + return v_int8x16(_mm_shuffle_epi8(vec.val, _mm_set_epi64x(0xffffff0f0e0d0c0a, 0x0908060504020100))); +#else + __m128i mask = 
_mm_set1_epi64x(0x00000000FFFFFFFF); + __m128i a = _mm_srli_si128(_mm_or_si128(_mm_andnot_si128(mask, vec.val), _mm_and_si128(mask, _mm_sll_epi32(vec.val, _mm_set_epi64x(0, 8)))), 1); + return v_int8x16(_mm_srli_si128(_mm_shufflelo_epi16(a, _MM_SHUFFLE(2, 1, 0, 3)), 2)); +#endif +} +inline v_uint8x16 v_pack_triplets(const v_uint8x16& vec) { return v_reinterpret_as_u8(v_pack_triplets(v_reinterpret_as_s8(vec))); } + +inline v_int16x8 v_pack_triplets(const v_int16x8& vec) +{ +#if CV_SSSE3 + return v_int16x8(_mm_shuffle_epi8(vec.val, _mm_set_epi64x(0xffff0f0e0d0c0b0a, 0x0908050403020100))); +#else + return v_int16x8(_mm_srli_si128(_mm_shufflelo_epi16(vec.val, _MM_SHUFFLE(2, 1, 0, 3)), 2)); +#endif +} +inline v_uint16x8 v_pack_triplets(const v_uint16x8& vec) { return v_reinterpret_as_u16(v_pack_triplets(v_reinterpret_as_s16(vec))); } + +inline v_int32x4 v_pack_triplets(const v_int32x4& vec) { return vec; } +inline v_uint32x4 v_pack_triplets(const v_uint32x4& vec) { return vec; } +inline v_float32x4 v_pack_triplets(const v_float32x4& vec) { return vec; } + +template<int i> +inline uchar v_extract_n(const v_uint8x16& v) +{ +#if CV_SSE4_1 + return (uchar)_mm_extract_epi8(v.val, i); +#else + return v_rotate_right<i>(v).get0(); +#endif +} + +template<int i> +inline schar v_extract_n(const v_int8x16& v) +{ + return (schar)v_extract_n<i>(v_reinterpret_as_u8(v)); +} + +template<int i> +inline ushort v_extract_n(const v_uint16x8& v) +{ + return (ushort)_mm_extract_epi16(v.val, i); +} + +template<int i> +inline short v_extract_n(const v_int16x8& v) +{ + return (short)v_extract_n<i>(v_reinterpret_as_u16(v)); +} + +template<int i> +inline uint v_extract_n(const v_uint32x4& v) +{ +#if CV_SSE4_1 + return (uint)_mm_extract_epi32(v.val, i); +#else + return v_rotate_right<i>(v).get0(); +#endif +} + +template<int i> +inline int v_extract_n(const v_int32x4& v) +{ + return (int)v_extract_n<i>(v_reinterpret_as_u32(v)); +} + +template<int i> +inline uint64 v_extract_n(const v_uint64x2& v) +{ +#ifdef CV__SIMD_NATIVE_mm_extract_epi64 + return (uint64)_v128_extract_epi64<i>(v.val); +#else + return v_rotate_right<i>(v).get0(); +#endif +} + +template<int i> +inline int64 v_extract_n(const v_int64x2& v) +{ + return (int64)v_extract_n<i>(v_reinterpret_as_u64(v)); +} + +template<int i> +inline float v_extract_n(const v_float32x4& v) +{ + union { uint iv; float fv; } d; + d.iv = v_extract_n<i>(v_reinterpret_as_u32(v)); + return d.fv; +} + +template<int i> +inline double v_extract_n(const v_float64x2& v) +{ + union { uint64 iv; double dv; } d; + d.iv = v_extract_n<i>(v_reinterpret_as_u64(v)); + return d.dv; +} + +template<int i> +inline v_int32x4 v_broadcast_element(const v_int32x4& v) +{ + return v_int32x4(_mm_shuffle_epi32(v.val, _MM_SHUFFLE(i,i,i,i))); +} + +template<int i> +inline v_uint32x4 v_broadcast_element(const v_uint32x4& v) +{ + return v_uint32x4(_mm_shuffle_epi32(v.val, _MM_SHUFFLE(i,i,i,i))); +} + +template<int i> +inline v_float32x4 v_broadcast_element(const v_float32x4& v) +{ + return v_float32x4(_mm_shuffle_ps(v.val, v.val, _MM_SHUFFLE((char)i,(char)i,(char)i,(char)i))); +} + +////////////// FP16 support /////////////////////////// + +inline v_float32x4 v_load_expand(const float16_t* ptr) +{ +#if CV_FP16 + return v_float32x4(_mm_cvtph_ps(_mm_loadu_si128((const __m128i*)ptr))); +#else + const __m128i z = _mm_setzero_si128(), delta = _mm_set1_epi32(0x38000000); + const __m128i signmask = _mm_set1_epi32(0x80000000), maxexp = _mm_set1_epi32(0x7c000000); + const __m128 deltaf = _mm_castsi128_ps(_mm_set1_epi32(0x38800000)); + __m128i bits = _mm_unpacklo_epi16(z, _mm_loadl_epi64((const __m128i*)ptr)); // h << 16 + __m128i e = 
_mm_and_si128(bits, maxexp), sign = _mm_and_si128(bits, signmask); + __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_xor_si128(bits, sign), 3), delta); // ((h & 0x7fff) << 13) + delta + __m128i zt = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_add_epi32(t, _mm_set1_epi32(1 << 23))), deltaf)); + + t = _mm_add_epi32(t, _mm_and_si128(delta, _mm_cmpeq_epi32(maxexp, e))); + __m128i zmask = _mm_cmpeq_epi32(e, z); + __m128i ft = v_select_si128(zmask, zt, t); + return v_float32x4(_mm_castsi128_ps(_mm_or_si128(ft, sign))); +#endif +} + +inline void v_pack_store(float16_t* ptr, const v_float32x4& v) +{ +#if CV_FP16 + __m128i fp16_value = _mm_cvtps_ph(v.val, 0); + _mm_storel_epi64((__m128i*)ptr, fp16_value); +#else + const __m128i signmask = _mm_set1_epi32(0x80000000); + const __m128i rval = _mm_set1_epi32(0x3f000000); + + __m128i t = _mm_castps_si128(v.val); + __m128i sign = _mm_srai_epi32(_mm_and_si128(t, signmask), 16); + t = _mm_andnot_si128(signmask, t); + + __m128i finitemask = _mm_cmpgt_epi32(_mm_set1_epi32(0x47800000), t); + __m128i isnan = _mm_cmpgt_epi32(t, _mm_set1_epi32(0x7f800000)); + __m128i naninf = v_select_si128(isnan, _mm_set1_epi32(0x7e00), _mm_set1_epi32(0x7c00)); + __m128i tinymask = _mm_cmpgt_epi32(_mm_set1_epi32(0x38800000), t); + __m128i tt = _mm_castps_si128(_mm_add_ps(_mm_castsi128_ps(t), _mm_castsi128_ps(rval))); + tt = _mm_sub_epi32(tt, rval); + __m128i odd = _mm_and_si128(_mm_srli_epi32(t, 13), _mm_set1_epi32(1)); + __m128i nt = _mm_add_epi32(t, _mm_set1_epi32(0xc8000fff)); + nt = _mm_srli_epi32(_mm_add_epi32(nt, odd), 13); + t = v_select_si128(tinymask, tt, nt); + t = v_select_si128(finitemask, t, naninf); + t = _mm_or_si128(t, sign); + t = _mm_packs_epi32(t, t); + _mm_storel_epi64((__m128i*)ptr, t); +#endif +} + +inline void v_cleanup() {} + +CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END + +//! @endcond + +} + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_sse_em.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_sse_em.hpp new file mode 100644 index 00000000..6fb08816 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_sse_em.hpp @@ -0,0 +1,180 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html + +#ifndef OPENCV_HAL_INTRIN_SSE_EM_HPP +#define OPENCV_HAL_INTRIN_SSE_EM_HPP + +namespace cv +{ + +//! 
@cond IGNORED + +CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN + +#define OPENCV_HAL_SSE_WRAP_1(fun, tp) \ + inline tp _v128_##fun(const tp& a) \ + { return _mm_##fun(a); } + +#define OPENCV_HAL_SSE_WRAP_2(fun, tp) \ + inline tp _v128_##fun(const tp& a, const tp& b) \ + { return _mm_##fun(a, b); } + +#define OPENCV_HAL_SSE_WRAP_3(fun, tp) \ + inline tp _v128_##fun(const tp& a, const tp& b, const tp& c) \ + { return _mm_##fun(a, b, c); } + +///////////////////////////// XOP ///////////////////////////// + +// [todo] define CV_XOP +#if 1 // CV_XOP +inline __m128i _v128_comgt_epu32(const __m128i& a, const __m128i& b) +{ + const __m128i delta = _mm_set1_epi32((int)0x80000000); + return _mm_cmpgt_epi32(_mm_xor_si128(a, delta), _mm_xor_si128(b, delta)); +} +// wrapping XOP +#else +OPENCV_HAL_SSE_WRAP_2(_v128_comgt_epu32, __m128i) +#endif // !CV_XOP + +///////////////////////////// SSE4.1 ///////////////////////////// + +#if !CV_SSE4_1 + +/** Swizzle **/ +inline __m128i _v128_blendv_epi8(const __m128i& a, const __m128i& b, const __m128i& mask) +{ return _mm_xor_si128(a, _mm_and_si128(_mm_xor_si128(b, a), mask)); } + +/** Convert **/ +// 8 >> 16 +inline __m128i _v128_cvtepu8_epi16(const __m128i& a) +{ + const __m128i z = _mm_setzero_si128(); + return _mm_unpacklo_epi8(a, z); +} +inline __m128i _v128_cvtepi8_epi16(const __m128i& a) +{ return _mm_srai_epi16(_mm_unpacklo_epi8(a, a), 8); } +// 8 >> 32 +inline __m128i _v128_cvtepu8_epi32(const __m128i& a) +{ + const __m128i z = _mm_setzero_si128(); + return _mm_unpacklo_epi16(_mm_unpacklo_epi8(a, z), z); +} +inline __m128i _v128_cvtepi8_epi32(const __m128i& a) +{ + __m128i r = _mm_unpacklo_epi8(a, a); + r = _mm_unpacklo_epi8(r, r); + return _mm_srai_epi32(r, 24); +} +// 16 >> 32 +inline __m128i _v128_cvtepu16_epi32(const __m128i& a) +{ + const __m128i z = _mm_setzero_si128(); + return _mm_unpacklo_epi16(a, z); +} +inline __m128i _v128_cvtepi16_epi32(const __m128i& a) +{ return _mm_srai_epi32(_mm_unpacklo_epi16(a, a), 16); } +// 32 >> 64 +inline __m128i _v128_cvtepu32_epi64(const __m128i& a) +{ + const __m128i z = _mm_setzero_si128(); + return _mm_unpacklo_epi32(a, z); +} +inline __m128i _v128_cvtepi32_epi64(const __m128i& a) +{ return _mm_unpacklo_epi32(a, _mm_srai_epi32(a, 31)); } + +/** Arithmetic **/ +inline __m128i _v128_mullo_epi32(const __m128i& a, const __m128i& b) +{ + __m128i c0 = _mm_mul_epu32(a, b); + __m128i c1 = _mm_mul_epu32(_mm_srli_epi64(a, 32), _mm_srli_epi64(b, 32)); + __m128i d0 = _mm_unpacklo_epi32(c0, c1); + __m128i d1 = _mm_unpackhi_epi32(c0, c1); + return _mm_unpacklo_epi64(d0, d1); +} + +/** Math **/ +inline __m128i _v128_min_epu32(const __m128i& a, const __m128i& b) +{ return _v128_blendv_epi8(a, b, _v128_comgt_epu32(a, b)); } + +// wrapping SSE4.1 +#else +OPENCV_HAL_SSE_WRAP_1(cvtepu8_epi16, __m128i) +OPENCV_HAL_SSE_WRAP_1(cvtepi8_epi16, __m128i) +OPENCV_HAL_SSE_WRAP_1(cvtepu8_epi32, __m128i) +OPENCV_HAL_SSE_WRAP_1(cvtepi8_epi32, __m128i) +OPENCV_HAL_SSE_WRAP_1(cvtepu16_epi32, __m128i) +OPENCV_HAL_SSE_WRAP_1(cvtepi16_epi32, __m128i) +OPENCV_HAL_SSE_WRAP_1(cvtepu32_epi64, __m128i) +OPENCV_HAL_SSE_WRAP_1(cvtepi32_epi64, __m128i) +OPENCV_HAL_SSE_WRAP_2(min_epu32, __m128i) +OPENCV_HAL_SSE_WRAP_2(mullo_epi32, __m128i) +OPENCV_HAL_SSE_WRAP_3(blendv_epi8, __m128i) +#endif // !CV_SSE4_1 + +///////////////////////////// Revolutionary ///////////////////////////// + +/** Convert **/ +// 16 << 8 +inline __m128i _v128_cvtepu8_epi16_high(const __m128i& a) +{ + const __m128i z = _mm_setzero_si128(); + return _mm_unpackhi_epi8(a, z); +} +inline 
__m128i _v128_cvtepi8_epi16_high(const __m128i& a) +{ return _mm_srai_epi16(_mm_unpackhi_epi8(a, a), 8); } +// 32 << 16 +inline __m128i _v128_cvtepu16_epi32_high(const __m128i& a) +{ + const __m128i z = _mm_setzero_si128(); + return _mm_unpackhi_epi16(a, z); +} +inline __m128i _v128_cvtepi16_epi32_high(const __m128i& a) +{ return _mm_srai_epi32(_mm_unpackhi_epi16(a, a), 16); } +// 64 << 32 +inline __m128i _v128_cvtepu32_epi64_high(const __m128i& a) +{ + const __m128i z = _mm_setzero_si128(); + return _mm_unpackhi_epi32(a, z); +} +inline __m128i _v128_cvtepi32_epi64_high(const __m128i& a) +{ return _mm_unpackhi_epi32(a, _mm_srai_epi32(a, 31)); } + +/** Miscellaneous **/ +inline __m128i _v128_packs_epu32(const __m128i& a, const __m128i& b) +{ + const __m128i m = _mm_set1_epi32(65535); + __m128i am = _v128_min_epu32(a, m); + __m128i bm = _v128_min_epu32(b, m); +#if CV_SSE4_1 + return _mm_packus_epi32(am, bm); +#else + const __m128i d = _mm_set1_epi32(32768), nd = _mm_set1_epi16(-32768); + am = _mm_sub_epi32(am, d); + bm = _mm_sub_epi32(bm, d); + am = _mm_packs_epi32(am, bm); + return _mm_sub_epi16(am, nd); +#endif +} + +template<int i> +inline int64 _v128_extract_epi64(const __m128i& a) +{ +#if defined(CV__SIMD_HAVE_mm_extract_epi64) || (CV_SSE4_1 && (defined(__x86_64__)/*GCC*/ || defined(_M_X64)/*MSVC*/)) +#define CV__SIMD_NATIVE_mm_extract_epi64 1 + return _mm_extract_epi64(a, i); +#else + CV_DECL_ALIGNED(16) int64 tmp[2]; + _mm_store_si128((__m128i*)tmp, a); + return tmp[i]; +#endif +} + +CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END + +//! @endcond + +} // cv:: + +#endif // OPENCV_HAL_INTRIN_SSE_EM_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_vsx.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_vsx.hpp new file mode 100644 index 00000000..b198643c --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_vsx.hpp @@ -0,0 +1,1608 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html + +#ifndef OPENCV_HAL_VSX_HPP +#define OPENCV_HAL_VSX_HPP + +#include <algorithm> +#include "opencv2/core/utility.hpp" + +#define CV_SIMD128 1 +#define CV_SIMD128_64F 1 + +namespace cv +{ + +//! 
@cond IGNORED + +CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN + +///////// Types //////////// + +struct v_uint8x16 +{ + typedef uchar lane_type; + enum { nlanes = 16 }; + vec_uchar16 val; + + explicit v_uint8x16(const vec_uchar16& v) : val(v) + {} + v_uint8x16() + {} + v_uint8x16(vec_bchar16 v) : val(vec_uchar16_c(v)) + {} + v_uint8x16(uchar v0, uchar v1, uchar v2, uchar v3, uchar v4, uchar v5, uchar v6, uchar v7, + uchar v8, uchar v9, uchar v10, uchar v11, uchar v12, uchar v13, uchar v14, uchar v15) + : val(vec_uchar16_set(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15)) + {} + + static inline v_uint8x16 zero() { return v_uint8x16(vec_uchar16_z); } + + uchar get0() const + { return vec_extract(val, 0); } +}; + +struct v_int8x16 +{ + typedef schar lane_type; + enum { nlanes = 16 }; + vec_char16 val; + + explicit v_int8x16(const vec_char16& v) : val(v) + {} + v_int8x16() + {} + v_int8x16(vec_bchar16 v) : val(vec_char16_c(v)) + {} + v_int8x16(schar v0, schar v1, schar v2, schar v3, schar v4, schar v5, schar v6, schar v7, + schar v8, schar v9, schar v10, schar v11, schar v12, schar v13, schar v14, schar v15) + : val(vec_char16_set(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15)) + {} + + static inline v_int8x16 zero() { return v_int8x16(vec_char16_z); } + + schar get0() const + { return vec_extract(val, 0); } +}; + +struct v_uint16x8 +{ + typedef ushort lane_type; + enum { nlanes = 8 }; + vec_ushort8 val; + + explicit v_uint16x8(const vec_ushort8& v) : val(v) + {} + v_uint16x8() + {} + v_uint16x8(vec_bshort8 v) : val(vec_ushort8_c(v)) + {} + v_uint16x8(ushort v0, ushort v1, ushort v2, ushort v3, ushort v4, ushort v5, ushort v6, ushort v7) + : val(vec_ushort8_set(v0, v1, v2, v3, v4, v5, v6, v7)) + {} + + static inline v_uint16x8 zero() { return v_uint16x8(vec_ushort8_z); } + + ushort get0() const + { return vec_extract(val, 0); } +}; + +struct v_int16x8 +{ + typedef short lane_type; + enum { nlanes = 8 }; + vec_short8 val; + + explicit v_int16x8(const vec_short8& v) : val(v) + {} + v_int16x8() + {} + v_int16x8(vec_bshort8 v) : val(vec_short8_c(v)) + {} + v_int16x8(short v0, short v1, short v2, short v3, short v4, short v5, short v6, short v7) + : val(vec_short8_set(v0, v1, v2, v3, v4, v5, v6, v7)) + {} + + static inline v_int16x8 zero() { return v_int16x8(vec_short8_z); } + + short get0() const + { return vec_extract(val, 0); } +}; + +struct v_uint32x4 +{ + typedef unsigned lane_type; + enum { nlanes = 4 }; + vec_uint4 val; + + explicit v_uint32x4(const vec_uint4& v) : val(v) + {} + v_uint32x4() + {} + v_uint32x4(vec_bint4 v) : val(vec_uint4_c(v)) + {} + v_uint32x4(unsigned v0, unsigned v1, unsigned v2, unsigned v3) : val(vec_uint4_set(v0, v1, v2, v3)) + {} + + static inline v_uint32x4 zero() { return v_uint32x4(vec_uint4_z); } + + uint get0() const + { return vec_extract(val, 0); } +}; + +struct v_int32x4 +{ + typedef int lane_type; + enum { nlanes = 4 }; + vec_int4 val; + + explicit v_int32x4(const vec_int4& v) : val(v) + {} + v_int32x4() + {} + v_int32x4(vec_bint4 v) : val(vec_int4_c(v)) + {} + v_int32x4(int v0, int v1, int v2, int v3) : val(vec_int4_set(v0, v1, v2, v3)) + {} + + static inline v_int32x4 zero() { return v_int32x4(vec_int4_z); } + + int get0() const + { return vec_extract(val, 0); } +}; + +struct v_float32x4 +{ + typedef float lane_type; + enum { nlanes = 4 }; + vec_float4 val; + + explicit v_float32x4(const vec_float4& v) : val(v) + {} + v_float32x4() + {} + v_float32x4(vec_bint4 v) : val(vec_float4_c(v)) + {} + v_float32x4(float v0, 
float v1, float v2, float v3) : val(vec_float4_set(v0, v1, v2, v3)) + {} + + static inline v_float32x4 zero() { return v_float32x4(vec_float4_z); } + + float get0() const + { return vec_extract(val, 0); } +}; + +struct v_uint64x2 +{ + typedef uint64 lane_type; + enum { nlanes = 2 }; + vec_udword2 val; + + explicit v_uint64x2(const vec_udword2& v) : val(v) + {} + v_uint64x2() + {} + v_uint64x2(vec_bdword2 v) : val(vec_udword2_c(v)) + {} + v_uint64x2(uint64 v0, uint64 v1) : val(vec_udword2_set(v0, v1)) + {} + + static inline v_uint64x2 zero() { return v_uint64x2(vec_udword2_z); } + + uint64 get0() const + { return vec_extract(val, 0); } +}; + +struct v_int64x2 +{ + typedef int64 lane_type; + enum { nlanes = 2 }; + vec_dword2 val; + + explicit v_int64x2(const vec_dword2& v) : val(v) + {} + v_int64x2() + {} + v_int64x2(vec_bdword2 v) : val(vec_dword2_c(v)) + {} + v_int64x2(int64 v0, int64 v1) : val(vec_dword2_set(v0, v1)) + {} + + static inline v_int64x2 zero() { return v_int64x2(vec_dword2_z); } + + int64 get0() const + { return vec_extract(val, 0); } +}; + +struct v_float64x2 +{ + typedef double lane_type; + enum { nlanes = 2 }; + vec_double2 val; + + explicit v_float64x2(const vec_double2& v) : val(v) + {} + v_float64x2() + {} + v_float64x2(vec_bdword2 v) : val(vec_double2_c(v)) + {} + v_float64x2(double v0, double v1) : val(vec_double2_set(v0, v1)) + {} + + static inline v_float64x2 zero() { return v_float64x2(vec_double2_z); } + + double get0() const + { return vec_extract(val, 0); } +}; + +#define OPENCV_HAL_IMPL_VSX_EXTRACT_N(_Tpvec, _Tp) \ +template<int i> inline _Tp v_extract_n(VSX_UNUSED(_Tpvec v)) { return vec_extract(v.val, i); } + +OPENCV_HAL_IMPL_VSX_EXTRACT_N(v_uint8x16, uchar) +OPENCV_HAL_IMPL_VSX_EXTRACT_N(v_int8x16, schar) +OPENCV_HAL_IMPL_VSX_EXTRACT_N(v_uint16x8, ushort) +OPENCV_HAL_IMPL_VSX_EXTRACT_N(v_int16x8, short) +OPENCV_HAL_IMPL_VSX_EXTRACT_N(v_uint32x4, uint) +OPENCV_HAL_IMPL_VSX_EXTRACT_N(v_int32x4, int) +OPENCV_HAL_IMPL_VSX_EXTRACT_N(v_uint64x2, uint64) +OPENCV_HAL_IMPL_VSX_EXTRACT_N(v_int64x2, int64) +OPENCV_HAL_IMPL_VSX_EXTRACT_N(v_float32x4, float) +OPENCV_HAL_IMPL_VSX_EXTRACT_N(v_float64x2, double) + +//////////////// Load and store operations /////////////// + +/* + * clang-5 aborted during parse "vec_xxx_c" only if it's + * inside a function template which is defined by preprocessor macro. 
+ * + * if vec_xxx_c defined as C++ cast, clang-5 will pass it +*/ +#define OPENCV_HAL_IMPL_VSX_INITVEC(_Tpvec, _Tp, suffix, cast) \ +inline _Tpvec v_setzero_##suffix() { return _Tpvec(vec_splats((_Tp)0)); } \ +inline _Tpvec v_setall_##suffix(_Tp v) { return _Tpvec(vec_splats((_Tp)v));} \ +template inline _Tpvec v_reinterpret_as_##suffix(const _Tpvec0 &a) \ +{ return _Tpvec((cast)a.val); } + +OPENCV_HAL_IMPL_VSX_INITVEC(v_uint8x16, uchar, u8, vec_uchar16) +OPENCV_HAL_IMPL_VSX_INITVEC(v_int8x16, schar, s8, vec_char16) +OPENCV_HAL_IMPL_VSX_INITVEC(v_uint16x8, ushort, u16, vec_ushort8) +OPENCV_HAL_IMPL_VSX_INITVEC(v_int16x8, short, s16, vec_short8) +OPENCV_HAL_IMPL_VSX_INITVEC(v_uint32x4, uint, u32, vec_uint4) +OPENCV_HAL_IMPL_VSX_INITVEC(v_int32x4, int, s32, vec_int4) +OPENCV_HAL_IMPL_VSX_INITVEC(v_uint64x2, uint64, u64, vec_udword2) +OPENCV_HAL_IMPL_VSX_INITVEC(v_int64x2, int64, s64, vec_dword2) +OPENCV_HAL_IMPL_VSX_INITVEC(v_float32x4, float, f32, vec_float4) +OPENCV_HAL_IMPL_VSX_INITVEC(v_float64x2, double, f64, vec_double2) + +#define OPENCV_HAL_IMPL_VSX_LOADSTORE_C(_Tpvec, _Tp, ld, ld_a, st, st_a) \ +inline _Tpvec v_load(const _Tp* ptr) \ +{ return _Tpvec(ld(0, ptr)); } \ +inline _Tpvec v_load_aligned(VSX_UNUSED(const _Tp* ptr)) \ +{ return _Tpvec(ld_a(0, ptr)); } \ +inline _Tpvec v_load_low(const _Tp* ptr) \ +{ return _Tpvec(vec_ld_l8(ptr)); } \ +inline _Tpvec v_load_halves(const _Tp* ptr0, const _Tp* ptr1) \ +{ return _Tpvec(vec_mergesqh(vec_ld_l8(ptr0), vec_ld_l8(ptr1))); } \ +inline void v_store(_Tp* ptr, const _Tpvec& a) \ +{ st(a.val, 0, ptr); } \ +inline void v_store_aligned(VSX_UNUSED(_Tp* ptr), const _Tpvec& a) \ +{ st_a(a.val, 0, ptr); } \ +inline void v_store_aligned_nocache(VSX_UNUSED(_Tp* ptr), const _Tpvec& a) \ +{ st_a(a.val, 0, ptr); } \ +inline void v_store(_Tp* ptr, const _Tpvec& a, hal::StoreMode mode) \ +{ if(mode == hal::STORE_UNALIGNED) st(a.val, 0, ptr); else st_a(a.val, 0, ptr); } \ +inline void v_store_low(_Tp* ptr, const _Tpvec& a) \ +{ vec_st_l8(a.val, ptr); } \ +inline void v_store_high(_Tp* ptr, const _Tpvec& a) \ +{ vec_st_h8(a.val, ptr); } + +// working around gcc bug for aligned ld/st +// if runtime check for vec_ld/st fail we failback to unaligned ld/st +// https://github.com/opencv/opencv/issues/13211 +#ifdef CV_COMPILER_VSX_BROKEN_ALIGNED + #define OPENCV_HAL_IMPL_VSX_LOADSTORE(_Tpvec, _Tp) \ + OPENCV_HAL_IMPL_VSX_LOADSTORE_C(_Tpvec, _Tp, vsx_ld, vsx_ld, vsx_st, vsx_st) +#else + #define OPENCV_HAL_IMPL_VSX_LOADSTORE(_Tpvec, _Tp) \ + OPENCV_HAL_IMPL_VSX_LOADSTORE_C(_Tpvec, _Tp, vsx_ld, vec_ld, vsx_st, vec_st) +#endif + +OPENCV_HAL_IMPL_VSX_LOADSTORE(v_uint8x16, uchar) +OPENCV_HAL_IMPL_VSX_LOADSTORE(v_int8x16, schar) +OPENCV_HAL_IMPL_VSX_LOADSTORE(v_uint16x8, ushort) +OPENCV_HAL_IMPL_VSX_LOADSTORE(v_int16x8, short) +OPENCV_HAL_IMPL_VSX_LOADSTORE(v_uint32x4, uint) +OPENCV_HAL_IMPL_VSX_LOADSTORE(v_int32x4, int) +OPENCV_HAL_IMPL_VSX_LOADSTORE(v_float32x4, float) + +OPENCV_HAL_IMPL_VSX_LOADSTORE_C(v_float64x2, double, vsx_ld, vsx_ld, vsx_st, vsx_st) +OPENCV_HAL_IMPL_VSX_LOADSTORE_C(v_uint64x2, uint64, vsx_ld2, vsx_ld2, vsx_st2, vsx_st2) +OPENCV_HAL_IMPL_VSX_LOADSTORE_C(v_int64x2, int64, vsx_ld2, vsx_ld2, vsx_st2, vsx_st2) + +//////////////// Value reordering /////////////// + +/* de&interleave */ +#define OPENCV_HAL_IMPL_VSX_INTERLEAVE(_Tp, _Tpvec) \ +inline void v_load_deinterleave(const _Tp* ptr, _Tpvec& a, _Tpvec& b) \ +{ vec_ld_deinterleave(ptr, a.val, b.val);} \ +inline void v_load_deinterleave(const _Tp* ptr, _Tpvec& a, \ + _Tpvec& b, _Tpvec& c) \ +{ 
vec_ld_deinterleave(ptr, a.val, b.val, c.val); } \ +inline void v_load_deinterleave(const _Tp* ptr, _Tpvec& a, _Tpvec& b, \ + _Tpvec& c, _Tpvec& d) \ +{ vec_ld_deinterleave(ptr, a.val, b.val, c.val, d.val); } \ +inline void v_store_interleave(_Tp* ptr, const _Tpvec& a, const _Tpvec& b, \ + hal::StoreMode /*mode*/=hal::STORE_UNALIGNED) \ +{ vec_st_interleave(a.val, b.val, ptr); } \ +inline void v_store_interleave(_Tp* ptr, const _Tpvec& a, \ + const _Tpvec& b, const _Tpvec& c, \ + hal::StoreMode /*mode*/=hal::STORE_UNALIGNED) \ +{ vec_st_interleave(a.val, b.val, c.val, ptr); } \ +inline void v_store_interleave(_Tp* ptr, const _Tpvec& a, const _Tpvec& b, \ + const _Tpvec& c, const _Tpvec& d, \ + hal::StoreMode /*mode*/=hal::STORE_UNALIGNED) \ +{ vec_st_interleave(a.val, b.val, c.val, d.val, ptr); } + +OPENCV_HAL_IMPL_VSX_INTERLEAVE(uchar, v_uint8x16) +OPENCV_HAL_IMPL_VSX_INTERLEAVE(schar, v_int8x16) +OPENCV_HAL_IMPL_VSX_INTERLEAVE(ushort, v_uint16x8) +OPENCV_HAL_IMPL_VSX_INTERLEAVE(short, v_int16x8) +OPENCV_HAL_IMPL_VSX_INTERLEAVE(uint, v_uint32x4) +OPENCV_HAL_IMPL_VSX_INTERLEAVE(int, v_int32x4) +OPENCV_HAL_IMPL_VSX_INTERLEAVE(float, v_float32x4) +OPENCV_HAL_IMPL_VSX_INTERLEAVE(double, v_float64x2) +OPENCV_HAL_IMPL_VSX_INTERLEAVE(int64, v_int64x2) +OPENCV_HAL_IMPL_VSX_INTERLEAVE(uint64, v_uint64x2) + +/* Expand */ +#define OPENCV_HAL_IMPL_VSX_EXPAND(_Tpvec, _Tpwvec, _Tp, fl, fh) \ +inline void v_expand(const _Tpvec& a, _Tpwvec& b0, _Tpwvec& b1) \ +{ \ + b0.val = fh(a.val); \ + b1.val = fl(a.val); \ +} \ +inline _Tpwvec v_expand_low(const _Tpvec& a) \ +{ return _Tpwvec(fh(a.val)); } \ +inline _Tpwvec v_expand_high(const _Tpvec& a) \ +{ return _Tpwvec(fl(a.val)); } \ +inline _Tpwvec v_load_expand(const _Tp* ptr) \ +{ return _Tpwvec(fh(vec_ld_l8(ptr))); } + +OPENCV_HAL_IMPL_VSX_EXPAND(v_uint8x16, v_uint16x8, uchar, vec_unpacklu, vec_unpackhu) +OPENCV_HAL_IMPL_VSX_EXPAND(v_int8x16, v_int16x8, schar, vec_unpackl, vec_unpackh) +OPENCV_HAL_IMPL_VSX_EXPAND(v_uint16x8, v_uint32x4, ushort, vec_unpacklu, vec_unpackhu) +OPENCV_HAL_IMPL_VSX_EXPAND(v_int16x8, v_int32x4, short, vec_unpackl, vec_unpackh) +OPENCV_HAL_IMPL_VSX_EXPAND(v_uint32x4, v_uint64x2, uint, vec_unpacklu, vec_unpackhu) +OPENCV_HAL_IMPL_VSX_EXPAND(v_int32x4, v_int64x2, int, vec_unpackl, vec_unpackh) + +/* Load and zero expand a 4 byte value into the second dword, first is don't care. */ +#if !defined(CV_COMPILER_VSX_BROKEN_ASM) + #define _LXSIWZX(out, ptr, T) __asm__ ("lxsiwzx %x0, 0, %1\r\n" : "=wa"(out) : "r" (ptr) : "memory"); +#else + /* This is compiler-agnostic, but will introduce an unneeded splat on the critical path. */ + #define _LXSIWZX(out, ptr, T) out = (T)vec_udword2_sp(*(uint32_t*)(ptr)); +#endif + +inline v_uint32x4 v_load_expand_q(const uchar* ptr) +{ + // Zero-extend the extra 24B instead of unpacking. Usually faster in small kernel + // Likewise note, value is zero extended and upper 4 bytes are zero'ed. 
+ vec_uchar16 pmu = {8, 12, 12, 12, 9, 12, 12, 12, 10, 12, 12, 12, 11, 12, 12, 12}; + vec_uchar16 out; + + _LXSIWZX(out, ptr, vec_uchar16); + out = vec_perm(out, out, pmu); + return v_uint32x4((vec_uint4)out); +} + +inline v_int32x4 v_load_expand_q(const schar* ptr) +{ + vec_char16 out; + vec_short8 outs; + vec_int4 outw; + + _LXSIWZX(out, ptr, vec_char16); + outs = vec_unpackl(out); + outw = vec_unpackh(outs); + return v_int32x4(outw); +} + +/* pack */ +#define OPENCV_HAL_IMPL_VSX_PACK(_Tpvec, _Tp, _Tpwvec, _Tpvn, _Tpdel, sfnc, pkfnc, addfnc, pack) \ +inline _Tpvec v_##pack(const _Tpwvec& a, const _Tpwvec& b) \ +{ \ + return _Tpvec(pkfnc(a.val, b.val)); \ +} \ +inline void v_##pack##_store(_Tp* ptr, const _Tpwvec& a) \ +{ \ + vec_st_l8(pkfnc(a.val, a.val), ptr); \ +} \ +template \ +inline _Tpvec v_rshr_##pack(const _Tpwvec& a, const _Tpwvec& b) \ +{ \ + const __vector _Tpvn vn = vec_splats((_Tpvn)n); \ + const __vector _Tpdel delta = vec_splats((_Tpdel)((_Tpdel)1 << (n-1))); \ + return _Tpvec(pkfnc(sfnc(addfnc(a.val, delta), vn), sfnc(addfnc(b.val, delta), vn))); \ +} \ +template \ +inline void v_rshr_##pack##_store(_Tp* ptr, const _Tpwvec& a) \ +{ \ + const __vector _Tpvn vn = vec_splats((_Tpvn)n); \ + const __vector _Tpdel delta = vec_splats((_Tpdel)((_Tpdel)1 << (n-1))); \ + vec_st_l8(pkfnc(sfnc(addfnc(a.val, delta), vn), delta), ptr); \ +} + +OPENCV_HAL_IMPL_VSX_PACK(v_uint8x16, uchar, v_uint16x8, unsigned short, unsigned short, + vec_sr, vec_packs, vec_adds, pack) +OPENCV_HAL_IMPL_VSX_PACK(v_int8x16, schar, v_int16x8, unsigned short, short, + vec_sra, vec_packs, vec_adds, pack) + +OPENCV_HAL_IMPL_VSX_PACK(v_uint16x8, ushort, v_uint32x4, unsigned int, unsigned int, + vec_sr, vec_packs, vec_add, pack) +OPENCV_HAL_IMPL_VSX_PACK(v_int16x8, short, v_int32x4, unsigned int, int, + vec_sra, vec_packs, vec_add, pack) + +OPENCV_HAL_IMPL_VSX_PACK(v_uint32x4, uint, v_uint64x2, unsigned long long, unsigned long long, + vec_sr, vec_pack, vec_add, pack) +OPENCV_HAL_IMPL_VSX_PACK(v_int32x4, int, v_int64x2, unsigned long long, long long, + vec_sra, vec_pack, vec_add, pack) + +OPENCV_HAL_IMPL_VSX_PACK(v_uint8x16, uchar, v_int16x8, unsigned short, short, + vec_sra, vec_packsu, vec_adds, pack_u) +OPENCV_HAL_IMPL_VSX_PACK(v_uint16x8, ushort, v_int32x4, unsigned int, int, + vec_sra, vec_packsu, vec_add, pack_u) +// Following variant is not implemented on other platforms: +//OPENCV_HAL_IMPL_VSX_PACK(v_uint32x4, uint, v_int64x2, unsigned long long, long long, +// vec_sra, vec_packsu, vec_add, pack_u) + +// pack boolean +inline v_uint8x16 v_pack_b(const v_uint16x8& a, const v_uint16x8& b) +{ + vec_uchar16 ab = vec_pack(a.val, b.val); + return v_uint8x16(ab); +} + +inline v_uint8x16 v_pack_b(const v_uint32x4& a, const v_uint32x4& b, + const v_uint32x4& c, const v_uint32x4& d) +{ + vec_ushort8 ab = vec_pack(a.val, b.val); + vec_ushort8 cd = vec_pack(c.val, d.val); + return v_uint8x16(vec_pack(ab, cd)); +} + +inline v_uint8x16 v_pack_b(const v_uint64x2& a, const v_uint64x2& b, const v_uint64x2& c, + const v_uint64x2& d, const v_uint64x2& e, const v_uint64x2& f, + const v_uint64x2& g, const v_uint64x2& h) +{ + vec_uint4 ab = vec_pack(a.val, b.val); + vec_uint4 cd = vec_pack(c.val, d.val); + vec_uint4 ef = vec_pack(e.val, f.val); + vec_uint4 gh = vec_pack(g.val, h.val); + + vec_ushort8 abcd = vec_pack(ab, cd); + vec_ushort8 efgh = vec_pack(ef, gh); + return v_uint8x16(vec_pack(abcd, efgh)); +} + +/* Recombine */ +template +inline void v_zip(const _Tpvec& a0, const _Tpvec& a1, _Tpvec& b0, _Tpvec& b1) +{ + b0.val = 
vec_mergeh(a0.val, a1.val); + b1.val = vec_mergel(a0.val, a1.val); +} + +template +inline _Tpvec v_combine_high(const _Tpvec& a, const _Tpvec& b) +{ return _Tpvec(vec_mergesql(a.val, b.val)); } + +template +inline _Tpvec v_combine_low(const _Tpvec& a, const _Tpvec& b) +{ return _Tpvec(vec_mergesqh(a.val, b.val)); } + +template +inline void v_recombine(const _Tpvec& a, const _Tpvec& b, _Tpvec& c, _Tpvec& d) +{ + c.val = vec_mergesqh(a.val, b.val); + d.val = vec_mergesql(a.val, b.val); +} + +////////// Arithmetic, bitwise and comparison operations ///////// + +/* Element-wise binary and unary operations */ +/** Arithmetics **/ +#define OPENCV_HAL_IMPL_VSX_BIN_OP(bin_op, _Tpvec, intrin) \ +inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(intrin(a.val, b.val)); } \ +inline _Tpvec& operator bin_op##= (_Tpvec& a, const _Tpvec& b) \ +{ a.val = intrin(a.val, b.val); return a; } + +OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_uint8x16, vec_adds) +OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_uint8x16, vec_subs) +OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_int8x16, vec_adds) +OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_int8x16, vec_subs) +OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_uint16x8, vec_adds) +OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_uint16x8, vec_subs) +OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_int16x8, vec_adds) +OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_int16x8, vec_subs) +OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_uint32x4, vec_add) +OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_uint32x4, vec_sub) +OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_uint32x4, vec_mul) +OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_int32x4, vec_add) +OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_int32x4, vec_sub) +OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_int32x4, vec_mul) +OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_float32x4, vec_add) +OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_float32x4, vec_sub) +OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_float32x4, vec_mul) +OPENCV_HAL_IMPL_VSX_BIN_OP(/, v_float32x4, vec_div) +OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_float64x2, vec_add) +OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_float64x2, vec_sub) +OPENCV_HAL_IMPL_VSX_BIN_OP(*, v_float64x2, vec_mul) +OPENCV_HAL_IMPL_VSX_BIN_OP(/, v_float64x2, vec_div) +OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_uint64x2, vec_add) +OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_uint64x2, vec_sub) +OPENCV_HAL_IMPL_VSX_BIN_OP(+, v_int64x2, vec_add) +OPENCV_HAL_IMPL_VSX_BIN_OP(-, v_int64x2, vec_sub) + +// saturating multiply +#define OPENCV_HAL_IMPL_VSX_MUL_SAT(_Tpvec, _Tpwvec) \ + inline _Tpvec operator * (const _Tpvec& a, const _Tpvec& b) \ + { \ + _Tpwvec c, d; \ + v_mul_expand(a, b, c, d); \ + return v_pack(c, d); \ + } \ + inline _Tpvec& operator *= (_Tpvec& a, const _Tpvec& b) \ + { a = a * b; return a; } + +OPENCV_HAL_IMPL_VSX_MUL_SAT(v_int8x16, v_int16x8) +OPENCV_HAL_IMPL_VSX_MUL_SAT(v_uint8x16, v_uint16x8) +OPENCV_HAL_IMPL_VSX_MUL_SAT(v_int16x8, v_int32x4) +OPENCV_HAL_IMPL_VSX_MUL_SAT(v_uint16x8, v_uint32x4) + +template +inline void v_mul_expand(const Tvec& a, const Tvec& b, Twvec& c, Twvec& d) +{ + Twvec p0 = Twvec(vec_mule(a.val, b.val)); + Twvec p1 = Twvec(vec_mulo(a.val, b.val)); + v_zip(p0, p1, c, d); +} + +inline v_int16x8 v_mul_hi(const v_int16x8& a, const v_int16x8& b) +{ + vec_int4 p0 = vec_mule(a.val, b.val); + vec_int4 p1 = vec_mulo(a.val, b.val); + static const vec_uchar16 perm = {2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31}; + return v_int16x8(vec_perm(vec_short8_c(p0), vec_short8_c(p1), perm)); +} +inline v_uint16x8 v_mul_hi(const v_uint16x8& a, const v_uint16x8& b) +{ + vec_uint4 p0 = vec_mule(a.val, b.val); + vec_uint4 p1 = vec_mulo(a.val, b.val); + static const vec_uchar16 perm = {2, 3, 18, 19, 6, 7, 22, 23, 
10, 11, 26, 27, 14, 15, 30, 31}; + return v_uint16x8(vec_perm(vec_ushort8_c(p0), vec_ushort8_c(p1), perm)); +} + +/** Non-saturating arithmetics **/ +#define OPENCV_HAL_IMPL_VSX_BIN_FUNC(func, intrin) \ +template \ +inline _Tpvec func(const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(intrin(a.val, b.val)); } + +OPENCV_HAL_IMPL_VSX_BIN_FUNC(v_add_wrap, vec_add) +OPENCV_HAL_IMPL_VSX_BIN_FUNC(v_sub_wrap, vec_sub) +OPENCV_HAL_IMPL_VSX_BIN_FUNC(v_mul_wrap, vec_mul) + +/** Bitwise shifts **/ +#define OPENCV_HAL_IMPL_VSX_SHIFT_OP(_Tpvec, shr, splfunc) \ +inline _Tpvec operator << (const _Tpvec& a, int imm) \ +{ return _Tpvec(vec_sl(a.val, splfunc(imm))); } \ +inline _Tpvec operator >> (const _Tpvec& a, int imm) \ +{ return _Tpvec(shr(a.val, splfunc(imm))); } \ +template inline _Tpvec v_shl(const _Tpvec& a) \ +{ return _Tpvec(vec_sl(a.val, splfunc(imm))); } \ +template inline _Tpvec v_shr(const _Tpvec& a) \ +{ return _Tpvec(shr(a.val, splfunc(imm))); } + +OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_uint8x16, vec_sr, vec_uchar16_sp) +OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_uint16x8, vec_sr, vec_ushort8_sp) +OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_uint32x4, vec_sr, vec_uint4_sp) +OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_uint64x2, vec_sr, vec_udword2_sp) +// algebraic right shift +OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_int8x16, vec_sra, vec_uchar16_sp) +OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_int16x8, vec_sra, vec_ushort8_sp) +OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_int32x4, vec_sra, vec_uint4_sp) +OPENCV_HAL_IMPL_VSX_SHIFT_OP(v_int64x2, vec_sra, vec_udword2_sp) + +/** Bitwise logic **/ +#define OPENCV_HAL_IMPL_VSX_LOGIC_OP(_Tpvec) \ +OPENCV_HAL_IMPL_VSX_BIN_OP(&, _Tpvec, vec_and) \ +OPENCV_HAL_IMPL_VSX_BIN_OP(|, _Tpvec, vec_or) \ +OPENCV_HAL_IMPL_VSX_BIN_OP(^, _Tpvec, vec_xor) \ +inline _Tpvec operator ~ (const _Tpvec& a) \ +{ return _Tpvec(vec_not(a.val)); } + +OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_uint8x16) +OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_int8x16) +OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_uint16x8) +OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_int16x8) +OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_uint32x4) +OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_int32x4) +OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_uint64x2) +OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_int64x2) +OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_float32x4) +OPENCV_HAL_IMPL_VSX_LOGIC_OP(v_float64x2) + +/** Bitwise select **/ +#define OPENCV_HAL_IMPL_VSX_SELECT(_Tpvec, cast) \ +inline _Tpvec v_select(const _Tpvec& mask, const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(vec_sel(b.val, a.val, cast(mask.val))); } + +OPENCV_HAL_IMPL_VSX_SELECT(v_uint8x16, vec_bchar16_c) +OPENCV_HAL_IMPL_VSX_SELECT(v_int8x16, vec_bchar16_c) +OPENCV_HAL_IMPL_VSX_SELECT(v_uint16x8, vec_bshort8_c) +OPENCV_HAL_IMPL_VSX_SELECT(v_int16x8, vec_bshort8_c) +OPENCV_HAL_IMPL_VSX_SELECT(v_uint32x4, vec_bint4_c) +OPENCV_HAL_IMPL_VSX_SELECT(v_int32x4, vec_bint4_c) +OPENCV_HAL_IMPL_VSX_SELECT(v_float32x4, vec_bint4_c) +OPENCV_HAL_IMPL_VSX_SELECT(v_float64x2, vec_bdword2_c) + +/** Comparison **/ +#define OPENCV_HAL_IMPL_VSX_INT_CMP_OP(_Tpvec) \ +inline _Tpvec operator == (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(vec_cmpeq(a.val, b.val)); } \ +inline _Tpvec operator != (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(vec_cmpne(a.val, b.val)); } \ +inline _Tpvec operator < (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(vec_cmplt(a.val, b.val)); } \ +inline _Tpvec operator > (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(vec_cmpgt(a.val, b.val)); } \ +inline _Tpvec operator <= (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(vec_cmple(a.val, b.val)); } \ +inline _Tpvec operator >= (const 
_Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(vec_cmpge(a.val, b.val)); } + +OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_uint8x16) +OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_int8x16) +OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_uint16x8) +OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_int16x8) +OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_uint32x4) +OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_int32x4) +OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_float32x4) +OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_float64x2) +OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_uint64x2) +OPENCV_HAL_IMPL_VSX_INT_CMP_OP(v_int64x2) + +inline v_float32x4 v_not_nan(const v_float32x4& a) +{ return v_float32x4(vec_cmpeq(a.val, a.val)); } +inline v_float64x2 v_not_nan(const v_float64x2& a) +{ return v_float64x2(vec_cmpeq(a.val, a.val)); } + +/** min/max **/ +OPENCV_HAL_IMPL_VSX_BIN_FUNC(v_min, vec_min) +OPENCV_HAL_IMPL_VSX_BIN_FUNC(v_max, vec_max) + +/** Rotate **/ +#define OPENCV_IMPL_VSX_ROTATE(_Tpvec, suffix, shf, cast) \ +template \ +inline _Tpvec v_rotate_##suffix(const _Tpvec& a) \ +{ \ + const int wd = imm * sizeof(typename _Tpvec::lane_type); \ + if (wd > 15) \ + return _Tpvec::zero(); \ + return _Tpvec((cast)shf(vec_uchar16_c(a.val), vec_uchar16_sp(wd << 3))); \ +} + +#define OPENCV_IMPL_VSX_ROTATE_LR(_Tpvec, cast) \ +OPENCV_IMPL_VSX_ROTATE(_Tpvec, left, vec_slo, cast) \ +OPENCV_IMPL_VSX_ROTATE(_Tpvec, right, vec_sro, cast) + +OPENCV_IMPL_VSX_ROTATE_LR(v_uint8x16, vec_uchar16) +OPENCV_IMPL_VSX_ROTATE_LR(v_int8x16, vec_char16) +OPENCV_IMPL_VSX_ROTATE_LR(v_uint16x8, vec_ushort8) +OPENCV_IMPL_VSX_ROTATE_LR(v_int16x8, vec_short8) +OPENCV_IMPL_VSX_ROTATE_LR(v_uint32x4, vec_uint4) +OPENCV_IMPL_VSX_ROTATE_LR(v_int32x4, vec_int4) +OPENCV_IMPL_VSX_ROTATE_LR(v_float32x4, vec_float4) +OPENCV_IMPL_VSX_ROTATE_LR(v_uint64x2, vec_udword2) +OPENCV_IMPL_VSX_ROTATE_LR(v_int64x2, vec_dword2) +OPENCV_IMPL_VSX_ROTATE_LR(v_float64x2, vec_double2) + +template +inline _Tpvec v_rotate_right(const _Tpvec& a, const _Tpvec& b) +{ + enum { CV_SHIFT = 16 - imm * (sizeof(typename _Tpvec::lane_type)) }; + if (CV_SHIFT == 16) + return a; +#ifdef __IBMCPP__ + return _Tpvec(vec_sld(b.val, a.val, CV_SHIFT & 15)); +#else + return _Tpvec(vec_sld(b.val, a.val, CV_SHIFT)); +#endif +} + +template +inline _Tpvec v_rotate_left(const _Tpvec& a, const _Tpvec& b) +{ + enum { CV_SHIFT = imm * (sizeof(typename _Tpvec::lane_type)) }; + if (CV_SHIFT == 16) + return b; + return _Tpvec(vec_sld(a.val, b.val, CV_SHIFT)); +} + +#define OPENCV_IMPL_VSX_ROTATE_64_2RG(_Tpvec, suffix, rg1, rg2) \ +template \ +inline _Tpvec v_rotate_##suffix(const _Tpvec& a, const _Tpvec& b) \ +{ \ + if (imm == 1) \ + return _Tpvec(vec_permi(rg1.val, rg2.val, 2)); \ + return imm ? 
b : a; \ +} + +#define OPENCV_IMPL_VSX_ROTATE_64_2RG_LR(_Tpvec) \ +OPENCV_IMPL_VSX_ROTATE_64_2RG(_Tpvec, left, b, a) \ +OPENCV_IMPL_VSX_ROTATE_64_2RG(_Tpvec, right, a, b) + +OPENCV_IMPL_VSX_ROTATE_64_2RG_LR(v_float64x2) +OPENCV_IMPL_VSX_ROTATE_64_2RG_LR(v_uint64x2) +OPENCV_IMPL_VSX_ROTATE_64_2RG_LR(v_int64x2) + +/* Reverse */ +inline v_uint8x16 v_reverse(const v_uint8x16 &a) +{ + static const vec_uchar16 perm = {15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}; + vec_uchar16 vec = (vec_uchar16)a.val; + return v_uint8x16(vec_perm(vec, vec, perm)); +} + +inline v_int8x16 v_reverse(const v_int8x16 &a) +{ return v_reinterpret_as_s8(v_reverse(v_reinterpret_as_u8(a))); } + +inline v_uint16x8 v_reverse(const v_uint16x8 &a) +{ + static const vec_uchar16 perm = {14, 15, 12, 13, 10, 11, 8, 9, 6, 7, 4, 5, 2, 3, 0, 1}; + vec_uchar16 vec = (vec_uchar16)a.val; + return v_reinterpret_as_u16(v_uint8x16(vec_perm(vec, vec, perm))); +} + +inline v_int16x8 v_reverse(const v_int16x8 &a) +{ return v_reinterpret_as_s16(v_reverse(v_reinterpret_as_u16(a))); } + +inline v_uint32x4 v_reverse(const v_uint32x4 &a) +{ + static const vec_uchar16 perm = {12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3}; + vec_uchar16 vec = (vec_uchar16)a.val; + return v_reinterpret_as_u32(v_uint8x16(vec_perm(vec, vec, perm))); +} + +inline v_int32x4 v_reverse(const v_int32x4 &a) +{ return v_reinterpret_as_s32(v_reverse(v_reinterpret_as_u32(a))); } + +inline v_float32x4 v_reverse(const v_float32x4 &a) +{ return v_reinterpret_as_f32(v_reverse(v_reinterpret_as_u32(a))); } + +inline v_uint64x2 v_reverse(const v_uint64x2 &a) +{ + static const vec_uchar16 perm = {8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7}; + vec_uchar16 vec = (vec_uchar16)a.val; + return v_reinterpret_as_u64(v_uint8x16(vec_perm(vec, vec, perm))); +} + +inline v_int64x2 v_reverse(const v_int64x2 &a) +{ return v_reinterpret_as_s64(v_reverse(v_reinterpret_as_u64(a))); } + +inline v_float64x2 v_reverse(const v_float64x2 &a) +{ return v_reinterpret_as_f64(v_reverse(v_reinterpret_as_u64(a))); } + +/* Extract */ +template +inline _Tpvec v_extract(const _Tpvec& a, const _Tpvec& b) +{ return v_rotate_right(a, b); } + +////////// Reduce and mask ///////// + +/** Reduce **/ +inline uint v_reduce_sum(const v_uint8x16& a) +{ + const vec_uint4 zero4 = vec_uint4_z; + vec_uint4 sum4 = vec_sum4s(a.val, zero4); + return (uint)vec_extract(vec_sums(vec_int4_c(sum4), vec_int4_c(zero4)), 3); +} +inline int v_reduce_sum(const v_int8x16& a) +{ + const vec_int4 zero4 = vec_int4_z; + vec_int4 sum4 = vec_sum4s(a.val, zero4); + return (int)vec_extract(vec_sums(sum4, zero4), 3); +} +inline int v_reduce_sum(const v_int16x8& a) +{ + const vec_int4 zero = vec_int4_z; + return saturate_cast(vec_extract(vec_sums(vec_sum4s(a.val, zero), zero), 3)); +} +inline uint v_reduce_sum(const v_uint16x8& a) +{ + const vec_int4 v4 = vec_int4_c(vec_unpackhu(vec_adds(a.val, vec_sld(a.val, a.val, 8)))); + return saturate_cast(vec_extract(vec_sums(v4, vec_int4_z), 3)); +} + +#define OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(_Tpvec, _Tpvec2, scalartype, suffix, func) \ +inline scalartype v_reduce_##suffix(const _Tpvec& a) \ +{ \ + const _Tpvec2 rs = func(a.val, vec_sld(a.val, a.val, 8)); \ + return vec_extract(func(rs, vec_sld(rs, rs, 4)), 0); \ +} +OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_uint32x4, vec_uint4, uint, sum, vec_add) +OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_uint32x4, vec_uint4, uint, max, vec_max) +OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_uint32x4, vec_uint4, uint, min, vec_min) +OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_int32x4, 
vec_int4, int, sum, vec_add) +OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_int32x4, vec_int4, int, max, vec_max) +OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_int32x4, vec_int4, int, min, vec_min) +OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_float32x4, vec_float4, float, sum, vec_add) +OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_float32x4, vec_float4, float, max, vec_max) +OPENCV_HAL_IMPL_VSX_REDUCE_OP_4(v_float32x4, vec_float4, float, min, vec_min) + +inline uint64 v_reduce_sum(const v_uint64x2& a) +{ + return vec_extract(vec_add(a.val, vec_permi(a.val, a.val, 3)), 0); +} +inline int64 v_reduce_sum(const v_int64x2& a) +{ + return vec_extract(vec_add(a.val, vec_permi(a.val, a.val, 3)), 0); +} +inline double v_reduce_sum(const v_float64x2& a) +{ + return vec_extract(vec_add(a.val, vec_permi(a.val, a.val, 3)), 0); +} + +#define OPENCV_HAL_IMPL_VSX_REDUCE_OP_8(_Tpvec, _Tpvec2, scalartype, suffix, func) \ +inline scalartype v_reduce_##suffix(const _Tpvec& a) \ +{ \ + _Tpvec2 rs = func(a.val, vec_sld(a.val, a.val, 8)); \ + rs = func(rs, vec_sld(rs, rs, 4)); \ + return vec_extract(func(rs, vec_sld(rs, rs, 2)), 0); \ +} +OPENCV_HAL_IMPL_VSX_REDUCE_OP_8(v_uint16x8, vec_ushort8, ushort, max, vec_max) +OPENCV_HAL_IMPL_VSX_REDUCE_OP_8(v_uint16x8, vec_ushort8, ushort, min, vec_min) +OPENCV_HAL_IMPL_VSX_REDUCE_OP_8(v_int16x8, vec_short8, short, max, vec_max) +OPENCV_HAL_IMPL_VSX_REDUCE_OP_8(v_int16x8, vec_short8, short, min, vec_min) + +#define OPENCV_HAL_IMPL_VSX_REDUCE_OP_16(_Tpvec, _Tpvec2, scalartype, suffix, func) \ +inline scalartype v_reduce_##suffix(const _Tpvec& a) \ +{ \ + _Tpvec2 rs = func(a.val, vec_sld(a.val, a.val, 8)); \ + rs = func(rs, vec_sld(rs, rs, 4)); \ + rs = func(rs, vec_sld(rs, rs, 2)); \ + return vec_extract(func(rs, vec_sld(rs, rs, 1)), 0); \ +} +OPENCV_HAL_IMPL_VSX_REDUCE_OP_16(v_uint8x16, vec_uchar16, uchar, max, vec_max) +OPENCV_HAL_IMPL_VSX_REDUCE_OP_16(v_uint8x16, vec_uchar16, uchar, min, vec_min) +OPENCV_HAL_IMPL_VSX_REDUCE_OP_16(v_int8x16, vec_char16, schar, max, vec_max) +OPENCV_HAL_IMPL_VSX_REDUCE_OP_16(v_int8x16, vec_char16, schar, min, vec_min) + +inline v_float32x4 v_reduce_sum4(const v_float32x4& a, const v_float32x4& b, + const v_float32x4& c, const v_float32x4& d) +{ + vec_float4 ac = vec_add(vec_mergel(a.val, c.val), vec_mergeh(a.val, c.val)); + ac = vec_add(ac, vec_sld(ac, ac, 8)); + + vec_float4 bd = vec_add(vec_mergel(b.val, d.val), vec_mergeh(b.val, d.val)); + bd = vec_add(bd, vec_sld(bd, bd, 8)); + return v_float32x4(vec_mergeh(ac, bd)); +} + +inline unsigned v_reduce_sad(const v_uint8x16& a, const v_uint8x16& b) +{ + const vec_uint4 zero4 = vec_uint4_z; + vec_uint4 sum4 = vec_sum4s(vec_absd(a.val, b.val), zero4); + return (unsigned)vec_extract(vec_sums(vec_int4_c(sum4), vec_int4_c(zero4)), 3); +} +inline unsigned v_reduce_sad(const v_int8x16& a, const v_int8x16& b) +{ + const vec_int4 zero4 = vec_int4_z; + vec_char16 ad = vec_abss(vec_subs(a.val, b.val)); + vec_int4 sum4 = vec_sum4s(ad, zero4); + return (unsigned)vec_extract(vec_sums(sum4, zero4), 3); +} +inline unsigned v_reduce_sad(const v_uint16x8& a, const v_uint16x8& b) +{ + vec_ushort8 ad = vec_absd(a.val, b.val); + VSX_UNUSED(vec_int4) sum = vec_sums(vec_int4_c(vec_unpackhu(ad)) + vec_int4_c(vec_unpacklu(ad)), vec_int4_z); + return (unsigned)vec_extract(sum, 3); +} +inline unsigned v_reduce_sad(const v_int16x8& a, const v_int16x8& b) +{ + const vec_int4 zero4 = vec_int4_z; + vec_short8 ad = vec_abss(vec_subs(a.val, b.val)); + vec_int4 sum4 = vec_sum4s(ad, zero4); + return (unsigned)vec_extract(vec_sums(sum4, zero4), 3); +} +inline unsigned 
v_reduce_sad(const v_uint32x4& a, const v_uint32x4& b) +{ + const vec_uint4 ad = vec_absd(a.val, b.val); + const vec_uint4 rd = vec_add(ad, vec_sld(ad, ad, 8)); + return vec_extract(vec_add(rd, vec_sld(rd, rd, 4)), 0); +} +inline unsigned v_reduce_sad(const v_int32x4& a, const v_int32x4& b) +{ + vec_int4 ad = vec_abss(vec_sub(a.val, b.val)); + return (unsigned)vec_extract(vec_sums(ad, vec_int4_z), 3); +} +inline float v_reduce_sad(const v_float32x4& a, const v_float32x4& b) +{ + const vec_float4 ad = vec_abs(vec_sub(a.val, b.val)); + const vec_float4 rd = vec_add(ad, vec_sld(ad, ad, 8)); + return vec_extract(vec_add(rd, vec_sld(rd, rd, 4)), 0); +} + +/** Popcount **/ +inline v_uint8x16 v_popcount(const v_uint8x16& a) +{ return v_uint8x16(vec_popcntu(a.val)); } +inline v_uint8x16 v_popcount(const v_int8x16& a) +{ return v_uint8x16(vec_popcntu(a.val)); } +inline v_uint16x8 v_popcount(const v_uint16x8& a) +{ return v_uint16x8(vec_popcntu(a.val)); } +inline v_uint16x8 v_popcount(const v_int16x8& a) +{ return v_uint16x8(vec_popcntu(a.val)); } +inline v_uint32x4 v_popcount(const v_uint32x4& a) +{ return v_uint32x4(vec_popcntu(a.val)); } +inline v_uint32x4 v_popcount(const v_int32x4& a) +{ return v_uint32x4(vec_popcntu(a.val)); } +inline v_uint64x2 v_popcount(const v_uint64x2& a) +{ return v_uint64x2(vec_popcntu(a.val)); } +inline v_uint64x2 v_popcount(const v_int64x2& a) +{ return v_uint64x2(vec_popcntu(a.val)); } + +/** Mask **/ +inline int v_signmask(const v_uint8x16& a) +{ + static const vec_uchar16 qperm = {120, 112, 104, 96, 88, 80, 72, 64, 56, 48, 40, 32, 24, 16, 8, 0}; + return vec_extract((vec_int4)vec_vbpermq(v_reinterpret_as_u8(a).val, qperm), 2); +} +inline int v_signmask(const v_int8x16& a) +{ return v_signmask(v_reinterpret_as_u8(a)); } + +inline int v_signmask(const v_int16x8& a) +{ + static const vec_uchar16 qperm = {112, 96, 80, 64, 48, 32, 16, 0, 128, 128, 128, 128, 128, 128, 128, 128}; + return vec_extract((vec_int4)vec_vbpermq(v_reinterpret_as_u8(a).val, qperm), 2); +} +inline int v_signmask(const v_uint16x8& a) +{ return v_signmask(v_reinterpret_as_s16(a)); } + +inline int v_signmask(const v_int32x4& a) +{ + static const vec_uchar16 qperm = {96, 64, 32, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}; + return vec_extract((vec_int4)vec_vbpermq(v_reinterpret_as_u8(a).val, qperm), 2); +} +inline int v_signmask(const v_uint32x4& a) +{ return v_signmask(v_reinterpret_as_s32(a)); } +inline int v_signmask(const v_float32x4& a) +{ return v_signmask(v_reinterpret_as_s32(a)); } + +inline int v_signmask(const v_int64x2& a) +{ + VSX_UNUSED(const vec_dword2) sv = vec_sr(a.val, vec_udword2_sp(63)); + return (int)vec_extract(sv, 0) | (int)vec_extract(sv, 1) << 1; +} +inline int v_signmask(const v_uint64x2& a) +{ return v_signmask(v_reinterpret_as_s64(a)); } +inline int v_signmask(const v_float64x2& a) +{ return v_signmask(v_reinterpret_as_s64(a)); } + +inline int v_scan_forward(const v_int8x16& a) { return trailingZeros32(v_signmask(a)); } +inline int v_scan_forward(const v_uint8x16& a) { return trailingZeros32(v_signmask(a)); } +inline int v_scan_forward(const v_int16x8& a) { return trailingZeros32(v_signmask(a)); } +inline int v_scan_forward(const v_uint16x8& a) { return trailingZeros32(v_signmask(a)); } +inline int v_scan_forward(const v_int32x4& a) { return trailingZeros32(v_signmask(a)); } +inline int v_scan_forward(const v_uint32x4& a) { return trailingZeros32(v_signmask(a)); } +inline int v_scan_forward(const v_float32x4& a) { return trailingZeros32(v_signmask(a)); } 
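As a quick reference for the mask helpers above, a hedged, illustrative sketch (not part of the vendored header) of the common v_signmask / v_scan_forward idiom for locating the first lane that passes a comparison; it assumes the public opencv2/core/hal/intrin.hpp include. The remaining v_scan_forward overloads continue right after it.

// Hypothetical example -- not part of the upstream/vendored file.
#include <opencv2/core/hal/intrin.hpp>
#include <cstdio>

int main()
{
#if CV_SIMD128
    const float data[4] = { 0.5f, 1.5f, 3.5f, 2.0f };
    cv::v_float32x4 v   = cv::v_load(data);
    cv::v_float32x4 thr = cv::v_setall_f32(2.0f);

    cv::v_float32x4 mask = v > thr;      // all-ones lanes where data[i] > 2.0
    int bits = cv::v_signmask(mask);     // one bit per lane, bit 0 = lane 0 -> 0b0100 here
    if (bits)
        std::printf("first lane above threshold: %d\n", cv::v_scan_forward(mask)); // prints 2
    else
        std::printf("no lane above threshold\n");
#endif
    return 0;
}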
+inline int v_scan_forward(const v_int64x2& a) { return trailingZeros32(v_signmask(a)); } +inline int v_scan_forward(const v_uint64x2& a) { return trailingZeros32(v_signmask(a)); } +inline int v_scan_forward(const v_float64x2& a) { return trailingZeros32(v_signmask(a)); } + +template +inline bool v_check_all(const _Tpvec& a) +{ return vec_all_lt(a.val, _Tpvec::zero().val); } +inline bool v_check_all(const v_uint8x16& a) +{ return v_check_all(v_reinterpret_as_s8(a)); } +inline bool v_check_all(const v_uint16x8& a) +{ return v_check_all(v_reinterpret_as_s16(a)); } +inline bool v_check_all(const v_uint32x4& a) +{ return v_check_all(v_reinterpret_as_s32(a)); } +inline bool v_check_all(const v_uint64x2& a) +{ return v_check_all(v_reinterpret_as_s64(a)); } +inline bool v_check_all(const v_float32x4& a) +{ return v_check_all(v_reinterpret_as_s32(a)); } +inline bool v_check_all(const v_float64x2& a) +{ return v_check_all(v_reinterpret_as_s64(a)); } + +template +inline bool v_check_any(const _Tpvec& a) +{ return vec_any_lt(a.val, _Tpvec::zero().val); } +inline bool v_check_any(const v_uint8x16& a) +{ return v_check_any(v_reinterpret_as_s8(a)); } +inline bool v_check_any(const v_uint16x8& a) +{ return v_check_any(v_reinterpret_as_s16(a)); } +inline bool v_check_any(const v_uint32x4& a) +{ return v_check_any(v_reinterpret_as_s32(a)); } +inline bool v_check_any(const v_uint64x2& a) +{ return v_check_any(v_reinterpret_as_s64(a)); } +inline bool v_check_any(const v_float32x4& a) +{ return v_check_any(v_reinterpret_as_s32(a)); } +inline bool v_check_any(const v_float64x2& a) +{ return v_check_any(v_reinterpret_as_s64(a)); } + +////////// Other math ///////// + +/** Some frequent operations **/ +inline v_float32x4 v_sqrt(const v_float32x4& x) +{ return v_float32x4(vec_sqrt(x.val)); } +inline v_float64x2 v_sqrt(const v_float64x2& x) +{ return v_float64x2(vec_sqrt(x.val)); } + +inline v_float32x4 v_invsqrt(const v_float32x4& x) +{ return v_float32x4(vec_rsqrt(x.val)); } +inline v_float64x2 v_invsqrt(const v_float64x2& x) +{ return v_float64x2(vec_rsqrt(x.val)); } + +#define OPENCV_HAL_IMPL_VSX_MULADD(_Tpvec) \ +inline _Tpvec v_magnitude(const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(vec_sqrt(vec_madd(a.val, a.val, vec_mul(b.val, b.val)))); } \ +inline _Tpvec v_sqr_magnitude(const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(vec_madd(a.val, a.val, vec_mul(b.val, b.val))); } \ +inline _Tpvec v_fma(const _Tpvec& a, const _Tpvec& b, const _Tpvec& c) \ +{ return _Tpvec(vec_madd(a.val, b.val, c.val)); } \ +inline _Tpvec v_muladd(const _Tpvec& a, const _Tpvec& b, const _Tpvec& c) \ +{ return _Tpvec(vec_madd(a.val, b.val, c.val)); } + +OPENCV_HAL_IMPL_VSX_MULADD(v_float32x4) +OPENCV_HAL_IMPL_VSX_MULADD(v_float64x2) + +inline v_int32x4 v_muladd(const v_int32x4& a, const v_int32x4& b, const v_int32x4& c) +{ return a * b + c; } + +// TODO: exp, log, sin, cos + +/** Absolute values **/ +inline v_uint8x16 v_abs(const v_int8x16& x) +{ return v_uint8x16(vec_uchar16_c(vec_abs(x.val))); } + +inline v_uint16x8 v_abs(const v_int16x8& x) +{ return v_uint16x8(vec_ushort8_c(vec_abs(x.val))); } + +inline v_uint32x4 v_abs(const v_int32x4& x) +{ return v_uint32x4(vec_uint4_c(vec_abs(x.val))); } + +inline v_float32x4 v_abs(const v_float32x4& x) +{ return v_float32x4(vec_abs(x.val)); } + +inline v_float64x2 v_abs(const v_float64x2& x) +{ return v_float64x2(vec_abs(x.val)); } + +/** Absolute difference **/ +// unsigned +OPENCV_HAL_IMPL_VSX_BIN_FUNC(v_absdiff, vec_absd) + +inline v_uint8x16 v_absdiff(const v_int8x16& a, const 
v_int8x16& b) +{ return v_reinterpret_as_u8(v_sub_wrap(v_max(a, b), v_min(a, b))); } +inline v_uint16x8 v_absdiff(const v_int16x8& a, const v_int16x8& b) +{ return v_reinterpret_as_u16(v_sub_wrap(v_max(a, b), v_min(a, b))); } +inline v_uint32x4 v_absdiff(const v_int32x4& a, const v_int32x4& b) +{ return v_reinterpret_as_u32(v_max(a, b) - v_min(a, b)); } + +inline v_float32x4 v_absdiff(const v_float32x4& a, const v_float32x4& b) +{ return v_abs(a - b); } +inline v_float64x2 v_absdiff(const v_float64x2& a, const v_float64x2& b) +{ return v_abs(a - b); } + +/** Absolute difference for signed integers **/ +inline v_int8x16 v_absdiffs(const v_int8x16& a, const v_int8x16& b) +{ return v_int8x16(vec_abss(vec_subs(a.val, b.val))); } +inline v_int16x8 v_absdiffs(const v_int16x8& a, const v_int16x8& b) +{ return v_int16x8(vec_abss(vec_subs(a.val, b.val))); } + +////////// Conversions ///////// + +/** Rounding **/ +inline v_int32x4 v_round(const v_float32x4& a) +{ return v_int32x4(vec_cts(vec_rint(a.val))); } + +inline v_int32x4 v_round(const v_float64x2& a) +{ return v_int32x4(vec_mergesqo(vec_ctso(vec_rint(a.val)), vec_int4_z)); } + +inline v_int32x4 v_round(const v_float64x2& a, const v_float64x2& b) +{ return v_int32x4(vec_mergesqo(vec_ctso(vec_rint(a.val)), vec_ctso(vec_rint(b.val)))); } + +inline v_int32x4 v_floor(const v_float32x4& a) +{ return v_int32x4(vec_cts(vec_floor(a.val))); } + +inline v_int32x4 v_floor(const v_float64x2& a) +{ return v_int32x4(vec_mergesqo(vec_ctso(vec_floor(a.val)), vec_int4_z)); } + +inline v_int32x4 v_ceil(const v_float32x4& a) +{ return v_int32x4(vec_cts(vec_ceil(a.val))); } + +inline v_int32x4 v_ceil(const v_float64x2& a) +{ return v_int32x4(vec_mergesqo(vec_ctso(vec_ceil(a.val)), vec_int4_z)); } + +inline v_int32x4 v_trunc(const v_float32x4& a) +{ return v_int32x4(vec_cts(a.val)); } + +inline v_int32x4 v_trunc(const v_float64x2& a) +{ return v_int32x4(vec_mergesqo(vec_ctso(a.val), vec_int4_z)); } + +/** To float **/ +inline v_float32x4 v_cvt_f32(const v_int32x4& a) +{ return v_float32x4(vec_ctf(a.val)); } + +inline v_float32x4 v_cvt_f32(const v_float64x2& a) +{ return v_float32x4(vec_mergesqo(vec_cvfo(a.val), vec_float4_z)); } + +inline v_float32x4 v_cvt_f32(const v_float64x2& a, const v_float64x2& b) +{ return v_float32x4(vec_mergesqo(vec_cvfo(a.val), vec_cvfo(b.val))); } + +inline v_float64x2 v_cvt_f64(const v_int32x4& a) +{ return v_float64x2(vec_ctdo(vec_mergeh(a.val, a.val))); } + +inline v_float64x2 v_cvt_f64_high(const v_int32x4& a) +{ return v_float64x2(vec_ctdo(vec_mergel(a.val, a.val))); } + +inline v_float64x2 v_cvt_f64(const v_float32x4& a) +{ return v_float64x2(vec_cvfo(vec_mergeh(a.val, a.val))); } + +inline v_float64x2 v_cvt_f64_high(const v_float32x4& a) +{ return v_float64x2(vec_cvfo(vec_mergel(a.val, a.val))); } + +inline v_float64x2 v_cvt_f64(const v_int64x2& a) +{ return v_float64x2(vec_ctd(a.val)); } + +////////////// Lookup table access //////////////////// + +inline v_int8x16 v_lut(const schar* tab, const int* idx) +{ + return v_int8x16(tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]], tab[idx[4]], tab[idx[5]], tab[idx[6]], tab[idx[7]], + tab[idx[8]], tab[idx[9]], tab[idx[10]], tab[idx[11]], tab[idx[12]], tab[idx[13]], tab[idx[14]], tab[idx[15]]); +} +inline v_int8x16 v_lut_pairs(const schar* tab, const int* idx) +{ + return v_reinterpret_as_s8(v_int16x8(*(const short*)(tab+idx[0]), *(const short*)(tab+idx[1]), *(const short*)(tab+idx[2]), *(const short*)(tab+idx[3]), + *(const short*)(tab+idx[4]), *(const short*)(tab+idx[5]), *(const 
short*)(tab+idx[6]), *(const short*)(tab+idx[7]))); +} +inline v_int8x16 v_lut_quads(const schar* tab, const int* idx) +{ + return v_reinterpret_as_s8(v_int32x4(*(const int*)(tab+idx[0]), *(const int*)(tab+idx[1]), *(const int*)(tab+idx[2]), *(const int*)(tab+idx[3]))); +} +inline v_uint8x16 v_lut(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v_lut((const schar*)tab, idx)); } +inline v_uint8x16 v_lut_pairs(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v_lut_pairs((const schar*)tab, idx)); } +inline v_uint8x16 v_lut_quads(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v_lut_quads((const schar*)tab, idx)); } + +inline v_int16x8 v_lut(const short* tab, const int* idx) +{ + return v_int16x8(tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]], tab[idx[4]], tab[idx[5]], tab[idx[6]], tab[idx[7]]); +} +inline v_int16x8 v_lut_pairs(const short* tab, const int* idx) +{ + return v_reinterpret_as_s16(v_int32x4(*(const int*)(tab + idx[0]), *(const int*)(tab + idx[1]), *(const int*)(tab + idx[2]), *(const int*)(tab + idx[3]))); +} +inline v_int16x8 v_lut_quads(const short* tab, const int* idx) +{ + return v_reinterpret_as_s16(v_int64x2(*(const int64*)(tab + idx[0]), *(const int64*)(tab + idx[1]))); +} +inline v_uint16x8 v_lut(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v_lut((const short*)tab, idx)); } +inline v_uint16x8 v_lut_pairs(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v_lut_pairs((const short*)tab, idx)); } +inline v_uint16x8 v_lut_quads(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v_lut_quads((const short*)tab, idx)); } + +inline v_int32x4 v_lut(const int* tab, const int* idx) +{ + return v_int32x4(tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]]); +} +inline v_int32x4 v_lut_pairs(const int* tab, const int* idx) +{ + return v_reinterpret_as_s32(v_int64x2(*(const int64*)(tab + idx[0]), *(const int64*)(tab + idx[1]))); +} +inline v_int32x4 v_lut_quads(const int* tab, const int* idx) +{ + return v_int32x4(vsx_ld(0, tab + idx[0])); +} +inline v_uint32x4 v_lut(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v_lut((const int*)tab, idx)); } +inline v_uint32x4 v_lut_pairs(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v_lut_pairs((const int*)tab, idx)); } +inline v_uint32x4 v_lut_quads(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v_lut_quads((const int*)tab, idx)); } + +inline v_int64x2 v_lut(const int64_t* tab, const int* idx) +{ + return v_int64x2(tab[idx[0]], tab[idx[1]]); +} +inline v_int64x2 v_lut_pairs(const int64_t* tab, const int* idx) +{ + return v_int64x2(vsx_ld2(0, tab + idx[0])); +} +inline v_uint64x2 v_lut(const uint64_t* tab, const int* idx) { return v_reinterpret_as_u64(v_lut((const int64_t *)tab, idx)); } +inline v_uint64x2 v_lut_pairs(const uint64_t* tab, const int* idx) { return v_reinterpret_as_u64(v_lut_pairs((const int64_t *)tab, idx)); } + +inline v_float32x4 v_lut(const float* tab, const int* idx) +{ + return v_float32x4(tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]]); +} +inline v_float32x4 v_lut_pairs(const float* tab, const int* idx) { return v_reinterpret_as_f32(v_lut_pairs((const int*)tab, idx)); } +inline v_float32x4 v_lut_quads(const float* tab, const int* idx) { return v_load(tab + *idx); } + +inline v_float64x2 v_lut(const double* tab, const int* idx) +{ + return v_float64x2(tab[idx[0]], tab[idx[1]]); +} +inline v_float64x2 v_lut_pairs(const double* tab, const int* idx) { return v_load(tab + 
*idx); } + +inline v_int32x4 v_lut(const int* tab, const v_int32x4& idxvec) +{ + const int idx[4] = { + vec_extract(idxvec.val, 0), + vec_extract(idxvec.val, 1), + vec_extract(idxvec.val, 2), + vec_extract(idxvec.val, 3) + }; + return v_int32x4(tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]]); +} + +inline v_uint32x4 v_lut(const unsigned* tab, const v_int32x4& idxvec) +{ + const int idx[4] = { + vec_extract(idxvec.val, 0), + vec_extract(idxvec.val, 1), + vec_extract(idxvec.val, 2), + vec_extract(idxvec.val, 3) + }; + return v_uint32x4(tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]]); +} + +inline v_float32x4 v_lut(const float* tab, const v_int32x4& idxvec) +{ + const int idx[4] = { + vec_extract(idxvec.val, 0), + vec_extract(idxvec.val, 1), + vec_extract(idxvec.val, 2), + vec_extract(idxvec.val, 3) + }; + return v_float32x4(tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]]); +} + +inline v_float64x2 v_lut(const double* tab, const v_int32x4& idxvec) +{ + const int idx[2] = { + vec_extract(idxvec.val, 0), + vec_extract(idxvec.val, 1) + }; + return v_float64x2(tab[idx[0]], tab[idx[1]]); +} + +inline void v_lut_deinterleave(const float* tab, const v_int32x4& idxvec, v_float32x4& x, v_float32x4& y) +{ + vec_float4 xy0 = vec_ld_l8(tab + vec_extract(idxvec.val, 0)); + vec_float4 xy1 = vec_ld_l8(tab + vec_extract(idxvec.val, 1)); + vec_float4 xy2 = vec_ld_l8(tab + vec_extract(idxvec.val, 2)); + vec_float4 xy3 = vec_ld_l8(tab + vec_extract(idxvec.val, 3)); + vec_float4 xy02 = vec_mergeh(xy0, xy2); // x0, x2, y0, y2 + vec_float4 xy13 = vec_mergeh(xy1, xy3); // x1, x3, y1, y3 + x.val = vec_mergeh(xy02, xy13); + y.val = vec_mergel(xy02, xy13); +} +inline void v_lut_deinterleave(const double* tab, const v_int32x4& idxvec, v_float64x2& x, v_float64x2& y) +{ + vec_double2 xy0 = vsx_ld(vec_extract(idxvec.val, 0), tab); + vec_double2 xy1 = vsx_ld(vec_extract(idxvec.val, 1), tab); + x.val = vec_mergeh(xy0, xy1); + y.val = vec_mergel(xy0, xy1); +} + +inline v_int8x16 v_interleave_pairs(const v_int8x16& vec) +{ + static const vec_uchar16 perm = {0, 2, 1, 3, 4, 6, 5, 7, 8, 10, 9, 11, 12, 14, 13, 15}; + return v_int8x16(vec_perm(vec.val, vec.val, perm)); +} +inline v_uint8x16 v_interleave_pairs(const v_uint8x16& vec) +{ return v_reinterpret_as_u8(v_interleave_pairs(v_reinterpret_as_s8(vec))); } + +inline v_int8x16 v_interleave_quads(const v_int8x16& vec) +{ + static const vec_uchar16 perm = {0, 4, 1, 5, 2, 6, 3, 7, 8, 12, 9, 13, 10, 14, 11, 15}; + return v_int8x16(vec_perm(vec.val, vec.val, perm)); +} +inline v_uint8x16 v_interleave_quads(const v_uint8x16& vec) +{ return v_reinterpret_as_u8(v_interleave_quads(v_reinterpret_as_s8(vec))); } + +inline v_int16x8 v_interleave_pairs(const v_int16x8& vec) +{ + static const vec_uchar16 perm = {0,1, 4,5, 2,3, 6,7, 8,9, 12,13, 10,11, 14,15}; + return v_int16x8(vec_perm(vec.val, vec.val, perm)); +} +inline v_uint16x8 v_interleave_pairs(const v_uint16x8& vec) +{ return v_reinterpret_as_u16(v_interleave_pairs(v_reinterpret_as_s16(vec))); } + +inline v_int16x8 v_interleave_quads(const v_int16x8& vec) +{ + static const vec_uchar16 perm = {0,1, 8,9, 2,3, 10,11, 4,5, 12,13, 6,7, 14,15}; + return v_int16x8(vec_perm(vec.val, vec.val, perm)); +} +inline v_uint16x8 v_interleave_quads(const v_uint16x8& vec) +{ return v_reinterpret_as_u16(v_interleave_quads(v_reinterpret_as_s16(vec))); } + +inline v_int32x4 v_interleave_pairs(const v_int32x4& vec) +{ + static const vec_uchar16 perm = {0,1,2,3, 8,9,10,11, 4,5,6,7, 12,13,14,15}; + return v_int32x4(vec_perm(vec.val, vec.val, 
perm)); +} +inline v_uint32x4 v_interleave_pairs(const v_uint32x4& vec) +{ return v_reinterpret_as_u32(v_interleave_pairs(v_reinterpret_as_s32(vec))); } +inline v_float32x4 v_interleave_pairs(const v_float32x4& vec) +{ return v_reinterpret_as_f32(v_interleave_pairs(v_reinterpret_as_s32(vec))); } + +inline v_int8x16 v_pack_triplets(const v_int8x16& vec) +{ + static const vec_uchar16 perm = {0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 15, 15, 15}; + return v_int8x16(vec_perm(vec.val, vec.val, perm)); +} +inline v_uint8x16 v_pack_triplets(const v_uint8x16& vec) +{ return v_reinterpret_as_u8(v_pack_triplets(v_reinterpret_as_s8(vec))); } + +inline v_int16x8 v_pack_triplets(const v_int16x8& vec) +{ + static const vec_uchar16 perm = {0,1, 2,3, 4,5, 8,9, 10,11, 12,13, 14,15, 14,15}; + return v_int16x8(vec_perm(vec.val, vec.val, perm)); +} +inline v_uint16x8 v_pack_triplets(const v_uint16x8& vec) +{ return v_reinterpret_as_u16(v_pack_triplets(v_reinterpret_as_s16(vec))); } + +inline v_int32x4 v_pack_triplets(const v_int32x4& vec) +{ return vec; } +inline v_uint32x4 v_pack_triplets(const v_uint32x4& vec) +{ return vec; } +inline v_float32x4 v_pack_triplets(const v_float32x4& vec) +{ return vec; } + +/////// FP16 support //////// + +inline v_float32x4 v_load_expand(const float16_t* ptr) +{ + vec_ushort8 vf16 = vec_ld_l8((const ushort*)ptr); +#if CV_VSX3 && defined(vec_extract_fp_from_shorth) + return v_float32x4(vec_extract_fp_from_shorth(vf16)); +#elif CV_VSX3 && !defined(CV_COMPILER_VSX_BROKEN_ASM) + vec_float4 vf32; + __asm__ __volatile__ ("xvcvhpsp %x0,%x1" : "=wa" (vf32) : "wa" (vec_mergeh(vf16, vf16))); + return v_float32x4(vf32); +#else + const vec_int4 z = vec_int4_z, delta = vec_int4_sp(0x38000000); + const vec_int4 signmask = vec_int4_sp(0x80000000); + const vec_int4 maxexp = vec_int4_sp(0x7c000000); + const vec_float4 deltaf = vec_float4_c(vec_int4_sp(0x38800000)); + + vec_int4 bits = vec_int4_c(vec_mergeh(vec_short8_c(z), vec_short8_c(vf16))); + vec_int4 e = vec_and(bits, maxexp), sign = vec_and(bits, signmask); + vec_int4 t = vec_add(vec_sr(vec_xor(bits, sign), vec_uint4_sp(3)), delta); // ((h & 0x7fff) << 13) + delta + vec_int4 zt = vec_int4_c(vec_sub(vec_float4_c(vec_add(t, vec_int4_sp(1 << 23))), deltaf)); + + t = vec_add(t, vec_and(delta, vec_cmpeq(maxexp, e))); + vec_bint4 zmask = vec_cmpeq(e, z); + vec_int4 ft = vec_sel(t, zt, zmask); + return v_float32x4(vec_float4_c(vec_or(ft, sign))); +#endif +} + +inline void v_pack_store(float16_t* ptr, const v_float32x4& v) +{ +// fixme: Is there any builtin op or intrinsic that cover "xvcvsphp"? 
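+// Editor's note (descriptive, not from upstream): when the VSX3 "xvcvsphp"
+// path is unavailable, the fallback below converts f32 -> f16 purely in
+// integer arithmetic: it isolates the sign bit, maps NaN/Inf and overflowed
+// values to their half-precision encodings, routes subnormals through a
+// float round trip, and rounds the mantissa to nearest-even before packing.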
+#if CV_VSX3 && !defined(CV_COMPILER_VSX_BROKEN_ASM) + vec_ushort8 vf16; + __asm__ __volatile__ ("xvcvsphp %x0,%x1" : "=wa" (vf16) : "wa" (v.val)); + vec_st_l8(vec_mergesqe(vf16, vf16), ptr); +#else + const vec_int4 signmask = vec_int4_sp(0x80000000); + const vec_int4 rval = vec_int4_sp(0x3f000000); + + vec_int4 t = vec_int4_c(v.val); + vec_int4 sign = vec_sra(vec_and(t, signmask), vec_uint4_sp(16)); + t = vec_and(vec_nor(signmask, signmask), t); + + vec_bint4 finitemask = vec_cmpgt(vec_int4_sp(0x47800000), t); + vec_bint4 isnan = vec_cmpgt(t, vec_int4_sp(0x7f800000)); + vec_int4 naninf = vec_sel(vec_int4_sp(0x7c00), vec_int4_sp(0x7e00), isnan); + vec_bint4 tinymask = vec_cmpgt(vec_int4_sp(0x38800000), t); + vec_int4 tt = vec_int4_c(vec_add(vec_float4_c(t), vec_float4_c(rval))); + tt = vec_sub(tt, rval); + vec_int4 odd = vec_and(vec_sr(t, vec_uint4_sp(13)), vec_int4_sp(1)); + vec_int4 nt = vec_add(t, vec_int4_sp(0xc8000fff)); + nt = vec_sr(vec_add(nt, odd), vec_uint4_sp(13)); + t = vec_sel(nt, tt, tinymask); + t = vec_sel(naninf, t, finitemask); + t = vec_or(t, sign); + vec_st_l8(vec_packs(t, t), ptr); +#endif +} + +inline void v_cleanup() {} + + +/** Reinterpret **/ +/** its up there with load and store operations **/ + +////////// Matrix operations ///////// + +//////// Dot Product //////// +// 16 >> 32 +inline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b) +{ return v_int32x4(vec_msum(a.val, b.val, vec_int4_z)); } +inline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b, const v_int32x4& c) +{ return v_int32x4(vec_msum(a.val, b.val, c.val)); } + +// 32 >> 64 +inline v_int64x2 v_dotprod(const v_int32x4& a, const v_int32x4& b) +{ + vec_dword2 even = vec_mule(a.val, b.val); + vec_dword2 odd = vec_mulo(a.val, b.val); + return v_int64x2(vec_add(even, odd)); +} +inline v_int64x2 v_dotprod(const v_int32x4& a, const v_int32x4& b, const v_int64x2& c) +{ return v_dotprod(a, b) + c; } + +// 8 >> 32 +inline v_uint32x4 v_dotprod_expand(const v_uint8x16& a, const v_uint8x16& b, const v_uint32x4& c) +{ return v_uint32x4(vec_msum(a.val, b.val, c.val)); } +inline v_uint32x4 v_dotprod_expand(const v_uint8x16& a, const v_uint8x16& b) +{ return v_uint32x4(vec_msum(a.val, b.val, vec_uint4_z)); } + +inline v_int32x4 v_dotprod_expand(const v_int8x16& a, const v_int8x16& b) +{ + const vec_ushort8 eight = vec_ushort8_sp(8); + vec_short8 a0 = vec_sra((vec_short8)vec_sld(a.val, a.val, 1), eight); // even + vec_short8 a1 = vec_sra((vec_short8)a.val, eight); // odd + vec_short8 b0 = vec_sra((vec_short8)vec_sld(b.val, b.val, 1), eight); + vec_short8 b1 = vec_sra((vec_short8)b.val, eight); + return v_int32x4(vec_msum(a0, b0, vec_msum(a1, b1, vec_int4_z))); +} + +inline v_int32x4 v_dotprod_expand(const v_int8x16& a, const v_int8x16& b, const v_int32x4& c) +{ + const vec_ushort8 eight = vec_ushort8_sp(8); + vec_short8 a0 = vec_sra((vec_short8)vec_sld(a.val, a.val, 1), eight); // even + vec_short8 a1 = vec_sra((vec_short8)a.val, eight); // odd + vec_short8 b0 = vec_sra((vec_short8)vec_sld(b.val, b.val, 1), eight); + vec_short8 b1 = vec_sra((vec_short8)b.val, eight); + return v_int32x4(vec_msum(a0, b0, vec_msum(a1, b1, c.val))); +} + +// 16 >> 64 +inline v_uint64x2 v_dotprod_expand(const v_uint16x8& a, const v_uint16x8& b) +{ + const vec_uint4 zero = vec_uint4_z; + vec_uint4 even = vec_mule(a.val, b.val); + vec_uint4 odd = vec_mulo(a.val, b.val); + vec_udword2 e0 = (vec_udword2)vec_mergee(even, zero); + vec_udword2 e1 = (vec_udword2)vec_mergeo(even, zero); + vec_udword2 o0 = 
(vec_udword2)vec_mergee(odd, zero); + vec_udword2 o1 = (vec_udword2)vec_mergeo(odd, zero); + vec_udword2 s0 = vec_add(e0, o0); + vec_udword2 s1 = vec_add(e1, o1); + return v_uint64x2(vec_add(s0, s1)); +} +inline v_uint64x2 v_dotprod_expand(const v_uint16x8& a, const v_uint16x8& b, const v_uint64x2& c) +{ return v_dotprod_expand(a, b) + c; } + +inline v_int64x2 v_dotprod_expand(const v_int16x8& a, const v_int16x8& b) +{ + v_int32x4 prod = v_dotprod(a, b); + v_int64x2 c, d; + v_expand(prod, c, d); + return v_int64x2(vec_add(vec_mergeh(c.val, d.val), vec_mergel(c.val, d.val))); +} +inline v_int64x2 v_dotprod_expand(const v_int16x8& a, const v_int16x8& b, const v_int64x2& c) +{ return v_dotprod_expand(a, b) + c; } + +// 32 >> 64f +inline v_float64x2 v_dotprod_expand(const v_int32x4& a, const v_int32x4& b) +{ return v_cvt_f64(v_dotprod(a, b)); } +inline v_float64x2 v_dotprod_expand(const v_int32x4& a, const v_int32x4& b, const v_float64x2& c) +{ return v_dotprod_expand(a, b) + c; } + +//////// Fast Dot Product //////// + +// 16 >> 32 +inline v_int32x4 v_dotprod_fast(const v_int16x8& a, const v_int16x8& b) +{ return v_dotprod(a, b); } +inline v_int32x4 v_dotprod_fast(const v_int16x8& a, const v_int16x8& b, const v_int32x4& c) +{ return v_int32x4(vec_msum(a.val, b.val, vec_int4_z)) + c; } +// 32 >> 64 +inline v_int64x2 v_dotprod_fast(const v_int32x4& a, const v_int32x4& b) +{ return v_dotprod(a, b); } +inline v_int64x2 v_dotprod_fast(const v_int32x4& a, const v_int32x4& b, const v_int64x2& c) +{ return v_dotprod(a, b, c); } + +// 8 >> 32 +inline v_uint32x4 v_dotprod_expand_fast(const v_uint8x16& a, const v_uint8x16& b) +{ return v_dotprod_expand(a, b); } +inline v_uint32x4 v_dotprod_expand_fast(const v_uint8x16& a, const v_uint8x16& b, const v_uint32x4& c) +{ return v_uint32x4(vec_msum(a.val, b.val, vec_uint4_z)) + c; } + +inline v_int32x4 v_dotprod_expand_fast(const v_int8x16& a, const v_int8x16& b) +{ + vec_short8 a0 = vec_unpackh(a.val); + vec_short8 a1 = vec_unpackl(a.val); + vec_short8 b0 = vec_unpackh(b.val); + vec_short8 b1 = vec_unpackl(b.val); + return v_int32x4(vec_msum(a0, b0, vec_msum(a1, b1, vec_int4_z))); +} +inline v_int32x4 v_dotprod_expand_fast(const v_int8x16& a, const v_int8x16& b, const v_int32x4& c) +{ return v_dotprod_expand_fast(a, b) + c; } + +// 16 >> 64 +inline v_uint64x2 v_dotprod_expand_fast(const v_uint16x8& a, const v_uint16x8& b) +{ return v_dotprod_expand(a, b); } +inline v_uint64x2 v_dotprod_expand_fast(const v_uint16x8& a, const v_uint16x8& b, const v_uint64x2& c) +{ return v_dotprod_expand(a, b, c); } + +inline v_int64x2 v_dotprod_expand_fast(const v_int16x8& a, const v_int16x8& b) +{ + v_int32x4 prod = v_dotprod(a, b); + v_int64x2 c, d; + v_expand(prod, c, d); + return c + d; +} +inline v_int64x2 v_dotprod_expand_fast(const v_int16x8& a, const v_int16x8& b, const v_int64x2& c) +{ return v_dotprod_expand_fast(a, b) + c; } + +// 32 >> 64f +inline v_float64x2 v_dotprod_expand_fast(const v_int32x4& a, const v_int32x4& b) +{ return v_dotprod_expand(a, b); } +inline v_float64x2 v_dotprod_expand_fast(const v_int32x4& a, const v_int32x4& b, const v_float64x2& c) +{ return v_dotprod_expand(a, b, c); } + +inline v_float32x4 v_matmul(const v_float32x4& v, const v_float32x4& m0, + const v_float32x4& m1, const v_float32x4& m2, + const v_float32x4& m3) +{ + const vec_float4 v0 = vec_splat(v.val, 0); + const vec_float4 v1 = vec_splat(v.val, 1); + const vec_float4 v2 = vec_splat(v.val, 2); + VSX_UNUSED(const vec_float4) v3 = vec_splat(v.val, 3); + return 
v_float32x4(vec_madd(v0, m0.val, vec_madd(v1, m1.val, vec_madd(v2, m2.val, vec_mul(v3, m3.val))))); +} + +inline v_float32x4 v_matmuladd(const v_float32x4& v, const v_float32x4& m0, + const v_float32x4& m1, const v_float32x4& m2, + const v_float32x4& a) +{ + const vec_float4 v0 = vec_splat(v.val, 0); + const vec_float4 v1 = vec_splat(v.val, 1); + const vec_float4 v2 = vec_splat(v.val, 2); + return v_float32x4(vec_madd(v0, m0.val, vec_madd(v1, m1.val, vec_madd(v2, m2.val, a.val)))); +} + +#define OPENCV_HAL_IMPL_VSX_TRANSPOSE4x4(_Tpvec, _Tpvec2) \ +inline void v_transpose4x4(const _Tpvec& a0, const _Tpvec& a1, \ + const _Tpvec& a2, const _Tpvec& a3, \ + _Tpvec& b0, _Tpvec& b1, _Tpvec& b2, _Tpvec& b3) \ +{ \ + _Tpvec2 a02 = vec_mergeh(a0.val, a2.val); \ + _Tpvec2 a13 = vec_mergeh(a1.val, a3.val); \ + b0.val = vec_mergeh(a02, a13); \ + b1.val = vec_mergel(a02, a13); \ + a02 = vec_mergel(a0.val, a2.val); \ + a13 = vec_mergel(a1.val, a3.val); \ + b2.val = vec_mergeh(a02, a13); \ + b3.val = vec_mergel(a02, a13); \ +} +OPENCV_HAL_IMPL_VSX_TRANSPOSE4x4(v_uint32x4, vec_uint4) +OPENCV_HAL_IMPL_VSX_TRANSPOSE4x4(v_int32x4, vec_int4) +OPENCV_HAL_IMPL_VSX_TRANSPOSE4x4(v_float32x4, vec_float4) + +template +inline Tvec v_broadcast_element(const Tvec& v) +{ return Tvec(vec_splat(v.val, i)); } + + +CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END + +//! @endcond + +} + +#endif // OPENCV_HAL_VSX_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_wasm.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_wasm.hpp new file mode 100644 index 00000000..b4178af8 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/intrin_wasm.hpp @@ -0,0 +1,2782 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#ifndef OPENCV_HAL_INTRIN_WASM_HPP +#define OPENCV_HAL_INTRIN_WASM_HPP + +#include +#include +#include +#include "opencv2/core/saturate.hpp" + +#define CV_SIMD128 1 +#define CV_SIMD128_64F 0 // Now all implementation of f64 use fallback, so disable it. +#define CV_SIMD128_FP16 0 + +namespace cv +{ + +//! 
@cond IGNORED + +CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN + +#if (__EMSCRIPTEN_major__ * 1000000 + __EMSCRIPTEN_minor__ * 1000 + __EMSCRIPTEN_tiny__) < (1038046) +// handle renames: https://github.com/emscripten-core/emscripten/pull/9440 (https://github.com/emscripten-core/emscripten/commit/755d5b46cb84d0aa120c10981b11d05646c29673) +#define wasm_i32x4_trunc_saturate_f32x4 wasm_trunc_saturate_i32x4_f32x4 +#define wasm_u32x4_trunc_saturate_f32x4 wasm_trunc_saturate_u32x4_f32x4 +#define wasm_i64x2_trunc_saturate_f64x2 wasm_trunc_saturate_i64x2_f64x2 +#define wasm_u64x2_trunc_saturate_f64x2 wasm_trunc_saturate_u64x2_f64x2 +#define wasm_f32x4_convert_i32x4 wasm_convert_f32x4_i32x4 +#define wasm_f32x4_convert_u32x4 wasm_convert_f32x4_u32x4 +#define wasm_f64x2_convert_i64x2 wasm_convert_f64x2_i64x2 +#define wasm_f64x2_convert_u64x2 wasm_convert_f64x2_u64x2 +#endif // COMPATIBILITY: <1.38.46 + +///////// Types /////////// + +struct v_uint8x16 +{ + typedef uchar lane_type; + typedef v128_t vector_type; + enum { nlanes = 16 }; + + v_uint8x16() {} + explicit v_uint8x16(v128_t v) : val(v) {} + v_uint8x16(uchar v0, uchar v1, uchar v2, uchar v3, uchar v4, uchar v5, uchar v6, uchar v7, + uchar v8, uchar v9, uchar v10, uchar v11, uchar v12, uchar v13, uchar v14, uchar v15) + { + uchar v[] = {v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15}; + val = wasm_v128_load(v); + } + + uchar get0() const + { + return (uchar)wasm_i8x16_extract_lane(val, 0); + } + + v128_t val; +}; + +struct v_int8x16 +{ + typedef schar lane_type; + typedef v128_t vector_type; + enum { nlanes = 16 }; + + v_int8x16() {} + explicit v_int8x16(v128_t v) : val(v) {} + v_int8x16(schar v0, schar v1, schar v2, schar v3, schar v4, schar v5, schar v6, schar v7, + schar v8, schar v9, schar v10, schar v11, schar v12, schar v13, schar v14, schar v15) + { + schar v[] = {v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15}; + val = wasm_v128_load(v); + } + + schar get0() const + { + return wasm_i8x16_extract_lane(val, 0); + } + + v128_t val; +}; + +struct v_uint16x8 +{ + typedef ushort lane_type; + typedef v128_t vector_type; + enum { nlanes = 8 }; + + v_uint16x8() {} + explicit v_uint16x8(v128_t v) : val(v) {} + v_uint16x8(ushort v0, ushort v1, ushort v2, ushort v3, ushort v4, ushort v5, ushort v6, ushort v7) + { + ushort v[] = {v0, v1, v2, v3, v4, v5, v6, v7}; + val = wasm_v128_load(v); + } + + ushort get0() const + { + return (ushort)wasm_i16x8_extract_lane(val, 0); // wasm_u16x8_extract_lane() unimplemented yet + } + + v128_t val; +}; + +struct v_int16x8 +{ + typedef short lane_type; + typedef v128_t vector_type; + enum { nlanes = 8 }; + + v_int16x8() {} + explicit v_int16x8(v128_t v) : val(v) {} + v_int16x8(short v0, short v1, short v2, short v3, short v4, short v5, short v6, short v7) + { + short v[] = {v0, v1, v2, v3, v4, v5, v6, v7}; + val = wasm_v128_load(v); + } + + short get0() const + { + return wasm_i16x8_extract_lane(val, 0); + } + + v128_t val; +}; + +struct v_uint32x4 +{ + typedef unsigned lane_type; + typedef v128_t vector_type; + enum { nlanes = 4 }; + + v_uint32x4() {} + explicit v_uint32x4(v128_t v) : val(v) {} + v_uint32x4(unsigned v0, unsigned v1, unsigned v2, unsigned v3) + { + unsigned v[] = {v0, v1, v2, v3}; + val = wasm_v128_load(v); + } + + unsigned get0() const + { + return (unsigned)wasm_i32x4_extract_lane(val, 0); + } + + v128_t val; +}; + +struct v_int32x4 +{ + typedef int lane_type; + typedef v128_t vector_type; + enum { nlanes = 4 }; + + v_int32x4() {} + explicit v_int32x4(v128_t 
v) : val(v) {} + v_int32x4(int v0, int v1, int v2, int v3) + { + int v[] = {v0, v1, v2, v3}; + val = wasm_v128_load(v); + } + + int get0() const + { + return wasm_i32x4_extract_lane(val, 0); + } + + v128_t val; +}; + +struct v_float32x4 +{ + typedef float lane_type; + typedef v128_t vector_type; + enum { nlanes = 4 }; + + v_float32x4() {} + explicit v_float32x4(v128_t v) : val(v) {} + v_float32x4(float v0, float v1, float v2, float v3) + { + float v[] = {v0, v1, v2, v3}; + val = wasm_v128_load(v); + } + + float get0() const + { + return wasm_f32x4_extract_lane(val, 0); + } + + v128_t val; +}; + +struct v_uint64x2 +{ + typedef uint64 lane_type; + typedef v128_t vector_type; + enum { nlanes = 2 }; + + v_uint64x2() {} + explicit v_uint64x2(v128_t v) : val(v) {} + v_uint64x2(uint64 v0, uint64 v1) + { + uint64 v[] = {v0, v1}; + val = wasm_v128_load(v); + } + + uint64 get0() const + { + return (uint64)wasm_i64x2_extract_lane(val, 0); + } + + v128_t val; +}; + +struct v_int64x2 +{ + typedef int64 lane_type; + typedef v128_t vector_type; + enum { nlanes = 2 }; + + v_int64x2() {} + explicit v_int64x2(v128_t v) : val(v) {} + v_int64x2(int64 v0, int64 v1) + { + int64 v[] = {v0, v1}; + val = wasm_v128_load(v); + } + + int64 get0() const + { + return wasm_i64x2_extract_lane(val, 0); + } + + v128_t val; +}; + +struct v_float64x2 +{ + typedef double lane_type; + typedef v128_t vector_type; + enum { nlanes = 2 }; + + v_float64x2() {} + explicit v_float64x2(v128_t v) : val(v) {} + v_float64x2(double v0, double v1) + { + double v[] = {v0, v1}; + val = wasm_v128_load(v); + } + + double get0() const + { + return wasm_f64x2_extract_lane(val, 0); + } + + v128_t val; +}; + +namespace +{ +#define OPENCV_HAL_IMPL_REINTERPRET_INT(ft, tt) \ +inline tt reinterpret_int(ft x) { union { ft l; tt i; } v; v.l = x; return v.i; } +OPENCV_HAL_IMPL_REINTERPRET_INT(uchar, schar) +OPENCV_HAL_IMPL_REINTERPRET_INT(schar, schar) +OPENCV_HAL_IMPL_REINTERPRET_INT(ushort, short) +OPENCV_HAL_IMPL_REINTERPRET_INT(short, short) +OPENCV_HAL_IMPL_REINTERPRET_INT(unsigned, int) +OPENCV_HAL_IMPL_REINTERPRET_INT(int, int) +OPENCV_HAL_IMPL_REINTERPRET_INT(float, int) +OPENCV_HAL_IMPL_REINTERPRET_INT(uint64, int64) +OPENCV_HAL_IMPL_REINTERPRET_INT(int64, int64) +OPENCV_HAL_IMPL_REINTERPRET_INT(double, int64) + +static const unsigned char popCountTable[] = +{ + 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8, +}; +} // namespace + +static v128_t wasm_unpacklo_i8x16(v128_t a, v128_t b) { + return wasm_v8x16_shuffle(a, b, 0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23); +} + +static v128_t wasm_unpacklo_i16x8(v128_t a, v128_t b) { + return wasm_v8x16_shuffle(a, b, 0,1,16,17,2,3,18,19,4,5,20,21,6,7,22,23); +} + +static v128_t wasm_unpacklo_i32x4(v128_t a, v128_t b) { + return wasm_v8x16_shuffle(a, b, 
0,1,2,3,16,17,18,19,4,5,6,7,20,21,22,23); +} + +static v128_t wasm_unpacklo_i64x2(v128_t a, v128_t b) { + return wasm_v8x16_shuffle(a, b, 0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23); +} + +static v128_t wasm_unpackhi_i8x16(v128_t a, v128_t b) { + return wasm_v8x16_shuffle(a, b, 8,24,9,25,10,26,11,27,12,28,13,29,14,30,15,31); +} + +static v128_t wasm_unpackhi_i16x8(v128_t a, v128_t b) { + return wasm_v8x16_shuffle(a, b, 8,9,24,25,10,11,26,27,12,13,28,29,14,15,30,31); +} + +static v128_t wasm_unpackhi_i32x4(v128_t a, v128_t b) { + return wasm_v8x16_shuffle(a, b, 8,9,10,11,24,25,26,27,12,13,14,15,28,29,30,31); +} + +static v128_t wasm_unpackhi_i64x2(v128_t a, v128_t b) { + return wasm_v8x16_shuffle(a, b, 8,9,10,11,12,13,14,15,24,25,26,27,28,29,30,31); +} + +/** Convert **/ +// 8 >> 16 +inline v128_t v128_cvtu8x16_i16x8(const v128_t& a) +{ + const v128_t z = wasm_i8x16_splat(0); + return wasm_unpacklo_i8x16(a, z); +} +inline v128_t v128_cvti8x16_i16x8(const v128_t& a) +{ return wasm_i16x8_shr(wasm_unpacklo_i8x16(a, a), 8); } +// 8 >> 32 +inline v128_t v128_cvtu8x16_i32x4(const v128_t& a) +{ + const v128_t z = wasm_i8x16_splat(0); + return wasm_unpacklo_i16x8(wasm_unpacklo_i8x16(a, z), z); +} +inline v128_t v128_cvti8x16_i32x4(const v128_t& a) +{ + v128_t r = wasm_unpacklo_i8x16(a, a); + r = wasm_unpacklo_i8x16(r, r); + return wasm_i32x4_shr(r, 24); +} +// 16 >> 32 +inline v128_t v128_cvtu16x8_i32x4(const v128_t& a) +{ + const v128_t z = wasm_i8x16_splat(0); + return wasm_unpacklo_i16x8(a, z); +} +inline v128_t v128_cvti16x8_i32x4(const v128_t& a) +{ return wasm_i32x4_shr(wasm_unpacklo_i16x8(a, a), 16); } +// 32 >> 64 +inline v128_t v128_cvtu32x4_i64x2(const v128_t& a) +{ + const v128_t z = wasm_i8x16_splat(0); + return wasm_unpacklo_i32x4(a, z); +} +inline v128_t v128_cvti32x4_i64x2(const v128_t& a) +{ return wasm_unpacklo_i32x4(a, wasm_i32x4_shr(a, 31)); } + +// 16 << 8 +inline v128_t v128_cvtu8x16_i16x8_high(const v128_t& a) +{ + const v128_t z = wasm_i8x16_splat(0); + return wasm_unpackhi_i8x16(a, z); +} +inline v128_t v128_cvti8x16_i16x8_high(const v128_t& a) +{ return wasm_i16x8_shr(wasm_unpackhi_i8x16(a, a), 8); } +// 32 << 16 +inline v128_t v128_cvtu16x8_i32x4_high(const v128_t& a) +{ + const v128_t z = wasm_i8x16_splat(0); + return wasm_unpackhi_i16x8(a, z); +} +inline v128_t v128_cvti16x8_i32x4_high(const v128_t& a) +{ return wasm_i32x4_shr(wasm_unpackhi_i16x8(a, a), 16); } +// 64 << 32 +inline v128_t v128_cvtu32x4_i64x2_high(const v128_t& a) +{ + const v128_t z = wasm_i8x16_splat(0); + return wasm_unpackhi_i32x4(a, z); +} +inline v128_t v128_cvti32x4_i64x2_high(const v128_t& a) +{ return wasm_unpackhi_i32x4(a, wasm_i32x4_shr(a, 31)); } + +#define OPENCV_HAL_IMPL_WASM_INITVEC(_Tpvec, _Tp, suffix, zsuffix, _Tps) \ +inline _Tpvec v_setzero_##suffix() { return _Tpvec(wasm_##zsuffix##_splat((_Tps)0)); } \ +inline _Tpvec v_setall_##suffix(_Tp v) { return _Tpvec(wasm_##zsuffix##_splat((_Tps)v)); } \ +template inline _Tpvec v_reinterpret_as_##suffix(const _Tpvec0& a) \ +{ return _Tpvec(a.val); } + +OPENCV_HAL_IMPL_WASM_INITVEC(v_uint8x16, uchar, u8, i8x16, schar) +OPENCV_HAL_IMPL_WASM_INITVEC(v_int8x16, schar, s8, i8x16, schar) +OPENCV_HAL_IMPL_WASM_INITVEC(v_uint16x8, ushort, u16, i16x8, short) +OPENCV_HAL_IMPL_WASM_INITVEC(v_int16x8, short, s16, i16x8, short) +OPENCV_HAL_IMPL_WASM_INITVEC(v_uint32x4, unsigned, u32, i32x4, int) +OPENCV_HAL_IMPL_WASM_INITVEC(v_int32x4, int, s32, i32x4, int) +OPENCV_HAL_IMPL_WASM_INITVEC(v_float32x4, float, f32, f32x4, float) 
+OPENCV_HAL_IMPL_WASM_INITVEC(v_uint64x2, uint64, u64, i64x2, int64) +OPENCV_HAL_IMPL_WASM_INITVEC(v_int64x2, int64, s64, i64x2, int64) +OPENCV_HAL_IMPL_WASM_INITVEC(v_float64x2, double, f64, f64x2, double) + +//////////////// PACK /////////////// +inline v_uint8x16 v_pack(const v_uint16x8& a, const v_uint16x8& b) +{ + v128_t maxval = wasm_i16x8_splat(255); + v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_u16x8_gt(a.val, maxval)); + v128_t b1 = wasm_v128_bitselect(maxval, b.val, wasm_u16x8_gt(b.val, maxval)); + return v_uint8x16(wasm_v8x16_shuffle(a1, b1, 0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30)); +} +inline v_int8x16 v_pack(const v_int16x8& a, const v_int16x8& b) +{ + v128_t maxval = wasm_i16x8_splat(127); + v128_t minval = wasm_i16x8_splat(-128); + v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_i16x8_gt(a.val, maxval)); + v128_t b1 = wasm_v128_bitselect(maxval, b.val, wasm_i16x8_gt(b.val, maxval)); + v128_t a2 = wasm_v128_bitselect(minval, a1, wasm_i16x8_lt(a1, minval)); + v128_t b2 = wasm_v128_bitselect(minval, b1, wasm_i16x8_lt(b1, minval)); + return v_int8x16(wasm_v8x16_shuffle(a2, b2, 0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30)); +} +inline v_uint16x8 v_pack(const v_uint32x4& a, const v_uint32x4& b) +{ + v128_t maxval = wasm_i32x4_splat(65535); + v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_u32x4_gt(a.val, maxval)); + v128_t b1 = wasm_v128_bitselect(maxval, b.val, wasm_u32x4_gt(b.val, maxval)); + return v_uint16x8(wasm_v8x16_shuffle(a1, b1, 0,1,4,5,8,9,12,13,16,17,20,21,24,25,28,29)); +} +inline v_int16x8 v_pack(const v_int32x4& a, const v_int32x4& b) +{ + v128_t maxval = wasm_i32x4_splat(32767); + v128_t minval = wasm_i32x4_splat(-32768); + v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_i32x4_gt(a.val, maxval)); + v128_t b1 = wasm_v128_bitselect(maxval, b.val, wasm_i32x4_gt(b.val, maxval)); + v128_t a2 = wasm_v128_bitselect(minval, a1, wasm_i32x4_lt(a1, minval)); + v128_t b2 = wasm_v128_bitselect(minval, b1, wasm_i32x4_lt(b1, minval)); + return v_int16x8(wasm_v8x16_shuffle(a2, b2, 0,1,4,5,8,9,12,13,16,17,20,21,24,25,28,29)); +} +inline v_uint32x4 v_pack(const v_uint64x2& a, const v_uint64x2& b) +{ + return v_uint32x4(wasm_v8x16_shuffle(a.val, b.val, 0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27)); +} +inline v_int32x4 v_pack(const v_int64x2& a, const v_int64x2& b) +{ + return v_int32x4(wasm_v8x16_shuffle(a.val, b.val, 0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27)); +} +inline v_uint8x16 v_pack_u(const v_int16x8& a, const v_int16x8& b) +{ + v128_t maxval = wasm_i16x8_splat(255); + v128_t minval = wasm_i16x8_splat(0); + v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_i16x8_gt(a.val, maxval)); + v128_t b1 = wasm_v128_bitselect(maxval, b.val, wasm_i16x8_gt(b.val, maxval)); + v128_t a2 = wasm_v128_bitselect(minval, a1, wasm_i16x8_lt(a1, minval)); + v128_t b2 = wasm_v128_bitselect(minval, b1, wasm_i16x8_lt(b1, minval)); + return v_uint8x16(wasm_v8x16_shuffle(a2, b2, 0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30)); +} +inline v_uint16x8 v_pack_u(const v_int32x4& a, const v_int32x4& b) +{ + v128_t maxval = wasm_i32x4_splat(65535); + v128_t minval = wasm_i32x4_splat(0); + v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_i32x4_gt(a.val, maxval)); + v128_t b1 = wasm_v128_bitselect(maxval, b.val, wasm_i32x4_gt(b.val, maxval)); + v128_t a2 = wasm_v128_bitselect(minval, a1, wasm_i32x4_lt(a1, minval)); + v128_t b2 = wasm_v128_bitselect(minval, b1, wasm_i32x4_lt(b1, minval)); + return v_uint16x8(wasm_v8x16_shuffle(a2, b2, 0,1,4,5,8,9,12,13,16,17,20,21,24,25,28,29)); +} + 
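+// Editor's note (descriptive, not from upstream): the v_rshr_pack* and
+// v_rshr_pack_store* templates that follow perform a rounding right shift
+// by n (add 1 << (n-1), then shift) and then saturate into the narrower
+// lane type before packing, e.g. per ushort -> uchar lane, roughly:
+//   dst = (uchar)std::min((unsigned)((x + (1u << (n-1))) >> n), 255u);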
+template +inline v_uint8x16 v_rshr_pack(const v_uint16x8& a, const v_uint16x8& b) +{ + v128_t delta = wasm_i16x8_splat(((short)1 << (n-1))); + v128_t a1 = wasm_u16x8_shr(wasm_i16x8_add(a.val, delta), n); + v128_t b1 = wasm_u16x8_shr(wasm_i16x8_add(b.val, delta), n); + v128_t maxval = wasm_i16x8_splat(255); + v128_t a2 = wasm_v128_bitselect(maxval, a1, wasm_u16x8_gt(a1, maxval)); + v128_t b2 = wasm_v128_bitselect(maxval, b1, wasm_u16x8_gt(b1, maxval)); + return v_uint8x16(wasm_v8x16_shuffle(a2, b2, 0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30)); +} +template +inline v_int8x16 v_rshr_pack(const v_int16x8& a, const v_int16x8& b) +{ + v128_t delta = wasm_i16x8_splat(((short)1 << (n-1))); + v128_t a1 = wasm_i16x8_shr(wasm_i16x8_add(a.val, delta), n); + v128_t b1 = wasm_i16x8_shr(wasm_i16x8_add(b.val, delta), n); + v128_t maxval = wasm_i16x8_splat(127); + v128_t minval = wasm_i16x8_splat(-128); + v128_t a2 = wasm_v128_bitselect(maxval, a1, wasm_i16x8_gt(a1, maxval)); + v128_t b2 = wasm_v128_bitselect(maxval, b1, wasm_i16x8_gt(b1, maxval)); + v128_t a3 = wasm_v128_bitselect(minval, a2, wasm_i16x8_lt(a1, minval)); + v128_t b3 = wasm_v128_bitselect(minval, b2, wasm_i16x8_lt(b1, minval)); + return v_int8x16(wasm_v8x16_shuffle(a3, b3, 0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30)); +} +template +inline v_uint16x8 v_rshr_pack(const v_uint32x4& a, const v_uint32x4& b) +{ + v128_t delta = wasm_i32x4_splat(((int)1 << (n-1))); + v128_t a1 = wasm_u32x4_shr(wasm_i32x4_add(a.val, delta), n); + v128_t b1 = wasm_u32x4_shr(wasm_i32x4_add(b.val, delta), n); + v128_t maxval = wasm_i32x4_splat(65535); + v128_t a2 = wasm_v128_bitselect(maxval, a1, wasm_u32x4_gt(a1, maxval)); + v128_t b2 = wasm_v128_bitselect(maxval, b1, wasm_u32x4_gt(b1, maxval)); + return v_uint16x8(wasm_v8x16_shuffle(a2, b2, 0,1,4,5,8,9,12,13,16,17,20,21,24,25,28,29)); +} +template +inline v_int16x8 v_rshr_pack(const v_int32x4& a, const v_int32x4& b) +{ + v128_t delta = wasm_i32x4_splat(((int)1 << (n-1))); + v128_t a1 = wasm_i32x4_shr(wasm_i32x4_add(a.val, delta), n); + v128_t b1 = wasm_i32x4_shr(wasm_i32x4_add(b.val, delta), n); + v128_t maxval = wasm_i32x4_splat(32767); + v128_t minval = wasm_i16x8_splat(-32768); + v128_t a2 = wasm_v128_bitselect(maxval, a1, wasm_i32x4_gt(a1, maxval)); + v128_t b2 = wasm_v128_bitselect(maxval, b1, wasm_i32x4_gt(b1, maxval)); + v128_t a3 = wasm_v128_bitselect(minval, a2, wasm_i32x4_lt(a1, minval)); + v128_t b3 = wasm_v128_bitselect(minval, b2, wasm_i32x4_lt(b1, minval)); + return v_int16x8(wasm_v8x16_shuffle(a3, b3, 0,1,4,5,8,9,12,13,16,17,20,21,24,25,28,29)); +} +template +inline v_uint32x4 v_rshr_pack(const v_uint64x2& a, const v_uint64x2& b) +{ + v128_t delta = wasm_i64x2_splat(((int64)1 << (n-1))); + v128_t a1 = wasm_u64x2_shr(wasm_i64x2_add(a.val, delta), n); + v128_t b1 = wasm_u64x2_shr(wasm_i64x2_add(b.val, delta), n); + return v_uint32x4(wasm_v8x16_shuffle(a1, b1, 0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27)); +} +template +inline v_int32x4 v_rshr_pack(const v_int64x2& a, const v_int64x2& b) +{ + v128_t delta = wasm_i64x2_splat(((int64)1 << (n-1))); + v128_t a1 = wasm_i64x2_shr(wasm_i64x2_add(a.val, delta), n); + v128_t b1 = wasm_i64x2_shr(wasm_i64x2_add(b.val, delta), n); + return v_int32x4(wasm_v8x16_shuffle(a1, b1, 0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27)); +} +template +inline v_uint8x16 v_rshr_pack_u(const v_int16x8& a, const v_int16x8& b) +{ + v128_t delta = wasm_i16x8_splat(((short)1 << (n-1))); + v128_t a1 = wasm_i16x8_shr(wasm_i16x8_add(a.val, delta), n); + v128_t b1 = 
wasm_i16x8_shr(wasm_i16x8_add(b.val, delta), n); + v128_t maxval = wasm_i16x8_splat(255); + v128_t minval = wasm_i16x8_splat(0); + v128_t a2 = wasm_v128_bitselect(maxval, a1, wasm_i16x8_gt(a1, maxval)); + v128_t b2 = wasm_v128_bitselect(maxval, b1, wasm_i16x8_gt(b1, maxval)); + v128_t a3 = wasm_v128_bitselect(minval, a2, wasm_i16x8_lt(a1, minval)); + v128_t b3 = wasm_v128_bitselect(minval, b2, wasm_i16x8_lt(b1, minval)); + return v_uint8x16(wasm_v8x16_shuffle(a3, b3, 0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30)); +} +template +inline v_uint16x8 v_rshr_pack_u(const v_int32x4& a, const v_int32x4& b) +{ + v128_t delta = wasm_i32x4_splat(((int)1 << (n-1))); + v128_t a1 = wasm_i32x4_shr(wasm_i32x4_add(a.val, delta), n); + v128_t b1 = wasm_i32x4_shr(wasm_i32x4_add(b.val, delta), n); + v128_t maxval = wasm_i32x4_splat(65535); + v128_t minval = wasm_i16x8_splat(0); + v128_t a2 = wasm_v128_bitselect(maxval, a1, wasm_i32x4_gt(a1, maxval)); + v128_t b2 = wasm_v128_bitselect(maxval, b1, wasm_i32x4_gt(b1, maxval)); + v128_t a3 = wasm_v128_bitselect(minval, a2, wasm_i32x4_lt(a1, minval)); + v128_t b3 = wasm_v128_bitselect(minval, b2, wasm_i32x4_lt(b1, minval)); + return v_uint16x8(wasm_v8x16_shuffle(a3, b3, 0,1,4,5,8,9,12,13,16,17,20,21,24,25,28,29)); +} + +inline void v_pack_store(uchar* ptr, const v_uint16x8& a) +{ + v128_t maxval = wasm_i16x8_splat(255); + v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_u16x8_gt(a.val, maxval)); + v128_t r = wasm_v8x16_shuffle(a1, a1, 0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14); + uchar t_ptr[16]; + wasm_v128_store(t_ptr, r); + for (int i=0; i<8; ++i) { + ptr[i] = t_ptr[i]; + } +} +inline void v_pack_store(schar* ptr, const v_int16x8& a) +{ + v128_t maxval = wasm_i16x8_splat(127); + v128_t minval = wasm_i16x8_splat(-128); + v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_i16x8_gt(a.val, maxval)); + v128_t a2 = wasm_v128_bitselect(minval, a1, wasm_i16x8_lt(a1, minval)); + v128_t r = wasm_v8x16_shuffle(a2, a2, 0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14); + schar t_ptr[16]; + wasm_v128_store(t_ptr, r); + for (int i=0; i<8; ++i) { + ptr[i] = t_ptr[i]; + } +} +inline void v_pack_store(ushort* ptr, const v_uint32x4& a) +{ + v128_t maxval = wasm_i32x4_splat(65535); + v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_u32x4_gt(a.val, maxval)); + v128_t r = wasm_v8x16_shuffle(a1, a1, 0,1,4,5,8,9,12,13,0,1,4,5,8,9,12,13); + ushort t_ptr[8]; + wasm_v128_store(t_ptr, r); + for (int i=0; i<4; ++i) { + ptr[i] = t_ptr[i]; + } +} +inline void v_pack_store(short* ptr, const v_int32x4& a) +{ + v128_t maxval = wasm_i32x4_splat(32767); + v128_t minval = wasm_i32x4_splat(-32768); + v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_i32x4_gt(a.val, maxval)); + v128_t a2 = wasm_v128_bitselect(minval, a1, wasm_i32x4_lt(a1, minval)); + v128_t r = wasm_v8x16_shuffle(a2, a2, 0,1,4,5,8,9,12,13,0,1,4,5,8,9,12,13); + short t_ptr[8]; + wasm_v128_store(t_ptr, r); + for (int i=0; i<4; ++i) { + ptr[i] = t_ptr[i]; + } +} +inline void v_pack_store(unsigned* ptr, const v_uint64x2& a) +{ + v128_t r = wasm_v8x16_shuffle(a.val, a.val, 0,1,2,3,8,9,10,11,0,1,2,3,8,9,10,11); + unsigned t_ptr[4]; + wasm_v128_store(t_ptr, r); + for (int i=0; i<2; ++i) { + ptr[i] = t_ptr[i]; + } +} +inline void v_pack_store(int* ptr, const v_int64x2& a) +{ + v128_t r = wasm_v8x16_shuffle(a.val, a.val, 0,1,2,3,8,9,10,11,0,1,2,3,8,9,10,11); + int t_ptr[4]; + wasm_v128_store(t_ptr, r); + for (int i=0; i<2; ++i) { + ptr[i] = t_ptr[i]; + } +} +inline void v_pack_u_store(uchar* ptr, const v_int16x8& a) +{ + v128_t maxval = 
wasm_i16x8_splat(255); + v128_t minval = wasm_i16x8_splat(0); + v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_i16x8_gt(a.val, maxval)); + v128_t a2 = wasm_v128_bitselect(minval, a1, wasm_i16x8_lt(a1, minval)); + v128_t r = wasm_v8x16_shuffle(a2, a2, 0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14); + uchar t_ptr[16]; + wasm_v128_store(t_ptr, r); + for (int i=0; i<8; ++i) { + ptr[i] = t_ptr[i]; + } +} +inline void v_pack_u_store(ushort* ptr, const v_int32x4& a) +{ + v128_t maxval = wasm_i32x4_splat(65535); + v128_t minval = wasm_i32x4_splat(0); + v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_i32x4_gt(a.val, maxval)); + v128_t a2 = wasm_v128_bitselect(minval, a1, wasm_i32x4_lt(a1, minval)); + v128_t r = wasm_v8x16_shuffle(a2, a2, 0,1,4,5,8,9,12,13,0,1,4,5,8,9,12,13); + ushort t_ptr[8]; + wasm_v128_store(t_ptr, r); + for (int i=0; i<4; ++i) { + ptr[i] = t_ptr[i]; + } +} + +template +inline void v_rshr_pack_store(uchar* ptr, const v_uint16x8& a) +{ + v128_t delta = wasm_i16x8_splat((short)(1 << (n-1))); + v128_t a1 = wasm_u16x8_shr(wasm_i16x8_add(a.val, delta), n); + v128_t maxval = wasm_i16x8_splat(255); + v128_t a2 = wasm_v128_bitselect(maxval, a1, wasm_u16x8_gt(a1, maxval)); + v128_t r = wasm_v8x16_shuffle(a2, a2, 0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14); + uchar t_ptr[16]; + wasm_v128_store(t_ptr, r); + for (int i=0; i<8; ++i) { + ptr[i] = t_ptr[i]; + } +} +template +inline void v_rshr_pack_store(schar* ptr, const v_int16x8& a) +{ + v128_t delta = wasm_i16x8_splat(((short)1 << (n-1))); + v128_t a1 = wasm_i16x8_shr(wasm_i16x8_add(a.val, delta), n); + v128_t maxval = wasm_i16x8_splat(127); + v128_t minval = wasm_i16x8_splat(-128); + v128_t a2 = wasm_v128_bitselect(maxval, a1, wasm_i16x8_gt(a1, maxval)); + v128_t a3 = wasm_v128_bitselect(minval, a2, wasm_i16x8_lt(a1, minval)); + v128_t r = wasm_v8x16_shuffle(a3, a3, 0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14); + schar t_ptr[16]; + wasm_v128_store(t_ptr, r); + for (int i=0; i<8; ++i) { + ptr[i] = t_ptr[i]; + } +} +template +inline void v_rshr_pack_store(ushort* ptr, const v_uint32x4& a) +{ + v128_t delta = wasm_i32x4_splat(((int)1 << (n-1))); + v128_t a1 = wasm_u32x4_shr(wasm_i32x4_add(a.val, delta), n); + v128_t maxval = wasm_i32x4_splat(65535); + v128_t a2 = wasm_v128_bitselect(maxval, a1, wasm_u32x4_gt(a1, maxval)); + v128_t r = wasm_v8x16_shuffle(a2, a2, 0,1,4,5,8,9,12,13,0,1,4,5,8,9,12,13); + ushort t_ptr[8]; + wasm_v128_store(t_ptr, r); + for (int i=0; i<4; ++i) { + ptr[i] = t_ptr[i]; + } +} +template +inline void v_rshr_pack_store(short* ptr, const v_int32x4& a) +{ + v128_t delta = wasm_i32x4_splat(((int)1 << (n-1))); + v128_t a1 = wasm_i32x4_shr(wasm_i32x4_add(a.val, delta), n); + v128_t maxval = wasm_i32x4_splat(32767); + v128_t minval = wasm_i32x4_splat(-32768); + v128_t a2 = wasm_v128_bitselect(maxval, a1, wasm_i32x4_gt(a1, maxval)); + v128_t a3 = wasm_v128_bitselect(minval, a2, wasm_i32x4_lt(a1, minval)); + v128_t r = wasm_v8x16_shuffle(a3, a3, 0,1,4,5,8,9,12,13,0,1,4,5,8,9,12,13); + short t_ptr[8]; + wasm_v128_store(t_ptr, r); + for (int i=0; i<4; ++i) { + ptr[i] = t_ptr[i]; + } +} +template +inline void v_rshr_pack_store(unsigned* ptr, const v_uint64x2& a) +{ + v128_t delta = wasm_i64x2_splat(((int64)1 << (n-1))); + v128_t a1 = wasm_u64x2_shr(wasm_i64x2_add(a.val, delta), n); + v128_t r = wasm_v8x16_shuffle(a1, a1, 0,1,2,3,8,9,10,11,0,1,2,3,8,9,10,11); + unsigned t_ptr[4]; + wasm_v128_store(t_ptr, r); + for (int i=0; i<2; ++i) { + ptr[i] = t_ptr[i]; + } +} +template +inline void v_rshr_pack_store(int* ptr, const v_int64x2& a) +{ + 
v128_t delta = wasm_i64x2_splat(((int64)1 << (n-1))); + v128_t a1 = wasm_i64x2_shr(wasm_i64x2_add(a.val, delta), n); + v128_t r = wasm_v8x16_shuffle(a1, a1, 0,1,2,3,8,9,10,11,0,1,2,3,8,9,10,11); + int t_ptr[4]; + wasm_v128_store(t_ptr, r); + for (int i=0; i<2; ++i) { + ptr[i] = t_ptr[i]; + } +} +template +inline void v_rshr_pack_u_store(uchar* ptr, const v_int16x8& a) +{ + v128_t delta = wasm_i16x8_splat(((short)1 << (n-1))); + v128_t a1 = wasm_i16x8_shr(wasm_i16x8_add(a.val, delta), n); + v128_t maxval = wasm_i16x8_splat(255); + v128_t minval = wasm_i16x8_splat(0); + v128_t a2 = wasm_v128_bitselect(maxval, a1, wasm_i16x8_gt(a1, maxval)); + v128_t a3 = wasm_v128_bitselect(minval, a2, wasm_i16x8_lt(a1, minval)); + v128_t r = wasm_v8x16_shuffle(a3, a3, 0,2,4,6,8,10,12,14,0,2,4,6,8,10,12,14); + uchar t_ptr[16]; + wasm_v128_store(t_ptr, r); + for (int i=0; i<8; ++i) { + ptr[i] = t_ptr[i]; + } +} +template +inline void v_rshr_pack_u_store(ushort* ptr, const v_int32x4& a) +{ + v128_t delta = wasm_i32x4_splat(((int)1 << (n-1))); + v128_t a1 = wasm_i32x4_shr(wasm_i32x4_add(a.val, delta), n); + v128_t maxval = wasm_i32x4_splat(65535); + v128_t minval = wasm_i32x4_splat(0); + v128_t a2 = wasm_v128_bitselect(maxval, a1, wasm_i32x4_gt(a1, maxval)); + v128_t a3 = wasm_v128_bitselect(minval, a2, wasm_i32x4_lt(a1, minval)); + v128_t r = wasm_v8x16_shuffle(a3, a3, 0,1,4,5,8,9,12,13,0,1,4,5,8,9,12,13); + ushort t_ptr[8]; + wasm_v128_store(t_ptr, r); + for (int i=0; i<4; ++i) { + ptr[i] = t_ptr[i]; + } +} + +inline v_uint8x16 v_pack_b(const v_uint16x8& a, const v_uint16x8& b) +{ + v128_t maxval = wasm_i16x8_splat(255); + v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_u16x8_gt(a.val, maxval)); + v128_t b1 = wasm_v128_bitselect(maxval, b.val, wasm_u16x8_gt(b.val, maxval)); + return v_uint8x16(wasm_v8x16_shuffle(a1, b1, 0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30)); +} + +inline v_uint8x16 v_pack_b(const v_uint32x4& a, const v_uint32x4& b, + const v_uint32x4& c, const v_uint32x4& d) +{ + v128_t maxval = wasm_i32x4_splat(255); + v128_t a1 = wasm_v128_bitselect(maxval, a.val, wasm_u32x4_gt(a.val, maxval)); + v128_t b1 = wasm_v128_bitselect(maxval, b.val, wasm_u32x4_gt(b.val, maxval)); + v128_t c1 = wasm_v128_bitselect(maxval, c.val, wasm_u32x4_gt(c.val, maxval)); + v128_t d1 = wasm_v128_bitselect(maxval, d.val, wasm_u32x4_gt(d.val, maxval)); + v128_t ab = wasm_v8x16_shuffle(a1, b1, 0,4,8,12,16,20,24,28,0,4,8,12,16,20,24,28); + v128_t cd = wasm_v8x16_shuffle(c1, d1, 0,4,8,12,16,20,24,28,0,4,8,12,16,20,24,28); + return v_uint8x16(wasm_v8x16_shuffle(ab, cd, 0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23)); +} + +inline v_uint8x16 v_pack_b(const v_uint64x2& a, const v_uint64x2& b, const v_uint64x2& c, + const v_uint64x2& d, const v_uint64x2& e, const v_uint64x2& f, + const v_uint64x2& g, const v_uint64x2& h) +{ + v128_t maxval = wasm_i32x4_splat(255); + v128_t a1 = wasm_v128_bitselect(maxval, a.val, ((__u64x2)(a.val) > (__u64x2)maxval)); + v128_t b1 = wasm_v128_bitselect(maxval, b.val, ((__u64x2)(b.val) > (__u64x2)maxval)); + v128_t c1 = wasm_v128_bitselect(maxval, c.val, ((__u64x2)(c.val) > (__u64x2)maxval)); + v128_t d1 = wasm_v128_bitselect(maxval, d.val, ((__u64x2)(d.val) > (__u64x2)maxval)); + v128_t e1 = wasm_v128_bitselect(maxval, e.val, ((__u64x2)(e.val) > (__u64x2)maxval)); + v128_t f1 = wasm_v128_bitselect(maxval, f.val, ((__u64x2)(f.val) > (__u64x2)maxval)); + v128_t g1 = wasm_v128_bitselect(maxval, g.val, ((__u64x2)(g.val) > (__u64x2)maxval)); + v128_t h1 = wasm_v128_bitselect(maxval, h.val, 
((__u64x2)(h.val) > (__u64x2)maxval)); + v128_t ab = wasm_v8x16_shuffle(a1, b1, 0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24); + v128_t cd = wasm_v8x16_shuffle(c1, d1, 0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24); + v128_t ef = wasm_v8x16_shuffle(e1, f1, 0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24); + v128_t gh = wasm_v8x16_shuffle(g1, h1, 0,8,16,24,0,8,16,24,0,8,16,24,0,8,16,24); + v128_t abcd = wasm_v8x16_shuffle(ab, cd, 0,1,2,3,16,17,18,19,0,1,2,3,16,17,18,19); + v128_t efgh = wasm_v8x16_shuffle(ef, gh, 0,1,2,3,16,17,18,19,0,1,2,3,16,17,18,19); + return v_uint8x16(wasm_v8x16_shuffle(abcd, efgh, 0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23)); +} + +inline v_float32x4 v_matmul(const v_float32x4& v, const v_float32x4& m0, + const v_float32x4& m1, const v_float32x4& m2, + const v_float32x4& m3) +{ + v128_t v0 = wasm_f32x4_splat(wasm_f32x4_extract_lane(v.val, 0)); + v128_t v1 = wasm_f32x4_splat(wasm_f32x4_extract_lane(v.val, 1)); + v128_t v2 = wasm_f32x4_splat(wasm_f32x4_extract_lane(v.val, 2)); + v128_t v3 = wasm_f32x4_splat(wasm_f32x4_extract_lane(v.val, 3)); + v0 = wasm_f32x4_mul(v0, m0.val); + v1 = wasm_f32x4_mul(v1, m1.val); + v2 = wasm_f32x4_mul(v2, m2.val); + v3 = wasm_f32x4_mul(v3, m3.val); + + return v_float32x4(wasm_f32x4_add(wasm_f32x4_add(v0, v1), wasm_f32x4_add(v2, v3))); +} + +inline v_float32x4 v_matmuladd(const v_float32x4& v, const v_float32x4& m0, + const v_float32x4& m1, const v_float32x4& m2, + const v_float32x4& a) +{ + v128_t v0 = wasm_f32x4_splat(wasm_f32x4_extract_lane(v.val, 0)); + v128_t v1 = wasm_f32x4_splat(wasm_f32x4_extract_lane(v.val, 1)); + v128_t v2 = wasm_f32x4_splat(wasm_f32x4_extract_lane(v.val, 2)); + v0 = wasm_f32x4_mul(v0, m0.val); + v1 = wasm_f32x4_mul(v1, m1.val); + v2 = wasm_f32x4_mul(v2, m2.val); + + return v_float32x4(wasm_f32x4_add(wasm_f32x4_add(v0, v1), wasm_f32x4_add(v2, a.val))); +} + +#define OPENCV_HAL_IMPL_WASM_BIN_OP(bin_op, _Tpvec, intrin) \ +inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \ +{ \ + return _Tpvec(intrin(a.val, b.val)); \ +} \ +inline _Tpvec& operator bin_op##= (_Tpvec& a, const _Tpvec& b) \ +{ \ + a.val = intrin(a.val, b.val); \ + return a; \ +} + +OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_uint8x16, wasm_u8x16_add_saturate) +OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_uint8x16, wasm_u8x16_sub_saturate) +OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_int8x16, wasm_i8x16_add_saturate) +OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_int8x16, wasm_i8x16_sub_saturate) +OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_uint16x8, wasm_u16x8_add_saturate) +OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_uint16x8, wasm_u16x8_sub_saturate) +OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_int16x8, wasm_i16x8_add_saturate) +OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_int16x8, wasm_i16x8_sub_saturate) +OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_uint32x4, wasm_i32x4_add) +OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_uint32x4, wasm_i32x4_sub) +OPENCV_HAL_IMPL_WASM_BIN_OP(*, v_uint32x4, wasm_i32x4_mul) +OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_int32x4, wasm_i32x4_add) +OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_int32x4, wasm_i32x4_sub) +OPENCV_HAL_IMPL_WASM_BIN_OP(*, v_int32x4, wasm_i32x4_mul) +OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_float32x4, wasm_f32x4_add) +OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_float32x4, wasm_f32x4_sub) +OPENCV_HAL_IMPL_WASM_BIN_OP(*, v_float32x4, wasm_f32x4_mul) +OPENCV_HAL_IMPL_WASM_BIN_OP(/, v_float32x4, wasm_f32x4_div) +OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_uint64x2, wasm_i64x2_add) +OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_uint64x2, wasm_i64x2_sub) +OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_int64x2, wasm_i64x2_add) +OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_int64x2, wasm_i64x2_sub) 
+OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_float64x2, wasm_f64x2_add) +OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_float64x2, wasm_f64x2_sub) +OPENCV_HAL_IMPL_WASM_BIN_OP(*, v_float64x2, wasm_f64x2_mul) +OPENCV_HAL_IMPL_WASM_BIN_OP(/, v_float64x2, wasm_f64x2_div) + +// saturating multiply 8-bit, 16-bit +#define OPENCV_HAL_IMPL_WASM_MUL_SAT(_Tpvec, _Tpwvec) \ +inline _Tpvec operator * (const _Tpvec& a, const _Tpvec& b) \ +{ \ + _Tpwvec c, d; \ + v_mul_expand(a, b, c, d); \ + return v_pack(c, d); \ +} \ +inline _Tpvec& operator *= (_Tpvec& a, const _Tpvec& b) \ +{ a = a * b; return a; } + +OPENCV_HAL_IMPL_WASM_MUL_SAT(v_uint8x16, v_uint16x8) +OPENCV_HAL_IMPL_WASM_MUL_SAT(v_int8x16, v_int16x8) +OPENCV_HAL_IMPL_WASM_MUL_SAT(v_uint16x8, v_uint32x4) +OPENCV_HAL_IMPL_WASM_MUL_SAT(v_int16x8, v_int32x4) + +// Multiply and expand +inline void v_mul_expand(const v_uint8x16& a, const v_uint8x16& b, + v_uint16x8& c, v_uint16x8& d) +{ + v_uint16x8 a0, a1, b0, b1; + v_expand(a, a0, a1); + v_expand(b, b0, b1); + c = v_mul_wrap(a0, b0); + d = v_mul_wrap(a1, b1); +} + +inline void v_mul_expand(const v_int8x16& a, const v_int8x16& b, + v_int16x8& c, v_int16x8& d) +{ + v_int16x8 a0, a1, b0, b1; + v_expand(a, a0, a1); + v_expand(b, b0, b1); + c = v_mul_wrap(a0, b0); + d = v_mul_wrap(a1, b1); +} + +inline void v_mul_expand(const v_int16x8& a, const v_int16x8& b, + v_int32x4& c, v_int32x4& d) +{ + v_int32x4 a0, a1, b0, b1; + v_expand(a, a0, a1); + v_expand(b, b0, b1); + c.val = wasm_i32x4_mul(a0.val, b0.val); + d.val = wasm_i32x4_mul(a1.val, b1.val); +} + +inline void v_mul_expand(const v_uint16x8& a, const v_uint16x8& b, + v_uint32x4& c, v_uint32x4& d) +{ + v_uint32x4 a0, a1, b0, b1; + v_expand(a, a0, a1); + v_expand(b, b0, b1); + c.val = wasm_i32x4_mul(a0.val, b0.val); + d.val = wasm_i32x4_mul(a1.val, b1.val); +} + +inline void v_mul_expand(const v_uint32x4& a, const v_uint32x4& b, + v_uint64x2& c, v_uint64x2& d) +{ + v_uint64x2 a0, a1, b0, b1; + v_expand(a, a0, a1); + v_expand(b, b0, b1); + c.val = ((__u64x2)(a0.val) * (__u64x2)(b0.val)); + d.val = ((__u64x2)(a1.val) * (__u64x2)(b1.val)); +} + +inline v_int16x8 v_mul_hi(const v_int16x8& a, const v_int16x8& b) +{ + v_int32x4 a0, a1, b0, b1; + v_expand(a, a0, a1); + v_expand(b, b0, b1); + v128_t c = wasm_i32x4_mul(a0.val, b0.val); + v128_t d = wasm_i32x4_mul(a1.val, b1.val); + return v_int16x8(wasm_v8x16_shuffle(c, d, 2,3,6,7,10,11,14,15,18,19,22,23,26,27,30,31)); +} +inline v_uint16x8 v_mul_hi(const v_uint16x8& a, const v_uint16x8& b) +{ + v_uint32x4 a0, a1, b0, b1; + v_expand(a, a0, a1); + v_expand(b, b0, b1); + v128_t c = wasm_i32x4_mul(a0.val, b0.val); + v128_t d = wasm_i32x4_mul(a1.val, b1.val); + return v_uint16x8(wasm_v8x16_shuffle(c, d, 2,3,6,7,10,11,14,15,18,19,22,23,26,27,30,31)); +} + +//////// Dot Product //////// + +inline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b) +{ + v128_t a0 = wasm_i32x4_shr(wasm_i32x4_shl(a.val, 16), 16); + v128_t a1 = wasm_i32x4_shr(a.val, 16); + v128_t b0 = wasm_i32x4_shr(wasm_i32x4_shl(b.val, 16), 16); + v128_t b1 = wasm_i32x4_shr(b.val, 16); + v128_t c = wasm_i32x4_mul(a0, b0); + v128_t d = wasm_i32x4_mul(a1, b1); + return v_int32x4(wasm_i32x4_add(c, d)); +} + +inline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b, const v_int32x4& c) +{ return v_dotprod(a, b) + c; } + +inline v_int64x2 v_dotprod(const v_int32x4& a, const v_int32x4& b) +{ + v128_t a0 = wasm_i64x2_shr(wasm_i64x2_shl(a.val, 32), 32); + v128_t a1 = wasm_i64x2_shr(a.val, 32); + v128_t b0 = wasm_i64x2_shr(wasm_i64x2_shl(b.val, 32), 32); + v128_t b1 = 
wasm_i64x2_shr(b.val, 32); + v128_t c = (v128_t)((__i64x2)a0 * (__i64x2)b0); + v128_t d = (v128_t)((__i64x2)a1 * (__i64x2)b1); + return v_int64x2(wasm_i64x2_add(c, d)); +} +inline v_int64x2 v_dotprod(const v_int32x4& a, const v_int32x4& b, const v_int64x2& c) +{ + return v_dotprod(a, b) + c; +} + +// 8 >> 32 +inline v_uint32x4 v_dotprod_expand(const v_uint8x16& a, const v_uint8x16& b) +{ + v128_t a0 = wasm_u16x8_shr(wasm_i16x8_shl(a.val, 8), 8); + v128_t a1 = wasm_u16x8_shr(a.val, 8); + v128_t b0 = wasm_u16x8_shr(wasm_i16x8_shl(b.val, 8), 8); + v128_t b1 = wasm_u16x8_shr(b.val, 8); + return v_uint32x4(( + v_dotprod(v_int16x8(a0), v_int16x8(b0)) + + v_dotprod(v_int16x8(a1), v_int16x8(b1))).val + ); +} +inline v_uint32x4 v_dotprod_expand(const v_uint8x16& a, const v_uint8x16& b, const v_uint32x4& c) +{ return v_dotprod_expand(a, b) + c; } + +inline v_int32x4 v_dotprod_expand(const v_int8x16& a, const v_int8x16& b) +{ + v128_t a0 = wasm_i16x8_shr(wasm_i16x8_shl(a.val, 8), 8); + v128_t a1 = wasm_i16x8_shr(a.val, 8); + v128_t b0 = wasm_i16x8_shr(wasm_i16x8_shl(b.val, 8), 8); + v128_t b1 = wasm_i16x8_shr(b.val, 8); + return v_int32x4( + v_dotprod(v_int16x8(a0), v_int16x8(b0)) + + v_dotprod(v_int16x8(a1), v_int16x8(b1)) + ); +} +inline v_int32x4 v_dotprod_expand(const v_int8x16& a, const v_int8x16& b, const v_int32x4& c) +{ return v_dotprod_expand(a, b) + c; } + +// 16 >> 64 +inline v_uint64x2 v_dotprod_expand(const v_uint16x8& a, const v_uint16x8& b) +{ + v128_t a0 = wasm_u32x4_shr(wasm_i32x4_shl(a.val, 16), 16); + v128_t a1 = wasm_u32x4_shr(a.val, 16); + v128_t b0 = wasm_u32x4_shr(wasm_i32x4_shl(b.val, 16), 16); + v128_t b1 = wasm_u32x4_shr(b.val, 16); + return v_uint64x2(( + v_dotprod(v_int32x4(a0), v_int32x4(b0)) + + v_dotprod(v_int32x4(a1), v_int32x4(b1))).val + ); +} +inline v_uint64x2 v_dotprod_expand(const v_uint16x8& a, const v_uint16x8& b, const v_uint64x2& c) +{ return v_dotprod_expand(a, b) + c; } + +inline v_int64x2 v_dotprod_expand(const v_int16x8& a, const v_int16x8& b) +{ + v128_t a0 = wasm_i32x4_shr(wasm_i32x4_shl(a.val, 16), 16); + v128_t a1 = wasm_i32x4_shr(a.val, 16); + v128_t b0 = wasm_i32x4_shr(wasm_i32x4_shl(b.val, 16), 16); + v128_t b1 = wasm_i32x4_shr(b.val, 16); + return v_int64x2(( + v_dotprod(v_int32x4(a0), v_int32x4(b0)) + + v_dotprod(v_int32x4(a1), v_int32x4(b1))) + ); +} + +inline v_int64x2 v_dotprod_expand(const v_int16x8& a, const v_int16x8& b, const v_int64x2& c) +{ return v_dotprod_expand(a, b) + c; } + +// 32 >> 64f +inline v_float64x2 v_dotprod_expand(const v_int32x4& a, const v_int32x4& b) +{ return v_cvt_f64(v_dotprod(a, b)); } +inline v_float64x2 v_dotprod_expand(const v_int32x4& a, const v_int32x4& b, const v_float64x2& c) +{ return v_dotprod_expand(a, b) + c; } + +//////// Fast Dot Product //////// + +// 16 >> 32 +inline v_int32x4 v_dotprod_fast(const v_int16x8& a, const v_int16x8& b) +{ return v_dotprod(a, b); } +inline v_int32x4 v_dotprod_fast(const v_int16x8& a, const v_int16x8& b, const v_int32x4& c) +{ return v_dotprod(a, b, c); } + +// 32 >> 64 +inline v_int64x2 v_dotprod_fast(const v_int32x4& a, const v_int32x4& b) +{ return v_dotprod(a, b); } +inline v_int64x2 v_dotprod_fast(const v_int32x4& a, const v_int32x4& b, const v_int64x2& c) +{ return v_dotprod(a, b, c); } + +// 8 >> 32 +inline v_uint32x4 v_dotprod_expand_fast(const v_uint8x16& a, const v_uint8x16& b) +{ return v_dotprod_expand(a, b); } +inline v_uint32x4 v_dotprod_expand_fast(const v_uint8x16& a, const v_uint8x16& b, const v_uint32x4& c) +{ return v_dotprod_expand(a, b, c); } +inline 
v_int32x4 v_dotprod_expand_fast(const v_int8x16& a, const v_int8x16& b) +{ return v_dotprod_expand(a, b); } +inline v_int32x4 v_dotprod_expand_fast(const v_int8x16& a, const v_int8x16& b, const v_int32x4& c) +{ return v_dotprod_expand(a, b, c); } + +// 16 >> 64 +inline v_uint64x2 v_dotprod_expand_fast(const v_uint16x8& a, const v_uint16x8& b) +{ return v_dotprod_expand(a, b); } +inline v_uint64x2 v_dotprod_expand_fast(const v_uint16x8& a, const v_uint16x8& b, const v_uint64x2& c) +{ return v_dotprod_expand(a, b, c); } +inline v_int64x2 v_dotprod_expand_fast(const v_int16x8& a, const v_int16x8& b) +{ return v_dotprod_expand(a, b); } +inline v_int64x2 v_dotprod_expand_fast(const v_int16x8& a, const v_int16x8& b, const v_int64x2& c) +{ return v_dotprod_expand(a, b, c); } + +// 32 >> 64f +inline v_float64x2 v_dotprod_expand_fast(const v_int32x4& a, const v_int32x4& b) +{ return v_dotprod_expand(a, b); } +inline v_float64x2 v_dotprod_expand_fast(const v_int32x4& a, const v_int32x4& b, const v_float64x2& c) +{ return v_dotprod_expand(a, b, c); } + +#define OPENCV_HAL_IMPL_WASM_LOGIC_OP(_Tpvec) \ +OPENCV_HAL_IMPL_WASM_BIN_OP(&, _Tpvec, wasm_v128_and) \ +OPENCV_HAL_IMPL_WASM_BIN_OP(|, _Tpvec, wasm_v128_or) \ +OPENCV_HAL_IMPL_WASM_BIN_OP(^, _Tpvec, wasm_v128_xor) \ +inline _Tpvec operator ~ (const _Tpvec& a) \ +{ \ + return _Tpvec(wasm_v128_not(a.val)); \ +} + +OPENCV_HAL_IMPL_WASM_LOGIC_OP(v_uint8x16) +OPENCV_HAL_IMPL_WASM_LOGIC_OP(v_int8x16) +OPENCV_HAL_IMPL_WASM_LOGIC_OP(v_uint16x8) +OPENCV_HAL_IMPL_WASM_LOGIC_OP(v_int16x8) +OPENCV_HAL_IMPL_WASM_LOGIC_OP(v_uint32x4) +OPENCV_HAL_IMPL_WASM_LOGIC_OP(v_int32x4) +OPENCV_HAL_IMPL_WASM_LOGIC_OP(v_uint64x2) +OPENCV_HAL_IMPL_WASM_LOGIC_OP(v_int64x2) +OPENCV_HAL_IMPL_WASM_LOGIC_OP(v_float32x4) +OPENCV_HAL_IMPL_WASM_LOGIC_OP(v_float64x2) + +inline v_float32x4 v_sqrt(const v_float32x4& x) +{ + return v_float32x4(wasm_f32x4_sqrt(x.val)); +} + +inline v_float32x4 v_invsqrt(const v_float32x4& x) +{ + const v128_t _1_0 = wasm_f32x4_splat(1.0); + return v_float32x4(wasm_f32x4_div(_1_0, wasm_f32x4_sqrt(x.val))); +} + +inline v_float64x2 v_sqrt(const v_float64x2& x) +{ + return v_float64x2(wasm_f64x2_sqrt(x.val)); +} + +inline v_float64x2 v_invsqrt(const v_float64x2& x) +{ + const v128_t _1_0 = wasm_f64x2_splat(1.0); + return v_float64x2(wasm_f64x2_div(_1_0, wasm_f64x2_sqrt(x.val))); +} + +#define OPENCV_HAL_IMPL_WASM_ABS_INT_FUNC(_Tpuvec, _Tpsvec, suffix, zsuffix, shiftWidth) \ +inline _Tpuvec v_abs(const _Tpsvec& x) \ +{ \ + v128_t s = wasm_##suffix##_shr(x.val, shiftWidth); \ + v128_t f = wasm_##zsuffix##_shr(x.val, shiftWidth); \ + return _Tpuvec(wasm_##zsuffix##_add(wasm_v128_xor(x.val, f), s)); \ +} + +OPENCV_HAL_IMPL_WASM_ABS_INT_FUNC(v_uint8x16, v_int8x16, u8x16, i8x16, 7) +OPENCV_HAL_IMPL_WASM_ABS_INT_FUNC(v_uint16x8, v_int16x8, u16x8, i16x8, 15) +OPENCV_HAL_IMPL_WASM_ABS_INT_FUNC(v_uint32x4, v_int32x4, u32x4, i32x4, 31) + +inline v_float32x4 v_abs(const v_float32x4& x) +{ return v_float32x4(wasm_f32x4_abs(x.val)); } +inline v_float64x2 v_abs(const v_float64x2& x) +{ + return v_float64x2(wasm_f64x2_abs(x.val)); +} + +// TODO: exp, log, sin, cos + +#define OPENCV_HAL_IMPL_WASM_BIN_FUNC(_Tpvec, func, intrin) \ +inline _Tpvec func(const _Tpvec& a, const _Tpvec& b) \ +{ \ + return _Tpvec(intrin(a.val, b.val)); \ +} + +OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_float32x4, v_min, wasm_f32x4_min) +OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_float32x4, v_max, wasm_f32x4_max) +OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_float64x2, v_min, wasm_f64x2_min) +OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_float64x2, 
v_max, wasm_f64x2_max) + +#define OPENCV_HAL_IMPL_WASM_MINMAX_S_INIT_FUNC(_Tpvec, suffix) \ +inline _Tpvec v_min(const _Tpvec& a, const _Tpvec& b) \ +{ \ + return _Tpvec(wasm_v128_bitselect(b.val, a.val, wasm_##suffix##_gt(a.val, b.val))); \ +} \ +inline _Tpvec v_max(const _Tpvec& a, const _Tpvec& b) \ +{ \ + return _Tpvec(wasm_v128_bitselect(a.val, b.val, wasm_##suffix##_gt(a.val, b.val))); \ +} + +OPENCV_HAL_IMPL_WASM_MINMAX_S_INIT_FUNC(v_int8x16, i8x16) +OPENCV_HAL_IMPL_WASM_MINMAX_S_INIT_FUNC(v_int16x8, i16x8) +OPENCV_HAL_IMPL_WASM_MINMAX_S_INIT_FUNC(v_int32x4, i32x4) + +#define OPENCV_HAL_IMPL_WASM_MINMAX_U_INIT_FUNC(_Tpvec, suffix, deltaNum) \ +inline _Tpvec v_min(const _Tpvec& a, const _Tpvec& b) \ +{ \ + v128_t delta = wasm_##suffix##_splat(deltaNum); \ + v128_t mask = wasm_##suffix##_gt(wasm_v128_xor(a.val, delta), wasm_v128_xor(b.val, delta)); \ + return _Tpvec(wasm_v128_bitselect(b.val, a.val, mask)); \ +} \ +inline _Tpvec v_max(const _Tpvec& a, const _Tpvec& b) \ +{ \ + v128_t delta = wasm_##suffix##_splat(deltaNum); \ + v128_t mask = wasm_##suffix##_gt(wasm_v128_xor(a.val, delta), wasm_v128_xor(b.val, delta)); \ + return _Tpvec(wasm_v128_bitselect(a.val, b.val, mask)); \ +} + +OPENCV_HAL_IMPL_WASM_MINMAX_U_INIT_FUNC(v_uint8x16, i8x16, (schar)0x80) +OPENCV_HAL_IMPL_WASM_MINMAX_U_INIT_FUNC(v_uint16x8, i16x8, (short)0x8000) +OPENCV_HAL_IMPL_WASM_MINMAX_U_INIT_FUNC(v_uint32x4, i32x4, (int)0x80000000) + +#define OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(_Tpvec, suffix, esuffix) \ +inline _Tpvec operator == (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(wasm_##esuffix##_eq(a.val, b.val)); } \ +inline _Tpvec operator != (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(wasm_##esuffix##_ne(a.val, b.val)); } \ +inline _Tpvec operator < (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(wasm_##suffix##_lt(a.val, b.val)); } \ +inline _Tpvec operator > (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(wasm_##suffix##_gt(a.val, b.val)); } \ +inline _Tpvec operator <= (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(wasm_##suffix##_le(a.val, b.val)); } \ +inline _Tpvec operator >= (const _Tpvec& a, const _Tpvec& b) \ +{ return _Tpvec(wasm_##suffix##_ge(a.val, b.val)); } + +OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_uint8x16, u8x16, i8x16) +OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_int8x16, i8x16, i8x16) +OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_uint16x8, u16x8, i16x8) +OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_int16x8, i16x8, i16x8) +OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_uint32x4, u32x4, i32x4) +OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_int32x4, i32x4, i32x4) +OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_float32x4, f32x4, f32x4) +OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_float64x2, f64x2, f64x2) + +#define OPENCV_HAL_IMPL_WASM_64BIT_CMP_OP(_Tpvec, cast) \ +inline _Tpvec operator == (const _Tpvec& a, const _Tpvec& b) \ +{ return cast(v_reinterpret_as_f64(a) == v_reinterpret_as_f64(b)); } \ +inline _Tpvec operator != (const _Tpvec& a, const _Tpvec& b) \ +{ return cast(v_reinterpret_as_f64(a) != v_reinterpret_as_f64(b)); } + +OPENCV_HAL_IMPL_WASM_64BIT_CMP_OP(v_uint64x2, v_reinterpret_as_u64) +OPENCV_HAL_IMPL_WASM_64BIT_CMP_OP(v_int64x2, v_reinterpret_as_s64) + +inline v_float32x4 v_not_nan(const v_float32x4& a) +{ + v128_t z = wasm_i32x4_splat(0x7fffffff); + v128_t t = wasm_i32x4_splat(0x7f800000); + return v_float32x4(wasm_u32x4_lt(wasm_v128_and(a.val, z), t)); +} +inline v_float64x2 v_not_nan(const v_float64x2& a) +{ + v128_t z = wasm_i64x2_splat(0x7fffffffffffffff); + v128_t t = wasm_i64x2_splat(0x7ff0000000000000); + return 
v_float64x2((__u64x2)(wasm_v128_and(a.val, z)) < (__u64x2)t); +} + +OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_uint8x16, v_add_wrap, wasm_i8x16_add) +OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_int8x16, v_add_wrap, wasm_i8x16_add) +OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_uint16x8, v_add_wrap, wasm_i16x8_add) +OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_int16x8, v_add_wrap, wasm_i16x8_add) +OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_uint8x16, v_sub_wrap, wasm_i8x16_sub) +OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_int8x16, v_sub_wrap, wasm_i8x16_sub) +OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_uint16x8, v_sub_wrap, wasm_i16x8_sub) +OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_int16x8, v_sub_wrap, wasm_i16x8_sub) +#if (__EMSCRIPTEN_major__ * 1000000 + __EMSCRIPTEN_minor__ * 1000 + __EMSCRIPTEN_tiny__) >= (1039012) +// details: https://github.com/opencv/opencv/issues/18097 ( https://github.com/emscripten-core/emscripten/issues/12018 ) +// 1.39.12: https://github.com/emscripten-core/emscripten/commit/cd801d0f110facfd694212a3c8b2ed2ffcd630e2 +inline v_uint8x16 v_mul_wrap(const v_uint8x16& a, const v_uint8x16& b) +{ + uchar a_[16], b_[16]; + wasm_v128_store(a_, a.val); + wasm_v128_store(b_, b.val); + for (int i = 0; i < 16; i++) + a_[i] = (uchar)(a_[i] * b_[i]); + return v_uint8x16(wasm_v128_load(a_)); +} +inline v_int8x16 v_mul_wrap(const v_int8x16& a, const v_int8x16& b) +{ + schar a_[16], b_[16]; + wasm_v128_store(a_, a.val); + wasm_v128_store(b_, b.val); + for (int i = 0; i < 16; i++) + a_[i] = (schar)(a_[i] * b_[i]); + return v_int8x16(wasm_v128_load(a_)); +} +#else +OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_uint8x16, v_mul_wrap, wasm_i8x16_mul) +OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_int8x16, v_mul_wrap, wasm_i8x16_mul) +#endif +OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_uint16x8, v_mul_wrap, wasm_i16x8_mul) +OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_int16x8, v_mul_wrap, wasm_i16x8_mul) + + +/** Absolute difference **/ + +inline v_uint8x16 v_absdiff(const v_uint8x16& a, const v_uint8x16& b) +{ return v_add_wrap(a - b, b - a); } +inline v_uint16x8 v_absdiff(const v_uint16x8& a, const v_uint16x8& b) +{ return v_add_wrap(a - b, b - a); } +inline v_uint32x4 v_absdiff(const v_uint32x4& a, const v_uint32x4& b) +{ return v_max(a, b) - v_min(a, b); } + +inline v_uint8x16 v_absdiff(const v_int8x16& a, const v_int8x16& b) +{ + v_int8x16 d = v_sub_wrap(a, b); + v_int8x16 m = a < b; + return v_reinterpret_as_u8(v_sub_wrap(d ^ m, m)); +} +inline v_uint16x8 v_absdiff(const v_int16x8& a, const v_int16x8& b) +{ + return v_reinterpret_as_u16(v_sub_wrap(v_max(a, b), v_min(a, b))); +} +inline v_uint32x4 v_absdiff(const v_int32x4& a, const v_int32x4& b) +{ + v_int32x4 d = a - b; + v_int32x4 m = a < b; + return v_reinterpret_as_u32((d ^ m) - m); +} + +/** Saturating absolute difference **/ +inline v_int8x16 v_absdiffs(const v_int8x16& a, const v_int8x16& b) +{ + v_int8x16 d = a - b; + v_int8x16 m = a < b; + return (d ^ m) - m; + } +inline v_int16x8 v_absdiffs(const v_int16x8& a, const v_int16x8& b) +{ return v_max(a, b) - v_min(a, b); } + + +inline v_int32x4 v_fma(const v_int32x4& a, const v_int32x4& b, const v_int32x4& c) +{ + return a * b + c; +} + +inline v_int32x4 v_muladd(const v_int32x4& a, const v_int32x4& b, const v_int32x4& c) +{ + return v_fma(a, b, c); +} + +inline v_float32x4 v_fma(const v_float32x4& a, const v_float32x4& b, const v_float32x4& c) +{ + return a * b + c; +} + +inline v_float64x2 v_fma(const v_float64x2& a, const v_float64x2& b, const v_float64x2& c) +{ + return a * b + c; +} + +inline v_float32x4 v_absdiff(const v_float32x4& a, const v_float32x4& b) +{ + v128_t absmask_vec = wasm_i32x4_splat(0x7fffffff); + 
return v_float32x4(wasm_v128_and(wasm_f32x4_sub(a.val, b.val), absmask_vec)); +} +inline v_float64x2 v_absdiff(const v_float64x2& a, const v_float64x2& b) +{ + v128_t absmask_vec = wasm_u64x2_shr(wasm_i32x4_splat(-1), 1); + return v_float64x2(wasm_v128_and(wasm_f64x2_sub(a.val, b.val), absmask_vec)); +} + +#define OPENCV_HAL_IMPL_WASM_MISC_FLT_OP(_Tpvec, suffix) \ +inline _Tpvec v_magnitude(const _Tpvec& a, const _Tpvec& b) \ +{ \ + v128_t a_Square = wasm_##suffix##_mul(a.val, a.val); \ + v128_t b_Square = wasm_##suffix##_mul(b.val, b.val); \ + return _Tpvec(wasm_##suffix##_sqrt(wasm_##suffix##_add(a_Square, b_Square))); \ +} \ +inline _Tpvec v_sqr_magnitude(const _Tpvec& a, const _Tpvec& b) \ +{ \ + v128_t a_Square = wasm_##suffix##_mul(a.val, a.val); \ + v128_t b_Square = wasm_##suffix##_mul(b.val, b.val); \ + return _Tpvec(wasm_##suffix##_add(a_Square, b_Square)); \ +} \ +inline _Tpvec v_muladd(const _Tpvec& a, const _Tpvec& b, const _Tpvec& c) \ +{ \ + return _Tpvec(wasm_##suffix##_add(wasm_##suffix##_mul(a.val, b.val), c.val)); \ +} + +OPENCV_HAL_IMPL_WASM_MISC_FLT_OP(v_float32x4, f32x4) +OPENCV_HAL_IMPL_WASM_MISC_FLT_OP(v_float64x2, f64x2) + +#define OPENCV_HAL_IMPL_WASM_SHIFT_OP(_Tpuvec, _Tpsvec, suffix, ssuffix) \ +inline _Tpuvec operator << (const _Tpuvec& a, int imm) \ +{ \ + return _Tpuvec(wasm_##suffix##_shl(a.val, imm)); \ +} \ +inline _Tpsvec operator << (const _Tpsvec& a, int imm) \ +{ \ + return _Tpsvec(wasm_##suffix##_shl(a.val, imm)); \ +} \ +inline _Tpuvec operator >> (const _Tpuvec& a, int imm) \ +{ \ + return _Tpuvec(wasm_##ssuffix##_shr(a.val, imm)); \ +} \ +inline _Tpsvec operator >> (const _Tpsvec& a, int imm) \ +{ \ + return _Tpsvec(wasm_##suffix##_shr(a.val, imm)); \ +} \ +template<int imm> \ +inline _Tpuvec v_shl(const _Tpuvec& a) \ +{ \ + return _Tpuvec(wasm_##suffix##_shl(a.val, imm)); \ +} \ +template<int imm> \ +inline _Tpsvec v_shl(const _Tpsvec& a) \ +{ \ + return _Tpsvec(wasm_##suffix##_shl(a.val, imm)); \ +} \ +template<int imm> \ +inline _Tpuvec v_shr(const _Tpuvec& a) \ +{ \ + return _Tpuvec(wasm_##ssuffix##_shr(a.val, imm)); \ +} \ +template<int imm> \ +inline _Tpsvec v_shr(const _Tpsvec& a) \ +{ \ + return _Tpsvec(wasm_##suffix##_shr(a.val, imm)); \ +} + +OPENCV_HAL_IMPL_WASM_SHIFT_OP(v_uint8x16, v_int8x16, i8x16, u8x16) +OPENCV_HAL_IMPL_WASM_SHIFT_OP(v_uint16x8, v_int16x8, i16x8, u16x8) +OPENCV_HAL_IMPL_WASM_SHIFT_OP(v_uint32x4, v_int32x4, i32x4, u32x4) +OPENCV_HAL_IMPL_WASM_SHIFT_OP(v_uint64x2, v_int64x2, i64x2, u64x2) + +namespace hal_wasm_internal +{ + template <int imm, + bool is_invalid = ((imm < 0) || (imm > 16)), + bool is_first = (imm == 0), + bool is_second = (imm == 16), + bool is_other = (((imm > 0) && (imm < 16)))> + class v_wasm_palignr_u8_class; + + template <int imm> + class v_wasm_palignr_u8_class<imm, true, false, false, false>; + + template <int imm> + class v_wasm_palignr_u8_class<imm, false, true, false, false> + { + public: + inline v128_t operator()(const v128_t& a, const v128_t&) const + { + return a; + } + }; + + template <int imm> + class v_wasm_palignr_u8_class<imm, false, false, true, false> + { + public: + inline v128_t operator()(const v128_t&, const v128_t& b) const + { + return b; + } + }; + + template <int imm> + class v_wasm_palignr_u8_class<imm, false, false, false, true> + { + public: + inline v128_t operator()(const v128_t& a, const v128_t& b) const + { + enum { imm2 = (sizeof(v128_t) - imm) }; + return wasm_v8x16_shuffle(a, b, + imm, imm+1, imm+2, imm+3, + imm+4, imm+5, imm+6, imm+7, + imm+8, imm+9, imm+10, imm+11, + imm+12, imm+13, imm+14, imm+15); + } + }; + + template <int imm> + inline v128_t v_wasm_palignr_u8(const v128_t& a, const v128_t& b) + { + CV_StaticAssert((imm >= 0) && (imm <= 16), "Invalid imm for v_wasm_palignr_u8."); + return v_wasm_palignr_u8_class<imm>()(a,
b); + } +} + +template<int imm, typename _Tpvec> +inline _Tpvec v_rotate_right(const _Tpvec &a) +{ + using namespace hal_wasm_internal; + enum { imm2 = (imm * sizeof(typename _Tpvec::lane_type)) }; + v128_t z = wasm_i8x16_splat(0); + return _Tpvec(v_wasm_palignr_u8<imm2>(a.val, z)); +} + +template<int imm, typename _Tpvec> +inline _Tpvec v_rotate_left(const _Tpvec &a) +{ + using namespace hal_wasm_internal; + enum { imm2 = ((_Tpvec::nlanes - imm) * sizeof(typename _Tpvec::lane_type)) }; + v128_t z = wasm_i8x16_splat(0); + return _Tpvec(v_wasm_palignr_u8<imm2>(z, a.val)); +} + +template<int imm, typename _Tpvec> +inline _Tpvec v_rotate_right(const _Tpvec &a, const _Tpvec &b) +{ + using namespace hal_wasm_internal; + enum { imm2 = (imm * sizeof(typename _Tpvec::lane_type)) }; + return _Tpvec(v_wasm_palignr_u8<imm2>(a.val, b.val)); +} + +template<int imm, typename _Tpvec> +inline _Tpvec v_rotate_left(const _Tpvec &a, const _Tpvec &b) +{ + using namespace hal_wasm_internal; + enum { imm2 = ((_Tpvec::nlanes - imm) * sizeof(typename _Tpvec::lane_type)) }; + return _Tpvec(v_wasm_palignr_u8<imm2>(b.val, a.val)); +} + +#define OPENCV_HAL_IMPL_WASM_LOADSTORE_INT_OP(_Tpvec, _Tp) \ +inline _Tpvec v_load(const _Tp* ptr) \ +{ return _Tpvec(wasm_v128_load(ptr)); } \ +inline _Tpvec v_load_aligned(const _Tp* ptr) \ +{ return _Tpvec(wasm_v128_load(ptr)); } \ +inline _Tpvec v_load_low(const _Tp* ptr) \ +{ \ + _Tp tmp[_Tpvec::nlanes] = {0}; \ + for (int i=0; i<_Tpvec::nlanes/2; ++i) { \ + tmp[i] = ptr[i]; \ + } \ + return _Tpvec(wasm_v128_load(tmp)); \ +} \ +inline _Tpvec v_load_halves(const _Tp* ptr0, const _Tp* ptr1) \ +{ \ + _Tp tmp[_Tpvec::nlanes]; \ + for (int i=0; i<_Tpvec::nlanes/2; ++i) { \ + tmp[i] = ptr0[i]; \ + tmp[i+_Tpvec::nlanes/2] = ptr1[i]; \ + } \ + return _Tpvec(wasm_v128_load(tmp)); \ +} \ +inline void v_store(_Tp* ptr, const _Tpvec& a) \ +{ wasm_v128_store(ptr, a.val); } \ +inline void v_store_aligned(_Tp* ptr, const _Tpvec& a) \ +{ wasm_v128_store(ptr, a.val); } \ +inline void v_store_aligned_nocache(_Tp* ptr, const _Tpvec& a) \ +{ wasm_v128_store(ptr, a.val); } \ +inline void v_store(_Tp* ptr, const _Tpvec& a, hal::StoreMode /*mode*/) \ +{ \ + wasm_v128_store(ptr, a.val); \ +} \ +inline void v_store_low(_Tp* ptr, const _Tpvec& a) \ +{ \ + _Tpvec::lane_type a_[_Tpvec::nlanes]; \ + wasm_v128_store(a_, a.val); \ + for (int i = 0; i < (_Tpvec::nlanes / 2); i++) \ + ptr[i] = a_[i]; \ +} \ +inline void v_store_high(_Tp* ptr, const _Tpvec& a) \ +{ \ + _Tpvec::lane_type a_[_Tpvec::nlanes]; \ + wasm_v128_store(a_, a.val); \ + for (int i = 0; i < (_Tpvec::nlanes / 2); i++) \ + ptr[i] = a_[i + (_Tpvec::nlanes / 2)]; \ +} + +OPENCV_HAL_IMPL_WASM_LOADSTORE_INT_OP(v_uint8x16, uchar) +OPENCV_HAL_IMPL_WASM_LOADSTORE_INT_OP(v_int8x16, schar) +OPENCV_HAL_IMPL_WASM_LOADSTORE_INT_OP(v_uint16x8, ushort) +OPENCV_HAL_IMPL_WASM_LOADSTORE_INT_OP(v_int16x8, short) +OPENCV_HAL_IMPL_WASM_LOADSTORE_INT_OP(v_uint32x4, unsigned) +OPENCV_HAL_IMPL_WASM_LOADSTORE_INT_OP(v_int32x4, int) +OPENCV_HAL_IMPL_WASM_LOADSTORE_INT_OP(v_uint64x2, uint64) +OPENCV_HAL_IMPL_WASM_LOADSTORE_INT_OP(v_int64x2, int64) +OPENCV_HAL_IMPL_WASM_LOADSTORE_INT_OP(v_float32x4, float) +OPENCV_HAL_IMPL_WASM_LOADSTORE_INT_OP(v_float64x2, double) + + +/** Reverse **/ +inline v_uint8x16 v_reverse(const v_uint8x16 &a) +{ return v_uint8x16(wasm_v8x16_shuffle(a.val, a.val, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)); } + +inline v_int8x16 v_reverse(const v_int8x16 &a) +{ return v_reinterpret_as_s8(v_reverse(v_reinterpret_as_u8(a))); } + +inline v_uint16x8 v_reverse(const v_uint16x8 &a) +{ return v_uint16x8(wasm_v8x16_shuffle(a.val, a.val, 14, 15, 12, 13, 10, 11, 8, 9,
6, 7, 4, 5, 2, 3, 0, 1)); } + +inline v_int16x8 v_reverse(const v_int16x8 &a) +{ return v_reinterpret_as_s16(v_reverse(v_reinterpret_as_u16(a))); } + +inline v_uint32x4 v_reverse(const v_uint32x4 &a) +{ return v_uint32x4(wasm_v8x16_shuffle(a.val, a.val, 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3)); } + +inline v_int32x4 v_reverse(const v_int32x4 &a) +{ return v_reinterpret_as_s32(v_reverse(v_reinterpret_as_u32(a))); } + +inline v_float32x4 v_reverse(const v_float32x4 &a) +{ return v_reinterpret_as_f32(v_reverse(v_reinterpret_as_u32(a))); } + +inline v_uint64x2 v_reverse(const v_uint64x2 &a) +{ return v_uint64x2(wasm_v8x16_shuffle(a.val, a.val, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7)); } + +inline v_int64x2 v_reverse(const v_int64x2 &a) +{ return v_reinterpret_as_s64(v_reverse(v_reinterpret_as_u64(a))); } + +inline v_float64x2 v_reverse(const v_float64x2 &a) +{ return v_reinterpret_as_f64(v_reverse(v_reinterpret_as_u64(a))); } + + +#define OPENCV_HAL_IMPL_WASM_REDUCE_OP_4_SUM(_Tpvec, scalartype, regtype, suffix, esuffix) \ +inline scalartype v_reduce_sum(const _Tpvec& a) \ +{ \ + regtype val = a.val; \ + val = wasm_##suffix##_add(val, wasm_v8x16_shuffle(val, val, 8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7)); \ + val = wasm_##suffix##_add(val, wasm_v8x16_shuffle(val, val, 4,5,6,7,8,9,10,11,12,13,14,15,0,1,2,3)); \ + return (scalartype)wasm_##esuffix##_extract_lane(val, 0); \ +} + +OPENCV_HAL_IMPL_WASM_REDUCE_OP_4_SUM(v_uint32x4, unsigned, v128_t, i32x4, i32x4) +OPENCV_HAL_IMPL_WASM_REDUCE_OP_4_SUM(v_int32x4, int, v128_t, i32x4, i32x4) +OPENCV_HAL_IMPL_WASM_REDUCE_OP_4_SUM(v_float32x4, float, v128_t, f32x4, f32x4) + +// To do: Optimize v_reduce_sum with wasm intrin. +// Now use fallback implementation as there is no widening op in wasm intrin. 
+ +#define OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(_Tpvec, scalartype) \ +inline scalartype v_reduce_sum(const _Tpvec& a) \ +{ \ + _Tpvec::lane_type a_[_Tpvec::nlanes]; \ + wasm_v128_store(a_, a.val); \ + scalartype c = a_[0]; \ + for (int i = 1; i < _Tpvec::nlanes; i++) \ + c += a_[i]; \ + return c; \ +} + +OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_uint8x16, unsigned) +OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_int8x16, int) +OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_uint16x8, unsigned) +OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_int16x8, int) + + +#define OPENCV_HAL_IMPL_WASM_REDUCE_OP_2_SUM(_Tpvec, scalartype, regtype, suffix, esuffix) \ +inline scalartype v_reduce_sum(const _Tpvec& a) \ +{ \ + regtype val = a.val; \ + val = wasm_##suffix##_add(val, wasm_v8x16_shuffle(val, val, 8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7)); \ + return (scalartype)wasm_##esuffix##_extract_lane(val, 0); \ +} +OPENCV_HAL_IMPL_WASM_REDUCE_OP_2_SUM(v_uint64x2, uint64, v128_t, i64x2, i64x2) +OPENCV_HAL_IMPL_WASM_REDUCE_OP_2_SUM(v_int64x2, int64, v128_t, i64x2, i64x2) +OPENCV_HAL_IMPL_WASM_REDUCE_OP_2_SUM(v_float64x2, double, v128_t, f64x2,f64x2) + +inline v_float32x4 v_reduce_sum4(const v_float32x4& a, const v_float32x4& b, + const v_float32x4& c, const v_float32x4& d) +{ + v128_t ac = wasm_f32x4_add(wasm_unpacklo_i32x4(a.val, c.val), wasm_unpackhi_i32x4(a.val, c.val)); + v128_t bd = wasm_f32x4_add(wasm_unpacklo_i32x4(b.val, d.val), wasm_unpackhi_i32x4(b.val, d.val)); + return v_float32x4(wasm_f32x4_add(wasm_unpacklo_i32x4(ac, bd), wasm_unpackhi_i32x4(ac, bd))); +} + +#define OPENCV_HAL_IMPL_WASM_REDUCE_OP(_Tpvec, scalartype, func, scalar_func) \ +inline scalartype v_reduce_##func(const _Tpvec& a) \ +{ \ + scalartype buf[_Tpvec::nlanes]; \ + v_store(buf, a); \ + scalartype tmp = buf[0]; \ + for (int i=1; i<_Tpvec::nlanes; ++i) { \ + tmp = scalar_func(tmp, buf[i]); \ + } \ + return tmp; \ +} + +OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_uint8x16, uchar, max, std::max) +OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_uint8x16, uchar, min, std::min) +OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_int8x16, schar, max, std::max) +OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_int8x16, schar, min, std::min) +OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_uint16x8, ushort, max, std::max) +OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_uint16x8, ushort, min, std::min) +OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_int16x8, short, max, std::max) +OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_int16x8, short, min, std::min) +OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_uint32x4, unsigned, max, std::max) +OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_uint32x4, unsigned, min, std::min) +OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_int32x4, int, max, std::max) +OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_int32x4, int, min, std::min) +OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_float32x4, float, max, std::max) +OPENCV_HAL_IMPL_WASM_REDUCE_OP(v_float32x4, float, min, std::min) + +inline unsigned v_reduce_sad(const v_uint8x16& a, const v_uint8x16& b) +{ + v_uint16x8 l16, h16; + v_uint32x4 l16_l32, l16_h32, h16_l32, h16_h32; + v_expand(v_absdiff(a, b), l16, h16); + v_expand(l16, l16_l32, l16_h32); + v_expand(h16, h16_l32, h16_h32); + return v_reduce_sum(l16_l32+l16_h32+h16_l32+h16_h32); +} +inline unsigned v_reduce_sad(const v_int8x16& a, const v_int8x16& b) +{ + v_uint16x8 l16, h16; + v_uint32x4 l16_l32, l16_h32, h16_l32, h16_h32; + v_expand(v_absdiff(a, b), l16, h16); + v_expand(l16, l16_l32, l16_h32); + v_expand(h16, h16_l32, h16_h32); + return v_reduce_sum(l16_l32+l16_h32+h16_l32+h16_h32); +} +inline unsigned v_reduce_sad(const v_uint16x8& a, const v_uint16x8& b) +{ + v_uint32x4 l, h; + 
v_expand(v_absdiff(a, b), l, h); + return v_reduce_sum(l + h); +} +inline unsigned v_reduce_sad(const v_int16x8& a, const v_int16x8& b) +{ + v_uint32x4 l, h; + v_expand(v_absdiff(a, b), l, h); + return v_reduce_sum(l + h); +} +inline unsigned v_reduce_sad(const v_uint32x4& a, const v_uint32x4& b) +{ + return v_reduce_sum(v_absdiff(a, b)); +} +inline unsigned v_reduce_sad(const v_int32x4& a, const v_int32x4& b) +{ + return v_reduce_sum(v_absdiff(a, b)); +} +inline float v_reduce_sad(const v_float32x4& a, const v_float32x4& b) +{ + return v_reduce_sum(v_absdiff(a, b)); +} + +inline v_uint8x16 v_popcount(const v_uint8x16& a) +{ + v128_t m1 = wasm_i32x4_splat(0x55555555); + v128_t m2 = wasm_i32x4_splat(0x33333333); + v128_t m4 = wasm_i32x4_splat(0x0f0f0f0f); + v128_t p = a.val; + p = wasm_i32x4_add(wasm_v128_and(wasm_u32x4_shr(p, 1), m1), wasm_v128_and(p, m1)); + p = wasm_i32x4_add(wasm_v128_and(wasm_u32x4_shr(p, 2), m2), wasm_v128_and(p, m2)); + p = wasm_i32x4_add(wasm_v128_and(wasm_u32x4_shr(p, 4), m4), wasm_v128_and(p, m4)); + return v_uint8x16(p); +} +inline v_uint16x8 v_popcount(const v_uint16x8& a) +{ + v_uint8x16 p = v_popcount(v_reinterpret_as_u8(a)); + p += v_rotate_right<1>(p); + return v_reinterpret_as_u16(p) & v_setall_u16(0x00ff); +} +inline v_uint32x4 v_popcount(const v_uint32x4& a) +{ + v_uint8x16 p = v_popcount(v_reinterpret_as_u8(a)); + p += v_rotate_right<1>(p); + p += v_rotate_right<2>(p); + return v_reinterpret_as_u32(p) & v_setall_u32(0x000000ff); +} +inline v_uint64x2 v_popcount(const v_uint64x2& a) +{ + uint64 a_[2], b_[2] = { 0 }; + wasm_v128_store(a_, a.val); + for (int i = 0; i < 16; i++) + b_[i / 8] += popCountTable[((uint8_t*)a_)[i]]; + return v_uint64x2(wasm_v128_load(b_)); +} +inline v_uint8x16 v_popcount(const v_int8x16& a) +{ return v_popcount(v_reinterpret_as_u8(a)); } +inline v_uint16x8 v_popcount(const v_int16x8& a) +{ return v_popcount(v_reinterpret_as_u16(a)); } +inline v_uint32x4 v_popcount(const v_int32x4& a) +{ return v_popcount(v_reinterpret_as_u32(a)); } +inline v_uint64x2 v_popcount(const v_int64x2& a) +{ return v_popcount(v_reinterpret_as_u64(a)); } + +#define OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(_Tpvec, suffix, scalarType) \ +inline int v_signmask(const _Tpvec& a) \ +{ \ + _Tpvec::lane_type a_[_Tpvec::nlanes]; \ + wasm_v128_store(a_, a.val); \ + int mask = 0; \ + for (int i = 0; i < _Tpvec::nlanes; i++) \ + mask |= (reinterpret_int(a_[i]) < 0) << i; \ + return mask; \ +} \ +inline bool v_check_all(const _Tpvec& a) \ +{ return wasm_i8x16_all_true(wasm_##suffix##_lt(a.val, wasm_##suffix##_splat(0))); } \ +inline bool v_check_any(const _Tpvec& a) \ +{ return wasm_i8x16_any_true(wasm_##suffix##_lt(a.val, wasm_##suffix##_splat(0)));; } + +OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_uint8x16, i8x16, schar) +OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_int8x16, i8x16, schar) +OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_uint16x8, i16x8, short) +OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_int16x8, i16x8, short) +OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_uint32x4, i32x4, int) +OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_int32x4, i32x4, int) +OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_float32x4, i32x4, float) +OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_float64x2, f64x2, double) + +#define OPENCV_HAL_IMPL_WASM_CHECK_ALL_ANY(_Tpvec, suffix, esuffix) \ +inline bool v_check_all(const _Tpvec& a) \ +{ \ + v128_t masked = v_reinterpret_as_##esuffix(a).val; \ + masked = wasm_i32x4_replace_lane(masked, 0, 0xffffffff); \ + masked = wasm_i32x4_replace_lane(masked, 2, 0xffffffff); \ + return wasm_i8x16_all_true(wasm_##suffix##_lt(masked, 
wasm_##suffix##_splat(0))); \ +} \ +inline bool v_check_any(const _Tpvec& a) \ +{ \ + v128_t masked = v_reinterpret_as_##esuffix(a).val; \ + masked = wasm_i32x4_replace_lane(masked, 0, 0x0); \ + masked = wasm_i32x4_replace_lane(masked, 2, 0x0); \ + return wasm_i8x16_any_true(wasm_##suffix##_lt(masked, wasm_##suffix##_splat(0))); \ +} \ + +OPENCV_HAL_IMPL_WASM_CHECK_ALL_ANY(v_int64x2, i32x4, s32) +OPENCV_HAL_IMPL_WASM_CHECK_ALL_ANY(v_uint64x2, i32x4, u32) + + +inline int v_scan_forward(const v_int8x16& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))); } +inline int v_scan_forward(const v_uint8x16& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))); } +inline int v_scan_forward(const v_int16x8& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 2; } +inline int v_scan_forward(const v_uint16x8& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 2; } +inline int v_scan_forward(const v_int32x4& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 4; } +inline int v_scan_forward(const v_uint32x4& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 4; } +inline int v_scan_forward(const v_float32x4& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 4; } +inline int v_scan_forward(const v_int64x2& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 8; } +inline int v_scan_forward(const v_uint64x2& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 8; } +inline int v_scan_forward(const v_float64x2& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))) / 8; } + +#define OPENCV_HAL_IMPL_WASM_SELECT(_Tpvec) \ +inline _Tpvec v_select(const _Tpvec& mask, const _Tpvec& a, const _Tpvec& b) \ +{ \ + return _Tpvec(wasm_v128_bitselect(a.val, b.val, mask.val)); \ +} + +OPENCV_HAL_IMPL_WASM_SELECT(v_uint8x16) +OPENCV_HAL_IMPL_WASM_SELECT(v_int8x16) +OPENCV_HAL_IMPL_WASM_SELECT(v_uint16x8) +OPENCV_HAL_IMPL_WASM_SELECT(v_int16x8) +OPENCV_HAL_IMPL_WASM_SELECT(v_uint32x4) +OPENCV_HAL_IMPL_WASM_SELECT(v_int32x4) +OPENCV_HAL_IMPL_WASM_SELECT(v_uint64x2) +OPENCV_HAL_IMPL_WASM_SELECT(v_int64x2) +OPENCV_HAL_IMPL_WASM_SELECT(v_float32x4) +OPENCV_HAL_IMPL_WASM_SELECT(v_float64x2) + +#define OPENCV_HAL_IMPL_WASM_EXPAND(_Tpvec, _Tpwvec, _Tp, intrin) \ +inline void v_expand(const _Tpvec& a, _Tpwvec& b0, _Tpwvec& b1) \ +{ \ + b0.val = intrin(a.val); \ + b1.val = __CV_CAT(intrin, _high)(a.val); \ +} \ +inline _Tpwvec v_expand_low(const _Tpvec& a) \ +{ return _Tpwvec(intrin(a.val)); } \ +inline _Tpwvec v_expand_high(const _Tpvec& a) \ +{ return _Tpwvec(__CV_CAT(intrin, _high)(a.val)); } \ +inline _Tpwvec v_load_expand(const _Tp* ptr) \ +{ \ + v128_t a = wasm_v128_load(ptr); \ + return _Tpwvec(intrin(a)); \ +} + +OPENCV_HAL_IMPL_WASM_EXPAND(v_uint8x16, v_uint16x8, uchar, v128_cvtu8x16_i16x8) +OPENCV_HAL_IMPL_WASM_EXPAND(v_int8x16, v_int16x8, schar, v128_cvti8x16_i16x8) +OPENCV_HAL_IMPL_WASM_EXPAND(v_uint16x8, v_uint32x4, ushort, v128_cvtu16x8_i32x4) +OPENCV_HAL_IMPL_WASM_EXPAND(v_int16x8, v_int32x4, short, v128_cvti16x8_i32x4) +OPENCV_HAL_IMPL_WASM_EXPAND(v_uint32x4, v_uint64x2, unsigned, v128_cvtu32x4_i64x2) +OPENCV_HAL_IMPL_WASM_EXPAND(v_int32x4, v_int64x2, int, v128_cvti32x4_i64x2) + +#define OPENCV_HAL_IMPL_WASM_EXPAND_Q(_Tpvec, _Tp, intrin) \ +inline _Tpvec v_load_expand_q(const _Tp* ptr) \ +{ \ + v128_t a = wasm_v128_load(ptr); \ + return _Tpvec(intrin(a)); \ +} + +OPENCV_HAL_IMPL_WASM_EXPAND_Q(v_uint32x4, uchar, v128_cvtu8x16_i32x4) +OPENCV_HAL_IMPL_WASM_EXPAND_Q(v_int32x4, schar, 
v128_cvti8x16_i32x4) + +#define OPENCV_HAL_IMPL_WASM_UNPACKS(_Tpvec, suffix) \ +inline void v_zip(const _Tpvec& a0, const _Tpvec& a1, _Tpvec& b0, _Tpvec& b1) \ +{ \ + b0.val = wasm_unpacklo_##suffix(a0.val, a1.val); \ + b1.val = wasm_unpackhi_##suffix(a0.val, a1.val); \ +} \ +inline _Tpvec v_combine_low(const _Tpvec& a, const _Tpvec& b) \ +{ \ + return _Tpvec(wasm_unpacklo_i64x2(a.val, b.val)); \ +} \ +inline _Tpvec v_combine_high(const _Tpvec& a, const _Tpvec& b) \ +{ \ + return _Tpvec(wasm_unpackhi_i64x2(a.val, b.val)); \ +} \ +inline void v_recombine(const _Tpvec& a, const _Tpvec& b, _Tpvec& c, _Tpvec& d) \ +{ \ + c.val = wasm_unpacklo_i64x2(a.val, b.val); \ + d.val = wasm_unpackhi_i64x2(a.val, b.val); \ +} + +OPENCV_HAL_IMPL_WASM_UNPACKS(v_uint8x16, i8x16) +OPENCV_HAL_IMPL_WASM_UNPACKS(v_int8x16, i8x16) +OPENCV_HAL_IMPL_WASM_UNPACKS(v_uint16x8, i16x8) +OPENCV_HAL_IMPL_WASM_UNPACKS(v_int16x8, i16x8) +OPENCV_HAL_IMPL_WASM_UNPACKS(v_uint32x4, i32x4) +OPENCV_HAL_IMPL_WASM_UNPACKS(v_int32x4, i32x4) +OPENCV_HAL_IMPL_WASM_UNPACKS(v_float32x4, i32x4) +OPENCV_HAL_IMPL_WASM_UNPACKS(v_float64x2, i64x2) + +template<int s, typename _Tpvec> +inline _Tpvec v_extract(const _Tpvec& a, const _Tpvec& b) +{ + return v_rotate_right<s>(a, b); +} + +inline v_int32x4 v_round(const v_float32x4& a) +{ + v128_t h = wasm_f32x4_splat(0.5); + return v_int32x4(wasm_i32x4_trunc_saturate_f32x4(wasm_f32x4_add(a.val, h))); +} + +inline v_int32x4 v_floor(const v_float32x4& a) +{ + v128_t a1 = wasm_i32x4_trunc_saturate_f32x4(a.val); + v128_t mask = wasm_f32x4_lt(a.val, wasm_f32x4_convert_i32x4(a1)); + return v_int32x4(wasm_i32x4_add(a1, mask)); +} + +inline v_int32x4 v_ceil(const v_float32x4& a) +{ + v128_t a1 = wasm_i32x4_trunc_saturate_f32x4(a.val); + v128_t mask = wasm_f32x4_gt(a.val, wasm_f32x4_convert_i32x4(a1)); + return v_int32x4(wasm_i32x4_sub(a1, mask)); +} + +inline v_int32x4 v_trunc(const v_float32x4& a) +{ return v_int32x4(wasm_i32x4_trunc_saturate_f32x4(a.val)); } + +#define OPENCV_HAL_IMPL_WASM_MATH_FUNC(func, cfunc) \ +inline v_int32x4 func(const v_float64x2& a) \ +{ \ + double a_[2]; \ + wasm_v128_store(a_, a.val); \ + int c_[4]; \ + c_[0] = cfunc(a_[0]); \ + c_[1] = cfunc(a_[1]); \ + c_[2] = 0; \ + c_[3] = 0; \ + return v_int32x4(wasm_v128_load(c_)); \ +} + +OPENCV_HAL_IMPL_WASM_MATH_FUNC(v_round, cvRound) +OPENCV_HAL_IMPL_WASM_MATH_FUNC(v_floor, cvFloor) +OPENCV_HAL_IMPL_WASM_MATH_FUNC(v_ceil, cvCeil) +OPENCV_HAL_IMPL_WASM_MATH_FUNC(v_trunc, int) + +inline v_int32x4 v_round(const v_float64x2& a, const v_float64x2& b) +{ + double a_[2], b_[2]; + wasm_v128_store(a_, a.val); + wasm_v128_store(b_, b.val); + int c_[4]; + c_[0] = cvRound(a_[0]); + c_[1] = cvRound(a_[1]); + c_[2] = cvRound(b_[0]); + c_[3] = cvRound(b_[1]); + return v_int32x4(wasm_v128_load(c_)); +} + +#define OPENCV_HAL_IMPL_WASM_TRANSPOSE4x4(_Tpvec, suffix) \ +inline void v_transpose4x4(const _Tpvec& a0, const _Tpvec& a1, \ + const _Tpvec& a2, const _Tpvec& a3, \ + _Tpvec& b0, _Tpvec& b1, \ + _Tpvec& b2, _Tpvec& b3) \ +{ \ + v128_t t0 = wasm_unpacklo_##suffix(a0.val, a1.val); \ + v128_t t1 = wasm_unpacklo_##suffix(a2.val, a3.val); \ + v128_t t2 = wasm_unpackhi_##suffix(a0.val, a1.val); \ + v128_t t3 = wasm_unpackhi_##suffix(a2.val, a3.val); \ +\ + b0.val = wasm_unpacklo_i64x2(t0, t1); \ + b1.val = wasm_unpackhi_i64x2(t0, t1); \ + b2.val = wasm_unpacklo_i64x2(t2, t3); \ + b3.val = wasm_unpackhi_i64x2(t2, t3); \ +} + +OPENCV_HAL_IMPL_WASM_TRANSPOSE4x4(v_uint32x4, i32x4) +OPENCV_HAL_IMPL_WASM_TRANSPOSE4x4(v_int32x4, i32x4) +OPENCV_HAL_IMPL_WASM_TRANSPOSE4x4(v_float32x4,
i32x4) + +// load deinterleave +inline void v_load_deinterleave(const uchar* ptr, v_uint8x16& a, v_uint8x16& b) +{ + v128_t t00 = wasm_v128_load(ptr); + v128_t t01 = wasm_v128_load(ptr + 16); + + a.val = wasm_v8x16_shuffle(t00, t01, 0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30); + b.val = wasm_v8x16_shuffle(t00, t01, 1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31); +} + +inline void v_load_deinterleave(const uchar* ptr, v_uint8x16& a, v_uint8x16& b, v_uint8x16& c) +{ + v128_t t00 = wasm_v128_load(ptr); + v128_t t01 = wasm_v128_load(ptr + 16); + v128_t t02 = wasm_v128_load(ptr + 32); + + v128_t t10 = wasm_v8x16_shuffle(t00, t01, 0,3,6,9,12,15,18,21,24,27,30,1,2,4,5,7); + v128_t t11 = wasm_v8x16_shuffle(t00, t01, 1,4,7,10,13,16,19,22,25,28,31,0,2,3,5,6); + v128_t t12 = wasm_v8x16_shuffle(t00, t01, 2,5,8,11,14,17,20,23,26,29,0,1,3,4,6,7); + + a.val = wasm_v8x16_shuffle(t10, t02, 0,1,2,3,4,5,6,7,8,9,10,17,20,23,26,29); + b.val = wasm_v8x16_shuffle(t11, t02, 0,1,2,3,4,5,6,7,8,9,10,18,21,24,27,30); + c.val = wasm_v8x16_shuffle(t12, t02, 0,1,2,3,4,5,6,7,8,9,16,19,22,25,28,31); +} + +inline void v_load_deinterleave(const uchar* ptr, v_uint8x16& a, v_uint8x16& b, v_uint8x16& c, v_uint8x16& d) +{ + v128_t u0 = wasm_v128_load(ptr); // a0 b0 c0 d0 a1 b1 c1 d1 ... + v128_t u1 = wasm_v128_load(ptr + 16); // a4 b4 c4 d4 ... + v128_t u2 = wasm_v128_load(ptr + 32); // a8 b8 c8 d8 ... + v128_t u3 = wasm_v128_load(ptr + 48); // a12 b12 c12 d12 ... + + v128_t v0 = wasm_v8x16_shuffle(u0, u1, 0,4,8,12,16,20,24,28,1,5,9,13,17,21,25,29); + v128_t v1 = wasm_v8x16_shuffle(u2, u3, 0,4,8,12,16,20,24,28,1,5,9,13,17,21,25,29); + v128_t v2 = wasm_v8x16_shuffle(u0, u1, 2,6,10,14,18,22,26,30,3,7,11,15,19,23,27,31); + v128_t v3 = wasm_v8x16_shuffle(u2, u3, 2,6,10,14,18,22,26,30,3,7,11,15,19,23,27,31); + + a.val = wasm_v8x16_shuffle(v0, v1, 0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23); + b.val = wasm_v8x16_shuffle(v0, v1, 8,9,10,11,12,13,14,15,24,25,26,27,28,29,30,31); + c.val = wasm_v8x16_shuffle(v2, v3, 0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23); + d.val = wasm_v8x16_shuffle(v2, v3, 8,9,10,11,12,13,14,15,24,25,26,27,28,29,30,31); +} + +inline void v_load_deinterleave(const ushort* ptr, v_uint16x8& a, v_uint16x8& b) +{ + v128_t v0 = wasm_v128_load(ptr); // a0 b0 a1 b1 a2 b2 a3 b3 + v128_t v1 = wasm_v128_load(ptr + 8); // a4 b4 a5 b5 a6 b6 a7 b7 + + a.val = wasm_v8x16_shuffle(v0, v1, 0,1,4,5,8,9,12,13,16,17,20,21,24,25,28,29); // a0 a1 a2 a3 a4 a5 a6 a7 + b.val = wasm_v8x16_shuffle(v0, v1, 2,3,6,7,10,11,14,15,18,19,22,23,26,27,30,31); // b0 b1 ab b3 b4 b5 b6 b7 +} + +inline void v_load_deinterleave(const ushort* ptr, v_uint16x8& a, v_uint16x8& b, v_uint16x8& c) +{ + v128_t t00 = wasm_v128_load(ptr); // a0 b0 c0 a1 b1 c1 a2 b2 + v128_t t01 = wasm_v128_load(ptr + 8); // c2 a3 b3 c3 a4 b4 c4 a5 + v128_t t02 = wasm_v128_load(ptr + 16); // b5 c5 a6 b6 c6 a7 b7 c7 + + v128_t t10 = wasm_v8x16_shuffle(t00, t01, 0,1,6,7,12,13,18,19,24,25,30,31,2,3,4,5); + v128_t t11 = wasm_v8x16_shuffle(t00, t01, 2,3,8,9,14,15,20,21,26,27,0,1,4,5,6,7); + v128_t t12 = wasm_v8x16_shuffle(t00, t01, 4,5,10,11,16,17,22,23,28,29,0,1,2,3,6,7); + + a.val = wasm_v8x16_shuffle(t10, t02, 0,1,2,3,4,5,6,7,8,9,10,11,20,21,26,27); + b.val = wasm_v8x16_shuffle(t11, t02, 0,1,2,3,4,5,6,7,8,9,16,17,22,23,28,29); + c.val = wasm_v8x16_shuffle(t12, t02, 0,1,2,3,4,5,6,7,8,9,18,19,24,25,30,31); +} + +inline void v_load_deinterleave(const ushort* ptr, v_uint16x8& a, v_uint16x8& b, v_uint16x8& c, v_uint16x8& d) +{ + v128_t u0 = wasm_v128_load(ptr); // a0 b0 c0 d0 a1 b1 c1 d1 + v128_t u1 
= wasm_v128_load(ptr + 8); // a2 b2 c2 d2 ... + v128_t u2 = wasm_v128_load(ptr + 16); // a4 b4 c4 d4 ... + v128_t u3 = wasm_v128_load(ptr + 24); // a6 b6 c6 d6 ... + + v128_t v0 = wasm_v8x16_shuffle(u0, u1, 0,1,8,9,16,17,24,25,2,3,10,11,18,19,26,27); // a0 a1 a2 a3 b0 b1 b2 b3 + v128_t v1 = wasm_v8x16_shuffle(u2, u3, 0,1,8,9,16,17,24,25,2,3,10,11,18,19,26,27); // a4 a5 a6 a7 b4 b5 b6 b7 + v128_t v2 = wasm_v8x16_shuffle(u0, u1, 4,5,12,13,20,21,28,29,6,7,14,15,22,23,30,31); // c0 c1 c2 c3 d0 d1 d2 d3 + v128_t v3 = wasm_v8x16_shuffle(u2, u3, 4,5,12,13,20,21,28,29,6,7,14,15,22,23,30,31); // c4 c5 c6 c7 d4 d5 d6 d7 + + a.val = wasm_v8x16_shuffle(v0, v1, 0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23); + b.val = wasm_v8x16_shuffle(v0, v1, 8,9,10,11,12,13,14,15,24,25,26,27,28,29,30,31); + c.val = wasm_v8x16_shuffle(v2, v3, 0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23); + d.val = wasm_v8x16_shuffle(v2, v3, 8,9,10,11,12,13,14,15,24,25,26,27,28,29,30,31); +} + +inline void v_load_deinterleave(const unsigned* ptr, v_uint32x4& a, v_uint32x4& b) +{ + v128_t v0 = wasm_v128_load(ptr); // a0 b0 a1 b1 + v128_t v1 = wasm_v128_load(ptr + 4); // a2 b2 a3 b3 + + a.val = wasm_v8x16_shuffle(v0, v1, 0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27); // a0 a1 a2 a3 + b.val = wasm_v8x16_shuffle(v0, v1, 4,5,6,7,12,13,14,15,20,21,22,23,28,29,30,31); // b0 b1 b2 b3 +} + +inline void v_load_deinterleave(const unsigned* ptr, v_uint32x4& a, v_uint32x4& b, v_uint32x4& c) +{ + v128_t t00 = wasm_v128_load(ptr); // a0 b0 c0 a1 + v128_t t01 = wasm_v128_load(ptr + 4); // b2 c2 a3 b3 + v128_t t02 = wasm_v128_load(ptr + 8); // c3 a4 b4 c4 + + v128_t t10 = wasm_v8x16_shuffle(t00, t01, 0,1,2,3,12,13,14,15,24,25,26,27,4,5,6,7); + v128_t t11 = wasm_v8x16_shuffle(t00, t01, 4,5,6,7,16,17,18,19,28,29,30,31,0,1,2,3); + v128_t t12 = wasm_v8x16_shuffle(t00, t01, 8,9,10,11,20,21,22,23,0,1,2,3,4,5,6,7); + + a.val = wasm_v8x16_shuffle(t10, t02, 0,1,2,3,4,5,6,7,8,9,10,11,20,21,22,23); + b.val = wasm_v8x16_shuffle(t11, t02, 0,1,2,3,4,5,6,7,8,9,10,11,24,25,26,27); + c.val = wasm_v8x16_shuffle(t12, t02, 0,1,2,3,4,5,6,7,16,17,18,19,28,29,30,31); +} + +inline void v_load_deinterleave(const unsigned* ptr, v_uint32x4& a, v_uint32x4& b, v_uint32x4& c, v_uint32x4& d) +{ + v_uint32x4 s0(wasm_v128_load(ptr)); // a0 b0 c0 d0 + v_uint32x4 s1(wasm_v128_load(ptr + 4)); // a1 b1 c1 d1 + v_uint32x4 s2(wasm_v128_load(ptr + 8)); // a2 b2 c2 d2 + v_uint32x4 s3(wasm_v128_load(ptr + 12)); // a3 b3 c3 d3 + + v_transpose4x4(s0, s1, s2, s3, a, b, c, d); +} + +inline void v_load_deinterleave(const float* ptr, v_float32x4& a, v_float32x4& b) +{ + v128_t v0 = wasm_v128_load(ptr); // a0 b0 a1 b1 + v128_t v1 = wasm_v128_load((ptr + 4)); // a2 b2 a3 b3 + + a.val = wasm_v8x16_shuffle(v0, v1, 0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27); // a0 a1 a2 a3 + b.val = wasm_v8x16_shuffle(v0, v1, 4,5,6,7,12,13,14,15,20,21,22,23,28,29,30,31); // b0 b1 b2 b3 +} + +inline void v_load_deinterleave(const float* ptr, v_float32x4& a, v_float32x4& b, v_float32x4& c) +{ + v128_t t00 = wasm_v128_load(ptr); // a0 b0 c0 a1 + v128_t t01 = wasm_v128_load(ptr + 4); // b2 c2 a3 b3 + v128_t t02 = wasm_v128_load(ptr + 8); // c3 a4 b4 c4 + + v128_t t10 = wasm_v8x16_shuffle(t00, t01, 0,1,2,3,12,13,14,15,24,25,26,27,4,5,6,7); + v128_t t11 = wasm_v8x16_shuffle(t00, t01, 4,5,6,7,16,17,18,19,28,29,30,31,0,1,2,3); + v128_t t12 = wasm_v8x16_shuffle(t00, t01, 8,9,10,11,20,21,22,23,0,1,2,3,4,5,6,7); + + a.val = wasm_v8x16_shuffle(t10, t02, 0,1,2,3,4,5,6,7,8,9,10,11,20,21,22,23); + b.val = wasm_v8x16_shuffle(t11, t02, 
0,1,2,3,4,5,6,7,8,9,10,11,24,25,26,27); + c.val = wasm_v8x16_shuffle(t12, t02, 0,1,2,3,4,5,6,7,16,17,18,19,28,29,30,31); +} + +inline void v_load_deinterleave(const float* ptr, v_float32x4& a, v_float32x4& b, v_float32x4& c, v_float32x4& d) +{ + v_float32x4 s0(wasm_v128_load(ptr)); // a0 b0 c0 d0 + v_float32x4 s1(wasm_v128_load(ptr + 4)); // a1 b1 c1 d1 + v_float32x4 s2(wasm_v128_load(ptr + 8)); // a2 b2 c2 d2 + v_float32x4 s3(wasm_v128_load(ptr + 12)); // a3 b3 c3 d3 + + v_transpose4x4(s0, s1, s2, s3, a, b, c, d); +} + +inline void v_load_deinterleave(const uint64 *ptr, v_uint64x2& a, v_uint64x2& b) +{ + v128_t t0 = wasm_v128_load(ptr); // a0 b0 + v128_t t1 = wasm_v128_load(ptr + 2); // a1 b1 + + a.val = wasm_unpacklo_i64x2(t0, t1); + b.val = wasm_unpackhi_i64x2(t0, t1); +} + +inline void v_load_deinterleave(const uint64 *ptr, v_uint64x2& a, v_uint64x2& b, v_uint64x2& c) +{ + v128_t t0 = wasm_v128_load(ptr); // a0, b0 + v128_t t1 = wasm_v128_load(ptr + 2); // c0, a1 + v128_t t2 = wasm_v128_load(ptr + 4); // b1, c1 + + a.val = wasm_v8x16_shuffle(t0, t1, 0,1,2,3,4,5,6,7,24,25,26,27,28,29,30,31); + b.val = wasm_v8x16_shuffle(t0, t2, 8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23); + c.val = wasm_v8x16_shuffle(t1, t2, 0,1,2,3,4,5,6,7,24,25,26,27,28,29,30,31); +} + +inline void v_load_deinterleave(const uint64 *ptr, v_uint64x2& a, + v_uint64x2& b, v_uint64x2& c, v_uint64x2& d) +{ + v128_t t0 = wasm_v128_load(ptr); // a0 b0 + v128_t t1 = wasm_v128_load(ptr + 2); // c0 d0 + v128_t t2 = wasm_v128_load(ptr + 4); // a1 b1 + v128_t t3 = wasm_v128_load(ptr + 6); // c1 d1 + + a.val = wasm_unpacklo_i64x2(t0, t2); + b.val = wasm_unpackhi_i64x2(t0, t2); + c.val = wasm_unpacklo_i64x2(t1, t3); + d.val = wasm_unpackhi_i64x2(t1, t3); +} + +// store interleave + +inline void v_store_interleave( uchar* ptr, const v_uint8x16& a, const v_uint8x16& b, + hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED) +{ + v128_t v0 = wasm_unpacklo_i8x16(a.val, b.val); + v128_t v1 = wasm_unpackhi_i8x16(a.val, b.val); + + wasm_v128_store(ptr, v0); + wasm_v128_store(ptr + 16, v1); +} + +inline void v_store_interleave( uchar* ptr, const v_uint8x16& a, const v_uint8x16& b, + const v_uint8x16& c, hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED) +{ + v128_t t00 = wasm_v8x16_shuffle(a.val, b.val, 0,16,0,1,17,0,2,18,0,3,19,0,4,20,0,5); + v128_t t01 = wasm_v8x16_shuffle(a.val, b.val, 21,0,6,22,0,7,23,0,8,24,0,9,25,0,10,26); + v128_t t02 = wasm_v8x16_shuffle(a.val, b.val, 0,11,27,0,12,28,0,13,29,0,14,30,0,15,31,0); + + v128_t t10 = wasm_v8x16_shuffle(t00, c.val, 0,1,16,3,4,17,6,7,18,9,10,19,12,13,20,15); + v128_t t11 = wasm_v8x16_shuffle(t01, c.val, 0,21,2,3,22,5,6,23,8,9,24,11,12,25,14,15); + v128_t t12 = wasm_v8x16_shuffle(t02, c.val, 26,1,2,27,4,5,28,7,8,29,10,11,30,13,14,31); + + wasm_v128_store(ptr, t10); + wasm_v128_store(ptr + 16, t11); + wasm_v128_store(ptr + 32, t12); +} + +inline void v_store_interleave( uchar* ptr, const v_uint8x16& a, const v_uint8x16& b, + const v_uint8x16& c, const v_uint8x16& d, + hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED) +{ + // a0 a1 a2 a3 .... + // b0 b1 b2 b3 .... + // c0 c1 c2 c3 .... + // d0 d1 d2 d3 .... + v128_t u0 = wasm_unpacklo_i8x16(a.val, c.val); // a0 c0 a1 c1 ... + v128_t u1 = wasm_unpackhi_i8x16(a.val, c.val); // a8 c8 a9 c9 ... + v128_t u2 = wasm_unpacklo_i8x16(b.val, d.val); // b0 d0 b1 d1 ... + v128_t u3 = wasm_unpackhi_i8x16(b.val, d.val); // b8 d8 b9 d9 ... + + v128_t v0 = wasm_unpacklo_i8x16(u0, u2); // a0 b0 c0 d0 ... + v128_t v1 = wasm_unpackhi_i8x16(u0, u2); // a4 b4 c4 d4 ... 
+ v128_t v2 = wasm_unpacklo_i8x16(u1, u3); // a8 b8 c8 d8 ... + v128_t v3 = wasm_unpackhi_i8x16(u1, u3); // a12 b12 c12 d12 ... + + wasm_v128_store(ptr, v0); + wasm_v128_store(ptr + 16, v1); + wasm_v128_store(ptr + 32, v2); + wasm_v128_store(ptr + 48, v3); +} + +inline void v_store_interleave( ushort* ptr, const v_uint16x8& a, const v_uint16x8& b, + hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED) +{ + v128_t v0 = wasm_unpacklo_i16x8(a.val, b.val); + v128_t v1 = wasm_unpackhi_i16x8(a.val, b.val); + + wasm_v128_store(ptr, v0); + wasm_v128_store(ptr + 8, v1); +} + +inline void v_store_interleave( ushort* ptr, const v_uint16x8& a, + const v_uint16x8& b, const v_uint16x8& c, + hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED) +{ + v128_t t00 = wasm_v8x16_shuffle(a.val, b.val, 0,1,16,17,0,0,2,3,18,19,0,0,4,5,20,21); + v128_t t01 = wasm_v8x16_shuffle(a.val, b.val, 0,0,6,7,22,23,0,0,8,9,24,25,0,0,10,11); + v128_t t02 = wasm_v8x16_shuffle(a.val, b.val, 26,27,0,0,12,13,28,29,0,0,14,15,30,31,0,0); + + v128_t t10 = wasm_v8x16_shuffle(t00, c.val, 0,1,2,3,16,17,6,7,8,9,18,19,12,13,14,15); + v128_t t11 = wasm_v8x16_shuffle(t01, c.val, 20,21,2,3,4,5,22,23,8,9,10,11,24,25,14,15); + v128_t t12 = wasm_v8x16_shuffle(t02, c.val, 0,1,26,27,4,5,6,7,28,29,10,11,12,13,30,31); + + wasm_v128_store(ptr, t10); + wasm_v128_store(ptr + 8, t11); + wasm_v128_store(ptr + 16, t12); +} + +inline void v_store_interleave( ushort* ptr, const v_uint16x8& a, const v_uint16x8& b, + const v_uint16x8& c, const v_uint16x8& d, + hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED) +{ + // a0 a1 a2 a3 .... + // b0 b1 b2 b3 .... + // c0 c1 c2 c3 .... + // d0 d1 d2 d3 .... + v128_t u0 = wasm_unpacklo_i16x8(a.val, c.val); // a0 c0 a1 c1 ... + v128_t u1 = wasm_unpackhi_i16x8(a.val, c.val); // a4 c4 a5 c5 ... + v128_t u2 = wasm_unpacklo_i16x8(b.val, d.val); // b0 d0 b1 d1 ... + v128_t u3 = wasm_unpackhi_i16x8(b.val, d.val); // b4 d4 b5 d5 ... + + v128_t v0 = wasm_unpacklo_i16x8(u0, u2); // a0 b0 c0 d0 ... + v128_t v1 = wasm_unpackhi_i16x8(u0, u2); // a2 b2 c2 d2 ... + v128_t v2 = wasm_unpacklo_i16x8(u1, u3); // a4 b4 c4 d4 ... + v128_t v3 = wasm_unpackhi_i16x8(u1, u3); // a6 b6 c6 d6 ... 
+ + wasm_v128_store(ptr, v0); + wasm_v128_store(ptr + 8, v1); + wasm_v128_store(ptr + 16, v2); + wasm_v128_store(ptr + 24, v3); +} + +inline void v_store_interleave( unsigned* ptr, const v_uint32x4& a, const v_uint32x4& b, + hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED) +{ + v128_t v0 = wasm_unpacklo_i32x4(a.val, b.val); + v128_t v1 = wasm_unpackhi_i32x4(a.val, b.val); + + wasm_v128_store(ptr, v0); + wasm_v128_store(ptr + 4, v1); +} + +inline void v_store_interleave( unsigned* ptr, const v_uint32x4& a, const v_uint32x4& b, + const v_uint32x4& c, hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED) +{ + v128_t t00 = wasm_v8x16_shuffle(a.val, b.val, 0,1,2,3,16,17,18,19,0,0,0,0,4,5,6,7); + v128_t t01 = wasm_v8x16_shuffle(a.val, b.val, 20,21,22,23,0,0,0,0,8,9,10,11,24,25,26,27); + v128_t t02 = wasm_v8x16_shuffle(a.val, b.val, 0,0,0,0,12,13,14,15,28,29,30,31,0,0,0,0); + + v128_t t10 = wasm_v8x16_shuffle(t00, c.val, 0,1,2,3,4,5,6,7,16,17,18,19,12,13,14,15); + v128_t t11 = wasm_v8x16_shuffle(t01, c.val, 0,1,2,3,20,21,22,23,8,9,10,11,12,13,14,15); + v128_t t12 = wasm_v8x16_shuffle(t02, c.val, 24,25,26,27,4,5,6,7,8,9,10,11,28,29,30,31); + + wasm_v128_store(ptr, t10); + wasm_v128_store(ptr + 4, t11); + wasm_v128_store(ptr + 8, t12); +} + +inline void v_store_interleave(unsigned* ptr, const v_uint32x4& a, const v_uint32x4& b, + const v_uint32x4& c, const v_uint32x4& d, + hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED) +{ + v_uint32x4 v0, v1, v2, v3; + v_transpose4x4(a, b, c, d, v0, v1, v2, v3); + + wasm_v128_store(ptr, v0.val); + wasm_v128_store(ptr + 4, v1.val); + wasm_v128_store(ptr + 8, v2.val); + wasm_v128_store(ptr + 12, v3.val); +} + +// 2-channel, float only +inline void v_store_interleave(float* ptr, const v_float32x4& a, const v_float32x4& b, + hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED) +{ + v128_t v0 = wasm_unpacklo_i32x4(a.val, b.val); + v128_t v1 = wasm_unpackhi_i32x4(a.val, b.val); + + wasm_v128_store(ptr, v0); + wasm_v128_store(ptr + 4, v1); +} + +inline void v_store_interleave(float* ptr, const v_float32x4& a, const v_float32x4& b, + const v_float32x4& c, hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED) +{ + v128_t t00 = wasm_v8x16_shuffle(a.val, b.val, 0,1,2,3,16,17,18,19,0,0,0,0,4,5,6,7); + v128_t t01 = wasm_v8x16_shuffle(a.val, b.val, 20,21,22,23,0,0,0,0,8,9,10,11,24,25,26,27); + v128_t t02 = wasm_v8x16_shuffle(a.val, b.val, 0,0,0,0,12,13,14,15,28,29,30,31,0,0,0,0); + + v128_t t10 = wasm_v8x16_shuffle(t00, c.val, 0,1,2,3,4,5,6,7,16,17,18,19,12,13,14,15); + v128_t t11 = wasm_v8x16_shuffle(t01, c.val, 0,1,2,3,20,21,22,23,8,9,10,11,12,13,14,15); + v128_t t12 = wasm_v8x16_shuffle(t02, c.val, 24,25,26,27,4,5,6,7,8,9,10,11,28,29,30,31); + + wasm_v128_store(ptr, t10); + wasm_v128_store(ptr + 4, t11); + wasm_v128_store(ptr + 8, t12); +} + +inline void v_store_interleave(float* ptr, const v_float32x4& a, const v_float32x4& b, + const v_float32x4& c, const v_float32x4& d, + hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED) +{ + v_float32x4 v0, v1, v2, v3; + v_transpose4x4(a, b, c, d, v0, v1, v2, v3); + + wasm_v128_store(ptr, v0.val); + wasm_v128_store(ptr + 4, v1.val); + wasm_v128_store(ptr + 8, v2.val); + wasm_v128_store(ptr + 12, v3.val); +} + +inline void v_store_interleave(uint64 *ptr, const v_uint64x2& a, const v_uint64x2& b, + hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED) +{ + v128_t v0 = wasm_unpacklo_i64x2(a.val, b.val); + v128_t v1 = wasm_unpackhi_i64x2(a.val, b.val); + + wasm_v128_store(ptr, v0); + wasm_v128_store(ptr + 2, v1); +} + +inline void v_store_interleave(uint64 *ptr, 
const v_uint64x2& a, const v_uint64x2& b, + const v_uint64x2& c, hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED) +{ + v128_t v0 = wasm_v8x16_shuffle(a.val, b.val, 0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23); + v128_t v1 = wasm_v8x16_shuffle(a.val, c.val, 16,17,18,19,20,21,22,23,8,9,10,11,12,13,14,15); + v128_t v2 = wasm_v8x16_shuffle(b.val, c.val, 8,9,10,11,12,13,14,15,24,25,26,27,28,29,30,31); + + wasm_v128_store(ptr, v0); + wasm_v128_store(ptr + 2, v1); + wasm_v128_store(ptr + 4, v2); +} + +inline void v_store_interleave(uint64 *ptr, const v_uint64x2& a, const v_uint64x2& b, + const v_uint64x2& c, const v_uint64x2& d, + hal::StoreMode /*mode*/ = hal::STORE_UNALIGNED) +{ + v128_t v0 = wasm_unpacklo_i64x2(a.val, b.val); + v128_t v1 = wasm_unpacklo_i64x2(c.val, d.val); + v128_t v2 = wasm_unpackhi_i64x2(a.val, b.val); + v128_t v3 = wasm_unpackhi_i64x2(c.val, d.val); + + wasm_v128_store(ptr, v0); + wasm_v128_store(ptr + 2, v1); + wasm_v128_store(ptr + 4, v2); + wasm_v128_store(ptr + 6, v3); +} + +#define OPENCV_HAL_IMPL_WASM_LOADSTORE_INTERLEAVE(_Tpvec0, _Tp0, suffix0, _Tpvec1, _Tp1, suffix1) \ +inline void v_load_deinterleave( const _Tp0* ptr, _Tpvec0& a0, _Tpvec0& b0 ) \ +{ \ + _Tpvec1 a1, b1; \ + v_load_deinterleave((const _Tp1*)ptr, a1, b1); \ + a0 = v_reinterpret_as_##suffix0(a1); \ + b0 = v_reinterpret_as_##suffix0(b1); \ +} \ +inline void v_load_deinterleave( const _Tp0* ptr, _Tpvec0& a0, _Tpvec0& b0, _Tpvec0& c0 ) \ +{ \ + _Tpvec1 a1, b1, c1; \ + v_load_deinterleave((const _Tp1*)ptr, a1, b1, c1); \ + a0 = v_reinterpret_as_##suffix0(a1); \ + b0 = v_reinterpret_as_##suffix0(b1); \ + c0 = v_reinterpret_as_##suffix0(c1); \ +} \ +inline void v_load_deinterleave( const _Tp0* ptr, _Tpvec0& a0, _Tpvec0& b0, _Tpvec0& c0, _Tpvec0& d0 ) \ +{ \ + _Tpvec1 a1, b1, c1, d1; \ + v_load_deinterleave((const _Tp1*)ptr, a1, b1, c1, d1); \ + a0 = v_reinterpret_as_##suffix0(a1); \ + b0 = v_reinterpret_as_##suffix0(b1); \ + c0 = v_reinterpret_as_##suffix0(c1); \ + d0 = v_reinterpret_as_##suffix0(d1); \ +} \ +inline void v_store_interleave( _Tp0* ptr, const _Tpvec0& a0, const _Tpvec0& b0, \ + hal::StoreMode mode = hal::STORE_UNALIGNED ) \ +{ \ + _Tpvec1 a1 = v_reinterpret_as_##suffix1(a0); \ + _Tpvec1 b1 = v_reinterpret_as_##suffix1(b0); \ + v_store_interleave((_Tp1*)ptr, a1, b1, mode); \ +} \ +inline void v_store_interleave( _Tp0* ptr, const _Tpvec0& a0, const _Tpvec0& b0, \ + const _Tpvec0& c0, hal::StoreMode mode = hal::STORE_UNALIGNED ) \ +{ \ + _Tpvec1 a1 = v_reinterpret_as_##suffix1(a0); \ + _Tpvec1 b1 = v_reinterpret_as_##suffix1(b0); \ + _Tpvec1 c1 = v_reinterpret_as_##suffix1(c0); \ + v_store_interleave((_Tp1*)ptr, a1, b1, c1, mode); \ +} \ +inline void v_store_interleave( _Tp0* ptr, const _Tpvec0& a0, const _Tpvec0& b0, \ + const _Tpvec0& c0, const _Tpvec0& d0, \ + hal::StoreMode mode = hal::STORE_UNALIGNED ) \ +{ \ + _Tpvec1 a1 = v_reinterpret_as_##suffix1(a0); \ + _Tpvec1 b1 = v_reinterpret_as_##suffix1(b0); \ + _Tpvec1 c1 = v_reinterpret_as_##suffix1(c0); \ + _Tpvec1 d1 = v_reinterpret_as_##suffix1(d0); \ + v_store_interleave((_Tp1*)ptr, a1, b1, c1, d1, mode); \ +} + +OPENCV_HAL_IMPL_WASM_LOADSTORE_INTERLEAVE(v_int8x16, schar, s8, v_uint8x16, uchar, u8) +OPENCV_HAL_IMPL_WASM_LOADSTORE_INTERLEAVE(v_int16x8, short, s16, v_uint16x8, ushort, u16) +OPENCV_HAL_IMPL_WASM_LOADSTORE_INTERLEAVE(v_int32x4, int, s32, v_uint32x4, unsigned, u32) +OPENCV_HAL_IMPL_WASM_LOADSTORE_INTERLEAVE(v_int64x2, int64, s64, v_uint64x2, uint64, u64) +OPENCV_HAL_IMPL_WASM_LOADSTORE_INTERLEAVE(v_float64x2, double, f64, 
v_uint64x2, uint64, u64) + +inline v_float32x4 v_cvt_f32(const v_int32x4& a) +{ + return v_float32x4(wasm_f32x4_convert_i32x4(a.val)); +} + +inline v_float32x4 v_cvt_f32(const v_float64x2& a) +{ + double a_[2]; + wasm_v128_store(a_, a.val); + float c_[4]; + c_[0] = (float)(a_[0]); + c_[1] = (float)(a_[1]); + c_[2] = 0; + c_[3] = 0; + return v_float32x4(wasm_v128_load(c_)); +} + +inline v_float32x4 v_cvt_f32(const v_float64x2& a, const v_float64x2& b) +{ + double a_[2], b_[2]; + wasm_v128_store(a_, a.val); + wasm_v128_store(b_, b.val); + float c_[4]; + c_[0] = (float)(a_[0]); + c_[1] = (float)(a_[1]); + c_[2] = (float)(b_[0]); + c_[3] = (float)(b_[1]); + return v_float32x4(wasm_v128_load(c_)); +} + +inline v_float64x2 v_cvt_f64(const v_int32x4& a) +{ +#ifdef __wasm_unimplemented_simd128__ + v128_t p = v128_cvti32x4_i64x2(a.val); + return v_float64x2(wasm_f64x2_convert_i64x2(p)); +#else + int a_[4]; + wasm_v128_store(a_, a.val); + double c_[2]; + c_[0] = (double)(a_[0]); + c_[1] = (double)(a_[1]); + return v_float64x2(wasm_v128_load(c_)); +#endif +} + +inline v_float64x2 v_cvt_f64_high(const v_int32x4& a) +{ +#ifdef __wasm_unimplemented_simd128__ + v128_t p = v128_cvti32x4_i64x2_high(a.val); + return v_float64x2(wasm_f64x2_convert_i64x2(p)); +#else + int a_[4]; + wasm_v128_store(a_, a.val); + double c_[2]; + c_[0] = (double)(a_[2]); + c_[1] = (double)(a_[3]); + return v_float64x2(wasm_v128_load(c_)); +#endif +} + +inline v_float64x2 v_cvt_f64(const v_float32x4& a) +{ + float a_[4]; + wasm_v128_store(a_, a.val); + double c_[2]; + c_[0] = (double)(a_[0]); + c_[1] = (double)(a_[1]); + return v_float64x2(wasm_v128_load(c_)); +} + +inline v_float64x2 v_cvt_f64_high(const v_float32x4& a) +{ + float a_[4]; + wasm_v128_store(a_, a.val); + double c_[2]; + c_[0] = (double)(a_[2]); + c_[1] = (double)(a_[3]); + return v_float64x2(wasm_v128_load(c_)); +} + +inline v_float64x2 v_cvt_f64(const v_int64x2& a) +{ +#ifdef __wasm_unimplemented_simd128__ + return v_float64x2(wasm_f64x2_convert_i64x2(a.val)); +#else + int64 a_[2]; + wasm_v128_store(a_, a.val); + double c_[2]; + c_[0] = (double)(a_[0]); + c_[1] = (double)(a_[1]); + return v_float64x2(wasm_v128_load(c_)); +#endif +} + +////////////// Lookup table access //////////////////// + +inline v_int8x16 v_lut(const schar* tab, const int* idx) +{ + return v_int8x16(tab[idx[0]], tab[idx[1]], tab[idx[ 2]], tab[idx[ 3]], tab[idx[ 4]], tab[idx[ 5]], tab[idx[ 6]], tab[idx[ 7]], + tab[idx[8]], tab[idx[9]], tab[idx[10]], tab[idx[11]], tab[idx[12]], tab[idx[13]], tab[idx[14]], tab[idx[15]]); +} +inline v_int8x16 v_lut_pairs(const schar* tab, const int* idx) +{ + return v_int8x16(tab[idx[0]], tab[idx[0]+1], tab[idx[1]], tab[idx[1]+1], tab[idx[2]], tab[idx[2]+1], tab[idx[3]], tab[idx[3]+1], + tab[idx[4]], tab[idx[4]+1], tab[idx[5]], tab[idx[5]+1], tab[idx[6]], tab[idx[6]+1], tab[idx[7]], tab[idx[7]+1]); +} +inline v_int8x16 v_lut_quads(const schar* tab, const int* idx) +{ + return v_int8x16(tab[idx[0]], tab[idx[0]+1], tab[idx[0]+2], tab[idx[0]+3], tab[idx[1]], tab[idx[1]+1], tab[idx[1]+2], tab[idx[1]+3], + tab[idx[2]], tab[idx[2]+1], tab[idx[2]+2], tab[idx[2]+3], tab[idx[3]], tab[idx[3]+1], tab[idx[3]+2], tab[idx[3]+3]); +} +inline v_uint8x16 v_lut(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v_lut((const schar *)tab, idx)); } +inline v_uint8x16 v_lut_pairs(const uchar* tab, const int* idx) { return v_reinterpret_as_u8(v_lut_pairs((const schar *)tab, idx)); } +inline v_uint8x16 v_lut_quads(const uchar* tab, const int* idx) { return 
v_reinterpret_as_u8(v_lut_quads((const schar *)tab, idx)); } + +inline v_int16x8 v_lut(const short* tab, const int* idx) +{ + return v_int16x8(tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]], + tab[idx[4]], tab[idx[5]], tab[idx[6]], tab[idx[7]]); +} +inline v_int16x8 v_lut_pairs(const short* tab, const int* idx) +{ + return v_int16x8(tab[idx[0]], tab[idx[0]+1], tab[idx[1]], tab[idx[1]+1], + tab[idx[2]], tab[idx[2]+1], tab[idx[3]], tab[idx[3]+1]); +} +inline v_int16x8 v_lut_quads(const short* tab, const int* idx) +{ + return v_int16x8(tab[idx[0]], tab[idx[0]+1], tab[idx[0]+2], tab[idx[0]+3], + tab[idx[1]], tab[idx[1]+1], tab[idx[1]+2], tab[idx[1]+3]); +} +inline v_uint16x8 v_lut(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v_lut((const short *)tab, idx)); } +inline v_uint16x8 v_lut_pairs(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v_lut_pairs((const short *)tab, idx)); } +inline v_uint16x8 v_lut_quads(const ushort* tab, const int* idx) { return v_reinterpret_as_u16(v_lut_quads((const short *)tab, idx)); } + +inline v_int32x4 v_lut(const int* tab, const int* idx) +{ + return v_int32x4(tab[idx[0]], tab[idx[1]], + tab[idx[2]], tab[idx[3]]); +} +inline v_int32x4 v_lut_pairs(const int* tab, const int* idx) +{ + return v_int32x4(tab[idx[0]], tab[idx[0]+1], + tab[idx[1]], tab[idx[1]+1]); +} +inline v_int32x4 v_lut_quads(const int* tab, const int* idx) +{ + return v_int32x4(wasm_v128_load(tab + idx[0])); +} +inline v_uint32x4 v_lut(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v_lut((const int *)tab, idx)); } +inline v_uint32x4 v_lut_pairs(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v_lut_pairs((const int *)tab, idx)); } +inline v_uint32x4 v_lut_quads(const unsigned* tab, const int* idx) { return v_reinterpret_as_u32(v_lut_quads((const int *)tab, idx)); } + +inline v_int64x2 v_lut(const int64_t* tab, const int* idx) +{ + return v_int64x2(tab[idx[0]], tab[idx[1]]); +} +inline v_int64x2 v_lut_pairs(const int64_t* tab, const int* idx) +{ + return v_int64x2(wasm_v128_load(tab + idx[0])); +} +inline v_uint64x2 v_lut(const uint64_t* tab, const int* idx) { return v_reinterpret_as_u64(v_lut((const int64_t *)tab, idx)); } +inline v_uint64x2 v_lut_pairs(const uint64_t* tab, const int* idx) { return v_reinterpret_as_u64(v_lut_pairs((const int64_t *)tab, idx)); } + +inline v_float32x4 v_lut(const float* tab, const int* idx) +{ + return v_float32x4(tab[idx[0]], tab[idx[1]], tab[idx[2]], tab[idx[3]]); +} +inline v_float32x4 v_lut_pairs(const float* tab, const int* idx) { return v_reinterpret_as_f32(v_lut_pairs((const int *)tab, idx)); } +inline v_float32x4 v_lut_quads(const float* tab, const int* idx) { return v_reinterpret_as_f32(v_lut_quads((const int *)tab, idx)); } + +inline v_float64x2 v_lut(const double* tab, const int* idx) +{ + return v_float64x2(tab[idx[0]], tab[idx[1]]); +} +inline v_float64x2 v_lut_pairs(const double* tab, const int* idx) +{ + return v_float64x2(wasm_v128_load(tab + idx[0])); +} + +inline v_int32x4 v_lut(const int* tab, const v_int32x4& idxvec) +{ + return v_int32x4(tab[wasm_i32x4_extract_lane(idxvec.val, 0)], + tab[wasm_i32x4_extract_lane(idxvec.val, 1)], + tab[wasm_i32x4_extract_lane(idxvec.val, 2)], + tab[wasm_i32x4_extract_lane(idxvec.val, 3)]); +} + +inline v_uint32x4 v_lut(const unsigned* tab, const v_int32x4& idxvec) +{ + return v_reinterpret_as_u32(v_lut((const int *)tab, idxvec)); +} + +inline v_float32x4 v_lut(const float* tab, const v_int32x4& idxvec) +{ + return 
v_float32x4(tab[wasm_i32x4_extract_lane(idxvec.val, 0)], + tab[wasm_i32x4_extract_lane(idxvec.val, 1)], + tab[wasm_i32x4_extract_lane(idxvec.val, 2)], + tab[wasm_i32x4_extract_lane(idxvec.val, 3)]); +} + +inline v_float64x2 v_lut(const double* tab, const v_int32x4& idxvec) +{ + return v_float64x2(tab[wasm_i32x4_extract_lane(idxvec.val, 0)], + tab[wasm_i32x4_extract_lane(idxvec.val, 1)]); +} + +// loads pairs from the table and deinterleaves them, e.g. returns: +// x = (tab[idxvec[0]], tab[idxvec[1]], tab[idxvec[2]], tab[idxvec[3]]), +// y = (tab[idxvec[0]+1], tab[idxvec[1]+1], tab[idxvec[2]+1], tab[idxvec[3]+1]) +// note that the indices are float's indices, not the float-pair indices. +// in theory, this function can be used to implement bilinear interpolation, +// when idxvec are the offsets within the image. +inline void v_lut_deinterleave(const float* tab, const v_int32x4& idxvec, v_float32x4& x, v_float32x4& y) +{ + x = v_float32x4(tab[wasm_i32x4_extract_lane(idxvec.val, 0)], + tab[wasm_i32x4_extract_lane(idxvec.val, 1)], + tab[wasm_i32x4_extract_lane(idxvec.val, 2)], + tab[wasm_i32x4_extract_lane(idxvec.val, 3)]); + y = v_float32x4(tab[wasm_i32x4_extract_lane(idxvec.val, 0)+1], + tab[wasm_i32x4_extract_lane(idxvec.val, 1)+1], + tab[wasm_i32x4_extract_lane(idxvec.val, 2)+1], + tab[wasm_i32x4_extract_lane(idxvec.val, 3)+1]); +} + +inline void v_lut_deinterleave(const double* tab, const v_int32x4& idxvec, v_float64x2& x, v_float64x2& y) +{ + v128_t xy0 = wasm_v128_load(tab + wasm_i32x4_extract_lane(idxvec.val, 0)); + v128_t xy1 = wasm_v128_load(tab + wasm_i32x4_extract_lane(idxvec.val, 1)); + x.val = wasm_unpacklo_i64x2(xy0, xy1); + y.val = wasm_unpackhi_i64x2(xy0, xy1); +} + +inline v_int8x16 v_interleave_pairs(const v_int8x16& vec) +{ + return v_int8x16(wasm_v8x16_shuffle(vec.val, vec.val, 0,2,1,3,4,6,5,7,8,10,9,11,12,14,13,15)); +} +inline v_uint8x16 v_interleave_pairs(const v_uint8x16& vec) { return v_reinterpret_as_u8(v_interleave_pairs(v_reinterpret_as_s8(vec))); } +inline v_int8x16 v_interleave_quads(const v_int8x16& vec) +{ + return v_int8x16(wasm_v8x16_shuffle(vec.val, vec.val, 0,4,1,5,2,6,3,7,8,12,9,13,10,14,11,15)); +} +inline v_uint8x16 v_interleave_quads(const v_uint8x16& vec) { return v_reinterpret_as_u8(v_interleave_quads(v_reinterpret_as_s8(vec))); } + +inline v_int16x8 v_interleave_pairs(const v_int16x8& vec) +{ + return v_int16x8(wasm_v8x16_shuffle(vec.val, vec.val, 0,1,4,5,2,3,6,7,8,9,12,13,10,11,14,15)); +} +inline v_uint16x8 v_interleave_pairs(const v_uint16x8& vec) { return v_reinterpret_as_u16(v_interleave_pairs(v_reinterpret_as_s16(vec))); } +inline v_int16x8 v_interleave_quads(const v_int16x8& vec) +{ + return v_int16x8(wasm_v8x16_shuffle(vec.val, vec.val, 0,1,8,9,2,3,10,11,4,5,12,13,6,7,14,15)); +} +inline v_uint16x8 v_interleave_quads(const v_uint16x8& vec) { return v_reinterpret_as_u16(v_interleave_quads(v_reinterpret_as_s16(vec))); } + +inline v_int32x4 v_interleave_pairs(const v_int32x4& vec) +{ + return v_int32x4(wasm_v8x16_shuffle(vec.val, vec.val, 0,1,2,3,8,9,10,11,4,5,6,7,12,13,14,15)); +} +inline v_uint32x4 v_interleave_pairs(const v_uint32x4& vec) { return v_reinterpret_as_u32(v_interleave_pairs(v_reinterpret_as_s32(vec))); } +inline v_float32x4 v_interleave_pairs(const v_float32x4& vec) +{ + return v_float32x4(wasm_v8x16_shuffle(vec.val, vec.val, 0,1,2,3,8,9,10,11,4,5,6,7,12,13,14,15)); +} + +inline v_int8x16 v_pack_triplets(const v_int8x16& vec) +{ + return v_int8x16(wasm_v8x16_shuffle(vec.val, vec.val, 0,1,2,4,5,6,8,9,10,12,13,14,16,16,16,16)); +}
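As an aside, the v_lut_deinterleave comment above mentions bilinear interpolation; the following is a minimal sketch (not part of the header) of that use, assuming a row-major single-channel float image and the vector arithmetic operators defined earlier in this header. The names img, stride, ofs, wx and wy are hypothetical caller-side values: ofs holds the flat offset of the top-left pixel of each of the four samples, wx/wy the fractional weights.

// Illustrative only: gather four bilinear samples with the universal intrinsics above.
inline v_float32x4 example_bilinear4(const float* img, int stride, const v_int32x4& ofs,
                                     const v_float32x4& wx, const v_float32x4& wy)
{
    v_float32x4 p00, p01, p10, p11;
    v_lut_deinterleave(img, ofs, p00, p01);            // row y:   pixels x and x+1 for each sample
    v_lut_deinterleave(img + stride, ofs, p10, p11);   // row y+1: pixels x and x+1 for each sample
    v_float32x4 top = p00 + (p01 - p00) * wx;          // horizontal lerp in row y
    v_float32x4 bot = p10 + (p11 - p10) * wx;          // horizontal lerp in row y+1
    return top + (bot - top) * wy;                     // vertical lerp between the two rows
}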
+inline v_uint8x16 v_pack_triplets(const v_uint8x16& vec) { return v_reinterpret_as_u8(v_pack_triplets(v_reinterpret_as_s8(vec))); } + +inline v_int16x8 v_pack_triplets(const v_int16x8& vec) +{ + return v_int16x8(wasm_v8x16_shuffle(vec.val, vec.val, 0,1,2,3,4,5,8,9,10,11,12,13,14,15,6,7)); +} +inline v_uint16x8 v_pack_triplets(const v_uint16x8& vec) { return v_reinterpret_as_u16(v_pack_triplets(v_reinterpret_as_s16(vec))); } + +inline v_int32x4 v_pack_triplets(const v_int32x4& vec) { return vec; } +inline v_uint32x4 v_pack_triplets(const v_uint32x4& vec) { return vec; } +inline v_float32x4 v_pack_triplets(const v_float32x4& vec) { return vec; } + +template<int i, typename _Tp> +inline typename _Tp::lane_type v_extract_n(const _Tp& a) +{ + return v_rotate_right<i>(a).get0(); +} + +template<int i> +inline v_uint32x4 v_broadcast_element(const v_uint32x4& a) +{ + return v_setall_u32(v_extract_n<i>(a)); +} +template<int i> +inline v_int32x4 v_broadcast_element(const v_int32x4& a) +{ + return v_setall_s32(v_extract_n<i>(a)); +} +template<int i> +inline v_float32x4 v_broadcast_element(const v_float32x4& a) +{ + return v_setall_f32(v_extract_n<i>(a)); +} + + +////////////// FP16 support /////////////////////////// + +inline v_float32x4 v_load_expand(const float16_t* ptr) +{ + float a[4]; + for (int i = 0; i < 4; i++) + a[i] = ptr[i]; + return v_float32x4(wasm_v128_load(a)); +} + +inline void v_pack_store(float16_t* ptr, const v_float32x4& v) +{ + float v_[4]; + wasm_v128_store(v_, v.val); + ptr[0] = float16_t(v_[0]); + ptr[1] = float16_t(v_[1]); + ptr[2] = float16_t(v_[2]); + ptr[3] = float16_t(v_[3]); +} + +inline void v_cleanup() {} + +CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END + +//! @endcond + +} + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/hal/msa_macros.h b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/msa_macros.h new file mode 100644 index 00000000..bd6ddb12 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/msa_macros.h @@ -0,0 +1,1558 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#ifndef OPENCV_CORE_HAL_MSA_MACROS_H +#define OPENCV_CORE_HAL_MSA_MACROS_H + +#ifdef __mips_msa +#include "msa.h" +#include <stdint.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/* Define 64 bits vector types */ +typedef signed char v8i8 __attribute__ ((vector_size(8), aligned(8))); +typedef unsigned char v8u8 __attribute__ ((vector_size(8), aligned(8))); +typedef short v4i16 __attribute__ ((vector_size(8), aligned(8))); +typedef unsigned short v4u16 __attribute__ ((vector_size(8), aligned(8))); +typedef int v2i32 __attribute__ ((vector_size(8), aligned(8))); +typedef unsigned int v2u32 __attribute__ ((vector_size(8), aligned(8))); +typedef long long v1i64 __attribute__ ((vector_size(8), aligned(8))); +typedef unsigned long long v1u64 __attribute__ ((vector_size(8), aligned(8))); +typedef float v2f32 __attribute__ ((vector_size(8), aligned(8))); +typedef double v1f64 __attribute__ ((vector_size(8), aligned(8))); + + +/* Load values from the given memory address to a 64-bit vector.
*/ +#define msa_ld1_s8(__a) (*((v8i8*)(__a))) +#define msa_ld1_s16(__a) (*((v4i16*)(__a))) +#define msa_ld1_s32(__a) (*((v2i32*)(__a))) +#define msa_ld1_s64(__a) (*((v1i64*)(__a))) +#define msa_ld1_u8(__a) (*((v8u8*)(__a))) +#define msa_ld1_u16(__a) (*((v4u16*)(__a))) +#define msa_ld1_u32(__a) (*((v2u32*)(__a))) +#define msa_ld1_u64(__a) (*((v1u64*)(__a))) +#define msa_ld1_f32(__a) (*((v2f32*)(__a))) +#define msa_ld1_f64(__a) (*((v1f64*)(__a))) + +/* Load values from the given memory address to a 128-bit vector */ +#define msa_ld1q_s8(__a) ((v16i8)__builtin_msa_ld_b(__a, 0)) +#define msa_ld1q_s16(__a) ((v8i16)__builtin_msa_ld_h(__a, 0)) +#define msa_ld1q_s32(__a) ((v4i32)__builtin_msa_ld_w(__a, 0)) +#define msa_ld1q_s64(__a) ((v2i64)__builtin_msa_ld_d(__a, 0)) +#define msa_ld1q_u8(__a) ((v16u8)__builtin_msa_ld_b(__a, 0)) +#define msa_ld1q_u16(__a) ((v8u16)__builtin_msa_ld_h(__a, 0)) +#define msa_ld1q_u32(__a) ((v4u32)__builtin_msa_ld_w(__a, 0)) +#define msa_ld1q_u64(__a) ((v2u64)__builtin_msa_ld_d(__a, 0)) +#define msa_ld1q_f32(__a) ((v4f32)__builtin_msa_ld_w(__a, 0)) +#define msa_ld1q_f64(__a) ((v2f64)__builtin_msa_ld_d(__a, 0)) + +/* Store 64bits vector elements values to the given memory address. */ +#define msa_st1_s8(__a, __b) (*((v8i8*)(__a)) = __b) +#define msa_st1_s16(__a, __b) (*((v4i16*)(__a)) = __b) +#define msa_st1_s32(__a, __b) (*((v2i32*)(__a)) = __b) +#define msa_st1_s64(__a, __b) (*((v1i64*)(__a)) = __b) +#define msa_st1_u8(__a, __b) (*((v8u8*)(__a)) = __b) +#define msa_st1_u16(__a, __b) (*((v4u16*)(__a)) = __b) +#define msa_st1_u32(__a, __b) (*((v2u32*)(__a)) = __b) +#define msa_st1_u64(__a, __b) (*((v1u64*)(__a)) = __b) +#define msa_st1_f32(__a, __b) (*((v2f32*)(__a)) = __b) +#define msa_st1_f64(__a, __b) (*((v1f64*)(__a)) = __b) + +/* Store the values of elements in the 128 bits vector __a to the given memory address __a. */ +#define msa_st1q_s8(__a, __b) (__builtin_msa_st_b((v16i8)(__b), __a, 0)) +#define msa_st1q_s16(__a, __b) (__builtin_msa_st_h((v8i16)(__b), __a, 0)) +#define msa_st1q_s32(__a, __b) (__builtin_msa_st_w((v4i32)(__b), __a, 0)) +#define msa_st1q_s64(__a, __b) (__builtin_msa_st_d((v2i64)(__b), __a, 0)) +#define msa_st1q_u8(__a, __b) (__builtin_msa_st_b((v16i8)(__b), __a, 0)) +#define msa_st1q_u16(__a, __b) (__builtin_msa_st_h((v8i16)(__b), __a, 0)) +#define msa_st1q_u32(__a, __b) (__builtin_msa_st_w((v4i32)(__b), __a, 0)) +#define msa_st1q_u64(__a, __b) (__builtin_msa_st_d((v2i64)(__b), __a, 0)) +#define msa_st1q_f32(__a, __b) (__builtin_msa_st_w((v4i32)(__b), __a, 0)) +#define msa_st1q_f64(__a, __b) (__builtin_msa_st_d((v2i64)(__b), __a, 0)) + +/* Store the value of the element with the index __c in vector __a to the given memory address __a. 
*/ +#define msa_st1_lane_s8(__a, __b, __c) (*((int8_t*)(__a)) = __b[__c]) +#define msa_st1_lane_s16(__a, __b, __c) (*((int16_t*)(__a)) = __b[__c]) +#define msa_st1_lane_s32(__a, __b, __c) (*((int32_t*)(__a)) = __b[__c]) +#define msa_st1_lane_s64(__a, __b, __c) (*((int64_t*)(__a)) = __b[__c]) +#define msa_st1_lane_u8(__a, __b, __c) (*((uint8_t*)(__a)) = __b[__c]) +#define msa_st1_lane_u16(__a, __b, __c) (*((uint16_t*)(__a)) = __b[__c]) +#define msa_st1_lane_u32(__a, __b, __c) (*((uint32_t*)(__a)) = __b[__c]) +#define msa_st1_lane_u64(__a, __b, __c) (*((uint64_t*)(__a)) = __b[__c]) +#define msa_st1_lane_f32(__a, __b, __c) (*((float*)(__a)) = __b[__c]) +#define msa_st1_lane_f64(__a, __b, __c) (*((double*)(__a)) = __b[__c]) +#define msa_st1q_lane_s8(__a, __b, __c) (*((int8_t*)(__a)) = (int8_t)__builtin_msa_copy_s_b(__b, __c)) +#define msa_st1q_lane_s16(__a, __b, __c) (*((int16_t*)(__a)) = (int16_t)__builtin_msa_copy_s_h(__b, __c)) +#define msa_st1q_lane_s32(__a, __b, __c) (*((int32_t*)(__a)) = __builtin_msa_copy_s_w(__b, __c)) +#define msa_st1q_lane_s64(__a, __b, __c) (*((int64_t*)(__a)) = __builtin_msa_copy_s_d(__b, __c)) +#define msa_st1q_lane_u8(__a, __b, __c) (*((uint8_t*)(__a)) = (uint8_t)__builtin_msa_copy_u_b((v16i8)(__b), __c)) +#define msa_st1q_lane_u16(__a, __b, __c) (*((uint16_t*)(__a)) = (uint16_t)__builtin_msa_copy_u_h((v8i16)(__b), __c)) +#define msa_st1q_lane_u32(__a, __b, __c) (*((uint32_t*)(__a)) = __builtin_msa_copy_u_w((v4i32)(__b), __c)) +#define msa_st1q_lane_u64(__a, __b, __c) (*((uint64_t*)(__a)) = __builtin_msa_copy_u_d((v2i64)(__b), __c)) +#define msa_st1q_lane_f32(__a, __b, __c) (*((float*)(__a)) = __b[__c]) +#define msa_st1q_lane_f64(__a, __b, __c) (*((double*)(__a)) = __b[__c]) + +/* Duplicate elements for 64-bit doubleword vectors */ +#define msa_dup_n_s8(__a) ((v8i8)__builtin_msa_copy_s_d((v2i64)__builtin_msa_fill_b((int32_t)(__a)), 0)) +#define msa_dup_n_s16(__a) ((v4i16)__builtin_msa_copy_s_d((v2i64)__builtin_msa_fill_h((int32_t)(__a)), 0)) +#define msa_dup_n_s32(__a) ((v2i32){__a, __a}) +#define msa_dup_n_s64(__a) ((v1i64){__a}) +#define msa_dup_n_u8(__a) ((v8u8)__builtin_msa_copy_u_d((v2i64)__builtin_msa_fill_b((int32_t)(__a)), 0)) +#define msa_dup_n_u16(__a) ((v4u16)__builtin_msa_copy_u_d((v2i64)__builtin_msa_fill_h((int32_t)(__a)), 0)) +#define msa_dup_n_u32(__a) ((v2u32){__a, __a}) +#define msa_dup_n_u64(__a) ((v1u64){__a}) +#define msa_dup_n_f32(__a) ((v2f32){__a, __a}) +#define msa_dup_n_f64(__a) ((v1f64){__a}) + +/* Duplicate elements for 128-bit quadword vectors */ +#define msa_dupq_n_s8(__a) (__builtin_msa_fill_b((int32_t)(__a))) +#define msa_dupq_n_s16(__a) (__builtin_msa_fill_h((int32_t)(__a))) +#define msa_dupq_n_s32(__a) (__builtin_msa_fill_w((int32_t)(__a))) +#define msa_dupq_n_s64(__a) (__builtin_msa_fill_d((int64_t)(__a))) +#define msa_dupq_n_u8(__a) ((v16u8)__builtin_msa_fill_b((int32_t)(__a))) +#define msa_dupq_n_u16(__a) ((v8u16)__builtin_msa_fill_h((int32_t)(__a))) +#define msa_dupq_n_u32(__a) ((v4u32)__builtin_msa_fill_w((int32_t)(__a))) +#define msa_dupq_n_u64(__a) ((v2u64)__builtin_msa_fill_d((int64_t)(__a))) +#define msa_dupq_n_f32(__a) ((v4f32){__a, __a, __a, __a}) +#define msa_dupq_n_f64(__a) ((v2f64){__a, __a}) +#define msa_dupq_lane_s8(__a, __b) (__builtin_msa_splat_b(__a, __b)) +#define msa_dupq_lane_s16(__a, __b) (__builtin_msa_splat_h(__a, __b)) +#define msa_dupq_lane_s32(__a, __b) (__builtin_msa_splat_w(__a, __b)) +#define msa_dupq_lane_s64(__a, __b) (__builtin_msa_splat_d(__a, __b)) +#define msa_dupq_lane_u8(__a, __b) 
((v16u8)__builtin_msa_splat_b((v16i8)(__a), __b)) +#define msa_dupq_lane_u16(__a, __b) ((v8u16)__builtin_msa_splat_h((v8i16)(__a), __b)) +#define msa_dupq_lane_u32(__a, __b) ((v4u32)__builtin_msa_splat_w((v4i32)(__a), __b)) +#define msa_dupq_lane_u64(__a, __b) ((v2u64)__builtin_msa_splat_d((v2i64)(__a), __b)) + +/* Create a 64 bits vector */ +#define msa_create_s8(__a) ((v8i8)((uint64_t)(__a))) +#define msa_create_s16(__a) ((v4i16)((uint64_t)(__a))) +#define msa_create_s32(__a) ((v2i32)((uint64_t)(__a))) +#define msa_create_s64(__a) ((v1i64)((uint64_t)(__a))) +#define msa_create_u8(__a) ((v8u8)((uint64_t)(__a))) +#define msa_create_u16(__a) ((v4u16)((uint64_t)(__a))) +#define msa_create_u32(__a) ((v2u32)((uint64_t)(__a))) +#define msa_create_u64(__a) ((v1u64)((uint64_t)(__a))) +#define msa_create_f32(__a) ((v2f32)((uint64_t)(__a))) +#define msa_create_f64(__a) ((v1f64)((uint64_t)(__a))) + +/* Sign extends or zero extends each element in a 64 bits vector to twice its original length, and places the results in a 128 bits vector. */ +/*Transform v8i8 to v8i16*/ +#define msa_movl_s8(__a) \ +((v8i16){(__a)[0], (__a)[1], (__a)[2], (__a)[3], \ + (__a)[4], (__a)[5], (__a)[6], (__a)[7]}) + +/*Transform v8u8 to v8u16*/ +#define msa_movl_u8(__a) \ +((v8u16){(__a)[0], (__a)[1], (__a)[2], (__a)[3], \ + (__a)[4], (__a)[5], (__a)[6], (__a)[7]}) + +/*Transform v4i16 to v4i32*/ +#define msa_movl_s16(__a) ((v4i32){(__a)[0], (__a)[1], (__a)[2], (__a)[3]}) + +/*Transform v2i32 to v2i64*/ +#define msa_movl_s32(__a) ((v2i64){(__a)[0], (__a)[1]}) + +/*Transform v4u16 to v4u32*/ +#define msa_movl_u16(__a) ((v4u32){(__a)[0], (__a)[1], (__a)[2], (__a)[3]}) + +/*Transform v2u32 to v2u64*/ +#define msa_movl_u32(__a) ((v2u64){(__a)[0], (__a)[1]}) + +/* Copies the least significant half of each element of a 128 bits vector into the corresponding elements of a 64 bits vector.
*/ +#define msa_movn_s16(__a) \ +({ \ + v16i8 __d = __builtin_msa_pckev_b(__builtin_msa_fill_b(0), (v16i8)(__a)); \ + (v8i8)__builtin_msa_copy_s_d((v2i64)__d, 0); \ +}) + +#define msa_movn_s32(__a) \ +({ \ + v8i16 __d = __builtin_msa_pckev_h(__builtin_msa_fill_h(0), (v8i16)(__a)); \ + (v4i16)__builtin_msa_copy_s_d((v2i64)__d, 0); \ +}) + +#define msa_movn_s64(__a) \ +({ \ + v4i32 __d = __builtin_msa_pckev_w(__builtin_msa_fill_w(0), (v4i32)(__a)); \ + (v2i32)__builtin_msa_copy_s_d((v2i64)__d, 0); \ +}) + +#define msa_movn_u16(__a) \ +({ \ + v16i8 __d = __builtin_msa_pckev_b(__builtin_msa_fill_b(0), (v16i8)(__a)); \ + (v8u8)__builtin_msa_copy_u_d((v2i64)__d, 0); \ +}) + +#define msa_movn_u32(__a) \ +({ \ + v8i16 __d = __builtin_msa_pckev_h(__builtin_msa_fill_h(0), (v8i16)(__a)); \ + (v4u16)__builtin_msa_copy_u_d((v2i64)__d, 0); \ +}) + +#define msa_movn_u64(__a) \ +({ \ + v4i32 __d = __builtin_msa_pckev_w(__builtin_msa_fill_w(0), (v4i32)(__a)); \ + (v2u32)__builtin_msa_copy_u_d((v2i64)__d, 0); \ +}) + +/* qmovn */ +#define msa_qmovn_s16(__a) \ +({ \ + v16i8 __d = __builtin_msa_pckev_b(__builtin_msa_fill_b(0), (v16i8)__builtin_msa_sat_s_h((v8i16)(__a), 7)); \ + (v8i8)__builtin_msa_copy_s_d((v2i64)__d, 0); \ +}) + +#define msa_qmovn_s32(__a) \ +({ \ + v8i16 __d = __builtin_msa_pckev_h(__builtin_msa_fill_h(0), (v8i16)__builtin_msa_sat_s_w((v4i32)(__a), 15)); \ + (v4i16)__builtin_msa_copy_s_d((v2i64)__d, 0); \ +}) + +#define msa_qmovn_s64(__a) \ +({ \ + v4i32 __d = __builtin_msa_pckev_w(__builtin_msa_fill_w(0), (v4i32)__builtin_msa_sat_s_d((v2i64)(__a), 31)); \ + (v2i32)__builtin_msa_copy_s_d((v2i64)__d, 0); \ +}) + +#define msa_qmovn_u16(__a) \ +({ \ + v16i8 __d = __builtin_msa_pckev_b(__builtin_msa_fill_b(0), (v16i8)__builtin_msa_sat_u_h((v8u16)(__a), 7)); \ + (v8u8)__builtin_msa_copy_u_d((v2i64)__d, 0); \ +}) + +#define msa_qmovn_u32(__a) \ +({ \ + v8i16 __d = __builtin_msa_pckev_h(__builtin_msa_fill_h(0), (v8i16)__builtin_msa_sat_u_w((v4u32)(__a), 15)); \ + (v4u16)__builtin_msa_copy_u_d((v2i64)__d, 0); \ +}) + +#define msa_qmovn_u64(__a) \ +({ \ + v4i32 __d = __builtin_msa_pckev_w(__builtin_msa_fill_w(0), (v4i32)__builtin_msa_sat_u_d((v2u64)(__a), 31)); \ + (v2u32)__builtin_msa_copy_u_d((v2i64)__d, 0); \ +}) + +/* qmovun */ +#define msa_qmovun_s16(__a) \ +({ \ + v8i16 __d = __builtin_msa_max_s_h(__builtin_msa_fill_h(0), (v8i16)(__a)); \ + v16i8 __e = __builtin_msa_pckev_b(__builtin_msa_fill_b(0), (v16i8)__builtin_msa_sat_u_h((v8u16)__d, 7)); \ + (v8u8)__builtin_msa_copy_u_d((v2i64)__e, 0); \ +}) + +#define msa_qmovun_s32(__a) \ +({ \ + v4i32 __d = __builtin_msa_max_s_w(__builtin_msa_fill_w(0), (v4i32)(__a)); \ + v8i16 __e = __builtin_msa_pckev_h(__builtin_msa_fill_h(0), (v8i16)__builtin_msa_sat_u_w((v4u32)__d, 15)); \ + (v4u16)__builtin_msa_copy_u_d((v2i64)__e, 0); \ +}) + +#define msa_qmovun_s64(__a) \ +({ \ + v2i64 __d = __builtin_msa_max_s_d(__builtin_msa_fill_d(0), (v2i64)(__a)); \ + v4i32 __e = __builtin_msa_pckev_w(__builtin_msa_fill_w(0), (v4i32)__builtin_msa_sat_u_d((v2u64)__d, 31)); \ + (v2u32)__builtin_msa_copy_u_d((v2i64)__e, 0); \ +}) + +/* Right shift elements in a 128 bits vector by an immediate value, and places the results in a 64 bits vector. 
*/ +#define msa_shrn_n_s16(__a, __b) \ +({ \ + v16i8 __d = __builtin_msa_pckev_b(__builtin_msa_fill_b(0), (v16i8)__builtin_msa_srai_h((v8i16)(__a), (int)(__b))); \ + (v8i8)__builtin_msa_copy_s_d((v2i64)__d, 0); \ +}) + +#define msa_shrn_n_s32(__a, __b) \ +({ \ + v8i16 __d = __builtin_msa_pckev_h(__builtin_msa_fill_h(0), (v8i16)__builtin_msa_srai_w((v4i32)(__a), (int)(__b))); \ + (v4i16)__builtin_msa_copy_s_d((v2i64)__d, 0); \ +}) + +#define msa_shrn_n_s64(__a, __b) \ +({ \ + v4i32 __d = __builtin_msa_pckev_w(__builtin_msa_fill_w(0), (v4i32)__builtin_msa_srai_d((v2i64)(__a), (int)(__b))); \ + (v2i32)__builtin_msa_copy_s_d((v2i64)__d, 0); \ +}) + +#define msa_shrn_n_u16(__a, __b) \ +({ \ + v16i8 __d = __builtin_msa_pckev_b(__builtin_msa_fill_b(0), (v16i8)__builtin_msa_srli_h((v8i16)(__a), (int)(__b))); \ + (v8u8)__builtin_msa_copy_u_d((v2i64)__d, 0); \ +}) + +#define msa_shrn_n_u32(__a, __b) \ +({ \ + v8i16 __d = __builtin_msa_pckev_h(__builtin_msa_fill_h(0), (v8i16)__builtin_msa_srli_w((v4i32)(__a), (int)(__b))); \ + (v4u16)__builtin_msa_copy_u_d((v2i64)__d, 0); \ +}) + +#define msa_shrn_n_u64(__a, __b) \ +({ \ + v4i32 __d = __builtin_msa_pckev_w(__builtin_msa_fill_w(0), (v4i32)__builtin_msa_srli_d((v2i64)(__a), (int)(__b))); \ + (v2u32)__builtin_msa_copy_u_d((v2i64)__d, 0); \ +}) + +/* Right shift elements in a 128 bits vector by an immediate value, and places the results in a 64 bits vector. */ +#define msa_rshrn_n_s16(__a, __b) \ +({ \ + v16i8 __d = __builtin_msa_pckev_b(__builtin_msa_fill_b(0), (v16i8)__builtin_msa_srari_h((v8i16)(__a), (int)__b)); \ + (v8i8)__builtin_msa_copy_s_d((v2i64)__d, 0); \ +}) + +#define msa_rshrn_n_s32(__a, __b) \ +({ \ + v8i16 __d = __builtin_msa_pckev_h(__builtin_msa_fill_h(0), (v8i16)__builtin_msa_srari_w((v4i32)(__a), (int)__b)); \ + (v4i16)__builtin_msa_copy_s_d((v2i64)__d, 0); \ +}) + +#define msa_rshrn_n_s64(__a, __b) \ +({ \ + v4i32 __d = __builtin_msa_pckev_w(__builtin_msa_fill_w(0), (v4i32)__builtin_msa_srari_d((v2i64)(__a), (int)__b)); \ + (v2i32)__builtin_msa_copy_s_d((v2i64)__d, 0); \ +}) + +#define msa_rshrn_n_u16(__a, __b) \ +({ \ + v16i8 __d = __builtin_msa_pckev_b(__builtin_msa_fill_b(0), (v16i8)__builtin_msa_srlri_h((v8i16)(__a), (int)__b)); \ + (v8u8)__builtin_msa_copy_u_d((v2i64)__d, 0); \ +}) + +#define msa_rshrn_n_u32(__a, __b) \ +({ \ + v8i16 __d = __builtin_msa_pckev_h(__builtin_msa_fill_h(0), (v8i16)__builtin_msa_srlri_w((v4i32)(__a), (int)__b)); \ + (v4u16)__builtin_msa_copy_u_d((v2i64)__d, 0); \ +}) + +#define msa_rshrn_n_u64(__a, __b) \ +({ \ + v4i32 __d = __builtin_msa_pckev_w(__builtin_msa_fill_w(0), (v4i32)__builtin_msa_srlri_d((v2i64)(__a), (int)__b)); \ + (v2u32)__builtin_msa_copy_u_d((v2i64)__d, 0); \ +}) + +/* Right shift elements in a 128 bits vector by an immediate value, saturate the results and them in a 64 bits vector. 
*/ +#define msa_qrshrn_n_s16(__a, __b) \ +({ \ + v8i16 __d = __builtin_msa_sat_s_h(__builtin_msa_srari_h((v8i16)(__a), (int)(__b)), 7); \ + v16i8 __e = __builtin_msa_pckev_b(__builtin_msa_fill_b(0), (v16i8)__d); \ + (v8i8)__builtin_msa_copy_s_d((v2i64)__e, 0); \ +}) + +#define msa_qrshrn_n_s32(__a, __b) \ +({ \ + v4i32 __d = __builtin_msa_sat_s_w(__builtin_msa_srari_w((v4i32)(__a), (int)(__b)), 15); \ + v8i16 __e = __builtin_msa_pckev_h(__builtin_msa_fill_h(0), (v8i16)__d); \ + (v4i16)__builtin_msa_copy_s_d((v2i64)__e, 0); \ +}) + +#define msa_qrshrn_n_s64(__a, __b) \ +({ \ + v2i64 __d = __builtin_msa_sat_s_d(__builtin_msa_srari_d((v2i64)(__a), (int)(__b)), 31); \ + v4i32 __e = __builtin_msa_pckev_w(__builtin_msa_fill_w(0), (v4i32)__d); \ + (v2i32)__builtin_msa_copy_s_d((v2i64)__e, 0); \ +}) + +#define msa_qrshrn_n_u16(__a, __b) \ +({ \ + v8u16 __d = __builtin_msa_sat_u_h((v8u16)__builtin_msa_srlri_h((v8i16)(__a), (int)(__b)), 7); \ + v16i8 __e = __builtin_msa_pckev_b(__builtin_msa_fill_b(0), (v16i8)__d); \ + (v8u8)__builtin_msa_copy_u_d((v2i64)__e, 0); \ +}) + +#define msa_qrshrn_n_u32(__a, __b) \ +({ \ + v4u32 __d = __builtin_msa_sat_u_w((v4u32)__builtin_msa_srlri_w((v4i32)(__a), (int)(__b)), 15); \ + v8i16 __e = __builtin_msa_pckev_h(__builtin_msa_fill_h(0), (v8i16)__d); \ + (v4u16)__builtin_msa_copy_u_d((v2i64)__e, 0); \ +}) + +#define msa_qrshrn_n_u64(__a, __b) \ +({ \ + v2u64 __d = __builtin_msa_sat_u_d((v2u64)__builtin_msa_srlri_d((v2i64)(__a), (int)(__b)), 31); \ + v4i32 __e = __builtin_msa_pckev_w(__builtin_msa_fill_w(0), (v4i32)__d); \ + (v2u32)__builtin_msa_copy_u_d((v2i64)__e, 0); \ +}) + +/* Right shift elements in a 128 bits vector by an immediate value, saturate the results and them in a 64 bits vector. + Input is signed and output is unsigned. 
*/ +#define msa_qrshrun_n_s16(__a, __b) \ +({ \ + v8i16 __d = __builtin_msa_srlri_h(__builtin_msa_max_s_h(__builtin_msa_fill_h(0), (v8i16)(__a)), (int)(__b)); \ + v16i8 __e = __builtin_msa_pckev_b(__builtin_msa_fill_b(0), (v16i8)__builtin_msa_sat_u_h((v8u16)__d, 7)); \ + (v8u8)__builtin_msa_copy_u_d((v2i64)__e, 0); \ +}) + +#define msa_qrshrun_n_s32(__a, __b) \ +({ \ + v4i32 __d = __builtin_msa_srlri_w(__builtin_msa_max_s_w(__builtin_msa_fill_w(0), (v4i32)(__a)), (int)(__b)); \ + v8i16 __e = __builtin_msa_pckev_h(__builtin_msa_fill_h(0), (v8i16)__builtin_msa_sat_u_w((v4u32)__d, 15)); \ + (v4u16)__builtin_msa_copy_u_d((v2i64)__e, 0); \ +}) + +#define msa_qrshrun_n_s64(__a, __b) \ +({ \ + v2i64 __d = __builtin_msa_srlri_d(__builtin_msa_max_s_d(__builtin_msa_fill_d(0), (v2i64)(__a)), (int)(__b)); \ + v4i32 __e = __builtin_msa_pckev_w(__builtin_msa_fill_w(0), (v4i32)__builtin_msa_sat_u_d((v2u64)__d, 31)); \ + (v2u32)__builtin_msa_copy_u_d((v2i64)__e, 0); \ +}) + +/* pack */ +#define msa_pack_s16(__a, __b) (__builtin_msa_pckev_b((v16i8)(__b), (v16i8)(__a))) +#define msa_pack_s32(__a, __b) (__builtin_msa_pckev_h((v8i16)(__b), (v8i16)(__a))) +#define msa_pack_s64(__a, __b) (__builtin_msa_pckev_w((v4i32)(__b), (v4i32)(__a))) +#define msa_pack_u16(__a, __b) ((v16u8)__builtin_msa_pckev_b((v16i8)(__b), (v16i8)(__a))) +#define msa_pack_u32(__a, __b) ((v8u16)__builtin_msa_pckev_h((v8i16)(__b), (v8i16)(__a))) +#define msa_pack_u64(__a, __b) ((v4u32)__builtin_msa_pckev_w((v4i32)(__b), (v4i32)(__a))) + +/* qpack */ +#define msa_qpack_s16(__a, __b) \ +(__builtin_msa_pckev_b((v16i8)__builtin_msa_sat_s_h((v8i16)(__b), 7), (v16i8)__builtin_msa_sat_s_h((v8i16)(__a), 7))) +#define msa_qpack_s32(__a, __b) \ +(__builtin_msa_pckev_h((v8i16)__builtin_msa_sat_s_w((v4i32)(__b), 15), (v8i16)__builtin_msa_sat_s_w((v4i32)(__a), 15))) +#define msa_qpack_s64(__a, __b) \ +(__builtin_msa_pckev_w((v4i32)__builtin_msa_sat_s_d((v2i64)(__b), 31), (v4i32)__builtin_msa_sat_s_d((v2i64)(__a), 31))) +#define msa_qpack_u16(__a, __b) \ +((v16u8)__builtin_msa_pckev_b((v16i8)__builtin_msa_sat_u_h((v8u16)(__b), 7), (v16i8)__builtin_msa_sat_u_h((v8u16)(__a), 7))) +#define msa_qpack_u32(__a, __b) \ +((v8u16)__builtin_msa_pckev_h((v8i16)__builtin_msa_sat_u_w((v4u32)(__b), 15), (v8i16)__builtin_msa_sat_u_w((v4u32)(__a), 15))) +#define msa_qpack_u64(__a, __b) \ +((v4u32)__builtin_msa_pckev_w((v4i32)__builtin_msa_sat_u_d((v2u64)(__b), 31), (v4i32)__builtin_msa_sat_u_d((v2u64)(__a), 31))) + +/* qpacku */ +#define msa_qpacku_s16(__a, __b) \ +((v16u8)__builtin_msa_pckev_b((v16i8)__builtin_msa_sat_u_h((v8u16)(__builtin_msa_max_s_h(__builtin_msa_fill_h(0), (v8i16)(__b))), 7), \ + (v16i8)__builtin_msa_sat_u_h((v8u16)(__builtin_msa_max_s_h(__builtin_msa_fill_h(0), (v8i16)(__a))), 7))) +#define msa_qpacku_s32(__a, __b) \ +((v8u16)__builtin_msa_pckev_h((v8i16)__builtin_msa_sat_u_w((v4u32)(__builtin_msa_max_s_w(__builtin_msa_fill_w(0), (v4i32)(__b))), 15), \ + (v8i16)__builtin_msa_sat_u_w((v4u32)(__builtin_msa_max_s_w(__builtin_msa_fill_w(0), (v4i32)(__a))), 15))) +#define msa_qpacku_s64(__a, __b) \ +((v4u32)__builtin_msa_pckev_w((v4i32)__builtin_msa_sat_u_d((v2u64)(__builtin_msa_max_s_d(__builtin_msa_fill_d(0), (v2i64)(__b))), 31), \ + (v4i32)__builtin_msa_sat_u_d((v2u64)(__builtin_msa_max_s_d(__builtin_msa_fill_d(0), (v2i64)(__a))), 31))) + +/* packr */ +#define msa_packr_s16(__a, __b, __c) \ +(__builtin_msa_pckev_b((v16i8)__builtin_msa_srai_h((v8i16)(__b), (int)(__c)), (v16i8)__builtin_msa_srai_h((v8i16)(__a), (int)(__c)))) +#define 
msa_packr_s32(__a, __b, __c) \ +(__builtin_msa_pckev_h((v8i16)__builtin_msa_srai_w((v4i32)(__b), (int)(__c)), (v8i16)__builtin_msa_srai_w((v4i32)(__a), (int)(__c)))) +#define msa_packr_s64(__a, __b, __c) \ +(__builtin_msa_pckev_w((v4i32)__builtin_msa_srai_d((v2i64)(__b), (int)(__c)), (v4i32)__builtin_msa_srai_d((v2i64)(__a), (int)(__c)))) +#define msa_packr_u16(__a, __b, __c) \ +((v16u8)__builtin_msa_pckev_b((v16i8)__builtin_msa_srli_h((v8i16)(__b), (int)(__c)), (v16i8)__builtin_msa_srli_h((v8i16)(__a), (int)(__c)))) +#define msa_packr_u32(__a, __b, __c) \ +((v8u16)__builtin_msa_pckev_h((v8i16)__builtin_msa_srli_w((v4i32)(__b), (int)(__c)), (v8i16)__builtin_msa_srli_w((v4i32)(__a), (int)(__c)))) +#define msa_packr_u64(__a, __b, __c) \ +((v4u32)__builtin_msa_pckev_w((v4i32)__builtin_msa_srli_d((v2i64)(__b), (int)(__c)), (v4i32)__builtin_msa_srli_d((v2i64)(__a), (int)(__c)))) + +/* rpackr */ +#define msa_rpackr_s16(__a, __b, __c) \ +(__builtin_msa_pckev_b((v16i8)__builtin_msa_srari_h((v8i16)(__b), (int)(__c)), (v16i8)__builtin_msa_srari_h((v8i16)(__a), (int)(__c)))) +#define msa_rpackr_s32(__a, __b, __c) \ +(__builtin_msa_pckev_h((v8i16)__builtin_msa_srari_w((v4i32)(__b), (int)(__c)), (v8i16)__builtin_msa_srari_w((v4i32)(__a), (int)(__c)))) +#define msa_rpackr_s64(__a, __b, __c) \ +(__builtin_msa_pckev_w((v4i32)__builtin_msa_srari_d((v2i64)(__b), (int)(__c)), (v4i32)__builtin_msa_srari_d((v2i64)(__a), (int)(__c)))) +#define msa_rpackr_u16(__a, __b, __c) \ +((v16u8)__builtin_msa_pckev_b((v16i8)__builtin_msa_srlri_h((v8i16)(__b), (int)(__c)), (v16i8)__builtin_msa_srlri_h((v8i16)(__a), (int)(__c)))) +#define msa_rpackr_u32(__a, __b, __c) \ +((v8u16)__builtin_msa_pckev_h((v8i16)__builtin_msa_srlri_w((v4i32)(__b), (int)(__c)), (v8i16)__builtin_msa_srlri_w((v4i32)(__a), (int)(__c)))) +#define msa_rpackr_u64(__a, __b, __c) \ +((v4u32)__builtin_msa_pckev_w((v4i32)__builtin_msa_srlri_d((v2i64)(__b), (int)(__c)), (v4i32)__builtin_msa_srlri_d((v2i64)(__a), (int)(__c)))) + +/* qrpackr */ +#define msa_qrpackr_s16(__a, __b, __c) \ +(__builtin_msa_pckev_b((v16i8)__builtin_msa_sat_s_h(__builtin_msa_srari_h((v8i16)(__b), (int)(__c)), 7), \ + (v16i8)__builtin_msa_sat_s_h(__builtin_msa_srari_h((v8i16)(__a), (int)(__c)), 7))) +#define msa_qrpackr_s32(__a, __b, __c) \ +(__builtin_msa_pckev_h((v8i16)__builtin_msa_sat_s_w(__builtin_msa_srari_w((v4i32)(__b), (int)(__c)), 15), \ + (v8i16)__builtin_msa_sat_s_w(__builtin_msa_srari_w((v4i32)(__a), (int)(__c)), 15))) +#define msa_qrpackr_s64(__a, __b, __c) \ +(__builtin_msa_pckev_w((v4i32)__builtin_msa_sat_s_d(__builtin_msa_srari_d((v2i64)(__b), (int)(__c)), 31), \ + (v4i32)__builtin_msa_sat_s_d(__builtin_msa_srari_d((v2i64)(__a), (int)(__c)), 31))) +#define msa_qrpackr_u16(__a, __b, __c) \ +((v16u8)__builtin_msa_pckev_b((v16i8)__builtin_msa_sat_u_h((v8u16)__builtin_msa_srlri_h((v8i16)(__b), (int)(__c)), 7), \ + (v16i8)__builtin_msa_sat_u_h((v8u16)__builtin_msa_srlri_h((v8i16)(__a), (int)(__c)), 7))) +#define msa_qrpackr_u32(__a, __b, __c) \ +((v8u16)__builtin_msa_pckev_h((v8i16)__builtin_msa_sat_u_w((v4u32)__builtin_msa_srlri_w((v4i32)(__b), (int)(__c)), 15), \ + (v8i16)__builtin_msa_sat_u_w((v4u32)__builtin_msa_srlri_w((v4i32)(__a), (int)(__c)), 15))) +#define msa_qrpackr_u64(__a, __b, __c) \ +((v4u32)__builtin_msa_pckev_w((v4i32)__builtin_msa_sat_u_d((v2u64)__builtin_msa_srlri_d((v2i64)(__b), (int)(__c)), 31), \ + (v4i32)__builtin_msa_sat_u_d((v2u64)__builtin_msa_srlri_d((v2i64)(__a), (int)(__c)), 31))) + +/* qrpackru */ +#define msa_qrpackru_s16(__a, __b, __c) \ +({ 
\ + v8i16 __d = __builtin_msa_srlri_h(__builtin_msa_max_s_h(__builtin_msa_fill_h(0), (v8i16)(__a)), (int)(__c)); \ + v8i16 __e = __builtin_msa_srlri_h(__builtin_msa_max_s_h(__builtin_msa_fill_h(0), (v8i16)(__b)), (int)(__c)); \ + (v16u8)__builtin_msa_pckev_b((v16i8)__builtin_msa_sat_u_h((v8u16)__e, 7), (v16i8)__builtin_msa_sat_u_h((v8u16)__d, 7)); \ +}) + +#define msa_qrpackru_s32(__a, __b, __c) \ +({ \ + v4i32 __d = __builtin_msa_srlri_w(__builtin_msa_max_s_w(__builtin_msa_fill_w(0), (v4i32)(__a)), (int)(__c)); \ + v4i32 __e = __builtin_msa_srlri_w(__builtin_msa_max_s_w(__builtin_msa_fill_w(0), (v4i32)(__b)), (int)(__c)); \ + (v8u16)__builtin_msa_pckev_h((v8i16)__builtin_msa_sat_u_w((v4u32)__e, 15), (v8i16)__builtin_msa_sat_u_w((v4u32)__d, 15)); \ +}) + +#define msa_qrpackru_s64(__a, __b, __c) \ +({ \ + v2i64 __d = __builtin_msa_srlri_d(__builtin_msa_max_s_d(__builtin_msa_fill_d(0), (v2i64)(__a)), (int)(__c)); \ + v2i64 __e = __builtin_msa_srlri_d(__builtin_msa_max_s_d(__builtin_msa_fill_d(0), (v2i64)(__b)), (int)(__c)); \ + (v4u32)__builtin_msa_pckev_w((v4i32)__builtin_msa_sat_u_d((v2u64)__e, 31), (v4i32)__builtin_msa_sat_u_d((v2u64)__d, 31)); \ +}) + +/* Minimum values between corresponding elements in the two vectors are written to the returned vector. */ +#define msa_minq_s8(__a, __b) (__builtin_msa_min_s_b(__a, __b)) +#define msa_minq_s16(__a, __b) (__builtin_msa_min_s_h(__a, __b)) +#define msa_minq_s32(__a, __b) (__builtin_msa_min_s_w(__a, __b)) +#define msa_minq_s64(__a, __b) (__builtin_msa_min_s_d(__a, __b)) +#define msa_minq_u8(__a, __b) ((v16u8)__builtin_msa_min_u_b(__a, __b)) +#define msa_minq_u16(__a, __b) ((v8u16)__builtin_msa_min_u_h(__a, __b)) +#define msa_minq_u32(__a, __b) ((v4u32)__builtin_msa_min_u_w(__a, __b)) +#define msa_minq_u64(__a, __b) ((v2u64)__builtin_msa_min_u_d(__a, __b)) +#define msa_minq_f32(__a, __b) (__builtin_msa_fmin_w(__a, __b)) +#define msa_minq_f64(__a, __b) (__builtin_msa_fmin_d(__a, __b)) + +/* Maximum values between corresponding elements in the two vectors are written to the returned vector. */ +#define msa_maxq_s8(__a, __b) (__builtin_msa_max_s_b(__a, __b)) +#define msa_maxq_s16(__a, __b) (__builtin_msa_max_s_h(__a, __b)) +#define msa_maxq_s32(__a, __b) (__builtin_msa_max_s_w(__a, __b)) +#define msa_maxq_s64(__a, __b) (__builtin_msa_max_s_d(__a, __b)) +#define msa_maxq_u8(__a, __b) ((v16u8)__builtin_msa_max_u_b(__a, __b)) +#define msa_maxq_u16(__a, __b) ((v8u16)__builtin_msa_max_u_h(__a, __b)) +#define msa_maxq_u32(__a, __b) ((v4u32)__builtin_msa_max_u_w(__a, __b)) +#define msa_maxq_u64(__a, __b) ((v2u64)__builtin_msa_max_u_d(__a, __b)) +#define msa_maxq_f32(__a, __b) (__builtin_msa_fmax_w(__a, __b)) +#define msa_maxq_f64(__a, __b) (__builtin_msa_fmax_d(__a, __b)) + +/* Vector type reinterpretion */ +#define MSA_TPV_REINTERPRET(_Tpv, Vec) ((_Tpv)(Vec)) + +/* Add the odd elements in vector __a with the even elements in vector __b to double width elements in the returned vector. */ +/* v8i16 msa_hadd_s16 ((v16i8)__a, (v16i8)__b) */ +#define msa_hadd_s16(__a, __b) (__builtin_msa_hadd_s_h((v16i8)(__a), (v16i8)(__b))) +/* v4i32 msa_hadd_s32 ((v8i16)__a, (v8i16)__b) */ +#define msa_hadd_s32(__a, __b) (__builtin_msa_hadd_s_w((v8i16)(__a), (v8i16)(__b))) +/* v2i64 msa_hadd_s64 ((v4i32)__a, (v4i32)__b) */ +#define msa_hadd_s64(__a, __b) (__builtin_msa_hadd_s_d((v4i32)(__a), (v4i32)(__b))) + +/* Copy even elements in __a to the left half and even elements in __b to the right half and return the result vector. 
*/ +#define msa_pckev_s8(__a, __b) (__builtin_msa_pckev_b((v16i8)(__a), (v16i8)(__b))) +#define msa_pckev_s16(__a, __b) (__builtin_msa_pckev_h((v8i16)(__a), (v8i16)(__b))) +#define msa_pckev_s32(__a, __b) (__builtin_msa_pckev_w((v4i32)(__a), (v4i32)(__b))) +#define msa_pckev_s64(__a, __b) (__builtin_msa_pckev_d((v2i64)(__a), (v2i64)(__b))) + +/* Copy even elements in __a to the left half and even elements in __b to the right half and return the result vector. */ +#define msa_pckod_s8(__a, __b) (__builtin_msa_pckod_b((v16i8)(__a), (v16i8)(__b))) +#define msa_pckod_s16(__a, __b) (__builtin_msa_pckod_h((v8i16)(__a), (v8i16)(__b))) +#define msa_pckod_s32(__a, __b) (__builtin_msa_pckod_w((v4i32)(__a), (v4i32)(__b))) +#define msa_pckod_s64(__a, __b) (__builtin_msa_pckod_d((v2i64)(__a), (v2i64)(__b))) + +#ifdef _MIPSEB +#define LANE_IMM0_1(x) (0b1 - ((x) & 0b1)) +#define LANE_IMM0_3(x) (0b11 - ((x) & 0b11)) +#define LANE_IMM0_7(x) (0b111 - ((x) & 0b111)) +#define LANE_IMM0_15(x) (0b1111 - ((x) & 0b1111)) +#else +#define LANE_IMM0_1(x) ((x) & 0b1) +#define LANE_IMM0_3(x) ((x) & 0b11) +#define LANE_IMM0_7(x) ((x) & 0b111) +#define LANE_IMM0_15(x) ((x) & 0b1111) +#endif + +#define msa_get_lane_u8(__a, __b) ((uint8_t)(__a)[LANE_IMM0_7(__b)]) +#define msa_get_lane_s8(__a, __b) ((int8_t)(__a)[LANE_IMM0_7(__b)]) +#define msa_get_lane_u16(__a, __b) ((uint16_t)(__a)[LANE_IMM0_3(__b)]) +#define msa_get_lane_s16(__a, __b) ((int16_t)(__a)[LANE_IMM0_3(__b)]) +#define msa_get_lane_u32(__a, __b) ((uint32_t)(__a)[LANE_IMM0_1(__b)]) +#define msa_get_lane_s32(__a, __b) ((int32_t)(__a)[LANE_IMM0_1(__b)]) +#define msa_get_lane_f32(__a, __b) ((float)(__a)[LANE_IMM0_3(__b)]) +#define msa_get_lane_s64(__a, __b) ((int64_t)(__a)[LANE_IMM0_1(__b)]) +#define msa_get_lane_u64(__a, __b) ((uint64_t)(__a)[LANE_IMM0_1(__b)]) +#define msa_get_lane_f64(__a, __b) ((double)(__a)[LANE_IMM0_1(__b)]) +#define msa_getq_lane_u8(__a, imm0_15) ((uint8_t)__builtin_msa_copy_u_b((v16i8)(__a), imm0_15)) +#define msa_getq_lane_s8(__a, imm0_15) ((int8_t)__builtin_msa_copy_s_b(__a, imm0_15)) +#define msa_getq_lane_u16(__a, imm0_7) ((uint16_t)__builtin_msa_copy_u_h((v8i16)(__a), imm0_7)) +#define msa_getq_lane_s16(__a, imm0_7) ((int16_t)__builtin_msa_copy_s_h(__a, imm0_7)) +#define msa_getq_lane_u32(__a, imm0_3) __builtin_msa_copy_u_w((v4i32)(__a), imm0_3) +#define msa_getq_lane_s32 __builtin_msa_copy_s_w +#define msa_getq_lane_f32(__a, __b) ((float)(__a)[LANE_IMM0_3(__b)]) +#define msa_getq_lane_f64(__a, __b) ((double)(__a)[LANE_IMM0_1(__b)]) +#if (__mips == 64) +#define msa_getq_lane_u64(__a, imm0_1) __builtin_msa_copy_u_d((v2i64)(__a), imm0_1) +#define msa_getq_lane_s64 __builtin_msa_copy_s_d +#else +#define msa_getq_lane_u64(__a, imm0_1) ((uint64_t)(__a)[LANE_IMM0_1(imm0_1)]) +#define msa_getq_lane_s64(__a, imm0_1) ((int64_t)(__a)[LANE_IMM0_1(imm0_1)]) +#endif + +/* combine */ +#if (__mips == 64) +#define __COMBINE_64_64(__TYPE, a, b) ((__TYPE)((v2u64){((v1u64)(a))[0], ((v1u64)(b))[0]})) +#else +#define __COMBINE_64_64(__TYPE, a, b) ((__TYPE)((v4u32){((v2u32)(a))[0], ((v2u32)(a))[1], \ + ((v2u32)(b))[0], ((v2u32)(b))[1]})) +#endif + +/* v16i8 msa_combine_s8 (v8i8 __a, v8i8 __b) */ +#define msa_combine_s8(__a, __b) __COMBINE_64_64(v16i8, __a, __b) + +/* v8i16 msa_combine_s16(v4i16 __a, v4i16 __b) */ +#define msa_combine_s16(__a, __b) __COMBINE_64_64(v8i16, __a, __b) + +/* v4i32 msa_combine_s32(v2i32 __a, v2i32 __b) */ +#define msa_combine_s32(__a, __b) __COMBINE_64_64(v4i32, __a, __b) + +/* v2i64 msa_combine_s64(v1i64 __a, v1i64 __b) */ 
+#define msa_combine_s64(__a, __b) __COMBINE_64_64(v2i64, __a, __b) + +/* v4f32 msa_combine_f32(v2f32 __a, v2f32 __b) */ +#define msa_combine_f32(__a, __b) __COMBINE_64_64(v4f32, __a, __b) + +/* v16u8 msa_combine_u8(v8u8 __a, v8u8 __b) */ +#define msa_combine_u8(__a, __b) __COMBINE_64_64(v16u8, __a, __b) + +/* v8u16 msa_combine_u16(v4u16 __a, v4u16 __b) */ +#define msa_combine_u16(__a, __b) __COMBINE_64_64(v8u16, __a, __b) + +/* v4u32 msa_combine_u32(v2u32 __a, v2u32 __b) */ +#define msa_combine_u32(__a, __b) __COMBINE_64_64(v4u32, __a, __b) + +/* v2u64 msa_combine_u64(v1u64 __a, v1u64 __b) */ +#define msa_combine_u64(__a, __b) __COMBINE_64_64(v2u64, __a, __b) + +/* v2f64 msa_combine_f64(v1f64 __a, v1f64 __b) */ +#define msa_combine_f64(__a, __b) __COMBINE_64_64(v2f64, __a, __b) + +/* get_low, get_high */ +#if (__mips == 64) +#define __GET_LOW(__TYPE, a) ((__TYPE)((v1u64)(__builtin_msa_copy_u_d((v2i64)(a), 0)))) +#define __GET_HIGH(__TYPE, a) ((__TYPE)((v1u64)(__builtin_msa_copy_u_d((v2i64)(a), 1)))) +#else +#define __GET_LOW(__TYPE, a) ((__TYPE)(((v2u64)(a))[0])) +#define __GET_HIGH(__TYPE, a) ((__TYPE)(((v2u64)(a))[1])) +#endif + +/* v8i8 msa_get_low_s8(v16i8 __a) */ +#define msa_get_low_s8(__a) __GET_LOW(v8i8, __a) + +/* v4i16 msa_get_low_s16(v8i16 __a) */ +#define msa_get_low_s16(__a) __GET_LOW(v4i16, __a) + +/* v2i32 msa_get_low_s32(v4i32 __a) */ +#define msa_get_low_s32(__a) __GET_LOW(v2i32, __a) + +/* v1i64 msa_get_low_s64(v2i64 __a) */ +#define msa_get_low_s64(__a) __GET_LOW(v1i64, __a) + +/* v8u8 msa_get_low_u8(v16u8 __a) */ +#define msa_get_low_u8(__a) __GET_LOW(v8u8, __a) + +/* v4u16 msa_get_low_u16(v8u16 __a) */ +#define msa_get_low_u16(__a) __GET_LOW(v4u16, __a) + +/* v2u32 msa_get_low_u32(v4u32 __a) */ +#define msa_get_low_u32(__a) __GET_LOW(v2u32, __a) + +/* v1u64 msa_get_low_u64(v2u64 __a) */ +#define msa_get_low_u64(__a) __GET_LOW(v1u64, __a) + +/* v2f32 msa_get_low_f32(v4f32 __a) */ +#define msa_get_low_f32(__a) __GET_LOW(v2f32, __a) + +/* v1f64 msa_get_low_f64(v2f64 __a) */ +#define msa_get_low_f64(__a) __GET_LOW(v1f64, __a) + +/* v8i8 msa_get_high_s8(v16i8 __a) */ +#define msa_get_high_s8(__a) __GET_HIGH(v8i8, __a) + +/* v4i16 msa_get_high_s16(v8i16 __a) */ +#define msa_get_high_s16(__a) __GET_HIGH(v4i16, __a) + +/* v2i32 msa_get_high_s32(v4i32 __a) */ +#define msa_get_high_s32(__a) __GET_HIGH(v2i32, __a) + +/* v1i64 msa_get_high_s64(v2i64 __a) */ +#define msa_get_high_s64(__a) __GET_HIGH(v1i64, __a) + +/* v8u8 msa_get_high_u8(v16u8 __a) */ +#define msa_get_high_u8(__a) __GET_HIGH(v8u8, __a) + +/* v4u16 msa_get_high_u16(v8u16 __a) */ +#define msa_get_high_u16(__a) __GET_HIGH(v4u16, __a) + +/* v2u32 msa_get_high_u32(v4u32 __a) */ +#define msa_get_high_u32(__a) __GET_HIGH(v2u32, __a) + +/* v1u64 msa_get_high_u64(v2u64 __a) */ +#define msa_get_high_u64(__a) __GET_HIGH(v1u64, __a) + +/* v2f32 msa_get_high_f32(v4f32 __a) */ +#define msa_get_high_f32(__a) __GET_HIGH(v2f32, __a) + +/* v1f64 msa_get_high_f64(v2f64 __a) */ +#define msa_get_high_f64(__a) __GET_HIGH(v1f64, __a) + +/* ri = ai * b[lane] */ +/* v4f32 msa_mulq_lane_f32(v4f32 __a, v4f32 __b, const int __lane) */ +#define msa_mulq_lane_f32(__a, __b, __lane) ((__a) * msa_getq_lane_f32(__b, __lane)) + +/* ri = ai + bi * c[lane] */ +/* v4f32 msa_mlaq_lane_f32(v4f32 __a, v4f32 __b, v4f32 __c, const int __lane) */ +#define msa_mlaq_lane_f32(__a, __b, __c, __lane) ((__a) + ((__b) * msa_getq_lane_f32(__c, __lane))) + +/* uint16_t msa_sum_u16(v8u16 __a)*/ +#define msa_sum_u16(__a) \ +({ \ + v4u32 _b; \ + v2u64 _c; \ + _b = 
__builtin_msa_hadd_u_w(__a, __a); \ + _c = __builtin_msa_hadd_u_d(_b, _b); \ + (uint16_t)(_c[0] + _c[1]); \ +}) + +/* int16_t msa_sum_s16(v8i16 __a) */ +#define msa_sum_s16(__a) \ +({ \ + v4i32 _b; \ + v2i64 _c; \ + _b = __builtin_msa_hadd_s_w(__a, __a); \ + _c = __builtin_msa_hadd_s_d(_b, _b); \ + (int16_t)(_c[0] + _c[1]); \ +}) + + +/* uint32_t msa_sum_u32(v4u32 __a)*/ +#define msa_sum_u32(__a) \ +({ \ + v2u64 _b; \ + _b = __builtin_msa_hadd_u_d(__a, __a); \ + (uint32_t)(_b[0] + _b[1]); \ +}) + +/* int32_t msa_sum_s32(v4i32 __a)*/ +#define msa_sum_s32(__a) \ +({ \ + v2i64 _b; \ + _b = __builtin_msa_hadd_s_d(__a, __a); \ + (int32_t)(_b[0] + _b[1]); \ +}) + +/* uint8_t msa_sum_u8(v16u8 __a)*/ +#define msa_sum_u8(__a) \ +({ \ + v8u16 _b16; \ + v4u32 _c32; \ + _b16 = __builtin_msa_hadd_u_h(__a, __a); \ + _c32 = __builtin_msa_hadd_u_w(_b16, _b16); \ + (uint8_t)msa_sum_u32(_c32); \ +}) + +/* int8_t msa_sum_s8(v16s8 __a)*/ +#define msa_sum_s8(__a) \ +({ \ + v8i16 _b16; \ + v4i32 _c32; \ + _b16 = __builtin_msa_hadd_s_h(__a, __a); \ + _c32 = __builtin_msa_hadd_s_w(_b16, _b16); \ + (int8_t)msa_sum_s32(_c32); \ +}) + +/* float msa_sum_f32(v4f32 __a)*/ +#define msa_sum_f32(__a) ((__a)[0] + (__a)[1] + (__a)[2] + (__a)[3]) + +/* v8u16 msa_paddlq_u8(v16u8 __a) */ +#define msa_paddlq_u8(__a) (__builtin_msa_hadd_u_h(__a, __a)) + +/* v8i16 msa_paddlq_s8(v16i8 __a) */ +#define msa_paddlq_s8(__a) (__builtin_msa_hadd_s_h(__a, __a)) + +/* v4u32 msa_paddlq_u16 (v8u16 __a)*/ +#define msa_paddlq_u16(__a) (__builtin_msa_hadd_u_w(__a, __a)) + +/* v4i32 msa_paddlq_s16 (v8i16 __a)*/ +#define msa_paddlq_s16(__a) (__builtin_msa_hadd_s_w(__a, __a)) + +/* v2u64 msa_paddlq_u32(v4u32 __a) */ +#define msa_paddlq_u32(__a) (__builtin_msa_hadd_u_d(__a, __a)) + +/* v2i64 msa_paddlq_s32(v4i32 __a) */ +#define msa_paddlq_s32(__a) (__builtin_msa_hadd_s_d(__a, __a)) + +#define V8U8_2_V8U16(x) {(uint16_t)x[0], (uint16_t)x[1], (uint16_t)x[2], (uint16_t)x[3], \ + (uint16_t)x[4], (uint16_t)x[5], (uint16_t)x[6], (uint16_t)x[7]} +#define V8U8_2_V8I16(x) {(int16_t)x[0], (int16_t)x[1], (int16_t)x[2], (int16_t)x[3], \ + (int16_t)x[4], (int16_t)x[5], (int16_t)x[6], (int16_t)x[7]} +#define V8I8_2_V8I16(x) {(int16_t)x[0], (int16_t)x[1], (int16_t)x[2], (int16_t)x[3], \ + (int16_t)x[4], (int16_t)x[5], (int16_t)x[6], (int16_t)x[7]} +#define V4U16_2_V4U32(x) {(uint32_t)x[0], (uint32_t)x[1], (uint32_t)x[2], (uint32_t)x[3]} +#define V4U16_2_V4I32(x) {(int32_t)x[0], (int32_t)x[1], (int32_t)x[2], (int32_t)x[3]} +#define V4I16_2_V4I32(x) {(int32_t)x[0], (int32_t)x[1], (int32_t)x[2], (int32_t)x[3]} +#define V2U32_2_V2U64(x) {(uint64_t)x[0], (uint64_t)x[1]} +#define V2U32_2_V2I64(x) {(int64_t)x[0], (int64_t)x[1]} + +/* v8u16 msa_mull_u8(v8u8 __a, v8u8 __b) */ +#define msa_mull_u8(__a, __b) ((v8u16)__builtin_msa_mulv_h((v8i16)V8U8_2_V8I16(__a), (v8i16)V8U8_2_V8I16(__b))) + +/* v8i16 msa_mull_s8(v8i8 __a, v8i8 __b)*/ +#define msa_mull_s8(__a, __b) (__builtin_msa_mulv_h((v8i16)V8I8_2_V8I16(__a), (v8i16)V8I8_2_V8I16(__b))) + +/* v4u32 msa_mull_u16(v4u16 __a, v4u16 __b) */ +#define msa_mull_u16(__a, __b) ((v4u32)__builtin_msa_mulv_w((v4i32)V4U16_2_V4I32(__a), (v4i32)V4U16_2_V4I32(__b))) + +/* v4i32 msa_mull_s16(v4i16 __a, v4i16 __b) */ +#define msa_mull_s16(__a, __b) (__builtin_msa_mulv_w((v4i32)V4I16_2_V4I32(__a), (v4i32)V4I16_2_V4I32(__b))) + +/* v2u64 msa_mull_u32(v2u32 __a, v2u32 __b) */ +#define msa_mull_u32(__a, __b) ((v2u64)__builtin_msa_mulv_d((v2i64)V2U32_2_V2I64(__a), (v2i64)V2U32_2_V2I64(__b))) + +/* bitwise and: __builtin_msa_and_v */ +#define 
msa_andq_u8(__a, __b) ((v16u8)__builtin_msa_and_v((v16u8)(__a), (v16u8)(__b))) +#define msa_andq_s8(__a, __b) ((v16i8)__builtin_msa_and_v((v16u8)(__a), (v16u8)(__b))) +#define msa_andq_u16(__a, __b) ((v8u16)__builtin_msa_and_v((v16u8)(__a), (v16u8)(__b))) +#define msa_andq_s16(__a, __b) ((v8i16)__builtin_msa_and_v((v16u8)(__a), (v16u8)(__b))) +#define msa_andq_u32(__a, __b) ((v4u32)__builtin_msa_and_v((v16u8)(__a), (v16u8)(__b))) +#define msa_andq_s32(__a, __b) ((v4i32)__builtin_msa_and_v((v16u8)(__a), (v16u8)(__b))) +#define msa_andq_u64(__a, __b) ((v2u64)__builtin_msa_and_v((v16u8)(__a), (v16u8)(__b))) +#define msa_andq_s64(__a, __b) ((v2i64)__builtin_msa_and_v((v16u8)(__a), (v16u8)(__b))) + +/* bitwise or: __builtin_msa_or_v */ +#define msa_orrq_u8(__a, __b) ((v16u8)__builtin_msa_or_v((v16u8)(__a), (v16u8)(__b))) +#define msa_orrq_s8(__a, __b) ((v16i8)__builtin_msa_or_v((v16u8)(__a), (v16u8)(__b))) +#define msa_orrq_u16(__a, __b) ((v8u16)__builtin_msa_or_v((v16u8)(__a), (v16u8)(__b))) +#define msa_orrq_s16(__a, __b) ((v8i16)__builtin_msa_or_v((v16u8)(__a), (v16u8)(__b))) +#define msa_orrq_u32(__a, __b) ((v4u32)__builtin_msa_or_v((v16u8)(__a), (v16u8)(__b))) +#define msa_orrq_s32(__a, __b) ((v4i32)__builtin_msa_or_v((v16u8)(__a), (v16u8)(__b))) +#define msa_orrq_u64(__a, __b) ((v2u64)__builtin_msa_or_v((v16u8)(__a), (v16u8)(__b))) +#define msa_orrq_s64(__a, __b) ((v2i64)__builtin_msa_or_v((v16u8)(__a), (v16u8)(__b))) + +/* bitwise xor: __builtin_msa_xor_v */ +#define msa_eorq_u8(__a, __b) ((v16u8)__builtin_msa_xor_v((v16u8)(__a), (v16u8)(__b))) +#define msa_eorq_s8(__a, __b) ((v16i8)__builtin_msa_xor_v((v16u8)(__a), (v16u8)(__b))) +#define msa_eorq_u16(__a, __b) ((v8u16)__builtin_msa_xor_v((v16u8)(__a), (v16u8)(__b))) +#define msa_eorq_s16(__a, __b) ((v8i16)__builtin_msa_xor_v((v16u8)(__a), (v16u8)(__b))) +#define msa_eorq_u32(__a, __b) ((v4u32)__builtin_msa_xor_v((v16u8)(__a), (v16u8)(__b))) +#define msa_eorq_s32(__a, __b) ((v4i32)__builtin_msa_xor_v((v16u8)(__a), (v16u8)(__b))) +#define msa_eorq_u64(__a, __b) ((v2u64)__builtin_msa_xor_v((v16u8)(__a), (v16u8)(__b))) +#define msa_eorq_s64(__a, __b) ((v2i64)__builtin_msa_xor_v((v16u8)(__a), (v16u8)(__b))) + +/* bitwise not: v16u8 __builtin_msa_xori_b (v16u8, 0xff) */ +#define msa_mvnq_u8(__a) ((v16u8)__builtin_msa_xori_b((v16u8)(__a), 0xFF)) +#define msa_mvnq_s8(__a) ((v16i8)__builtin_msa_xori_b((v16u8)(__a), 0xFF)) +#define msa_mvnq_u16(__a) ((v8u16)__builtin_msa_xori_b((v16u8)(__a), 0xFF)) +#define msa_mvnq_s16(__a) ((v8i16)__builtin_msa_xori_b((v16u8)(__a), 0xFF)) +#define msa_mvnq_u32(__a) ((v4u32)__builtin_msa_xori_b((v16u8)(__a), 0xFF)) +#define msa_mvnq_s32(__a) ((v4i32)__builtin_msa_xori_b((v16u8)(__a), 0xFF)) +#define msa_mvnq_u64(__a) ((v2u64)__builtin_msa_xori_b((v16u8)(__a), 0xFF)) +#define msa_mvnq_s64(__a) ((v2i64)__builtin_msa_xori_b((v16u8)(__a), 0xFF)) + +/* compare equal: ceq -> ri = ai == bi ? 
1...1:0...0 */ +#define msa_ceqq_u8(__a, __b) ((v16u8)__builtin_msa_ceq_b((v16i8)(__a), (v16i8)(__b))) +#define msa_ceqq_s8(__a, __b) ((v16u8)__builtin_msa_ceq_b((v16i8)(__a), (v16i8)(__b))) +#define msa_ceqq_u16(__a, __b) ((v8u16)__builtin_msa_ceq_h((v8i16)(__a), (v8i16)(__b))) +#define msa_ceqq_s16(__a, __b) ((v8u16)__builtin_msa_ceq_h((v8i16)(__a), (v8i16)(__b))) +#define msa_ceqq_u32(__a, __b) ((v4u32)__builtin_msa_ceq_w((v4i32)(__a), (v4i32)(__b))) +#define msa_ceqq_s32(__a, __b) ((v4u32)__builtin_msa_ceq_w((v4i32)(__a), (v4i32)(__b))) +#define msa_ceqq_f32(__a, __b) ((v4u32)__builtin_msa_fceq_w((v4f32)(__a), (v4f32)(__b))) +#define msa_ceqq_u64(__a, __b) ((v2u64)__builtin_msa_ceq_d((v2i64)(__a), (v2i64)(__b))) +#define msa_ceqq_s64(__a, __b) ((v2u64)__builtin_msa_ceq_d((v2i64)(__a), (v2i64)(__b))) +#define msa_ceqq_f64(__a, __b) ((v2u64)__builtin_msa_fceq_d((v2f64)(__a), (v2f64)(__b))) + +/* Compare less-than: clt -> ri = ai < bi ? 1...1:0...0 */ +#define msa_cltq_u8(__a, __b) ((v16u8)__builtin_msa_clt_u_b((v16u8)(__a), (v16u8)(__b))) +#define msa_cltq_s8(__a, __b) ((v16u8)__builtin_msa_clt_s_b((v16i8)(__a), (v16i8)(__b))) +#define msa_cltq_u16(__a, __b) ((v8u16)__builtin_msa_clt_u_h((v8u16)(__a), (v8u16)(__b))) +#define msa_cltq_s16(__a, __b) ((v8u16)__builtin_msa_clt_s_h((v8i16)(__a), (v8i16)(__b))) +#define msa_cltq_u32(__a, __b) ((v4u32)__builtin_msa_clt_u_w((v4u32)(__a), (v4u32)(__b))) +#define msa_cltq_s32(__a, __b) ((v4u32)__builtin_msa_clt_s_w((v4i32)(__a), (v4i32)(__b))) +#define msa_cltq_f32(__a, __b) ((v4u32)__builtin_msa_fclt_w((v4f32)(__a), (v4f32)(__b))) +#define msa_cltq_u64(__a, __b) ((v2u64)__builtin_msa_clt_u_d((v2u64)(__a), (v2u64)(__b))) +#define msa_cltq_s64(__a, __b) ((v2u64)__builtin_msa_clt_s_d((v2i64)(__a), (v2i64)(__b))) +#define msa_cltq_f64(__a, __b) ((v2u64)__builtin_msa_fclt_d((v2f64)(__a), (v2f64)(__b))) + +/* compare greater-than: cgt -> ri = ai > bi ? 1...1:0...0 */ +#define msa_cgtq_u8(__a, __b) ((v16u8)__builtin_msa_clt_u_b((v16u8)(__b), (v16u8)(__a))) +#define msa_cgtq_s8(__a, __b) ((v16u8)__builtin_msa_clt_s_b((v16i8)(__b), (v16i8)(__a))) +#define msa_cgtq_u16(__a, __b) ((v8u16)__builtin_msa_clt_u_h((v8u16)(__b), (v8u16)(__a))) +#define msa_cgtq_s16(__a, __b) ((v8u16)__builtin_msa_clt_s_h((v8i16)(__b), (v8i16)(__a))) +#define msa_cgtq_u32(__a, __b) ((v4u32)__builtin_msa_clt_u_w((v4u32)(__b), (v4u32)(__a))) +#define msa_cgtq_s32(__a, __b) ((v4u32)__builtin_msa_clt_s_w((v4i32)(__b), (v4i32)(__a))) +#define msa_cgtq_f32(__a, __b) ((v4u32)__builtin_msa_fclt_w((v4f32)(__b), (v4f32)(__a))) +#define msa_cgtq_u64(__a, __b) ((v2u64)__builtin_msa_clt_u_d((v2u64)(__b), (v2u64)(__a))) +#define msa_cgtq_s64(__a, __b) ((v2u64)__builtin_msa_clt_s_d((v2i64)(__b), (v2i64)(__a))) +#define msa_cgtq_f64(__a, __b) ((v2u64)__builtin_msa_fclt_d((v2f64)(__b), (v2f64)(__a))) + +/* compare less-equal: cle -> ri = ai <= bi ? 
1...1:0...0 */ +#define msa_cleq_u8(__a, __b) ((v16u8)__builtin_msa_cle_u_b((v16u8)(__a), (v16u8)(__b))) +#define msa_cleq_s8(__a, __b) ((v16u8)__builtin_msa_cle_s_b((v16i8)(__a), (v16i8)(__b))) +#define msa_cleq_u16(__a, __b) ((v8u16)__builtin_msa_cle_u_h((v8u16)(__a), (v8u16)(__b))) +#define msa_cleq_s16(__a, __b) ((v8u16)__builtin_msa_cle_s_h((v8i16)(__a), (v8i16)(__b))) +#define msa_cleq_u32(__a, __b) ((v4u32)__builtin_msa_cle_u_w((v4u32)(__a), (v4u32)(__b))) +#define msa_cleq_s32(__a, __b) ((v4u32)__builtin_msa_cle_s_w((v4i32)(__a), (v4i32)(__b))) +#define msa_cleq_f32(__a, __b) ((v4u32)__builtin_msa_fcle_w((v4f32)(__a), (v4f32)(__b))) +#define msa_cleq_u64(__a, __b) ((v2u64)__builtin_msa_cle_u_d((v2u64)(__a), (v2u64)(__b))) +#define msa_cleq_s64(__a, __b) ((v2u64)__builtin_msa_cle_s_d((v2i64)(__a), (v2i64)(__b))) +#define msa_cleq_f64(__a, __b) ((v2u64)__builtin_msa_fcle_d((v2f64)(__a), (v2f64)(__b))) + +/* compare greater-equal: cge -> ri = ai >= bi ? 1...1:0...0 */ +#define msa_cgeq_u8(__a, __b) ((v16u8)__builtin_msa_cle_u_b((v16u8)(__b), (v16u8)(__a))) +#define msa_cgeq_s8(__a, __b) ((v16u8)__builtin_msa_cle_s_b((v16i8)(__b), (v16i8)(__a))) +#define msa_cgeq_u16(__a, __b) ((v8u16)__builtin_msa_cle_u_h((v8u16)(__b), (v8u16)(__a))) +#define msa_cgeq_s16(__a, __b) ((v8u16)__builtin_msa_cle_s_h((v8i16)(__b), (v8i16)(__a))) +#define msa_cgeq_u32(__a, __b) ((v4u32)__builtin_msa_cle_u_w((v4u32)(__b), (v4u32)(__a))) +#define msa_cgeq_s32(__a, __b) ((v4u32)__builtin_msa_cle_s_w((v4i32)(__b), (v4i32)(__a))) +#define msa_cgeq_f32(__a, __b) ((v4u32)__builtin_msa_fcle_w((v4f32)(__b), (v4f32)(__a))) +#define msa_cgeq_u64(__a, __b) ((v2u64)__builtin_msa_cle_u_d((v2u64)(__b), (v2u64)(__a))) +#define msa_cgeq_s64(__a, __b) ((v2u64)__builtin_msa_cle_s_d((v2i64)(__b), (v2i64)(__a))) +#define msa_cgeq_f64(__a, __b) ((v2u64)__builtin_msa_fcle_d((v2f64)(__b), (v2f64)(__a))) + +/* Shift Left Logical: shl -> ri = ai << bi; */ +#define msa_shlq_u8(__a, __b) ((v16u8)__builtin_msa_sll_b((v16i8)(__a), (v16i8)(__b))) +#define msa_shlq_s8(__a, __b) ((v16i8)__builtin_msa_sll_b((v16i8)(__a), (v16i8)(__b))) +#define msa_shlq_u16(__a, __b) ((v8u16)__builtin_msa_sll_h((v8i16)(__a), (v8i16)(__b))) +#define msa_shlq_s16(__a, __b) ((v8i16)__builtin_msa_sll_h((v8i16)(__a), (v8i16)(__b))) +#define msa_shlq_u32(__a, __b) ((v4u32)__builtin_msa_sll_w((v4i32)(__a), (v4i32)(__b))) +#define msa_shlq_s32(__a, __b) ((v4i32)__builtin_msa_sll_w((v4i32)(__a), (v4i32)(__b))) +#define msa_shlq_u64(__a, __b) ((v2u64)__builtin_msa_sll_d((v2i64)(__a), (v2i64)(__b))) +#define msa_shlq_s64(__a, __b) ((v2i64)__builtin_msa_sll_d((v2i64)(__a), (v2i64)(__b))) + +/* Immediate Shift Left Logical: shl -> ri = ai << imm; */ +#define msa_shlq_n_u8(__a, __imm) ((v16u8)__builtin_msa_slli_b((v16i8)(__a), __imm)) +#define msa_shlq_n_s8(__a, __imm) ((v16i8)__builtin_msa_slli_b((v16i8)(__a), __imm)) +#define msa_shlq_n_u16(__a, __imm) ((v8u16)__builtin_msa_slli_h((v8i16)(__a), __imm)) +#define msa_shlq_n_s16(__a, __imm) ((v8i16)__builtin_msa_slli_h((v8i16)(__a), __imm)) +#define msa_shlq_n_u32(__a, __imm) ((v4u32)__builtin_msa_slli_w((v4i32)(__a), __imm)) +#define msa_shlq_n_s32(__a, __imm) ((v4i32)__builtin_msa_slli_w((v4i32)(__a), __imm)) +#define msa_shlq_n_u64(__a, __imm) ((v2u64)__builtin_msa_slli_d((v2i64)(__a), __imm)) +#define msa_shlq_n_s64(__a, __imm) ((v2i64)__builtin_msa_slli_d((v2i64)(__a), __imm)) + +/* shift right: shrq -> ri = ai >> bi; */ +#define msa_shrq_u8(__a, __b) ((v16u8)__builtin_msa_srl_b((v16i8)(__a), (v16i8)(__b))) +#define 
msa_shrq_s8(__a, __b) ((v16i8)__builtin_msa_sra_b((v16i8)(__a), (v16i8)(__b))) +#define msa_shrq_u16(__a, __b) ((v8u16)__builtin_msa_srl_h((v8i16)(__a), (v8i16)(__b))) +#define msa_shrq_s16(__a, __b) ((v8i16)__builtin_msa_sra_h((v8i16)(__a), (v8i16)(__b))) +#define msa_shrq_u32(__a, __b) ((v4u32)__builtin_msa_srl_w((v4i32)(__a), (v4i32)(__b))) +#define msa_shrq_s32(__a, __b) ((v4i32)__builtin_msa_sra_w((v4i32)(__a), (v4i32)(__b))) +#define msa_shrq_u64(__a, __b) ((v2u64)__builtin_msa_srl_d((v2i64)(__a), (v2i64)(__b))) +#define msa_shrq_s64(__a, __b) ((v2i64)__builtin_msa_sra_d((v2i64)(__a), (v2i64)(__b))) + +/* Immediate Shift Right: shr -> ri = ai >> imm; */ +#define msa_shrq_n_u8(__a, __imm) ((v16u8)__builtin_msa_srli_b((v16i8)(__a), __imm)) +#define msa_shrq_n_s8(__a, __imm) ((v16i8)__builtin_msa_srai_b((v16i8)(__a), __imm)) +#define msa_shrq_n_u16(__a, __imm) ((v8u16)__builtin_msa_srli_h((v8i16)(__a), __imm)) +#define msa_shrq_n_s16(__a, __imm) ((v8i16)__builtin_msa_srai_h((v8i16)(__a), __imm)) +#define msa_shrq_n_u32(__a, __imm) ((v4u32)__builtin_msa_srli_w((v4i32)(__a), __imm)) +#define msa_shrq_n_s32(__a, __imm) ((v4i32)__builtin_msa_srai_w((v4i32)(__a), __imm)) +#define msa_shrq_n_u64(__a, __imm) ((v2u64)__builtin_msa_srli_d((v2i64)(__a), __imm)) +#define msa_shrq_n_s64(__a, __imm) ((v2i64)__builtin_msa_srai_d((v2i64)(__a), __imm)) + +/* Immediate Shift Right Rounded: shr -> ri = ai >> (rounded)imm; */ +#define msa_rshrq_n_u8(__a, __imm) ((v16u8)__builtin_msa_srlri_b((v16i8)(__a), __imm)) +#define msa_rshrq_n_s8(__a, __imm) ((v16i8)__builtin_msa_srari_b((v16i8)(__a), __imm)) +#define msa_rshrq_n_u16(__a, __imm) ((v8u16)__builtin_msa_srlri_h((v8i16)(__a), __imm)) +#define msa_rshrq_n_s16(__a, __imm) ((v8i16)__builtin_msa_srari_h((v8i16)(__a), __imm)) +#define msa_rshrq_n_u32(__a, __imm) ((v4u32)__builtin_msa_srlri_w((v4i32)(__a), __imm)) +#define msa_rshrq_n_s32(__a, __imm) ((v4i32)__builtin_msa_srari_w((v4i32)(__a), __imm)) +#define msa_rshrq_n_u64(__a, __imm) ((v2u64)__builtin_msa_srlri_d((v2i64)(__a), __imm)) +#define msa_rshrq_n_s64(__a, __imm) ((v2i64)__builtin_msa_srari_d((v2i64)(__a), __imm)) + +/* Vector saturating rounding shift left, qrshl -> ri = ai << bi; */ +#define msa_qrshrq_s32(a, b) ((v4i32)__msa_srar_w((v4i32)(a), (v4i32)(b))) + +/* Rename the msa builtin func to unify the name style for intrin_msa.hpp */ +#define msa_qaddq_u8 __builtin_msa_adds_u_b +#define msa_qaddq_s8 __builtin_msa_adds_s_b +#define msa_qaddq_u16 __builtin_msa_adds_u_h +#define msa_qaddq_s16 __builtin_msa_adds_s_h +#define msa_qaddq_u32 __builtin_msa_adds_u_w +#define msa_qaddq_s32 __builtin_msa_adds_s_w +#define msa_qaddq_u64 __builtin_msa_adds_u_d +#define msa_qaddq_s64 __builtin_msa_adds_s_d +#define msa_addq_u8(a, b) ((v16u8)__builtin_msa_addv_b((v16i8)(a), (v16i8)(b))) +#define msa_addq_s8 __builtin_msa_addv_b +#define msa_addq_u16(a, b) ((v8u16)__builtin_msa_addv_h((v8i16)(a), (v8i16)(b))) +#define msa_addq_s16 __builtin_msa_addv_h +#define msa_addq_u32(a, b) ((v4u32)__builtin_msa_addv_w((v4i32)(a), (v4i32)(b))) +#define msa_addq_s32 __builtin_msa_addv_w +#define msa_addq_f32 __builtin_msa_fadd_w +#define msa_addq_u64(a, b) ((v2u64)__builtin_msa_addv_d((v2i64)(a), (v2i64)(b))) +#define msa_addq_s64 __builtin_msa_addv_d +#define msa_addq_f64 __builtin_msa_fadd_d +#define msa_qsubq_u8 __builtin_msa_subs_u_b +#define msa_qsubq_s8 __builtin_msa_subs_s_b +#define msa_qsubq_u16 __builtin_msa_subs_u_h +#define msa_qsubq_s16 __builtin_msa_subs_s_h +#define msa_subq_u8(a, b) 
((v16u8)__builtin_msa_subv_b((v16i8)(a), (v16i8)(b))) +#define msa_subq_s8 __builtin_msa_subv_b +#define msa_subq_u16(a, b) ((v8u16)__builtin_msa_subv_h((v8i16)(a), (v8i16)(b))) +#define msa_subq_s16 __builtin_msa_subv_h +#define msa_subq_u32(a, b) ((v4u32)__builtin_msa_subv_w((v4i32)(a), (v4i32)(b))) +#define msa_subq_s32 __builtin_msa_subv_w +#define msa_subq_f32 __builtin_msa_fsub_w +#define msa_subq_u64(a, b) ((v2u64)__builtin_msa_subv_d((v2i64)(a), (v2i64)(b))) +#define msa_subq_s64 __builtin_msa_subv_d +#define msa_subq_f64 __builtin_msa_fsub_d +#define msa_mulq_u8(a, b) ((v16u8)__builtin_msa_mulv_b((v16i8)(a), (v16i8)(b))) +#define msa_mulq_s8(a, b) ((v16i8)__builtin_msa_mulv_b((v16i8)(a), (v16i8)(b))) +#define msa_mulq_u16(a, b) ((v8u16)__builtin_msa_mulv_h((v8i16)(a), (v8i16)(b))) +#define msa_mulq_s16(a, b) ((v8i16)__builtin_msa_mulv_h((v8i16)(a), (v8i16)(b))) +#define msa_mulq_u32(a, b) ((v4u32)__builtin_msa_mulv_w((v4i32)(a), (v4i32)(b))) +#define msa_mulq_s32(a, b) ((v4i32)__builtin_msa_mulv_w((v4i32)(a), (v4i32)(b))) +#define msa_mulq_u64(a, b) ((v2u64)__builtin_msa_mulv_d((v2i64)(a), (v2i64)(b))) +#define msa_mulq_s64(a, b) ((v2i64)__builtin_msa_mulv_d((v2i64)(a), (v2i64)(b))) +#define msa_mulq_f32 __builtin_msa_fmul_w +#define msa_mulq_f64 __builtin_msa_fmul_d +#define msa_divq_f32 __builtin_msa_fdiv_w +#define msa_divq_f64 __builtin_msa_fdiv_d +#define msa_dotp_s_h __builtin_msa_dotp_s_h +#define msa_dotp_s_w __builtin_msa_dotp_s_w +#define msa_dotp_s_d __builtin_msa_dotp_s_d +#define msa_dotp_u_h __builtin_msa_dotp_u_h +#define msa_dotp_u_w __builtin_msa_dotp_u_w +#define msa_dotp_u_d __builtin_msa_dotp_u_d +#define msa_dpadd_s_h __builtin_msa_dpadd_s_h +#define msa_dpadd_s_w __builtin_msa_dpadd_s_w +#define msa_dpadd_s_d __builtin_msa_dpadd_s_d +#define msa_dpadd_u_h __builtin_msa_dpadd_u_h +#define msa_dpadd_u_w __builtin_msa_dpadd_u_w +#define msa_dpadd_u_d __builtin_msa_dpadd_u_d + +#define ILVRL_B2(RTYPE, in0, in1, low, hi) do { \ + low = (RTYPE)__builtin_msa_ilvr_b((v16i8)(in0), (v16i8)(in1)); \ + hi = (RTYPE)__builtin_msa_ilvl_b((v16i8)(in0), (v16i8)(in1)); \ + } while (0) +#define ILVRL_B2_UB(...) ILVRL_B2(v16u8, __VA_ARGS__) +#define ILVRL_B2_SB(...) ILVRL_B2(v16i8, __VA_ARGS__) +#define ILVRL_B2_UH(...) ILVRL_B2(v8u16, __VA_ARGS__) +#define ILVRL_B2_SH(...) ILVRL_B2(v8i16, __VA_ARGS__) +#define ILVRL_B2_SW(...) ILVRL_B2(v4i32, __VA_ARGS__) + +#define ILVRL_H2(RTYPE, in0, in1, low, hi) do { \ + low = (RTYPE)__builtin_msa_ilvr_h((v8i16)(in0), (v8i16)(in1)); \ + hi = (RTYPE)__builtin_msa_ilvl_h((v8i16)(in0), (v8i16)(in1)); \ + } while (0) +#define ILVRL_H2_UB(...) ILVRL_H2(v16u8, __VA_ARGS__) +#define ILVRL_H2_SB(...) ILVRL_H2(v16i8, __VA_ARGS__) +#define ILVRL_H2_UH(...) ILVRL_H2(v8u16, __VA_ARGS__) +#define ILVRL_H2_SH(...) ILVRL_H2(v8i16, __VA_ARGS__) +#define ILVRL_H2_SW(...) ILVRL_H2(v4i32, __VA_ARGS__) +#define ILVRL_H2_UW(...) ILVRL_H2(v4u32, __VA_ARGS__) + +#define ILVRL_W2(RTYPE, in0, in1, low, hi) do { \ + low = (RTYPE)__builtin_msa_ilvr_w((v4i32)(in0), (v4i32)(in1)); \ + hi = (RTYPE)__builtin_msa_ilvl_w((v4i32)(in0), (v4i32)(in1)); \ + } while (0) +#define ILVRL_W2_UB(...) ILVRL_W2(v16u8, __VA_ARGS__) +#define ILVRL_W2_SH(...) ILVRL_W2(v8i16, __VA_ARGS__) +#define ILVRL_W2_SW(...) ILVRL_W2(v4i32, __VA_ARGS__) +#define ILVRL_W2_UW(...) 
ILVRL_W2(v4u32, __VA_ARGS__) + +/* absq, qabsq (r = |a|;) */ +#define msa_absq_s8(a) __builtin_msa_add_a_b(a, __builtin_msa_fill_b(0)) +#define msa_absq_s16(a) __builtin_msa_add_a_h(a, __builtin_msa_fill_h(0)) +#define msa_absq_s32(a) __builtin_msa_add_a_w(a, __builtin_msa_fill_w(0)) +#define msa_absq_s64(a) __builtin_msa_add_a_d(a, __builtin_msa_fill_d(0)) +#define msa_absq_f32(a) ((v4f32)__builtin_msa_bclri_w((v4u32)(a), 31)) +#define msa_absq_f64(a) ((v2f64)__builtin_msa_bclri_d((v2u64)(a), 63)) +#define msa_qabsq_s8(a) __builtin_msa_adds_a_b(a, __builtin_msa_fill_b(0)) +#define msa_qabsq_s16(a) __builtin_msa_adds_a_h(a, __builtin_msa_fill_h(0)) +#define msa_qabsq_s32(a) __builtin_msa_adds_a_w(a, __builtin_msa_fill_w(0)) +#define msa_qabsq_s64(a) __builtin_msa_adds_a_d(a, __builtin_msa_fill_d(0)) + +/* abdq, qabdq (r = |a - b|;) */ +#define msa_abdq_u8 __builtin_msa_asub_u_b +#define msa_abdq_s8 __builtin_msa_asub_s_b +#define msa_abdq_u16 __builtin_msa_asub_u_h +#define msa_abdq_s16 __builtin_msa_asub_s_h +#define msa_abdq_u32 __builtin_msa_asub_u_w +#define msa_abdq_s32 __builtin_msa_asub_s_w +#define msa_abdq_u64 __builtin_msa_asub_u_d +#define msa_abdq_s64 __builtin_msa_asub_s_d +#define msa_abdq_f32(a, b) msa_absq_f32(__builtin_msa_fsub_w(a, b)) +#define msa_abdq_f64(a, b) msa_absq_f64(__builtin_msa_fsub_d(a, b)) +#define msa_qabdq_s8(a, b) msa_qabsq_s8(__builtin_msa_subs_s_b(a, b)) +#define msa_qabdq_s16(a, b) msa_qabsq_s16(__builtin_msa_subs_s_h(a, b)) +#define msa_qabdq_s32(a, b) msa_qabsq_s32(__builtin_msa_subs_s_w(a, b)) +#define msa_qabdq_s64(a, b) msa_qabsq_s64(__builtin_msa_subs_s_d(a, b)) + +/* sqrtq, rsqrtq */ +#define msa_sqrtq_f32 __builtin_msa_fsqrt_w +#define msa_sqrtq_f64 __builtin_msa_fsqrt_d +#define msa_rsqrtq_f32 __builtin_msa_frsqrt_w +#define msa_rsqrtq_f64 __builtin_msa_frsqrt_d + + +/* mlaq: r = a + b * c; */ +__extension__ extern __inline v4i32 +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +msa_mlaq_s32(v4i32 __a, v4i32 __b, v4i32 __c) +{ + __asm__ volatile("maddv.w %w[__a], %w[__b], %w[__c]\n" + // Outputs + : [__a] "+f"(__a) + // Inputs + : [__b] "f"(__b), [__c] "f"(__c)); + return __a; +} + +__extension__ extern __inline v2i64 +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +msa_mlaq_s64(v2i64 __a, v2i64 __b, v2i64 __c) +{ + __asm__ volatile("maddv.d %w[__a], %w[__b], %w[__c]\n" + // Outputs + : [__a] "+f"(__a) + // Inputs + : [__b] "f"(__b), [__c] "f"(__c)); + return __a; +} + +__extension__ extern __inline v4f32 +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +msa_mlaq_f32(v4f32 __a, v4f32 __b, v4f32 __c) +{ + __asm__ volatile("fmadd.w %w[__a], %w[__b], %w[__c]\n" + // Outputs + : [__a] "+f"(__a) + // Inputs + : [__b] "f"(__b), [__c] "f"(__c)); + return __a; +} + +__extension__ extern __inline v2f64 +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +msa_mlaq_f64(v2f64 __a, v2f64 __b, v2f64 __c) +{ + __asm__ volatile("fmadd.d %w[__a], %w[__b], %w[__c]\n" + // Outputs + : [__a] "+f"(__a) + // Inputs + : [__b] "f"(__b), [__c] "f"(__c)); + return __a; +} + +/* cntq */ +#define msa_cntq_s8 __builtin_msa_pcnt_b +#define msa_cntq_s16 __builtin_msa_pcnt_h +#define msa_cntq_s32 __builtin_msa_pcnt_w +#define msa_cntq_s64 __builtin_msa_pcnt_d + +/* bslq (a: mask; r = b(if a == 0); r = c(if a == 1);) */ +#define msa_bslq_u8 __builtin_msa_bsel_v + +/* ilvrq, ilvlq (For EL only, ilvrq: b0, a0, b1, a1; ilvlq: b2, a2, b3, a3;) */ +#define msa_ilvrq_s8 __builtin_msa_ilvr_b +#define 
msa_ilvrq_s16 __builtin_msa_ilvr_h +#define msa_ilvrq_s32 __builtin_msa_ilvr_w +#define msa_ilvrq_s64 __builtin_msa_ilvr_d +#define msa_ilvlq_s8 __builtin_msa_ilvl_b +#define msa_ilvlq_s16 __builtin_msa_ilvl_h +#define msa_ilvlq_s32 __builtin_msa_ilvl_w +#define msa_ilvlq_s64 __builtin_msa_ilvl_d + +/* ilvevq, ilvodq (ilvevq: b0, a0, b2, a2; ilvodq: b1, a1, b3, a3; ) */ +#define msa_ilvevq_s8 __builtin_msa_ilvev_b +#define msa_ilvevq_s16 __builtin_msa_ilvev_h +#define msa_ilvevq_s32 __builtin_msa_ilvev_w +#define msa_ilvevq_s64 __builtin_msa_ilvev_d +#define msa_ilvodq_s8 __builtin_msa_ilvod_b +#define msa_ilvodq_s16 __builtin_msa_ilvod_h +#define msa_ilvodq_s32 __builtin_msa_ilvod_w +#define msa_ilvodq_s64 __builtin_msa_ilvod_d + +/* extq (r = (a || b); a concatenation b and get elements from index c) */ +#ifdef _MIPSEB +#define msa_extq_s8(a, b, c) \ +(__builtin_msa_vshf_b(__builtin_msa_subv_b((v16i8)((v2i64){0x1716151413121110, 0x1F1E1D1C1B1A1918}), __builtin_msa_fill_b(c)), a, b)) +#define msa_extq_s16(a, b, c) \ +(__builtin_msa_vshf_h(__builtin_msa_subv_h((v8i16)((v2i64){0x000B000A00090008, 0x000F000E000D000C}), __builtin_msa_fill_h(c)), a, b)) +#define msa_extq_s32(a, b, c) \ +(__builtin_msa_vshf_w(__builtin_msa_subv_w((v4i32)((v2i64){0x0000000500000004, 0x0000000700000006}), __builtin_msa_fill_w(c)), a, b)) +#define msa_extq_s64(a, b, c) \ +(__builtin_msa_vshf_d(__builtin_msa_subv_d((v2i64){0x0000000000000002, 0x0000000000000003}, __builtin_msa_fill_d(c)), a, b)) +#else +#define msa_extq_s8(a, b, c) \ +(__builtin_msa_vshf_b(__builtin_msa_addv_b((v16i8)((v2i64){0x0706050403020100, 0x0F0E0D0C0B0A0908}), __builtin_msa_fill_b(c)), b, a)) +#define msa_extq_s16(a, b, c) \ +(__builtin_msa_vshf_h(__builtin_msa_addv_h((v8i16)((v2i64){0x0003000200010000, 0x0007000600050004}), __builtin_msa_fill_h(c)), b, a)) +#define msa_extq_s32(a, b, c) \ +(__builtin_msa_vshf_w(__builtin_msa_addv_w((v4i32)((v2i64){0x0000000100000000, 0x0000000300000002}), __builtin_msa_fill_w(c)), b, a)) +#define msa_extq_s64(a, b, c) \ +(__builtin_msa_vshf_d(__builtin_msa_addv_d((v2i64){0x0000000000000000, 0x0000000000000001}, __builtin_msa_fill_d(c)), b, a)) +#endif /* _MIPSEB */ + +/* cvttruncq, cvttintq, cvtrintq */ +#define msa_cvttruncq_u32_f32 __builtin_msa_ftrunc_u_w +#define msa_cvttruncq_s32_f32 __builtin_msa_ftrunc_s_w +#define msa_cvttruncq_u64_f64 __builtin_msa_ftrunc_u_d +#define msa_cvttruncq_s64_f64 __builtin_msa_ftrunc_s_d +#define msa_cvttintq_u32_f32 __builtin_msa_ftint_u_w +#define msa_cvttintq_s32_f32 __builtin_msa_ftint_s_w +#define msa_cvttintq_u64_f64 __builtin_msa_ftint_u_d +#define msa_cvttintq_s64_f64 __builtin_msa_ftint_s_d +#define msa_cvtrintq_f32 __builtin_msa_frint_w +#define msa_cvtrintq_f64 __builtin_msa_frint_d + +/* cvtfintq, cvtfq */ +#define msa_cvtfintq_f32_u32 __builtin_msa_ffint_u_w +#define msa_cvtfintq_f32_s32 __builtin_msa_ffint_s_w +#define msa_cvtfintq_f64_u64 __builtin_msa_ffint_u_d +#define msa_cvtfintq_f64_s64 __builtin_msa_ffint_s_d +#define msa_cvtfq_f32_f64 __builtin_msa_fexdo_w +#define msa_cvtflq_f64_f32 __builtin_msa_fexupr_d +#define msa_cvtfhq_f64_f32 __builtin_msa_fexupl_d + +#define msa_addl_u8(a, b) ((v8u16)__builtin_msa_addv_h((v8i16)V8U8_2_V8I16(a), (v8i16)V8U8_2_V8I16(b))) +#define msa_addl_s8(a, b) (__builtin_msa_addv_h((v8i16)V8I8_2_V8I16(a), (v8i16)V8I8_2_V8I16(b))) +#define msa_addl_u16(a, b) ((v4u32)__builtin_msa_addv_w((v4i32)V4U16_2_V4I32(a), (v4i32)V4U16_2_V4I32(b))) +#define msa_addl_s16(a, b) (__builtin_msa_addv_w((v4i32)V4I16_2_V4I32(a), 
(v4i32)V4I16_2_V4I32(b))) +#define msa_subl_s16(a, b) (__builtin_msa_subv_w((v4i32)V4I16_2_V4I32(a), (v4i32)V4I16_2_V4I32(b))) +#define msa_recpeq_f32 __builtin_msa_frcp_w +#define msa_recpsq_f32(a, b) (__builtin_msa_fsub_w(msa_dupq_n_f32(2.0f), __builtin_msa_fmul_w(a, b))) + +#define MSA_INTERLEAVED_IMPL_LOAD2_STORE2(_Tp, _Tpv, _Tpvs, suffix, df, nlanes) \ +__extension__ extern __inline void \ +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \ +msa_ld2q_##suffix(const _Tp* ptr, _Tpv* a, _Tpv* b) \ +{ \ + _Tpv v0 = msa_ld1q_##suffix(ptr); \ + _Tpv v1 = msa_ld1q_##suffix(ptr + nlanes); \ + *a = (_Tpv)__builtin_msa_pckev_##df((_Tpvs)v1, (_Tpvs)v0); \ + *b = (_Tpv)__builtin_msa_pckod_##df((_Tpvs)v1, (_Tpvs)v0); \ +} \ +__extension__ extern __inline void \ +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \ +msa_st2q_##suffix(_Tp* ptr, const _Tpv a, const _Tpv b) \ +{ \ + msa_st1q_##suffix(ptr, (_Tpv)__builtin_msa_ilvr_##df((_Tpvs)b, (_Tpvs)a)); \ + msa_st1q_##suffix(ptr + nlanes, (_Tpv)__builtin_msa_ilvl_##df((_Tpvs)b, (_Tpvs)a)); \ +} + +MSA_INTERLEAVED_IMPL_LOAD2_STORE2(uint8_t, v16u8, v16i8, u8, b, 16) +MSA_INTERLEAVED_IMPL_LOAD2_STORE2(int8_t, v16i8, v16i8, s8, b, 16) +MSA_INTERLEAVED_IMPL_LOAD2_STORE2(uint16_t, v8u16, v8i16, u16, h, 8) +MSA_INTERLEAVED_IMPL_LOAD2_STORE2(int16_t, v8i16, v8i16, s16, h, 8) +MSA_INTERLEAVED_IMPL_LOAD2_STORE2(uint32_t, v4u32, v4i32, u32, w, 4) +MSA_INTERLEAVED_IMPL_LOAD2_STORE2(int32_t, v4i32, v4i32, s32, w, 4) +MSA_INTERLEAVED_IMPL_LOAD2_STORE2(float, v4f32, v4i32, f32, w, 4) +MSA_INTERLEAVED_IMPL_LOAD2_STORE2(uint64_t, v2u64, v2i64, u64, d, 2) +MSA_INTERLEAVED_IMPL_LOAD2_STORE2(int64_t, v2i64, v2i64, s64, d, 2) +MSA_INTERLEAVED_IMPL_LOAD2_STORE2(double, v2f64, v2i64, f64, d, 2) + +#ifdef _MIPSEB +#define MSA_INTERLEAVED_IMPL_LOAD3_8(_Tp, _Tpv, _Tpvs, suffix) \ +__extension__ extern __inline void \ +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \ +msa_ld3q_##suffix(const _Tp* ptr, _Tpv* a, _Tpv* b, _Tpv* c) \ +{ \ + _Tpv v0 = msa_ld1q_##suffix(ptr); \ + _Tpv v1 = msa_ld1q_##suffix(ptr + 16); \ + _Tpv v2 = msa_ld1q_##suffix(ptr + 32); \ + _Tpvs v3 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x0704011F1F1F1F1F, 0x1F1C191613100D0A}), (_Tpvs)v0, (_Tpvs)v1); \ + *a = (_Tpv)__builtin_msa_vshf_b((_Tpvs)((v2i64){0x1716150E0B080502, 0x1F1E1D1C1B1A1918}), v3, (_Tpvs)v2); \ + v3 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x0603001F1F1F1F1F, 0x1E1B1815120F0C09}), (_Tpvs)v0, (_Tpvs)v1); \ + *b = (_Tpv)__builtin_msa_vshf_b((_Tpvs)((v2i64){0x1716150D0A070401, 0x1F1E1D1C1B1A1918}), v3, (_Tpvs)v2); \ + v3 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x05021F1F1F1F1F1F, 0x1D1A1714110E0B08}), (_Tpvs)v0, (_Tpvs)v1); \ + *c = (_Tpv)__builtin_msa_vshf_b((_Tpvs)((v2i64){0x17160F0C09060300, 0x1F1E1D1C1B1A1918}), v3, (_Tpvs)v2); \ +} +#else +#define MSA_INTERLEAVED_IMPL_LOAD3_8(_Tp, _Tpv, _Tpvs, suffix) \ +__extension__ extern __inline void \ +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \ +msa_ld3q_##suffix(const _Tp* ptr, _Tpv* a, _Tpv* b, _Tpv* c) \ +{ \ + _Tpv v0 = msa_ld1q_##suffix(ptr); \ + _Tpv v1 = msa_ld1q_##suffix(ptr + 16); \ + _Tpv v2 = msa_ld1q_##suffix(ptr + 32); \ + _Tpvs v3 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x15120F0C09060300, 0x00000000001E1B18}), (_Tpvs)v1, (_Tpvs)v0); \ + *a = (_Tpv)__builtin_msa_vshf_b((_Tpvs)((v2i64){0x0706050403020100, 0x1D1A1714110A0908}), (_Tpvs)v2, v3); \ + v3 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x1613100D0A070401, 0x00000000001F1C19}), (_Tpvs)v1, (_Tpvs)v0); \ + *b 
= (_Tpv)__builtin_msa_vshf_b((_Tpvs)((v2i64){0x0706050403020100, 0x1E1B1815120A0908}), (_Tpvs)v2, v3); \ + v3 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x1714110E0B080502, 0x0000000000001D1A}), (_Tpvs)v1, (_Tpvs)v0); \ + *c = (_Tpv)__builtin_msa_vshf_b((_Tpvs)((v2i64){0x0706050403020100, 0x1F1C191613100908}), (_Tpvs)v2, v3); \ +} +#endif + +MSA_INTERLEAVED_IMPL_LOAD3_8(uint8_t, v16u8, v16i8, u8) +MSA_INTERLEAVED_IMPL_LOAD3_8(int8_t, v16i8, v16i8, s8) + +#ifdef _MIPSEB +#define MSA_INTERLEAVED_IMPL_LOAD3_16(_Tp, _Tpv, _Tpvs, suffix) \ +__extension__ extern __inline void \ +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \ +msa_ld3q_##suffix(const _Tp* ptr, _Tpv* a, _Tpv* b, _Tpv* c) \ +{ \ + _Tpv v0 = msa_ld1q_##suffix(ptr); \ + _Tpv v1 = msa_ld1q_##suffix(ptr + 8); \ + _Tpv v2 = msa_ld1q_##suffix(ptr + 16); \ + _Tpvs v3 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x00030000000F000F, 0x000F000C00090006}), (_Tpvs)v1, (_Tpvs)v0); \ + *a = (_Tpv)__builtin_msa_vshf_h((_Tpvs)((v2i64){0x000B000A00050002, 0x000F000E000D000C}), (_Tpvs)v2, v3); \ + v3 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x0002000F000F000F, 0x000E000B00080005}), (_Tpvs)v1, (_Tpvs)v0); \ + *b = (_Tpv)__builtin_msa_vshf_h((_Tpvs)((v2i64){0x000B000700040001, 0x000F000E000D000C}), (_Tpvs)v2, v3); \ + v3 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x0001000F000F000F, 0x000D000A00070004}), (_Tpvs)v1, (_Tpvs)v0); \ + *c = (_Tpv)__builtin_msa_vshf_h((_Tpvs)((v2i64){0x000B000600030000, 0x000F000E000D000C}), (_Tpvs)v2, v3); \ +} +#else +#define MSA_INTERLEAVED_IMPL_LOAD3_16(_Tp, _Tpv, _Tpvs, suffix) \ +__extension__ extern __inline void \ +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \ +msa_ld3q_##suffix(const _Tp* ptr, _Tpv* a, _Tpv* b, _Tpv* c) \ +{ \ + _Tpv v0 = msa_ld1q_##suffix(ptr); \ + _Tpv v1 = msa_ld1q_##suffix(ptr + 8); \ + _Tpv v2 = msa_ld1q_##suffix(ptr + 16); \ + _Tpvs v3 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x0009000600030000, 0x00000000000F000C}), (_Tpvs)v1, (_Tpvs)v0); \ + *a = (_Tpv)__builtin_msa_vshf_h((_Tpvs)((v2i64){0x0003000200010000, 0x000D000A00050004}), (_Tpvs)v2, v3); \ + v3 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x000A000700040001, 0x000000000000000D}), (_Tpvs)v1, (_Tpvs)v0); \ + *b = (_Tpv)__builtin_msa_vshf_h((_Tpvs)((v2i64){0x0003000200010000, 0x000E000B00080004}), (_Tpvs)v2, v3); \ + v3 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x000B000800050002, 0x000000000000000E}), (_Tpvs)v1, (_Tpvs)v0); \ + *c = (_Tpv)__builtin_msa_vshf_h((_Tpvs)((v2i64){0x0003000200010000, 0x000F000C00090004}), (_Tpvs)v2, v3); \ +} +#endif + +MSA_INTERLEAVED_IMPL_LOAD3_16(uint16_t, v8u16, v8i16, u16) +MSA_INTERLEAVED_IMPL_LOAD3_16(int16_t, v8i16, v8i16, s16) + +#define MSA_INTERLEAVED_IMPL_LOAD3_32(_Tp, _Tpv, _Tpvs, suffix) \ +__extension__ extern __inline void \ +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \ +msa_ld3q_##suffix(const _Tp* ptr, _Tpv* a, _Tpv* b, _Tpv* c) \ +{ \ + _Tpv v00 = msa_ld1q_##suffix(ptr); \ + _Tpv v01 = msa_ld1q_##suffix(ptr + 4); \ + _Tpv v02 = msa_ld1q_##suffix(ptr + 8); \ + _Tpvs v10 = __builtin_msa_ilvr_w((_Tpvs)__builtin_msa_ilvl_d((v2i64)v01, (v2i64)v01), (_Tpvs)v00); \ + _Tpvs v11 = __builtin_msa_ilvr_w((_Tpvs)v02, (_Tpvs)__builtin_msa_ilvl_d((v2i64)v00, (v2i64)v00)); \ + _Tpvs v12 = __builtin_msa_ilvr_w((_Tpvs)__builtin_msa_ilvl_d((v2i64)v02, (v2i64)v02), (_Tpvs)v01); \ + *a = (_Tpv)__builtin_msa_ilvr_w((_Tpvs)__builtin_msa_ilvl_d((v2i64)v11, (v2i64)v11), v10); \ + *b = (_Tpv)__builtin_msa_ilvr_w(v12, (_Tpvs)__builtin_msa_ilvl_d((v2i64)v10, (v2i64)v10)); 
\ + *c = (_Tpv)__builtin_msa_ilvr_w((_Tpvs)__builtin_msa_ilvl_d((v2i64)v12, (v2i64)v12), v11); \ +} + +MSA_INTERLEAVED_IMPL_LOAD3_32(uint32_t, v4u32, v4i32, u32) +MSA_INTERLEAVED_IMPL_LOAD3_32(int32_t, v4i32, v4i32, s32) +MSA_INTERLEAVED_IMPL_LOAD3_32(float, v4f32, v4i32, f32) + +#define MSA_INTERLEAVED_IMPL_LOAD3_64(_Tp, _Tpv, suffix) \ +__extension__ extern __inline void \ +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \ +msa_ld3q_##suffix(const _Tp* ptr, _Tpv* a, _Tpv* b, _Tpv* c) \ +{ \ + *((_Tp*)a) = *ptr; *((_Tp*)b) = *(ptr + 1); *((_Tp*)c) = *(ptr + 2); \ + *((_Tp*)a + 1) = *(ptr + 3); *((_Tp*)b + 1) = *(ptr + 4); *((_Tp*)c + 1) = *(ptr + 5); \ +} + +MSA_INTERLEAVED_IMPL_LOAD3_64(uint64_t, v2u64, u64) +MSA_INTERLEAVED_IMPL_LOAD3_64(int64_t, v2i64, s64) +MSA_INTERLEAVED_IMPL_LOAD3_64(double, v2f64, f64) + +#ifdef _MIPSEB +#define MSA_INTERLEAVED_IMPL_STORE3_8(_Tp, _Tpv, _Tpvs, suffix) \ +__extension__ extern __inline void \ +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \ +msa_st3q_##suffix(_Tp* ptr, const _Tpv a, const _Tpv b, const _Tpv c) \ +{ \ + _Tpvs v0 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x0F0E0D0C0B1F1F1F, 0x1F1E1D1C1B1A1F1F}), (_Tpvs)b, (_Tpvs)a); \ + _Tpvs v1 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x0D1C140C1B130B1A, 0x1F170F1E160E1D15}), (_Tpvs)c, (_Tpvs)v0); \ + msa_st1q_##suffix(ptr, (_Tpv)v1); \ + v0 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x0A09080706051F1F, 0x19181716151F1F1F}), (_Tpvs)b, (_Tpvs)a); \ + v1 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x1D14071C13061B12, 0x170A1F16091E1508}), (_Tpvs)c, (_Tpvs)v0); \ + msa_st1q_##suffix(ptr + 16, (_Tpv)v1); \ + v0 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x04030201001F1F1F, 0x14131211101F1F1F}), (_Tpvs)b, (_Tpvs)a); \ + v1 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x15021C14011B1300, 0x051F17041E16031D}), (_Tpvs)c, (_Tpvs)v0); \ + msa_st1q_##suffix(ptr + 32, (_Tpv)v1); \ +} +#else +#define MSA_INTERLEAVED_IMPL_STORE3_8(_Tp, _Tpv, _Tpvs, suffix) \ +__extension__ extern __inline void \ +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \ +msa_st3q_##suffix(_Tp* ptr, const _Tpv a, const _Tpv b, const _Tpv c) \ +{ \ + _Tpvs v0 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x0000050403020100, 0x0000001413121110}), (_Tpvs)b, (_Tpvs)a); \ + _Tpvs v1 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x0A02110901100800, 0x05140C04130B0312}), (_Tpvs)c, (_Tpvs)v0); \ + msa_st1q_##suffix(ptr, (_Tpv)v1); \ + v0 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x0000000A09080706, 0x00001A1918171615}), (_Tpvs)b, (_Tpvs)a); \ + v1 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x170A011609001508, 0x0D04190C03180B02}), (_Tpvs)c, (_Tpvs)v0); \ + msa_st1q_##suffix(ptr + 16, (_Tpv)v1); \ + v0 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x0000000F0E0D0C0B, 0x0000001F1E1D1C1B}), (_Tpvs)b, (_Tpvs)a); \ + v1 = __builtin_msa_vshf_b((_Tpvs)((v2i64){0x021C09011B08001A, 0x1F0C041E0B031D0A}), (_Tpvs)c, (_Tpvs)v0); \ + msa_st1q_##suffix(ptr + 32, (_Tpv)v1); \ +} +#endif + +MSA_INTERLEAVED_IMPL_STORE3_8(uint8_t, v16u8, v16i8, u8) +MSA_INTERLEAVED_IMPL_STORE3_8(int8_t, v16i8, v16i8, s8) + +#ifdef _MIPSEB +#define MSA_INTERLEAVED_IMPL_STORE3_16(_Tp, _Tpv, _Tpvs, suffix) \ +__extension__ extern __inline void \ +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \ +msa_st3q_##suffix(_Tp* ptr, const _Tpv a, const _Tpv b, const _Tpv c) \ +{ \ + _Tpvs v0 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x000700060005000F, 0x000F000E000D000F}), (_Tpvs)b, (_Tpvs)a); \ + _Tpvs v1 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x000A0006000D0009, 
0x000F000B0007000E}), (_Tpvs)c, (_Tpvs)v0); \ + msa_st1q_##suffix(ptr, (_Tpv)v1); \ + v0 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x00040003000F000F, 0x000C000B000A000F}), (_Tpvs)b, (_Tpvs)a); \ + v1 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x000E000A0003000D, 0x0005000F000B0004}), (_Tpvs)c, (_Tpvs)v0); \ + msa_st1q_##suffix(ptr + 8, (_Tpv)v1); \ + v0 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x000200010000000F, 0x00090008000F000F}), (_Tpvs)b, (_Tpvs)a); \ + v1 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x0001000E00090000, 0x000B0002000F000A}), (_Tpvs)c, (_Tpvs)v0); \ + msa_st1q_##suffix(ptr + 16, (_Tpv)v1); \ +} +#else +#define MSA_INTERLEAVED_IMPL_STORE3_16(_Tp, _Tpv, _Tpvs, suffix) \ +__extension__ extern __inline void \ +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \ +msa_st3q_##suffix(_Tp* ptr, const _Tpv a, const _Tpv b, const _Tpv c) \ +{ \ + _Tpvs v0 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x0000000200010000, 0x0000000A00090008}), (_Tpvs)b, (_Tpvs)a); \ + _Tpvs v1 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x0001000800040000, 0x0006000200090005}), (_Tpvs)c, (_Tpvs)v0); \ + msa_st1q_##suffix(ptr, (_Tpv)v1); \ + v0 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x0000000500040003, 0x00000000000C000B}), (_Tpvs)b, (_Tpvs)a); \ + v1 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x000B00040000000A, 0x0002000C00050001}), (_Tpvs)c, (_Tpvs)v0); \ + msa_st1q_##suffix(ptr + 8, (_Tpv)v1); \ + v0 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x0000000000070006, 0x0000000F000E000D}), (_Tpvs)b, (_Tpvs)a); \ + v1 = __builtin_msa_vshf_h((_Tpvs)((v2i64){0x00050000000D0004, 0x000F00060001000E}), (_Tpvs)c, (_Tpvs)v0); \ + msa_st1q_##suffix(ptr + 16, (_Tpv)v1); \ +} +#endif + +MSA_INTERLEAVED_IMPL_STORE3_16(uint16_t, v8u16, v8i16, u16) +MSA_INTERLEAVED_IMPL_STORE3_16(int16_t, v8i16, v8i16, s16) + +#ifdef _MIPSEB +#define MSA_INTERLEAVED_IMPL_STORE3_32(_Tp, _Tpv, _Tpvs, suffix) \ +__extension__ extern __inline void \ +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \ +msa_st3q_##suffix(_Tp* ptr, const _Tpv a, const _Tpv b, const _Tpv c) \ +{ \ + _Tpvs v0 = __builtin_msa_vshf_w((_Tpvs)((v2i64){0x0000000300000007, 0x0000000700000006}), (_Tpvs)b, (_Tpvs)a); \ + _Tpvs v1 = __builtin_msa_vshf_w((_Tpvs)((v2i64){0x0000000300000006, 0x0000000700000005}), (_Tpvs)c, (_Tpvs)v0); \ + msa_st1q_##suffix(ptr, (_Tpv)v1); \ + v0 = __builtin_msa_vshf_w((_Tpvs)((v2i64){0x0000000200000001, 0x0000000500000007}), (_Tpvs)b, (_Tpvs)a); \ + v1 = __builtin_msa_vshf_w((_Tpvs)((v2i64){0x0000000700000004, 0x0000000500000002}), (_Tpvs)c, (_Tpvs)v0); \ + msa_st1q_##suffix(ptr + 4, (_Tpv)v1); \ + v0 = __builtin_msa_vshf_w((_Tpvs)((v2i64){0x0000000000000007, 0x0000000400000007}), (_Tpvs)b, (_Tpvs)a); \ + v1 = __builtin_msa_vshf_w((_Tpvs)((v2i64){0x0000000500000000, 0x0000000100000007}), (_Tpvs)c, (_Tpvs)v0); \ + msa_st1q_##suffix(ptr + 8, (_Tpv)v1); \ +} +#else +#define MSA_INTERLEAVED_IMPL_STORE3_32(_Tp, _Tpv, _Tpvs, suffix) \ +__extension__ extern __inline void \ +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \ +msa_st3q_##suffix(_Tp* ptr, const _Tpv a, const _Tpv b, const _Tpv c) \ +{ \ + _Tpvs v0 = __builtin_msa_vshf_w((_Tpvs)((v2i64){0x0000000100000000, 0x0000000000000004}), (_Tpvs)b, (_Tpvs)a); \ + _Tpvs v1 = __builtin_msa_vshf_w((_Tpvs)((v2i64){0x0000000200000000, 0x0000000100000004}), (_Tpvs)c, (_Tpvs)v0); \ + msa_st1q_##suffix(ptr, (_Tpv)v1); \ + v0 = __builtin_msa_vshf_w((_Tpvs)((v2i64){0x0000000000000002, 0x0000000600000005}), (_Tpvs)b, (_Tpvs)a); \ + v1 = 
__builtin_msa_vshf_w((_Tpvs)((v2i64){0x0000000500000002, 0x0000000300000000}), (_Tpvs)c, (_Tpvs)v0); \ + msa_st1q_##suffix(ptr + 4, (_Tpv)v1); \ + v0 = __builtin_msa_vshf_w((_Tpvs)((v2i64){0x0000000000000003, 0x0000000000000007}), (_Tpvs)b, (_Tpvs)a); \ + v1 = __builtin_msa_vshf_w((_Tpvs)((v2i64){0x0000000000000006, 0x0000000700000002}), (_Tpvs)c, (_Tpvs)v0); \ + msa_st1q_##suffix(ptr + 8, (_Tpv)v1); \ +} +#endif + +MSA_INTERLEAVED_IMPL_STORE3_32(uint32_t, v4u32, v4i32, u32) +MSA_INTERLEAVED_IMPL_STORE3_32(int32_t, v4i32, v4i32, s32) +MSA_INTERLEAVED_IMPL_STORE3_32(float, v4f32, v4i32, f32) + +#define MSA_INTERLEAVED_IMPL_STORE3_64(_Tp, _Tpv, suffix) \ +__extension__ extern __inline void \ +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \ +msa_st3q_##suffix(_Tp* ptr, const _Tpv a, const _Tpv b, const _Tpv c) \ +{ \ + *ptr = a[0]; *(ptr + 1) = b[0]; *(ptr + 2) = c[0]; \ + *(ptr + 3) = a[1]; *(ptr + 4) = b[1]; *(ptr + 5) = c[1]; \ +} + +MSA_INTERLEAVED_IMPL_STORE3_64(uint64_t, v2u64, u64) +MSA_INTERLEAVED_IMPL_STORE3_64(int64_t, v2i64, s64) +MSA_INTERLEAVED_IMPL_STORE3_64(double, v2f64, f64) + +#define MSA_INTERLEAVED_IMPL_LOAD4_STORE4(_Tp, _Tpv, _Tpvs, suffix, df, nlanes) \ +__extension__ extern __inline void \ +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \ +msa_ld4q_##suffix(const _Tp* ptr, _Tpv* a, _Tpv* b, _Tpv* c, _Tpv* d) \ +{ \ + _Tpv v0 = msa_ld1q_##suffix(ptr); \ + _Tpv v1 = msa_ld1q_##suffix(ptr + nlanes); \ + _Tpv v2 = msa_ld1q_##suffix(ptr + nlanes * 2); \ + _Tpv v3 = msa_ld1q_##suffix(ptr + nlanes * 3); \ + _Tpvs t0 = __builtin_msa_pckev_##df((_Tpvs)v1, (_Tpvs)v0); \ + _Tpvs t1 = __builtin_msa_pckev_##df((_Tpvs)v3, (_Tpvs)v2); \ + _Tpvs t2 = __builtin_msa_pckod_##df((_Tpvs)v1, (_Tpvs)v0); \ + _Tpvs t3 = __builtin_msa_pckod_##df((_Tpvs)v3, (_Tpvs)v2); \ + *a = (_Tpv)__builtin_msa_pckev_##df(t1, t0); \ + *b = (_Tpv)__builtin_msa_pckev_##df(t3, t2); \ + *c = (_Tpv)__builtin_msa_pckod_##df(t1, t0); \ + *d = (_Tpv)__builtin_msa_pckod_##df(t3, t2); \ +} \ +__extension__ extern __inline void \ +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \ +msa_st4q_##suffix(_Tp* ptr, const _Tpv a, const _Tpv b, const _Tpv c, const _Tpv d) \ +{ \ + _Tpvs v0 = __builtin_msa_ilvr_##df((_Tpvs)c, (_Tpvs)a); \ + _Tpvs v1 = __builtin_msa_ilvr_##df((_Tpvs)d, (_Tpvs)b); \ + _Tpvs v2 = __builtin_msa_ilvl_##df((_Tpvs)c, (_Tpvs)a); \ + _Tpvs v3 = __builtin_msa_ilvl_##df((_Tpvs)d, (_Tpvs)b); \ + msa_st1q_##suffix(ptr, (_Tpv)__builtin_msa_ilvr_##df(v1, v0)); \ + msa_st1q_##suffix(ptr + nlanes, (_Tpv)__builtin_msa_ilvl_##df(v1, v0)); \ + msa_st1q_##suffix(ptr + 2 * nlanes, (_Tpv)__builtin_msa_ilvr_##df(v3, v2)); \ + msa_st1q_##suffix(ptr + 3 * nlanes, (_Tpv)__builtin_msa_ilvl_##df(v3, v2)); \ +} + +MSA_INTERLEAVED_IMPL_LOAD4_STORE4(uint8_t, v16u8, v16i8, u8, b, 16) +MSA_INTERLEAVED_IMPL_LOAD4_STORE4(int8_t, v16i8, v16i8, s8, b, 16) +MSA_INTERLEAVED_IMPL_LOAD4_STORE4(uint16_t, v8u16, v8i16, u16, h, 8) +MSA_INTERLEAVED_IMPL_LOAD4_STORE4(int16_t, v8i16, v8i16, s16, h, 8) +MSA_INTERLEAVED_IMPL_LOAD4_STORE4(uint32_t, v4u32, v4i32, u32, w, 4) +MSA_INTERLEAVED_IMPL_LOAD4_STORE4(int32_t, v4i32, v4i32, s32, w, 4) +MSA_INTERLEAVED_IMPL_LOAD4_STORE4(float, v4f32, v4i32, f32, w, 4) + +#define MSA_INTERLEAVED_IMPL_LOAD4_STORE4_64(_Tp, _Tpv, _Tpvs, suffix) \ +__extension__ extern __inline void \ +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \ +msa_ld4q_##suffix(const _Tp* ptr, _Tpv* a, _Tpv* b, _Tpv* c, _Tpv* d) \ +{ \ + _Tpv v0 = 
msa_ld1q_##suffix(ptr); \ + _Tpv v1 = msa_ld1q_##suffix(ptr + 2); \ + _Tpv v2 = msa_ld1q_##suffix(ptr + 4); \ + _Tpv v3 = msa_ld1q_##suffix(ptr + 6); \ + *a = (_Tpv)__builtin_msa_ilvr_d((_Tpvs)v2, (_Tpvs)v0); \ + *b = (_Tpv)__builtin_msa_ilvl_d((_Tpvs)v2, (_Tpvs)v0); \ + *c = (_Tpv)__builtin_msa_ilvr_d((_Tpvs)v3, (_Tpvs)v1); \ + *d = (_Tpv)__builtin_msa_ilvl_d((_Tpvs)v3, (_Tpvs)v1); \ +} \ +__extension__ extern __inline void \ +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \ +msa_st4q_##suffix(_Tp* ptr, const _Tpv a, const _Tpv b, const _Tpv c, const _Tpv d) \ +{ \ + msa_st1q_##suffix(ptr, (_Tpv)__builtin_msa_ilvr_d((_Tpvs)b, (_Tpvs)a)); \ + msa_st1q_##suffix(ptr + 2, (_Tpv)__builtin_msa_ilvr_d((_Tpvs)d, (_Tpvs)c)); \ + msa_st1q_##suffix(ptr + 4, (_Tpv)__builtin_msa_ilvl_d((_Tpvs)b, (_Tpvs)a)); \ + msa_st1q_##suffix(ptr + 6, (_Tpv)__builtin_msa_ilvl_d((_Tpvs)d, (_Tpvs)c)); \ +} + +MSA_INTERLEAVED_IMPL_LOAD4_STORE4_64(uint64_t, v2u64, v2i64, u64) +MSA_INTERLEAVED_IMPL_LOAD4_STORE4_64(int64_t, v2i64, v2i64, s64) +MSA_INTERLEAVED_IMPL_LOAD4_STORE4_64(double, v2f64, v2i64, f64) + +__extension__ extern __inline v8i16 +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +msa_qdmulhq_n_s16(v8i16 a, int16_t b) +{ + v8i16 a_lo, a_hi; + ILVRL_H2_SH(a, msa_dupq_n_s16(0), a_lo, a_hi); + return msa_packr_s32(msa_shlq_n_s32(msa_mulq_s32(msa_paddlq_s16(a_lo), msa_dupq_n_s32(b)), 1), + msa_shlq_n_s32(msa_mulq_s32(msa_paddlq_s16(a_hi), msa_dupq_n_s32(b)), 1), 16); +} + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif /*__mips_msa*/ +#endif /* OPENCV_CORE_MSA_MACROS_H */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/hal/simd_utils.impl.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/simd_utils.impl.hpp new file mode 100644 index 00000000..fff8f942 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/hal/simd_utils.impl.hpp @@ -0,0 +1,146 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html + +// This header is not standalone. Don't include directly, use "intrin.hpp" instead. 
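For orientation, the msa_macros.h hunk above ends with the NEON-style wrapper layer (arithmetic, shifts, comparisons, and the interleaved msa_ld2q/st2q .. ld4q/st4q helpers). A minimal usage sketch follows; it is illustrative only, assumes a MIPS build with MSA enabled (-mmsa) so the header is active, and assumes msa_ld1q_u8 / msa_st1q_u8 are defined earlier in the same header. The function name, buffers, and include path are hypothetical.

/* Illustrative only: de-interleave two u8 channels, saturating-add them,
 * halve, and store 16 contiguous results. */
#include <stdint.h>
#include "opencv2/core/hal/msa_macros.h"   /* assumed include path */

static void add_halve_two_channels(const uint8_t src[32], uint8_t dst[16])
{
    v16u8 c0, c1;
    msa_ld2q_u8(src, &c0, &c1);          /* pckev/pckod split even/odd bytes */
    v16u8 sum = msa_qaddq_u8(c0, c1);    /* saturating add (adds_u_b)        */
    v16u8 out = msa_shrq_n_u8(sum, 1);   /* logical shift right by 1         */
    msa_st1q_u8(dst, out);               /* contiguous store of 16 bytes     */
}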
+#ifdef OPENCV_HAL_INTRIN_HPP // defined in intrin.hpp + + +#if CV_SIMD128 || CV_SIMD128_CPP + +template struct Type2Vec128_Traits; +#define CV_INTRIN_DEF_TYPE2VEC128_TRAITS(type_, vec_type_) \ + template<> struct Type2Vec128_Traits \ + { \ + typedef vec_type_ vec_type; \ + } + +CV_INTRIN_DEF_TYPE2VEC128_TRAITS(uchar, v_uint8x16); +CV_INTRIN_DEF_TYPE2VEC128_TRAITS(schar, v_int8x16); +CV_INTRIN_DEF_TYPE2VEC128_TRAITS(ushort, v_uint16x8); +CV_INTRIN_DEF_TYPE2VEC128_TRAITS(short, v_int16x8); +CV_INTRIN_DEF_TYPE2VEC128_TRAITS(unsigned, v_uint32x4); +CV_INTRIN_DEF_TYPE2VEC128_TRAITS(int, v_int32x4); +CV_INTRIN_DEF_TYPE2VEC128_TRAITS(float, v_float32x4); +CV_INTRIN_DEF_TYPE2VEC128_TRAITS(uint64, v_uint64x2); +CV_INTRIN_DEF_TYPE2VEC128_TRAITS(int64, v_int64x2); +#if CV_SIMD128_64F +CV_INTRIN_DEF_TYPE2VEC128_TRAITS(double, v_float64x2); +#endif + +template static inline +typename Type2Vec128_Traits<_T>::vec_type v_setall(const _T& a); + +template<> inline Type2Vec128_Traits< uchar>::vec_type v_setall< uchar>(const uchar& a) { return v_setall_u8(a); } +template<> inline Type2Vec128_Traits< schar>::vec_type v_setall< schar>(const schar& a) { return v_setall_s8(a); } +template<> inline Type2Vec128_Traits::vec_type v_setall(const ushort& a) { return v_setall_u16(a); } +template<> inline Type2Vec128_Traits< short>::vec_type v_setall< short>(const short& a) { return v_setall_s16(a); } +template<> inline Type2Vec128_Traits< uint>::vec_type v_setall< uint>(const uint& a) { return v_setall_u32(a); } +template<> inline Type2Vec128_Traits< int>::vec_type v_setall< int>(const int& a) { return v_setall_s32(a); } +template<> inline Type2Vec128_Traits::vec_type v_setall(const uint64& a) { return v_setall_u64(a); } +template<> inline Type2Vec128_Traits< int64>::vec_type v_setall< int64>(const int64& a) { return v_setall_s64(a); } +template<> inline Type2Vec128_Traits< float>::vec_type v_setall< float>(const float& a) { return v_setall_f32(a); } +#if CV_SIMD128_64F +template<> inline Type2Vec128_Traits::vec_type v_setall(const double& a) { return v_setall_f64(a); } +#endif + +#endif // SIMD128 + + +#if CV_SIMD256 + +template struct Type2Vec256_Traits; +#define CV_INTRIN_DEF_TYPE2VEC256_TRAITS(type_, vec_type_) \ + template<> struct Type2Vec256_Traits \ + { \ + typedef vec_type_ vec_type; \ + } + +CV_INTRIN_DEF_TYPE2VEC256_TRAITS(uchar, v_uint8x32); +CV_INTRIN_DEF_TYPE2VEC256_TRAITS(schar, v_int8x32); +CV_INTRIN_DEF_TYPE2VEC256_TRAITS(ushort, v_uint16x16); +CV_INTRIN_DEF_TYPE2VEC256_TRAITS(short, v_int16x16); +CV_INTRIN_DEF_TYPE2VEC256_TRAITS(unsigned, v_uint32x8); +CV_INTRIN_DEF_TYPE2VEC256_TRAITS(int, v_int32x8); +CV_INTRIN_DEF_TYPE2VEC256_TRAITS(float, v_float32x8); +CV_INTRIN_DEF_TYPE2VEC256_TRAITS(uint64, v_uint64x4); +CV_INTRIN_DEF_TYPE2VEC256_TRAITS(int64, v_int64x4); +#if CV_SIMD256_64F +CV_INTRIN_DEF_TYPE2VEC256_TRAITS(double, v_float64x4); +#endif + +template static inline +typename Type2Vec256_Traits<_T>::vec_type v256_setall(const _T& a); + +template<> inline Type2Vec256_Traits< uchar>::vec_type v256_setall< uchar>(const uchar& a) { return v256_setall_u8(a); } +template<> inline Type2Vec256_Traits< schar>::vec_type v256_setall< schar>(const schar& a) { return v256_setall_s8(a); } +template<> inline Type2Vec256_Traits::vec_type v256_setall(const ushort& a) { return v256_setall_u16(a); } +template<> inline Type2Vec256_Traits< short>::vec_type v256_setall< short>(const short& a) { return v256_setall_s16(a); } +template<> inline Type2Vec256_Traits< uint>::vec_type v256_setall< uint>(const uint& a) { return 
v256_setall_u32(a); } +template<> inline Type2Vec256_Traits< int>::vec_type v256_setall< int>(const int& a) { return v256_setall_s32(a); } +template<> inline Type2Vec256_Traits::vec_type v256_setall(const uint64& a) { return v256_setall_u64(a); } +template<> inline Type2Vec256_Traits< int64>::vec_type v256_setall< int64>(const int64& a) { return v256_setall_s64(a); } +template<> inline Type2Vec256_Traits< float>::vec_type v256_setall< float>(const float& a) { return v256_setall_f32(a); } +#if CV_SIMD256_64F +template<> inline Type2Vec256_Traits::vec_type v256_setall(const double& a) { return v256_setall_f64(a); } +#endif + +#endif // SIMD256 + + +#if CV_SIMD512 + +template struct Type2Vec512_Traits; +#define CV_INTRIN_DEF_TYPE2VEC512_TRAITS(type_, vec_type_) \ + template<> struct Type2Vec512_Traits \ + { \ + typedef vec_type_ vec_type; \ + } + +CV_INTRIN_DEF_TYPE2VEC512_TRAITS(uchar, v_uint8x64); +CV_INTRIN_DEF_TYPE2VEC512_TRAITS(schar, v_int8x64); +CV_INTRIN_DEF_TYPE2VEC512_TRAITS(ushort, v_uint16x32); +CV_INTRIN_DEF_TYPE2VEC512_TRAITS(short, v_int16x32); +CV_INTRIN_DEF_TYPE2VEC512_TRAITS(unsigned, v_uint32x16); +CV_INTRIN_DEF_TYPE2VEC512_TRAITS(int, v_int32x16); +CV_INTRIN_DEF_TYPE2VEC512_TRAITS(float, v_float32x16); +CV_INTRIN_DEF_TYPE2VEC512_TRAITS(uint64, v_uint64x8); +CV_INTRIN_DEF_TYPE2VEC512_TRAITS(int64, v_int64x8); +#if CV_SIMD512_64F +CV_INTRIN_DEF_TYPE2VEC512_TRAITS(double, v_float64x8); +#endif + +template static inline +typename Type2Vec512_Traits<_T>::vec_type v512_setall(const _T& a); + +template<> inline Type2Vec512_Traits< uchar>::vec_type v512_setall< uchar>(const uchar& a) { return v512_setall_u8(a); } +template<> inline Type2Vec512_Traits< schar>::vec_type v512_setall< schar>(const schar& a) { return v512_setall_s8(a); } +template<> inline Type2Vec512_Traits::vec_type v512_setall(const ushort& a) { return v512_setall_u16(a); } +template<> inline Type2Vec512_Traits< short>::vec_type v512_setall< short>(const short& a) { return v512_setall_s16(a); } +template<> inline Type2Vec512_Traits< uint>::vec_type v512_setall< uint>(const uint& a) { return v512_setall_u32(a); } +template<> inline Type2Vec512_Traits< int>::vec_type v512_setall< int>(const int& a) { return v512_setall_s32(a); } +template<> inline Type2Vec512_Traits::vec_type v512_setall(const uint64& a) { return v512_setall_u64(a); } +template<> inline Type2Vec512_Traits< int64>::vec_type v512_setall< int64>(const int64& a) { return v512_setall_s64(a); } +template<> inline Type2Vec512_Traits< float>::vec_type v512_setall< float>(const float& a) { return v512_setall_f32(a); } +#if CV_SIMD512_64F +template<> inline Type2Vec512_Traits::vec_type v512_setall(const double& a) { return v512_setall_f64(a); } +#endif + +#endif // SIMD512 + + +#if CV_SIMD_WIDTH == 16 +template static inline +typename Type2Vec128_Traits<_T>::vec_type vx_setall(const _T& a) { return v_setall(a); } +#elif CV_SIMD_WIDTH == 32 +template static inline +typename Type2Vec256_Traits<_T>::vec_type vx_setall(const _T& a) { return v256_setall(a); } +#elif CV_SIMD_WIDTH == 64 +template static inline +typename Type2Vec512_Traits<_T>::vec_type vx_setall(const _T& a) { return v512_setall(a); } +#else +#error "Build configuration error, unsupported CV_SIMD_WIDTH" +#endif + + +#endif // OPENCV_HAL_INTRIN_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/ippasync.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/ippasync.hpp new file mode 100644 index 00000000..c35d8d81 --- /dev/null +++ 
b/third_party/opencv/uos/sw_64/include/opencv2/core/ippasync.hpp @@ -0,0 +1,195 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2015, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Copyright (C) 2015, Itseez Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_IPPASYNC_HPP +#define OPENCV_CORE_IPPASYNC_HPP + +#ifdef HAVE_IPP_A // this file will be removed in OpenCV 4.0 + +#include "opencv2/core.hpp" +#include +#include + +namespace cv +{ + +namespace hpp +{ + +/** @addtogroup core_ipp +This section describes conversion between OpenCV and [Intel® IPP Asynchronous +C/C++](http://software.intel.com/en-us/intel-ipp-preview) library. [Getting Started +Guide](http://registrationcenter.intel.com/irc_nas/3727/ipp_async_get_started.htm) help you to +install the library, configure header and library build paths. + */ +//! @{ + + //! convert OpenCV data type to hppDataType + inline int toHppType(const int cvType) + { + int depth = CV_MAT_DEPTH(cvType); + int hppType = depth == CV_8U ? HPP_DATA_TYPE_8U : + depth == CV_16U ? HPP_DATA_TYPE_16U : + depth == CV_16S ? HPP_DATA_TYPE_16S : + depth == CV_32S ? HPP_DATA_TYPE_32S : + depth == CV_32F ? HPP_DATA_TYPE_32F : + depth == CV_64F ? HPP_DATA_TYPE_64F : -1; + CV_Assert( hppType >= 0 ); + return hppType; + } + + //! convert hppDataType to OpenCV data type + inline int toCvType(const int hppType) + { + int cvType = hppType == HPP_DATA_TYPE_8U ? CV_8U : + hppType == HPP_DATA_TYPE_16U ? 
CV_16U : + hppType == HPP_DATA_TYPE_16S ? CV_16S : + hppType == HPP_DATA_TYPE_32S ? CV_32S : + hppType == HPP_DATA_TYPE_32F ? CV_32F : + hppType == HPP_DATA_TYPE_64F ? CV_64F : -1; + CV_Assert( cvType >= 0 ); + return cvType; + } + + /** @brief Convert hppiMatrix to Mat. + + This function allocates and initializes new matrix (if needed) that has the same size and type as + input matrix. Supports CV_8U, CV_16U, CV_16S, CV_32S, CV_32F, CV_64F. + @param src input hppiMatrix. + @param dst output matrix. + @param accel accelerator instance (see hpp::getHpp for the list of acceleration framework types). + @param cn number of channels. + */ + inline void copyHppToMat(hppiMatrix* src, Mat& dst, hppAccel accel, int cn) + { + hppDataType type; + hpp32u width, height; + hppStatus sts; + + if (src == NULL) + return dst.release(); + + sts = hppiInquireMatrix(src, &type, &width, &height); + + CV_Assert( sts == HPP_STATUS_NO_ERROR); + + int matType = CV_MAKETYPE(toCvType(type), cn); + + CV_Assert(width%cn == 0); + + width /= cn; + + dst.create((int)height, (int)width, (int)matType); + + size_t newSize = (size_t)(height*(hpp32u)(dst.step)); + + sts = hppiGetMatrixData(accel,src,(hpp32u)(dst.step),dst.data,&newSize); + + CV_Assert( sts == HPP_STATUS_NO_ERROR); + } + + /** @brief Create Mat from hppiMatrix. + + This function allocates and initializes the Mat that has the same size and type as input matrix. + Supports CV_8U, CV_16U, CV_16S, CV_32S, CV_32F, CV_64F. + @param src input hppiMatrix. + @param accel accelerator instance (see hpp::getHpp for the list of acceleration framework types). + @param cn number of channels. + @sa howToUseIPPAconversion, hpp::copyHppToMat, hpp::getHpp. + */ + inline Mat getMat(hppiMatrix* src, hppAccel accel, int cn) + { + Mat dst; + copyHppToMat(src, dst, accel, cn); + return dst; + } + + /** @brief Create hppiMatrix from Mat. + + This function allocates and initializes the hppiMatrix that has the same size and type as input + matrix, returns the hppiMatrix*. + + If you want to use zero-copy for GPU you should to have 4KB aligned matrix data. See details + [hppiCreateSharedMatrix](http://software.intel.com/ru-ru/node/501697). + + Supports CV_8U, CV_16U, CV_16S, CV_32S, CV_32F, CV_64F. + + @note The hppiMatrix pointer to the image buffer in system memory refers to the src.data. Control + the lifetime of the matrix and don't change its data, if there is no special need. + @param src input matrix. + @param accel accelerator instance. Supports type: + - **HPP_ACCEL_TYPE_CPU** - accelerated by optimized CPU instructions. + - **HPP_ACCEL_TYPE_GPU** - accelerated by GPU programmable units or fixed-function + accelerators. + - **HPP_ACCEL_TYPE_ANY** - any acceleration or no acceleration available. + @sa howToUseIPPAconversion, hpp::getMat + */ + inline hppiMatrix* getHpp(const Mat& src, hppAccel accel) + { + int htype = toHppType(src.type()); + int cn = src.channels(); + + CV_Assert(src.data); + hppAccelType accelType = hppQueryAccelType(accel); + + if (accelType!=HPP_ACCEL_TYPE_CPU) + { + hpp32u pitch, size; + hppQueryMatrixAllocParams(accel, src.cols*cn, src.rows, htype, &pitch, &size); + if (pitch!=0 && size!=0) + if ((int)(src.data)%4096==0 && pitch==(hpp32u)(src.step)) + { + return hppiCreateSharedMatrix(htype, src.cols*cn, src.rows, src.data, pitch, size); + } + } + + return hppiCreateMatrix(htype, src.cols*cn, src.rows, src.data, (hpp32s)(src.step));; + } + +//! 
@} +}} + +#endif + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/mat.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/mat.hpp new file mode 100644 index 00000000..fdcb8fc8 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/mat.hpp @@ -0,0 +1,3766 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_MAT_HPP +#define OPENCV_CORE_MAT_HPP + +#ifndef __cplusplus +# error mat.hpp header must be compiled as C++ +#endif + +#include "opencv2/core/matx.hpp" +#include "opencv2/core/types.hpp" + +#include "opencv2/core/bufferpool.hpp" + +#ifdef CV_CXX11 +#include +#endif + +namespace cv +{ + +//! @addtogroup core_basic +//! @{ + +enum { ACCESS_READ=1<<24, ACCESS_WRITE=1<<25, + ACCESS_RW=3<<24, ACCESS_MASK=ACCESS_RW, ACCESS_FAST=1<<26 }; + +CV__DEBUG_NS_BEGIN + +class CV_EXPORTS _OutputArray; + +//////////////////////// Input/Output Array Arguments ///////////////////////////////// + +/** @brief This is the proxy class for passing read-only input arrays into OpenCV functions. + +It is defined as: +@code + typedef const _InputArray& InputArray; +@endcode +where _InputArray is a class that can be constructed from `Mat`, `Mat_`, `Matx`, +`std::vector`, `std::vector >`, `std::vector`, `std::vector >`, +`UMat`, `std::vector` or `double`. It can also be constructed from a matrix expression. 
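Referring back to the ippasync.hpp hunk above: its hpp::getHpp / hpp::getMat helpers can be combined into a simple round trip. A minimal sketch, assuming HAVE_IPP_A is defined and that an accelerator instance was already created with the IPP Asynchronous library (hppCreateInstance and hppiFreeMatrix are assumed to come from that library, not from this header); error handling is omitted.

#ifdef HAVE_IPP_A
#include "opencv2/core/ippasync.hpp"

/* 'accel' is assumed to have been created beforehand, e.g. via hppCreateInstance. */
cv::Mat roundTripThroughHpp(const cv::Mat& gray, hppAccel accel)
{
    CV_Assert(gray.type() == CV_8UC1 && gray.data);
    hppiMatrix* h = cv::hpp::getHpp(gray, accel);   // wraps or copies gray.data
    cv::Mat back  = cv::hpp::getMat(h, accel, 1);   // allocates a Mat and copies back, 1 channel
    hppiFreeMatrix(h);                              // assumed IPP Async call to release the matrix
    return back;
}
#endif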
+ +Since this is mostly implementation-level class, and its interface may change in future versions, we +do not describe it in details. There are a few key things, though, that should be kept in mind: + +- When you see in the reference manual or in OpenCV source code a function that takes + InputArray, it means that you can actually pass `Mat`, `Matx`, `vector` etc. (see above the + complete list). +- Optional input arguments: If some of the input arrays may be empty, pass cv::noArray() (or + simply cv::Mat() as you probably did before). +- The class is designed solely for passing parameters. That is, normally you *should not* + declare class members, local and global variables of this type. +- If you want to design your own function or a class method that can operate of arrays of + multiple types, you can use InputArray (or OutputArray) for the respective parameters. Inside + a function you should use _InputArray::getMat() method to construct a matrix header for the + array (without copying data). _InputArray::kind() can be used to distinguish Mat from + `vector<>` etc., but normally it is not needed. + +Here is how you can use a function that takes InputArray : +@code + std::vector vec; + // points or a circle + for( int i = 0; i < 30; i++ ) + vec.push_back(Point2f((float)(100 + 30*cos(i*CV_PI*2/5)), + (float)(100 - 30*sin(i*CV_PI*2/5)))); + cv::transform(vec, vec, cv::Matx23f(0.707, -0.707, 10, 0.707, 0.707, 20)); +@endcode +That is, we form an STL vector containing points, and apply in-place affine transformation to the +vector using the 2x3 matrix created inline as `Matx` instance. + +Here is how such a function can be implemented (for simplicity, we implement a very specific case of +it, according to the assertion statement inside) : +@code + void myAffineTransform(InputArray _src, OutputArray _dst, InputArray _m) + { + // get Mat headers for input arrays. This is O(1) operation, + // unless _src and/or _m are matrix expressions. + Mat src = _src.getMat(), m = _m.getMat(); + CV_Assert( src.type() == CV_32FC2 && m.type() == CV_32F && m.size() == Size(3, 2) ); + + // [re]create the output array so that it has the proper size and type. + // In case of Mat it calls Mat::create, in case of STL vector it calls vector::resize. + _dst.create(src.size(), src.type()); + Mat dst = _dst.getMat(); + + for( int i = 0; i < src.rows; i++ ) + for( int j = 0; j < src.cols; j++ ) + { + Point2f pt = src.at(i, j); + dst.at(i, j) = Point2f(m.at(0, 0)*pt.x + + m.at(0, 1)*pt.y + + m.at(0, 2), + m.at(1, 0)*pt.x + + m.at(1, 1)*pt.y + + m.at(1, 2)); + } + } +@endcode +There is another related type, InputArrayOfArrays, which is currently defined as a synonym for +InputArray: +@code + typedef InputArray InputArrayOfArrays; +@endcode +It denotes function arguments that are either vectors of vectors or vectors of matrices. A separate +synonym is needed to generate Python/Java etc. wrappers properly. At the function implementation +level their use is similar, but _InputArray::getMat(idx) should be used to get header for the +idx-th component of the outer vector and _InputArray::size().area() should be used to find the +number of components (vectors/matrices) of the outer vector. + +In general, type support is limited to cv::Mat types. Other types are forbidden. +But in some cases we need to support passing of custom non-general Mat types, like arrays of cv::KeyPoint, cv::DMatch, etc. +This data is not intended to be interpreted as an image data, or processed somehow like regular cv::Mat. 
+To pass such custom type use rawIn() / rawOut() / rawInOut() wrappers. +Custom type is wrapped as Mat-compatible `CV_8UC` values (N = sizeof(T), N <= CV_CN_MAX). + */ +class CV_EXPORTS _InputArray +{ +public: + enum { + KIND_SHIFT = 16, + FIXED_TYPE = 0x8000 << KIND_SHIFT, + FIXED_SIZE = 0x4000 << KIND_SHIFT, + KIND_MASK = 31 << KIND_SHIFT, + + NONE = 0 << KIND_SHIFT, + MAT = 1 << KIND_SHIFT, + MATX = 2 << KIND_SHIFT, + STD_VECTOR = 3 << KIND_SHIFT, + STD_VECTOR_VECTOR = 4 << KIND_SHIFT, + STD_VECTOR_MAT = 5 << KIND_SHIFT, +#if OPENCV_ABI_COMPATIBILITY < 500 + EXPR = 6 << KIND_SHIFT, //!< removed: https://github.com/opencv/opencv/pull/17046 +#endif + OPENGL_BUFFER = 7 << KIND_SHIFT, + CUDA_HOST_MEM = 8 << KIND_SHIFT, + CUDA_GPU_MAT = 9 << KIND_SHIFT, + UMAT =10 << KIND_SHIFT, + STD_VECTOR_UMAT =11 << KIND_SHIFT, + STD_BOOL_VECTOR =12 << KIND_SHIFT, + STD_VECTOR_CUDA_GPU_MAT = 13 << KIND_SHIFT, +#if OPENCV_ABI_COMPATIBILITY < 500 + STD_ARRAY =14 << KIND_SHIFT, //!< removed: https://github.com/opencv/opencv/issues/18897 +#endif + STD_ARRAY_MAT =15 << KIND_SHIFT + }; + + _InputArray(); + _InputArray(int _flags, void* _obj); + _InputArray(const Mat& m); + _InputArray(const MatExpr& expr); + _InputArray(const std::vector& vec); + template _InputArray(const Mat_<_Tp>& m); + template _InputArray(const std::vector<_Tp>& vec); + _InputArray(const std::vector& vec); + template _InputArray(const std::vector >& vec); + _InputArray(const std::vector >&); + template _InputArray(const std::vector >& vec); + template _InputArray(const _Tp* vec, int n); + template _InputArray(const Matx<_Tp, m, n>& matx); + _InputArray(const double& val); + _InputArray(const cuda::GpuMat& d_mat); + _InputArray(const std::vector& d_mat_array); + _InputArray(const ogl::Buffer& buf); + _InputArray(const cuda::HostMem& cuda_mem); + template _InputArray(const cudev::GpuMat_<_Tp>& m); + _InputArray(const UMat& um); + _InputArray(const std::vector& umv); + +#ifdef CV_CXX_STD_ARRAY + template _InputArray(const std::array<_Tp, _Nm>& arr); + template _InputArray(const std::array& arr); +#endif + + template static _InputArray rawIn(const std::vector<_Tp>& vec); +#ifdef CV_CXX_STD_ARRAY + template static _InputArray rawIn(const std::array<_Tp, _Nm>& arr); +#endif + + Mat getMat(int idx=-1) const; + Mat getMat_(int idx=-1) const; + UMat getUMat(int idx=-1) const; + void getMatVector(std::vector& mv) const; + void getUMatVector(std::vector& umv) const; + void getGpuMatVector(std::vector& gpumv) const; + cuda::GpuMat getGpuMat() const; + ogl::Buffer getOGlBuffer() const; + + int getFlags() const; + void* getObj() const; + Size getSz() const; + + int kind() const; + int dims(int i=-1) const; + int cols(int i=-1) const; + int rows(int i=-1) const; + Size size(int i=-1) const; + int sizend(int* sz, int i=-1) const; + bool sameSize(const _InputArray& arr) const; + size_t total(int i=-1) const; + int type(int i=-1) const; + int depth(int i=-1) const; + int channels(int i=-1) const; + bool isContinuous(int i=-1) const; + bool isSubmatrix(int i=-1) const; + bool empty() const; + void copyTo(const _OutputArray& arr) const; + void copyTo(const _OutputArray& arr, const _InputArray & mask) const; + size_t offset(int i=-1) const; + size_t step(int i=-1) const; + bool isMat() const; + bool isUMat() const; + bool isMatVector() const; + bool isUMatVector() const; + bool isMatx() const; + bool isVector() const; + bool isGpuMat() const; + bool isGpuMatVector() const; + ~_InputArray(); + +protected: + int flags; + void* obj; + Size sz; + + void init(int 
_flags, const void* _obj); + void init(int _flags, const void* _obj, Size _sz); +}; + + +/** @brief This type is very similar to InputArray except that it is used for input/output and output function +parameters. + +Just like with InputArray, OpenCV users should not care about OutputArray, they just pass `Mat`, +`vector` etc. to the functions. The same limitation as for `InputArray`: *Do not explicitly +create OutputArray instances* applies here too. + +If you want to make your function polymorphic (i.e. accept different arrays as output parameters), +it is also not very difficult. Take the sample above as the reference. Note that +_OutputArray::create() needs to be called before _OutputArray::getMat(). This way you guarantee +that the output array is properly allocated. + +Optional output parameters. If you do not need certain output array to be computed and returned to +you, pass cv::noArray(), just like you would in the case of optional input array. At the +implementation level, use _OutputArray::needed() to check if certain output array needs to be +computed or not. + +There are several synonyms for OutputArray that are used to assist automatic Python/Java/... wrapper +generators: +@code + typedef OutputArray OutputArrayOfArrays; + typedef OutputArray InputOutputArray; + typedef OutputArray InputOutputArrayOfArrays; +@endcode + */ +class CV_EXPORTS _OutputArray : public _InputArray +{ +public: + enum + { + DEPTH_MASK_8U = 1 << CV_8U, + DEPTH_MASK_8S = 1 << CV_8S, + DEPTH_MASK_16U = 1 << CV_16U, + DEPTH_MASK_16S = 1 << CV_16S, + DEPTH_MASK_32S = 1 << CV_32S, + DEPTH_MASK_32F = 1 << CV_32F, + DEPTH_MASK_64F = 1 << CV_64F, + DEPTH_MASK_ALL = (DEPTH_MASK_64F<<1)-1, + DEPTH_MASK_ALL_BUT_8S = DEPTH_MASK_ALL & ~DEPTH_MASK_8S, + DEPTH_MASK_FLT = DEPTH_MASK_32F + DEPTH_MASK_64F + }; + + _OutputArray(); + _OutputArray(int _flags, void* _obj); + _OutputArray(Mat& m); + _OutputArray(std::vector& vec); + _OutputArray(cuda::GpuMat& d_mat); + _OutputArray(std::vector& d_mat); + _OutputArray(ogl::Buffer& buf); + _OutputArray(cuda::HostMem& cuda_mem); + template _OutputArray(cudev::GpuMat_<_Tp>& m); + template _OutputArray(std::vector<_Tp>& vec); + _OutputArray(std::vector& vec); + template _OutputArray(std::vector >& vec); + _OutputArray(std::vector >&); + template _OutputArray(std::vector >& vec); + template _OutputArray(Mat_<_Tp>& m); + template _OutputArray(_Tp* vec, int n); + template _OutputArray(Matx<_Tp, m, n>& matx); + _OutputArray(UMat& m); + _OutputArray(std::vector& vec); + + _OutputArray(const Mat& m); + _OutputArray(const std::vector& vec); + _OutputArray(const cuda::GpuMat& d_mat); + _OutputArray(const std::vector& d_mat); + _OutputArray(const ogl::Buffer& buf); + _OutputArray(const cuda::HostMem& cuda_mem); + template _OutputArray(const cudev::GpuMat_<_Tp>& m); + template _OutputArray(const std::vector<_Tp>& vec); + template _OutputArray(const std::vector >& vec); + template _OutputArray(const std::vector >& vec); + template _OutputArray(const Mat_<_Tp>& m); + template _OutputArray(const _Tp* vec, int n); + template _OutputArray(const Matx<_Tp, m, n>& matx); + _OutputArray(const UMat& m); + _OutputArray(const std::vector& vec); + +#ifdef CV_CXX_STD_ARRAY + template _OutputArray(std::array<_Tp, _Nm>& arr); + template _OutputArray(const std::array<_Tp, _Nm>& arr); + template _OutputArray(std::array& arr); + template _OutputArray(const std::array& arr); +#endif + + template static _OutputArray rawOut(std::vector<_Tp>& vec); +#ifdef CV_CXX_STD_ARRAY + template static _OutputArray 
rawOut(std::array<_Tp, _Nm>& arr); +#endif + + bool fixedSize() const; + bool fixedType() const; + bool needed() const; + Mat& getMatRef(int i=-1) const; + UMat& getUMatRef(int i=-1) const; + cuda::GpuMat& getGpuMatRef() const; + std::vector& getGpuMatVecRef() const; + ogl::Buffer& getOGlBufferRef() const; + cuda::HostMem& getHostMemRef() const; + void create(Size sz, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const; + void create(int rows, int cols, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const; + void create(int dims, const int* size, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const; + void createSameSize(const _InputArray& arr, int mtype) const; + void release() const; + void clear() const; + void setTo(const _InputArray& value, const _InputArray & mask = _InputArray()) const; + + void assign(const UMat& u) const; + void assign(const Mat& m) const; + + void assign(const std::vector& v) const; + void assign(const std::vector& v) const; + + void move(UMat& u) const; + void move(Mat& m) const; +}; + + +class CV_EXPORTS _InputOutputArray : public _OutputArray +{ +public: + _InputOutputArray(); + _InputOutputArray(int _flags, void* _obj); + _InputOutputArray(Mat& m); + _InputOutputArray(std::vector& vec); + _InputOutputArray(cuda::GpuMat& d_mat); + _InputOutputArray(ogl::Buffer& buf); + _InputOutputArray(cuda::HostMem& cuda_mem); + template _InputOutputArray(cudev::GpuMat_<_Tp>& m); + template _InputOutputArray(std::vector<_Tp>& vec); + _InputOutputArray(std::vector& vec); + template _InputOutputArray(std::vector >& vec); + template _InputOutputArray(std::vector >& vec); + template _InputOutputArray(Mat_<_Tp>& m); + template _InputOutputArray(_Tp* vec, int n); + template _InputOutputArray(Matx<_Tp, m, n>& matx); + _InputOutputArray(UMat& m); + _InputOutputArray(std::vector& vec); + + _InputOutputArray(const Mat& m); + _InputOutputArray(const std::vector& vec); + _InputOutputArray(const cuda::GpuMat& d_mat); + _InputOutputArray(const std::vector& d_mat); + _InputOutputArray(const ogl::Buffer& buf); + _InputOutputArray(const cuda::HostMem& cuda_mem); + template _InputOutputArray(const cudev::GpuMat_<_Tp>& m); + template _InputOutputArray(const std::vector<_Tp>& vec); + template _InputOutputArray(const std::vector >& vec); + template _InputOutputArray(const std::vector >& vec); + template _InputOutputArray(const Mat_<_Tp>& m); + template _InputOutputArray(const _Tp* vec, int n); + template _InputOutputArray(const Matx<_Tp, m, n>& matx); + _InputOutputArray(const UMat& m); + _InputOutputArray(const std::vector& vec); + +#ifdef CV_CXX_STD_ARRAY + template _InputOutputArray(std::array<_Tp, _Nm>& arr); + template _InputOutputArray(const std::array<_Tp, _Nm>& arr); + template _InputOutputArray(std::array& arr); + template _InputOutputArray(const std::array& arr); +#endif + + template static _InputOutputArray rawInOut(std::vector<_Tp>& vec); +#ifdef CV_CXX_STD_ARRAY + template _InputOutputArray rawInOut(std::array<_Tp, _Nm>& arr); +#endif + +}; + +/** Helper to wrap custom types. @see InputArray */ +template static inline _InputArray rawIn(_Tp& v); +/** Helper to wrap custom types. @see InputArray */ +template static inline _OutputArray rawOut(_Tp& v); +/** Helper to wrap custom types. 
@see InputArray */ +template static inline _InputOutputArray rawInOut(_Tp& v); + +CV__DEBUG_NS_END + +typedef const _InputArray& InputArray; +typedef InputArray InputArrayOfArrays; +typedef const _OutputArray& OutputArray; +typedef OutputArray OutputArrayOfArrays; +typedef const _InputOutputArray& InputOutputArray; +typedef InputOutputArray InputOutputArrayOfArrays; + +CV_EXPORTS InputOutputArray noArray(); + +/////////////////////////////////// MatAllocator ////////////////////////////////////// + +//! Usage flags for allocator +enum UMatUsageFlags +{ + USAGE_DEFAULT = 0, + + // buffer allocation policy is platform and usage specific + USAGE_ALLOCATE_HOST_MEMORY = 1 << 0, + USAGE_ALLOCATE_DEVICE_MEMORY = 1 << 1, + USAGE_ALLOCATE_SHARED_MEMORY = 1 << 2, // It is not equal to: USAGE_ALLOCATE_HOST_MEMORY | USAGE_ALLOCATE_DEVICE_MEMORY + + __UMAT_USAGE_FLAGS_32BIT = 0x7fffffff // Binary compatibility hint +}; + +struct CV_EXPORTS UMatData; + +/** @brief Custom array allocator +*/ +class CV_EXPORTS MatAllocator +{ +public: + MatAllocator() {} + virtual ~MatAllocator() {} + + // let's comment it off for now to detect and fix all the uses of allocator + //virtual void allocate(int dims, const int* sizes, int type, int*& refcount, + // uchar*& datastart, uchar*& data, size_t* step) = 0; + //virtual void deallocate(int* refcount, uchar* datastart, uchar* data) = 0; + virtual UMatData* allocate(int dims, const int* sizes, int type, + void* data, size_t* step, int flags, UMatUsageFlags usageFlags) const = 0; + virtual bool allocate(UMatData* data, int accessflags, UMatUsageFlags usageFlags) const = 0; + virtual void deallocate(UMatData* data) const = 0; + virtual void map(UMatData* data, int accessflags) const; + virtual void unmap(UMatData* data) const; + virtual void download(UMatData* data, void* dst, int dims, const size_t sz[], + const size_t srcofs[], const size_t srcstep[], + const size_t dststep[]) const; + virtual void upload(UMatData* data, const void* src, int dims, const size_t sz[], + const size_t dstofs[], const size_t dststep[], + const size_t srcstep[]) const; + virtual void copy(UMatData* srcdata, UMatData* dstdata, int dims, const size_t sz[], + const size_t srcofs[], const size_t srcstep[], + const size_t dstofs[], const size_t dststep[], bool sync) const; + + // default implementation returns DummyBufferPoolController + virtual BufferPoolController* getBufferPoolController(const char* id = NULL) const; +}; + + +//////////////////////////////// MatCommaInitializer ////////////////////////////////// + +/** @brief Comma-separated Matrix Initializer + + The class instances are usually not created explicitly. + Instead, they are created on "matrix << firstValue" operator. + + The sample below initializes 2x2 rotation matrix: + + \code + double angle = 30, a = cos(angle*CV_PI/180), b = sin(angle*CV_PI/180); + Mat R = (Mat_(2,2) << a, -b, b, a); + \endcode +*/ +template class MatCommaInitializer_ +{ +public: + //! the constructor, created by "matrix << firstValue" operator, where matrix is cv::Mat + MatCommaInitializer_(Mat_<_Tp>* _m); + //! the operator that takes the next value and put it to the matrix + template MatCommaInitializer_<_Tp>& operator , (T2 v); + //! another form of conversion operator + operator Mat_<_Tp>() const; +protected: + MatIterator_<_Tp> it; +}; + + +/////////////////////////////////////// Mat /////////////////////////////////////////// + +// note that umatdata might be allocated together +// with the matrix data, not as a separate object. 
+// therefore, it does not have constructor or destructor;
+// it should be explicitly initialized using init().
+struct CV_EXPORTS UMatData
+{
+    enum { COPY_ON_MAP=1, HOST_COPY_OBSOLETE=2,
+        DEVICE_COPY_OBSOLETE=4, TEMP_UMAT=8, TEMP_COPIED_UMAT=24,
+        USER_ALLOCATED=32, DEVICE_MEM_MAPPED=64,
+        ASYNC_CLEANUP=128
+    };
+    UMatData(const MatAllocator* allocator);
+    ~UMatData();
+
+    // provide atomic access to the structure
+    void lock();
+    void unlock();
+
+    bool hostCopyObsolete() const;
+    bool deviceCopyObsolete() const;
+    bool deviceMemMapped() const;
+    bool copyOnMap() const;
+    bool tempUMat() const;
+    bool tempCopiedUMat() const;
+    void markHostCopyObsolete(bool flag);
+    void markDeviceCopyObsolete(bool flag);
+    void markDeviceMemMapped(bool flag);
+
+    const MatAllocator* prevAllocator;
+    const MatAllocator* currAllocator;
+    int urefcount;
+    int refcount;
+    uchar* data;
+    uchar* origdata;
+    size_t size;
+
+    int flags;
+    void* handle;
+    void* userdata;
+    int allocatorFlags_;
+    int mapcount;
+    UMatData* originalUMatData;
+};
+
+
+struct CV_EXPORTS MatSize
+{
+    explicit MatSize(int* _p) CV_NOEXCEPT;
+    int dims() const CV_NOEXCEPT;
+    Size operator()() const;
+    const int& operator[](int i) const;
+    int& operator[](int i);
+    operator const int*() const CV_NOEXCEPT;  // TODO OpenCV 4.0: drop this
+    bool operator == (const MatSize& sz) const CV_NOEXCEPT;
+    bool operator != (const MatSize& sz) const CV_NOEXCEPT;
+
+    int* p;
+};
+
+struct CV_EXPORTS MatStep
+{
+    MatStep() CV_NOEXCEPT;
+    explicit MatStep(size_t s) CV_NOEXCEPT;
+    const size_t& operator[](int i) const CV_NOEXCEPT;
+    size_t& operator[](int i) CV_NOEXCEPT;
+    operator size_t() const;
+    MatStep& operator = (size_t s);
+
+    size_t* p;
+    size_t buf[2];
+protected:
+    MatStep& operator = (const MatStep&);
+};
+
+/** @example samples/cpp/cout_mat.cpp
+An example demonstrating the serial out capabilities of cv::Mat
+*/
+
+ /** @brief n-dimensional dense array class \anchor CVMat_Details
+
+The class Mat represents an n-dimensional dense numerical single-channel or multi-channel array. It
+can be used to store real or complex-valued vectors and matrices, grayscale or color images, voxel
+volumes, vector fields, point clouds, tensors, histograms (though, very high-dimensional histograms
+may be better stored in a SparseMat ). The data layout of the array `M` is defined by the array
+`M.step[]`, so that the address of element \f$(i_0,...,i_{M.dims-1})\f$, where \f$0\leq i_k<M.size[k]\f$, is
+computed as:
+\f[addr(M_{i_0,...,i_{M.dims-1}}) = M.data + M.step[0]*i_0 + M.step[1]*i_1 + ... + M.step[M.dims-1]*i_{M.dims-1}\f]
+In case of a 2-dimensional array, the above formula is reduced to:
+\f[addr(M_{i,j}) = M.data + M.step[0]*i + M.step[1]*j\f]
+Note that `M.step[i] >= M.step[i+1]` (in fact, `M.step[i] >= M.step[i+1]*M.size[i+1]` ). This means
+that 2-dimensional matrices are stored row-by-row, 3-dimensional matrices are stored plane-by-plane,
+and so on. M.step[M.dims-1] is minimal and always equal to the element size M.elemSize() .
+
+So, the data layout in Mat is fully compatible with CvMat, IplImage, and CvMatND types from OpenCV
+1.x. It is also compatible with the majority of dense array types from the standard toolkits and
+SDKs, such as Numpy (ndarray), Win32 (independent device bitmaps), and others, that is, with any
+array that uses *steps* (or *strides*) to compute the position of a pixel. Due to this
+compatibility, it is possible to make a Mat header for user-allocated data and process it in-place
+using OpenCV functions.
+
+There are many different ways to create a Mat object. The most popular options are listed below:
+
+- Use the create(nrows, ncols, type) method or the similar Mat(nrows, ncols, type[, fillValue])
+constructor. A new array of the specified size and type is allocated.
type has the same meaning as +in the cvCreateMat method. For example, CV_8UC1 means a 8-bit single-channel array, CV_32FC2 +means a 2-channel (complex) floating-point array, and so on. +@code + // make a 7x7 complex matrix filled with 1+3j. + Mat M(7,7,CV_32FC2,Scalar(1,3)); + // and now turn M to a 100x60 15-channel 8-bit matrix. + // The old content will be deallocated + M.create(100,60,CV_8UC(15)); +@endcode +As noted in the introduction to this chapter, create() allocates only a new array when the shape +or type of the current array are different from the specified ones. + +- Create a multi-dimensional array: +@code + // create a 100x100x100 8-bit array + int sz[] = {100, 100, 100}; + Mat bigCube(3, sz, CV_8U, Scalar::all(0)); +@endcode +It passes the number of dimensions =1 to the Mat constructor but the created array will be +2-dimensional with the number of columns set to 1. So, Mat::dims is always \>= 2 (can also be 0 +when the array is empty). + +- Use a copy constructor or assignment operator where there can be an array or expression on the +right side (see below). As noted in the introduction, the array assignment is an O(1) operation +because it only copies the header and increases the reference counter. The Mat::clone() method can +be used to get a full (deep) copy of the array when you need it. + +- Construct a header for a part of another array. It can be a single row, single column, several +rows, several columns, rectangular region in the array (called a *minor* in algebra) or a +diagonal. Such operations are also O(1) because the new header references the same data. You can +actually modify a part of the array using this feature, for example: +@code + // add the 5-th row, multiplied by 3 to the 3rd row + M.row(3) = M.row(3) + M.row(5)*3; + // now copy the 7-th column to the 1-st column + // M.col(1) = M.col(7); // this will not work + Mat M1 = M.col(1); + M.col(7).copyTo(M1); + // create a new 320x240 image + Mat img(Size(320,240),CV_8UC3); + // select a ROI + Mat roi(img, Rect(10,10,100,100)); + // fill the ROI with (0,255,0) (which is green in RGB space); + // the original 320x240 image will be modified + roi = Scalar(0,255,0); +@endcode +Due to the additional datastart and dataend members, it is possible to compute a relative +sub-array position in the main *container* array using locateROI(): +@code + Mat A = Mat::eye(10, 10, CV_32S); + // extracts A columns, 1 (inclusive) to 3 (exclusive). + Mat B = A(Range::all(), Range(1, 3)); + // extracts B rows, 5 (inclusive) to 9 (exclusive). + // that is, C \~ A(Range(5, 9), Range(1, 3)) + Mat C = B(Range(5, 9), Range::all()); + Size size; Point ofs; + C.locateROI(size, ofs); + // size will be (width=10,height=10) and the ofs will be (x=1, y=5) +@endcode +As in case of whole matrices, if you need a deep copy, use the `clone()` method of the extracted +sub-matrices. + +- Make a header for user-allocated data. It can be useful to do the following: + -# Process "foreign" data using OpenCV (for example, when you implement a DirectShow\* filter or + a processing module for gstreamer, and so on). For example: + @code + Mat process_video_frame(const unsigned char* pixels, + int width, int height, int step) + { + // wrap input buffer + Mat img(height, width, CV_8UC3, (unsigned char*)pixels, step); + + Mat result; + GaussianBlur(img, result, Size(7, 7), 1.5, 1.5); + + return result; + } + @endcode + -# Quickly initialize small matrices and/or get a super-fast element access. 
+    @code
+        double m[3][3] = {{a, b, c}, {d, e, f}, {g, h, i}};
+        Mat M = Mat(3, 3, CV_64F, m).inv();
+    @endcode
+    .
+    Partial yet very common cases of this *user-allocated data* case are conversions from CvMat and
+    IplImage to Mat. For this purpose, there is function cv::cvarrToMat taking pointers to CvMat or
+    IplImage and the optional flag indicating whether to copy the data or not.
+    @snippet samples/cpp/image.cpp iplimage
+
+- Use MATLAB-style array initializers, zeros(), ones(), eye(), for example:
+@code
+    // create a double-precision identity matrix and add it to M.
+    M += Mat::eye(M.rows, M.cols, CV_64F);
+@endcode
+
+- Use a comma-separated initializer:
+@code
+    // create a 3x3 double-precision identity matrix
+    Mat M = (Mat_<double>(3,3) << 1, 0, 0, 0, 1, 0, 0, 0, 1);
+@endcode
+With this approach, you first call a constructor of the Mat class with the proper parameters, and
+then you just put `<< operator` followed by comma-separated values that can be constants,
+variables, expressions, and so on. Also, note the extra parentheses required to avoid compilation
+errors.
+
+Once the array is created, it is automatically managed via a reference-counting mechanism. If the
+array header is built on top of user-allocated data, you should handle the data by yourself. The
+array data is deallocated when no one points to it. If you want to release the data pointed by an
+array header before the array destructor is called, use Mat::release().
+
+The next important thing to learn about the array class is element access. This manual already
+described how to compute an address of each array element. Normally, you are not required to use the
+formula directly in the code. If you know the array element type (which can be retrieved using the
+method Mat::type() ), you can access the element \f$M_{ij}\f$ of a 2-dimensional array as:
+@code
+    M.at<double>(i,j) += 1.f;
+@endcode
+assuming that `M` is a double-precision floating-point array. There are several variants of the method
+at for a different number of dimensions.
+
+If you need to process a whole row of a 2D array, the most efficient way is to get the pointer to
+the row first, and then just use the plain C operator [] :
+@code
+    // compute sum of positive matrix elements
+    // (assuming that M is a double-precision matrix)
+    double sum=0;
+    for(int i = 0; i < M.rows; i++)
+    {
+        const double* Mi = M.ptr<double>(i);
+        for(int j = 0; j < M.cols; j++)
+            sum += std::max(Mi[j], 0.);
+    }
+@endcode
+Some operations, like the one above, do not actually depend on the array shape. They just process
+elements of an array one by one (or elements from multiple arrays that have the same coordinates,
+for example, array addition). Such operations are called *element-wise*. It makes sense to check
+whether all the input/output arrays are continuous, namely, have no gaps at the end of each row. If
+yes, process them as a long single row:
+@code
+    // compute the sum of positive matrix elements, optimized variant
+    double sum=0;
+    int cols = M.cols, rows = M.rows;
+    if(M.isContinuous())
+    {
+        cols *= rows;
+        rows = 1;
+    }
+    for(int i = 0; i < rows; i++)
+    {
+        const double* Mi = M.ptr<double>(i);
+        for(int j = 0; j < cols; j++)
+            sum += std::max(Mi[j], 0.);
+    }
+@endcode
+In case of the continuous matrix, the outer loop body is executed just once. So, the overhead is
+smaller, which is especially noticeable in case of small matrices.
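+
+A compact variation on the same idea (an illustrative sketch rather than an official recipe; `flat`
+is just a local name for the reshaped header): when the matrix is known to be continuous, it can be
+flattened into a single-row header with reshape() and traversed in one pass:
+@code
+    // sum of positive elements of a continuous double-precision matrix (sketch)
+    double sum = 0;
+    CV_Assert(M.isContinuous());
+    Mat flat = M.reshape(1, 1);              // single-row header over the same data, O(1), nothing is copied
+    const double* p = flat.ptr<double>(0);
+    for(size_t j = 0; j < flat.total(); j++)
+        sum += std::max(p[j], 0.);
+@endcode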
+ +Finally, there are STL-style iterators that are smart enough to skip gaps between successive rows: +@code + // compute sum of positive matrix elements, iterator-based variant + double sum=0; + MatConstIterator_ it = M.begin(), it_end = M.end(); + for(; it != it_end; ++it) + sum += std::max(*it, 0.); +@endcode +The matrix iterators are random-access iterators, so they can be passed to any STL algorithm, +including std::sort(). + +@note Matrix Expressions and arithmetic see MatExpr +*/ +class CV_EXPORTS Mat +{ +public: + /** + These are various constructors that form a matrix. As noted in the AutomaticAllocation, often + the default constructor is enough, and the proper matrix will be allocated by an OpenCV function. + The constructed matrix can further be assigned to another matrix or matrix expression or can be + allocated with Mat::create . In the former case, the old content is de-referenced. + */ + Mat() CV_NOEXCEPT; + + /** @overload + @param rows Number of rows in a 2D array. + @param cols Number of columns in a 2D array. + @param type Array type. Use CV_8UC1, ..., CV_64FC4 to create 1-4 channel matrices, or + CV_8UC(n), ..., CV_64FC(n) to create multi-channel (up to CV_CN_MAX channels) matrices. + */ + Mat(int rows, int cols, int type); + + /** @overload + @param size 2D array size: Size(cols, rows) . In the Size() constructor, the number of rows and the + number of columns go in the reverse order. + @param type Array type. Use CV_8UC1, ..., CV_64FC4 to create 1-4 channel matrices, or + CV_8UC(n), ..., CV_64FC(n) to create multi-channel (up to CV_CN_MAX channels) matrices. + */ + Mat(Size size, int type); + + /** @overload + @param rows Number of rows in a 2D array. + @param cols Number of columns in a 2D array. + @param type Array type. Use CV_8UC1, ..., CV_64FC4 to create 1-4 channel matrices, or + CV_8UC(n), ..., CV_64FC(n) to create multi-channel (up to CV_CN_MAX channels) matrices. + @param s An optional value to initialize each matrix element with. To set all the matrix elements to + the particular value after the construction, use the assignment operator + Mat::operator=(const Scalar& value) . + */ + Mat(int rows, int cols, int type, const Scalar& s); + + /** @overload + @param size 2D array size: Size(cols, rows) . In the Size() constructor, the number of rows and the + number of columns go in the reverse order. + @param type Array type. Use CV_8UC1, ..., CV_64FC4 to create 1-4 channel matrices, or + CV_8UC(n), ..., CV_64FC(n) to create multi-channel (up to CV_CN_MAX channels) matrices. + @param s An optional value to initialize each matrix element with. To set all the matrix elements to + the particular value after the construction, use the assignment operator + Mat::operator=(const Scalar& value) . + */ + Mat(Size size, int type, const Scalar& s); + + /** @overload + @param ndims Array dimensionality. + @param sizes Array of integers specifying an n-dimensional array shape. + @param type Array type. Use CV_8UC1, ..., CV_64FC4 to create 1-4 channel matrices, or + CV_8UC(n), ..., CV_64FC(n) to create multi-channel (up to CV_CN_MAX channels) matrices. + */ + Mat(int ndims, const int* sizes, int type); + + /** @overload + @param sizes Array of integers specifying an n-dimensional array shape. + @param type Array type. Use CV_8UC1, ..., CV_64FC4 to create 1-4 channel matrices, or + CV_8UC(n), ..., CV_64FC(n) to create multi-channel (up to CV_CN_MAX channels) matrices. + */ + Mat(const std::vector& sizes, int type); + + /** @overload + @param ndims Array dimensionality. 
+ @param sizes Array of integers specifying an n-dimensional array shape. + @param type Array type. Use CV_8UC1, ..., CV_64FC4 to create 1-4 channel matrices, or + CV_8UC(n), ..., CV_64FC(n) to create multi-channel (up to CV_CN_MAX channels) matrices. + @param s An optional value to initialize each matrix element with. To set all the matrix elements to + the particular value after the construction, use the assignment operator + Mat::operator=(const Scalar& value) . + */ + Mat(int ndims, const int* sizes, int type, const Scalar& s); + + /** @overload + @param sizes Array of integers specifying an n-dimensional array shape. + @param type Array type. Use CV_8UC1, ..., CV_64FC4 to create 1-4 channel matrices, or + CV_8UC(n), ..., CV_64FC(n) to create multi-channel (up to CV_CN_MAX channels) matrices. + @param s An optional value to initialize each matrix element with. To set all the matrix elements to + the particular value after the construction, use the assignment operator + Mat::operator=(const Scalar& value) . + */ + Mat(const std::vector& sizes, int type, const Scalar& s); + + + /** @overload + @param m Array that (as a whole or partly) is assigned to the constructed matrix. No data is copied + by these constructors. Instead, the header pointing to m data or its sub-array is constructed and + associated with it. The reference counter, if any, is incremented. So, when you modify the matrix + formed using such a constructor, you also modify the corresponding elements of m . If you want to + have an independent copy of the sub-array, use Mat::clone() . + */ + Mat(const Mat& m); + + /** @overload + @param rows Number of rows in a 2D array. + @param cols Number of columns in a 2D array. + @param type Array type. Use CV_8UC1, ..., CV_64FC4 to create 1-4 channel matrices, or + CV_8UC(n), ..., CV_64FC(n) to create multi-channel (up to CV_CN_MAX channels) matrices. + @param data Pointer to the user data. Matrix constructors that take data and step parameters do not + allocate matrix data. Instead, they just initialize the matrix header that points to the specified + data, which means that no data is copied. This operation is very efficient and can be used to + process external data using OpenCV functions. The external data is not automatically deallocated, so + you should take care of it. + @param step Number of bytes each matrix row occupies. The value should include the padding bytes at + the end of each row, if any. If the parameter is missing (set to AUTO_STEP ), no padding is assumed + and the actual step is calculated as cols*elemSize(). See Mat::elemSize. + */ + Mat(int rows, int cols, int type, void* data, size_t step=AUTO_STEP); + + /** @overload + @param size 2D array size: Size(cols, rows) . In the Size() constructor, the number of rows and the + number of columns go in the reverse order. + @param type Array type. Use CV_8UC1, ..., CV_64FC4 to create 1-4 channel matrices, or + CV_8UC(n), ..., CV_64FC(n) to create multi-channel (up to CV_CN_MAX channels) matrices. + @param data Pointer to the user data. Matrix constructors that take data and step parameters do not + allocate matrix data. Instead, they just initialize the matrix header that points to the specified + data, which means that no data is copied. This operation is very efficient and can be used to + process external data using OpenCV functions. The external data is not automatically deallocated, so + you should take care of it. + @param step Number of bytes each matrix row occupies. 
The value should include the padding bytes at + the end of each row, if any. If the parameter is missing (set to AUTO_STEP ), no padding is assumed + and the actual step is calculated as cols*elemSize(). See Mat::elemSize. + */ + Mat(Size size, int type, void* data, size_t step=AUTO_STEP); + + /** @overload + @param ndims Array dimensionality. + @param sizes Array of integers specifying an n-dimensional array shape. + @param type Array type. Use CV_8UC1, ..., CV_64FC4 to create 1-4 channel matrices, or + CV_8UC(n), ..., CV_64FC(n) to create multi-channel (up to CV_CN_MAX channels) matrices. + @param data Pointer to the user data. Matrix constructors that take data and step parameters do not + allocate matrix data. Instead, they just initialize the matrix header that points to the specified + data, which means that no data is copied. This operation is very efficient and can be used to + process external data using OpenCV functions. The external data is not automatically deallocated, so + you should take care of it. + @param steps Array of ndims-1 steps in case of a multi-dimensional array (the last step is always + set to the element size). If not specified, the matrix is assumed to be continuous. + */ + Mat(int ndims, const int* sizes, int type, void* data, const size_t* steps=0); + + /** @overload + @param sizes Array of integers specifying an n-dimensional array shape. + @param type Array type. Use CV_8UC1, ..., CV_64FC4 to create 1-4 channel matrices, or + CV_8UC(n), ..., CV_64FC(n) to create multi-channel (up to CV_CN_MAX channels) matrices. + @param data Pointer to the user data. Matrix constructors that take data and step parameters do not + allocate matrix data. Instead, they just initialize the matrix header that points to the specified + data, which means that no data is copied. This operation is very efficient and can be used to + process external data using OpenCV functions. The external data is not automatically deallocated, so + you should take care of it. + @param steps Array of ndims-1 steps in case of a multi-dimensional array (the last step is always + set to the element size). If not specified, the matrix is assumed to be continuous. + */ + Mat(const std::vector& sizes, int type, void* data, const size_t* steps=0); + + /** @overload + @param m Array that (as a whole or partly) is assigned to the constructed matrix. No data is copied + by these constructors. Instead, the header pointing to m data or its sub-array is constructed and + associated with it. The reference counter, if any, is incremented. So, when you modify the matrix + formed using such a constructor, you also modify the corresponding elements of m . If you want to + have an independent copy of the sub-array, use Mat::clone() . + @param rowRange Range of the m rows to take. As usual, the range start is inclusive and the range + end is exclusive. Use Range::all() to take all the rows. + @param colRange Range of the m columns to take. Use Range::all() to take all the columns. + */ + Mat(const Mat& m, const Range& rowRange, const Range& colRange=Range::all()); + + /** @overload + @param m Array that (as a whole or partly) is assigned to the constructed matrix. No data is copied + by these constructors. Instead, the header pointing to m data or its sub-array is constructed and + associated with it. The reference counter, if any, is incremented. So, when you modify the matrix + formed using such a constructor, you also modify the corresponding elements of m . 
If you want to + have an independent copy of the sub-array, use Mat::clone() . + @param roi Region of interest. + */ + Mat(const Mat& m, const Rect& roi); + + /** @overload + @param m Array that (as a whole or partly) is assigned to the constructed matrix. No data is copied + by these constructors. Instead, the header pointing to m data or its sub-array is constructed and + associated with it. The reference counter, if any, is incremented. So, when you modify the matrix + formed using such a constructor, you also modify the corresponding elements of m . If you want to + have an independent copy of the sub-array, use Mat::clone() . + @param ranges Array of selected ranges of m along each dimensionality. + */ + Mat(const Mat& m, const Range* ranges); + + /** @overload + @param m Array that (as a whole or partly) is assigned to the constructed matrix. No data is copied + by these constructors. Instead, the header pointing to m data or its sub-array is constructed and + associated with it. The reference counter, if any, is incremented. So, when you modify the matrix + formed using such a constructor, you also modify the corresponding elements of m . If you want to + have an independent copy of the sub-array, use Mat::clone() . + @param ranges Array of selected ranges of m along each dimensionality. + */ + Mat(const Mat& m, const std::vector& ranges); + + /** @overload + @param vec STL vector whose elements form the matrix. The matrix has a single column and the number + of rows equal to the number of vector elements. Type of the matrix matches the type of vector + elements. The constructor can handle arbitrary types, for which there is a properly declared + DataType . This means that the vector elements must be primitive numbers or uni-type numerical + tuples of numbers. Mixed-type structures are not supported. The corresponding constructor is + explicit. Since STL vectors are not automatically converted to Mat instances, you should write + Mat(vec) explicitly. Unless you copy the data into the matrix ( copyData=true ), no new elements + will be added to the vector because it can potentially yield vector data reallocation, and, thus, + the matrix data pointer will be invalid. + @param copyData Flag to specify whether the underlying data of the STL vector should be copied + to (true) or shared with (false) the newly constructed matrix. When the data is copied, the + allocated buffer is managed using Mat reference counting mechanism. While the data is shared, + the reference counter is NULL, and you should not deallocate the data until the matrix is not + destructed. + */ + template explicit Mat(const std::vector<_Tp>& vec, bool copyData=false); + +#ifdef CV_CXX11 + /** @overload + */ + template::value>::type> + explicit Mat(const std::initializer_list<_Tp> list); + + /** @overload + */ + template explicit Mat(const std::initializer_list sizes, const std::initializer_list<_Tp> list); +#endif + +#ifdef CV_CXX_STD_ARRAY + /** @overload + */ + template explicit Mat(const std::array<_Tp, _Nm>& arr, bool copyData=false); +#endif + + /** @overload + */ + template explicit Mat(const Vec<_Tp, n>& vec, bool copyData=true); + + /** @overload + */ + template explicit Mat(const Matx<_Tp, m, n>& mtx, bool copyData=true); + + /** @overload + */ + template explicit Mat(const Point_<_Tp>& pt, bool copyData=true); + + /** @overload + */ + template explicit Mat(const Point3_<_Tp>& pt, bool copyData=true); + + /** @overload + */ + template explicit Mat(const MatCommaInitializer_<_Tp>& commaInitializer); + + //! 
download data from GpuMat + explicit Mat(const cuda::GpuMat& m); + + //! destructor - calls release() + ~Mat(); + + /** @brief assignment operators + + These are available assignment operators. Since they all are very different, make sure to read the + operator parameters description. + @param m Assigned, right-hand-side matrix. Matrix assignment is an O(1) operation. This means that + no data is copied but the data is shared and the reference counter, if any, is incremented. Before + assigning new data, the old data is de-referenced via Mat::release . + */ + Mat& operator = (const Mat& m); + + /** @overload + @param expr Assigned matrix expression object. As opposite to the first form of the assignment + operation, the second form can reuse already allocated matrix if it has the right size and type to + fit the matrix expression result. It is automatically handled by the real function that the matrix + expressions is expanded to. For example, C=A+B is expanded to add(A, B, C), and add takes care of + automatic C reallocation. + */ + Mat& operator = (const MatExpr& expr); + + //! retrieve UMat from Mat + UMat getUMat(int accessFlags, UMatUsageFlags usageFlags = USAGE_DEFAULT) const; + + /** @brief Creates a matrix header for the specified matrix row. + + The method makes a new header for the specified matrix row and returns it. This is an O(1) + operation, regardless of the matrix size. The underlying data of the new matrix is shared with the + original matrix. Here is the example of one of the classical basic matrix processing operations, + axpy, used by LU and many other algorithms: + @code + inline void matrix_axpy(Mat& A, int i, int j, double alpha) + { + A.row(i) += A.row(j)*alpha; + } + @endcode + @note In the current implementation, the following code does not work as expected: + @code + Mat A; + ... + A.row(i) = A.row(j); // will not work + @endcode + This happens because A.row(i) forms a temporary header that is further assigned to another header. + Remember that each of these operations is O(1), that is, no data is copied. Thus, the above + assignment is not true if you may have expected the j-th row to be copied to the i-th row. To + achieve that, you should either turn this simple assignment into an expression or use the + Mat::copyTo method: + @code + Mat A; + ... + // works, but looks a bit obscure. + A.row(i) = A.row(j) + 0; + // this is a bit longer, but the recommended method. + A.row(j).copyTo(A.row(i)); + @endcode + @param y A 0-based row index. + */ + Mat row(int y) const; + + /** @brief Creates a matrix header for the specified matrix column. + + The method makes a new header for the specified matrix column and returns it. This is an O(1) + operation, regardless of the matrix size. The underlying data of the new matrix is shared with the + original matrix. See also the Mat::row description. + @param x A 0-based column index. + */ + Mat col(int x) const; + + /** @brief Creates a matrix header for the specified row span. + + The method makes a new header for the specified row span of the matrix. Similarly to Mat::row and + Mat::col , this is an O(1) operation. + @param startrow An inclusive 0-based start index of the row span. + @param endrow An exclusive 0-based ending index of the row span. + */ + Mat rowRange(int startrow, int endrow) const; + + /** @overload + @param r Range structure containing both the start and the end indices. + */ + Mat rowRange(const Range& r) const; + + /** @brief Creates a matrix header for the specified column span. 
+ + The method makes a new header for the specified column span of the matrix. Similarly to Mat::row and + Mat::col , this is an O(1) operation. + @param startcol An inclusive 0-based start index of the column span. + @param endcol An exclusive 0-based ending index of the column span. + */ + Mat colRange(int startcol, int endcol) const; + + /** @overload + @param r Range structure containing both the start and the end indices. + */ + Mat colRange(const Range& r) const; + + /** @brief Extracts a diagonal from a matrix + + The method makes a new header for the specified matrix diagonal. The new matrix is represented as a + single-column matrix. Similarly to Mat::row and Mat::col, this is an O(1) operation. + @param d index of the diagonal, with the following values: + - `d=0` is the main diagonal. + - `d<0` is a diagonal from the lower half. For example, d=-1 means the diagonal is set + immediately below the main one. + - `d>0` is a diagonal from the upper half. For example, d=1 means the diagonal is set + immediately above the main one. + For example: + @code + Mat m = (Mat_(3,3) << + 1,2,3, + 4,5,6, + 7,8,9); + Mat d0 = m.diag(0); + Mat d1 = m.diag(1); + Mat d_1 = m.diag(-1); + @endcode + The resulting matrices are + @code + d0 = + [1; + 5; + 9] + d1 = + [2; + 6] + d_1 = + [4; + 8] + @endcode + */ + Mat diag(int d=0) const; + + /** @brief creates a diagonal matrix + + The method creates a square diagonal matrix from specified main diagonal. + @param d One-dimensional matrix that represents the main diagonal. + */ + CV_NODISCARD_STD static Mat diag(const Mat& d); + + /** @brief Creates a full copy of the array and the underlying data. + + The method creates a full copy of the array. The original step[] is not taken into account. So, the + array copy is a continuous array occupying total()*elemSize() bytes. + */ + CV_NODISCARD_STD Mat clone() const; + + /** @brief Copies the matrix to another one. + + The method copies the matrix data to another matrix. Before copying the data, the method invokes : + @code + m.create(this->size(), this->type()); + @endcode + so that the destination matrix is reallocated if needed. While m.copyTo(m); works flawlessly, the + function does not handle the case of a partial overlap between the source and the destination + matrices. + + When the operation mask is specified, if the Mat::create call shown above reallocates the matrix, + the newly allocated matrix is initialized with all zeros before copying the data. + @param m Destination matrix. If it does not have a proper size or type before the operation, it is + reallocated. + */ + void copyTo( OutputArray m ) const; + + /** @overload + @param m Destination matrix. If it does not have a proper size or type before the operation, it is + reallocated. + @param mask Operation mask of the same size as \*this. Its non-zero elements indicate which matrix + elements need to be copied. The mask has to be of type CV_8U and can have 1 or multiple channels. + */ + void copyTo( OutputArray m, InputArray mask ) const; + + /** @brief Converts an array to another data type with optional scaling. + + The method converts source pixel values to the target data type. saturate_cast\<\> is applied at + the end to avoid possible overflows: + + \f[m(x,y) = saturate \_ cast( \alpha (*this)(x,y) + \beta )\f] + @param m output matrix; if it does not have a proper size or type before the operation, it is + reallocated. 
+ @param rtype desired output matrix type or, rather, the depth since the number of channels are the + same as the input has; if rtype is negative, the output matrix will have the same type as the input. + @param alpha optional scale factor. + @param beta optional delta added to the scaled values. + */ + void convertTo( OutputArray m, int rtype, double alpha=1, double beta=0 ) const; + + /** @brief Provides a functional form of convertTo. + + This is an internally used method called by the @ref MatrixExpressions engine. + @param m Destination array. + @param type Desired destination array depth (or -1 if it should be the same as the source type). + */ + void assignTo( Mat& m, int type=-1 ) const; + + /** @brief Sets all or some of the array elements to the specified value. + @param s Assigned scalar converted to the actual array type. + */ + Mat& operator = (const Scalar& s); + + /** @brief Sets all or some of the array elements to the specified value. + + This is an advanced variant of the Mat::operator=(const Scalar& s) operator. + @param value Assigned scalar converted to the actual array type. + @param mask Operation mask of the same size as \*this. Its non-zero elements indicate which matrix + elements need to be copied. The mask has to be of type CV_8U and can have 1 or multiple channels + */ + Mat& setTo(InputArray value, InputArray mask=noArray()); + + /** @brief Changes the shape and/or the number of channels of a 2D matrix without copying the data. + + The method makes a new matrix header for \*this elements. The new matrix may have a different size + and/or different number of channels. Any combination is possible if: + - No extra elements are included into the new matrix and no elements are excluded. Consequently, + the product rows\*cols\*channels() must stay the same after the transformation. + - No data is copied. That is, this is an O(1) operation. Consequently, if you change the number of + rows, or the operation changes the indices of elements row in some other way, the matrix must be + continuous. See Mat::isContinuous . + + For example, if there is a set of 3D points stored as an STL vector, and you want to represent the + points as a 3xN matrix, do the following: + @code + std::vector vec; + ... + Mat pointMat = Mat(vec). // convert vector to Mat, O(1) operation + reshape(1). // make Nx3 1-channel matrix out of Nx1 3-channel. + // Also, an O(1) operation + t(); // finally, transpose the Nx3 matrix. + // This involves copying all the elements + @endcode + @param cn New number of channels. If the parameter is 0, the number of channels remains the same. + @param rows New number of rows. If the parameter is 0, the number of rows remains the same. + */ + Mat reshape(int cn, int rows=0) const; + + /** @overload */ + Mat reshape(int cn, int newndims, const int* newsz) const; + + /** @overload */ + Mat reshape(int cn, const std::vector& newshape) const; + + /** @brief Transposes a matrix. + + The method performs matrix transposition by means of matrix expressions. It does not perform the + actual transposition but returns a temporary matrix transposition object that can be further used as + a part of more complex matrix expressions or can be assigned to a matrix: + @code + Mat A1 = A + Mat::eye(A.size(), A.type())*lambda; + Mat C = A1.t()*A1; // compute (A + lambda*I)^t * (A + lamda*I) + @endcode + */ + MatExpr t() const; + + /** @brief Inverses a matrix. + + The method performs a matrix inversion by means of matrix expressions. 
This means that a temporary + matrix inversion object is returned by the method and can be used further as a part of more complex + matrix expressions or can be assigned to a matrix. + @param method Matrix inversion method. One of cv::DecompTypes + */ + MatExpr inv(int method=DECOMP_LU) const; + + /** @brief Performs an element-wise multiplication or division of the two matrices. + + The method returns a temporary object encoding per-element array multiplication, with optional + scale. Note that this is not a matrix multiplication that corresponds to a simpler "\*" operator. + + Example: + @code + Mat C = A.mul(5/B); // equivalent to divide(A, B, C, 5) + @endcode + @param m Another array of the same type and the same size as \*this, or a matrix expression. + @param scale Optional scale factor. + */ + MatExpr mul(InputArray m, double scale=1) const; + + /** @brief Computes a cross-product of two 3-element vectors. + + The method computes a cross-product of two 3-element vectors. The vectors must be 3-element + floating-point vectors of the same shape and size. The result is another 3-element vector of the + same shape and type as operands. + @param m Another cross-product operand. + */ + Mat cross(InputArray m) const; + + /** @brief Computes a dot-product of two vectors. + + The method computes a dot-product of two matrices. If the matrices are not single-column or + single-row vectors, the top-to-bottom left-to-right scan ordering is used to treat them as 1D + vectors. The vectors must have the same size and type. If the matrices have more than one channel, + the dot products from all the channels are summed together. + @param m another dot-product operand. + */ + double dot(InputArray m) const; + + /** @brief Returns a zero array of the specified size and type. + + The method returns a Matlab-style zero array initializer. It can be used to quickly form a constant + array as a function parameter, part of a matrix expression, or as a matrix initializer: + @code + Mat A; + A = Mat::zeros(3, 3, CV_32F); + @endcode + In the example above, a new matrix is allocated only if A is not a 3x3 floating-point matrix. + Otherwise, the existing matrix A is filled with zeros. + @param rows Number of rows. + @param cols Number of columns. + @param type Created matrix type. + */ + CV_NODISCARD_STD static MatExpr zeros(int rows, int cols, int type); + + /** @overload + @param size Alternative to the matrix size specification Size(cols, rows) . + @param type Created matrix type. + */ + CV_NODISCARD_STD static MatExpr zeros(Size size, int type); + + /** @overload + @param ndims Array dimensionality. + @param sz Array of integers specifying the array shape. + @param type Created matrix type. + */ + CV_NODISCARD_STD static MatExpr zeros(int ndims, const int* sz, int type); + + /** @brief Returns an array of all 1's of the specified size and type. + + The method returns a Matlab-style 1's array initializer, similarly to Mat::zeros. Note that using + this method you can initialize an array with an arbitrary value, using the following Matlab idiom: + @code + Mat A = Mat::ones(100, 100, CV_8U)*3; // make 100x100 matrix filled with 3. + @endcode + The above operation does not form a 100x100 matrix of 1's and then multiply it by 3. Instead, it + just remembers the scale factor (3 in this case) and use it when actually invoking the matrix + initializer. + @note In case of multi-channels type, only the first channel will be initialized with 1's, the + others will be set to 0's. + @param rows Number of rows. 
+ @param cols Number of columns. + @param type Created matrix type. + */ + CV_NODISCARD_STD static MatExpr ones(int rows, int cols, int type); + + /** @overload + @param size Alternative to the matrix size specification Size(cols, rows) . + @param type Created matrix type. + */ + CV_NODISCARD_STD static MatExpr ones(Size size, int type); + + /** @overload + @param ndims Array dimensionality. + @param sz Array of integers specifying the array shape. + @param type Created matrix type. + */ + CV_NODISCARD_STD static MatExpr ones(int ndims, const int* sz, int type); + + /** @brief Returns an identity matrix of the specified size and type. + + The method returns a Matlab-style identity matrix initializer, similarly to Mat::zeros. Similarly to + Mat::ones, you can use a scale operation to create a scaled identity matrix efficiently: + @code + // make a 4x4 diagonal matrix with 0.1's on the diagonal. + Mat A = Mat::eye(4, 4, CV_32F)*0.1; + @endcode + @note In case of multi-channels type, identity matrix will be initialized only for the first channel, + the others will be set to 0's + @param rows Number of rows. + @param cols Number of columns. + @param type Created matrix type. + */ + CV_NODISCARD_STD static MatExpr eye(int rows, int cols, int type); + + /** @overload + @param size Alternative matrix size specification as Size(cols, rows) . + @param type Created matrix type. + */ + CV_NODISCARD_STD static MatExpr eye(Size size, int type); + + /** @brief Allocates new array data if needed. + + This is one of the key Mat methods. Most new-style OpenCV functions and methods that produce arrays + call this method for each output array. The method uses the following algorithm: + + -# If the current array shape and the type match the new ones, return immediately. Otherwise, + de-reference the previous data by calling Mat::release. + -# Initialize the new header. + -# Allocate the new data of total()\*elemSize() bytes. + -# Allocate the new, associated with the data, reference counter and set it to 1. + + Such a scheme makes the memory management robust and efficient at the same time and helps avoid + extra typing for you. This means that usually there is no need to explicitly allocate output arrays. + That is, instead of writing: + @code + Mat color; + ... + Mat gray(color.rows, color.cols, color.depth()); + cvtColor(color, gray, COLOR_BGR2GRAY); + @endcode + you can simply write: + @code + Mat color; + ... + Mat gray; + cvtColor(color, gray, COLOR_BGR2GRAY); + @endcode + because cvtColor, as well as the most of OpenCV functions, calls Mat::create() for the output array + internally. + @param rows New number of rows. + @param cols New number of columns. + @param type New matrix type. + */ + void create(int rows, int cols, int type); + + /** @overload + @param size Alternative new matrix size specification: Size(cols, rows) + @param type New matrix type. + */ + void create(Size size, int type); + + /** @overload + @param ndims New array dimensionality. + @param sizes Array of integers specifying a new array shape. + @param type New matrix type. + */ + void create(int ndims, const int* sizes, int type); + + /** @overload + @param sizes Array of integers specifying a new array shape. + @param type New matrix type. + */ + void create(const std::vector& sizes, int type); + + /** @brief Increments the reference counter. + + The method increments the reference counter associated with the matrix data. 
If the matrix header + points to an external data set (see Mat::Mat ), the reference counter is NULL, and the method has no + effect in this case. Normally, to avoid memory leaks, the method should not be called explicitly. It + is called implicitly by the matrix assignment operator. The reference counter increment is an atomic + operation on the platforms that support it. Thus, it is safe to operate on the same matrices + asynchronously in different threads. + */ + void addref(); + + /** @brief Decrements the reference counter and deallocates the matrix if needed. + + The method decrements the reference counter associated with the matrix data. When the reference + counter reaches 0, the matrix data is deallocated and the data and the reference counter pointers + are set to NULL's. If the matrix header points to an external data set (see Mat::Mat ), the + reference counter is NULL, and the method has no effect in this case. + + This method can be called manually to force the matrix data deallocation. But since this method is + automatically called in the destructor, or by any other method that changes the data pointer, it is + usually not needed. The reference counter decrement and check for 0 is an atomic operation on the + platforms that support it. Thus, it is safe to operate on the same matrices asynchronously in + different threads. + */ + void release(); + + //! internal use function, consider to use 'release' method instead; deallocates the matrix data + void deallocate(); + //! internal use function; properly re-allocates _size, _step arrays + void copySize(const Mat& m); + + /** @brief Reserves space for the certain number of rows. + + The method reserves space for sz rows. If the matrix already has enough space to store sz rows, + nothing happens. If the matrix is reallocated, the first Mat::rows rows are preserved. The method + emulates the corresponding method of the STL vector class. + @param sz Number of rows. + */ + void reserve(size_t sz); + + /** @brief Reserves space for the certain number of bytes. + + The method reserves space for sz bytes. If the matrix already has enough space to store sz bytes, + nothing happens. If matrix has to be reallocated its previous content could be lost. + @param sz Number of bytes. + */ + void reserveBuffer(size_t sz); + + /** @brief Changes the number of matrix rows. + + The methods change the number of matrix rows. If the matrix is reallocated, the first + min(Mat::rows, sz) rows are preserved. The methods emulate the corresponding methods of the STL + vector class. + @param sz New number of rows. + */ + void resize(size_t sz); + + /** @overload + @param sz New number of rows. + @param s Value assigned to the newly added elements. + */ + void resize(size_t sz, const Scalar& s); + + //! internal function + void push_back_(const void* elem); + + /** @brief Adds elements to the bottom of the matrix. + + The methods add one or more elements to the bottom of the matrix. They emulate the corresponding + method of the STL vector class. When elem is Mat , its type and the number of columns must be the + same as in the container matrix. + @param elem Added element(s). + */ + template void push_back(const _Tp& elem); + + /** @overload + @param elem Added element(s). + */ + template void push_back(const Mat_<_Tp>& elem); + + /** @overload + @param elem Added element(s). + */ + template void push_back(const std::vector<_Tp>& elem); + + /** @overload + @param m Added line(s). 
+ */ + void push_back(const Mat& m); + + /** @brief Removes elements from the bottom of the matrix. + + The method removes one or more rows from the bottom of the matrix. + @param nelems Number of removed rows. If it is greater than the total number of rows, an exception + is thrown. + */ + void pop_back(size_t nelems=1); + + /** @brief Locates the matrix header within a parent matrix. + + After you extracted a submatrix from a matrix using Mat::row, Mat::col, Mat::rowRange, + Mat::colRange, and others, the resultant submatrix points just to the part of the original big + matrix. However, each submatrix contains information (represented by datastart and dataend + fields) that helps reconstruct the original matrix size and the position of the extracted + submatrix within the original matrix. The method locateROI does exactly that. + @param wholeSize Output parameter that contains the size of the whole matrix containing *this* + as a part. + @param ofs Output parameter that contains an offset of *this* inside the whole matrix. + */ + void locateROI( Size& wholeSize, Point& ofs ) const; + + /** @brief Adjusts a submatrix size and position within the parent matrix. + + The method is complimentary to Mat::locateROI . The typical use of these functions is to determine + the submatrix position within the parent matrix and then shift the position somehow. Typically, it + can be required for filtering operations when pixels outside of the ROI should be taken into + account. When all the method parameters are positive, the ROI needs to grow in all directions by the + specified amount, for example: + @code + A.adjustROI(2, 2, 2, 2); + @endcode + In this example, the matrix size is increased by 4 elements in each direction. The matrix is shifted + by 2 elements to the left and 2 elements up, which brings in all the necessary pixels for the + filtering with the 5x5 kernel. + + adjustROI forces the adjusted ROI to be inside of the parent matrix that is boundaries of the + adjusted ROI are constrained by boundaries of the parent matrix. For example, if the submatrix A is + located in the first row of a parent matrix and you called A.adjustROI(2, 2, 2, 2) then A will not + be increased in the upward direction. + + The function is used internally by the OpenCV filtering functions, like filter2D , morphological + operations, and so on. + @param dtop Shift of the top submatrix boundary upwards. + @param dbottom Shift of the bottom submatrix boundary downwards. + @param dleft Shift of the left submatrix boundary to the left. + @param dright Shift of the right submatrix boundary to the right. + @sa copyMakeBorder + */ + Mat& adjustROI( int dtop, int dbottom, int dleft, int dright ); + + /** @brief Extracts a rectangular submatrix. + + The operators make a new header for the specified sub-array of \*this . They are the most + generalized forms of Mat::row, Mat::col, Mat::rowRange, and Mat::colRange . For example, + `A(Range(0, 10), Range::all())` is equivalent to `A.rowRange(0, 10)`. Similarly to all of the above, + the operators are O(1) operations, that is, no matrix data is copied. + @param rowRange Start and end row of the extracted submatrix. The upper boundary is not included. To + select all the rows, use Range::all(). + @param colRange Start and end column of the extracted submatrix. The upper boundary is not included. + To select all the columns, use Range::all(). + */ + Mat operator()( Range rowRange, Range colRange ) const; + + /** @overload + @param roi Extracted submatrix specified as a rectangle. 
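+    A short usage sketch (illustrative only; `img` and `patch` are assumed local names, not part of
+    the API): the returned header shares pixels with the source, so writing through it modifies the
+    original matrix.
+    @code
+        Mat img(480, 640, CV_8UC3, Scalar::all(0));
+        Mat patch = img(Rect(10, 10, 100, 100)); // O(1), no pixels are copied
+        patch.setTo(Scalar(0, 255, 0));          // fills the corresponding region of img
+    @endcode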
+ */ + Mat operator()( const Rect& roi ) const; + + /** @overload + @param ranges Array of selected ranges along each array dimension. + */ + Mat operator()( const Range* ranges ) const; + + /** @overload + @param ranges Array of selected ranges along each array dimension. + */ + Mat operator()(const std::vector& ranges) const; + + // //! converts header to CvMat; no data is copied + // operator CvMat() const; + // //! converts header to CvMatND; no data is copied + // operator CvMatND() const; + // //! converts header to IplImage; no data is copied + // operator IplImage() const; + + template operator std::vector<_Tp>() const; + template operator Vec<_Tp, n>() const; + template operator Matx<_Tp, m, n>() const; + +#ifdef CV_CXX_STD_ARRAY + template operator std::array<_Tp, _Nm>() const; +#endif + + /** @brief Reports whether the matrix is continuous or not. + + The method returns true if the matrix elements are stored continuously without gaps at the end of + each row. Otherwise, it returns false. Obviously, 1x1 or 1xN matrices are always continuous. + Matrices created with Mat::create are always continuous. But if you extract a part of the matrix + using Mat::col, Mat::diag, and so on, or constructed a matrix header for externally allocated data, + such matrices may no longer have this property. + + The continuity flag is stored as a bit in the Mat::flags field and is computed automatically when + you construct a matrix header. Thus, the continuity check is a very fast operation, though + theoretically it could be done as follows: + @code + // alternative implementation of Mat::isContinuous() + bool myCheckMatContinuity(const Mat& m) + { + //return (m.flags & Mat::CONTINUOUS_FLAG) != 0; + return m.rows == 1 || m.step == m.cols*m.elemSize(); + } + @endcode + The method is used in quite a few of OpenCV functions. The point is that element-wise operations + (such as arithmetic and logical operations, math functions, alpha blending, color space + transformations, and others) do not depend on the image geometry. Thus, if all the input and output + arrays are continuous, the functions can process them as very long single-row vectors. 
The example below illustrates how an alpha-blending function can be implemented:
+    @code
+        template<typename T>
+        void alphaBlendRGBA(const Mat& src1, const Mat& src2, Mat& dst)
+        {
+            const float alpha_scale = (float)std::numeric_limits<T>::max(),
+                        inv_scale = 1.f/alpha_scale;
+
+            CV_Assert( src1.type() == src2.type() &&
+                       src1.type() == CV_MAKETYPE(traits::Depth<T>::value, 4) &&
+                       src1.size() == src2.size());
+            Size size = src1.size();
+            dst.create(size, src1.type());
+
+            // here is the idiom: check the arrays for continuity and,
+            // if this is the case,
+            // treat the arrays as 1D vectors
+            if( src1.isContinuous() && src2.isContinuous() && dst.isContinuous() )
+            {
+                size.width *= size.height;
+                size.height = 1;
+            }
+            size.width *= 4;
+
+            for( int i = 0; i < size.height; i++ )
+            {
+                // when the arrays are continuous,
+                // the outer loop is executed only once
+                const T* ptr1 = src1.ptr<T>(i);
+                const T* ptr2 = src2.ptr<T>(i);
+                T* dptr = dst.ptr<T>(i);
+
+                for( int j = 0; j < size.width; j += 4 )
+                {
+                    float alpha = ptr1[j+3]*inv_scale, beta = ptr2[j+3]*inv_scale;
+                    dptr[j] = saturate_cast<T>(ptr1[j]*alpha + ptr2[j]*beta);
+                    dptr[j+1] = saturate_cast<T>(ptr1[j+1]*alpha + ptr2[j+1]*beta);
+                    dptr[j+2] = saturate_cast<T>(ptr1[j+2]*alpha + ptr2[j+2]*beta);
+                    dptr[j+3] = saturate_cast<T>((1 - (1-alpha)*(1-beta))*alpha_scale);
+                }
+            }
+        }
+    @endcode
+    This approach, while being very simple, can boost the performance of a simple element-operation by
+    10-20 percent, especially if the image is rather small and the operation is quite simple.
+
+    Another OpenCV idiom in this function is the call of Mat::create for the destination array, which
+    allocates the destination array unless it already has the proper size and type. And while the newly
+    allocated arrays are always continuous, you still need to check the destination array because
+    Mat::create does not always allocate a new matrix.
+     */
+    bool isContinuous() const;
+
+    //! returns true if the matrix is a submatrix of another matrix
+    bool isSubmatrix() const;
+
+    /** @brief Returns the matrix element size in bytes.
+
+    The method returns the matrix element size in bytes. For example, if the matrix type is CV_16SC3 ,
+    the method returns 3\*sizeof(short) or 6.
+     */
+    size_t elemSize() const;
+
+    /** @brief Returns the size of each matrix element channel in bytes.
+
+    The method returns the matrix element channel size in bytes, that is, it ignores the number of
+    channels. For example, if the matrix type is CV_16SC3 , the method returns sizeof(short) or 2.
+     */
+    size_t elemSize1() const;
+
+    /** @brief Returns the type of a matrix element.
+
+    The method returns a matrix element type. This is an identifier compatible with the CvMat type
+    system, like CV_16SC3 or 16-bit signed 3-channel array, and so on.
+     */
+    int type() const;
+
+    /** @brief Returns the depth of a matrix element.
+
+    The method returns the identifier of the matrix element depth (the type of each individual channel).
+    For example, for a 16-bit signed element array, the method returns CV_16S .
A complete list of + matrix types contains the following values: + - CV_8U - 8-bit unsigned integers ( 0..255 ) + - CV_8S - 8-bit signed integers ( -128..127 ) + - CV_16U - 16-bit unsigned integers ( 0..65535 ) + - CV_16S - 16-bit signed integers ( -32768..32767 ) + - CV_32S - 32-bit signed integers ( -2147483648..2147483647 ) + - CV_32F - 32-bit floating-point numbers ( -FLT_MAX..FLT_MAX, INF, NAN ) + - CV_64F - 64-bit floating-point numbers ( -DBL_MAX..DBL_MAX, INF, NAN ) + */ + int depth() const; + + /** @brief Returns the number of matrix channels. + + The method returns the number of matrix channels. + */ + int channels() const; + + /** @brief Returns a normalized step. + + The method returns a matrix step divided by Mat::elemSize1() . It can be useful to quickly access an + arbitrary matrix element. + */ + size_t step1(int i=0) const; + + /** @brief Returns true if the array has no elements. + + The method returns true if Mat::total() is 0 or if Mat::data is NULL. Because of pop_back() and + resize() methods `M.total() == 0` does not imply that `M.data == NULL`. + */ + bool empty() const; + + /** @brief Returns the total number of array elements. + + The method returns the number of array elements (a number of pixels if the array represents an + image). + */ + size_t total() const; + + /** @brief Returns the total number of array elements. + + The method returns the number of elements within a certain sub-array slice with startDim <= dim < endDim + */ + size_t total(int startDim, int endDim=INT_MAX) const; + + /** + * @param elemChannels Number of channels or number of columns the matrix should have. + * For a 2-D matrix, when the matrix has only 1 column, then it should have + * elemChannels channels; When the matrix has only 1 channel, + * then it should have elemChannels columns. + * For a 3-D matrix, it should have only one channel. Furthermore, + * if the number of planes is not one, then the number of rows + * within every plane has to be 1; if the number of rows within + * every plane is not 1, then the number of planes has to be 1. + * @param depth The depth the matrix should have. Set it to -1 when any depth is fine. + * @param requireContinuous Set it to true to require the matrix to be continuous + * @return -1 if the requirement is not satisfied. + * Otherwise, it returns the number of elements in the matrix. Note + * that an element may have multiple channels. + * + * The following code demonstrates its usage for a 2-d matrix: + * @snippet snippets/core_mat_checkVector.cpp example-2d + * + * The following code demonstrates its usage for a 3-d matrix: + * @snippet snippets/core_mat_checkVector.cpp example-3d + */ + int checkVector(int elemChannels, int depth=-1, bool requireContinuous=true) const; + + /** @brief Returns a pointer to the specified matrix row. + + The methods return `uchar*` or typed pointer to the specified matrix row. See the sample in + Mat::isContinuous to know how to use these methods. + @param i0 A 0-based row index. 
+     */
+    uchar* ptr(int i0=0);
+    /** @overload */
+    const uchar* ptr(int i0=0) const;
+
+    /** @overload
+    @param row Index along the dimension 0
+    @param col Index along the dimension 1
+    */
+    uchar* ptr(int row, int col);
+    /** @overload
+    @param row Index along the dimension 0
+    @param col Index along the dimension 1
+    */
+    const uchar* ptr(int row, int col) const;
+
+    /** @overload */
+    uchar* ptr(int i0, int i1, int i2);
+    /** @overload */
+    const uchar* ptr(int i0, int i1, int i2) const;
+
+    /** @overload */
+    uchar* ptr(const int* idx);
+    /** @overload */
+    const uchar* ptr(const int* idx) const;
+    /** @overload */
+    template<int n> uchar* ptr(const Vec<int, n>& idx);
+    /** @overload */
+    template<int n> const uchar* ptr(const Vec<int, n>& idx) const;
+
+    /** @overload */
+    template<typename _Tp> _Tp* ptr(int i0=0);
+    /** @overload */
+    template<typename _Tp> const _Tp* ptr(int i0=0) const;
+    /** @overload
+    @param row Index along the dimension 0
+    @param col Index along the dimension 1
+    */
+    template<typename _Tp> _Tp* ptr(int row, int col);
+    /** @overload
+    @param row Index along the dimension 0
+    @param col Index along the dimension 1
+    */
+    template<typename _Tp> const _Tp* ptr(int row, int col) const;
+    /** @overload */
+    template<typename _Tp> _Tp* ptr(int i0, int i1, int i2);
+    /** @overload */
+    template<typename _Tp> const _Tp* ptr(int i0, int i1, int i2) const;
+    /** @overload */
+    template<typename _Tp> _Tp* ptr(const int* idx);
+    /** @overload */
+    template<typename _Tp> const _Tp* ptr(const int* idx) const;
+    /** @overload */
+    template<typename _Tp, int n> _Tp* ptr(const Vec<int, n>& idx);
+    /** @overload */
+    template<typename _Tp, int n> const _Tp* ptr(const Vec<int, n>& idx) const;
+
+    /** @brief Returns a reference to the specified array element.
+
+    The template methods return a reference to the specified array element. For the sake of higher
+    performance, the index range checks are only performed in the Debug configuration.
+
+    Note that the variants with a single index (i) can be used to access elements of single-row or
+    single-column 2-dimensional arrays. That is, if, for example, A is a 1 x N floating-point matrix and
+    B is an M x 1 integer matrix, you can simply write `A.at<float>(k+4)` and `B.at<int>(2*i+1)`
+    instead of `A.at<float>(0,k+4)` and `B.at<int>(2*i+1,0)`, respectively.
+
+    The example below initializes a Hilbert matrix:
+    @code
+        Mat H(100, 100, CV_64F);
+        for(int i = 0; i < H.rows; i++)
+            for(int j = 0; j < H.cols; j++)
+                H.at<double>(i,j)=1./(i+j+1);
+    @endcode
+
+    Keep in mind that the size identifier used in the at operator cannot be chosen at random. It depends
+    on the image from which you are trying to retrieve the data. The table below gives a better insight
+    in this (a multi-channel sketch follows the list):
+     - If matrix is of type `CV_8U` then use `Mat.at<uchar>(y,x)`.
+     - If matrix is of type `CV_8S` then use `Mat.at<schar>(y,x)`.
+     - If matrix is of type `CV_16U` then use `Mat.at<ushort>(y,x)`.
+     - If matrix is of type `CV_16S` then use `Mat.at<short>(y,x)`.
+     - If matrix is of type `CV_32S` then use `Mat.at<int>(y,x)`.
+     - If matrix is of type `CV_32F` then use `Mat.at<float>(y,x)`.
+     - If matrix is of type `CV_64F` then use `Mat.at<double>(y,x)`.
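+
+    As a small illustrative sketch (not part of the original header documentation), multi-channel
+    matrices use the corresponding Vec type as the element type; for a CV_8UC3 image the element is
+    a Vec3b:
+    @code
+        // hypothetical example: set one pixel of a 3-channel 8-bit image to blue (BGR order)
+        Mat img(10, 10, CV_8UC3, Scalar::all(0));
+        Vec3b& px = img.at<Vec3b>(4, 7);   // reference to the element at row 4, column 7
+        px = Vec3b(255, 0, 0);
+    @endcode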
+ + @param i0 Index along the dimension 0 + */ + template _Tp& at(int i0=0); + /** @overload + @param i0 Index along the dimension 0 + */ + template const _Tp& at(int i0=0) const; + /** @overload + @param row Index along the dimension 0 + @param col Index along the dimension 1 + */ + template _Tp& at(int row, int col); + /** @overload + @param row Index along the dimension 0 + @param col Index along the dimension 1 + */ + template const _Tp& at(int row, int col) const; + + /** @overload + @param i0 Index along the dimension 0 + @param i1 Index along the dimension 1 + @param i2 Index along the dimension 2 + */ + template _Tp& at(int i0, int i1, int i2); + /** @overload + @param i0 Index along the dimension 0 + @param i1 Index along the dimension 1 + @param i2 Index along the dimension 2 + */ + template const _Tp& at(int i0, int i1, int i2) const; + + /** @overload + @param idx Array of Mat::dims indices. + */ + template _Tp& at(const int* idx); + /** @overload + @param idx Array of Mat::dims indices. + */ + template const _Tp& at(const int* idx) const; + + /** @overload */ + template _Tp& at(const Vec& idx); + /** @overload */ + template const _Tp& at(const Vec& idx) const; + + /** @overload + special versions for 2D arrays (especially convenient for referencing image pixels) + @param pt Element position specified as Point(j,i) . + */ + template _Tp& at(Point pt); + /** @overload + special versions for 2D arrays (especially convenient for referencing image pixels) + @param pt Element position specified as Point(j,i) . + */ + template const _Tp& at(Point pt) const; + + /** @brief Returns the matrix iterator and sets it to the first matrix element. + + The methods return the matrix read-only or read-write iterators. The use of matrix iterators is very + similar to the use of bi-directional STL iterators. In the example below, the alpha blending + function is rewritten using the matrix iterators: + @code + template + void alphaBlendRGBA(const Mat& src1, const Mat& src2, Mat& dst) + { + typedef Vec VT; + + const float alpha_scale = (float)std::numeric_limits::max(), + inv_scale = 1.f/alpha_scale; + + CV_Assert( src1.type() == src2.type() && + src1.type() == traits::Type::value && + src1.size() == src2.size()); + Size size = src1.size(); + dst.create(size, src1.type()); + + MatConstIterator_ it1 = src1.begin(), it1_end = src1.end(); + MatConstIterator_ it2 = src2.begin(); + MatIterator_ dst_it = dst.begin(); + + for( ; it1 != it1_end; ++it1, ++it2, ++dst_it ) + { + VT pix1 = *it1, pix2 = *it2; + float alpha = pix1[3]*inv_scale, beta = pix2[3]*inv_scale; + *dst_it = VT(saturate_cast(pix1[0]*alpha + pix2[0]*beta), + saturate_cast(pix1[1]*alpha + pix2[1]*beta), + saturate_cast(pix1[2]*alpha + pix2[2]*beta), + saturate_cast((1 - (1-alpha)*(1-beta))*alpha_scale)); + } + } + @endcode + */ + template MatIterator_<_Tp> begin(); + template MatConstIterator_<_Tp> begin() const; + + /** @brief Returns the matrix iterator and sets it to the after-last matrix element. + + The methods return the matrix read-only or read-write iterators, set to the point following the last + matrix element. + */ + template MatIterator_<_Tp> end(); + template MatConstIterator_<_Tp> end() const; + + /** @brief Runs the given functor over all matrix elements in parallel. + + The operation passed as argument has to be a function pointer, a function object or a lambda(C++11). + + Example 1. 
All of the operations below put 0xFF the first channel of all matrix elements: + @code + Mat image(1920, 1080, CV_8UC3); + typedef cv::Point3_ Pixel; + + // first. raw pointer access. + for (int r = 0; r < image.rows; ++r) { + Pixel* ptr = image.ptr(r, 0); + const Pixel* ptr_end = ptr + image.cols; + for (; ptr != ptr_end; ++ptr) { + ptr->x = 255; + } + } + + // Using MatIterator. (Simple but there are a Iterator's overhead) + for (Pixel &p : cv::Mat_(image)) { + p.x = 255; + } + + // Parallel execution with function object. + struct Operator { + void operator ()(Pixel &pixel, const int * position) { + pixel.x = 255; + } + }; + image.forEach(Operator()); + + // Parallel execution using C++11 lambda. + image.forEach([](Pixel &p, const int * position) -> void { + p.x = 255; + }); + @endcode + Example 2. Using the pixel's position: + @code + // Creating 3D matrix (255 x 255 x 255) typed uint8_t + // and initialize all elements by the value which equals elements position. + // i.e. pixels (x,y,z) = (1,2,3) is (b,g,r) = (1,2,3). + + int sizes[] = { 255, 255, 255 }; + typedef cv::Point3_ Pixel; + + Mat_ image = Mat::zeros(3, sizes, CV_8UC3); + + image.forEach([&](Pixel& pixel, const int position[]) -> void { + pixel.x = position[0]; + pixel.y = position[1]; + pixel.z = position[2]; + }); + @endcode + */ + template void forEach(const Functor& operation); + /** @overload */ + template void forEach(const Functor& operation) const; + +#ifdef CV_CXX_MOVE_SEMANTICS + Mat(Mat&& m); + Mat& operator = (Mat&& m); +#endif + + enum { MAGIC_VAL = 0x42FF0000, AUTO_STEP = 0, CONTINUOUS_FLAG = CV_MAT_CONT_FLAG, SUBMATRIX_FLAG = CV_SUBMAT_FLAG }; + enum { MAGIC_MASK = 0xFFFF0000, TYPE_MASK = 0x00000FFF, DEPTH_MASK = 7 }; + + /*! includes several bit-fields: + - the magic signature + - continuity flag + - depth + - number of channels + */ + int flags; + //! the matrix dimensionality, >= 2 + int dims; + //! the number of rows and columns or (-1, -1) when the matrix has more than 2 dimensions + int rows, cols; + //! pointer to the data + uchar* data; + + //! helper fields used in locateROI and adjustROI + const uchar* datastart; + const uchar* dataend; + const uchar* datalimit; + + //! custom allocator + MatAllocator* allocator; + //! and the standard allocator + static MatAllocator* getStdAllocator(); + static MatAllocator* getDefaultAllocator(); + static void setDefaultAllocator(MatAllocator* allocator); + + //! internal use method: updates the continuity flag + void updateContinuityFlag(); + + //! interaction with UMat + UMatData* u; + + MatSize size; + MatStep step; + +protected: + template void forEach_impl(const Functor& operation); +}; + + +///////////////////////////////// Mat_<_Tp> //////////////////////////////////// + +/** @brief Template matrix class derived from Mat + +@code{.cpp} + template class Mat_ : public Mat + { + public: + // ... some specific methods + // and + // no new extra fields + }; +@endcode +The class `Mat_<_Tp>` is a *thin* template wrapper on top of the Mat class. It does not have any +extra data fields. Nor this class nor Mat has any virtual methods. Thus, references or pointers to +these two classes can be freely but carefully converted one to another. For example: +@code{.cpp} + // create a 100x100 8-bit matrix + Mat M(100,100,CV_8U); + // this will be compiled fine. no any data conversion will be done. 
+ Mat_& M1 = (Mat_&)M; + // the program is likely to crash at the statement below + M1(99,99) = 1.f; +@endcode +While Mat is sufficient in most cases, Mat_ can be more convenient if you use a lot of element +access operations and if you know matrix type at the compilation time. Note that +`Mat::at(int y,int x)` and `Mat_::operator()(int y,int x)` do absolutely the same +and run at the same speed, but the latter is certainly shorter: +@code{.cpp} + Mat_ M(20,20); + for(int i = 0; i < M.rows; i++) + for(int j = 0; j < M.cols; j++) + M(i,j) = 1./(i+j+1); + Mat E, V; + eigen(M,E,V); + cout << E.at(0,0)/E.at(M.rows-1,0); +@endcode +To use Mat_ for multi-channel images/matrices, pass Vec as a Mat_ parameter: +@code{.cpp} + // allocate a 320x240 color image and fill it with green (in RGB space) + Mat_ img(240, 320, Vec3b(0,255,0)); + // now draw a diagonal white line + for(int i = 0; i < 100; i++) + img(i,i)=Vec3b(255,255,255); + // and now scramble the 2nd (red) channel of each pixel + for(int i = 0; i < img.rows; i++) + for(int j = 0; j < img.cols; j++) + img(i,j)[2] ^= (uchar)(i ^ j); +@endcode +Mat_ is fully compatible with C++11 range-based for loop. For example such loop +can be used to safely apply look-up table: +@code{.cpp} +void applyTable(Mat_& I, const uchar* const table) +{ + for(auto& pixel : I) + { + pixel = table[pixel]; + } +} +@endcode + */ +template class Mat_ : public Mat +{ +public: + typedef _Tp value_type; + typedef typename DataType<_Tp>::channel_type channel_type; + typedef MatIterator_<_Tp> iterator; + typedef MatConstIterator_<_Tp> const_iterator; + + //! default constructor + Mat_() CV_NOEXCEPT; + //! equivalent to Mat(_rows, _cols, DataType<_Tp>::type) + Mat_(int _rows, int _cols); + //! constructor that sets each matrix element to specified value + Mat_(int _rows, int _cols, const _Tp& value); + //! equivalent to Mat(_size, DataType<_Tp>::type) + explicit Mat_(Size _size); + //! constructor that sets each matrix element to specified value + Mat_(Size _size, const _Tp& value); + //! n-dim array constructor + Mat_(int _ndims, const int* _sizes); + //! n-dim array constructor that sets each matrix element to specified value + Mat_(int _ndims, const int* _sizes, const _Tp& value); + //! copy/conversion constructor. If m is of different type, it's converted + Mat_(const Mat& m); + //! copy constructor + Mat_(const Mat_& m); + //! constructs a matrix on top of user-allocated data. step is in bytes(!!!), regardless of the type + Mat_(int _rows, int _cols, _Tp* _data, size_t _step=AUTO_STEP); + //! constructs n-dim matrix on top of user-allocated data. steps are in bytes(!!!), regardless of the type + Mat_(int _ndims, const int* _sizes, _Tp* _data, const size_t* _steps=0); + //! selects a submatrix + Mat_(const Mat_& m, const Range& rowRange, const Range& colRange=Range::all()); + //! selects a submatrix + Mat_(const Mat_& m, const Rect& roi); + //! selects a submatrix, n-dim version + Mat_(const Mat_& m, const Range* ranges); + //! selects a submatrix, n-dim version + Mat_(const Mat_& m, const std::vector& ranges); + //! from a matrix expression + explicit Mat_(const MatExpr& e); + //! makes a matrix out of Vec, std::vector, Point_ or Point3_. 
The matrix will have a single column + explicit Mat_(const std::vector<_Tp>& vec, bool copyData=false); + template explicit Mat_(const Vec::channel_type, n>& vec, bool copyData=true); + template explicit Mat_(const Matx::channel_type, m, n>& mtx, bool copyData=true); + explicit Mat_(const Point_::channel_type>& pt, bool copyData=true); + explicit Mat_(const Point3_::channel_type>& pt, bool copyData=true); + explicit Mat_(const MatCommaInitializer_<_Tp>& commaInitializer); + +#ifdef CV_CXX11 + Mat_(std::initializer_list<_Tp> values); + explicit Mat_(const std::initializer_list sizes, const std::initializer_list<_Tp> values); +#endif + +#ifdef CV_CXX_STD_ARRAY + template explicit Mat_(const std::array<_Tp, _Nm>& arr, bool copyData=false); +#endif + + Mat_& operator = (const Mat& m); + Mat_& operator = (const Mat_& m); + //! set all the elements to s. + Mat_& operator = (const _Tp& s); + //! assign a matrix expression + Mat_& operator = (const MatExpr& e); + + //! iterators; they are smart enough to skip gaps in the end of rows + iterator begin(); + iterator end(); + const_iterator begin() const; + const_iterator end() const; + + //! template methods for for operation over all matrix elements. + // the operations take care of skipping gaps in the end of rows (if any) + template void forEach(const Functor& operation); + template void forEach(const Functor& operation) const; + + //! equivalent to Mat::create(_rows, _cols, DataType<_Tp>::type) + void create(int _rows, int _cols); + //! equivalent to Mat::create(_size, DataType<_Tp>::type) + void create(Size _size); + //! equivalent to Mat::create(_ndims, _sizes, DatType<_Tp>::type) + void create(int _ndims, const int* _sizes); + //! equivalent to Mat::release() + void release(); + //! cross-product + Mat_ cross(const Mat_& m) const; + //! data type conversion + template operator Mat_() const; + //! overridden forms of Mat::row() etc. + Mat_ row(int y) const; + Mat_ col(int x) const; + Mat_ diag(int d=0) const; + CV_NODISCARD_STD Mat_ clone() const; + + //! overridden forms of Mat::elemSize() etc. + size_t elemSize() const; + size_t elemSize1() const; + int type() const; + int depth() const; + int channels() const; + size_t step1(int i=0) const; + //! returns step()/sizeof(_Tp) + size_t stepT(int i=0) const; + + //! overridden forms of Mat::zeros() etc. Data type is omitted, of course + CV_NODISCARD_STD static MatExpr zeros(int rows, int cols); + CV_NODISCARD_STD static MatExpr zeros(Size size); + CV_NODISCARD_STD static MatExpr zeros(int _ndims, const int* _sizes); + CV_NODISCARD_STD static MatExpr ones(int rows, int cols); + CV_NODISCARD_STD static MatExpr ones(Size size); + CV_NODISCARD_STD static MatExpr ones(int _ndims, const int* _sizes); + CV_NODISCARD_STD static MatExpr eye(int rows, int cols); + CV_NODISCARD_STD static MatExpr eye(Size size); + + //! some more overridden methods + Mat_& adjustROI( int dtop, int dbottom, int dleft, int dright ); + Mat_ operator()( const Range& rowRange, const Range& colRange ) const; + Mat_ operator()( const Rect& roi ) const; + Mat_ operator()( const Range* ranges ) const; + Mat_ operator()(const std::vector& ranges) const; + + //! more convenient forms of row and element access operators + _Tp* operator [](int y); + const _Tp* operator [](int y) const; + + //! returns reference to the specified element + _Tp& operator ()(const int* idx); + //! returns read-only reference to the specified element + const _Tp& operator ()(const int* idx) const; + + //! 
returns reference to the specified element + template _Tp& operator ()(const Vec& idx); + //! returns read-only reference to the specified element + template const _Tp& operator ()(const Vec& idx) const; + + //! returns reference to the specified element (1D case) + _Tp& operator ()(int idx0); + //! returns read-only reference to the specified element (1D case) + const _Tp& operator ()(int idx0) const; + //! returns reference to the specified element (2D case) + _Tp& operator ()(int row, int col); + //! returns read-only reference to the specified element (2D case) + const _Tp& operator ()(int row, int col) const; + //! returns reference to the specified element (3D case) + _Tp& operator ()(int idx0, int idx1, int idx2); + //! returns read-only reference to the specified element (3D case) + const _Tp& operator ()(int idx0, int idx1, int idx2) const; + + _Tp& operator ()(Point pt); + const _Tp& operator ()(Point pt) const; + + //! conversion to vector. + operator std::vector<_Tp>() const; + +#ifdef CV_CXX_STD_ARRAY + //! conversion to array. + template operator std::array<_Tp, _Nm>() const; +#endif + + //! conversion to Vec + template operator Vec::channel_type, n>() const; + //! conversion to Matx + template operator Matx::channel_type, m, n>() const; + +#ifdef CV_CXX_MOVE_SEMANTICS + Mat_(Mat_&& m); + Mat_& operator = (Mat_&& m); + + Mat_(Mat&& m); + Mat_& operator = (Mat&& m); + + Mat_(MatExpr&& e); +#endif +}; + +typedef Mat_ Mat1b; +typedef Mat_ Mat2b; +typedef Mat_ Mat3b; +typedef Mat_ Mat4b; + +typedef Mat_ Mat1s; +typedef Mat_ Mat2s; +typedef Mat_ Mat3s; +typedef Mat_ Mat4s; + +typedef Mat_ Mat1w; +typedef Mat_ Mat2w; +typedef Mat_ Mat3w; +typedef Mat_ Mat4w; + +typedef Mat_ Mat1i; +typedef Mat_ Mat2i; +typedef Mat_ Mat3i; +typedef Mat_ Mat4i; + +typedef Mat_ Mat1f; +typedef Mat_ Mat2f; +typedef Mat_ Mat3f; +typedef Mat_ Mat4f; + +typedef Mat_ Mat1d; +typedef Mat_ Mat2d; +typedef Mat_ Mat3d; +typedef Mat_ Mat4d; + +/** @todo document */ +class CV_EXPORTS UMat +{ +public: + //! default constructor + UMat(UMatUsageFlags usageFlags = USAGE_DEFAULT) CV_NOEXCEPT; + //! constructs 2D matrix of the specified size and type + // (_type is CV_8UC1, CV_64FC3, CV_32SC(12) etc.) + UMat(int rows, int cols, int type, UMatUsageFlags usageFlags = USAGE_DEFAULT); + UMat(Size size, int type, UMatUsageFlags usageFlags = USAGE_DEFAULT); + //! constructs 2D matrix and fills it with the specified value _s. + UMat(int rows, int cols, int type, const Scalar& s, UMatUsageFlags usageFlags = USAGE_DEFAULT); + UMat(Size size, int type, const Scalar& s, UMatUsageFlags usageFlags = USAGE_DEFAULT); + + //! constructs n-dimensional matrix + UMat(int ndims, const int* sizes, int type, UMatUsageFlags usageFlags = USAGE_DEFAULT); + UMat(int ndims, const int* sizes, int type, const Scalar& s, UMatUsageFlags usageFlags = USAGE_DEFAULT); + + //! copy constructor + UMat(const UMat& m); + + //! creates a matrix header for a part of the bigger matrix + UMat(const UMat& m, const Range& rowRange, const Range& colRange=Range::all()); + UMat(const UMat& m, const Rect& roi); + UMat(const UMat& m, const Range* ranges); + UMat(const UMat& m, const std::vector& ranges); + + // FIXIT copyData=false is not implemented, drop this in favor of cv::Mat (OpenCV 5.0) + //! builds matrix from std::vector with or without copying the data + template explicit UMat(const std::vector<_Tp>& vec, bool copyData=false); + + //! destructor - calls release() + ~UMat(); + //! 
assignment operators + UMat& operator = (const UMat& m); + + Mat getMat(int flags) const; + + //! returns a new matrix header for the specified row + UMat row(int y) const; + //! returns a new matrix header for the specified column + UMat col(int x) const; + //! ... for the specified row span + UMat rowRange(int startrow, int endrow) const; + UMat rowRange(const Range& r) const; + //! ... for the specified column span + UMat colRange(int startcol, int endcol) const; + UMat colRange(const Range& r) const; + //! ... for the specified diagonal + //! (d=0 - the main diagonal, + //! >0 - a diagonal from the upper half, + //! <0 - a diagonal from the lower half) + UMat diag(int d=0) const; + //! constructs a square diagonal matrix which main diagonal is vector "d" + CV_NODISCARD_STD static UMat diag(const UMat& d); + + //! returns deep copy of the matrix, i.e. the data is copied + CV_NODISCARD_STD UMat clone() const; + //! copies the matrix content to "m". + // It calls m.create(this->size(), this->type()). + void copyTo( OutputArray m ) const; + //! copies those matrix elements to "m" that are marked with non-zero mask elements. + void copyTo( OutputArray m, InputArray mask ) const; + //! converts matrix to another datatype with optional scaling. See cvConvertScale. + void convertTo( OutputArray m, int rtype, double alpha=1, double beta=0 ) const; + + void assignTo( UMat& m, int type=-1 ) const; + + //! sets every matrix element to s + UMat& operator = (const Scalar& s); + //! sets some of the matrix elements to s, according to the mask + UMat& setTo(InputArray value, InputArray mask=noArray()); + //! creates alternative matrix header for the same data, with different + // number of channels and/or different number of rows. see cvReshape. + UMat reshape(int cn, int rows=0) const; + UMat reshape(int cn, int newndims, const int* newsz) const; + + //! matrix transposition by means of matrix expressions + UMat t() const; + //! matrix inversion by means of matrix expressions + UMat inv(int method=DECOMP_LU) const; + //! per-element matrix multiplication by means of matrix expressions + UMat mul(InputArray m, double scale=1) const; + + //! computes dot-product + double dot(InputArray m) const; + + //! Matlab-style matrix initialization + CV_NODISCARD_STD static UMat zeros(int rows, int cols, int type); + CV_NODISCARD_STD static UMat zeros(Size size, int type); + CV_NODISCARD_STD static UMat zeros(int ndims, const int* sz, int type); + CV_NODISCARD_STD static UMat ones(int rows, int cols, int type); + CV_NODISCARD_STD static UMat ones(Size size, int type); + CV_NODISCARD_STD static UMat ones(int ndims, const int* sz, int type); + CV_NODISCARD_STD static UMat eye(int rows, int cols, int type); + CV_NODISCARD_STD static UMat eye(Size size, int type); + + //! allocates new matrix data unless the matrix already has specified size and type. + // previous data is unreferenced if needed. + void create(int rows, int cols, int type, UMatUsageFlags usageFlags = USAGE_DEFAULT); + void create(Size size, int type, UMatUsageFlags usageFlags = USAGE_DEFAULT); + void create(int ndims, const int* sizes, int type, UMatUsageFlags usageFlags = USAGE_DEFAULT); + void create(const std::vector& sizes, int type, UMatUsageFlags usageFlags = USAGE_DEFAULT); + + //! increases the reference counter; use with care to avoid memleaks + void addref(); + //! decreases reference counter; + // deallocates the data when reference counter reaches 0. + void release(); + + //! deallocates the matrix data + void deallocate(); + //! 
internal use function; properly re-allocates _size, _step arrays + void copySize(const UMat& m); + + //! locates matrix header within a parent matrix. See below + void locateROI( Size& wholeSize, Point& ofs ) const; + //! moves/resizes the current matrix ROI inside the parent matrix. + UMat& adjustROI( int dtop, int dbottom, int dleft, int dright ); + //! extracts a rectangular sub-matrix + // (this is a generalized form of row, rowRange etc.) + UMat operator()( Range rowRange, Range colRange ) const; + UMat operator()( const Rect& roi ) const; + UMat operator()( const Range* ranges ) const; + UMat operator()(const std::vector& ranges) const; + + //! returns true iff the matrix data is continuous + // (i.e. when there are no gaps between successive rows). + // similar to CV_IS_MAT_CONT(cvmat->type) + bool isContinuous() const; + + //! returns true if the matrix is a submatrix of another matrix + bool isSubmatrix() const; + + //! returns element size in bytes, + // similar to CV_ELEM_SIZE(cvmat->type) + size_t elemSize() const; + //! returns the size of element channel in bytes. + size_t elemSize1() const; + //! returns element type, similar to CV_MAT_TYPE(cvmat->type) + int type() const; + //! returns element type, similar to CV_MAT_DEPTH(cvmat->type) + int depth() const; + //! returns element type, similar to CV_MAT_CN(cvmat->type) + int channels() const; + //! returns step/elemSize1() + size_t step1(int i=0) const; + //! returns true if matrix data is NULL + bool empty() const; + //! returns the total number of matrix elements + size_t total() const; + + //! returns N if the matrix is 1-channel (N x ptdim) or ptdim-channel (1 x N) or (N x 1); negative number otherwise + int checkVector(int elemChannels, int depth=-1, bool requireContinuous=true) const; + +#ifdef CV_CXX_MOVE_SEMANTICS + UMat(UMat&& m); + UMat& operator = (UMat&& m); +#endif + + /*! Returns the OpenCL buffer handle on which UMat operates on. + The UMat instance should be kept alive during the use of the handle to prevent the buffer to be + returned to the OpenCV buffer pool. + */ + void* handle(int accessFlags) const; + void ndoffset(size_t* ofs) const; + + enum { MAGIC_VAL = 0x42FF0000, AUTO_STEP = 0, CONTINUOUS_FLAG = CV_MAT_CONT_FLAG, SUBMATRIX_FLAG = CV_SUBMAT_FLAG }; + enum { MAGIC_MASK = 0xFFFF0000, TYPE_MASK = 0x00000FFF, DEPTH_MASK = 7 }; + + /*! includes several bit-fields: + - the magic signature + - continuity flag + - depth + - number of channels + */ + int flags; + //! the matrix dimensionality, >= 2 + int dims; + //! the number of rows and columns or (-1, -1) when the matrix has more than 2 dimensions + int rows, cols; + + //! custom allocator + MatAllocator* allocator; + UMatUsageFlags usageFlags; // usage flags for allocator + //! and the standard allocator + static MatAllocator* getStdAllocator(); + + //! internal use method: updates the continuity flag + void updateContinuityFlag(); + + // black-box container of UMat data + UMatData* u; + + // offset of the submatrix (or 0) + size_t offset; + + MatSize size; + MatStep step; + +protected: +}; + + +/////////////////////////// multi-dimensional sparse matrix ////////////////////////// + +/** @brief The class SparseMat represents multi-dimensional sparse numerical arrays. + +Such a sparse array can store elements of any type that Mat can store. *Sparse* means that only +non-zero elements are stored (though, as a result of operations on a sparse matrix, some of its +stored elements can actually become 0. 
It is up to you to detect such elements and delete them +using SparseMat::erase ). The non-zero elements are stored in a hash table that grows when it is +filled so that the search time is O(1) in average (regardless of whether element is there or not). +Elements can be accessed using the following methods: +- Query operations (SparseMat::ptr and the higher-level SparseMat::ref, SparseMat::value and + SparseMat::find), for example: + @code + const int dims = 5; + int size[5] = {10, 10, 10, 10, 10}; + SparseMat sparse_mat(dims, size, CV_32F); + for(int i = 0; i < 1000; i++) + { + int idx[dims]; + for(int k = 0; k < dims; k++) + idx[k] = rand() % size[k]; + sparse_mat.ref(idx) += 1.f; + } + cout << "nnz = " << sparse_mat.nzcount() << endl; + @endcode +- Sparse matrix iterators. They are similar to MatIterator but different from NAryMatIterator. + That is, the iteration loop is familiar to STL users: + @code + // prints elements of a sparse floating-point matrix + // and the sum of elements. + SparseMatConstIterator_ + it = sparse_mat.begin(), + it_end = sparse_mat.end(); + double s = 0; + int dims = sparse_mat.dims(); + for(; it != it_end; ++it) + { + // print element indices and the element value + const SparseMat::Node* n = it.node(); + printf("("); + for(int i = 0; i < dims; i++) + printf("%d%s", n->idx[i], i < dims-1 ? ", " : ")"); + printf(": %g\n", it.value()); + s += *it; + } + printf("Element sum is %g\n", s); + @endcode + If you run this loop, you will notice that elements are not enumerated in a logical order + (lexicographical, and so on). They come in the same order as they are stored in the hash table + (semi-randomly). You may collect pointers to the nodes and sort them to get the proper ordering. + Note, however, that pointers to the nodes may become invalid when you add more elements to the + matrix. This may happen due to possible buffer reallocation. +- Combination of the above 2 methods when you need to process 2 or more sparse matrices + simultaneously. For example, this is how you can compute unnormalized cross-correlation of the 2 + floating-point sparse matrices: + @code + double cross_corr(const SparseMat& a, const SparseMat& b) + { + const SparseMat *_a = &a, *_b = &b; + // if b contains less elements than a, + // it is faster to iterate through b + if(_a->nzcount() > _b->nzcount()) + std::swap(_a, _b); + SparseMatConstIterator_ it = _a->begin(), + it_end = _a->end(); + double ccorr = 0; + for(; it != it_end; ++it) + { + // take the next element from the first matrix + float avalue = *it; + const Node* anode = it.node(); + // and try to find an element with the same index in the second matrix. + // since the hash value depends only on the element index, + // reuse the hash value stored in the node + float bvalue = _b->value(anode->idx,&anode->hashval); + ccorr += avalue*bvalue; + } + return ccorr; + } + @endcode + */ +class CV_EXPORTS SparseMat +{ +public: + typedef SparseMatIterator iterator; + typedef SparseMatConstIterator const_iterator; + + enum { MAGIC_VAL=0x42FD0000, MAX_DIM=32, HASH_SCALE=0x5bd1e995, HASH_BIT=0x80000000 }; + + //! the sparse matrix header + struct CV_EXPORTS Hdr + { + Hdr(int _dims, const int* _sizes, int _type); + void clear(); + int refcount; + int dims; + int valueOffset; + size_t nodeSize; + size_t nodeCount; + size_t freeList; + std::vector pool; + std::vector hashtab; + int size[MAX_DIM]; + }; + + //! sparse matrix node - element of a hash table + struct CV_EXPORTS Node + { + //! hash value + size_t hashval; + //! 
index of the next node in the same hash table entry + size_t next; + //! index of the matrix element + int idx[MAX_DIM]; + }; + + /** @brief Various SparseMat constructors. + */ + SparseMat(); + + /** @overload + @param dims Array dimensionality. + @param _sizes Sparce matrix size on all dementions. + @param _type Sparse matrix data type. + */ + SparseMat(int dims, const int* _sizes, int _type); + + /** @overload + @param m Source matrix for copy constructor. If m is dense matrix (ocvMat) then it will be converted + to sparse representation. + */ + SparseMat(const SparseMat& m); + + /** @overload + @param m Source matrix for copy constructor. If m is dense matrix (ocvMat) then it will be converted + to sparse representation. + */ + explicit SparseMat(const Mat& m); + + //! the destructor + ~SparseMat(); + + //! assignment operator. This is O(1) operation, i.e. no data is copied + SparseMat& operator = (const SparseMat& m); + //! equivalent to the corresponding constructor + SparseMat& operator = (const Mat& m); + + //! creates full copy of the matrix + CV_NODISCARD_STD SparseMat clone() const; + + //! copies all the data to the destination matrix. All the previous content of m is erased + void copyTo( SparseMat& m ) const; + //! converts sparse matrix to dense matrix. + void copyTo( Mat& m ) const; + //! multiplies all the matrix elements by the specified scale factor alpha and converts the results to the specified data type + void convertTo( SparseMat& m, int rtype, double alpha=1 ) const; + //! converts sparse matrix to dense n-dim matrix with optional type conversion and scaling. + /*! + @param [out] m - output matrix; if it does not have a proper size or type before the operation, + it is reallocated + @param [in] rtype - desired output matrix type or, rather, the depth since the number of channels + are the same as the input has; if rtype is negative, the output matrix will have the + same type as the input. + @param [in] alpha - optional scale factor + @param [in] beta - optional delta added to the scaled values + */ + void convertTo( Mat& m, int rtype, double alpha=1, double beta=0 ) const; + + // not used now + void assignTo( SparseMat& m, int type=-1 ) const; + + //! reallocates sparse matrix. + /*! + If the matrix already had the proper size and type, + it is simply cleared with clear(), otherwise, + the old matrix is released (using release()) and the new one is allocated. + */ + void create(int dims, const int* _sizes, int _type); + //! sets all the sparse matrix elements to 0, which means clearing the hash table. + void clear(); + //! manually increments the reference counter to the header. + void addref(); + // decrements the header reference counter. When the counter reaches 0, the header and all the underlying data are deallocated. + void release(); + + //! converts sparse matrix to the old-style representation; all the elements are copied. + //operator CvSparseMat*() const; + //! returns the size of each element in bytes (not including the overhead - the space occupied by SparseMat::Node elements) + size_t elemSize() const; + //! returns elemSize()/channels() + size_t elemSize1() const; + + //! returns type of sparse matrix elements + int type() const; + //! returns the depth of sparse matrix elements + int depth() const; + //! returns the number of channels + int channels() const; + + //! returns the array of sizes, or NULL if the matrix is not allocated + const int* size() const; + //! returns the size of i-th matrix dimension (or 0) + int size(int i) const; + //! 
returns the matrix dimensionality + int dims() const; + //! returns the number of non-zero elements (=the number of hash table nodes) + size_t nzcount() const; + + //! computes the element hash value (1D case) + size_t hash(int i0) const; + //! computes the element hash value (2D case) + size_t hash(int i0, int i1) const; + //! computes the element hash value (3D case) + size_t hash(int i0, int i1, int i2) const; + //! computes the element hash value (nD case) + size_t hash(const int* idx) const; + + //!@{ + /*! + specialized variants for 1D, 2D, 3D cases and the generic_type one for n-D case. + return pointer to the matrix element. + - if the element is there (it's non-zero), the pointer to it is returned + - if it's not there and createMissing=false, NULL pointer is returned + - if it's not there and createMissing=true, then the new element + is created and initialized with 0. Pointer to it is returned + - if the optional hashval pointer is not NULL, the element hash value is + not computed, but *hashval is taken instead. + */ + //! returns pointer to the specified element (1D case) + uchar* ptr(int i0, bool createMissing, size_t* hashval=0); + //! returns pointer to the specified element (2D case) + uchar* ptr(int i0, int i1, bool createMissing, size_t* hashval=0); + //! returns pointer to the specified element (3D case) + uchar* ptr(int i0, int i1, int i2, bool createMissing, size_t* hashval=0); + //! returns pointer to the specified element (nD case) + uchar* ptr(const int* idx, bool createMissing, size_t* hashval=0); + //!@} + + //!@{ + /*! + return read-write reference to the specified sparse matrix element. + + `ref<_Tp>(i0,...[,hashval])` is equivalent to `*(_Tp*)ptr(i0,...,true[,hashval])`. + The methods always return a valid reference. + If the element did not exist, it is created and initialized with 0. + */ + //! returns reference to the specified element (1D case) + template _Tp& ref(int i0, size_t* hashval=0); + //! returns reference to the specified element (2D case) + template _Tp& ref(int i0, int i1, size_t* hashval=0); + //! returns reference to the specified element (3D case) + template _Tp& ref(int i0, int i1, int i2, size_t* hashval=0); + //! returns reference to the specified element (nD case) + template _Tp& ref(const int* idx, size_t* hashval=0); + //!@} + + //!@{ + /*! + return value of the specified sparse matrix element. + + `value<_Tp>(i0,...[,hashval])` is equivalent to + @code + { const _Tp* p = find<_Tp>(i0,...[,hashval]); return p ? *p : _Tp(); } + @endcode + + That is, if the element did not exist, the methods return 0. + */ + //! returns value of the specified element (1D case) + template _Tp value(int i0, size_t* hashval=0) const; + //! returns value of the specified element (2D case) + template _Tp value(int i0, int i1, size_t* hashval=0) const; + //! returns value of the specified element (3D case) + template _Tp value(int i0, int i1, int i2, size_t* hashval=0) const; + //! returns value of the specified element (nD case) + template _Tp value(const int* idx, size_t* hashval=0) const; + //!@} + + //!@{ + /*! + Return pointer to the specified sparse matrix element if it exists + + `find<_Tp>(i0,...[,hashval])` is equivalent to `(_const Tp*)ptr(i0,...false[,hashval])`. + + If the specified element does not exist, the methods return NULL. + */ + //! returns pointer to the specified element (1D case) + template const _Tp* find(int i0, size_t* hashval=0) const; + //! 
returns pointer to the specified element (2D case) + template const _Tp* find(int i0, int i1, size_t* hashval=0) const; + //! returns pointer to the specified element (3D case) + template const _Tp* find(int i0, int i1, int i2, size_t* hashval=0) const; + //! returns pointer to the specified element (nD case) + template const _Tp* find(const int* idx, size_t* hashval=0) const; + //!@} + + //! erases the specified element (2D case) + void erase(int i0, int i1, size_t* hashval=0); + //! erases the specified element (3D case) + void erase(int i0, int i1, int i2, size_t* hashval=0); + //! erases the specified element (nD case) + void erase(const int* idx, size_t* hashval=0); + + //!@{ + /*! + return the sparse matrix iterator pointing to the first sparse matrix element + */ + //! returns the sparse matrix iterator at the matrix beginning + SparseMatIterator begin(); + //! returns the sparse matrix iterator at the matrix beginning + template SparseMatIterator_<_Tp> begin(); + //! returns the read-only sparse matrix iterator at the matrix beginning + SparseMatConstIterator begin() const; + //! returns the read-only sparse matrix iterator at the matrix beginning + template SparseMatConstIterator_<_Tp> begin() const; + //!@} + /*! + return the sparse matrix iterator pointing to the element following the last sparse matrix element + */ + //! returns the sparse matrix iterator at the matrix end + SparseMatIterator end(); + //! returns the read-only sparse matrix iterator at the matrix end + SparseMatConstIterator end() const; + //! returns the typed sparse matrix iterator at the matrix end + template SparseMatIterator_<_Tp> end(); + //! returns the typed read-only sparse matrix iterator at the matrix end + template SparseMatConstIterator_<_Tp> end() const; + + //! returns the value stored in the sparse martix node + template _Tp& value(Node* n); + //! returns the value stored in the sparse martix node + template const _Tp& value(const Node* n) const; + + ////////////// some internal-use methods /////////////// + Node* node(size_t nidx); + const Node* node(size_t nidx) const; + + uchar* newNode(const int* idx, size_t hashval); + void removeNode(size_t hidx, size_t nidx, size_t previdx); + void resizeHashTab(size_t newsize); + + int flags; + Hdr* hdr; +}; + + + +///////////////////////////////// SparseMat_<_Tp> //////////////////////////////////// + +/** @brief Template sparse n-dimensional array class derived from SparseMat + +SparseMat_ is a thin wrapper on top of SparseMat created in the same way as Mat_ . It simplifies +notation of some operations: +@code + int sz[] = {10, 20, 30}; + SparseMat_ M(3, sz); + ... + M.ref(1, 2, 3) = M(4, 5, 6) + M(7, 8, 9); +@endcode + */ +template class SparseMat_ : public SparseMat +{ +public: + typedef SparseMatIterator_<_Tp> iterator; + typedef SparseMatConstIterator_<_Tp> const_iterator; + + //! the default constructor + SparseMat_(); + //! the full constructor equivalent to SparseMat(dims, _sizes, DataType<_Tp>::type) + SparseMat_(int dims, const int* _sizes); + //! the copy constructor. If DataType<_Tp>.type != m.type(), the m elements are converted + SparseMat_(const SparseMat& m); + //! the copy constructor. This is O(1) operation - no data is copied + SparseMat_(const SparseMat_& m); + //! converts dense matrix to the sparse form + SparseMat_(const Mat& m); + //! converts the old-style sparse matrix to the C++ class. All the elements are copied + //SparseMat_(const CvSparseMat* m); + //! the assignment operator. 
If DataType<_Tp>.type != m.type(), the m elements are converted + SparseMat_& operator = (const SparseMat& m); + //! the assignment operator. This is O(1) operation - no data is copied + SparseMat_& operator = (const SparseMat_& m); + //! converts dense matrix to the sparse form + SparseMat_& operator = (const Mat& m); + + //! makes full copy of the matrix. All the elements are duplicated + CV_NODISCARD_STD SparseMat_ clone() const; + //! equivalent to cv::SparseMat::create(dims, _sizes, DataType<_Tp>::type) + void create(int dims, const int* _sizes); + //! converts sparse matrix to the old-style CvSparseMat. All the elements are copied + //operator CvSparseMat*() const; + + //! returns type of the matrix elements + int type() const; + //! returns depth of the matrix elements + int depth() const; + //! returns the number of channels in each matrix element + int channels() const; + + //! equivalent to SparseMat::ref<_Tp>(i0, hashval) + _Tp& ref(int i0, size_t* hashval=0); + //! equivalent to SparseMat::ref<_Tp>(i0, i1, hashval) + _Tp& ref(int i0, int i1, size_t* hashval=0); + //! equivalent to SparseMat::ref<_Tp>(i0, i1, i2, hashval) + _Tp& ref(int i0, int i1, int i2, size_t* hashval=0); + //! equivalent to SparseMat::ref<_Tp>(idx, hashval) + _Tp& ref(const int* idx, size_t* hashval=0); + + //! equivalent to SparseMat::value<_Tp>(i0, hashval) + _Tp operator()(int i0, size_t* hashval=0) const; + //! equivalent to SparseMat::value<_Tp>(i0, i1, hashval) + _Tp operator()(int i0, int i1, size_t* hashval=0) const; + //! equivalent to SparseMat::value<_Tp>(i0, i1, i2, hashval) + _Tp operator()(int i0, int i1, int i2, size_t* hashval=0) const; + //! equivalent to SparseMat::value<_Tp>(idx, hashval) + _Tp operator()(const int* idx, size_t* hashval=0) const; + + //! returns sparse matrix iterator pointing to the first sparse matrix element + SparseMatIterator_<_Tp> begin(); + //! returns read-only sparse matrix iterator pointing to the first sparse matrix element + SparseMatConstIterator_<_Tp> begin() const; + //! returns sparse matrix iterator pointing to the element following the last sparse matrix element + SparseMatIterator_<_Tp> end(); + //! returns read-only sparse matrix iterator pointing to the element following the last sparse matrix element + SparseMatConstIterator_<_Tp> end() const; +}; + + + +////////////////////////////////// MatConstIterator ////////////////////////////////// + +class CV_EXPORTS MatConstIterator +{ +public: + typedef uchar* value_type; + typedef ptrdiff_t difference_type; + typedef const uchar** pointer; + typedef uchar* reference; + + typedef std::random_access_iterator_tag iterator_category; + + //! default constructor + MatConstIterator(); + //! constructor that sets the iterator to the beginning of the matrix + MatConstIterator(const Mat* _m); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator(const Mat* _m, int _row, int _col=0); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator(const Mat* _m, Point _pt); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator(const Mat* _m, const int* _idx); + //! copy constructor + MatConstIterator(const MatConstIterator& it); + + //! copy operator + MatConstIterator& operator = (const MatConstIterator& it); + //! returns the current matrix element + const uchar* operator *() const; + //! 
returns the i-th matrix element, relative to the current + const uchar* operator [](ptrdiff_t i) const; + + //! shifts the iterator forward by the specified number of elements + MatConstIterator& operator += (ptrdiff_t ofs); + //! shifts the iterator backward by the specified number of elements + MatConstIterator& operator -= (ptrdiff_t ofs); + //! decrements the iterator + MatConstIterator& operator --(); + //! decrements the iterator + MatConstIterator operator --(int); + //! increments the iterator + MatConstIterator& operator ++(); + //! increments the iterator + MatConstIterator operator ++(int); + //! returns the current iterator position + Point pos() const; + //! returns the current iterator position + void pos(int* _idx) const; + + ptrdiff_t lpos() const; + void seek(ptrdiff_t ofs, bool relative = false); + void seek(const int* _idx, bool relative = false); + + const Mat* m; + size_t elemSize; + const uchar* ptr; + const uchar* sliceStart; + const uchar* sliceEnd; +}; + + + +////////////////////////////////// MatConstIterator_ ///////////////////////////////// + +/** @brief Matrix read-only iterator + */ +template +class MatConstIterator_ : public MatConstIterator +{ +public: + typedef _Tp value_type; + typedef ptrdiff_t difference_type; + typedef const _Tp* pointer; + typedef const _Tp& reference; + + typedef std::random_access_iterator_tag iterator_category; + + //! default constructor + MatConstIterator_(); + //! constructor that sets the iterator to the beginning of the matrix + MatConstIterator_(const Mat_<_Tp>* _m); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator_(const Mat_<_Tp>* _m, int _row, int _col=0); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator_(const Mat_<_Tp>* _m, Point _pt); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator_(const Mat_<_Tp>* _m, const int* _idx); + //! copy constructor + MatConstIterator_(const MatConstIterator_& it); + + //! copy operator + MatConstIterator_& operator = (const MatConstIterator_& it); + //! returns the current matrix element + const _Tp& operator *() const; + //! returns the i-th matrix element, relative to the current + const _Tp& operator [](ptrdiff_t i) const; + + //! shifts the iterator forward by the specified number of elements + MatConstIterator_& operator += (ptrdiff_t ofs); + //! shifts the iterator backward by the specified number of elements + MatConstIterator_& operator -= (ptrdiff_t ofs); + //! decrements the iterator + MatConstIterator_& operator --(); + //! decrements the iterator + MatConstIterator_ operator --(int); + //! increments the iterator + MatConstIterator_& operator ++(); + //! increments the iterator + MatConstIterator_ operator ++(int); + //! returns the current iterator position + Point pos() const; +}; + + + +//////////////////////////////////// MatIterator_ //////////////////////////////////// + +/** @brief Matrix read-write iterator +*/ +template +class MatIterator_ : public MatConstIterator_<_Tp> +{ +public: + typedef _Tp* pointer; + typedef _Tp& reference; + + typedef std::random_access_iterator_tag iterator_category; + + //! the default constructor + MatIterator_(); + //! constructor that sets the iterator to the beginning of the matrix + MatIterator_(Mat_<_Tp>* _m); + //! constructor that sets the iterator to the specified element of the matrix + MatIterator_(Mat_<_Tp>* _m, int _row, int _col=0); + //! 
constructor that sets the iterator to the specified element of the matrix + MatIterator_(Mat_<_Tp>* _m, Point _pt); + //! constructor that sets the iterator to the specified element of the matrix + MatIterator_(Mat_<_Tp>* _m, const int* _idx); + //! copy constructor + MatIterator_(const MatIterator_& it); + //! copy operator + MatIterator_& operator = (const MatIterator_<_Tp>& it ); + + //! returns the current matrix element + _Tp& operator *() const; + //! returns the i-th matrix element, relative to the current + _Tp& operator [](ptrdiff_t i) const; + + //! shifts the iterator forward by the specified number of elements + MatIterator_& operator += (ptrdiff_t ofs); + //! shifts the iterator backward by the specified number of elements + MatIterator_& operator -= (ptrdiff_t ofs); + //! decrements the iterator + MatIterator_& operator --(); + //! decrements the iterator + MatIterator_ operator --(int); + //! increments the iterator + MatIterator_& operator ++(); + //! increments the iterator + MatIterator_ operator ++(int); +}; + + + +/////////////////////////////// SparseMatConstIterator /////////////////////////////// + +/** @brief Read-Only Sparse Matrix Iterator. + + Here is how to use the iterator to compute the sum of floating-point sparse matrix elements: + + \code + SparseMatConstIterator it = m.begin(), it_end = m.end(); + double s = 0; + CV_Assert( m.type() == CV_32F ); + for( ; it != it_end; ++it ) + s += it.value(); + \endcode +*/ +class CV_EXPORTS SparseMatConstIterator +{ +public: + //! the default constructor + SparseMatConstIterator(); + //! the full constructor setting the iterator to the first sparse matrix element + SparseMatConstIterator(const SparseMat* _m); + //! the copy constructor + SparseMatConstIterator(const SparseMatConstIterator& it); + + //! the assignment operator + SparseMatConstIterator& operator = (const SparseMatConstIterator& it); + + //! template method returning the current matrix element + template const _Tp& value() const; + //! returns the current node of the sparse matrix. it.node->idx is the current element index + const SparseMat::Node* node() const; + + //! moves iterator to the previous element + SparseMatConstIterator& operator --(); + //! moves iterator to the previous element + SparseMatConstIterator operator --(int); + //! moves iterator to the next element + SparseMatConstIterator& operator ++(); + //! moves iterator to the next element + SparseMatConstIterator operator ++(int); + + //! moves iterator to the element after the last element + void seekEnd(); + + const SparseMat* m; + size_t hashidx; + uchar* ptr; +}; + + + +////////////////////////////////// SparseMatIterator ///////////////////////////////// + +/** @brief Read-write Sparse Matrix Iterator + + The class is similar to cv::SparseMatConstIterator, + but can be used for in-place modification of the matrix elements. +*/ +class CV_EXPORTS SparseMatIterator : public SparseMatConstIterator +{ +public: + //! the default constructor + SparseMatIterator(); + //! the full constructor setting the iterator to the first sparse matrix element + SparseMatIterator(SparseMat* _m); + //! the full constructor setting the iterator to the specified sparse matrix element + SparseMatIterator(SparseMat* _m, const int* idx); + //! the copy constructor + SparseMatIterator(const SparseMatIterator& it); + + //! the assignment operator + SparseMatIterator& operator = (const SparseMatIterator& it); + //! returns read-write reference to the current sparse matrix element + template _Tp& value() const; + //! 
returns pointer to the current sparse matrix node. it.node->idx is the index of the current element (do not modify it!) + SparseMat::Node* node() const; + + //! moves iterator to the next element + SparseMatIterator& operator ++(); + //! moves iterator to the next element + SparseMatIterator operator ++(int); +}; + + + +/////////////////////////////// SparseMatConstIterator_ ////////////////////////////// + +/** @brief Template Read-Only Sparse Matrix Iterator Class. + + This is the derived from SparseMatConstIterator class that + introduces more convenient operator *() for accessing the current element. +*/ +template class SparseMatConstIterator_ : public SparseMatConstIterator +{ +public: + + typedef std::forward_iterator_tag iterator_category; + + //! the default constructor + SparseMatConstIterator_(); + //! the full constructor setting the iterator to the first sparse matrix element + SparseMatConstIterator_(const SparseMat_<_Tp>* _m); + SparseMatConstIterator_(const SparseMat* _m); + //! the copy constructor + SparseMatConstIterator_(const SparseMatConstIterator_& it); + + //! the assignment operator + SparseMatConstIterator_& operator = (const SparseMatConstIterator_& it); + //! the element access operator + const _Tp& operator *() const; + + //! moves iterator to the next element + SparseMatConstIterator_& operator ++(); + //! moves iterator to the next element + SparseMatConstIterator_ operator ++(int); +}; + + + +///////////////////////////////// SparseMatIterator_ ///////////////////////////////// + +/** @brief Template Read-Write Sparse Matrix Iterator Class. + + This is the derived from cv::SparseMatConstIterator_ class that + introduces more convenient operator *() for accessing the current element. +*/ +template class SparseMatIterator_ : public SparseMatConstIterator_<_Tp> +{ +public: + + typedef std::forward_iterator_tag iterator_category; + + //! the default constructor + SparseMatIterator_(); + //! the full constructor setting the iterator to the first sparse matrix element + SparseMatIterator_(SparseMat_<_Tp>* _m); + SparseMatIterator_(SparseMat* _m); + //! the copy constructor + SparseMatIterator_(const SparseMatIterator_& it); + + //! the assignment operator + SparseMatIterator_& operator = (const SparseMatIterator_& it); + //! returns the reference to the current element + _Tp& operator *() const; + + //! moves the iterator to the next element + SparseMatIterator_& operator ++(); + //! moves the iterator to the next element + SparseMatIterator_ operator ++(int); +}; + + + +/////////////////////////////////// NAryMatIterator ////////////////////////////////// + +/** @brief n-ary multi-dimensional array iterator. + +Use the class to implement unary, binary, and, generally, n-ary element-wise operations on +multi-dimensional arrays. Some of the arguments of an n-ary function may be continuous arrays, some +may be not. It is possible to use conventional MatIterator 's for each array but incrementing all of +the iterators after each small operations may be a big overhead. In this case consider using +NAryMatIterator to iterate through several matrices simultaneously as long as they have the same +geometry (dimensionality and all the dimension sizes are the same). On each iteration `it.planes[0]`, +`it.planes[1]`,... will be the slices of the corresponding matrices. 
+
+The example below illustrates how you can compute a normalized and thresholded 3D color histogram:
+@code
+    void computeNormalizedColorHist(const Mat& image, Mat& hist, int N, double minProb)
+    {
+        const int histSize[] = {N, N, N};
+
+        // make sure that the histogram has a proper size and type
+        hist.create(3, histSize, CV_32F);
+
+        // and clear it
+        hist = Scalar(0);
+
+        // the loop below assumes that the image
+        // is an 8-bit 3-channel image. check it.
+        CV_Assert(image.type() == CV_8UC3);
+        MatConstIterator_<Vec3b> it = image.begin<Vec3b>(),
+                                 it_end = image.end<Vec3b>();
+        for( ; it != it_end; ++it )
+        {
+            const Vec3b& pix = *it;
+            hist.at<float>(pix[0]*N/256, pix[1]*N/256, pix[2]*N/256) += 1.f;
+        }
+
+        minProb *= image.rows*image.cols;
+
+        // initialize iterator (the style is different from STL).
+        // after initialization the iterator will contain
+        // the number of slices or planes the iterator will go through.
+        // it simultaneously increments iterators for several matrices
+        // supplied as a null terminated list of pointers
+        const Mat* arrays[] = {&hist, 0};
+        Mat planes[1];
+        NAryMatIterator itNAry(arrays, planes, 1);
+        double s = 0;
+        // iterate through the matrix. on each iteration
+        // itNAry.planes[i] (of type Mat) will be set to the current plane
+        // of the i-th n-dim matrix passed to the iterator constructor.
+        for(int p = 0; p < itNAry.nplanes; p++, ++itNAry)
+        {
+            threshold(itNAry.planes[0], itNAry.planes[0], minProb, 0, THRESH_TOZERO);
+            s += sum(itNAry.planes[0])[0];
+        }
+
+        s = 1./s;
+        itNAry = NAryMatIterator(arrays, planes, 1);
+        for(int p = 0; p < itNAry.nplanes; p++, ++itNAry)
+            itNAry.planes[0] *= s;
+    }
+@endcode
+ */
+class CV_EXPORTS NAryMatIterator
+{
+public:
+    //! the default constructor
+    NAryMatIterator();
+    //! the full constructor taking arbitrary number of n-dim matrices
+    NAryMatIterator(const Mat** arrays, uchar** ptrs, int narrays=-1);
+    //! the full constructor taking arbitrary number of n-dim matrices
+    NAryMatIterator(const Mat** arrays, Mat* planes, int narrays=-1);
+    //! the separate iterator initialization method
+    void init(const Mat** arrays, Mat* planes, uchar** ptrs, int narrays=-1);
+
+    //! proceeds to the next plane of every iterated matrix
+    NAryMatIterator& operator ++();
+    //! proceeds to the next plane of every iterated matrix (postfix increment operator)
+    NAryMatIterator operator ++(int);
+
+    //! the iterated arrays
+    const Mat** arrays;
+    //! the current planes
+    Mat* planes;
+    //! data pointers
+    uchar** ptrs;
+    //! the number of arrays
+    int narrays;
+    //! the number of hyper-planes that the iterator steps through
+    size_t nplanes;
+    //!
the size of each segment (in elements) + size_t size; +protected: + int iterdepth; + size_t idx; +}; + + + +///////////////////////////////// Matrix Expressions ///////////////////////////////// + +class CV_EXPORTS MatOp +{ +public: + MatOp(); + virtual ~MatOp(); + + virtual bool elementWise(const MatExpr& expr) const; + virtual void assign(const MatExpr& expr, Mat& m, int type=-1) const = 0; + virtual void roi(const MatExpr& expr, const Range& rowRange, + const Range& colRange, MatExpr& res) const; + virtual void diag(const MatExpr& expr, int d, MatExpr& res) const; + virtual void augAssignAdd(const MatExpr& expr, Mat& m) const; + virtual void augAssignSubtract(const MatExpr& expr, Mat& m) const; + virtual void augAssignMultiply(const MatExpr& expr, Mat& m) const; + virtual void augAssignDivide(const MatExpr& expr, Mat& m) const; + virtual void augAssignAnd(const MatExpr& expr, Mat& m) const; + virtual void augAssignOr(const MatExpr& expr, Mat& m) const; + virtual void augAssignXor(const MatExpr& expr, Mat& m) const; + + virtual void add(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const; + virtual void add(const MatExpr& expr1, const Scalar& s, MatExpr& res) const; + + virtual void subtract(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const; + virtual void subtract(const Scalar& s, const MatExpr& expr, MatExpr& res) const; + + virtual void multiply(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res, double scale=1) const; + virtual void multiply(const MatExpr& expr1, double s, MatExpr& res) const; + + virtual void divide(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res, double scale=1) const; + virtual void divide(double s, const MatExpr& expr, MatExpr& res) const; + + virtual void abs(const MatExpr& expr, MatExpr& res) const; + + virtual void transpose(const MatExpr& expr, MatExpr& res) const; + virtual void matmul(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const; + virtual void invert(const MatExpr& expr, int method, MatExpr& res) const; + + virtual Size size(const MatExpr& expr) const; + virtual int type(const MatExpr& expr) const; +}; + +/** @brief Matrix expression representation +@anchor MatrixExpressions +This is a list of implemented matrix operations that can be combined in arbitrary complex +expressions (here A, B stand for matrices ( Mat ), s for a scalar ( Scalar ), alpha for a +real-valued scalar ( double )): +- Addition, subtraction, negation: `A+B`, `A-B`, `A+s`, `A-s`, `s+A`, `s-A`, `-A` +- Scaling: `A*alpha` +- Per-element multiplication and division: `A.mul(B)`, `A/B`, `alpha/A` +- Matrix multiplication: `A*B` +- Transposition: `A.t()` (means AT) +- Matrix inversion and pseudo-inversion, solving linear systems and least-squares problems: + `A.inv([method]) (~ A-1)`, `A.inv([method])*B (~ X: AX=B)` +- Comparison: `A cmpop B`, `A cmpop alpha`, `alpha cmpop A`, where *cmpop* is one of + `>`, `>=`, `==`, `!=`, `<=`, `<`. The result of comparison is an 8-bit single channel mask whose + elements are set to 255 (if the particular element or pair of elements satisfy the condition) or + 0. +- Bitwise logical operations: `A logicop B`, `A logicop s`, `s logicop A`, `~A`, where *logicop* is one of + `&`, `|`, `^`. 
+- Element-wise minimum and maximum: `min(A, B)`, `min(A, alpha)`, `max(A, B)`, `max(A, alpha)` +- Element-wise absolute value: `abs(A)` +- Cross-product, dot-product: `A.cross(B)`, `A.dot(B)` +- Any function of matrix or matrices and scalars that returns a matrix or a scalar, such as norm, + mean, sum, countNonZero, trace, determinant, repeat, and others. +- Matrix initializers ( Mat::eye(), Mat::zeros(), Mat::ones() ), matrix comma-separated + initializers, matrix constructors and operators that extract sub-matrices (see Mat description). +- Mat_() constructors to cast the result to the proper type. +@note Comma-separated initializers and probably some other operations may require additional +explicit Mat() or Mat_() constructor calls to resolve a possible ambiguity. + +Here are examples of matrix expressions: +@code + // compute pseudo-inverse of A, equivalent to A.inv(DECOMP_SVD) + SVD svd(A); + Mat pinvA = svd.vt.t()*Mat::diag(1./svd.w)*svd.u.t(); + + // compute the new vector of parameters in the Levenberg-Marquardt algorithm + x -= (A.t()*A + lambda*Mat::eye(A.cols,A.cols,A.type())).inv(DECOMP_CHOLESKY)*(A.t()*err); + + // sharpen image using "unsharp mask" algorithm + Mat blurred; double sigma = 1, threshold = 5, amount = 1; + GaussianBlur(img, blurred, Size(), sigma, sigma); + Mat lowContrastMask = abs(img - blurred) < threshold; + Mat sharpened = img*(1+amount) + blurred*(-amount); + img.copyTo(sharpened, lowContrastMask); +@endcode +*/ +class CV_EXPORTS MatExpr +{ +public: + MatExpr(); + explicit MatExpr(const Mat& m); + + MatExpr(const MatOp* _op, int _flags, const Mat& _a = Mat(), const Mat& _b = Mat(), + const Mat& _c = Mat(), double _alpha = 1, double _beta = 1, const Scalar& _s = Scalar()); + + operator Mat() const; + template operator Mat_<_Tp>() const; + + Size size() const; + int type() const; + + MatExpr row(int y) const; + MatExpr col(int x) const; + MatExpr diag(int d = 0) const; + MatExpr operator()( const Range& rowRange, const Range& colRange ) const; + MatExpr operator()( const Rect& roi ) const; + + MatExpr t() const; + MatExpr inv(int method = DECOMP_LU) const; + MatExpr mul(const MatExpr& e, double scale=1) const; + MatExpr mul(const Mat& m, double scale=1) const; + + Mat cross(const Mat& m) const; + double dot(const Mat& m) const; + + void swap(MatExpr& b); + + const MatOp* op; + int flags; + + Mat a, b, c; + double alpha, beta; + Scalar s; +}; + +//! @} core_basic + +//! @relates cv::MatExpr +//! 
@{ +CV_EXPORTS MatExpr operator + (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator + (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator + (const Scalar& s, const Mat& a); +CV_EXPORTS MatExpr operator + (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator + (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator + (const MatExpr& e, const Scalar& s); +CV_EXPORTS MatExpr operator + (const Scalar& s, const MatExpr& e); +CV_EXPORTS MatExpr operator + (const MatExpr& e1, const MatExpr& e2); +template static inline +MatExpr operator + (const Mat& a, const Matx<_Tp, m, n>& b) { return a + Mat(b); } +template static inline +MatExpr operator + (const Matx<_Tp, m, n>& a, const Mat& b) { return Mat(a) + b; } + +CV_EXPORTS MatExpr operator - (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator - (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator - (const Scalar& s, const Mat& a); +CV_EXPORTS MatExpr operator - (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator - (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator - (const MatExpr& e, const Scalar& s); +CV_EXPORTS MatExpr operator - (const Scalar& s, const MatExpr& e); +CV_EXPORTS MatExpr operator - (const MatExpr& e1, const MatExpr& e2); +template static inline +MatExpr operator - (const Mat& a, const Matx<_Tp, m, n>& b) { return a - Mat(b); } +template static inline +MatExpr operator - (const Matx<_Tp, m, n>& a, const Mat& b) { return Mat(a) - b; } + +CV_EXPORTS MatExpr operator - (const Mat& m); +CV_EXPORTS MatExpr operator - (const MatExpr& e); + +CV_EXPORTS MatExpr operator * (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator * (const Mat& a, double s); +CV_EXPORTS MatExpr operator * (double s, const Mat& a); +CV_EXPORTS MatExpr operator * (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator * (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator * (const MatExpr& e, double s); +CV_EXPORTS MatExpr operator * (double s, const MatExpr& e); +CV_EXPORTS MatExpr operator * (const MatExpr& e1, const MatExpr& e2); +template static inline +MatExpr operator * (const Mat& a, const Matx<_Tp, m, n>& b) { return a * Mat(b); } +template static inline +MatExpr operator * (const Matx<_Tp, m, n>& a, const Mat& b) { return Mat(a) * b; } + +CV_EXPORTS MatExpr operator / (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator / (const Mat& a, double s); +CV_EXPORTS MatExpr operator / (double s, const Mat& a); +CV_EXPORTS MatExpr operator / (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator / (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator / (const MatExpr& e, double s); +CV_EXPORTS MatExpr operator / (double s, const MatExpr& e); +CV_EXPORTS MatExpr operator / (const MatExpr& e1, const MatExpr& e2); +template static inline +MatExpr operator / (const Mat& a, const Matx<_Tp, m, n>& b) { return a / Mat(b); } +template static inline +MatExpr operator / (const Matx<_Tp, m, n>& a, const Mat& b) { return Mat(a) / b; } + +CV_EXPORTS MatExpr operator < (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator < (const Mat& a, double s); +CV_EXPORTS MatExpr operator < (double s, const Mat& a); +template static inline +MatExpr operator < (const Mat& a, const Matx<_Tp, m, n>& b) { return a < Mat(b); } +template static inline +MatExpr operator < (const Matx<_Tp, m, n>& a, const Mat& b) { return Mat(a) < b; } + +CV_EXPORTS MatExpr operator <= (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator <= (const Mat& a, double s); +CV_EXPORTS 
MatExpr operator <= (double s, const Mat& a); +template static inline +MatExpr operator <= (const Mat& a, const Matx<_Tp, m, n>& b) { return a <= Mat(b); } +template static inline +MatExpr operator <= (const Matx<_Tp, m, n>& a, const Mat& b) { return Mat(a) <= b; } + +CV_EXPORTS MatExpr operator == (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator == (const Mat& a, double s); +CV_EXPORTS MatExpr operator == (double s, const Mat& a); +template static inline +MatExpr operator == (const Mat& a, const Matx<_Tp, m, n>& b) { return a == Mat(b); } +template static inline +MatExpr operator == (const Matx<_Tp, m, n>& a, const Mat& b) { return Mat(a) == b; } + +CV_EXPORTS MatExpr operator != (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator != (const Mat& a, double s); +CV_EXPORTS MatExpr operator != (double s, const Mat& a); +template static inline +MatExpr operator != (const Mat& a, const Matx<_Tp, m, n>& b) { return a != Mat(b); } +template static inline +MatExpr operator != (const Matx<_Tp, m, n>& a, const Mat& b) { return Mat(a) != b; } + +CV_EXPORTS MatExpr operator >= (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator >= (const Mat& a, double s); +CV_EXPORTS MatExpr operator >= (double s, const Mat& a); +template static inline +MatExpr operator >= (const Mat& a, const Matx<_Tp, m, n>& b) { return a >= Mat(b); } +template static inline +MatExpr operator >= (const Matx<_Tp, m, n>& a, const Mat& b) { return Mat(a) >= b; } + +CV_EXPORTS MatExpr operator > (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator > (const Mat& a, double s); +CV_EXPORTS MatExpr operator > (double s, const Mat& a); +template static inline +MatExpr operator > (const Mat& a, const Matx<_Tp, m, n>& b) { return a > Mat(b); } +template static inline +MatExpr operator > (const Matx<_Tp, m, n>& a, const Mat& b) { return Mat(a) > b; } + +CV_EXPORTS MatExpr operator & (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator & (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator & (const Scalar& s, const Mat& a); +template static inline +MatExpr operator & (const Mat& a, const Matx<_Tp, m, n>& b) { return a & Mat(b); } +template static inline +MatExpr operator & (const Matx<_Tp, m, n>& a, const Mat& b) { return Mat(a) & b; } + +CV_EXPORTS MatExpr operator | (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator | (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator | (const Scalar& s, const Mat& a); +template static inline +MatExpr operator | (const Mat& a, const Matx<_Tp, m, n>& b) { return a | Mat(b); } +template static inline +MatExpr operator | (const Matx<_Tp, m, n>& a, const Mat& b) { return Mat(a) | b; } + +CV_EXPORTS MatExpr operator ^ (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator ^ (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator ^ (const Scalar& s, const Mat& a); +template static inline +MatExpr operator ^ (const Mat& a, const Matx<_Tp, m, n>& b) { return a ^ Mat(b); } +template static inline +MatExpr operator ^ (const Matx<_Tp, m, n>& a, const Mat& b) { return Mat(a) ^ b; } + +CV_EXPORTS MatExpr operator ~(const Mat& m); + +CV_EXPORTS MatExpr min(const Mat& a, const Mat& b); +CV_EXPORTS MatExpr min(const Mat& a, double s); +CV_EXPORTS MatExpr min(double s, const Mat& a); +template static inline +MatExpr min (const Mat& a, const Matx<_Tp, m, n>& b) { return min(a, Mat(b)); } +template static inline +MatExpr min (const Matx<_Tp, m, n>& a, const Mat& b) { return min(Mat(a), b); } + +CV_EXPORTS MatExpr max(const Mat& a, const Mat& b); 
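A quick illustration of how the comparison and min/max expression templates declared in this block are typically used (an editorial sketch, not part of the upstream header; it assumes <opencv2/core.hpp> is included and uses made-up 2x2 matrices):
@code
    cv::Mat a = (cv::Mat_<float>(2, 2) << 1, 5, 3, 7);
    cv::Mat b = (cv::Mat_<float>(2, 2) << 4, 2, 3, 9);

    cv::Mat mask = a < b;           // CV_8U mask: 255 where a(i,j) < b(i,j), 0 elsewhere
    cv::Mat lo   = cv::min(a, b);   // per-element minimum of two matrices
    cv::Mat hi   = cv::max(a, 4.0); // per-element maximum against a scalar
@endcode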
+CV_EXPORTS MatExpr max(const Mat& a, double s); +CV_EXPORTS MatExpr max(double s, const Mat& a); +template static inline +MatExpr max (const Mat& a, const Matx<_Tp, m, n>& b) { return max(a, Mat(b)); } +template static inline +MatExpr max (const Matx<_Tp, m, n>& a, const Mat& b) { return max(Mat(a), b); } + +/** @brief Calculates an absolute value of each matrix element. + +abs is a meta-function that is expanded to one of absdiff or convertScaleAbs forms: +- C = abs(A-B) is equivalent to `absdiff(A, B, C)` +- C = abs(A) is equivalent to `absdiff(A, Scalar::all(0), C)` +- C = `Mat_ >(abs(A*alpha + beta))` is equivalent to `convertScaleAbs(A, C, alpha, +beta)` + +The output matrix has the same size and the same type as the input one except for the last case, +where C is depth=CV_8U . +@param m matrix. +@sa @ref MatrixExpressions, absdiff, convertScaleAbs + */ +CV_EXPORTS MatExpr abs(const Mat& m); +/** @overload +@param e matrix expression. +*/ +CV_EXPORTS MatExpr abs(const MatExpr& e); +//! @} relates cv::MatExpr + +} // cv + +#include "opencv2/core/mat.inl.hpp" + +#endif // OPENCV_CORE_MAT_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/mat.inl.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/mat.inl.hpp new file mode 100644 index 00000000..fffb860f --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/mat.inl.hpp @@ -0,0 +1,3533 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Copyright (C) 2015, Itseez Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_MATRIX_OPERATIONS_HPP +#define OPENCV_CORE_MATRIX_OPERATIONS_HPP + +#ifndef __cplusplus +# error mat.inl.hpp header must be compiled as C++ +#endif + +#ifdef _MSC_VER +#pragma warning( push ) +#pragma warning( disable: 4127 ) +#endif + +#if defined(CV_SKIP_DISABLE_CLANG_ENUM_WARNINGS) + // nothing +#elif defined(CV_FORCE_DISABLE_CLANG_ENUM_WARNINGS) + #define CV_DISABLE_CLANG_ENUM_WARNINGS +#elif defined(__clang__) && defined(__has_warning) + #if __has_warning("-Wdeprecated-enum-enum-conversion") && __has_warning("-Wdeprecated-anon-enum-enum-conversion") + #define CV_DISABLE_CLANG_ENUM_WARNINGS + #endif +#endif +#ifdef CV_DISABLE_CLANG_ENUM_WARNINGS +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-enum-enum-conversion" +#pragma clang diagnostic ignored "-Wdeprecated-anon-enum-enum-conversion" +#endif + +namespace cv +{ +CV__DEBUG_NS_BEGIN + + +//! @cond IGNORED + +////////////////////////// Custom (raw) type wrapper ////////////////////////// + +template static inline +int rawType() +{ + CV_StaticAssert(sizeof(_Tp) <= CV_CN_MAX, "sizeof(_Tp) is too large"); + const int elemSize = sizeof(_Tp); + return (int)CV_MAKETYPE(CV_8U, elemSize); +} + +//////////////////////// Input/Output Arrays //////////////////////// + +inline void _InputArray::init(int _flags, const void* _obj) +{ flags = _flags; obj = (void*)_obj; } + +inline void _InputArray::init(int _flags, const void* _obj, Size _sz) +{ flags = _flags; obj = (void*)_obj; sz = _sz; } + +inline void* _InputArray::getObj() const { return obj; } +inline int _InputArray::getFlags() const { return flags; } +inline Size _InputArray::getSz() const { return sz; } + +inline _InputArray::_InputArray() { init(NONE, 0); } +inline _InputArray::_InputArray(int _flags, void* _obj) { init(_flags, _obj); } +inline _InputArray::_InputArray(const Mat& m) { init(MAT+ACCESS_READ, &m); } +inline _InputArray::_InputArray(const std::vector& vec) { init(STD_VECTOR_MAT+ACCESS_READ, &vec); } +inline _InputArray::_InputArray(const UMat& m) { init(UMAT+ACCESS_READ, &m); } +inline _InputArray::_InputArray(const std::vector& vec) { init(STD_VECTOR_UMAT+ACCESS_READ, &vec); } + +template inline +_InputArray::_InputArray(const std::vector<_Tp>& vec) +{ init(FIXED_TYPE + STD_VECTOR + traits::Type<_Tp>::value + ACCESS_READ, &vec); } + +#ifdef CV_CXX_STD_ARRAY +template inline +_InputArray::_InputArray(const std::array<_Tp, _Nm>& arr) +{ init(FIXED_TYPE + FIXED_SIZE + MATX + traits::Type<_Tp>::value + ACCESS_READ, arr.data(), Size(1, _Nm)); } + +template inline +_InputArray::_InputArray(const std::array& arr) +{ init(STD_ARRAY_MAT + ACCESS_READ, arr.data(), Size(1, _Nm)); } +#endif + +inline +_InputArray::_InputArray(const std::vector& vec) +{ init(FIXED_TYPE + STD_BOOL_VECTOR + traits::Type::value + ACCESS_READ, &vec); } + +template inline +_InputArray::_InputArray(const std::vector >& vec) +{ init(FIXED_TYPE + STD_VECTOR_VECTOR + traits::Type<_Tp>::value + ACCESS_READ, &vec); } + 
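These implicit _InputArray constructors are what allow ordinary containers to be passed straight to any function that takes cv::InputArray. A minimal sketch of that effect (editorial illustration, not upstream code; assumes <opencv2/core.hpp> is included):
@code
    std::vector<float> v = {1.f, 2.f, 3.f, 4.f};
    double total = cv::sum(v)[0];           // binds via _InputArray(const std::vector<_Tp>&)
    cv::Matx22f m(1.f, 2.f, 3.f, 4.f);
    double l2 = cv::norm(m, cv::NORM_L2);   // binds via the Matx _InputArray constructor
@endcode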
+inline +_InputArray::_InputArray(const std::vector >&) +{ CV_Error(Error::StsUnsupportedFormat, "std::vector > is not supported!\n"); } + +template inline +_InputArray::_InputArray(const std::vector >& vec) +{ init(FIXED_TYPE + STD_VECTOR_MAT + traits::Type<_Tp>::value + ACCESS_READ, &vec); } + +template inline +_InputArray::_InputArray(const Matx<_Tp, m, n>& mtx) +{ init(FIXED_TYPE + FIXED_SIZE + MATX + traits::Type<_Tp>::value + ACCESS_READ, &mtx, Size(n, m)); } + +template inline +_InputArray::_InputArray(const _Tp* vec, int n) +{ init(FIXED_TYPE + FIXED_SIZE + MATX + traits::Type<_Tp>::value + ACCESS_READ, vec, Size(n, 1)); } + +template inline +_InputArray::_InputArray(const Mat_<_Tp>& m) +{ init(FIXED_TYPE + MAT + traits::Type<_Tp>::value + ACCESS_READ, &m); } + +inline _InputArray::_InputArray(const double& val) +{ init(FIXED_TYPE + FIXED_SIZE + MATX + CV_64F + ACCESS_READ, &val, Size(1,1)); } + +inline _InputArray::_InputArray(const cuda::GpuMat& d_mat) +{ init(CUDA_GPU_MAT + ACCESS_READ, &d_mat); } + +inline _InputArray::_InputArray(const std::vector& d_mat) +{ init(STD_VECTOR_CUDA_GPU_MAT + ACCESS_READ, &d_mat);} + +inline _InputArray::_InputArray(const ogl::Buffer& buf) +{ init(OPENGL_BUFFER + ACCESS_READ, &buf); } + +inline _InputArray::_InputArray(const cuda::HostMem& cuda_mem) +{ init(CUDA_HOST_MEM + ACCESS_READ, &cuda_mem); } + +template inline +_InputArray _InputArray::rawIn(const std::vector<_Tp>& vec) +{ + _InputArray v; + v.flags = _InputArray::FIXED_TYPE + _InputArray::STD_VECTOR + rawType<_Tp>() + ACCESS_READ; + v.obj = (void*)&vec; + return v; +} + +#ifdef CV_CXX_STD_ARRAY +template inline +_InputArray _InputArray::rawIn(const std::array<_Tp, _Nm>& arr) +{ + _InputArray v; + v.flags = FIXED_TYPE + FIXED_SIZE + MATX + traits::Type<_Tp>::value + ACCESS_READ; + v.obj = (void*)arr.data(); + v.sz = Size(1, _Nm); + return v; +} +#endif + +inline _InputArray::~_InputArray() {} + +inline Mat _InputArray::getMat(int i) const +{ + if( kind() == MAT && i < 0 ) + return *(const Mat*)obj; + return getMat_(i); +} + +inline bool _InputArray::isMat() const { return kind() == _InputArray::MAT; } +inline bool _InputArray::isUMat() const { return kind() == _InputArray::UMAT; } +inline bool _InputArray::isMatVector() const { return kind() == _InputArray::STD_VECTOR_MAT; } +inline bool _InputArray::isUMatVector() const { return kind() == _InputArray::STD_VECTOR_UMAT; } +inline bool _InputArray::isMatx() const { return kind() == _InputArray::MATX; } +inline bool _InputArray::isVector() const { return kind() == _InputArray::STD_VECTOR || + kind() == _InputArray::STD_BOOL_VECTOR || + (kind() == _InputArray::MATX && (sz.width <= 1 || sz.height <= 1)); } +inline bool _InputArray::isGpuMat() const { return kind() == _InputArray::CUDA_GPU_MAT; } +inline bool _InputArray::isGpuMatVector() const { return kind() == _InputArray::STD_VECTOR_CUDA_GPU_MAT; } + +//////////////////////////////////////////////////////////////////////////////////////// + +inline _OutputArray::_OutputArray() { init(ACCESS_WRITE, 0); } +inline _OutputArray::_OutputArray(int _flags, void* _obj) { init(_flags|ACCESS_WRITE, _obj); } +inline _OutputArray::_OutputArray(Mat& m) { init(MAT+ACCESS_WRITE, &m); } +inline _OutputArray::_OutputArray(std::vector& vec) { init(STD_VECTOR_MAT+ACCESS_WRITE, &vec); } +inline _OutputArray::_OutputArray(UMat& m) { init(UMAT+ACCESS_WRITE, &m); } +inline _OutputArray::_OutputArray(std::vector& vec) { init(STD_VECTOR_UMAT+ACCESS_WRITE, &vec); } + +template inline 
+_OutputArray::_OutputArray(std::vector<_Tp>& vec) +{ init(FIXED_TYPE + STD_VECTOR + traits::Type<_Tp>::value + ACCESS_WRITE, &vec); } + +#ifdef CV_CXX_STD_ARRAY +template inline +_OutputArray::_OutputArray(std::array<_Tp, _Nm>& arr) +{ init(FIXED_TYPE + FIXED_SIZE + MATX + traits::Type<_Tp>::value + ACCESS_WRITE, arr.data(), Size(1, _Nm)); } + +template inline +_OutputArray::_OutputArray(std::array& arr) +{ init(STD_ARRAY_MAT + ACCESS_WRITE, arr.data(), Size(1, _Nm)); } +#endif + +inline +_OutputArray::_OutputArray(std::vector&) +{ CV_Error(Error::StsUnsupportedFormat, "std::vector cannot be an output array\n"); } + +template inline +_OutputArray::_OutputArray(std::vector >& vec) +{ init(FIXED_TYPE + STD_VECTOR_VECTOR + traits::Type<_Tp>::value + ACCESS_WRITE, &vec); } + +inline +_OutputArray::_OutputArray(std::vector >&) +{ CV_Error(Error::StsUnsupportedFormat, "std::vector > cannot be an output array\n"); } + +template inline +_OutputArray::_OutputArray(std::vector >& vec) +{ init(FIXED_TYPE + STD_VECTOR_MAT + traits::Type<_Tp>::value + ACCESS_WRITE, &vec); } + +template inline +_OutputArray::_OutputArray(Mat_<_Tp>& m) +{ init(FIXED_TYPE + MAT + traits::Type<_Tp>::value + ACCESS_WRITE, &m); } + +template inline +_OutputArray::_OutputArray(Matx<_Tp, m, n>& mtx) +{ init(FIXED_TYPE + FIXED_SIZE + MATX + traits::Type<_Tp>::value + ACCESS_WRITE, &mtx, Size(n, m)); } + +template inline +_OutputArray::_OutputArray(_Tp* vec, int n) +{ init(FIXED_TYPE + FIXED_SIZE + MATX + traits::Type<_Tp>::value + ACCESS_WRITE, vec, Size(n, 1)); } + +template inline +_OutputArray::_OutputArray(const std::vector<_Tp>& vec) +{ init(FIXED_TYPE + FIXED_SIZE + STD_VECTOR + traits::Type<_Tp>::value + ACCESS_WRITE, &vec); } + +#ifdef CV_CXX_STD_ARRAY +template inline +_OutputArray::_OutputArray(const std::array<_Tp, _Nm>& arr) +{ init(FIXED_TYPE + FIXED_SIZE + MATX + traits::Type<_Tp>::value + ACCESS_WRITE, arr.data(), Size(1, _Nm)); } + +template inline +_OutputArray::_OutputArray(const std::array& arr) +{ init(FIXED_SIZE + STD_ARRAY_MAT + ACCESS_WRITE, arr.data(), Size(1, _Nm)); } +#endif + +template inline +_OutputArray::_OutputArray(const std::vector >& vec) +{ init(FIXED_TYPE + FIXED_SIZE + STD_VECTOR_VECTOR + traits::Type<_Tp>::value + ACCESS_WRITE, &vec); } + +template inline +_OutputArray::_OutputArray(const std::vector >& vec) +{ init(FIXED_TYPE + FIXED_SIZE + STD_VECTOR_MAT + traits::Type<_Tp>::value + ACCESS_WRITE, &vec); } + +template inline +_OutputArray::_OutputArray(const Mat_<_Tp>& m) +{ init(FIXED_TYPE + FIXED_SIZE + MAT + traits::Type<_Tp>::value + ACCESS_WRITE, &m); } + +template inline +_OutputArray::_OutputArray(const Matx<_Tp, m, n>& mtx) +{ init(FIXED_TYPE + FIXED_SIZE + MATX + traits::Type<_Tp>::value + ACCESS_WRITE, &mtx, Size(n, m)); } + +template inline +_OutputArray::_OutputArray(const _Tp* vec, int n) +{ init(FIXED_TYPE + FIXED_SIZE + MATX + traits::Type<_Tp>::value + ACCESS_WRITE, vec, Size(n, 1)); } + +inline _OutputArray::_OutputArray(cuda::GpuMat& d_mat) +{ init(CUDA_GPU_MAT + ACCESS_WRITE, &d_mat); } + +inline _OutputArray::_OutputArray(std::vector& d_mat) +{ init(STD_VECTOR_CUDA_GPU_MAT + ACCESS_WRITE, &d_mat);} + +inline _OutputArray::_OutputArray(ogl::Buffer& buf) +{ init(OPENGL_BUFFER + ACCESS_WRITE, &buf); } + +inline _OutputArray::_OutputArray(cuda::HostMem& cuda_mem) +{ init(CUDA_HOST_MEM + ACCESS_WRITE, &cuda_mem); } + +inline _OutputArray::_OutputArray(const Mat& m) +{ init(FIXED_TYPE + FIXED_SIZE + MAT + ACCESS_WRITE, &m); } + +inline _OutputArray::_OutputArray(const 
std::vector& vec) +{ init(FIXED_SIZE + STD_VECTOR_MAT + ACCESS_WRITE, &vec); } + +inline _OutputArray::_OutputArray(const UMat& m) +{ init(FIXED_TYPE + FIXED_SIZE + UMAT + ACCESS_WRITE, &m); } + +inline _OutputArray::_OutputArray(const std::vector& vec) +{ init(FIXED_SIZE + STD_VECTOR_UMAT + ACCESS_WRITE, &vec); } + +inline _OutputArray::_OutputArray(const cuda::GpuMat& d_mat) +{ init(FIXED_TYPE + FIXED_SIZE + CUDA_GPU_MAT + ACCESS_WRITE, &d_mat); } + + +inline _OutputArray::_OutputArray(const ogl::Buffer& buf) +{ init(FIXED_TYPE + FIXED_SIZE + OPENGL_BUFFER + ACCESS_WRITE, &buf); } + +inline _OutputArray::_OutputArray(const cuda::HostMem& cuda_mem) +{ init(FIXED_TYPE + FIXED_SIZE + CUDA_HOST_MEM + ACCESS_WRITE, &cuda_mem); } + +template inline +_OutputArray _OutputArray::rawOut(std::vector<_Tp>& vec) +{ + _OutputArray v; + v.flags = _InputArray::FIXED_TYPE + _InputArray::STD_VECTOR + rawType<_Tp>() + ACCESS_WRITE; + v.obj = (void*)&vec; + return v; +} + +#ifdef CV_CXX_STD_ARRAY +template inline +_OutputArray _OutputArray::rawOut(std::array<_Tp, _Nm>& arr) +{ + _OutputArray v; + v.flags = FIXED_TYPE + FIXED_SIZE + MATX + traits::Type<_Tp>::value + ACCESS_WRITE; + v.obj = (void*)arr.data(); + v.sz = Size(1, _Nm); + return v; +} +#endif + +/////////////////////////////////////////////////////////////////////////////////////////// + +inline _InputOutputArray::_InputOutputArray() { init(ACCESS_RW, 0); } +inline _InputOutputArray::_InputOutputArray(int _flags, void* _obj) { init(_flags|ACCESS_RW, _obj); } +inline _InputOutputArray::_InputOutputArray(Mat& m) { init(MAT+ACCESS_RW, &m); } +inline _InputOutputArray::_InputOutputArray(std::vector& vec) { init(STD_VECTOR_MAT+ACCESS_RW, &vec); } +inline _InputOutputArray::_InputOutputArray(UMat& m) { init(UMAT+ACCESS_RW, &m); } +inline _InputOutputArray::_InputOutputArray(std::vector& vec) { init(STD_VECTOR_UMAT+ACCESS_RW, &vec); } + +template inline +_InputOutputArray::_InputOutputArray(std::vector<_Tp>& vec) +{ init(FIXED_TYPE + STD_VECTOR + traits::Type<_Tp>::value + ACCESS_RW, &vec); } + +#ifdef CV_CXX_STD_ARRAY +template inline +_InputOutputArray::_InputOutputArray(std::array<_Tp, _Nm>& arr) +{ init(FIXED_TYPE + FIXED_SIZE + MATX + traits::Type<_Tp>::value + ACCESS_RW, arr.data(), Size(1, _Nm)); } + +template inline +_InputOutputArray::_InputOutputArray(std::array& arr) +{ init(STD_ARRAY_MAT + ACCESS_RW, arr.data(), Size(1, _Nm)); } +#endif + +inline _InputOutputArray::_InputOutputArray(std::vector&) +{ CV_Error(Error::StsUnsupportedFormat, "std::vector cannot be an input/output array\n"); } + +template inline +_InputOutputArray::_InputOutputArray(std::vector >& vec) +{ init(FIXED_TYPE + STD_VECTOR_VECTOR + traits::Type<_Tp>::value + ACCESS_RW, &vec); } + +template inline +_InputOutputArray::_InputOutputArray(std::vector >& vec) +{ init(FIXED_TYPE + STD_VECTOR_MAT + traits::Type<_Tp>::value + ACCESS_RW, &vec); } + +template inline +_InputOutputArray::_InputOutputArray(Mat_<_Tp>& m) +{ init(FIXED_TYPE + MAT + traits::Type<_Tp>::value + ACCESS_RW, &m); } + +template inline +_InputOutputArray::_InputOutputArray(Matx<_Tp, m, n>& mtx) +{ init(FIXED_TYPE + FIXED_SIZE + MATX + traits::Type<_Tp>::value + ACCESS_RW, &mtx, Size(n, m)); } + +template inline +_InputOutputArray::_InputOutputArray(_Tp* vec, int n) +{ init(FIXED_TYPE + FIXED_SIZE + MATX + traits::Type<_Tp>::value + ACCESS_RW, vec, Size(n, 1)); } + +template inline +_InputOutputArray::_InputOutputArray(const std::vector<_Tp>& vec) +{ init(FIXED_TYPE + FIXED_SIZE + STD_VECTOR + 
traits::Type<_Tp>::value + ACCESS_RW, &vec); } + +#ifdef CV_CXX_STD_ARRAY +template inline +_InputOutputArray::_InputOutputArray(const std::array<_Tp, _Nm>& arr) +{ init(FIXED_TYPE + FIXED_SIZE + MATX + traits::Type<_Tp>::value + ACCESS_RW, arr.data(), Size(1, _Nm)); } + +template inline +_InputOutputArray::_InputOutputArray(const std::array& arr) +{ init(FIXED_SIZE + STD_ARRAY_MAT + ACCESS_RW, arr.data(), Size(1, _Nm)); } +#endif + +template inline +_InputOutputArray::_InputOutputArray(const std::vector >& vec) +{ init(FIXED_TYPE + FIXED_SIZE + STD_VECTOR_VECTOR + traits::Type<_Tp>::value + ACCESS_RW, &vec); } + +template inline +_InputOutputArray::_InputOutputArray(const std::vector >& vec) +{ init(FIXED_TYPE + FIXED_SIZE + STD_VECTOR_MAT + traits::Type<_Tp>::value + ACCESS_RW, &vec); } + +template inline +_InputOutputArray::_InputOutputArray(const Mat_<_Tp>& m) +{ init(FIXED_TYPE + FIXED_SIZE + MAT + traits::Type<_Tp>::value + ACCESS_RW, &m); } + +template inline +_InputOutputArray::_InputOutputArray(const Matx<_Tp, m, n>& mtx) +{ init(FIXED_TYPE + FIXED_SIZE + MATX + traits::Type<_Tp>::value + ACCESS_RW, &mtx, Size(n, m)); } + +template inline +_InputOutputArray::_InputOutputArray(const _Tp* vec, int n) +{ init(FIXED_TYPE + FIXED_SIZE + MATX + traits::Type<_Tp>::value + ACCESS_RW, vec, Size(n, 1)); } + +inline _InputOutputArray::_InputOutputArray(cuda::GpuMat& d_mat) +{ init(CUDA_GPU_MAT + ACCESS_RW, &d_mat); } + +inline _InputOutputArray::_InputOutputArray(ogl::Buffer& buf) +{ init(OPENGL_BUFFER + ACCESS_RW, &buf); } + +inline _InputOutputArray::_InputOutputArray(cuda::HostMem& cuda_mem) +{ init(CUDA_HOST_MEM + ACCESS_RW, &cuda_mem); } + +inline _InputOutputArray::_InputOutputArray(const Mat& m) +{ init(FIXED_TYPE + FIXED_SIZE + MAT + ACCESS_RW, &m); } + +inline _InputOutputArray::_InputOutputArray(const std::vector& vec) +{ init(FIXED_SIZE + STD_VECTOR_MAT + ACCESS_RW, &vec); } + +inline _InputOutputArray::_InputOutputArray(const UMat& m) +{ init(FIXED_TYPE + FIXED_SIZE + UMAT + ACCESS_RW, &m); } + +inline _InputOutputArray::_InputOutputArray(const std::vector& vec) +{ init(FIXED_SIZE + STD_VECTOR_UMAT + ACCESS_RW, &vec); } + +inline _InputOutputArray::_InputOutputArray(const cuda::GpuMat& d_mat) +{ init(FIXED_TYPE + FIXED_SIZE + CUDA_GPU_MAT + ACCESS_RW, &d_mat); } + +inline _InputOutputArray::_InputOutputArray(const std::vector& d_mat) +{ init(FIXED_TYPE + FIXED_SIZE + STD_VECTOR_CUDA_GPU_MAT + ACCESS_RW, &d_mat);} + +template<> inline _InputOutputArray::_InputOutputArray(std::vector& d_mat) +{ init(FIXED_TYPE + FIXED_SIZE + STD_VECTOR_CUDA_GPU_MAT + ACCESS_RW, &d_mat);} + +inline _InputOutputArray::_InputOutputArray(const ogl::Buffer& buf) +{ init(FIXED_TYPE + FIXED_SIZE + OPENGL_BUFFER + ACCESS_RW, &buf); } + +inline _InputOutputArray::_InputOutputArray(const cuda::HostMem& cuda_mem) +{ init(FIXED_TYPE + FIXED_SIZE + CUDA_HOST_MEM + ACCESS_RW, &cuda_mem); } + +template inline +_InputOutputArray _InputOutputArray::rawInOut(std::vector<_Tp>& vec) +{ + _InputOutputArray v; + v.flags = _InputArray::FIXED_TYPE + _InputArray::STD_VECTOR + rawType<_Tp>() + ACCESS_RW; + v.obj = (void*)&vec; + return v; +} + +#ifdef CV_CXX_STD_ARRAY +template inline +_InputOutputArray _InputOutputArray::rawInOut(std::array<_Tp, _Nm>& arr) +{ + _InputOutputArray v; + v.flags = FIXED_TYPE + FIXED_SIZE + MATX + traits::Type<_Tp>::value + ACCESS_RW; + v.obj = (void*)arr.data(); + v.sz = Size(1, _Nm); + return v; +} +#endif + + +template static inline _InputArray rawIn(_Tp& v) { return _InputArray::rawIn(v); } 
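The raw wrappers defined here (rawIn above, rawOut/rawInOut just below) expose a std::vector of an arbitrary trivially-copyable element type as an untyped array of CV_8UC(sizeof(_Tp)) elements. A minimal sketch, assuming these helpers are reachable as cv::rawIn in the build being configured and using a hypothetical POD struct:
@code
    struct MyRecord { int id; float score; };  // hypothetical type, not part of OpenCV
    std::vector<MyRecord> records(16);
    auto wrapped = cv::rawIn(records);          // flags: FIXED_TYPE + STD_VECTOR + rawType<MyRecord>() + ACCESS_READ
    CV_Assert(wrapped.isVector());
@endcode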
+template static inline _OutputArray rawOut(_Tp& v) { return _OutputArray::rawOut(v); } +template static inline _InputOutputArray rawInOut(_Tp& v) { return _InputOutputArray::rawInOut(v); } + +CV__DEBUG_NS_END + +//////////////////////////////////////////// Mat ////////////////////////////////////////// + +template inline +Mat::Mat(const std::vector<_Tp>& vec, bool copyData) + : flags(MAGIC_VAL | traits::Type<_Tp>::value | CV_MAT_CONT_FLAG), dims(2), rows((int)vec.size()), + cols(1), data(0), datastart(0), dataend(0), datalimit(0), allocator(0), u(0), size(&rows), step(0) +{ + if(vec.empty()) + return; + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + datastart = data = (uchar*)&vec[0]; + datalimit = dataend = datastart + rows * step[0]; + } + else + Mat((int)vec.size(), 1, traits::Type<_Tp>::value, (uchar*)&vec[0]).copyTo(*this); +} + +#ifdef CV_CXX11 +template inline +Mat::Mat(const std::initializer_list<_Tp> list) + : Mat() +{ + CV_Assert(list.size() != 0); + Mat((int)list.size(), 1, traits::Type<_Tp>::value, (uchar*)list.begin()).copyTo(*this); +} + +template inline +Mat::Mat(const std::initializer_list sizes, const std::initializer_list<_Tp> list) + : Mat() +{ + size_t size_total = 1; + for(auto s : sizes) + size_total *= s; + CV_Assert(list.size() != 0); + CV_Assert(size_total == list.size()); + Mat((int)sizes.size(), (int*)sizes.begin(), traits::Type<_Tp>::value, (uchar*)list.begin()).copyTo(*this); +} +#endif + +#ifdef CV_CXX_STD_ARRAY +template inline +Mat::Mat(const std::array<_Tp, _Nm>& arr, bool copyData) + : flags(MAGIC_VAL | traits::Type<_Tp>::value | CV_MAT_CONT_FLAG), dims(2), rows((int)arr.size()), + cols(1), data(0), datastart(0), dataend(0), datalimit(0), allocator(0), u(0), size(&rows), step(0) +{ + if(arr.empty()) + return; + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + datastart = data = (uchar*)arr.data(); + datalimit = dataend = datastart + rows * step[0]; + } + else + Mat((int)arr.size(), 1, traits::Type<_Tp>::value, (uchar*)arr.data()).copyTo(*this); +} +#endif + +template inline +Mat::Mat(const Vec<_Tp, n>& vec, bool copyData) + : flags(MAGIC_VAL | traits::Type<_Tp>::value | CV_MAT_CONT_FLAG), dims(2), rows(n), cols(1), data(0), + datastart(0), dataend(0), datalimit(0), allocator(0), u(0), size(&rows), step(0) +{ + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + datastart = data = (uchar*)vec.val; + datalimit = dataend = datastart + rows * step[0]; + } + else + Mat(n, 1, traits::Type<_Tp>::value, (void*)vec.val).copyTo(*this); +} + + +template inline +Mat::Mat(const Matx<_Tp,m,n>& M, bool copyData) + : flags(MAGIC_VAL | traits::Type<_Tp>::value | CV_MAT_CONT_FLAG), dims(2), rows(m), cols(n), data(0), + datastart(0), dataend(0), datalimit(0), allocator(0), u(0), size(&rows), step(0) +{ + if( !copyData ) + { + step[0] = cols * sizeof(_Tp); + step[1] = sizeof(_Tp); + datastart = data = (uchar*)M.val; + datalimit = dataend = datastart + rows * step[0]; + } + else + Mat(m, n, traits::Type<_Tp>::value, (uchar*)M.val).copyTo(*this); +} + +template inline +Mat::Mat(const Point_<_Tp>& pt, bool copyData) + : flags(MAGIC_VAL | traits::Type<_Tp>::value | CV_MAT_CONT_FLAG), dims(2), rows(2), cols(1), data(0), + datastart(0), dataend(0), datalimit(0), allocator(0), u(0), size(&rows), step(0) +{ + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + datastart = data = (uchar*)&pt.x; + datalimit = dataend = datastart + rows * step[0]; + } + else + { + create(2, 1, traits::Type<_Tp>::value); + ((_Tp*)data)[0] = pt.x; + ((_Tp*)data)[1] = pt.y; + } 
+} + +template inline +Mat::Mat(const Point3_<_Tp>& pt, bool copyData) + : flags(MAGIC_VAL | traits::Type<_Tp>::value | CV_MAT_CONT_FLAG), dims(2), rows(3), cols(1), data(0), + datastart(0), dataend(0), datalimit(0), allocator(0), u(0), size(&rows), step(0) +{ + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + datastart = data = (uchar*)&pt.x; + datalimit = dataend = datastart + rows * step[0]; + } + else + { + create(3, 1, traits::Type<_Tp>::value); + ((_Tp*)data)[0] = pt.x; + ((_Tp*)data)[1] = pt.y; + ((_Tp*)data)[2] = pt.z; + } +} + +template inline +Mat::Mat(const MatCommaInitializer_<_Tp>& commaInitializer) + : flags(MAGIC_VAL | traits::Type<_Tp>::value | CV_MAT_CONT_FLAG), dims(0), rows(0), cols(0), data(0), + datastart(0), dataend(0), allocator(0), u(0), size(&rows) +{ + *this = commaInitializer.operator Mat_<_Tp>(); +} + +inline +Mat Mat::row(int y) const +{ + return Mat(*this, Range(y, y + 1), Range::all()); +} + +inline +Mat Mat::col(int x) const +{ + return Mat(*this, Range::all(), Range(x, x + 1)); +} + +inline +Mat Mat::rowRange(int startrow, int endrow) const +{ + return Mat(*this, Range(startrow, endrow), Range::all()); +} + +inline +Mat Mat::rowRange(const Range& r) const +{ + return Mat(*this, r, Range::all()); +} + +inline +Mat Mat::colRange(int startcol, int endcol) const +{ + return Mat(*this, Range::all(), Range(startcol, endcol)); +} + +inline +Mat Mat::colRange(const Range& r) const +{ + return Mat(*this, Range::all(), r); +} + +inline +Mat Mat::operator()( Range _rowRange, Range _colRange ) const +{ + return Mat(*this, _rowRange, _colRange); +} + +inline +Mat Mat::operator()( const Rect& roi ) const +{ + return Mat(*this, roi); +} + +inline +Mat Mat::operator()(const Range* ranges) const +{ + return Mat(*this, ranges); +} + +inline +Mat Mat::operator()(const std::vector& ranges) const +{ + return Mat(*this, ranges); +} + +inline +bool Mat::isContinuous() const +{ + return (flags & CONTINUOUS_FLAG) != 0; +} + +inline +bool Mat::isSubmatrix() const +{ + return (flags & SUBMATRIX_FLAG) != 0; +} + +inline +size_t Mat::elemSize() const +{ + size_t res = dims > 0 ? 
step.p[dims - 1] : 0; + CV_DbgAssert(res != 0); + return res; +} + +inline +size_t Mat::elemSize1() const +{ + return CV_ELEM_SIZE1(flags); +} + +inline +int Mat::type() const +{ + return CV_MAT_TYPE(flags); +} + +inline +int Mat::depth() const +{ + return CV_MAT_DEPTH(flags); +} + +inline +int Mat::channels() const +{ + return CV_MAT_CN(flags); +} + +inline +uchar* Mat::ptr(int y) +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) ); + return data + step.p[0] * y; +} + +inline +const uchar* Mat::ptr(int y) const +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) ); + return data + step.p[0] * y; +} + +template inline +_Tp* Mat::ptr(int y) +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) ); + return (_Tp*)(data + step.p[0] * y); +} + +template inline +const _Tp* Mat::ptr(int y) const +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) ); + return (const _Tp*)(data + step.p[0] * y); +} + +inline +uchar* Mat::ptr(int i0, int i1) +{ + CV_DbgAssert(dims >= 2); + CV_DbgAssert(data); + CV_DbgAssert((unsigned)i0 < (unsigned)size.p[0]); + CV_DbgAssert((unsigned)i1 < (unsigned)size.p[1]); + return data + i0 * step.p[0] + i1 * step.p[1]; +} + +inline +const uchar* Mat::ptr(int i0, int i1) const +{ + CV_DbgAssert(dims >= 2); + CV_DbgAssert(data); + CV_DbgAssert((unsigned)i0 < (unsigned)size.p[0]); + CV_DbgAssert((unsigned)i1 < (unsigned)size.p[1]); + return data + i0 * step.p[0] + i1 * step.p[1]; +} + +template inline +_Tp* Mat::ptr(int i0, int i1) +{ + CV_DbgAssert(dims >= 2); + CV_DbgAssert(data); + CV_DbgAssert((unsigned)i0 < (unsigned)size.p[0]); + CV_DbgAssert((unsigned)i1 < (unsigned)size.p[1]); + return (_Tp*)(data + i0 * step.p[0] + i1 * step.p[1]); +} + +template inline +const _Tp* Mat::ptr(int i0, int i1) const +{ + CV_DbgAssert(dims >= 2); + CV_DbgAssert(data); + CV_DbgAssert((unsigned)i0 < (unsigned)size.p[0]); + CV_DbgAssert((unsigned)i1 < (unsigned)size.p[1]); + return (const _Tp*)(data + i0 * step.p[0] + i1 * step.p[1]); +} + +inline +uchar* Mat::ptr(int i0, int i1, int i2) +{ + CV_DbgAssert(dims >= 3); + CV_DbgAssert(data); + CV_DbgAssert((unsigned)i0 < (unsigned)size.p[0]); + CV_DbgAssert((unsigned)i1 < (unsigned)size.p[1]); + CV_DbgAssert((unsigned)i2 < (unsigned)size.p[2]); + return data + i0 * step.p[0] + i1 * step.p[1] + i2 * step.p[2]; +} + +inline +const uchar* Mat::ptr(int i0, int i1, int i2) const +{ + CV_DbgAssert(dims >= 3); + CV_DbgAssert(data); + CV_DbgAssert((unsigned)i0 < (unsigned)size.p[0]); + CV_DbgAssert((unsigned)i1 < (unsigned)size.p[1]); + CV_DbgAssert((unsigned)i2 < (unsigned)size.p[2]); + return data + i0 * step.p[0] + i1 * step.p[1] + i2 * step.p[2]; +} + +template inline +_Tp* Mat::ptr(int i0, int i1, int i2) +{ + CV_DbgAssert(dims >= 3); + CV_DbgAssert(data); + CV_DbgAssert((unsigned)i0 < (unsigned)size.p[0]); + CV_DbgAssert((unsigned)i1 < (unsigned)size.p[1]); + CV_DbgAssert((unsigned)i2 < (unsigned)size.p[2]); + return (_Tp*)(data + i0 * step.p[0] + i1 * step.p[1] + i2 * step.p[2]); +} + +template inline +const _Tp* Mat::ptr(int i0, int i1, int i2) const +{ + CV_DbgAssert(dims >= 3); + CV_DbgAssert(data); + CV_DbgAssert((unsigned)i0 < (unsigned)size.p[0]); + CV_DbgAssert((unsigned)i1 < (unsigned)size.p[1]); + CV_DbgAssert((unsigned)i2 < (unsigned)size.p[2]); + return (const _Tp*)(data + i0 * step.p[0] + i1 * step.p[1] + i2 * step.p[2]); +} + +inline +uchar* Mat::ptr(const int* idx) +{ + int i, d = dims; + uchar* p = 
data; + CV_DbgAssert( d >= 1 && p ); + for( i = 0; i < d; i++ ) + { + CV_DbgAssert( (unsigned)idx[i] < (unsigned)size.p[i] ); + p += idx[i] * step.p[i]; + } + return p; +} + +inline +const uchar* Mat::ptr(const int* idx) const +{ + int i, d = dims; + uchar* p = data; + CV_DbgAssert( d >= 1 && p ); + for( i = 0; i < d; i++ ) + { + CV_DbgAssert( (unsigned)idx[i] < (unsigned)size.p[i] ); + p += idx[i] * step.p[i]; + } + return p; +} + +template inline +_Tp* Mat::ptr(const int* idx) +{ + int i, d = dims; + uchar* p = data; + CV_DbgAssert( d >= 1 && p ); + for( i = 0; i < d; i++ ) + { + CV_DbgAssert( (unsigned)idx[i] < (unsigned)size.p[i] ); + p += idx[i] * step.p[i]; + } + return (_Tp*)p; +} + +template inline +const _Tp* Mat::ptr(const int* idx) const +{ + int i, d = dims; + uchar* p = data; + CV_DbgAssert( d >= 1 && p ); + for( i = 0; i < d; i++ ) + { + CV_DbgAssert( (unsigned)idx[i] < (unsigned)size.p[i] ); + p += idx[i] * step.p[i]; + } + return (const _Tp*)p; +} + +template inline +uchar* Mat::ptr(const Vec& idx) +{ + return Mat::ptr(idx.val); +} + +template inline +const uchar* Mat::ptr(const Vec& idx) const +{ + return Mat::ptr(idx.val); +} + +template inline +_Tp* Mat::ptr(const Vec& idx) +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + return Mat::ptr<_Tp>(idx.val); +} + +template inline +const _Tp* Mat::ptr(const Vec& idx) const +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + return Mat::ptr<_Tp>(idx.val); +} + + +template inline +_Tp& Mat::at(int i0, int i1) +{ + CV_DbgAssert(dims <= 2); + CV_DbgAssert(data); + CV_DbgAssert((unsigned)i0 < (unsigned)size.p[0]); + CV_DbgAssert((unsigned)(i1 * DataType<_Tp>::channels) < (unsigned)(size.p[1] * channels())); + CV_DbgAssert(CV_ELEM_SIZE1(traits::Depth<_Tp>::value) == elemSize1()); + return ((_Tp*)(data + step.p[0] * i0))[i1]; +} + +template inline +const _Tp& Mat::at(int i0, int i1) const +{ + CV_DbgAssert(dims <= 2); + CV_DbgAssert(data); + CV_DbgAssert((unsigned)i0 < (unsigned)size.p[0]); + CV_DbgAssert((unsigned)(i1 * DataType<_Tp>::channels) < (unsigned)(size.p[1] * channels())); + CV_DbgAssert(CV_ELEM_SIZE1(traits::Depth<_Tp>::value) == elemSize1()); + return ((const _Tp*)(data + step.p[0] * i0))[i1]; +} + +template inline +_Tp& Mat::at(Point pt) +{ + CV_DbgAssert(dims <= 2); + CV_DbgAssert(data); + CV_DbgAssert((unsigned)pt.y < (unsigned)size.p[0]); + CV_DbgAssert((unsigned)(pt.x * DataType<_Tp>::channels) < (unsigned)(size.p[1] * channels())); + CV_DbgAssert(CV_ELEM_SIZE1(traits::Depth<_Tp>::value) == elemSize1()); + return ((_Tp*)(data + step.p[0] * pt.y))[pt.x]; +} + +template inline +const _Tp& Mat::at(Point pt) const +{ + CV_DbgAssert(dims <= 2); + CV_DbgAssert(data); + CV_DbgAssert((unsigned)pt.y < (unsigned)size.p[0]); + CV_DbgAssert((unsigned)(pt.x * DataType<_Tp>::channels) < (unsigned)(size.p[1] * channels())); + CV_DbgAssert(CV_ELEM_SIZE1(traits::Depth<_Tp>::value) == elemSize1()); + return ((const _Tp*)(data + step.p[0] * pt.y))[pt.x]; +} + +template inline +_Tp& Mat::at(int i0) +{ + CV_DbgAssert(dims <= 2); + CV_DbgAssert(data); + CV_DbgAssert((unsigned)i0 < (unsigned)(size.p[0] * size.p[1])); + CV_DbgAssert(elemSize() == sizeof(_Tp)); + if( isContinuous() || size.p[0] == 1 ) + return ((_Tp*)data)[i0]; + if( size.p[1] == 1 ) + return *(_Tp*)(data + step.p[0] * i0); + int i = i0 / cols, j = i0 - i * cols; + return ((_Tp*)(data + step.p[0] * i))[j]; +} + +template inline +const _Tp& Mat::at(int i0) const +{ + CV_DbgAssert(dims <= 2); + CV_DbgAssert(data); + CV_DbgAssert((unsigned)i0 < (unsigned)(size.p[0] * 
size.p[1])); + CV_DbgAssert(elemSize() == sizeof(_Tp)); + if( isContinuous() || size.p[0] == 1 ) + return ((const _Tp*)data)[i0]; + if( size.p[1] == 1 ) + return *(const _Tp*)(data + step.p[0] * i0); + int i = i0 / cols, j = i0 - i * cols; + return ((const _Tp*)(data + step.p[0] * i))[j]; +} + +template inline +_Tp& Mat::at(int i0, int i1, int i2) +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + return *(_Tp*)ptr(i0, i1, i2); +} + +template inline +const _Tp& Mat::at(int i0, int i1, int i2) const +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + return *(const _Tp*)ptr(i0, i1, i2); +} + +template inline +_Tp& Mat::at(const int* idx) +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + return *(_Tp*)ptr(idx); +} + +template inline +const _Tp& Mat::at(const int* idx) const +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + return *(const _Tp*)ptr(idx); +} + +template inline +_Tp& Mat::at(const Vec& idx) +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + return *(_Tp*)ptr(idx.val); +} + +template inline +const _Tp& Mat::at(const Vec& idx) const +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + return *(const _Tp*)ptr(idx.val); +} + +template inline +MatConstIterator_<_Tp> Mat::begin() const +{ + if (empty()) + return MatConstIterator_<_Tp>(); + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + return MatConstIterator_<_Tp>((const Mat_<_Tp>*)this); +} + +template inline +MatConstIterator_<_Tp> Mat::end() const +{ + if (empty()) + return MatConstIterator_<_Tp>(); + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + MatConstIterator_<_Tp> it((const Mat_<_Tp>*)this); + it += total(); + return it; +} + +template inline +MatIterator_<_Tp> Mat::begin() +{ + if (empty()) + return MatIterator_<_Tp>(); + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + return MatIterator_<_Tp>((Mat_<_Tp>*)this); +} + +template inline +MatIterator_<_Tp> Mat::end() +{ + if (empty()) + return MatIterator_<_Tp>(); + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + MatIterator_<_Tp> it((Mat_<_Tp>*)this); + it += total(); + return it; +} + +template inline +void Mat::forEach(const Functor& operation) { + this->forEach_impl<_Tp>(operation); +} + +template inline +void Mat::forEach(const Functor& operation) const { + // call as not const + (const_cast(this))->forEach<_Tp>(operation); +} + +template inline +Mat::operator std::vector<_Tp>() const +{ + std::vector<_Tp> v; + copyTo(v); + return v; +} + +#ifdef CV_CXX_STD_ARRAY +template inline +Mat::operator std::array<_Tp, _Nm>() const +{ + std::array<_Tp, _Nm> v; + copyTo(v); + return v; +} +#endif + +template inline +Mat::operator Vec<_Tp, n>() const +{ + CV_Assert( data && dims <= 2 && (rows == 1 || cols == 1) && + rows + cols - 1 == n && channels() == 1 ); + + if( isContinuous() && type() == traits::Type<_Tp>::value ) + return Vec<_Tp, n>((_Tp*)data); + Vec<_Tp, n> v; + Mat tmp(rows, cols, traits::Type<_Tp>::value, v.val); + convertTo(tmp, tmp.type()); + return v; +} + +template inline +Mat::operator Matx<_Tp, m, n>() const +{ + CV_Assert( data && dims <= 2 && rows == m && cols == n && channels() == 1 ); + + if( isContinuous() && type() == traits::Type<_Tp>::value ) + return Matx<_Tp, m, n>((_Tp*)data); + Matx<_Tp, m, n> mtx; + Mat tmp(rows, cols, traits::Type<_Tp>::value, mtx.val); + convertTo(tmp, tmp.type()); + return mtx; +} + +template inline +void Mat::push_back(const _Tp& elem) +{ + if( !data ) + { + *this = Mat(1, 1, traits::Type<_Tp>::value, (void*)&elem).clone(); + return; + } + CV_Assert(traits::Type<_Tp>::value == type() && cols == 1 + /* && dims == 2 (cols == 1 implies dims == 2) */); 
+ const uchar* tmp = dataend + step[0]; + if( !isSubmatrix() && isContinuous() && tmp <= datalimit ) + { + *(_Tp*)(data + (size.p[0]++) * step.p[0]) = elem; + dataend = tmp; + } + else + push_back_(&elem); +} + +template inline +void Mat::push_back(const Mat_<_Tp>& m) +{ + push_back((const Mat&)m); +} + +template<> inline +void Mat::push_back(const MatExpr& expr) +{ + push_back(static_cast(expr)); +} + + +template inline +void Mat::push_back(const std::vector<_Tp>& v) +{ + push_back(Mat(v)); +} + +#ifdef CV_CXX_MOVE_SEMANTICS + +inline +Mat::Mat(Mat&& m) + : flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), data(m.data), + datastart(m.datastart), dataend(m.dataend), datalimit(m.datalimit), allocator(m.allocator), + u(m.u), size(&rows) +{ + if (m.dims <= 2) // move new step/size info + { + step[0] = m.step[0]; + step[1] = m.step[1]; + } + else + { + CV_DbgAssert(m.step.p != m.step.buf); + step.p = m.step.p; + size.p = m.size.p; + m.step.p = m.step.buf; + m.size.p = &m.rows; + } + m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0; + m.data = NULL; m.datastart = NULL; m.dataend = NULL; m.datalimit = NULL; + m.allocator = NULL; + m.u = NULL; +} + +inline +Mat& Mat::operator = (Mat&& m) +{ + if (this == &m) + return *this; + + release(); + flags = m.flags; dims = m.dims; rows = m.rows; cols = m.cols; data = m.data; + datastart = m.datastart; dataend = m.dataend; datalimit = m.datalimit; allocator = m.allocator; + u = m.u; + if (step.p != step.buf) // release self step/size + { + fastFree(step.p); + step.p = step.buf; + size.p = &rows; + } + if (m.dims <= 2) // move new step/size info + { + step[0] = m.step[0]; + step[1] = m.step[1]; + } + else + { + CV_DbgAssert(m.step.p != m.step.buf); + step.p = m.step.p; + size.p = m.size.p; + m.step.p = m.step.buf; + m.size.p = &m.rows; + } + m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0; + m.data = NULL; m.datastart = NULL; m.dataend = NULL; m.datalimit = NULL; + m.allocator = NULL; + m.u = NULL; + return *this; +} + +#endif + + +///////////////////////////// MatSize //////////////////////////// + +inline +MatSize::MatSize(int* _p) CV_NOEXCEPT + : p(_p) {} + +inline +int MatSize::dims() const CV_NOEXCEPT +{ + return (p - 1)[0]; +} + +inline +Size MatSize::operator()() const +{ + CV_DbgAssert(dims() <= 2); + return Size(p[1], p[0]); +} + +inline +const int& MatSize::operator[](int i) const +{ + CV_DbgAssert(i < dims()); +#ifdef __OPENCV_BUILD + CV_DbgAssert(i >= 0); +#endif + return p[i]; +} + +inline +int& MatSize::operator[](int i) +{ + CV_DbgAssert(i < dims()); +#ifdef __OPENCV_BUILD + CV_DbgAssert(i >= 0); +#endif + return p[i]; +} + +inline +MatSize::operator const int*() const CV_NOEXCEPT +{ + return p; +} + +inline +bool MatSize::operator != (const MatSize& sz) const CV_NOEXCEPT +{ + return !(*this == sz); +} + + + +///////////////////////////// MatStep //////////////////////////// + +inline +MatStep::MatStep() CV_NOEXCEPT +{ + p = buf; p[0] = p[1] = 0; +} + +inline +MatStep::MatStep(size_t s) CV_NOEXCEPT +{ + p = buf; p[0] = s; p[1] = 0; +} + +inline +const size_t& MatStep::operator[](int i) const CV_NOEXCEPT +{ + return p[i]; +} + +inline +size_t& MatStep::operator[](int i) CV_NOEXCEPT +{ + return p[i]; +} + +inline MatStep::operator size_t() const +{ + CV_DbgAssert( p == buf ); + return buf[0]; +} + +inline MatStep& MatStep::operator = (size_t s) +{ + CV_DbgAssert( p == buf ); + buf[0] = s; + return *this; +} + + + +////////////////////////////// Mat_<_Tp> //////////////////////////// + +template inline +Mat_<_Tp>::Mat_() CV_NOEXCEPT + 
: Mat() +{ + flags = (flags & ~CV_MAT_TYPE_MASK) | traits::Type<_Tp>::value; +} + +template inline +Mat_<_Tp>::Mat_(int _rows, int _cols) + : Mat(_rows, _cols, traits::Type<_Tp>::value) +{ +} + +template inline +Mat_<_Tp>::Mat_(int _rows, int _cols, const _Tp& value) + : Mat(_rows, _cols, traits::Type<_Tp>::value) +{ + *this = value; +} + +template inline +Mat_<_Tp>::Mat_(Size _sz) + : Mat(_sz.height, _sz.width, traits::Type<_Tp>::value) +{} + +template inline +Mat_<_Tp>::Mat_(Size _sz, const _Tp& value) + : Mat(_sz.height, _sz.width, traits::Type<_Tp>::value) +{ + *this = value; +} + +template inline +Mat_<_Tp>::Mat_(int _dims, const int* _sz) + : Mat(_dims, _sz, traits::Type<_Tp>::value) +{} + +template inline +Mat_<_Tp>::Mat_(int _dims, const int* _sz, const _Tp& _s) + : Mat(_dims, _sz, traits::Type<_Tp>::value, Scalar(_s)) +{} + +template inline +Mat_<_Tp>::Mat_(int _dims, const int* _sz, _Tp* _data, const size_t* _steps) + : Mat(_dims, _sz, traits::Type<_Tp>::value, _data, _steps) +{} + +template inline +Mat_<_Tp>::Mat_(const Mat_<_Tp>& m, const Range* ranges) + : Mat(m, ranges) +{} + +template inline +Mat_<_Tp>::Mat_(const Mat_<_Tp>& m, const std::vector& ranges) + : Mat(m, ranges) +{} + +template inline +Mat_<_Tp>::Mat_(const Mat& m) + : Mat() +{ + flags = (flags & ~CV_MAT_TYPE_MASK) | traits::Type<_Tp>::value; + *this = m; +} + +template inline +Mat_<_Tp>::Mat_(const Mat_& m) + : Mat(m) +{} + +template inline +Mat_<_Tp>::Mat_(int _rows, int _cols, _Tp* _data, size_t steps) + : Mat(_rows, _cols, traits::Type<_Tp>::value, _data, steps) +{} + +template inline +Mat_<_Tp>::Mat_(const Mat_& m, const Range& _rowRange, const Range& _colRange) + : Mat(m, _rowRange, _colRange) +{} + +template inline +Mat_<_Tp>::Mat_(const Mat_& m, const Rect& roi) + : Mat(m, roi) +{} + +template template inline +Mat_<_Tp>::Mat_(const Vec::channel_type, n>& vec, bool copyData) + : Mat(n / DataType<_Tp>::channels, 1, traits::Type<_Tp>::value, (void*)&vec) +{ + CV_Assert(n%DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template template inline +Mat_<_Tp>::Mat_(const Matx::channel_type, m, n>& M, bool copyData) + : Mat(m, n / DataType<_Tp>::channels, traits::Type<_Tp>::value, (void*)&M) +{ + CV_Assert(n % DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template inline +Mat_<_Tp>::Mat_(const Point_::channel_type>& pt, bool copyData) + : Mat(2 / DataType<_Tp>::channels, 1, traits::Type<_Tp>::value, (void*)&pt) +{ + CV_Assert(2 % DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template inline +Mat_<_Tp>::Mat_(const Point3_::channel_type>& pt, bool copyData) + : Mat(3 / DataType<_Tp>::channels, 1, traits::Type<_Tp>::value, (void*)&pt) +{ + CV_Assert(3 % DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template inline +Mat_<_Tp>::Mat_(const MatCommaInitializer_<_Tp>& commaInitializer) + : Mat(commaInitializer) +{} + +template inline +Mat_<_Tp>::Mat_(const std::vector<_Tp>& vec, bool copyData) + : Mat(vec, copyData) +{} + +#ifdef CV_CXX11 +template inline +Mat_<_Tp>::Mat_(std::initializer_list<_Tp> list) + : Mat(list) +{} + +template inline +Mat_<_Tp>::Mat_(const std::initializer_list sizes, std::initializer_list<_Tp> list) + : Mat(sizes, list) +{} +#endif + +#ifdef CV_CXX_STD_ARRAY +template template inline +Mat_<_Tp>::Mat_(const std::array<_Tp, _Nm>& arr, bool copyData) + : Mat(arr, copyData) +{} +#endif + +template inline +Mat_<_Tp>& Mat_<_Tp>::operator = (const Mat& m) +{ + if (m.empty()) + { + release(); + return 
*this; + } + if( traits::Type<_Tp>::value == m.type() ) + { + Mat::operator = (m); + return *this; + } + if( traits::Depth<_Tp>::value == m.depth() ) + { + return (*this = m.reshape(DataType<_Tp>::channels, m.dims, 0)); + } + CV_Assert(DataType<_Tp>::channels == m.channels() || m.empty()); + m.convertTo(*this, type()); + return *this; +} + +template inline +Mat_<_Tp>& Mat_<_Tp>::operator = (const Mat_& m) +{ + Mat::operator=(m); + return *this; +} + +template inline +Mat_<_Tp>& Mat_<_Tp>::operator = (const _Tp& s) +{ + typedef typename DataType<_Tp>::vec_type VT; + Mat::operator=(Scalar((const VT&)s)); + return *this; +} + +template inline +void Mat_<_Tp>::create(int _rows, int _cols) +{ + Mat::create(_rows, _cols, traits::Type<_Tp>::value); +} + +template inline +void Mat_<_Tp>::create(Size _sz) +{ + Mat::create(_sz, traits::Type<_Tp>::value); +} + +template inline +void Mat_<_Tp>::create(int _dims, const int* _sz) +{ + Mat::create(_dims, _sz, traits::Type<_Tp>::value); +} + +template inline +void Mat_<_Tp>::release() +{ + Mat::release(); + flags = (flags & ~CV_MAT_TYPE_MASK) | traits::Type<_Tp>::value; +} + +template inline +Mat_<_Tp> Mat_<_Tp>::cross(const Mat_& m) const +{ + return Mat_<_Tp>(Mat::cross(m)); +} + +template template inline +Mat_<_Tp>::operator Mat_() const +{ + return Mat_(static_cast(*this)); +} + +template inline +Mat_<_Tp> Mat_<_Tp>::row(int y) const +{ + return Mat_(*this, Range(y, y+1), Range::all()); +} + +template inline +Mat_<_Tp> Mat_<_Tp>::col(int x) const +{ + return Mat_(*this, Range::all(), Range(x, x+1)); +} + +template inline +Mat_<_Tp> Mat_<_Tp>::diag(int d) const +{ + return Mat_(Mat::diag(d)); +} + +template inline +Mat_<_Tp> Mat_<_Tp>::clone() const +{ + return Mat_(Mat::clone()); +} + +template inline +size_t Mat_<_Tp>::elemSize() const +{ + CV_DbgAssert( Mat::elemSize() == sizeof(_Tp) ); + return sizeof(_Tp); +} + +template inline +size_t Mat_<_Tp>::elemSize1() const +{ + CV_DbgAssert( Mat::elemSize1() == sizeof(_Tp) / DataType<_Tp>::channels ); + return sizeof(_Tp) / DataType<_Tp>::channels; +} + +template inline +int Mat_<_Tp>::type() const +{ + CV_DbgAssert( Mat::type() == traits::Type<_Tp>::value ); + return traits::Type<_Tp>::value; +} + +template inline +int Mat_<_Tp>::depth() const +{ + CV_DbgAssert( Mat::depth() == traits::Depth<_Tp>::value ); + return traits::Depth<_Tp>::value; +} + +template inline +int Mat_<_Tp>::channels() const +{ + CV_DbgAssert( Mat::channels() == DataType<_Tp>::channels ); + return DataType<_Tp>::channels; +} + +template inline +size_t Mat_<_Tp>::stepT(int i) const +{ + return step.p[i] / elemSize(); +} + +template inline +size_t Mat_<_Tp>::step1(int i) const +{ + return step.p[i] / elemSize1(); +} + +template inline +Mat_<_Tp>& Mat_<_Tp>::adjustROI( int dtop, int dbottom, int dleft, int dright ) +{ + return (Mat_<_Tp>&)(Mat::adjustROI(dtop, dbottom, dleft, dright)); +} + +template inline +Mat_<_Tp> Mat_<_Tp>::operator()( const Range& _rowRange, const Range& _colRange ) const +{ + return Mat_<_Tp>(*this, _rowRange, _colRange); +} + +template inline +Mat_<_Tp> Mat_<_Tp>::operator()( const Rect& roi ) const +{ + return Mat_<_Tp>(*this, roi); +} + +template inline +Mat_<_Tp> Mat_<_Tp>::operator()( const Range* ranges ) const +{ + return Mat_<_Tp>(*this, ranges); +} + +template inline +Mat_<_Tp> Mat_<_Tp>::operator()(const std::vector& ranges) const +{ + return Mat_<_Tp>(*this, ranges); +} + +template inline +_Tp* Mat_<_Tp>::operator [](int y) +{ + CV_DbgAssert( 0 <= y && y < size.p[0] ); + return (_Tp*)(data + 
y*step.p[0]); +} + +template inline +const _Tp* Mat_<_Tp>::operator [](int y) const +{ + CV_DbgAssert( 0 <= y && y < size.p[0] ); + return (const _Tp*)(data + y*step.p[0]); +} + +template inline +_Tp& Mat_<_Tp>::operator ()(int i0, int i1) +{ + CV_DbgAssert(dims <= 2); + CV_DbgAssert(data); + CV_DbgAssert((unsigned)i0 < (unsigned)size.p[0]); + CV_DbgAssert((unsigned)i1 < (unsigned)size.p[1]); + CV_DbgAssert(type() == traits::Type<_Tp>::value); + return ((_Tp*)(data + step.p[0] * i0))[i1]; +} + +template inline +const _Tp& Mat_<_Tp>::operator ()(int i0, int i1) const +{ + CV_DbgAssert(dims <= 2); + CV_DbgAssert(data); + CV_DbgAssert((unsigned)i0 < (unsigned)size.p[0]); + CV_DbgAssert((unsigned)i1 < (unsigned)size.p[1]); + CV_DbgAssert(type() == traits::Type<_Tp>::value); + return ((const _Tp*)(data + step.p[0] * i0))[i1]; +} + +template inline +_Tp& Mat_<_Tp>::operator ()(Point pt) +{ + CV_DbgAssert(dims <= 2); + CV_DbgAssert(data); + CV_DbgAssert((unsigned)pt.y < (unsigned)size.p[0]); + CV_DbgAssert((unsigned)pt.x < (unsigned)size.p[1]); + CV_DbgAssert(type() == traits::Type<_Tp>::value); + return ((_Tp*)(data + step.p[0] * pt.y))[pt.x]; +} + +template inline +const _Tp& Mat_<_Tp>::operator ()(Point pt) const +{ + CV_DbgAssert(dims <= 2); + CV_DbgAssert(data); + CV_DbgAssert((unsigned)pt.y < (unsigned)size.p[0]); + CV_DbgAssert((unsigned)pt.x < (unsigned)size.p[1]); + CV_DbgAssert(type() == traits::Type<_Tp>::value); + return ((const _Tp*)(data + step.p[0] * pt.y))[pt.x]; +} + +template inline +_Tp& Mat_<_Tp>::operator ()(const int* idx) +{ + return Mat::at<_Tp>(idx); +} + +template inline +const _Tp& Mat_<_Tp>::operator ()(const int* idx) const +{ + return Mat::at<_Tp>(idx); +} + +template template inline +_Tp& Mat_<_Tp>::operator ()(const Vec& idx) +{ + return Mat::at<_Tp>(idx); +} + +template template inline +const _Tp& Mat_<_Tp>::operator ()(const Vec& idx) const +{ + return Mat::at<_Tp>(idx); +} + +template inline +_Tp& Mat_<_Tp>::operator ()(int i0) +{ + return this->at<_Tp>(i0); +} + +template inline +const _Tp& Mat_<_Tp>::operator ()(int i0) const +{ + return this->at<_Tp>(i0); +} + +template inline +_Tp& Mat_<_Tp>::operator ()(int i0, int i1, int i2) +{ + return this->at<_Tp>(i0, i1, i2); +} + +template inline +const _Tp& Mat_<_Tp>::operator ()(int i0, int i1, int i2) const +{ + return this->at<_Tp>(i0, i1, i2); +} + +template inline +Mat_<_Tp>::operator std::vector<_Tp>() const +{ + std::vector<_Tp> v; + copyTo(v); + return v; +} + +#ifdef CV_CXX_STD_ARRAY +template template inline +Mat_<_Tp>::operator std::array<_Tp, _Nm>() const +{ + std::array<_Tp, _Nm> a; + copyTo(a); + return a; +} +#endif + +template template inline +Mat_<_Tp>::operator Vec::channel_type, n>() const +{ + CV_Assert(n % DataType<_Tp>::channels == 0); + +#if defined _MSC_VER + const Mat* pMat = (const Mat*)this; // workaround for MSVS <= 2012 compiler bugs (but GCC 4.6 dislikes this workaround) + return pMat->operator Vec::channel_type, n>(); +#else + return this->Mat::operator Vec::channel_type, n>(); +#endif +} + +template template inline +Mat_<_Tp>::operator Matx::channel_type, m, n>() const +{ + CV_Assert(n % DataType<_Tp>::channels == 0); + +#if defined _MSC_VER + const Mat* pMat = (const Mat*)this; // workaround for MSVS <= 2012 compiler bugs (but GCC 4.6 dislikes this workaround) + Matx::channel_type, m, n> res = pMat->operator Matx::channel_type, m, n>(); + return res; +#else + Matx::channel_type, m, n> res = this->Mat::operator Matx::channel_type, m, n>(); + return res; +#endif +} + +template inline 
+MatConstIterator_<_Tp> Mat_<_Tp>::begin() const +{ + return Mat::begin<_Tp>(); +} + +template inline +MatConstIterator_<_Tp> Mat_<_Tp>::end() const +{ + return Mat::end<_Tp>(); +} + +template inline +MatIterator_<_Tp> Mat_<_Tp>::begin() +{ + return Mat::begin<_Tp>(); +} + +template inline +MatIterator_<_Tp> Mat_<_Tp>::end() +{ + return Mat::end<_Tp>(); +} + +template template inline +void Mat_<_Tp>::forEach(const Functor& operation) { + Mat::forEach<_Tp, Functor>(operation); +} + +template template inline +void Mat_<_Tp>::forEach(const Functor& operation) const { + Mat::forEach<_Tp, Functor>(operation); +} + +#ifdef CV_CXX_MOVE_SEMANTICS + +template inline +Mat_<_Tp>::Mat_(Mat_&& m) + : Mat(std::move(m)) +{ +} + +template inline +Mat_<_Tp>& Mat_<_Tp>::operator = (Mat_&& m) +{ + Mat::operator = (std::move(m)); + return *this; +} + +template inline +Mat_<_Tp>::Mat_(Mat&& m) + : Mat() +{ + flags = (flags & ~CV_MAT_TYPE_MASK) | traits::Type<_Tp>::value; + *this = std::move(m); +} + +template inline +Mat_<_Tp>& Mat_<_Tp>::operator = (Mat&& m) +{ + if (m.empty()) + { + release(); + return *this; + } + if( traits::Type<_Tp>::value == m.type() ) + { + Mat::operator = ((Mat&&)m); + return *this; + } + if( traits::Depth<_Tp>::value == m.depth() ) + { + Mat::operator = ((Mat&&)m.reshape(DataType<_Tp>::channels, m.dims, 0)); + return *this; + } + CV_DbgAssert(DataType<_Tp>::channels == m.channels()); + m.convertTo(*this, type()); + return *this; +} + +template inline +Mat_<_Tp>::Mat_(MatExpr&& e) + : Mat() +{ + flags = (flags & ~CV_MAT_TYPE_MASK) | traits::Type<_Tp>::value; + *this = Mat(e); +} + +#endif + +///////////////////////////// SparseMat ///////////////////////////// + +inline +SparseMat SparseMat::clone() const +{ + SparseMat temp; + this->copyTo(temp); + return temp; +} + +inline +size_t SparseMat::elemSize() const +{ + return CV_ELEM_SIZE(flags); +} + +inline +size_t SparseMat::elemSize1() const +{ + return CV_ELEM_SIZE1(flags); +} + +inline +int SparseMat::type() const +{ + return CV_MAT_TYPE(flags); +} + +inline +int SparseMat::depth() const +{ + return CV_MAT_DEPTH(flags); +} + +inline +int SparseMat::channels() const +{ + return CV_MAT_CN(flags); +} + +inline +const int* SparseMat::size() const +{ + return hdr ? hdr->size : 0; +} + +inline +int SparseMat::size(int i) const +{ + if( hdr ) + { + CV_DbgAssert((unsigned)i < (unsigned)hdr->dims); + return hdr->size[i]; + } + return 0; +} + +inline +int SparseMat::dims() const +{ + return hdr ? hdr->dims : 0; +} + +inline +size_t SparseMat::nzcount() const +{ + return hdr ? hdr->nodeCount : 0; +} + +template inline +_Tp& SparseMat::ref(int i0, size_t* hashval) +{ + return *(_Tp*)((SparseMat*)this)->ptr(i0, true, hashval); +} + +template inline +_Tp& SparseMat::ref(int i0, int i1, size_t* hashval) +{ + return *(_Tp*)((SparseMat*)this)->ptr(i0, i1, true, hashval); +} + +template inline +_Tp& SparseMat::ref(int i0, int i1, int i2, size_t* hashval) +{ + return *(_Tp*)((SparseMat*)this)->ptr(i0, i1, i2, true, hashval); +} + +template inline +_Tp& SparseMat::ref(const int* idx, size_t* hashval) +{ + return *(_Tp*)((SparseMat*)this)->ptr(idx, true, hashval); +} + +template inline +_Tp SparseMat::value(int i0, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, false, hashval); + return p ? *p : _Tp(); +} + +template inline +_Tp SparseMat::value(int i0, int i1, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, i1, false, hashval); + return p ? 
*p : _Tp(); +} + +template inline +_Tp SparseMat::value(int i0, int i1, int i2, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, i1, i2, false, hashval); + return p ? *p : _Tp(); +} + +template inline +_Tp SparseMat::value(const int* idx, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(idx, false, hashval); + return p ? *p : _Tp(); +} + +template inline +const _Tp* SparseMat::find(int i0, size_t* hashval) const +{ + return (const _Tp*)((SparseMat*)this)->ptr(i0, false, hashval); +} + +template inline +const _Tp* SparseMat::find(int i0, int i1, size_t* hashval) const +{ + return (const _Tp*)((SparseMat*)this)->ptr(i0, i1, false, hashval); +} + +template inline +const _Tp* SparseMat::find(int i0, int i1, int i2, size_t* hashval) const +{ + return (const _Tp*)((SparseMat*)this)->ptr(i0, i1, i2, false, hashval); +} + +template inline +const _Tp* SparseMat::find(const int* idx, size_t* hashval) const +{ + return (const _Tp*)((SparseMat*)this)->ptr(idx, false, hashval); +} + +template inline +_Tp& SparseMat::value(Node* n) +{ + return *(_Tp*)((uchar*)n + hdr->valueOffset); +} + +template inline +const _Tp& SparseMat::value(const Node* n) const +{ + return *(const _Tp*)((const uchar*)n + hdr->valueOffset); +} + +inline +SparseMat::Node* SparseMat::node(size_t nidx) +{ + return (Node*)(void*)&hdr->pool[nidx]; +} + +inline +const SparseMat::Node* SparseMat::node(size_t nidx) const +{ + return (const Node*)(const void*)&hdr->pool[nidx]; +} + +inline +SparseMatIterator SparseMat::begin() +{ + return SparseMatIterator(this); +} + +inline +SparseMatConstIterator SparseMat::begin() const +{ + return SparseMatConstIterator(this); +} + +inline +SparseMatIterator SparseMat::end() +{ + SparseMatIterator it(this); + it.seekEnd(); + return it; +} + +inline +SparseMatConstIterator SparseMat::end() const +{ + SparseMatConstIterator it(this); + it.seekEnd(); + return it; +} + +template inline +SparseMatIterator_<_Tp> SparseMat::begin() +{ + return SparseMatIterator_<_Tp>(this); +} + +template inline +SparseMatConstIterator_<_Tp> SparseMat::begin() const +{ + return SparseMatConstIterator_<_Tp>(this); +} + +template inline +SparseMatIterator_<_Tp> SparseMat::end() +{ + SparseMatIterator_<_Tp> it(this); + it.seekEnd(); + return it; +} + +template inline +SparseMatConstIterator_<_Tp> SparseMat::end() const +{ + SparseMatConstIterator_<_Tp> it(this); + it.seekEnd(); + return it; +} + + + +///////////////////////////// SparseMat_ //////////////////////////// + +template inline +SparseMat_<_Tp>::SparseMat_() +{ + flags = MAGIC_VAL | traits::Type<_Tp>::value; +} + +template inline +SparseMat_<_Tp>::SparseMat_(int _dims, const int* _sizes) + : SparseMat(_dims, _sizes, traits::Type<_Tp>::value) +{} + +template inline +SparseMat_<_Tp>::SparseMat_(const SparseMat& m) +{ + if( m.type() == traits::Type<_Tp>::value ) + *this = (const SparseMat_<_Tp>&)m; + else + m.convertTo(*this, traits::Type<_Tp>::value); +} + +template inline +SparseMat_<_Tp>::SparseMat_(const SparseMat_<_Tp>& m) +{ + this->flags = m.flags; + this->hdr = m.hdr; + if( this->hdr ) + CV_XADD(&this->hdr->refcount, 1); +} + +template inline +SparseMat_<_Tp>::SparseMat_(const Mat& m) +{ + SparseMat sm(m); + *this = sm; +} + +template inline +SparseMat_<_Tp>& SparseMat_<_Tp>::operator = (const SparseMat_<_Tp>& m) +{ + if( this != &m ) + { + if( m.hdr ) CV_XADD(&m.hdr->refcount, 1); + release(); + flags = m.flags; + hdr = m.hdr; + } + return *this; +} + +template inline +SparseMat_<_Tp>& 
SparseMat_<_Tp>::operator = (const SparseMat& m) +{ + if( m.type() == traits::Type<_Tp>::value ) + return (*this = (const SparseMat_<_Tp>&)m); + m.convertTo(*this, traits::Type<_Tp>::value); + return *this; +} + +template inline +SparseMat_<_Tp>& SparseMat_<_Tp>::operator = (const Mat& m) +{ + return (*this = SparseMat(m)); +} + +template inline +SparseMat_<_Tp> SparseMat_<_Tp>::clone() const +{ + SparseMat_<_Tp> m; + this->copyTo(m); + return m; +} + +template inline +void SparseMat_<_Tp>::create(int _dims, const int* _sizes) +{ + SparseMat::create(_dims, _sizes, traits::Type<_Tp>::value); +} + +template inline +int SparseMat_<_Tp>::type() const +{ + return traits::Type<_Tp>::value; +} + +template inline +int SparseMat_<_Tp>::depth() const +{ + return traits::Depth<_Tp>::value; +} + +template inline +int SparseMat_<_Tp>::channels() const +{ + return DataType<_Tp>::channels; +} + +template inline +_Tp& SparseMat_<_Tp>::ref(int i0, size_t* hashval) +{ + return SparseMat::ref<_Tp>(i0, hashval); +} + +template inline +_Tp SparseMat_<_Tp>::operator()(int i0, size_t* hashval) const +{ + return SparseMat::value<_Tp>(i0, hashval); +} + +template inline +_Tp& SparseMat_<_Tp>::ref(int i0, int i1, size_t* hashval) +{ + return SparseMat::ref<_Tp>(i0, i1, hashval); +} + +template inline +_Tp SparseMat_<_Tp>::operator()(int i0, int i1, size_t* hashval) const +{ + return SparseMat::value<_Tp>(i0, i1, hashval); +} + +template inline +_Tp& SparseMat_<_Tp>::ref(int i0, int i1, int i2, size_t* hashval) +{ + return SparseMat::ref<_Tp>(i0, i1, i2, hashval); +} + +template inline +_Tp SparseMat_<_Tp>::operator()(int i0, int i1, int i2, size_t* hashval) const +{ + return SparseMat::value<_Tp>(i0, i1, i2, hashval); +} + +template inline +_Tp& SparseMat_<_Tp>::ref(const int* idx, size_t* hashval) +{ + return SparseMat::ref<_Tp>(idx, hashval); +} + +template inline +_Tp SparseMat_<_Tp>::operator()(const int* idx, size_t* hashval) const +{ + return SparseMat::value<_Tp>(idx, hashval); +} + +template inline +SparseMatIterator_<_Tp> SparseMat_<_Tp>::begin() +{ + return SparseMatIterator_<_Tp>(this); +} + +template inline +SparseMatConstIterator_<_Tp> SparseMat_<_Tp>::begin() const +{ + return SparseMatConstIterator_<_Tp>(this); +} + +template inline +SparseMatIterator_<_Tp> SparseMat_<_Tp>::end() +{ + SparseMatIterator_<_Tp> it(this); + it.seekEnd(); + return it; +} + +template inline +SparseMatConstIterator_<_Tp> SparseMat_<_Tp>::end() const +{ + SparseMatConstIterator_<_Tp> it(this); + it.seekEnd(); + return it; +} + + + +////////////////////////// MatConstIterator ///////////////////////// + +inline +MatConstIterator::MatConstIterator() + : m(0), elemSize(0), ptr(0), sliceStart(0), sliceEnd(0) +{} + +inline +MatConstIterator::MatConstIterator(const Mat* _m) + : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0) +{ + if( m && m->isContinuous() ) + { + CV_Assert(!m->empty()); + sliceStart = m->ptr(); + sliceEnd = sliceStart + m->total()*elemSize; + } + seek((const int*)0); +} + +inline +MatConstIterator::MatConstIterator(const Mat* _m, int _row, int _col) + : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0) +{ + CV_Assert(m && m->dims <= 2); + if( m->isContinuous() ) + { + CV_Assert(!m->empty()); + sliceStart = m->ptr(); + sliceEnd = sliceStart + m->total()*elemSize; + } + int idx[] = {_row, _col}; + seek(idx); +} + +inline +MatConstIterator::MatConstIterator(const Mat* _m, Point _pt) + : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0) +{ + CV_Assert(m && 
m->dims <= 2); + if( m->isContinuous() ) + { + CV_Assert(!m->empty()); + sliceStart = m->ptr(); + sliceEnd = sliceStart + m->total()*elemSize; + } + int idx[] = {_pt.y, _pt.x}; + seek(idx); +} + +inline +MatConstIterator::MatConstIterator(const MatConstIterator& it) + : m(it.m), elemSize(it.elemSize), ptr(it.ptr), sliceStart(it.sliceStart), sliceEnd(it.sliceEnd) +{} + +inline +MatConstIterator& MatConstIterator::operator = (const MatConstIterator& it ) +{ + m = it.m; elemSize = it.elemSize; ptr = it.ptr; + sliceStart = it.sliceStart; sliceEnd = it.sliceEnd; + return *this; +} + +inline +const uchar* MatConstIterator::operator *() const +{ + return ptr; +} + +inline MatConstIterator& MatConstIterator::operator += (ptrdiff_t ofs) +{ + if( !m || ofs == 0 ) + return *this; + ptrdiff_t ofsb = ofs*elemSize; + ptr += ofsb; + if( ptr < sliceStart || sliceEnd <= ptr ) + { + ptr -= ofsb; + seek(ofs, true); + } + return *this; +} + +inline +MatConstIterator& MatConstIterator::operator -= (ptrdiff_t ofs) +{ + return (*this += -ofs); +} + +inline +MatConstIterator& MatConstIterator::operator --() +{ + if( m && (ptr -= elemSize) < sliceStart ) + { + ptr += elemSize; + seek(-1, true); + } + return *this; +} + +inline +MatConstIterator MatConstIterator::operator --(int) +{ + MatConstIterator b = *this; + *this += -1; + return b; +} + +inline +MatConstIterator& MatConstIterator::operator ++() +{ + if( m && (ptr += elemSize) >= sliceEnd ) + { + ptr -= elemSize; + seek(1, true); + } + return *this; +} + +inline MatConstIterator MatConstIterator::operator ++(int) +{ + MatConstIterator b = *this; + *this += 1; + return b; +} + + +static inline +bool operator == (const MatConstIterator& a, const MatConstIterator& b) +{ + return a.m == b.m && a.ptr == b.ptr; +} + +static inline +bool operator != (const MatConstIterator& a, const MatConstIterator& b) +{ + return !(a == b); +} + +static inline +bool operator < (const MatConstIterator& a, const MatConstIterator& b) +{ + return a.ptr < b.ptr; +} + +static inline +bool operator > (const MatConstIterator& a, const MatConstIterator& b) +{ + return a.ptr > b.ptr; +} + +static inline +bool operator <= (const MatConstIterator& a, const MatConstIterator& b) +{ + return a.ptr <= b.ptr; +} + +static inline +bool operator >= (const MatConstIterator& a, const MatConstIterator& b) +{ + return a.ptr >= b.ptr; +} + +static inline +ptrdiff_t operator - (const MatConstIterator& b, const MatConstIterator& a) +{ + if( a.m != b.m ) + return ((size_t)(-1) >> 1); + if( a.sliceEnd == b.sliceEnd ) + return (b.ptr - a.ptr)/static_cast(b.elemSize); + + return b.lpos() - a.lpos(); +} + +static inline +MatConstIterator operator + (const MatConstIterator& a, ptrdiff_t ofs) +{ + MatConstIterator b = a; + return b += ofs; +} + +static inline +MatConstIterator operator + (ptrdiff_t ofs, const MatConstIterator& a) +{ + MatConstIterator b = a; + return b += ofs; +} + +static inline +MatConstIterator operator - (const MatConstIterator& a, ptrdiff_t ofs) +{ + MatConstIterator b = a; + return b += -ofs; +} + + +inline +const uchar* MatConstIterator::operator [](ptrdiff_t i) const +{ + return *(*this + i); +} + + + +///////////////////////// MatConstIterator_ ///////////////////////// + +template inline +MatConstIterator_<_Tp>::MatConstIterator_() +{} + +template inline +MatConstIterator_<_Tp>::MatConstIterator_(const Mat_<_Tp>* _m) + : MatConstIterator(_m) +{} + +template inline +MatConstIterator_<_Tp>::MatConstIterator_(const Mat_<_Tp>* _m, int _row, int _col) + : MatConstIterator(_m, _row, _col) +{} 
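+// NOTE (editorial sketch, not part of the upstream OpenCV header): the typed
+// iterators implemented in this section model random-access iterators, so a
+// Mat_<T> can be walked like any STL range. A minimal usage illustration,
+// assuming a single-channel float matrix:
+//
+//     cv::Mat_<float> img(4, 4, 1.f);
+//     float sum = 0.f;
+//     for (cv::MatConstIterator_<float> it = img.begin(); it != img.end(); ++it)
+//         sum += *it;                      // operator* yields const float&
+//     for (cv::MatIterator_<float> it = img.begin(); it != img.end(); ++it)
+//         *it *= 2.f;                      // mutable traversal via MatIterator_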
+ +template inline +MatConstIterator_<_Tp>::MatConstIterator_(const Mat_<_Tp>* _m, Point _pt) + : MatConstIterator(_m, _pt) +{} + +template inline +MatConstIterator_<_Tp>::MatConstIterator_(const MatConstIterator_& it) + : MatConstIterator(it) +{} + +template inline +MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator = (const MatConstIterator_& it ) +{ + MatConstIterator::operator = (it); + return *this; +} + +template inline +const _Tp& MatConstIterator_<_Tp>::operator *() const +{ + return *(_Tp*)(this->ptr); +} + +template inline +MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator += (ptrdiff_t ofs) +{ + MatConstIterator::operator += (ofs); + return *this; +} + +template inline +MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator -= (ptrdiff_t ofs) +{ + return (*this += -ofs); +} + +template inline +MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator --() +{ + MatConstIterator::operator --(); + return *this; +} + +template inline +MatConstIterator_<_Tp> MatConstIterator_<_Tp>::operator --(int) +{ + MatConstIterator_ b = *this; + MatConstIterator::operator --(); + return b; +} + +template inline +MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator ++() +{ + MatConstIterator::operator ++(); + return *this; +} + +template inline +MatConstIterator_<_Tp> MatConstIterator_<_Tp>::operator ++(int) +{ + MatConstIterator_ b = *this; + MatConstIterator::operator ++(); + return b; +} + + +template inline +Point MatConstIterator_<_Tp>::pos() const +{ + if( !m ) + return Point(); + CV_DbgAssert( m->dims <= 2 ); + if( m->isContinuous() ) + { + ptrdiff_t ofs = (const _Tp*)ptr - (const _Tp*)m->data; + int y = (int)(ofs / m->cols); + int x = (int)(ofs - (ptrdiff_t)y * m->cols); + return Point(x, y); + } + else + { + ptrdiff_t ofs = (uchar*)ptr - m->data; + int y = (int)(ofs / m->step); + int x = (int)((ofs - y * m->step)/sizeof(_Tp)); + return Point(x, y); + } +} + + +template static inline +bool operator == (const MatConstIterator_<_Tp>& a, const MatConstIterator_<_Tp>& b) +{ + return a.m == b.m && a.ptr == b.ptr; +} + +template static inline +bool operator != (const MatConstIterator_<_Tp>& a, const MatConstIterator_<_Tp>& b) +{ + return a.m != b.m || a.ptr != b.ptr; +} + +template static inline +MatConstIterator_<_Tp> operator + (const MatConstIterator_<_Tp>& a, ptrdiff_t ofs) +{ + MatConstIterator t = (const MatConstIterator&)a + ofs; + return (MatConstIterator_<_Tp>&)t; +} + +template static inline +MatConstIterator_<_Tp> operator + (ptrdiff_t ofs, const MatConstIterator_<_Tp>& a) +{ + MatConstIterator t = (const MatConstIterator&)a + ofs; + return (MatConstIterator_<_Tp>&)t; +} + +template static inline +MatConstIterator_<_Tp> operator - (const MatConstIterator_<_Tp>& a, ptrdiff_t ofs) +{ + MatConstIterator t = (const MatConstIterator&)a - ofs; + return (MatConstIterator_<_Tp>&)t; +} + +template inline +const _Tp& MatConstIterator_<_Tp>::operator [](ptrdiff_t i) const +{ + return *(_Tp*)MatConstIterator::operator [](i); +} + + + +//////////////////////////// MatIterator_ /////////////////////////// + +template inline +MatIterator_<_Tp>::MatIterator_() + : MatConstIterator_<_Tp>() +{} + +template inline +MatIterator_<_Tp>::MatIterator_(Mat_<_Tp>* _m) + : MatConstIterator_<_Tp>(_m) +{} + +template inline +MatIterator_<_Tp>::MatIterator_(Mat_<_Tp>* _m, int _row, int _col) + : MatConstIterator_<_Tp>(_m, _row, _col) +{} + +template inline +MatIterator_<_Tp>::MatIterator_(Mat_<_Tp>* _m, Point _pt) + : MatConstIterator_<_Tp>(_m, _pt) +{} + +template inline 
+MatIterator_<_Tp>::MatIterator_(Mat_<_Tp>* _m, const int* _idx) + : MatConstIterator_<_Tp>(_m, _idx) +{} + +template inline +MatIterator_<_Tp>::MatIterator_(const MatIterator_& it) + : MatConstIterator_<_Tp>(it) +{} + +template inline +MatIterator_<_Tp>& MatIterator_<_Tp>::operator = (const MatIterator_<_Tp>& it ) +{ + MatConstIterator::operator = (it); + return *this; +} + +template inline +_Tp& MatIterator_<_Tp>::operator *() const +{ + return *(_Tp*)(this->ptr); +} + +template inline +MatIterator_<_Tp>& MatIterator_<_Tp>::operator += (ptrdiff_t ofs) +{ + MatConstIterator::operator += (ofs); + return *this; +} + +template inline +MatIterator_<_Tp>& MatIterator_<_Tp>::operator -= (ptrdiff_t ofs) +{ + MatConstIterator::operator += (-ofs); + return *this; +} + +template inline +MatIterator_<_Tp>& MatIterator_<_Tp>::operator --() +{ + MatConstIterator::operator --(); + return *this; +} + +template inline +MatIterator_<_Tp> MatIterator_<_Tp>::operator --(int) +{ + MatIterator_ b = *this; + MatConstIterator::operator --(); + return b; +} + +template inline +MatIterator_<_Tp>& MatIterator_<_Tp>::operator ++() +{ + MatConstIterator::operator ++(); + return *this; +} + +template inline +MatIterator_<_Tp> MatIterator_<_Tp>::operator ++(int) +{ + MatIterator_ b = *this; + MatConstIterator::operator ++(); + return b; +} + +template inline +_Tp& MatIterator_<_Tp>::operator [](ptrdiff_t i) const +{ + return *(*this + i); +} + + +template static inline +bool operator == (const MatIterator_<_Tp>& a, const MatIterator_<_Tp>& b) +{ + return a.m == b.m && a.ptr == b.ptr; +} + +template static inline +bool operator != (const MatIterator_<_Tp>& a, const MatIterator_<_Tp>& b) +{ + return a.m != b.m || a.ptr != b.ptr; +} + +template static inline +MatIterator_<_Tp> operator + (const MatIterator_<_Tp>& a, ptrdiff_t ofs) +{ + MatConstIterator t = (const MatConstIterator&)a + ofs; + return (MatIterator_<_Tp>&)t; +} + +template static inline +MatIterator_<_Tp> operator + (ptrdiff_t ofs, const MatIterator_<_Tp>& a) +{ + MatConstIterator t = (const MatConstIterator&)a + ofs; + return (MatIterator_<_Tp>&)t; +} + +template static inline +MatIterator_<_Tp> operator - (const MatIterator_<_Tp>& a, ptrdiff_t ofs) +{ + MatConstIterator t = (const MatConstIterator&)a - ofs; + return (MatIterator_<_Tp>&)t; +} + + + +/////////////////////// SparseMatConstIterator ////////////////////// + +inline +SparseMatConstIterator::SparseMatConstIterator() + : m(0), hashidx(0), ptr(0) +{} + +inline +SparseMatConstIterator::SparseMatConstIterator(const SparseMatConstIterator& it) + : m(it.m), hashidx(it.hashidx), ptr(it.ptr) +{} + +inline SparseMatConstIterator& SparseMatConstIterator::operator = (const SparseMatConstIterator& it) +{ + if( this != &it ) + { + m = it.m; + hashidx = it.hashidx; + ptr = it.ptr; + } + return *this; +} + +template inline +const _Tp& SparseMatConstIterator::value() const +{ + return *(const _Tp*)ptr; +} + +inline +const SparseMat::Node* SparseMatConstIterator::node() const +{ + return (ptr && m && m->hdr) ? 
(const SparseMat::Node*)(const void*)(ptr - m->hdr->valueOffset) : 0; +} + +inline +SparseMatConstIterator SparseMatConstIterator::operator ++(int) +{ + SparseMatConstIterator it = *this; + ++*this; + return it; +} + +inline +void SparseMatConstIterator::seekEnd() +{ + if( m && m->hdr ) + { + hashidx = m->hdr->hashtab.size(); + ptr = 0; + } +} + + +static inline +bool operator == (const SparseMatConstIterator& it1, const SparseMatConstIterator& it2) +{ + return it1.m == it2.m && it1.ptr == it2.ptr; +} + +static inline +bool operator != (const SparseMatConstIterator& it1, const SparseMatConstIterator& it2) +{ + return !(it1 == it2); +} + + + +///////////////////////// SparseMatIterator ///////////////////////// + +inline +SparseMatIterator::SparseMatIterator() +{} + +inline +SparseMatIterator::SparseMatIterator(SparseMat* _m) + : SparseMatConstIterator(_m) +{} + +inline +SparseMatIterator::SparseMatIterator(const SparseMatIterator& it) + : SparseMatConstIterator(it) +{} + +inline +SparseMatIterator& SparseMatIterator::operator = (const SparseMatIterator& it) +{ + (SparseMatConstIterator&)*this = it; + return *this; +} + +template inline +_Tp& SparseMatIterator::value() const +{ + return *(_Tp*)ptr; +} + +inline +SparseMat::Node* SparseMatIterator::node() const +{ + return (SparseMat::Node*)SparseMatConstIterator::node(); +} + +inline +SparseMatIterator& SparseMatIterator::operator ++() +{ + SparseMatConstIterator::operator ++(); + return *this; +} + +inline +SparseMatIterator SparseMatIterator::operator ++(int) +{ + SparseMatIterator it = *this; + ++*this; + return it; +} + + + +////////////////////// SparseMatConstIterator_ ////////////////////// + +template inline +SparseMatConstIterator_<_Tp>::SparseMatConstIterator_() +{} + +template inline +SparseMatConstIterator_<_Tp>::SparseMatConstIterator_(const SparseMat_<_Tp>* _m) + : SparseMatConstIterator(_m) +{} + +template inline +SparseMatConstIterator_<_Tp>::SparseMatConstIterator_(const SparseMat* _m) + : SparseMatConstIterator(_m) +{ + CV_Assert( _m->type() == traits::Type<_Tp>::value ); +} + +template inline +SparseMatConstIterator_<_Tp>::SparseMatConstIterator_(const SparseMatConstIterator_<_Tp>& it) + : SparseMatConstIterator(it) +{} + +template inline +SparseMatConstIterator_<_Tp>& SparseMatConstIterator_<_Tp>::operator = (const SparseMatConstIterator_<_Tp>& it) +{ + return reinterpret_cast&> + (*reinterpret_cast(this) = + reinterpret_cast(it)); +} + +template inline +const _Tp& SparseMatConstIterator_<_Tp>::operator *() const +{ + return *(const _Tp*)this->ptr; +} + +template inline +SparseMatConstIterator_<_Tp>& SparseMatConstIterator_<_Tp>::operator ++() +{ + SparseMatConstIterator::operator ++(); + return *this; +} + +template inline +SparseMatConstIterator_<_Tp> SparseMatConstIterator_<_Tp>::operator ++(int) +{ + SparseMatConstIterator_<_Tp> it = *this; + SparseMatConstIterator::operator ++(); + return it; +} + + + +///////////////////////// SparseMatIterator_ //////////////////////// + +template inline +SparseMatIterator_<_Tp>::SparseMatIterator_() +{} + +template inline +SparseMatIterator_<_Tp>::SparseMatIterator_(SparseMat_<_Tp>* _m) + : SparseMatConstIterator_<_Tp>(_m) +{} + +template inline +SparseMatIterator_<_Tp>::SparseMatIterator_(SparseMat* _m) + : SparseMatConstIterator_<_Tp>(_m) +{} + +template inline +SparseMatIterator_<_Tp>::SparseMatIterator_(const SparseMatIterator_<_Tp>& it) + : SparseMatConstIterator_<_Tp>(it) +{} + +template inline +SparseMatIterator_<_Tp>& SparseMatIterator_<_Tp>::operator = (const 
SparseMatIterator_<_Tp>& it) +{ + return reinterpret_cast&> + (*reinterpret_cast(this) = + reinterpret_cast(it)); +} + +template inline +_Tp& SparseMatIterator_<_Tp>::operator *() const +{ + return *(_Tp*)this->ptr; +} + +template inline +SparseMatIterator_<_Tp>& SparseMatIterator_<_Tp>::operator ++() +{ + SparseMatConstIterator::operator ++(); + return *this; +} + +template inline +SparseMatIterator_<_Tp> SparseMatIterator_<_Tp>::operator ++(int) +{ + SparseMatIterator_<_Tp> it = *this; + SparseMatConstIterator::operator ++(); + return it; +} + + + +//////////////////////// MatCommaInitializer_ /////////////////////// + +template inline +MatCommaInitializer_<_Tp>::MatCommaInitializer_(Mat_<_Tp>* _m) + : it(_m) +{} + +template template inline +MatCommaInitializer_<_Tp>& MatCommaInitializer_<_Tp>::operator , (T2 v) +{ + CV_DbgAssert( this->it < ((const Mat_<_Tp>*)this->it.m)->end() ); + *this->it = _Tp(v); + ++this->it; + return *this; +} + +template inline +MatCommaInitializer_<_Tp>::operator Mat_<_Tp>() const +{ + CV_DbgAssert( this->it == ((const Mat_<_Tp>*)this->it.m)->end() ); + return Mat_<_Tp>(*this->it.m); +} + + +template static inline +MatCommaInitializer_<_Tp> operator << (const Mat_<_Tp>& m, T2 val) +{ + MatCommaInitializer_<_Tp> commaInitializer((Mat_<_Tp>*)&m); + return (commaInitializer, val); +} + + + +///////////////////////// Matrix Expressions //////////////////////// + +inline +Mat& Mat::operator = (const MatExpr& e) +{ + e.op->assign(e, *this); + return *this; +} + +template inline +Mat_<_Tp>::Mat_(const MatExpr& e) +{ + e.op->assign(e, *this, traits::Type<_Tp>::value); +} + +template inline +Mat_<_Tp>& Mat_<_Tp>::operator = (const MatExpr& e) +{ + e.op->assign(e, *this, traits::Type<_Tp>::value); + return *this; +} + +template inline +MatExpr Mat_<_Tp>::zeros(int rows, int cols) +{ + return Mat::zeros(rows, cols, traits::Type<_Tp>::value); +} + +template inline +MatExpr Mat_<_Tp>::zeros(Size sz) +{ + return Mat::zeros(sz, traits::Type<_Tp>::value); +} + +template inline +MatExpr Mat_<_Tp>::ones(int rows, int cols) +{ + return Mat::ones(rows, cols, traits::Type<_Tp>::value); +} + +template inline +MatExpr Mat_<_Tp>::ones(Size sz) +{ + return Mat::ones(sz, traits::Type<_Tp>::value); +} + +template inline +MatExpr Mat_<_Tp>::eye(int rows, int cols) +{ + return Mat::eye(rows, cols, traits::Type<_Tp>::value); +} + +template inline +MatExpr Mat_<_Tp>::eye(Size sz) +{ + return Mat::eye(sz, traits::Type<_Tp>::value); +} + +inline +MatExpr::MatExpr() + : op(0), flags(0), a(Mat()), b(Mat()), c(Mat()), alpha(0), beta(0), s() +{} + +inline +MatExpr::MatExpr(const MatOp* _op, int _flags, const Mat& _a, const Mat& _b, + const Mat& _c, double _alpha, double _beta, const Scalar& _s) + : op(_op), flags(_flags), a(_a), b(_b), c(_c), alpha(_alpha), beta(_beta), s(_s) +{} + +inline +MatExpr::operator Mat() const +{ + Mat m; + op->assign(*this, m); + return m; +} + +template inline +MatExpr::operator Mat_<_Tp>() const +{ + Mat_<_Tp> m; + op->assign(*this, m, traits::Type<_Tp>::value); + return m; +} + + +template static inline +MatExpr min(const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + return cv::min((const Mat&)a, (const Mat&)b); +} + +template static inline +MatExpr min(const Mat_<_Tp>& a, double s) +{ + return cv::min((const Mat&)a, s); +} + +template static inline +MatExpr min(double s, const Mat_<_Tp>& a) +{ + return cv::min((const Mat&)a, s); +} + +template static inline +MatExpr max(const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + return cv::max((const Mat&)a, (const Mat&)b); +} + 
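+// NOTE (editorial sketch, not part of the upstream OpenCV header): the
+// MatCommaInitializer_ and MatExpr plumbing defined above is what enables
+// "stream-style" construction and lazy matrix expressions. An illustrative
+// example (2x2 float matrices assumed):
+//
+//     cv::Mat_<float> A = (cv::Mat_<float>(2, 2) << 1, 2, 3, 4);  // comma initializer
+//     cv::Mat_<float> B = cv::Mat_<float>::eye(2, 2);
+//     cv::MatExpr e = A * B + 2.0 * A;   // builds an expression tree, no computation yet
+//     cv::Mat C = e;                     // evaluated on conversion/assignment to Mat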
+template static inline +MatExpr max(const Mat_<_Tp>& a, double s) +{ + return cv::max((const Mat&)a, s); +} + +template static inline +MatExpr max(double s, const Mat_<_Tp>& a) +{ + return cv::max((const Mat&)a, s); +} + +template static inline +MatExpr abs(const Mat_<_Tp>& m) +{ + return cv::abs((const Mat&)m); +} + + +static inline +Mat& operator += (Mat& a, const MatExpr& b) +{ + b.op->augAssignAdd(b, a); + return a; +} + +static inline +const Mat& operator += (const Mat& a, const MatExpr& b) +{ + b.op->augAssignAdd(b, (Mat&)a); + return a; +} + +template static inline +Mat_<_Tp>& operator += (Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignAdd(b, a); + return a; +} + +template static inline +const Mat_<_Tp>& operator += (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignAdd(b, (Mat&)a); + return a; +} + +static inline +Mat& operator -= (Mat& a, const MatExpr& b) +{ + b.op->augAssignSubtract(b, a); + return a; +} + +static inline +const Mat& operator -= (const Mat& a, const MatExpr& b) +{ + b.op->augAssignSubtract(b, (Mat&)a); + return a; +} + +template static inline +Mat_<_Tp>& operator -= (Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignSubtract(b, a); + return a; +} + +template static inline +const Mat_<_Tp>& operator -= (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignSubtract(b, (Mat&)a); + return a; +} + +static inline +Mat& operator *= (Mat& a, const MatExpr& b) +{ + b.op->augAssignMultiply(b, a); + return a; +} + +static inline +const Mat& operator *= (const Mat& a, const MatExpr& b) +{ + b.op->augAssignMultiply(b, (Mat&)a); + return a; +} + +template static inline +Mat_<_Tp>& operator *= (Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignMultiply(b, a); + return a; +} + +template static inline +const Mat_<_Tp>& operator *= (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignMultiply(b, (Mat&)a); + return a; +} + +static inline +Mat& operator /= (Mat& a, const MatExpr& b) +{ + b.op->augAssignDivide(b, a); + return a; +} + +static inline +const Mat& operator /= (const Mat& a, const MatExpr& b) +{ + b.op->augAssignDivide(b, (Mat&)a); + return a; +} + +template static inline +Mat_<_Tp>& operator /= (Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignDivide(b, a); + return a; +} + +template static inline +const Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignDivide(b, (Mat&)a); + return a; +} + + +//////////////////////////////// UMat //////////////////////////////// + +template inline +UMat::UMat(const std::vector<_Tp>& vec, bool copyData) +: flags(MAGIC_VAL | traits::Type<_Tp>::value | CV_MAT_CONT_FLAG), dims(2), rows((int)vec.size()), +cols(1), allocator(0), usageFlags(USAGE_DEFAULT), u(0), offset(0), size(&rows) +{ + if(vec.empty()) + return; + if( !copyData ) + { + // !!!TODO!!! 
+ CV_Error(Error::StsNotImplemented, ""); + } + else + Mat((int)vec.size(), 1, traits::Type<_Tp>::value, (uchar*)&vec[0]).copyTo(*this); +} + +inline +UMat UMat::row(int y) const +{ + return UMat(*this, Range(y, y + 1), Range::all()); +} + +inline +UMat UMat::col(int x) const +{ + return UMat(*this, Range::all(), Range(x, x + 1)); +} + +inline +UMat UMat::rowRange(int startrow, int endrow) const +{ + return UMat(*this, Range(startrow, endrow), Range::all()); +} + +inline +UMat UMat::rowRange(const Range& r) const +{ + return UMat(*this, r, Range::all()); +} + +inline +UMat UMat::colRange(int startcol, int endcol) const +{ + return UMat(*this, Range::all(), Range(startcol, endcol)); +} + +inline +UMat UMat::colRange(const Range& r) const +{ + return UMat(*this, Range::all(), r); +} + +inline +UMat UMat::operator()( Range _rowRange, Range _colRange ) const +{ + return UMat(*this, _rowRange, _colRange); +} + +inline +UMat UMat::operator()( const Rect& roi ) const +{ + return UMat(*this, roi); +} + +inline +UMat UMat::operator()(const Range* ranges) const +{ + return UMat(*this, ranges); +} + +inline +UMat UMat::operator()(const std::vector& ranges) const +{ + return UMat(*this, ranges); +} + +inline +bool UMat::isContinuous() const +{ + return (flags & CONTINUOUS_FLAG) != 0; +} + +inline +bool UMat::isSubmatrix() const +{ + return (flags & SUBMATRIX_FLAG) != 0; +} + +inline +size_t UMat::elemSize() const +{ + size_t res = dims > 0 ? step.p[dims - 1] : 0; + CV_DbgAssert(res != 0); + return res; +} + +inline +size_t UMat::elemSize1() const +{ + return CV_ELEM_SIZE1(flags); +} + +inline +int UMat::type() const +{ + return CV_MAT_TYPE(flags); +} + +inline +int UMat::depth() const +{ + return CV_MAT_DEPTH(flags); +} + +inline +int UMat::channels() const +{ + return CV_MAT_CN(flags); +} + +inline +size_t UMat::step1(int i) const +{ + return step.p[i] / elemSize1(); +} + +#ifdef CV_CXX_MOVE_SEMANTICS + +inline +UMat::UMat(UMat&& m) +: flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), allocator(m.allocator), + usageFlags(m.usageFlags), u(m.u), offset(m.offset), size(&rows) +{ + if (m.dims <= 2) // move new step/size info + { + step[0] = m.step[0]; + step[1] = m.step[1]; + } + else + { + CV_DbgAssert(m.step.p != m.step.buf); + step.p = m.step.p; + size.p = m.size.p; + m.step.p = m.step.buf; + m.size.p = &m.rows; + } + m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0; + m.allocator = NULL; + m.u = NULL; + m.offset = 0; +} + +inline +UMat& UMat::operator = (UMat&& m) +{ + if (this == &m) + return *this; + release(); + flags = m.flags; dims = m.dims; rows = m.rows; cols = m.cols; + allocator = m.allocator; usageFlags = m.usageFlags; + u = m.u; + offset = m.offset; + if (step.p != step.buf) // release self step/size + { + fastFree(step.p); + step.p = step.buf; + size.p = &rows; + } + if (m.dims <= 2) // move new step/size info + { + step[0] = m.step[0]; + step[1] = m.step[1]; + } + else + { + CV_DbgAssert(m.step.p != m.step.buf); + step.p = m.step.p; + size.p = m.size.p; + m.step.p = m.step.buf; + m.size.p = &m.rows; + } + m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0; + m.allocator = NULL; + m.u = NULL; + m.offset = 0; + return *this; +} + +#endif + + +inline bool UMatData::hostCopyObsolete() const { return (flags & HOST_COPY_OBSOLETE) != 0; } +inline bool UMatData::deviceCopyObsolete() const { return (flags & DEVICE_COPY_OBSOLETE) != 0; } +inline bool UMatData::deviceMemMapped() const { return (flags & DEVICE_MEM_MAPPED) != 0; } +inline bool UMatData::copyOnMap() const { return (flags & 
COPY_ON_MAP) != 0; } +inline bool UMatData::tempUMat() const { return (flags & TEMP_UMAT) != 0; } +inline bool UMatData::tempCopiedUMat() const { return (flags & TEMP_COPIED_UMAT) == TEMP_COPIED_UMAT; } + +inline void UMatData::markDeviceMemMapped(bool flag) +{ + if(flag) + flags |= DEVICE_MEM_MAPPED; + else + flags &= ~DEVICE_MEM_MAPPED; +} + +inline void UMatData::markHostCopyObsolete(bool flag) +{ + if(flag) + flags |= HOST_COPY_OBSOLETE; + else + flags &= ~HOST_COPY_OBSOLETE; +} +inline void UMatData::markDeviceCopyObsolete(bool flag) +{ + if(flag) + flags |= DEVICE_COPY_OBSOLETE; + else + flags &= ~DEVICE_COPY_OBSOLETE; +} + +//! @endcond + +static inline +void swap(MatExpr& a, MatExpr& b) { a.swap(b); } + +} //cv + +#ifdef _MSC_VER +#pragma warning( pop ) +#endif + +#ifdef CV_DISABLE_CLANG_ENUM_WARNINGS +#undef CV_DISABLE_CLANG_ENUM_WARNINGS +#pragma clang diagnostic pop +#endif + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/matx.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/matx.hpp new file mode 100644 index 00000000..f25c8bce --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/matx.hpp @@ -0,0 +1,1518 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef OPENCV_CORE_MATX_HPP +#define OPENCV_CORE_MATX_HPP + +#ifndef __cplusplus +# error matx.hpp header must be compiled as C++ +#endif + +#include "opencv2/core/cvdef.h" +#include "opencv2/core/base.hpp" +#include "opencv2/core/traits.hpp" +#include "opencv2/core/saturate.hpp" + +#ifdef CV_CXX11 +#include +#endif + +namespace cv +{ + +//! @addtogroup core_basic +//! @{ + +////////////////////////////// Small Matrix /////////////////////////// + +//! @cond IGNORED +// FIXIT Remove this (especially CV_EXPORTS modifier) +struct CV_EXPORTS Matx_AddOp { Matx_AddOp() {} Matx_AddOp(const Matx_AddOp&) {} }; +struct CV_EXPORTS Matx_SubOp { Matx_SubOp() {} Matx_SubOp(const Matx_SubOp&) {} }; +struct CV_EXPORTS Matx_ScaleOp { Matx_ScaleOp() {} Matx_ScaleOp(const Matx_ScaleOp&) {} }; +struct CV_EXPORTS Matx_MulOp { Matx_MulOp() {} Matx_MulOp(const Matx_MulOp&) {} }; +struct CV_EXPORTS Matx_DivOp { Matx_DivOp() {} Matx_DivOp(const Matx_DivOp&) {} }; +struct CV_EXPORTS Matx_MatMulOp { Matx_MatMulOp() {} Matx_MatMulOp(const Matx_MatMulOp&) {} }; +struct CV_EXPORTS Matx_TOp { Matx_TOp() {} Matx_TOp(const Matx_TOp&) {} }; +//! @endcond + +/** @brief Template class for small matrices whose type and size are known at compilation time + +If you need a more flexible type, use Mat . The elements of the matrix M are accessible using the +M(i,j) notation. Most of the common matrix operations (see also @ref MatrixExpressions ) are +available. To do an operation on Matx that is not implemented, you can easily convert the matrix to +Mat and backwards: +@code{.cpp} + Matx33f m(1, 2, 3, + 4, 5, 6, + 7, 8, 9); + cout << sum(Mat(m*m.t())) << endl; +@endcode +Except of the plain constructor which takes a list of elements, Matx can be initialized from a C-array: +@code{.cpp} + float values[] = { 1, 2, 3}; + Matx31f m(values); +@endcode +In case if C++11 features are available, std::initializer_list can be also used to initialize Matx: +@code{.cpp} + Matx31f m = { 1, 2, 3}; +@endcode + */ +template class Matx +{ +public: + enum { + rows = m, + cols = n, + channels = rows*cols, +#ifdef OPENCV_TRAITS_ENABLE_DEPRECATED + depth = traits::Type<_Tp>::value, + type = CV_MAKETYPE(depth, channels), +#endif + shortdim = (m < n ? m : n) + }; + + typedef _Tp value_type; + typedef Matx<_Tp, m, n> mat_type; + typedef Matx<_Tp, shortdim, 1> diag_type; + + //! 
default constructor + Matx(); + + explicit Matx(_Tp v0); //!< 1x1 matrix + Matx(_Tp v0, _Tp v1); //!< 1x2 or 2x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2); //!< 1x3 or 3x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3); //!< 1x4, 2x2 or 4x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4); //!< 1x5 or 5x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5); //!< 1x6, 2x3, 3x2 or 6x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6); //!< 1x7 or 7x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7); //!< 1x8, 2x4, 4x2 or 8x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8); //!< 1x9, 3x3 or 9x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9); //!< 1x10, 2x5 or 5x2 or 10x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11); //!< 1x12, 2x6, 3x4, 4x3, 6x2 or 12x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11, + _Tp v12, _Tp v13); //!< 1x14, 2x7, 7x2 or 14x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11, + _Tp v12, _Tp v13, _Tp v14, _Tp v15); //!< 1x16, 4x4 or 16x1 matrix + explicit Matx(const _Tp* vals); //!< initialize from a plain array + +#ifdef CV_CXX11 + Matx(std::initializer_list<_Tp>); //!< initialize from an initializer list +#endif + + CV_NODISCARD_STD static Matx all(_Tp alpha); + CV_NODISCARD_STD static Matx zeros(); + CV_NODISCARD_STD static Matx ones(); + CV_NODISCARD_STD static Matx eye(); + CV_NODISCARD_STD static Matx diag(const diag_type& d); + /** @brief Generates uniformly distributed random numbers + @param a Range boundary. + @param b The other range boundary (boundaries don't have to be ordered, the lower boundary is inclusive, + the upper one is exclusive). + */ + CV_NODISCARD_STD static Matx randu(_Tp a, _Tp b); + /** @brief Generates normally distributed random numbers + @param a Mean value. + @param b Standard deviation. + */ + CV_NODISCARD_STD static Matx randn(_Tp a, _Tp b); + + //! dot product computed with the default precision + _Tp dot(const Matx<_Tp, m, n>& v) const; + + //! dot product computed in double-precision arithmetics + double ddot(const Matx<_Tp, m, n>& v) const; + + //! conversion to another data type + template operator Matx() const; + + //! change the matrix shape + template Matx<_Tp, m1, n1> reshape() const; + + //! extract part of the matrix + template Matx<_Tp, m1, n1> get_minor(int base_row, int base_col) const; + + //! extract the matrix row + Matx<_Tp, 1, n> row(int i) const; + + //! extract the matrix column + Matx<_Tp, m, 1> col(int i) const; + + //! extract the matrix diagonal + diag_type diag() const; + + //! transpose the matrix + Matx<_Tp, n, m> t() const; + + //! invert the matrix + Matx<_Tp, n, m> inv(int method=DECOMP_LU, bool *p_is_ok = NULL) const; + + //! solve linear system + template Matx<_Tp, n, l> solve(const Matx<_Tp, m, l>& rhs, int flags=DECOMP_LU) const; + Vec<_Tp, n> solve(const Vec<_Tp, m>& rhs, int method) const; + + //! multiply two matrices element-wise + Matx<_Tp, m, n> mul(const Matx<_Tp, m, n>& a) const; + + //! divide two matrices element-wise + Matx<_Tp, m, n> div(const Matx<_Tp, m, n>& a) const; + + //! element access + const _Tp& operator ()(int row, int col) const; + _Tp& operator ()(int row, int col); + + //! 
1D element access + const _Tp& operator ()(int i) const; + _Tp& operator ()(int i); + + Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_AddOp); + Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_SubOp); + template Matx(const Matx<_Tp, m, n>& a, _T2 alpha, Matx_ScaleOp); + Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_MulOp); + Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_DivOp); + template Matx(const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b, Matx_MatMulOp); + Matx(const Matx<_Tp, n, m>& a, Matx_TOp); + + _Tp val[m*n]; //< matrix elements +}; + +typedef Matx Matx12f; +typedef Matx Matx12d; +typedef Matx Matx13f; +typedef Matx Matx13d; +typedef Matx Matx14f; +typedef Matx Matx14d; +typedef Matx Matx16f; +typedef Matx Matx16d; + +typedef Matx Matx21f; +typedef Matx Matx21d; +typedef Matx Matx31f; +typedef Matx Matx31d; +typedef Matx Matx41f; +typedef Matx Matx41d; +typedef Matx Matx61f; +typedef Matx Matx61d; + +typedef Matx Matx22f; +typedef Matx Matx22d; +typedef Matx Matx23f; +typedef Matx Matx23d; +typedef Matx Matx32f; +typedef Matx Matx32d; + +typedef Matx Matx33f; +typedef Matx Matx33d; + +typedef Matx Matx34f; +typedef Matx Matx34d; +typedef Matx Matx43f; +typedef Matx Matx43d; + +typedef Matx Matx44f; +typedef Matx Matx44d; +typedef Matx Matx66f; +typedef Matx Matx66d; + +/*! + traits +*/ +template class DataType< Matx<_Tp, m, n> > +{ +public: + typedef Matx<_Tp, m, n> value_type; + typedef Matx::work_type, m, n> work_type; + typedef _Tp channel_type; + typedef value_type vec_type; + + enum { generic_type = 0, + channels = m * n, + fmt = traits::SafeFmt::fmt + ((channels - 1) << 8) +#ifdef OPENCV_TRAITS_ENABLE_DEPRECATED + ,depth = DataType::depth + ,type = CV_MAKETYPE(depth, channels) +#endif + }; +}; + +namespace traits { +template +struct Depth< Matx<_Tp, m, n> > { enum { value = Depth<_Tp>::value }; }; +template +struct Type< Matx<_Tp, m, n> > { enum { value = CV_MAKETYPE(Depth<_Tp>::value, n*m) }; }; +} // namespace + + +/** @brief Comma-separated Matrix Initializer +*/ +template class MatxCommaInitializer +{ +public: + MatxCommaInitializer(Matx<_Tp, m, n>* _mtx); + template MatxCommaInitializer<_Tp, m, n>& operator , (T2 val); + Matx<_Tp, m, n> operator *() const; + + Matx<_Tp, m, n>* dst; + int idx; +}; + +/* + Utility methods +*/ +template static double determinant(const Matx<_Tp, m, m>& a); +template static double trace(const Matx<_Tp, m, n>& a); +template static double norm(const Matx<_Tp, m, n>& M); +template static double norm(const Matx<_Tp, m, n>& M, int normType); + + + +/////////////////////// Vec (used as element of multi-channel images ///////////////////// + +/** @brief Template class for short numerical vectors, a partial case of Matx + +This template class represents short numerical vectors (of 1, 2, 3, 4 ... elements) on which you +can perform basic arithmetical operations, access individual elements using [] operator etc. The +vectors are allocated on stack, as opposite to std::valarray, std::vector, cv::Mat etc., which +elements are dynamically allocated in the heap. + +The template takes 2 parameters: +@tparam _Tp element type +@tparam cn the number of elements + +In addition to the universal notation like Vec, you can use shorter aliases +for the most popular specialized variants of Vec, e.g. Vec3f ~ Vec. + +It is possible to convert Vec\ to/from Point_, Vec\ to/from Point3_ , and Vec\ +to CvScalar or Scalar_. Use operator[] to access the elements of Vec. 
+ +All the expected vector operations are also implemented: +- v1 = v2 + v3 +- v1 = v2 - v3 +- v1 = v2 \* scale +- v1 = scale \* v2 +- v1 = -v2 +- v1 += v2 and other augmenting operations +- v1 == v2, v1 != v2 +- norm(v1) (euclidean norm) +The Vec class is commonly used to describe pixel types of multi-channel arrays. See Mat for details. +*/ +template class Vec : public Matx<_Tp, cn, 1> +{ +public: + typedef _Tp value_type; + enum { + channels = cn, +#ifdef OPENCV_TRAITS_ENABLE_DEPRECATED + depth = Matx<_Tp, cn, 1>::depth, + type = CV_MAKETYPE(depth, channels), +#endif + _dummy_enum_finalizer = 0 + }; + + //! default constructor + Vec(); + + Vec(_Tp v0); //!< 1-element vector constructor + Vec(_Tp v0, _Tp v1); //!< 2-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2); //!< 3-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3); //!< 4-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4); //!< 5-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5); //!< 6-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6); //!< 7-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7); //!< 8-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8); //!< 9-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9); //!< 10-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9, _Tp v10, _Tp v11, _Tp v12, _Tp v13); //!< 14-element vector constructor + explicit Vec(const _Tp* values); + +#ifdef CV_CXX11 + Vec(std::initializer_list<_Tp>); +#endif + + Vec(const Vec<_Tp, cn>& v); + + static Vec all(_Tp alpha); + + //! per-element multiplication + Vec mul(const Vec<_Tp, cn>& v) const; + + //! conjugation (makes sense for complex numbers and quaternions) + Vec conj() const; + + /*! + cross product of the two 3D vectors. + + For other dimensionalities the exception is raised + */ + Vec cross(const Vec& v) const; + //! conversion to another data type + template operator Vec() const; + + /*! element access */ + const _Tp& operator [](int i) const; + _Tp& operator[](int i); + const _Tp& operator ()(int i) const; + _Tp& operator ()(int i); + +#ifdef CV_CXX11 + Vec<_Tp, cn>& operator=(const Vec<_Tp, cn>& rhs) = default; +#endif + + Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_AddOp); + Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_SubOp); + template Vec(const Matx<_Tp, cn, 1>& a, _T2 alpha, Matx_ScaleOp); +}; + +/** @name Shorter aliases for the most popular specializations of Vec + @{ +*/ +typedef Vec Vec2b; +typedef Vec Vec3b; +typedef Vec Vec4b; + +typedef Vec Vec2s; +typedef Vec Vec3s; +typedef Vec Vec4s; + +typedef Vec Vec2w; +typedef Vec Vec3w; +typedef Vec Vec4w; + +typedef Vec Vec2i; +typedef Vec Vec3i; +typedef Vec Vec4i; +typedef Vec Vec6i; +typedef Vec Vec8i; + +typedef Vec Vec2f; +typedef Vec Vec3f; +typedef Vec Vec4f; +typedef Vec Vec6f; + +typedef Vec Vec2d; +typedef Vec Vec3d; +typedef Vec Vec4d; +typedef Vec Vec6d; +/** @} */ + +/*! 
+ traits +*/ +template class DataType< Vec<_Tp, cn> > +{ +public: + typedef Vec<_Tp, cn> value_type; + typedef Vec::work_type, cn> work_type; + typedef _Tp channel_type; + typedef value_type vec_type; + + enum { generic_type = 0, + channels = cn, + fmt = DataType::fmt + ((channels - 1) << 8), +#ifdef OPENCV_TRAITS_ENABLE_DEPRECATED + depth = DataType::depth, + type = CV_MAKETYPE(depth, channels), +#endif + _dummy_enum_finalizer = 0 + }; +}; + +namespace traits { +template +struct Depth< Vec<_Tp, cn> > { enum { value = Depth<_Tp>::value }; }; +template +struct Type< Vec<_Tp, cn> > { enum { value = CV_MAKETYPE(Depth<_Tp>::value, cn) }; }; +} // namespace + + +/** @brief Comma-separated Vec Initializer +*/ +template class VecCommaInitializer : public MatxCommaInitializer<_Tp, m, 1> +{ +public: + VecCommaInitializer(Vec<_Tp, m>* _vec); + template VecCommaInitializer<_Tp, m>& operator , (T2 val); + Vec<_Tp, m> operator *() const; +}; + +template static Vec<_Tp, cn> normalize(const Vec<_Tp, cn>& v); + +//! @} core_basic + +//! @cond IGNORED + +///////////////////////////////////// helper classes ///////////////////////////////////// +namespace internal +{ + +template struct Matx_DetOp +{ + double operator ()(const Matx<_Tp, m, m>& a) const + { + Matx<_Tp, m, m> temp = a; + double p = LU(temp.val, m*sizeof(_Tp), m, 0, 0, 0); + if( p == 0 ) + return p; + for( int i = 0; i < m; i++ ) + p *= temp(i, i); + return p; + } +}; + +template struct Matx_DetOp<_Tp, 1> +{ + double operator ()(const Matx<_Tp, 1, 1>& a) const + { + return a(0,0); + } +}; + +template struct Matx_DetOp<_Tp, 2> +{ + double operator ()(const Matx<_Tp, 2, 2>& a) const + { + return a(0,0)*a(1,1) - a(0,1)*a(1,0); + } +}; + +template struct Matx_DetOp<_Tp, 3> +{ + double operator ()(const Matx<_Tp, 3, 3>& a) const + { + return a(0,0)*(a(1,1)*a(2,2) - a(2,1)*a(1,2)) - + a(0,1)*(a(1,0)*a(2,2) - a(2,0)*a(1,2)) + + a(0,2)*(a(1,0)*a(2,1) - a(2,0)*a(1,1)); + } +}; + +template Vec<_Tp, 2> inline conjugate(const Vec<_Tp, 2>& v) +{ + return Vec<_Tp, 2>(v[0], -v[1]); +} + +template Vec<_Tp, 4> inline conjugate(const Vec<_Tp, 4>& v) +{ + return Vec<_Tp, 4>(v[0], -v[1], -v[2], -v[3]); +} + +} // internal + + + +////////////////////////////////// Matx Implementation /////////////////////////////////// + +template inline +Matx<_Tp, m, n>::Matx() +{ + for(int i = 0; i < channels; i++) val[i] = _Tp(0); +} + +template inline +Matx<_Tp, m, n>::Matx(_Tp v0) +{ + val[0] = v0; + for(int i = 1; i < channels; i++) val[i] = _Tp(0); +} + +template inline +Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1) +{ + CV_StaticAssert(channels >= 2, "Matx should have at least 2 elements."); + val[0] = v0; val[1] = v1; + for(int i = 2; i < channels; i++) val[i] = _Tp(0); +} + +template inline +Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2) +{ + CV_StaticAssert(channels >= 3, "Matx should have at least 3 elements."); + val[0] = v0; val[1] = v1; val[2] = v2; + for(int i = 3; i < channels; i++) val[i] = _Tp(0); +} + +template inline +Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3) +{ + CV_StaticAssert(channels >= 4, "Matx should have at least 4 elements."); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + for(int i = 4; i < channels; i++) val[i] = _Tp(0); +} + +template inline +Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4) +{ + CV_StaticAssert(channels >= 5, "Matx should have at least 5 elements."); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; val[4] = v4; + for(int i = 5; i < channels; i++) val[i] = _Tp(0); +} + +template inline +Matx<_Tp, m, 
n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5) +{ + CV_StaticAssert(channels >= 6, "Matx should have at least 6 elements."); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; + for(int i = 6; i < channels; i++) val[i] = _Tp(0); +} + +template inline +Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6) +{ + CV_StaticAssert(channels >= 7, "Matx should have at least 7 elements."); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; + for(int i = 7; i < channels; i++) val[i] = _Tp(0); +} + +template inline +Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7) +{ + CV_StaticAssert(channels >= 8, "Matx should have at least 8 elements."); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + for(int i = 8; i < channels; i++) val[i] = _Tp(0); +} + +template inline +Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8) +{ + CV_StaticAssert(channels >= 9, "Matx should have at least 9 elements."); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; + for(int i = 9; i < channels; i++) val[i] = _Tp(0); +} + +template inline +Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9) +{ + CV_StaticAssert(channels >= 10, "Matx should have at least 10 elements."); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; val[9] = v9; + for(int i = 10; i < channels; i++) val[i] = _Tp(0); +} + + +template inline +Matx<_Tp,m,n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9, _Tp v10, _Tp v11) +{ + CV_StaticAssert(channels >= 12, "Matx should have at least 12 elements."); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; val[9] = v9; val[10] = v10; val[11] = v11; + for(int i = 12; i < channels; i++) val[i] = _Tp(0); +} + +template inline +Matx<_Tp,m,n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9, _Tp v10, _Tp v11, _Tp v12, _Tp v13) +{ + CV_StaticAssert(channels >= 14, "Matx should have at least 14 elements."); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; val[9] = v9; val[10] = v10; val[11] = v11; + val[12] = v12; val[13] = v13; + for (int i = 14; i < channels; i++) val[i] = _Tp(0); +} + + +template inline +Matx<_Tp,m,n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9, _Tp v10, _Tp v11, _Tp v12, _Tp v13, _Tp v14, _Tp v15) +{ + CV_StaticAssert(channels >= 16, "Matx should have at least 16 elements."); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; val[9] = v9; val[10] = v10; val[11] = v11; + val[12] = v12; val[13] = v13; val[14] = v14; val[15] = v15; + for(int i = 16; i < channels; i++) val[i] = _Tp(0); +} + +template inline +Matx<_Tp, m, n>::Matx(const _Tp* values) +{ + for( int i = 0; i < channels; i++ ) val[i] = values[i]; +} + +#ifdef CV_CXX11 +template inline +Matx<_Tp, m, n>::Matx(std::initializer_list<_Tp> list) +{ + CV_DbgAssert(list.size() == channels); + int i = 0; + for(const auto& elem : list) + { + val[i++] = elem; + } +} +#endif + +template inline 
+Matx<_Tp, m, n> Matx<_Tp, m, n>::all(_Tp alpha) +{ + Matx<_Tp, m, n> M; + for( int i = 0; i < m*n; i++ ) M.val[i] = alpha; + return M; +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::zeros() +{ + return all(0); +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::ones() +{ + return all(1); +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::eye() +{ + Matx<_Tp,m,n> M; + for(int i = 0; i < shortdim; i++) + M(i,i) = 1; + return M; +} + +template inline +_Tp Matx<_Tp, m, n>::dot(const Matx<_Tp, m, n>& M) const +{ + _Tp s = 0; + for( int i = 0; i < channels; i++ ) s += val[i]*M.val[i]; + return s; +} + +template inline +double Matx<_Tp, m, n>::ddot(const Matx<_Tp, m, n>& M) const +{ + double s = 0; + for( int i = 0; i < channels; i++ ) s += (double)val[i]*M.val[i]; + return s; +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::diag(const typename Matx<_Tp,m,n>::diag_type& d) +{ + Matx<_Tp,m,n> M; + for(int i = 0; i < shortdim; i++) + M(i,i) = d(i, 0); + return M; +} + +template template +inline Matx<_Tp, m, n>::operator Matx() const +{ + Matx M; + for( int i = 0; i < m*n; i++ ) M.val[i] = saturate_cast(val[i]); + return M; +} + +template template inline +Matx<_Tp, m1, n1> Matx<_Tp, m, n>::reshape() const +{ + CV_StaticAssert(m1*n1 == m*n, "Input and destnarion matrices must have the same number of elements"); + return (const Matx<_Tp, m1, n1>&)*this; +} + +template +template inline +Matx<_Tp, m1, n1> Matx<_Tp, m, n>::get_minor(int base_row, int base_col) const +{ + CV_DbgAssert(0 <= base_row && base_row+m1 <= m && 0 <= base_col && base_col+n1 <= n); + Matx<_Tp, m1, n1> s; + for( int di = 0; di < m1; di++ ) + for( int dj = 0; dj < n1; dj++ ) + s(di, dj) = (*this)(base_row+di, base_col+dj); + return s; +} + +template inline +Matx<_Tp, 1, n> Matx<_Tp, m, n>::row(int i) const +{ + CV_DbgAssert((unsigned)i < (unsigned)m); + return Matx<_Tp, 1, n>(&val[i*n]); +} + +template inline +Matx<_Tp, m, 1> Matx<_Tp, m, n>::col(int j) const +{ + CV_DbgAssert((unsigned)j < (unsigned)n); + Matx<_Tp, m, 1> v; + for( int i = 0; i < m; i++ ) + v.val[i] = val[i*n + j]; + return v; +} + +template inline +typename Matx<_Tp, m, n>::diag_type Matx<_Tp, m, n>::diag() const +{ + diag_type d; + for( int i = 0; i < shortdim; i++ ) + d.val[i] = val[i*n + i]; + return d; +} + +template inline +const _Tp& Matx<_Tp, m, n>::operator()(int row_idx, int col_idx) const +{ + CV_DbgAssert( (unsigned)row_idx < (unsigned)m && (unsigned)col_idx < (unsigned)n ); + return this->val[row_idx*n + col_idx]; +} + +template inline +_Tp& Matx<_Tp, m, n>::operator ()(int row_idx, int col_idx) +{ + CV_DbgAssert( (unsigned)row_idx < (unsigned)m && (unsigned)col_idx < (unsigned)n ); + return val[row_idx*n + col_idx]; +} + +template inline +const _Tp& Matx<_Tp, m, n>::operator ()(int i) const +{ + CV_StaticAssert(m == 1 || n == 1, "Single index indexation requires matrix to be a column or a row"); + CV_DbgAssert( (unsigned)i < (unsigned)(m+n-1) ); + return val[i]; +} + +template inline +_Tp& Matx<_Tp, m, n>::operator ()(int i) +{ + CV_StaticAssert(m == 1 || n == 1, "Single index indexation requires matrix to be a column or a row"); + CV_DbgAssert( (unsigned)i < (unsigned)(m+n-1) ); + return val[i]; +} + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_AddOp) +{ + for( int i = 0; i < channels; i++ ) + val[i] = saturate_cast<_Tp>(a.val[i] + b.val[i]); +} + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_SubOp) +{ + for( int i = 0; i < channels; i++ ) + 
val[i] = saturate_cast<_Tp>(a.val[i] - b.val[i]); +} + +template template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, _T2 alpha, Matx_ScaleOp) +{ + for( int i = 0; i < channels; i++ ) + val[i] = saturate_cast<_Tp>(a.val[i] * alpha); +} + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_MulOp) +{ + for( int i = 0; i < channels; i++ ) + val[i] = saturate_cast<_Tp>(a.val[i] * b.val[i]); +} + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_DivOp) +{ + for( int i = 0; i < channels; i++ ) + val[i] = saturate_cast<_Tp>(a.val[i] / b.val[i]); +} + +template template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b, Matx_MatMulOp) +{ + for( int i = 0; i < m; i++ ) + for( int j = 0; j < n; j++ ) + { + _Tp s = 0; + for( int k = 0; k < l; k++ ) + s += a(i, k) * b(k, j); + val[i*n + j] = s; + } +} + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, n, m>& a, Matx_TOp) +{ + for( int i = 0; i < m; i++ ) + for( int j = 0; j < n; j++ ) + val[i*n + j] = a(j, i); +} + +template inline +Matx<_Tp, m, n> Matx<_Tp, m, n>::mul(const Matx<_Tp, m, n>& a) const +{ + return Matx<_Tp, m, n>(*this, a, Matx_MulOp()); +} + +template inline +Matx<_Tp, m, n> Matx<_Tp, m, n>::div(const Matx<_Tp, m, n>& a) const +{ + return Matx<_Tp, m, n>(*this, a, Matx_DivOp()); +} + +template inline +Matx<_Tp, n, m> Matx<_Tp, m, n>::t() const +{ + return Matx<_Tp, n, m>(*this, Matx_TOp()); +} + +template inline +Vec<_Tp, n> Matx<_Tp, m, n>::solve(const Vec<_Tp, m>& rhs, int method) const +{ + Matx<_Tp, n, 1> x = solve((const Matx<_Tp, m, 1>&)(rhs), method); + return (Vec<_Tp, n>&)(x); +} + +template static inline +double determinant(const Matx<_Tp, m, m>& a) +{ + return cv::internal::Matx_DetOp<_Tp, m>()(a); +} + +template static inline +double trace(const Matx<_Tp, m, n>& a) +{ + _Tp s = 0; + for( int i = 0; i < std::min(m, n); i++ ) + s += a(i,i); + return s; +} + +template static inline +double norm(const Matx<_Tp, m, n>& M) +{ + return std::sqrt(normL2Sqr<_Tp, double>(M.val, m*n)); +} + +template static inline +double norm(const Matx<_Tp, m, n>& M, int normType) +{ + switch(normType) { + case NORM_INF: + return (double)normInf<_Tp, typename DataType<_Tp>::work_type>(M.val, m*n); + case NORM_L1: + return (double)normL1<_Tp, typename DataType<_Tp>::work_type>(M.val, m*n); + case NORM_L2SQR: + return (double)normL2Sqr<_Tp, typename DataType<_Tp>::work_type>(M.val, m*n); + default: + case NORM_L2: + return std::sqrt((double)normL2Sqr<_Tp, typename DataType<_Tp>::work_type>(M.val, m*n)); + } +} + + + +//////////////////////////////// matx comma initializer ////////////////////////////////// + +template static inline +MatxCommaInitializer<_Tp, m, n> operator << (const Matx<_Tp, m, n>& mtx, _T2 val) +{ + MatxCommaInitializer<_Tp, m, n> commaInitializer((Matx<_Tp, m, n>*)&mtx); + return (commaInitializer, val); +} + +template inline +MatxCommaInitializer<_Tp, m, n>::MatxCommaInitializer(Matx<_Tp, m, n>* _mtx) + : dst(_mtx), idx(0) +{} + +template template inline +MatxCommaInitializer<_Tp, m, n>& MatxCommaInitializer<_Tp, m, n>::operator , (_T2 value) +{ + CV_DbgAssert( idx < m*n ); + dst->val[idx++] = saturate_cast<_Tp>(value); + return *this; +} + +template inline +Matx<_Tp, m, n> MatxCommaInitializer<_Tp, m, n>::operator *() const +{ + CV_DbgAssert( idx == n*m ); + return *dst; +} + + + +/////////////////////////////////// Vec Implementation /////////////////////////////////// + +template inline +Vec<_Tp, 
cn>::Vec() {} + +template inline +Vec<_Tp, cn>::Vec(_Tp v0) + : Matx<_Tp, cn, 1>(v0) {} + +template inline +Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1) + : Matx<_Tp, cn, 1>(v0, v1) {} + +template inline +Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2) + : Matx<_Tp, cn, 1>(v0, v1, v2) {} + +template inline +Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3) {} + +template inline +Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4) {} + +template inline +Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5) {} + +template inline +Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6) {} + +template inline +Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7) {} + +template inline +Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7, v8) {} + +template inline +Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9) {} + +template inline +Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9, _Tp v10, _Tp v11, _Tp v12, _Tp v13) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13) {} + +template inline +Vec<_Tp, cn>::Vec(const _Tp* values) + : Matx<_Tp, cn, 1>(values) {} + +#ifdef CV_CXX11 +template inline +Vec<_Tp, cn>::Vec(std::initializer_list<_Tp> list) + : Matx<_Tp, cn, 1>(list) {} +#endif + +template inline +Vec<_Tp, cn>::Vec(const Vec<_Tp, cn>& m) + : Matx<_Tp, cn, 1>(m.val) {} + +template inline +Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_AddOp op) + : Matx<_Tp, cn, 1>(a, b, op) {} + +template inline +Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_SubOp op) + : Matx<_Tp, cn, 1>(a, b, op) {} + +template template inline +Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, _T2 alpha, Matx_ScaleOp op) + : Matx<_Tp, cn, 1>(a, alpha, op) {} + +template inline +Vec<_Tp, cn> Vec<_Tp, cn>::all(_Tp alpha) +{ + Vec v; + for( int i = 0; i < cn; i++ ) v.val[i] = alpha; + return v; +} + +template inline +Vec<_Tp, cn> Vec<_Tp, cn>::mul(const Vec<_Tp, cn>& v) const +{ + Vec<_Tp, cn> w; + for( int i = 0; i < cn; i++ ) w.val[i] = saturate_cast<_Tp>(this->val[i]*v.val[i]); + return w; +} + +template<> inline +Vec Vec::conj() const +{ + return cv::internal::conjugate(*this); +} + +template<> inline +Vec Vec::conj() const +{ + return cv::internal::conjugate(*this); +} + +template<> inline +Vec Vec::conj() const +{ + return cv::internal::conjugate(*this); +} + +template<> inline +Vec Vec::conj() const +{ + return cv::internal::conjugate(*this); +} + +template inline +Vec<_Tp, cn> Vec<_Tp, cn>::cross(const Vec<_Tp, cn>&) const +{ + CV_StaticAssert(cn == 3, "for arbitrary-size vector there is no cross-product defined"); + return Vec<_Tp, cn>(); +} + +template<> inline +Vec Vec::cross(const Vec& v) const +{ + return Vec(this->val[1]*v.val[2] - this->val[2]*v.val[1], + this->val[2]*v.val[0] - this->val[0]*v.val[2], + this->val[0]*v.val[1] - this->val[1]*v.val[0]); +} + +template<> inline +Vec Vec::cross(const Vec& v) const +{ + return Vec(this->val[1]*v.val[2] - this->val[2]*v.val[1], + this->val[2]*v.val[0] - 
this->val[0]*v.val[2], + this->val[0]*v.val[1] - this->val[1]*v.val[0]); +} + +template template inline +Vec<_Tp, cn>::operator Vec() const +{ + Vec v; + for( int i = 0; i < cn; i++ ) v.val[i] = saturate_cast(this->val[i]); + return v; +} + +template inline +const _Tp& Vec<_Tp, cn>::operator [](int i) const +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template inline +_Tp& Vec<_Tp, cn>::operator [](int i) +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template inline +const _Tp& Vec<_Tp, cn>::operator ()(int i) const +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template inline +_Tp& Vec<_Tp, cn>::operator ()(int i) +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template inline +Vec<_Tp, cn> normalize(const Vec<_Tp, cn>& v) +{ + double nv = norm(v); + return v * (nv ? 1./nv : 0.); +} + + + +//////////////////////////////// vec comma initializer ////////////////////////////////// + + +template static inline +VecCommaInitializer<_Tp, cn> operator << (const Vec<_Tp, cn>& vec, _T2 val) +{ + VecCommaInitializer<_Tp, cn> commaInitializer((Vec<_Tp, cn>*)&vec); + return (commaInitializer, val); +} + +template inline +VecCommaInitializer<_Tp, cn>::VecCommaInitializer(Vec<_Tp, cn>* _vec) + : MatxCommaInitializer<_Tp, cn, 1>(_vec) +{} + +template template inline +VecCommaInitializer<_Tp, cn>& VecCommaInitializer<_Tp, cn>::operator , (_T2 value) +{ + CV_DbgAssert( this->idx < cn ); + this->dst->val[this->idx++] = saturate_cast<_Tp>(value); + return *this; +} + +template inline +Vec<_Tp, cn> VecCommaInitializer<_Tp, cn>::operator *() const +{ + CV_DbgAssert( this->idx == cn ); + return *this->dst; +} + +//! @endcond + +///////////////////////////// Matx out-of-class operators //////////////////////////////// + +//! @relates cv::Matx +//! 
@{ + +template static inline +Matx<_Tp1, m, n>& operator += (Matx<_Tp1, m, n>& a, const Matx<_Tp2, m, n>& b) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] + b.val[i]); + return a; +} + +template static inline +Matx<_Tp1, m, n>& operator -= (Matx<_Tp1, m, n>& a, const Matx<_Tp2, m, n>& b) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] - b.val[i]); + return a; +} + +template static inline +Matx<_Tp, m, n> operator + (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + return Matx<_Tp, m, n>(a, b, Matx_AddOp()); +} + +template static inline +Matx<_Tp, m, n> operator - (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + return Matx<_Tp, m, n>(a, b, Matx_SubOp()); +} + +template static inline +Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, int alpha) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha); + return a; +} + +template static inline +Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, float alpha) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha); + return a; +} + +template static inline +Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, double alpha) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha); + return a; +} + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, int alpha) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, float alpha) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, double alpha) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (int alpha, const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (float alpha, const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (double alpha, const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n>& operator /= (Matx<_Tp, m, n>& a, float alpha) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = a.val[i] / alpha; + return a; +} + +template static inline +Matx<_Tp, m, n>& operator /= (Matx<_Tp, m, n>& a, double alpha) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = a.val[i] / alpha; + return a; +} + +template static inline +Matx<_Tp, m, n> operator / (const Matx<_Tp, m, n>& a, float alpha) +{ + return Matx<_Tp, m, n>(a, 1.f/alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator / (const Matx<_Tp, m, n>& a, double alpha) +{ + return Matx<_Tp, m, n>(a, 1./alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator - (const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, -1, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b) +{ + return Matx<_Tp, m, n>(a, b, Matx_MatMulOp()); +} + +template static inline +Vec<_Tp, m> operator * (const Matx<_Tp, m, n>& a, const Vec<_Tp, n>& b) +{ + Matx<_Tp, m, 1> c(a, b, Matx_MatMulOp()); + return (const Vec<_Tp, m>&)(c); +} + +template static inline +bool operator == (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + for( int i = 0; i < m*n; i++ ) + if( a.val[i] != b.val[i] ) return false; + return 
true; +} + +template static inline +bool operator != (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + return !(a == b); +} + +//! @} + +////////////////////////////// Vec out-of-class operators //////////////////////////////// + +//! @relates cv::Vec +//! @{ + +template static inline +Vec<_Tp1, cn>& operator += (Vec<_Tp1, cn>& a, const Vec<_Tp2, cn>& b) +{ + for( int i = 0; i < cn; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] + b.val[i]); + return a; +} + +template static inline +Vec<_Tp1, cn>& operator -= (Vec<_Tp1, cn>& a, const Vec<_Tp2, cn>& b) +{ + for( int i = 0; i < cn; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] - b.val[i]); + return a; +} + +template static inline +Vec<_Tp, cn> operator + (const Vec<_Tp, cn>& a, const Vec<_Tp, cn>& b) +{ + return Vec<_Tp, cn>(a, b, Matx_AddOp()); +} + +template static inline +Vec<_Tp, cn> operator - (const Vec<_Tp, cn>& a, const Vec<_Tp, cn>& b) +{ + return Vec<_Tp, cn>(a, b, Matx_SubOp()); +} + +template static inline +Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, int alpha) +{ + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*alpha); + return a; +} + +template static inline +Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, float alpha) +{ + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*alpha); + return a; +} + +template static inline +Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, double alpha) +{ + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*alpha); + return a; +} + +template static inline +Vec<_Tp, cn>& operator /= (Vec<_Tp, cn>& a, int alpha) +{ + double ialpha = 1./alpha; + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*ialpha); + return a; +} + +template static inline +Vec<_Tp, cn>& operator /= (Vec<_Tp, cn>& a, float alpha) +{ + float ialpha = 1.f/alpha; + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*ialpha); + return a; +} + +template static inline +Vec<_Tp, cn>& operator /= (Vec<_Tp, cn>& a, double alpha) +{ + double ialpha = 1./alpha; + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*ialpha); + return a; +} + +template static inline +Vec<_Tp, cn> operator * (const Vec<_Tp, cn>& a, int alpha) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Vec<_Tp, cn> operator * (int alpha, const Vec<_Tp, cn>& a) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Vec<_Tp, cn> operator * (const Vec<_Tp, cn>& a, float alpha) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Vec<_Tp, cn> operator * (float alpha, const Vec<_Tp, cn>& a) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Vec<_Tp, cn> operator * (const Vec<_Tp, cn>& a, double alpha) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Vec<_Tp, cn> operator * (double alpha, const Vec<_Tp, cn>& a) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Vec<_Tp, cn> operator / (const Vec<_Tp, cn>& a, int alpha) +{ + return Vec<_Tp, cn>(a, 1./alpha, Matx_ScaleOp()); +} + +template static inline +Vec<_Tp, cn> operator / (const Vec<_Tp, cn>& a, float alpha) +{ + return Vec<_Tp, cn>(a, 1.f/alpha, Matx_ScaleOp()); +} + +template static inline +Vec<_Tp, cn> operator / (const Vec<_Tp, cn>& a, double alpha) +{ + return Vec<_Tp, cn>(a, 1./alpha, Matx_ScaleOp()); +} + +template static inline +Vec<_Tp, cn> operator - (const Vec<_Tp, cn>& a) +{ + Vec<_Tp,cn> t; + for( int i = 0; i < cn; i++ ) t.val[i] = 
saturate_cast<_Tp>(-a.val[i]); + return t; +} + +template inline Vec<_Tp, 4> operator * (const Vec<_Tp, 4>& v1, const Vec<_Tp, 4>& v2) +{ + return Vec<_Tp, 4>(saturate_cast<_Tp>(v1[0]*v2[0] - v1[1]*v2[1] - v1[2]*v2[2] - v1[3]*v2[3]), + saturate_cast<_Tp>(v1[0]*v2[1] + v1[1]*v2[0] + v1[2]*v2[3] - v1[3]*v2[2]), + saturate_cast<_Tp>(v1[0]*v2[2] - v1[1]*v2[3] + v1[2]*v2[0] + v1[3]*v2[1]), + saturate_cast<_Tp>(v1[0]*v2[3] + v1[1]*v2[2] - v1[2]*v2[1] + v1[3]*v2[0])); +} + +template inline Vec<_Tp, 4>& operator *= (Vec<_Tp, 4>& v1, const Vec<_Tp, 4>& v2) +{ + v1 = v1 * v2; + return v1; +} + +//! @} + +} // cv + +#endif // OPENCV_CORE_MATX_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/neon_utils.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/neon_utils.hpp new file mode 100644 index 00000000..573ba99e --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/neon_utils.hpp @@ -0,0 +1,128 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2015, Itseez Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_HAL_NEON_UTILS_HPP +#define OPENCV_HAL_NEON_UTILS_HPP + +#include "opencv2/core/cvdef.h" + +//! @addtogroup core_utils_neon +//! 
@{ + +#if CV_NEON + +inline int32x2_t cv_vrnd_s32_f32(float32x2_t v) +{ + static int32x2_t v_sign = vdup_n_s32(1 << 31), + v_05 = vreinterpret_s32_f32(vdup_n_f32(0.5f)); + + int32x2_t v_addition = vorr_s32(v_05, vand_s32(v_sign, vreinterpret_s32_f32(v))); + return vcvt_s32_f32(vadd_f32(v, vreinterpret_f32_s32(v_addition))); +} + +inline int32x4_t cv_vrndq_s32_f32(float32x4_t v) +{ + static int32x4_t v_sign = vdupq_n_s32(1 << 31), + v_05 = vreinterpretq_s32_f32(vdupq_n_f32(0.5f)); + + int32x4_t v_addition = vorrq_s32(v_05, vandq_s32(v_sign, vreinterpretq_s32_f32(v))); + return vcvtq_s32_f32(vaddq_f32(v, vreinterpretq_f32_s32(v_addition))); +} + +inline uint32x2_t cv_vrnd_u32_f32(float32x2_t v) +{ + static float32x2_t v_05 = vdup_n_f32(0.5f); + return vcvt_u32_f32(vadd_f32(v, v_05)); +} + +inline uint32x4_t cv_vrndq_u32_f32(float32x4_t v) +{ + static float32x4_t v_05 = vdupq_n_f32(0.5f); + return vcvtq_u32_f32(vaddq_f32(v, v_05)); +} + +inline float32x4_t cv_vrecpq_f32(float32x4_t val) +{ + float32x4_t reciprocal = vrecpeq_f32(val); + reciprocal = vmulq_f32(vrecpsq_f32(val, reciprocal), reciprocal); + reciprocal = vmulq_f32(vrecpsq_f32(val, reciprocal), reciprocal); + return reciprocal; +} + +inline float32x2_t cv_vrecp_f32(float32x2_t val) +{ + float32x2_t reciprocal = vrecpe_f32(val); + reciprocal = vmul_f32(vrecps_f32(val, reciprocal), reciprocal); + reciprocal = vmul_f32(vrecps_f32(val, reciprocal), reciprocal); + return reciprocal; +} + +inline float32x4_t cv_vrsqrtq_f32(float32x4_t val) +{ + float32x4_t e = vrsqrteq_f32(val); + e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(e, e), val), e); + e = vmulq_f32(vrsqrtsq_f32(vmulq_f32(e, e), val), e); + return e; +} + +inline float32x2_t cv_vrsqrt_f32(float32x2_t val) +{ + float32x2_t e = vrsqrte_f32(val); + e = vmul_f32(vrsqrts_f32(vmul_f32(e, e), val), e); + e = vmul_f32(vrsqrts_f32(vmul_f32(e, e), val), e); + return e; +} + +inline float32x4_t cv_vsqrtq_f32(float32x4_t val) +{ + return cv_vrecpq_f32(cv_vrsqrtq_f32(val)); +} + +inline float32x2_t cv_vsqrt_f32(float32x2_t val) +{ + return cv_vrecp_f32(cv_vrsqrt_f32(val)); +} + +#endif + +//! @} + +#endif // OPENCV_HAL_NEON_UTILS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/ocl.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/ocl.hpp new file mode 100644 index 00000000..b51b3935 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/ocl.hpp @@ -0,0 +1,864 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the OpenCV Foundation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_OPENCL_HPP +#define OPENCV_OPENCL_HPP + +#include "opencv2/core.hpp" + +namespace cv { namespace ocl { + +//! @addtogroup core_opencl +//! @{ + +CV_EXPORTS_W bool haveOpenCL(); +CV_EXPORTS_W bool useOpenCL(); +CV_EXPORTS_W bool haveAmdBlas(); +CV_EXPORTS_W bool haveAmdFft(); +CV_EXPORTS_W void setUseOpenCL(bool flag); +CV_EXPORTS_W void finish(); + +CV_EXPORTS bool haveSVM(); + +class CV_EXPORTS Context; +class CV_EXPORTS_W_SIMPLE Device; +class CV_EXPORTS Kernel; +class CV_EXPORTS Program; +class CV_EXPORTS ProgramSource; +class CV_EXPORTS Queue; +class CV_EXPORTS PlatformInfo; +class CV_EXPORTS Image2D; + +class CV_EXPORTS_W_SIMPLE Device +{ +public: + CV_WRAP Device() CV_NOEXCEPT; + explicit Device(void* d); + Device(const Device& d); + Device& operator = (const Device& d); + CV_WRAP ~Device(); + + void set(void* d); + + enum + { + TYPE_DEFAULT = (1 << 0), + TYPE_CPU = (1 << 1), + TYPE_GPU = (1 << 2), + TYPE_ACCELERATOR = (1 << 3), + TYPE_DGPU = TYPE_GPU + (1 << 16), + TYPE_IGPU = TYPE_GPU + (1 << 17), + TYPE_ALL = 0xFFFFFFFF + }; + + CV_WRAP String name() const; + CV_WRAP String extensions() const; + CV_WRAP bool isExtensionSupported(const String& extensionName) const; + CV_WRAP String version() const; + CV_WRAP String vendorName() const; + CV_WRAP String OpenCL_C_Version() const; + CV_WRAP String OpenCLVersion() const; + CV_WRAP int deviceVersionMajor() const; + CV_WRAP int deviceVersionMinor() const; + CV_WRAP String driverVersion() const; + void* ptr() const; + + CV_WRAP int type() const; + + CV_WRAP int addressBits() const; + CV_WRAP bool available() const; + CV_WRAP bool compilerAvailable() const; + CV_WRAP bool linkerAvailable() const; + + enum + { + FP_DENORM=(1 << 0), + FP_INF_NAN=(1 << 1), + FP_ROUND_TO_NEAREST=(1 << 2), + FP_ROUND_TO_ZERO=(1 << 3), + FP_ROUND_TO_INF=(1 << 4), + FP_FMA=(1 << 5), + FP_SOFT_FLOAT=(1 << 6), + FP_CORRECTLY_ROUNDED_DIVIDE_SQRT=(1 << 7) + }; + CV_WRAP int doubleFPConfig() const; + CV_WRAP int singleFPConfig() const; + CV_WRAP int halfFPConfig() const; + + CV_WRAP bool endianLittle() const; + CV_WRAP bool errorCorrectionSupport() const; + + enum + { + EXEC_KERNEL=(1 << 0), + EXEC_NATIVE_KERNEL=(1 << 1) + }; + CV_WRAP int executionCapabilities() const; + + CV_WRAP size_t globalMemCacheSize() const; + + enum + { + NO_CACHE=0, + READ_ONLY_CACHE=1, + READ_WRITE_CACHE=2 + }; + CV_WRAP int globalMemCacheType() const; + CV_WRAP int globalMemCacheLineSize() const; + CV_WRAP size_t globalMemSize() const; + + CV_WRAP size_t localMemSize() const; + enum + { + NO_LOCAL_MEM=0, + 
LOCAL_IS_LOCAL=1, + LOCAL_IS_GLOBAL=2 + }; + CV_WRAP int localMemType() const; + CV_WRAP bool hostUnifiedMemory() const; + + CV_WRAP bool imageSupport() const; + + CV_WRAP bool imageFromBufferSupport() const; + uint imagePitchAlignment() const; + uint imageBaseAddressAlignment() const; + + /// deprecated, use isExtensionSupported() method (probably with "cl_khr_subgroups" value) + CV_WRAP bool intelSubgroupsSupport() const; + + CV_WRAP size_t image2DMaxWidth() const; + CV_WRAP size_t image2DMaxHeight() const; + + CV_WRAP size_t image3DMaxWidth() const; + CV_WRAP size_t image3DMaxHeight() const; + CV_WRAP size_t image3DMaxDepth() const; + + CV_WRAP size_t imageMaxBufferSize() const; + CV_WRAP size_t imageMaxArraySize() const; + + enum + { + UNKNOWN_VENDOR=0, + VENDOR_AMD=1, + VENDOR_INTEL=2, + VENDOR_NVIDIA=3 + }; + CV_WRAP int vendorID() const; + // FIXIT + // dev.isAMD() doesn't work for OpenCL CPU devices from AMD OpenCL platform. + // This method should use platform name instead of vendor name. + // After fix restore code in arithm.cpp: ocl_compare() + CV_WRAP inline bool isAMD() const { return vendorID() == VENDOR_AMD; } + CV_WRAP inline bool isIntel() const { return vendorID() == VENDOR_INTEL; } + CV_WRAP inline bool isNVidia() const { return vendorID() == VENDOR_NVIDIA; } + + CV_WRAP int maxClockFrequency() const; + CV_WRAP int maxComputeUnits() const; + CV_WRAP int maxConstantArgs() const; + CV_WRAP size_t maxConstantBufferSize() const; + + CV_WRAP size_t maxMemAllocSize() const; + CV_WRAP size_t maxParameterSize() const; + + CV_WRAP int maxReadImageArgs() const; + CV_WRAP int maxWriteImageArgs() const; + CV_WRAP int maxSamplers() const; + + CV_WRAP size_t maxWorkGroupSize() const; + CV_WRAP int maxWorkItemDims() const; + void maxWorkItemSizes(size_t*) const; + + CV_WRAP int memBaseAddrAlign() const; + + CV_WRAP int nativeVectorWidthChar() const; + CV_WRAP int nativeVectorWidthShort() const; + CV_WRAP int nativeVectorWidthInt() const; + CV_WRAP int nativeVectorWidthLong() const; + CV_WRAP int nativeVectorWidthFloat() const; + CV_WRAP int nativeVectorWidthDouble() const; + CV_WRAP int nativeVectorWidthHalf() const; + + CV_WRAP int preferredVectorWidthChar() const; + CV_WRAP int preferredVectorWidthShort() const; + CV_WRAP int preferredVectorWidthInt() const; + CV_WRAP int preferredVectorWidthLong() const; + CV_WRAP int preferredVectorWidthFloat() const; + CV_WRAP int preferredVectorWidthDouble() const; + CV_WRAP int preferredVectorWidthHalf() const; + + CV_WRAP size_t printfBufferSize() const; + CV_WRAP size_t profilingTimerResolution() const; + + CV_WRAP static const Device& getDefault(); + +protected: + struct Impl; + Impl* p; +}; + + +class CV_EXPORTS Context +{ +public: + Context() CV_NOEXCEPT; + explicit Context(int dtype); + ~Context(); + Context(const Context& c); + Context& operator = (const Context& c); + + bool create(); + bool create(int dtype); + size_t ndevices() const; + const Device& device(size_t idx) const; + Program getProg(const ProgramSource& prog, + const String& buildopt, String& errmsg); + void unloadProg(Program& prog); + + static Context& getDefault(bool initialize = true); + void* ptr() const; + + friend void initializeContextFromHandle(Context& ctx, void* platform, void* context, void* device); + + bool useSVM() const; + void setUseSVM(bool enabled); + + struct Impl; + inline Impl* getImpl() const { return (Impl*)p; } +//protected: + Impl* p; +}; + +class CV_EXPORTS Platform +{ +public: + Platform() CV_NOEXCEPT; + ~Platform(); + Platform(const Platform& p); 
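+
+    // For illustration, a sketch of how the Device/Context query API declared above is
+    // typically used by an application (assumed application-side snippet, not part of this API):
+    //
+    //     if (cv::ocl::haveOpenCL() && cv::ocl::useOpenCL())
+    //     {
+    //         const cv::ocl::Device& dev = cv::ocl::Device::getDefault();
+    //         bool hasFP64 = dev.doubleFPConfig() > 0;   // double-precision support?
+    //         size_t wgSize = dev.maxWorkGroupSize();
+    //     }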
+ Platform& operator = (const Platform& p); + + void* ptr() const; + static Platform& getDefault(); + + friend void initializeContextFromHandle(Context& ctx, void* platform, void* context, void* device); +protected: + struct Impl; + Impl* p; +}; + +/** @brief Attaches OpenCL context to OpenCV +@note + OpenCV will check if available OpenCL platform has platformName name, then assign context to + OpenCV and call `clRetainContext` function. The deviceID device will be used as target device and + new command queue will be created. +@param platformName name of OpenCL platform to attach, this string is used to check if platform is available to OpenCV at runtime +@param platformID ID of platform attached context was created for +@param context OpenCL context to be attached to OpenCV +@param deviceID ID of device, must be created from attached context +*/ +CV_EXPORTS void attachContext(const String& platformName, void* platformID, void* context, void* deviceID); + +/** @brief Convert OpenCL buffer to UMat +@note + OpenCL buffer (cl_mem_buffer) should contain 2D image data, compatible with OpenCV. Memory + content is not copied from `clBuffer` to UMat. Instead, buffer handle assigned to UMat and + `clRetainMemObject` is called. +@param cl_mem_buffer source clBuffer handle +@param step num of bytes in single row +@param rows number of rows +@param cols number of cols +@param type OpenCV type of image +@param dst destination UMat +*/ +CV_EXPORTS void convertFromBuffer(void* cl_mem_buffer, size_t step, int rows, int cols, int type, UMat& dst); + +/** @brief Convert OpenCL image2d_t to UMat +@note + OpenCL `image2d_t` (cl_mem_image), should be compatible with OpenCV UMat formats. Memory content + is copied from image to UMat with `clEnqueueCopyImageToBuffer` function. 
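+
+A minimal usage sketch (illustrative only; `clImage` is assumed to be a valid `image2d_t`
+handle created by the caller's own OpenCL code, in a format supported by OpenCV):
+@code
+    cl_mem clImage = ...;      // existing OpenCL image owned by the application
+    cv::UMat frame;
+    cv::ocl::convertFromImage(clImage, frame);
+    // frame now holds a copy of the image data and can be used as a normal UMat
+@endcode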
+@param cl_mem_image source image2d_t handle +@param dst destination UMat +*/ +CV_EXPORTS void convertFromImage(void* cl_mem_image, UMat& dst); + +// TODO Move to internal header +void initializeContextFromHandle(Context& ctx, void* platform, void* context, void* device); + +class CV_EXPORTS Queue +{ +public: + Queue() CV_NOEXCEPT; + explicit Queue(const Context& c, const Device& d=Device()); + ~Queue(); + Queue(const Queue& q); + Queue& operator = (const Queue& q); + + bool create(const Context& c=Context(), const Device& d=Device()); + void finish(); + void* ptr() const; + static Queue& getDefault(); + + /// @brief Returns OpenCL command queue with enable profiling mode support + const Queue& getProfilingQueue() const; + + struct Impl; friend struct Impl; + inline Impl* getImpl() const { return p; } +protected: + Impl* p; +}; + + +class CV_EXPORTS KernelArg +{ +public: + enum { LOCAL=1, READ_ONLY=2, WRITE_ONLY=4, READ_WRITE=6, CONSTANT=8, PTR_ONLY = 16, NO_SIZE=256 }; + KernelArg(int _flags, UMat* _m, int wscale=1, int iwscale=1, const void* _obj=0, size_t _sz=0); + KernelArg() CV_NOEXCEPT; + + static KernelArg Local(size_t localMemSize) + { return KernelArg(LOCAL, 0, 1, 1, 0, localMemSize); } + static KernelArg PtrWriteOnly(const UMat& m) + { return KernelArg(PTR_ONLY+WRITE_ONLY, (UMat*)&m); } + static KernelArg PtrReadOnly(const UMat& m) + { return KernelArg(PTR_ONLY+READ_ONLY, (UMat*)&m); } + static KernelArg PtrReadWrite(const UMat& m) + { return KernelArg(PTR_ONLY+READ_WRITE, (UMat*)&m); } + static KernelArg ReadWrite(const UMat& m, int wscale=1, int iwscale=1) + { return KernelArg(READ_WRITE, (UMat*)&m, wscale, iwscale); } + static KernelArg ReadWriteNoSize(const UMat& m, int wscale=1, int iwscale=1) + { return KernelArg(READ_WRITE+NO_SIZE, (UMat*)&m, wscale, iwscale); } + static KernelArg ReadOnly(const UMat& m, int wscale=1, int iwscale=1) + { return KernelArg(READ_ONLY, (UMat*)&m, wscale, iwscale); } + static KernelArg WriteOnly(const UMat& m, int wscale=1, int iwscale=1) + { return KernelArg(WRITE_ONLY, (UMat*)&m, wscale, iwscale); } + static KernelArg ReadOnlyNoSize(const UMat& m, int wscale=1, int iwscale=1) + { return KernelArg(READ_ONLY+NO_SIZE, (UMat*)&m, wscale, iwscale); } + static KernelArg WriteOnlyNoSize(const UMat& m, int wscale=1, int iwscale=1) + { return KernelArg(WRITE_ONLY+NO_SIZE, (UMat*)&m, wscale, iwscale); } + static KernelArg Constant(const Mat& m); + template static KernelArg Constant(const _Tp* arr, size_t n) + { return KernelArg(CONSTANT, 0, 1, 1, (void*)arr, n); } + + int flags; + UMat* m; + const void* obj; + size_t sz; + int wscale, iwscale; +}; + + +class CV_EXPORTS Kernel +{ +public: + Kernel() CV_NOEXCEPT; + Kernel(const char* kname, const Program& prog); + Kernel(const char* kname, const ProgramSource& prog, + const String& buildopts = String(), String* errmsg=0); + ~Kernel(); + Kernel(const Kernel& k); + Kernel& operator = (const Kernel& k); + + bool empty() const; + bool create(const char* kname, const Program& prog); + bool create(const char* kname, const ProgramSource& prog, + const String& buildopts, String* errmsg=0); + + int set(int i, const void* value, size_t sz); + int set(int i, const Image2D& image2D); + int set(int i, const UMat& m); + int set(int i, const KernelArg& arg); + template int set(int i, const _Tp& value) + { return set(i, &value, sizeof(value)); } + + template + Kernel& args(const _Tp0& a0) + { + set(0, a0); return *this; + } + + template + Kernel& args(const _Tp0& a0, const _Tp1& a1) + { + int i = set(0, a0); set(i, a1); 
return *this; + } + + template + Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2) + { + int i = set(0, a0); i = set(i, a1); set(i, a2); return *this; + } + + template + Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2, const _Tp3& a3) + { + int i = set(0, a0); i = set(i, a1); i = set(i, a2); i = set(i, a3); return *this; + } + + template + Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2, + const _Tp3& a3, const _Tp4& a4) + { + int i = set(0, a0); i = set(i, a1); i = set(i, a2); + i = set(i, a3); set(i, a4); return *this; + } + + template + Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2, + const _Tp3& a3, const _Tp4& a4, const _Tp5& a5) + { + int i = set(0, a0); i = set(i, a1); i = set(i, a2); + i = set(i, a3); i = set(i, a4); set(i, a5); return *this; + } + + template + Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2, const _Tp3& a3, + const _Tp4& a4, const _Tp5& a5, const _Tp6& a6) + { + int i = set(0, a0); i = set(i, a1); i = set(i, a2); i = set(i, a3); + i = set(i, a4); i = set(i, a5); set(i, a6); return *this; + } + + template + Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2, const _Tp3& a3, + const _Tp4& a4, const _Tp5& a5, const _Tp6& a6, const _Tp7& a7) + { + int i = set(0, a0); i = set(i, a1); i = set(i, a2); i = set(i, a3); + i = set(i, a4); i = set(i, a5); i = set(i, a6); set(i, a7); return *this; + } + + template + Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2, const _Tp3& a3, + const _Tp4& a4, const _Tp5& a5, const _Tp6& a6, const _Tp7& a7, + const _Tp8& a8) + { + int i = set(0, a0); i = set(i, a1); i = set(i, a2); i = set(i, a3); i = set(i, a4); + i = set(i, a5); i = set(i, a6); i = set(i, a7); set(i, a8); return *this; + } + + template + Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2, const _Tp3& a3, + const _Tp4& a4, const _Tp5& a5, const _Tp6& a6, const _Tp7& a7, + const _Tp8& a8, const _Tp9& a9) + { + int i = set(0, a0); i = set(i, a1); i = set(i, a2); i = set(i, a3); i = set(i, a4); i = set(i, a5); + i = set(i, a6); i = set(i, a7); i = set(i, a8); set(i, a9); return *this; + } + + template + Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2, const _Tp3& a3, + const _Tp4& a4, const _Tp5& a5, const _Tp6& a6, const _Tp7& a7, + const _Tp8& a8, const _Tp9& a9, const _Tp10& a10) + { + int i = set(0, a0); i = set(i, a1); i = set(i, a2); i = set(i, a3); i = set(i, a4); i = set(i, a5); + i = set(i, a6); i = set(i, a7); i = set(i, a8); i = set(i, a9); set(i, a10); return *this; + } + + template + Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2, const _Tp3& a3, + const _Tp4& a4, const _Tp5& a5, const _Tp6& a6, const _Tp7& a7, + const _Tp8& a8, const _Tp9& a9, const _Tp10& a10, const _Tp11& a11) + { + int i = set(0, a0); i = set(i, a1); i = set(i, a2); i = set(i, a3); i = set(i, a4); i = set(i, a5); + i = set(i, a6); i = set(i, a7); i = set(i, a8); i = set(i, a9); i = set(i, a10); set(i, a11); return *this; + } + + template + Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2, const _Tp3& a3, + const _Tp4& a4, const _Tp5& a5, const _Tp6& a6, const _Tp7& a7, + const _Tp8& a8, const _Tp9& a9, const _Tp10& a10, const _Tp11& a11, + const _Tp12& a12) + { + int i = set(0, a0); i = set(i, a1); i = set(i, a2); i = set(i, a3); i = set(i, a4); i = set(i, a5); + i = set(i, a6); i = set(i, a7); i = set(i, a8); i = set(i, a9); i = set(i, a10); i = set(i, a11); + set(i, a12); return *this; + } + + template + Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2, 
const _Tp3& a3, + const _Tp4& a4, const _Tp5& a5, const _Tp6& a6, const _Tp7& a7, + const _Tp8& a8, const _Tp9& a9, const _Tp10& a10, const _Tp11& a11, + const _Tp12& a12, const _Tp13& a13) + { + int i = set(0, a0); i = set(i, a1); i = set(i, a2); i = set(i, a3); i = set(i, a4); i = set(i, a5); + i = set(i, a6); i = set(i, a7); i = set(i, a8); i = set(i, a9); i = set(i, a10); i = set(i, a11); + i = set(i, a12); set(i, a13); return *this; + } + + template + Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2, const _Tp3& a3, + const _Tp4& a4, const _Tp5& a5, const _Tp6& a6, const _Tp7& a7, + const _Tp8& a8, const _Tp9& a9, const _Tp10& a10, const _Tp11& a11, + const _Tp12& a12, const _Tp13& a13, const _Tp14& a14) + { + int i = set(0, a0); i = set(i, a1); i = set(i, a2); i = set(i, a3); i = set(i, a4); i = set(i, a5); + i = set(i, a6); i = set(i, a7); i = set(i, a8); i = set(i, a9); i = set(i, a10); i = set(i, a11); + i = set(i, a12); i = set(i, a13); set(i, a14); return *this; + } + + template + Kernel& args(const _Tp0& a0, const _Tp1& a1, const _Tp2& a2, const _Tp3& a3, + const _Tp4& a4, const _Tp5& a5, const _Tp6& a6, const _Tp7& a7, + const _Tp8& a8, const _Tp9& a9, const _Tp10& a10, const _Tp11& a11, + const _Tp12& a12, const _Tp13& a13, const _Tp14& a14, const _Tp15& a15) + { + int i = set(0, a0); i = set(i, a1); i = set(i, a2); i = set(i, a3); i = set(i, a4); i = set(i, a5); + i = set(i, a6); i = set(i, a7); i = set(i, a8); i = set(i, a9); i = set(i, a10); i = set(i, a11); + i = set(i, a12); i = set(i, a13); i = set(i, a14); set(i, a15); return *this; + } + + /** @brief Run the OpenCL kernel (globalsize value may be adjusted) + + @param dims the work problem dimensions. It is the length of globalsize and localsize. It can be either 1, 2 or 3. + @param globalsize work items for each dimension. It is not the final globalsize passed to + OpenCL. Each dimension will be adjusted to the nearest integer divisible by the corresponding + value in localsize. If localsize is NULL, it will still be adjusted depending on dims. The + adjusted values are greater than or equal to the original values. + @param localsize work-group size for each dimension. + @param sync specify whether to wait for OpenCL computation to finish before return. + @param q command queue + + @note Use run_() if your kernel code doesn't support adjusted globalsize. + */ + bool run(int dims, size_t globalsize[], + size_t localsize[], bool sync, const Queue& q=Queue()); + + /** @brief Run the OpenCL kernel + * + * @param dims the work problem dimensions. It is the length of globalsize and localsize. It can be either 1, 2 or 3. + * @param globalsize work items for each dimension. This value is passed to OpenCL without changes. + * @param localsize work-group size for each dimension. + * @param sync specify whether to wait for OpenCL computation to finish before return. 
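+ *
+ * A minimal sketch of the usual build/arg/run sequence (the source string, the kernel
+ * name "myKernel" and the 256x256 problem size are illustrative assumptions):
+ * @code
+ *     cv::ocl::ProgramSource source(kernelCodeString);
+ *     cv::String errmsg;
+ *     cv::ocl::Kernel k("myKernel", source, "", &errmsg);
+ *     cv::UMat u = cv::UMat::zeros(256, 256, CV_32F);
+ *     k.args(cv::ocl::KernelArg::ReadWrite(u));
+ *     size_t globalsize[2] = { 256, 256 };
+ *     bool ok = !k.empty() && k.run(2, globalsize, NULL, true);
+ * @endcode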
+ * @param q command queue + */ + bool run_(int dims, size_t globalsize[], size_t localsize[], bool sync, const Queue& q=Queue()); + + bool runTask(bool sync, const Queue& q=Queue()); + + /** @brief Similar to synchronized run_() call with returning of kernel execution time + * + * Separate OpenCL command queue may be used (with CL_QUEUE_PROFILING_ENABLE) + * @return Execution time in nanoseconds or negative number on error + */ + int64 runProfiling(int dims, size_t globalsize[], size_t localsize[], const Queue& q=Queue()); + + size_t workGroupSize() const; + size_t preferedWorkGroupSizeMultiple() const; + bool compileWorkGroupSize(size_t wsz[]) const; + size_t localMemSize() const; + + void* ptr() const; + struct Impl; + +protected: + Impl* p; +}; + +class CV_EXPORTS Program +{ +public: + Program() CV_NOEXCEPT; + Program(const ProgramSource& src, + const String& buildflags, String& errmsg); + Program(const Program& prog); + + Program& operator = (const Program& prog); + ~Program(); + + bool create(const ProgramSource& src, + const String& buildflags, String& errmsg); + + void* ptr() const; + + /** + * @brief Query device-specific program binary. + * + * Returns RAW OpenCL executable binary without additional attachments. + * + * @sa ProgramSource::fromBinary + * + * @param[out] binary output buffer + */ + void getBinary(std::vector& binary) const; + + struct Impl; friend struct Impl; + inline Impl* getImpl() const { return (Impl*)p; } +protected: + Impl* p; +public: +#ifndef OPENCV_REMOVE_DEPRECATED_API + // TODO Remove this + CV_DEPRECATED bool read(const String& buf, const String& buildflags); // removed, use ProgramSource instead + CV_DEPRECATED bool write(String& buf) const; // removed, use getBinary() method instead (RAW OpenCL binary) + CV_DEPRECATED const ProgramSource& source() const; // implementation removed + CV_DEPRECATED String getPrefix() const; // deprecated, implementation replaced + CV_DEPRECATED static String getPrefix(const String& buildflags); // deprecated, implementation replaced +#endif +}; + + +class CV_EXPORTS ProgramSource +{ +public: + typedef uint64 hash_t; // deprecated + + ProgramSource() CV_NOEXCEPT; + explicit ProgramSource(const String& module, const String& name, const String& codeStr, const String& codeHash); + explicit ProgramSource(const String& prog); // deprecated + explicit ProgramSource(const char* prog); // deprecated + ~ProgramSource(); + ProgramSource(const ProgramSource& prog); + ProgramSource& operator = (const ProgramSource& prog); + + const String& source() const; // deprecated + hash_t hash() const; // deprecated + + + /** @brief Describe OpenCL program binary. + * Do not call clCreateProgramWithBinary() and/or clBuildProgram(). + * + * Caller should guarantee binary buffer lifetime greater than ProgramSource object (and any of its copies). + * + * This kind of binary is not portable between platforms in general - it is specific to OpenCL vendor / device / driver version. + * + * @param module name of program owner module + * @param name unique name of program (module+name is used as key for OpenCL program caching) + * @param binary buffer address. See buffer lifetime requirement in description. 
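+ *
+ * A minimal sketch of the intended round trip (the module/program names, the `program`
+ * object and the buffer handling are illustrative assumptions):
+ * @code
+ *     // first run: build from source, then store the device-specific binary
+ *     std::vector<char> bin;
+ *     program.getBinary(bin);
+ *     // later runs: recreate the program from the cached binary;
+ *     // note that 'bin' must outlive 'cached' and any Program built from it
+ *     cv::ocl::ProgramSource cached = cv::ocl::ProgramSource::fromBinary(
+ *         "mymodule", "myprog",
+ *         (const unsigned char*)bin.data(), bin.size());
+ * @endcode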
+ * @param size buffer size + * @param buildOptions additional program-related build options passed to clBuildProgram() + * @return created ProgramSource object + */ + static ProgramSource fromBinary(const String& module, const String& name, + const unsigned char* binary, const size_t size, + const cv::String& buildOptions = cv::String()); + + /** @brief Describe OpenCL program in SPIR format. + * Do not call clCreateProgramWithBinary() and/or clBuildProgram(). + * + * Supports SPIR 1.2 by default (pass '-spir-std=X.Y' in buildOptions to override this behavior) + * + * Caller should guarantee binary buffer lifetime greater than ProgramSource object (and any of its copies). + * + * Programs in this format are portable between OpenCL implementations with 'khr_spir' extension: + * https://www.khronos.org/registry/OpenCL/sdk/2.0/docs/man/xhtml/cl_khr_spir.html + * (but they are not portable between different platforms: 32-bit / 64-bit) + * + * Note: these programs can't support vendor specific extensions, like 'cl_intel_subgroups'. + * + * @param module name of program owner module + * @param name unique name of program (module+name is used as key for OpenCL program caching) + * @param binary buffer address. See buffer lifetime requirement in description. + * @param size buffer size + * @param buildOptions additional program-related build options passed to clBuildProgram() + * (these options are added automatically: '-x spir' and '-spir-std=1.2') + * @return created ProgramSource object. + */ + static ProgramSource fromSPIR(const String& module, const String& name, + const unsigned char* binary, const size_t size, + const cv::String& buildOptions = cv::String()); + + //OpenCL 2.1+ only + //static Program fromSPIRV(const String& module, const String& name, + // const unsigned char* binary, const size_t size, + // const cv::String& buildOptions = cv::String()); + + struct Impl; friend struct Impl; + inline Impl* getImpl() const { return (Impl*)p; } +protected: + Impl* p; +}; + +class CV_EXPORTS PlatformInfo +{ +public: + PlatformInfo() CV_NOEXCEPT; + explicit PlatformInfo(void* id); + ~PlatformInfo(); + + PlatformInfo(const PlatformInfo& i); + PlatformInfo& operator =(const PlatformInfo& i); + + String name() const; + String vendor() const; + + /// See CL_PLATFORM_VERSION + String version() const; + int versionMajor() const; + int versionMinor() const; + + int deviceNumber() const; + void getDevice(Device& device, int d) const; + +protected: + struct Impl; + Impl* p; +}; + +CV_EXPORTS const char* convertTypeStr(int sdepth, int ddepth, int cn, char* buf); +CV_EXPORTS const char* typeToStr(int t); +CV_EXPORTS const char* memopTypeToStr(int t); +CV_EXPORTS const char* vecopTypeToStr(int t); +CV_EXPORTS const char* getOpenCLErrorString(int errorCode); +CV_EXPORTS String kernelToStr(InputArray _kernel, int ddepth = -1, const char * name = NULL); +CV_EXPORTS void getPlatfomsInfo(std::vector& platform_info); + + +enum OclVectorStrategy +{ + // all matrices have its own vector width + OCL_VECTOR_OWN = 0, + // all matrices have maximal vector width among all matrices + // (useful for cases when matrices have different data types) + OCL_VECTOR_MAX = 1, + + // default strategy + OCL_VECTOR_DEFAULT = OCL_VECTOR_OWN +}; + +CV_EXPORTS int predictOptimalVectorWidth(InputArray src1, InputArray src2 = noArray(), InputArray src3 = noArray(), + InputArray src4 = noArray(), InputArray src5 = noArray(), InputArray src6 = noArray(), + InputArray src7 = noArray(), InputArray src8 = noArray(), InputArray src9 = 
noArray(), + OclVectorStrategy strat = OCL_VECTOR_DEFAULT); + +CV_EXPORTS int checkOptimalVectorWidth(const int *vectorWidths, + InputArray src1, InputArray src2 = noArray(), InputArray src3 = noArray(), + InputArray src4 = noArray(), InputArray src5 = noArray(), InputArray src6 = noArray(), + InputArray src7 = noArray(), InputArray src8 = noArray(), InputArray src9 = noArray(), + OclVectorStrategy strat = OCL_VECTOR_DEFAULT); + +// with OCL_VECTOR_MAX strategy +CV_EXPORTS int predictOptimalVectorWidthMax(InputArray src1, InputArray src2 = noArray(), InputArray src3 = noArray(), + InputArray src4 = noArray(), InputArray src5 = noArray(), InputArray src6 = noArray(), + InputArray src7 = noArray(), InputArray src8 = noArray(), InputArray src9 = noArray()); + +CV_EXPORTS void buildOptionsAddMatrixDescription(String& buildOptions, const String& name, InputArray _m); + +class CV_EXPORTS Image2D +{ +public: + Image2D() CV_NOEXCEPT; + + /** + @param src UMat object from which to get image properties and data + @param norm flag to enable the use of normalized channel data types + @param alias flag indicating that the image should alias the src UMat. If true, changes to the + image or src will be reflected in both objects. + */ + explicit Image2D(const UMat &src, bool norm = false, bool alias = false); + Image2D(const Image2D & i); + ~Image2D(); + + Image2D & operator = (const Image2D & i); + + /** Indicates if creating an aliased image should succeed. + Depends on the underlying platform and the dimensions of the UMat. + */ + static bool canCreateAlias(const UMat &u); + + /** Indicates if the image format is supported. + */ + static bool isFormatSupported(int depth, int cn, bool norm); + + void* ptr() const; +protected: + struct Impl; + Impl* p; +}; + +class CV_EXPORTS Timer +{ +public: + Timer(const Queue& q); + ~Timer(); + void start(); + void stop(); + + uint64 durationNS() const; //< duration in nanoseconds + +protected: + struct Impl; + Impl* const p; + +private: + Timer(const Timer&); // disabled + Timer& operator=(const Timer&); // disabled +}; + +CV_EXPORTS MatAllocator* getOpenCLAllocator(); + + +#ifdef __OPENCV_BUILD +namespace internal { + +CV_EXPORTS bool isOpenCLForced(); +#define OCL_FORCE_CHECK(condition) (cv::ocl::internal::isOpenCLForced() || (condition)) + +CV_EXPORTS bool isPerformanceCheckBypassed(); +#define OCL_PERFORMANCE_CHECK(condition) (cv::ocl::internal::isPerformanceCheckBypassed() || (condition)) + +CV_EXPORTS bool isCLBuffer(UMat& u); + +} // namespace internal +#endif + +//! @} + +}} + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/ocl_genbase.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/ocl_genbase.hpp new file mode 100644 index 00000000..5334cf1f --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/ocl_genbase.hpp @@ -0,0 +1,69 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the OpenCV Foundation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_OPENCL_GENBASE_HPP +#define OPENCV_OPENCL_GENBASE_HPP + +//! @cond IGNORED + +namespace cv { +namespace ocl { + +class ProgramSource; + +namespace internal { + +struct CV_EXPORTS ProgramEntry +{ + const char* module; + const char* name; + const char* programCode; + const char* programHash; + ProgramSource* pProgramSource; + + operator ProgramSource& () const; +}; + +} } } // namespace + +//! @endcond + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/ocl_defs.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/ocl_defs.hpp new file mode 100644 index 00000000..605a65f8 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/ocl_defs.hpp @@ -0,0 +1,75 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +// Copyright (C) 2014, Advanced Micro Devices, Inc., all rights reserved. +// Third party copyrights are property of their respective owners. + +#ifndef OPENCV_CORE_OPENCL_DEFS_HPP +#define OPENCV_CORE_OPENCL_DEFS_HPP + +#include "opencv2/core/utility.hpp" +#include "cvconfig.h" + +namespace cv { namespace ocl { +#ifdef HAVE_OPENCL +/// Call is similar to useOpenCL() but doesn't try to load OpenCL runtime or create OpenCL context +CV_EXPORTS bool isOpenCLActivated(); +#else +static inline bool isOpenCLActivated() { return false; } +#endif +}} // namespace + + +//#define CV_OPENCL_RUN_ASSERT + +#ifdef HAVE_OPENCL + +#ifdef CV_OPENCL_RUN_VERBOSE +#define CV_OCL_RUN_(condition, func, ...) \ + { \ + if (cv::ocl::isOpenCLActivated() && (condition) && func) \ + { \ + printf("%s: OpenCL implementation is running\n", CV_Func); \ + fflush(stdout); \ + CV_IMPL_ADD(CV_IMPL_OCL); \ + return __VA_ARGS__; \ + } \ + else \ + { \ + printf("%s: Plain implementation is running\n", CV_Func); \ + fflush(stdout); \ + } \ + } +#elif defined CV_OPENCL_RUN_ASSERT +#define CV_OCL_RUN_(condition, func, ...) 
\ + { \ + if (cv::ocl::isOpenCLActivated() && (condition)) \ + { \ + if(func) \ + { \ + CV_IMPL_ADD(CV_IMPL_OCL); \ + } \ + else \ + { \ + CV_Error(cv::Error::StsAssert, #func); \ + } \ + return __VA_ARGS__; \ + } \ + } +#else +#define CV_OCL_RUN_(condition, func, ...) \ + if (cv::ocl::isOpenCLActivated() && (condition) && func) \ + { \ + CV_IMPL_ADD(CV_IMPL_OCL); \ + return __VA_ARGS__; \ + } +#endif + +#else +#define CV_OCL_RUN_(condition, func, ...) +#endif + +#define CV_OCL_RUN(condition, func) CV_OCL_RUN_(condition, func) + +#endif // OPENCV_CORE_OPENCL_DEFS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/opencl_info.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/opencl_info.hpp new file mode 100644 index 00000000..5e5c846a --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/opencl_info.hpp @@ -0,0 +1,205 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#include + +#include +#include + +#ifndef DUMP_CONFIG_PROPERTY +#define DUMP_CONFIG_PROPERTY(...) +#endif + +#ifndef DUMP_MESSAGE_STDOUT +#define DUMP_MESSAGE_STDOUT(...) do { std::cout << __VA_ARGS__ << std::endl; } while (false) +#endif + +namespace cv { + +namespace { +static std::string bytesToStringRepr(size_t value) +{ + size_t b = value % 1024; + value /= 1024; + + size_t kb = value % 1024; + value /= 1024; + + size_t mb = value % 1024; + value /= 1024; + + size_t gb = value; + + std::ostringstream stream; + + if (gb > 0) + stream << gb << " GB "; + if (mb > 0) + stream << mb << " MB "; + if (kb > 0) + stream << kb << " KB "; + if (b > 0) + stream << b << " B"; + + std::string s = stream.str(); + if (s[s.size() - 1] == ' ') + s = s.substr(0, s.size() - 1); + return s; +} + +static String getDeviceTypeString(const cv::ocl::Device& device) +{ + if (device.type() == cv::ocl::Device::TYPE_CPU) { + return "CPU"; + } + + if (device.type() == cv::ocl::Device::TYPE_GPU) { + if (device.hostUnifiedMemory()) { + return "iGPU"; + } else { + return "dGPU"; + } + } + + return "unknown"; +} +} // namespace + +static void dumpOpenCLInformation() +{ + using namespace cv::ocl; + + try + { + if (!haveOpenCL() || !useOpenCL()) + { + DUMP_MESSAGE_STDOUT("OpenCL is disabled"); + DUMP_CONFIG_PROPERTY("cv_ocl", "disabled"); + return; + } + + std::vector platforms; + cv::ocl::getPlatfomsInfo(platforms); + if (platforms.empty()) + { + DUMP_MESSAGE_STDOUT("OpenCL is not available"); + DUMP_CONFIG_PROPERTY("cv_ocl", "not available"); + return; + } + + DUMP_MESSAGE_STDOUT("OpenCL Platforms: "); + for (size_t i = 0; i < platforms.size(); i++) + { + const PlatformInfo* platform = &platforms[i]; + DUMP_MESSAGE_STDOUT(" " << platform->name()); + Device current_device; + for (int j = 0; j < platform->deviceNumber(); j++) + { + platform->getDevice(current_device, j); + String deviceTypeStr = getDeviceTypeString(current_device); + DUMP_MESSAGE_STDOUT( " " << deviceTypeStr << ": " << current_device.name() << " (" << current_device.version() << ")"); + DUMP_CONFIG_PROPERTY( cv::format("cv_ocl_platform_%d_device_%d", (int)i, j ), + cv::format("(Platform=%s)(Type=%s)(Name=%s)(Version=%s)", + platform->name().c_str(), deviceTypeStr.c_str(), current_device.name().c_str(), current_device.version().c_str()) ); + } + } + const Device& device = Device::getDefault(); + if (!device.available()) + CV_Error(Error::OpenCLInitError, "OpenCL device is not 
available"); + + DUMP_MESSAGE_STDOUT("Current OpenCL device: "); + + String deviceTypeStr = getDeviceTypeString(device); + DUMP_MESSAGE_STDOUT(" Type = " << deviceTypeStr); + DUMP_CONFIG_PROPERTY("cv_ocl_current_deviceType", deviceTypeStr); + + DUMP_MESSAGE_STDOUT(" Name = " << device.name()); + DUMP_CONFIG_PROPERTY("cv_ocl_current_deviceName", device.name()); + + DUMP_MESSAGE_STDOUT(" Version = " << device.version()); + DUMP_CONFIG_PROPERTY("cv_ocl_current_deviceVersion", device.version()); + + DUMP_MESSAGE_STDOUT(" Driver version = " << device.driverVersion()); + DUMP_CONFIG_PROPERTY("cv_ocl_current_driverVersion", device.driverVersion()); + + DUMP_MESSAGE_STDOUT(" Address bits = " << device.addressBits()); + DUMP_CONFIG_PROPERTY("cv_ocl_current_addressBits", device.addressBits()); + + DUMP_MESSAGE_STDOUT(" Compute units = " << device.maxComputeUnits()); + DUMP_CONFIG_PROPERTY("cv_ocl_current_maxComputeUnits", device.maxComputeUnits()); + + DUMP_MESSAGE_STDOUT(" Max work group size = " << device.maxWorkGroupSize()); + DUMP_CONFIG_PROPERTY("cv_ocl_current_maxWorkGroupSize", device.maxWorkGroupSize()); + + std::string localMemorySizeStr = bytesToStringRepr(device.localMemSize()); + DUMP_MESSAGE_STDOUT(" Local memory size = " << localMemorySizeStr); + DUMP_CONFIG_PROPERTY("cv_ocl_current_localMemSize", device.localMemSize()); + + std::string maxMemAllocSizeStr = bytesToStringRepr(device.maxMemAllocSize()); + DUMP_MESSAGE_STDOUT(" Max memory allocation size = " << maxMemAllocSizeStr); + DUMP_CONFIG_PROPERTY("cv_ocl_current_maxMemAllocSize", device.maxMemAllocSize()); + + const char* doubleSupportStr = device.doubleFPConfig() > 0 ? "Yes" : "No"; + DUMP_MESSAGE_STDOUT(" Double support = " << doubleSupportStr); + DUMP_CONFIG_PROPERTY("cv_ocl_current_haveDoubleSupport", device.doubleFPConfig() > 0); + + const char* isUnifiedMemoryStr = device.hostUnifiedMemory() ? "Yes" : "No"; + DUMP_MESSAGE_STDOUT(" Host unified memory = " << isUnifiedMemoryStr); + DUMP_CONFIG_PROPERTY("cv_ocl_current_hostUnifiedMemory", device.hostUnifiedMemory()); + + DUMP_MESSAGE_STDOUT(" Device extensions:"); + String extensionsStr = device.extensions(); + size_t pos = 0; + while (pos < extensionsStr.size()) + { + size_t pos2 = extensionsStr.find(' ', pos); + if (pos2 == String::npos) + pos2 = extensionsStr.size(); + if (pos2 > pos) + { + String extensionName = extensionsStr.substr(pos, pos2 - pos); + DUMP_MESSAGE_STDOUT(" " << extensionName); + } + pos = pos2 + 1; + } + DUMP_CONFIG_PROPERTY("cv_ocl_current_extensions", extensionsStr); + + const char* haveAmdBlasStr = haveAmdBlas() ? "Yes" : "No"; + DUMP_MESSAGE_STDOUT(" Has AMD Blas = " << haveAmdBlasStr); + DUMP_CONFIG_PROPERTY("cv_ocl_current_AmdBlas", haveAmdBlas()); + + const char* haveAmdFftStr = haveAmdFft() ? 
"Yes" : "No"; + DUMP_MESSAGE_STDOUT(" Has AMD Fft = " << haveAmdFftStr); + DUMP_CONFIG_PROPERTY("cv_ocl_current_AmdFft", haveAmdFft()); + + + DUMP_MESSAGE_STDOUT(" Preferred vector width char = " << device.preferredVectorWidthChar()); + DUMP_CONFIG_PROPERTY("cv_ocl_current_preferredVectorWidthChar", device.preferredVectorWidthChar()); + + DUMP_MESSAGE_STDOUT(" Preferred vector width short = " << device.preferredVectorWidthShort()); + DUMP_CONFIG_PROPERTY("cv_ocl_current_preferredVectorWidthShort", device.preferredVectorWidthShort()); + + DUMP_MESSAGE_STDOUT(" Preferred vector width int = " << device.preferredVectorWidthInt()); + DUMP_CONFIG_PROPERTY("cv_ocl_current_preferredVectorWidthInt", device.preferredVectorWidthInt()); + + DUMP_MESSAGE_STDOUT(" Preferred vector width long = " << device.preferredVectorWidthLong()); + DUMP_CONFIG_PROPERTY("cv_ocl_current_preferredVectorWidthLong", device.preferredVectorWidthLong()); + + DUMP_MESSAGE_STDOUT(" Preferred vector width float = " << device.preferredVectorWidthFloat()); + DUMP_CONFIG_PROPERTY("cv_ocl_current_preferredVectorWidthFloat", device.preferredVectorWidthFloat()); + + DUMP_MESSAGE_STDOUT(" Preferred vector width double = " << device.preferredVectorWidthDouble()); + DUMP_CONFIG_PROPERTY("cv_ocl_current_preferredVectorWidthDouble", device.preferredVectorWidthDouble()); + } + catch (...) + { + DUMP_MESSAGE_STDOUT("Exception. Can't dump OpenCL info"); + DUMP_MESSAGE_STDOUT("OpenCL device not available"); + DUMP_CONFIG_PROPERTY("cv_ocl", "not available"); + } +} +#undef DUMP_MESSAGE_STDOUT +#undef DUMP_CONFIG_PROPERTY + +} // namespace diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/opencl_svm.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/opencl_svm.hpp new file mode 100644 index 00000000..7453082a --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/opencl_svm.hpp @@ -0,0 +1,81 @@ +/* See LICENSE file in the root OpenCV directory */ + +#ifndef OPENCV_CORE_OPENCL_SVM_HPP +#define OPENCV_CORE_OPENCL_SVM_HPP + +// +// Internal usage only (binary compatibility is not guaranteed) +// +#ifndef __OPENCV_BUILD +#error Internal header file +#endif + +#if defined(HAVE_OPENCL) && defined(HAVE_OPENCL_SVM) +#include "runtime/opencl_core.hpp" +#include "runtime/opencl_svm_20.hpp" +#include "runtime/opencl_svm_hsa_extension.hpp" + +namespace cv { namespace ocl { namespace svm { + +struct SVMCapabilities +{ + enum Value + { + SVM_COARSE_GRAIN_BUFFER = (1 << 0), + SVM_FINE_GRAIN_BUFFER = (1 << 1), + SVM_FINE_GRAIN_SYSTEM = (1 << 2), + SVM_ATOMICS = (1 << 3), + }; + int value_; + + SVMCapabilities(int capabilities = 0) : value_(capabilities) { } + operator int() const { return value_; } + + inline bool isNoSVMSupport() const { return value_ == 0; } + inline bool isSupportCoarseGrainBuffer() const { return (value_ & SVM_COARSE_GRAIN_BUFFER) != 0; } + inline bool isSupportFineGrainBuffer() const { return (value_ & SVM_FINE_GRAIN_BUFFER) != 0; } + inline bool isSupportFineGrainSystem() const { return (value_ & SVM_FINE_GRAIN_SYSTEM) != 0; } + inline bool isSupportAtomics() const { return (value_ & SVM_ATOMICS) != 0; } +}; + +CV_EXPORTS const SVMCapabilities getSVMCapabilitites(const ocl::Context& context); + +struct SVMFunctions +{ + clSVMAllocAMD_fn fn_clSVMAlloc; + clSVMFreeAMD_fn fn_clSVMFree; + clSetKernelArgSVMPointerAMD_fn fn_clSetKernelArgSVMPointer; + //clSetKernelExecInfoAMD_fn fn_clSetKernelExecInfo; + //clEnqueueSVMFreeAMD_fn fn_clEnqueueSVMFree; + clEnqueueSVMMemcpyAMD_fn 
fn_clEnqueueSVMMemcpy; + clEnqueueSVMMemFillAMD_fn fn_clEnqueueSVMMemFill; + clEnqueueSVMMapAMD_fn fn_clEnqueueSVMMap; + clEnqueueSVMUnmapAMD_fn fn_clEnqueueSVMUnmap; + + inline SVMFunctions() + : fn_clSVMAlloc(NULL), fn_clSVMFree(NULL), + fn_clSetKernelArgSVMPointer(NULL), /*fn_clSetKernelExecInfo(NULL),*/ + /*fn_clEnqueueSVMFree(NULL),*/ fn_clEnqueueSVMMemcpy(NULL), fn_clEnqueueSVMMemFill(NULL), + fn_clEnqueueSVMMap(NULL), fn_clEnqueueSVMUnmap(NULL) + { + // nothing + } + + inline bool isValid() const + { + return fn_clSVMAlloc != NULL && fn_clSVMFree && fn_clSetKernelArgSVMPointer && + /*fn_clSetKernelExecInfo && fn_clEnqueueSVMFree &&*/ fn_clEnqueueSVMMemcpy && + fn_clEnqueueSVMMemFill && fn_clEnqueueSVMMap && fn_clEnqueueSVMUnmap; + } +}; + +// We should guarantee that SVMFunctions lifetime is not less than context's lifetime +CV_EXPORTS const SVMFunctions* getSVMFunctions(const ocl::Context& context); + +CV_EXPORTS bool useSVM(UMatUsageFlags usageFlags); + +}}} //namespace cv::ocl::svm +#endif + +#endif // OPENCV_CORE_OPENCL_SVM_HPP +/* End of file. */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/autogenerated/opencl_clamdblas.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/autogenerated/opencl_clamdblas.hpp new file mode 100644 index 00000000..65c84935 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/autogenerated/opencl_clamdblas.hpp @@ -0,0 +1,714 @@ +// +// AUTOGENERATED, DO NOT EDIT +// +#ifndef OPENCV_CORE_OCL_RUNTIME_CLAMDBLAS_HPP +#error "Invalid usage" +#endif + +// generated by parser_clamdblas.py +#define clAmdBlasAddScratchImage clAmdBlasAddScratchImage_ +#define clAmdBlasCaxpy clAmdBlasCaxpy_ +#define clAmdBlasCcopy clAmdBlasCcopy_ +#define clAmdBlasCdotc clAmdBlasCdotc_ +#define clAmdBlasCdotu clAmdBlasCdotu_ +#define clAmdBlasCgbmv clAmdBlasCgbmv_ +#define clAmdBlasCgemm clAmdBlasCgemm_ +#define clAmdBlasCgemmEx clAmdBlasCgemmEx_ +#define clAmdBlasCgemv clAmdBlasCgemv_ +#define clAmdBlasCgemvEx clAmdBlasCgemvEx_ +#define clAmdBlasCgerc clAmdBlasCgerc_ +#define clAmdBlasCgeru clAmdBlasCgeru_ +#define clAmdBlasChbmv clAmdBlasChbmv_ +#define clAmdBlasChemm clAmdBlasChemm_ +#define clAmdBlasChemv clAmdBlasChemv_ +#define clAmdBlasCher clAmdBlasCher_ +#define clAmdBlasCher2 clAmdBlasCher2_ +#define clAmdBlasCher2k clAmdBlasCher2k_ +#define clAmdBlasCherk clAmdBlasCherk_ +#define clAmdBlasChpmv clAmdBlasChpmv_ +#define clAmdBlasChpr clAmdBlasChpr_ +#define clAmdBlasChpr2 clAmdBlasChpr2_ +#define clAmdBlasCrotg clAmdBlasCrotg_ +#define clAmdBlasCscal clAmdBlasCscal_ +#define clAmdBlasCsrot clAmdBlasCsrot_ +#define clAmdBlasCsscal clAmdBlasCsscal_ +#define clAmdBlasCswap clAmdBlasCswap_ +#define clAmdBlasCsymm clAmdBlasCsymm_ +#define clAmdBlasCsyr2k clAmdBlasCsyr2k_ +#define clAmdBlasCsyr2kEx clAmdBlasCsyr2kEx_ +#define clAmdBlasCsyrk clAmdBlasCsyrk_ +#define clAmdBlasCsyrkEx clAmdBlasCsyrkEx_ +#define clAmdBlasCtbmv clAmdBlasCtbmv_ +#define clAmdBlasCtbsv clAmdBlasCtbsv_ +#define clAmdBlasCtpmv clAmdBlasCtpmv_ +#define clAmdBlasCtpsv clAmdBlasCtpsv_ +#define clAmdBlasCtrmm clAmdBlasCtrmm_ +#define clAmdBlasCtrmmEx clAmdBlasCtrmmEx_ +#define clAmdBlasCtrmv clAmdBlasCtrmv_ +#define clAmdBlasCtrsm clAmdBlasCtrsm_ +#define clAmdBlasCtrsmEx clAmdBlasCtrsmEx_ +#define clAmdBlasCtrsv clAmdBlasCtrsv_ +#define clAmdBlasDasum clAmdBlasDasum_ +#define clAmdBlasDaxpy clAmdBlasDaxpy_ +#define clAmdBlasDcopy clAmdBlasDcopy_ +#define clAmdBlasDdot clAmdBlasDdot_ +#define clAmdBlasDgbmv 
clAmdBlasDgbmv_ +#define clAmdBlasDgemm clAmdBlasDgemm_ +#define clAmdBlasDgemmEx clAmdBlasDgemmEx_ +#define clAmdBlasDgemv clAmdBlasDgemv_ +#define clAmdBlasDgemvEx clAmdBlasDgemvEx_ +#define clAmdBlasDger clAmdBlasDger_ +#define clAmdBlasDnrm2 clAmdBlasDnrm2_ +#define clAmdBlasDrot clAmdBlasDrot_ +#define clAmdBlasDrotg clAmdBlasDrotg_ +#define clAmdBlasDrotm clAmdBlasDrotm_ +#define clAmdBlasDrotmg clAmdBlasDrotmg_ +#define clAmdBlasDsbmv clAmdBlasDsbmv_ +#define clAmdBlasDscal clAmdBlasDscal_ +#define clAmdBlasDspmv clAmdBlasDspmv_ +#define clAmdBlasDspr clAmdBlasDspr_ +#define clAmdBlasDspr2 clAmdBlasDspr2_ +#define clAmdBlasDswap clAmdBlasDswap_ +#define clAmdBlasDsymm clAmdBlasDsymm_ +#define clAmdBlasDsymv clAmdBlasDsymv_ +#define clAmdBlasDsymvEx clAmdBlasDsymvEx_ +#define clAmdBlasDsyr clAmdBlasDsyr_ +#define clAmdBlasDsyr2 clAmdBlasDsyr2_ +#define clAmdBlasDsyr2k clAmdBlasDsyr2k_ +#define clAmdBlasDsyr2kEx clAmdBlasDsyr2kEx_ +#define clAmdBlasDsyrk clAmdBlasDsyrk_ +#define clAmdBlasDsyrkEx clAmdBlasDsyrkEx_ +#define clAmdBlasDtbmv clAmdBlasDtbmv_ +#define clAmdBlasDtbsv clAmdBlasDtbsv_ +#define clAmdBlasDtpmv clAmdBlasDtpmv_ +#define clAmdBlasDtpsv clAmdBlasDtpsv_ +#define clAmdBlasDtrmm clAmdBlasDtrmm_ +#define clAmdBlasDtrmmEx clAmdBlasDtrmmEx_ +#define clAmdBlasDtrmv clAmdBlasDtrmv_ +#define clAmdBlasDtrsm clAmdBlasDtrsm_ +#define clAmdBlasDtrsmEx clAmdBlasDtrsmEx_ +#define clAmdBlasDtrsv clAmdBlasDtrsv_ +#define clAmdBlasDzasum clAmdBlasDzasum_ +#define clAmdBlasDznrm2 clAmdBlasDznrm2_ +#define clAmdBlasGetVersion clAmdBlasGetVersion_ +#define clAmdBlasRemoveScratchImage clAmdBlasRemoveScratchImage_ +#define clAmdBlasSasum clAmdBlasSasum_ +#define clAmdBlasSaxpy clAmdBlasSaxpy_ +#define clAmdBlasScasum clAmdBlasScasum_ +#define clAmdBlasScnrm2 clAmdBlasScnrm2_ +#define clAmdBlasScopy clAmdBlasScopy_ +#define clAmdBlasSdot clAmdBlasSdot_ +#define clAmdBlasSetup clAmdBlasSetup_ +#define clAmdBlasSgbmv clAmdBlasSgbmv_ +#define clAmdBlasSgemm clAmdBlasSgemm_ +#define clAmdBlasSgemmEx clAmdBlasSgemmEx_ +#define clAmdBlasSgemv clAmdBlasSgemv_ +#define clAmdBlasSgemvEx clAmdBlasSgemvEx_ +#define clAmdBlasSger clAmdBlasSger_ +#define clAmdBlasSnrm2 clAmdBlasSnrm2_ +#define clAmdBlasSrot clAmdBlasSrot_ +#define clAmdBlasSrotg clAmdBlasSrotg_ +#define clAmdBlasSrotm clAmdBlasSrotm_ +#define clAmdBlasSrotmg clAmdBlasSrotmg_ +#define clAmdBlasSsbmv clAmdBlasSsbmv_ +#define clAmdBlasSscal clAmdBlasSscal_ +#define clAmdBlasSspmv clAmdBlasSspmv_ +#define clAmdBlasSspr clAmdBlasSspr_ +#define clAmdBlasSspr2 clAmdBlasSspr2_ +#define clAmdBlasSswap clAmdBlasSswap_ +#define clAmdBlasSsymm clAmdBlasSsymm_ +#define clAmdBlasSsymv clAmdBlasSsymv_ +#define clAmdBlasSsymvEx clAmdBlasSsymvEx_ +#define clAmdBlasSsyr clAmdBlasSsyr_ +#define clAmdBlasSsyr2 clAmdBlasSsyr2_ +#define clAmdBlasSsyr2k clAmdBlasSsyr2k_ +#define clAmdBlasSsyr2kEx clAmdBlasSsyr2kEx_ +#define clAmdBlasSsyrk clAmdBlasSsyrk_ +#define clAmdBlasSsyrkEx clAmdBlasSsyrkEx_ +#define clAmdBlasStbmv clAmdBlasStbmv_ +#define clAmdBlasStbsv clAmdBlasStbsv_ +#define clAmdBlasStpmv clAmdBlasStpmv_ +#define clAmdBlasStpsv clAmdBlasStpsv_ +#define clAmdBlasStrmm clAmdBlasStrmm_ +#define clAmdBlasStrmmEx clAmdBlasStrmmEx_ +#define clAmdBlasStrmv clAmdBlasStrmv_ +#define clAmdBlasStrsm clAmdBlasStrsm_ +#define clAmdBlasStrsmEx clAmdBlasStrsmEx_ +#define clAmdBlasStrsv clAmdBlasStrsv_ +#define clAmdBlasTeardown clAmdBlasTeardown_ +#define clAmdBlasZaxpy clAmdBlasZaxpy_ +#define clAmdBlasZcopy clAmdBlasZcopy_ +#define clAmdBlasZdotc 
clAmdBlasZdotc_ +#define clAmdBlasZdotu clAmdBlasZdotu_ +#define clAmdBlasZdrot clAmdBlasZdrot_ +#define clAmdBlasZdscal clAmdBlasZdscal_ +#define clAmdBlasZgbmv clAmdBlasZgbmv_ +#define clAmdBlasZgemm clAmdBlasZgemm_ +#define clAmdBlasZgemmEx clAmdBlasZgemmEx_ +#define clAmdBlasZgemv clAmdBlasZgemv_ +#define clAmdBlasZgemvEx clAmdBlasZgemvEx_ +#define clAmdBlasZgerc clAmdBlasZgerc_ +#define clAmdBlasZgeru clAmdBlasZgeru_ +#define clAmdBlasZhbmv clAmdBlasZhbmv_ +#define clAmdBlasZhemm clAmdBlasZhemm_ +#define clAmdBlasZhemv clAmdBlasZhemv_ +#define clAmdBlasZher clAmdBlasZher_ +#define clAmdBlasZher2 clAmdBlasZher2_ +#define clAmdBlasZher2k clAmdBlasZher2k_ +#define clAmdBlasZherk clAmdBlasZherk_ +#define clAmdBlasZhpmv clAmdBlasZhpmv_ +#define clAmdBlasZhpr clAmdBlasZhpr_ +#define clAmdBlasZhpr2 clAmdBlasZhpr2_ +#define clAmdBlasZrotg clAmdBlasZrotg_ +#define clAmdBlasZscal clAmdBlasZscal_ +#define clAmdBlasZswap clAmdBlasZswap_ +#define clAmdBlasZsymm clAmdBlasZsymm_ +#define clAmdBlasZsyr2k clAmdBlasZsyr2k_ +#define clAmdBlasZsyr2kEx clAmdBlasZsyr2kEx_ +#define clAmdBlasZsyrk clAmdBlasZsyrk_ +#define clAmdBlasZsyrkEx clAmdBlasZsyrkEx_ +#define clAmdBlasZtbmv clAmdBlasZtbmv_ +#define clAmdBlasZtbsv clAmdBlasZtbsv_ +#define clAmdBlasZtpmv clAmdBlasZtpmv_ +#define clAmdBlasZtpsv clAmdBlasZtpsv_ +#define clAmdBlasZtrmm clAmdBlasZtrmm_ +#define clAmdBlasZtrmmEx clAmdBlasZtrmmEx_ +#define clAmdBlasZtrmv clAmdBlasZtrmv_ +#define clAmdBlasZtrsm clAmdBlasZtrsm_ +#define clAmdBlasZtrsmEx clAmdBlasZtrsmEx_ +#define clAmdBlasZtrsv clAmdBlasZtrsv_ +#define clAmdBlasiCamax clAmdBlasiCamax_ +#define clAmdBlasiDamax clAmdBlasiDamax_ +#define clAmdBlasiSamax clAmdBlasiSamax_ +#define clAmdBlasiZamax clAmdBlasiZamax_ + +#include + +// generated by parser_clamdblas.py +#undef clAmdBlasAddScratchImage +//#define clAmdBlasAddScratchImage clAmdBlasAddScratchImage_pfn +#undef clAmdBlasCaxpy +//#define clAmdBlasCaxpy clAmdBlasCaxpy_pfn +#undef clAmdBlasCcopy +//#define clAmdBlasCcopy clAmdBlasCcopy_pfn +#undef clAmdBlasCdotc +//#define clAmdBlasCdotc clAmdBlasCdotc_pfn +#undef clAmdBlasCdotu +//#define clAmdBlasCdotu clAmdBlasCdotu_pfn +#undef clAmdBlasCgbmv +//#define clAmdBlasCgbmv clAmdBlasCgbmv_pfn +#undef clAmdBlasCgemm +//#define clAmdBlasCgemm clAmdBlasCgemm_pfn +#undef clAmdBlasCgemmEx +#define clAmdBlasCgemmEx clAmdBlasCgemmEx_pfn +#undef clAmdBlasCgemv +//#define clAmdBlasCgemv clAmdBlasCgemv_pfn +#undef clAmdBlasCgemvEx +//#define clAmdBlasCgemvEx clAmdBlasCgemvEx_pfn +#undef clAmdBlasCgerc +//#define clAmdBlasCgerc clAmdBlasCgerc_pfn +#undef clAmdBlasCgeru +//#define clAmdBlasCgeru clAmdBlasCgeru_pfn +#undef clAmdBlasChbmv +//#define clAmdBlasChbmv clAmdBlasChbmv_pfn +#undef clAmdBlasChemm +//#define clAmdBlasChemm clAmdBlasChemm_pfn +#undef clAmdBlasChemv +//#define clAmdBlasChemv clAmdBlasChemv_pfn +#undef clAmdBlasCher +//#define clAmdBlasCher clAmdBlasCher_pfn +#undef clAmdBlasCher2 +//#define clAmdBlasCher2 clAmdBlasCher2_pfn +#undef clAmdBlasCher2k +//#define clAmdBlasCher2k clAmdBlasCher2k_pfn +#undef clAmdBlasCherk +//#define clAmdBlasCherk clAmdBlasCherk_pfn +#undef clAmdBlasChpmv +//#define clAmdBlasChpmv clAmdBlasChpmv_pfn +#undef clAmdBlasChpr +//#define clAmdBlasChpr clAmdBlasChpr_pfn +#undef clAmdBlasChpr2 +//#define clAmdBlasChpr2 clAmdBlasChpr2_pfn +#undef clAmdBlasCrotg +//#define clAmdBlasCrotg clAmdBlasCrotg_pfn +#undef clAmdBlasCscal +//#define clAmdBlasCscal clAmdBlasCscal_pfn +#undef clAmdBlasCsrot +//#define clAmdBlasCsrot clAmdBlasCsrot_pfn +#undef clAmdBlasCsscal +//#define 
clAmdBlasCsscal clAmdBlasCsscal_pfn +#undef clAmdBlasCswap +//#define clAmdBlasCswap clAmdBlasCswap_pfn +#undef clAmdBlasCsymm +//#define clAmdBlasCsymm clAmdBlasCsymm_pfn +#undef clAmdBlasCsyr2k +//#define clAmdBlasCsyr2k clAmdBlasCsyr2k_pfn +#undef clAmdBlasCsyr2kEx +//#define clAmdBlasCsyr2kEx clAmdBlasCsyr2kEx_pfn +#undef clAmdBlasCsyrk +//#define clAmdBlasCsyrk clAmdBlasCsyrk_pfn +#undef clAmdBlasCsyrkEx +//#define clAmdBlasCsyrkEx clAmdBlasCsyrkEx_pfn +#undef clAmdBlasCtbmv +//#define clAmdBlasCtbmv clAmdBlasCtbmv_pfn +#undef clAmdBlasCtbsv +//#define clAmdBlasCtbsv clAmdBlasCtbsv_pfn +#undef clAmdBlasCtpmv +//#define clAmdBlasCtpmv clAmdBlasCtpmv_pfn +#undef clAmdBlasCtpsv +//#define clAmdBlasCtpsv clAmdBlasCtpsv_pfn +#undef clAmdBlasCtrmm +//#define clAmdBlasCtrmm clAmdBlasCtrmm_pfn +#undef clAmdBlasCtrmmEx +//#define clAmdBlasCtrmmEx clAmdBlasCtrmmEx_pfn +#undef clAmdBlasCtrmv +//#define clAmdBlasCtrmv clAmdBlasCtrmv_pfn +#undef clAmdBlasCtrsm +//#define clAmdBlasCtrsm clAmdBlasCtrsm_pfn +#undef clAmdBlasCtrsmEx +//#define clAmdBlasCtrsmEx clAmdBlasCtrsmEx_pfn +#undef clAmdBlasCtrsv +//#define clAmdBlasCtrsv clAmdBlasCtrsv_pfn +#undef clAmdBlasDasum +//#define clAmdBlasDasum clAmdBlasDasum_pfn +#undef clAmdBlasDaxpy +//#define clAmdBlasDaxpy clAmdBlasDaxpy_pfn +#undef clAmdBlasDcopy +//#define clAmdBlasDcopy clAmdBlasDcopy_pfn +#undef clAmdBlasDdot +//#define clAmdBlasDdot clAmdBlasDdot_pfn +#undef clAmdBlasDgbmv +//#define clAmdBlasDgbmv clAmdBlasDgbmv_pfn +#undef clAmdBlasDgemm +//#define clAmdBlasDgemm clAmdBlasDgemm_pfn +#undef clAmdBlasDgemmEx +#define clAmdBlasDgemmEx clAmdBlasDgemmEx_pfn +#undef clAmdBlasDgemv +//#define clAmdBlasDgemv clAmdBlasDgemv_pfn +#undef clAmdBlasDgemvEx +//#define clAmdBlasDgemvEx clAmdBlasDgemvEx_pfn +#undef clAmdBlasDger +//#define clAmdBlasDger clAmdBlasDger_pfn +#undef clAmdBlasDnrm2 +//#define clAmdBlasDnrm2 clAmdBlasDnrm2_pfn +#undef clAmdBlasDrot +//#define clAmdBlasDrot clAmdBlasDrot_pfn +#undef clAmdBlasDrotg +//#define clAmdBlasDrotg clAmdBlasDrotg_pfn +#undef clAmdBlasDrotm +//#define clAmdBlasDrotm clAmdBlasDrotm_pfn +#undef clAmdBlasDrotmg +//#define clAmdBlasDrotmg clAmdBlasDrotmg_pfn +#undef clAmdBlasDsbmv +//#define clAmdBlasDsbmv clAmdBlasDsbmv_pfn +#undef clAmdBlasDscal +//#define clAmdBlasDscal clAmdBlasDscal_pfn +#undef clAmdBlasDspmv +//#define clAmdBlasDspmv clAmdBlasDspmv_pfn +#undef clAmdBlasDspr +//#define clAmdBlasDspr clAmdBlasDspr_pfn +#undef clAmdBlasDspr2 +//#define clAmdBlasDspr2 clAmdBlasDspr2_pfn +#undef clAmdBlasDswap +//#define clAmdBlasDswap clAmdBlasDswap_pfn +#undef clAmdBlasDsymm +//#define clAmdBlasDsymm clAmdBlasDsymm_pfn +#undef clAmdBlasDsymv +//#define clAmdBlasDsymv clAmdBlasDsymv_pfn +#undef clAmdBlasDsymvEx +//#define clAmdBlasDsymvEx clAmdBlasDsymvEx_pfn +#undef clAmdBlasDsyr +//#define clAmdBlasDsyr clAmdBlasDsyr_pfn +#undef clAmdBlasDsyr2 +//#define clAmdBlasDsyr2 clAmdBlasDsyr2_pfn +#undef clAmdBlasDsyr2k +//#define clAmdBlasDsyr2k clAmdBlasDsyr2k_pfn +#undef clAmdBlasDsyr2kEx +//#define clAmdBlasDsyr2kEx clAmdBlasDsyr2kEx_pfn +#undef clAmdBlasDsyrk +//#define clAmdBlasDsyrk clAmdBlasDsyrk_pfn +#undef clAmdBlasDsyrkEx +//#define clAmdBlasDsyrkEx clAmdBlasDsyrkEx_pfn +#undef clAmdBlasDtbmv +//#define clAmdBlasDtbmv clAmdBlasDtbmv_pfn +#undef clAmdBlasDtbsv +//#define clAmdBlasDtbsv clAmdBlasDtbsv_pfn +#undef clAmdBlasDtpmv +//#define clAmdBlasDtpmv clAmdBlasDtpmv_pfn +#undef clAmdBlasDtpsv +//#define clAmdBlasDtpsv clAmdBlasDtpsv_pfn +#undef clAmdBlasDtrmm +//#define clAmdBlasDtrmm 
clAmdBlasDtrmm_pfn +#undef clAmdBlasDtrmmEx +//#define clAmdBlasDtrmmEx clAmdBlasDtrmmEx_pfn +#undef clAmdBlasDtrmv +//#define clAmdBlasDtrmv clAmdBlasDtrmv_pfn +#undef clAmdBlasDtrsm +//#define clAmdBlasDtrsm clAmdBlasDtrsm_pfn +#undef clAmdBlasDtrsmEx +//#define clAmdBlasDtrsmEx clAmdBlasDtrsmEx_pfn +#undef clAmdBlasDtrsv +//#define clAmdBlasDtrsv clAmdBlasDtrsv_pfn +#undef clAmdBlasDzasum +//#define clAmdBlasDzasum clAmdBlasDzasum_pfn +#undef clAmdBlasDznrm2 +//#define clAmdBlasDznrm2 clAmdBlasDznrm2_pfn +#undef clAmdBlasGetVersion +//#define clAmdBlasGetVersion clAmdBlasGetVersion_pfn +#undef clAmdBlasRemoveScratchImage +//#define clAmdBlasRemoveScratchImage clAmdBlasRemoveScratchImage_pfn +#undef clAmdBlasSasum +//#define clAmdBlasSasum clAmdBlasSasum_pfn +#undef clAmdBlasSaxpy +//#define clAmdBlasSaxpy clAmdBlasSaxpy_pfn +#undef clAmdBlasScasum +//#define clAmdBlasScasum clAmdBlasScasum_pfn +#undef clAmdBlasScnrm2 +//#define clAmdBlasScnrm2 clAmdBlasScnrm2_pfn +#undef clAmdBlasScopy +//#define clAmdBlasScopy clAmdBlasScopy_pfn +#undef clAmdBlasSdot +//#define clAmdBlasSdot clAmdBlasSdot_pfn +#undef clAmdBlasSetup +#define clAmdBlasSetup clAmdBlasSetup_pfn +#undef clAmdBlasSgbmv +//#define clAmdBlasSgbmv clAmdBlasSgbmv_pfn +#undef clAmdBlasSgemm +//#define clAmdBlasSgemm clAmdBlasSgemm_pfn +#undef clAmdBlasSgemmEx +#define clAmdBlasSgemmEx clAmdBlasSgemmEx_pfn +#undef clAmdBlasSgemv +//#define clAmdBlasSgemv clAmdBlasSgemv_pfn +#undef clAmdBlasSgemvEx +//#define clAmdBlasSgemvEx clAmdBlasSgemvEx_pfn +#undef clAmdBlasSger +//#define clAmdBlasSger clAmdBlasSger_pfn +#undef clAmdBlasSnrm2 +//#define clAmdBlasSnrm2 clAmdBlasSnrm2_pfn +#undef clAmdBlasSrot +//#define clAmdBlasSrot clAmdBlasSrot_pfn +#undef clAmdBlasSrotg +//#define clAmdBlasSrotg clAmdBlasSrotg_pfn +#undef clAmdBlasSrotm +//#define clAmdBlasSrotm clAmdBlasSrotm_pfn +#undef clAmdBlasSrotmg +//#define clAmdBlasSrotmg clAmdBlasSrotmg_pfn +#undef clAmdBlasSsbmv +//#define clAmdBlasSsbmv clAmdBlasSsbmv_pfn +#undef clAmdBlasSscal +//#define clAmdBlasSscal clAmdBlasSscal_pfn +#undef clAmdBlasSspmv +//#define clAmdBlasSspmv clAmdBlasSspmv_pfn +#undef clAmdBlasSspr +//#define clAmdBlasSspr clAmdBlasSspr_pfn +#undef clAmdBlasSspr2 +//#define clAmdBlasSspr2 clAmdBlasSspr2_pfn +#undef clAmdBlasSswap +//#define clAmdBlasSswap clAmdBlasSswap_pfn +#undef clAmdBlasSsymm +//#define clAmdBlasSsymm clAmdBlasSsymm_pfn +#undef clAmdBlasSsymv +//#define clAmdBlasSsymv clAmdBlasSsymv_pfn +#undef clAmdBlasSsymvEx +//#define clAmdBlasSsymvEx clAmdBlasSsymvEx_pfn +#undef clAmdBlasSsyr +//#define clAmdBlasSsyr clAmdBlasSsyr_pfn +#undef clAmdBlasSsyr2 +//#define clAmdBlasSsyr2 clAmdBlasSsyr2_pfn +#undef clAmdBlasSsyr2k +//#define clAmdBlasSsyr2k clAmdBlasSsyr2k_pfn +#undef clAmdBlasSsyr2kEx +//#define clAmdBlasSsyr2kEx clAmdBlasSsyr2kEx_pfn +#undef clAmdBlasSsyrk +//#define clAmdBlasSsyrk clAmdBlasSsyrk_pfn +#undef clAmdBlasSsyrkEx +//#define clAmdBlasSsyrkEx clAmdBlasSsyrkEx_pfn +#undef clAmdBlasStbmv +//#define clAmdBlasStbmv clAmdBlasStbmv_pfn +#undef clAmdBlasStbsv +//#define clAmdBlasStbsv clAmdBlasStbsv_pfn +#undef clAmdBlasStpmv +//#define clAmdBlasStpmv clAmdBlasStpmv_pfn +#undef clAmdBlasStpsv +//#define clAmdBlasStpsv clAmdBlasStpsv_pfn +#undef clAmdBlasStrmm +//#define clAmdBlasStrmm clAmdBlasStrmm_pfn +#undef clAmdBlasStrmmEx +//#define clAmdBlasStrmmEx clAmdBlasStrmmEx_pfn +#undef clAmdBlasStrmv +//#define clAmdBlasStrmv clAmdBlasStrmv_pfn +#undef clAmdBlasStrsm +//#define clAmdBlasStrsm clAmdBlasStrsm_pfn +#undef clAmdBlasStrsmEx 
+//#define clAmdBlasStrsmEx clAmdBlasStrsmEx_pfn +#undef clAmdBlasStrsv +//#define clAmdBlasStrsv clAmdBlasStrsv_pfn +#undef clAmdBlasTeardown +#define clAmdBlasTeardown clAmdBlasTeardown_pfn +#undef clAmdBlasZaxpy +//#define clAmdBlasZaxpy clAmdBlasZaxpy_pfn +#undef clAmdBlasZcopy +//#define clAmdBlasZcopy clAmdBlasZcopy_pfn +#undef clAmdBlasZdotc +//#define clAmdBlasZdotc clAmdBlasZdotc_pfn +#undef clAmdBlasZdotu +//#define clAmdBlasZdotu clAmdBlasZdotu_pfn +#undef clAmdBlasZdrot +//#define clAmdBlasZdrot clAmdBlasZdrot_pfn +#undef clAmdBlasZdscal +//#define clAmdBlasZdscal clAmdBlasZdscal_pfn +#undef clAmdBlasZgbmv +//#define clAmdBlasZgbmv clAmdBlasZgbmv_pfn +#undef clAmdBlasZgemm +//#define clAmdBlasZgemm clAmdBlasZgemm_pfn +#undef clAmdBlasZgemmEx +#define clAmdBlasZgemmEx clAmdBlasZgemmEx_pfn +#undef clAmdBlasZgemv +//#define clAmdBlasZgemv clAmdBlasZgemv_pfn +#undef clAmdBlasZgemvEx +//#define clAmdBlasZgemvEx clAmdBlasZgemvEx_pfn +#undef clAmdBlasZgerc +//#define clAmdBlasZgerc clAmdBlasZgerc_pfn +#undef clAmdBlasZgeru +//#define clAmdBlasZgeru clAmdBlasZgeru_pfn +#undef clAmdBlasZhbmv +//#define clAmdBlasZhbmv clAmdBlasZhbmv_pfn +#undef clAmdBlasZhemm +//#define clAmdBlasZhemm clAmdBlasZhemm_pfn +#undef clAmdBlasZhemv +//#define clAmdBlasZhemv clAmdBlasZhemv_pfn +#undef clAmdBlasZher +//#define clAmdBlasZher clAmdBlasZher_pfn +#undef clAmdBlasZher2 +//#define clAmdBlasZher2 clAmdBlasZher2_pfn +#undef clAmdBlasZher2k +//#define clAmdBlasZher2k clAmdBlasZher2k_pfn +#undef clAmdBlasZherk +//#define clAmdBlasZherk clAmdBlasZherk_pfn +#undef clAmdBlasZhpmv +//#define clAmdBlasZhpmv clAmdBlasZhpmv_pfn +#undef clAmdBlasZhpr +//#define clAmdBlasZhpr clAmdBlasZhpr_pfn +#undef clAmdBlasZhpr2 +//#define clAmdBlasZhpr2 clAmdBlasZhpr2_pfn +#undef clAmdBlasZrotg +//#define clAmdBlasZrotg clAmdBlasZrotg_pfn +#undef clAmdBlasZscal +//#define clAmdBlasZscal clAmdBlasZscal_pfn +#undef clAmdBlasZswap +//#define clAmdBlasZswap clAmdBlasZswap_pfn +#undef clAmdBlasZsymm +//#define clAmdBlasZsymm clAmdBlasZsymm_pfn +#undef clAmdBlasZsyr2k +//#define clAmdBlasZsyr2k clAmdBlasZsyr2k_pfn +#undef clAmdBlasZsyr2kEx +//#define clAmdBlasZsyr2kEx clAmdBlasZsyr2kEx_pfn +#undef clAmdBlasZsyrk +//#define clAmdBlasZsyrk clAmdBlasZsyrk_pfn +#undef clAmdBlasZsyrkEx +//#define clAmdBlasZsyrkEx clAmdBlasZsyrkEx_pfn +#undef clAmdBlasZtbmv +//#define clAmdBlasZtbmv clAmdBlasZtbmv_pfn +#undef clAmdBlasZtbsv +//#define clAmdBlasZtbsv clAmdBlasZtbsv_pfn +#undef clAmdBlasZtpmv +//#define clAmdBlasZtpmv clAmdBlasZtpmv_pfn +#undef clAmdBlasZtpsv +//#define clAmdBlasZtpsv clAmdBlasZtpsv_pfn +#undef clAmdBlasZtrmm +//#define clAmdBlasZtrmm clAmdBlasZtrmm_pfn +#undef clAmdBlasZtrmmEx +//#define clAmdBlasZtrmmEx clAmdBlasZtrmmEx_pfn +#undef clAmdBlasZtrmv +//#define clAmdBlasZtrmv clAmdBlasZtrmv_pfn +#undef clAmdBlasZtrsm +//#define clAmdBlasZtrsm clAmdBlasZtrsm_pfn +#undef clAmdBlasZtrsmEx +//#define clAmdBlasZtrsmEx clAmdBlasZtrsmEx_pfn +#undef clAmdBlasZtrsv +//#define clAmdBlasZtrsv clAmdBlasZtrsv_pfn +#undef clAmdBlasiCamax +//#define clAmdBlasiCamax clAmdBlasiCamax_pfn +#undef clAmdBlasiDamax +//#define clAmdBlasiDamax clAmdBlasiDamax_pfn +#undef clAmdBlasiSamax +//#define clAmdBlasiSamax clAmdBlasiSamax_pfn +#undef clAmdBlasiZamax +//#define clAmdBlasiZamax clAmdBlasiZamax_pfn + +// generated by parser_clamdblas.py +//extern CL_RUNTIME_EXPORT cl_ulong (*clAmdBlasAddScratchImage)(cl_context context, size_t width, size_t height, clAmdBlasStatus* status); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCaxpy)(size_t N, 
cl_float2 alpha, const cl_mem X, size_t offx, int incx, cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCcopy)(size_t N, const cl_mem X, size_t offx, int incx, cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCdotc)(size_t N, cl_mem dotProduct, size_t offDP, const cl_mem X, size_t offx, int incx, const cl_mem Y, size_t offy, int incy, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCdotu)(size_t N, cl_mem dotProduct, size_t offDP, const cl_mem X, size_t offx, int incx, const cl_mem Y, size_t offy, int incy, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCgbmv)(clAmdBlasOrder order, clAmdBlasTranspose trans, size_t M, size_t N, size_t KL, size_t KU, cl_float2 alpha, const cl_mem A, size_t offa, size_t lda, const cl_mem X, size_t offx, int incx, cl_float2 beta, cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCgemm)(clAmdBlasOrder order, clAmdBlasTranspose transA, clAmdBlasTranspose transB, size_t M, size_t N, size_t K, FloatComplex alpha, const cl_mem A, size_t lda, const cl_mem B, size_t ldb, FloatComplex beta, cl_mem C, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCgemmEx)(clAmdBlasOrder order, clAmdBlasTranspose transA, clAmdBlasTranspose transB, size_t M, size_t N, size_t K, FloatComplex alpha, const cl_mem A, size_t offA, size_t lda, const cl_mem B, size_t offB, size_t ldb, FloatComplex beta, cl_mem C, size_t offC, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCgemv)(clAmdBlasOrder order, clAmdBlasTranspose transA, size_t M, size_t N, FloatComplex alpha, const cl_mem A, size_t lda, const cl_mem x, size_t offx, int incx, FloatComplex beta, cl_mem y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCgemvEx)(clAmdBlasOrder order, clAmdBlasTranspose transA, size_t M, size_t N, FloatComplex alpha, const cl_mem A, size_t offA, size_t lda, const cl_mem x, size_t offx, int incx, FloatComplex beta, cl_mem y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCgerc)(clAmdBlasOrder order, size_t M, size_t N, cl_float2 alpha, const cl_mem X, size_t offx, int incx, const cl_mem Y, size_t offy, int incy, cl_mem A, 
size_t offa, size_t lda, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCgeru)(clAmdBlasOrder order, size_t M, size_t N, cl_float2 alpha, const cl_mem X, size_t offx, int incx, const cl_mem Y, size_t offy, int incy, cl_mem A, size_t offa, size_t lda, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasChbmv)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, size_t K, cl_float2 alpha, const cl_mem A, size_t offa, size_t lda, const cl_mem X, size_t offx, int incx, cl_float2 beta, cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasChemm)(clAmdBlasOrder order, clAmdBlasSide side, clAmdBlasUplo uplo, size_t M, size_t N, cl_float2 alpha, const cl_mem A, size_t offa, size_t lda, const cl_mem B, size_t offb, size_t ldb, cl_float2 beta, cl_mem C, size_t offc, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasChemv)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, FloatComplex alpha, const cl_mem A, size_t offa, size_t lda, const cl_mem X, size_t offx, int incx, FloatComplex beta, cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCher)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, cl_float alpha, const cl_mem X, size_t offx, int incx, cl_mem A, size_t offa, size_t lda, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCher2)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, cl_float2 alpha, const cl_mem X, size_t offx, int incx, const cl_mem Y, size_t offy, int incy, cl_mem A, size_t offa, size_t lda, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCher2k)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose trans, size_t N, size_t K, FloatComplex alpha, const cl_mem A, size_t offa, size_t lda, const cl_mem B, size_t offb, size_t ldb, cl_float beta, cl_mem C, size_t offc, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCherk)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose transA, size_t N, size_t K, float alpha, const cl_mem A, size_t offa, size_t lda, float beta, cl_mem C, size_t offc, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasChpmv)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, cl_float2 alpha, const cl_mem AP, size_t offa, const cl_mem X, size_t offx, int incx, cl_float2 beta, 
cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasChpr)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, cl_float alpha, const cl_mem X, size_t offx, int incx, cl_mem AP, size_t offa, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasChpr2)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, cl_float2 alpha, const cl_mem X, size_t offx, int incx, const cl_mem Y, size_t offy, int incy, cl_mem AP, size_t offa, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCrotg)(cl_mem CA, size_t offCA, cl_mem CB, size_t offCB, cl_mem C, size_t offC, cl_mem S, size_t offS, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCscal)(size_t N, cl_float2 alpha, cl_mem X, size_t offx, int incx, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCsrot)(size_t N, cl_mem X, size_t offx, int incx, cl_mem Y, size_t offy, int incy, cl_float C, cl_float S, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCsscal)(size_t N, cl_float alpha, cl_mem X, size_t offx, int incx, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCswap)(size_t N, cl_mem X, size_t offx, int incx, cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCsymm)(clAmdBlasOrder order, clAmdBlasSide side, clAmdBlasUplo uplo, size_t M, size_t N, cl_float2 alpha, const cl_mem A, size_t offa, size_t lda, const cl_mem B, size_t offb, size_t ldb, cl_float2 beta, cl_mem C, size_t offc, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCsyr2k)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose transAB, size_t N, size_t K, FloatComplex alpha, const cl_mem A, size_t lda, const cl_mem B, size_t ldb, FloatComplex beta, cl_mem C, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCsyr2kEx)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose transAB, size_t N, size_t K, FloatComplex alpha, const cl_mem A, size_t offA, size_t lda, const cl_mem B, size_t offB, size_t ldb, FloatComplex beta, cl_mem C, size_t offC, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); 
+//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCsyrk)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose transA, size_t N, size_t K, FloatComplex alpha, const cl_mem A, size_t lda, FloatComplex beta, cl_mem C, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCsyrkEx)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose transA, size_t N, size_t K, FloatComplex alpha, const cl_mem A, size_t offA, size_t lda, FloatComplex beta, cl_mem C, size_t offC, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCtbmv)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose trans, clAmdBlasDiag diag, size_t N, size_t K, const cl_mem A, size_t offa, size_t lda, cl_mem X, size_t offx, int incx, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCtbsv)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose trans, clAmdBlasDiag diag, size_t N, size_t K, const cl_mem A, size_t offa, size_t lda, cl_mem X, size_t offx, int incx, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCtpmv)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose trans, clAmdBlasDiag diag, size_t N, const cl_mem AP, size_t offa, cl_mem X, size_t offx, int incx, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCtpsv)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose trans, clAmdBlasDiag diag, size_t N, const cl_mem A, size_t offa, cl_mem X, size_t offx, int incx, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCtrmm)(clAmdBlasOrder order, clAmdBlasSide side, clAmdBlasUplo uplo, clAmdBlasTranspose transA, clAmdBlasDiag diag, size_t M, size_t N, FloatComplex alpha, const cl_mem A, size_t lda, cl_mem B, size_t ldb, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCtrmmEx)(clAmdBlasOrder order, clAmdBlasSide side, clAmdBlasUplo uplo, clAmdBlasTranspose transA, clAmdBlasDiag diag, size_t M, size_t N, FloatComplex alpha, const cl_mem A, size_t offA, size_t lda, cl_mem B, size_t offB, size_t ldb, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCtrmv)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose trans, clAmdBlasDiag diag, size_t N, const cl_mem A, size_t offa, size_t lda, cl_mem X, size_t offx, int incx, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT 
clAmdBlasStatus (*clAmdBlasCtrsm)(clAmdBlasOrder order, clAmdBlasSide side, clAmdBlasUplo uplo, clAmdBlasTranspose transA, clAmdBlasDiag diag, size_t M, size_t N, FloatComplex alpha, const cl_mem A, size_t lda, cl_mem B, size_t ldb, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCtrsmEx)(clAmdBlasOrder order, clAmdBlasSide side, clAmdBlasUplo uplo, clAmdBlasTranspose transA, clAmdBlasDiag diag, size_t M, size_t N, FloatComplex alpha, const cl_mem A, size_t offA, size_t lda, cl_mem B, size_t offB, size_t ldb, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasCtrsv)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose trans, clAmdBlasDiag diag, size_t N, const cl_mem A, size_t offa, size_t lda, cl_mem X, size_t offx, int incx, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDasum)(size_t N, cl_mem asum, size_t offAsum, const cl_mem X, size_t offx, int incx, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDaxpy)(size_t N, cl_double alpha, const cl_mem X, size_t offx, int incx, cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDcopy)(size_t N, const cl_mem X, size_t offx, int incx, cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDdot)(size_t N, cl_mem dotProduct, size_t offDP, const cl_mem X, size_t offx, int incx, const cl_mem Y, size_t offy, int incy, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDgbmv)(clAmdBlasOrder order, clAmdBlasTranspose trans, size_t M, size_t N, size_t KL, size_t KU, cl_double alpha, const cl_mem A, size_t offa, size_t lda, const cl_mem X, size_t offx, int incx, cl_double beta, cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDgemm)(clAmdBlasOrder order, clAmdBlasTranspose transA, clAmdBlasTranspose transB, size_t M, size_t N, size_t K, cl_double alpha, const cl_mem A, size_t lda, const cl_mem B, size_t ldb, cl_double beta, cl_mem C, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDgemmEx)(clAmdBlasOrder order, clAmdBlasTranspose transA, clAmdBlasTranspose transB, size_t M, size_t N, size_t K, cl_double alpha, const cl_mem A, size_t offA, size_t lda, const cl_mem B, size_t offB, size_t ldb, cl_double beta, cl_mem C, size_t offC, size_t 
ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDgemv)(clAmdBlasOrder order, clAmdBlasTranspose transA, size_t M, size_t N, cl_double alpha, const cl_mem A, size_t lda, const cl_mem x, size_t offx, int incx, cl_double beta, cl_mem y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDgemvEx)(clAmdBlasOrder order, clAmdBlasTranspose transA, size_t M, size_t N, cl_double alpha, const cl_mem A, size_t offA, size_t lda, const cl_mem x, size_t offx, int incx, cl_double beta, cl_mem y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDger)(clAmdBlasOrder order, size_t M, size_t N, cl_double alpha, const cl_mem X, size_t offx, int incx, const cl_mem Y, size_t offy, int incy, cl_mem A, size_t offa, size_t lda, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDnrm2)(size_t N, cl_mem NRM2, size_t offNRM2, const cl_mem X, size_t offx, int incx, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDrot)(size_t N, cl_mem X, size_t offx, int incx, cl_mem Y, size_t offy, int incy, cl_double C, cl_double S, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDrotg)(cl_mem DA, size_t offDA, cl_mem DB, size_t offDB, cl_mem C, size_t offC, cl_mem S, size_t offS, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDrotm)(size_t N, cl_mem X, size_t offx, int incx, cl_mem Y, size_t offy, int incy, const cl_mem DPARAM, size_t offDparam, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDrotmg)(cl_mem DD1, size_t offDD1, cl_mem DD2, size_t offDD2, cl_mem DX1, size_t offDX1, const cl_mem DY1, size_t offDY1, cl_mem DPARAM, size_t offDparam, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDsbmv)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, size_t K, cl_double alpha, const cl_mem A, size_t offa, size_t lda, const cl_mem X, size_t offx, int incx, cl_double beta, cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDscal)(size_t N, cl_double alpha, cl_mem X, size_t offx, int incx, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* 
events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDspmv)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, cl_double alpha, const cl_mem AP, size_t offa, const cl_mem X, size_t offx, int incx, cl_double beta, cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDspr)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, cl_double alpha, const cl_mem X, size_t offx, int incx, cl_mem AP, size_t offa, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDspr2)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, cl_double alpha, const cl_mem X, size_t offx, int incx, const cl_mem Y, size_t offy, int incy, cl_mem AP, size_t offa, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDswap)(size_t N, cl_mem X, size_t offx, int incx, cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDsymm)(clAmdBlasOrder order, clAmdBlasSide side, clAmdBlasUplo uplo, size_t M, size_t N, cl_double alpha, const cl_mem A, size_t offa, size_t lda, const cl_mem B, size_t offb, size_t ldb, cl_double beta, cl_mem C, size_t offc, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDsymv)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, cl_double alpha, const cl_mem A, size_t lda, const cl_mem x, size_t offx, int incx, cl_double beta, cl_mem y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDsymvEx)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, cl_double alpha, const cl_mem A, size_t offA, size_t lda, const cl_mem x, size_t offx, int incx, cl_double beta, cl_mem y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDsyr)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, cl_double alpha, const cl_mem X, size_t offx, int incx, cl_mem A, size_t offa, size_t lda, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDsyr2)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, cl_double alpha, const cl_mem X, size_t offx, int incx, const cl_mem Y, size_t offy, int incy, cl_mem A, size_t offa, size_t lda, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDsyr2k)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose transAB, size_t N, size_t K, cl_double alpha, const cl_mem A, size_t lda, const cl_mem B, size_t ldb, cl_double 
beta, cl_mem C, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDsyr2kEx)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose transAB, size_t N, size_t K, cl_double alpha, const cl_mem A, size_t offA, size_t lda, const cl_mem B, size_t offB, size_t ldb, cl_double beta, cl_mem C, size_t offC, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDsyrk)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose transA, size_t N, size_t K, cl_double alpha, const cl_mem A, size_t lda, cl_double beta, cl_mem C, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDsyrkEx)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose transA, size_t N, size_t K, cl_double alpha, const cl_mem A, size_t offA, size_t lda, cl_double beta, cl_mem C, size_t offC, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDtbmv)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose trans, clAmdBlasDiag diag, size_t N, size_t K, const cl_mem A, size_t offa, size_t lda, cl_mem X, size_t offx, int incx, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDtbsv)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose trans, clAmdBlasDiag diag, size_t N, size_t K, const cl_mem A, size_t offa, size_t lda, cl_mem X, size_t offx, int incx, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDtpmv)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose trans, clAmdBlasDiag diag, size_t N, const cl_mem AP, size_t offa, cl_mem X, size_t offx, int incx, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDtpsv)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose trans, clAmdBlasDiag diag, size_t N, const cl_mem A, size_t offa, cl_mem X, size_t offx, int incx, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDtrmm)(clAmdBlasOrder order, clAmdBlasSide side, clAmdBlasUplo uplo, clAmdBlasTranspose transA, clAmdBlasDiag diag, size_t M, size_t N, cl_double alpha, const cl_mem A, size_t lda, cl_mem B, size_t ldb, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDtrmmEx)(clAmdBlasOrder order, clAmdBlasSide side, clAmdBlasUplo uplo, clAmdBlasTranspose transA, clAmdBlasDiag diag, size_t M, size_t N, cl_double alpha, const cl_mem A, size_t offA, size_t lda, 
cl_mem B, size_t offB, size_t ldb, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDtrmv)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose trans, clAmdBlasDiag diag, size_t N, const cl_mem A, size_t offa, size_t lda, cl_mem X, size_t offx, int incx, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDtrsm)(clAmdBlasOrder order, clAmdBlasSide side, clAmdBlasUplo uplo, clAmdBlasTranspose transA, clAmdBlasDiag diag, size_t M, size_t N, cl_double alpha, const cl_mem A, size_t lda, cl_mem B, size_t ldb, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDtrsmEx)(clAmdBlasOrder order, clAmdBlasSide side, clAmdBlasUplo uplo, clAmdBlasTranspose transA, clAmdBlasDiag diag, size_t M, size_t N, cl_double alpha, const cl_mem A, size_t offA, size_t lda, cl_mem B, size_t offB, size_t ldb, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDtrsv)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose trans, clAmdBlasDiag diag, size_t N, const cl_mem A, size_t offa, size_t lda, cl_mem X, size_t offx, int incx, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDzasum)(size_t N, cl_mem asum, size_t offAsum, const cl_mem X, size_t offx, int incx, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasDznrm2)(size_t N, cl_mem NRM2, size_t offNRM2, const cl_mem X, size_t offx, int incx, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasGetVersion)(cl_uint* major, cl_uint* minor, cl_uint* patch); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasRemoveScratchImage)(cl_ulong imageID); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSasum)(size_t N, cl_mem asum, size_t offAsum, const cl_mem X, size_t offx, int incx, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSaxpy)(size_t N, cl_float alpha, const cl_mem X, size_t offx, int incx, cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasScasum)(size_t N, cl_mem asum, size_t offAsum, const cl_mem X, size_t offx, int incx, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasScnrm2)(size_t N, cl_mem NRM2, size_t 
offNRM2, const cl_mem X, size_t offx, int incx, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasScopy)(size_t N, const cl_mem X, size_t offx, int incx, cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSdot)(size_t N, cl_mem dotProduct, size_t offDP, const cl_mem X, size_t offx, int incx, const cl_mem Y, size_t offy, int incy, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSetup)(); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSgbmv)(clAmdBlasOrder order, clAmdBlasTranspose trans, size_t M, size_t N, size_t KL, size_t KU, cl_float alpha, const cl_mem A, size_t offa, size_t lda, const cl_mem X, size_t offx, int incx, cl_float beta, cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSgemm)(clAmdBlasOrder order, clAmdBlasTranspose transA, clAmdBlasTranspose transB, size_t M, size_t N, size_t K, cl_float alpha, const cl_mem A, size_t lda, const cl_mem B, size_t ldb, cl_float beta, cl_mem C, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSgemmEx)(clAmdBlasOrder order, clAmdBlasTranspose transA, clAmdBlasTranspose transB, size_t M, size_t N, size_t K, cl_float alpha, const cl_mem A, size_t offA, size_t lda, const cl_mem B, size_t offB, size_t ldb, cl_float beta, cl_mem C, size_t offC, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSgemv)(clAmdBlasOrder order, clAmdBlasTranspose transA, size_t M, size_t N, cl_float alpha, const cl_mem A, size_t lda, const cl_mem x, size_t offx, int incx, cl_float beta, cl_mem y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSgemvEx)(clAmdBlasOrder order, clAmdBlasTranspose transA, size_t M, size_t N, cl_float alpha, const cl_mem A, size_t offA, size_t lda, const cl_mem x, size_t offx, int incx, cl_float beta, cl_mem y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSger)(clAmdBlasOrder order, size_t M, size_t N, cl_float alpha, const cl_mem X, size_t offx, int incx, const cl_mem Y, size_t offy, int incy, cl_mem A, size_t offa, size_t lda, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSnrm2)(size_t N, cl_mem NRM2, size_t offNRM2, const cl_mem X, size_t offx, int incx, cl_mem scratchBuff, cl_uint 
numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSrot)(size_t N, cl_mem X, size_t offx, int incx, cl_mem Y, size_t offy, int incy, cl_float C, cl_float S, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSrotg)(cl_mem SA, size_t offSA, cl_mem SB, size_t offSB, cl_mem C, size_t offC, cl_mem S, size_t offS, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSrotm)(size_t N, cl_mem X, size_t offx, int incx, cl_mem Y, size_t offy, int incy, const cl_mem SPARAM, size_t offSparam, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSrotmg)(cl_mem SD1, size_t offSD1, cl_mem SD2, size_t offSD2, cl_mem SX1, size_t offSX1, const cl_mem SY1, size_t offSY1, cl_mem SPARAM, size_t offSparam, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSsbmv)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, size_t K, cl_float alpha, const cl_mem A, size_t offa, size_t lda, const cl_mem X, size_t offx, int incx, cl_float beta, cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSscal)(size_t N, cl_float alpha, cl_mem X, size_t offx, int incx, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSspmv)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, cl_float alpha, const cl_mem AP, size_t offa, const cl_mem X, size_t offx, int incx, cl_float beta, cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSspr)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, cl_float alpha, const cl_mem X, size_t offx, int incx, cl_mem AP, size_t offa, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSspr2)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, cl_float alpha, const cl_mem X, size_t offx, int incx, const cl_mem Y, size_t offy, int incy, cl_mem AP, size_t offa, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSswap)(size_t N, cl_mem X, size_t offx, int incx, cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSsymm)(clAmdBlasOrder order, clAmdBlasSide side, clAmdBlasUplo uplo, size_t M, size_t 
N, cl_float alpha, const cl_mem A, size_t offa, size_t lda, const cl_mem B, size_t offb, size_t ldb, cl_float beta, cl_mem C, size_t offc, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSsymv)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, cl_float alpha, const cl_mem A, size_t lda, const cl_mem x, size_t offx, int incx, cl_float beta, cl_mem y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSsymvEx)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, cl_float alpha, const cl_mem A, size_t offA, size_t lda, const cl_mem x, size_t offx, int incx, cl_float beta, cl_mem y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSsyr)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, cl_float alpha, const cl_mem X, size_t offx, int incx, cl_mem A, size_t offa, size_t lda, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSsyr2)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, cl_float alpha, const cl_mem X, size_t offx, int incx, const cl_mem Y, size_t offy, int incy, cl_mem A, size_t offa, size_t lda, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSsyr2k)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose transAB, size_t N, size_t K, cl_float alpha, const cl_mem A, size_t lda, const cl_mem B, size_t ldb, cl_float beta, cl_mem C, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSsyr2kEx)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose transAB, size_t N, size_t K, cl_float alpha, const cl_mem A, size_t offA, size_t lda, const cl_mem B, size_t offB, size_t ldb, cl_float beta, cl_mem C, size_t offC, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSsyrk)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose transA, size_t N, size_t K, cl_float alpha, const cl_mem A, size_t lda, cl_float beta, cl_mem C, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasSsyrkEx)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose transA, size_t N, size_t K, cl_float alpha, const cl_mem A, size_t offA, size_t lda, cl_float beta, cl_mem C, size_t offC, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasStbmv)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose trans, clAmdBlasDiag diag, 
size_t N, size_t K, const cl_mem A, size_t offa, size_t lda, cl_mem X, size_t offx, int incx, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasStbsv)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose trans, clAmdBlasDiag diag, size_t N, size_t K, const cl_mem A, size_t offa, size_t lda, cl_mem X, size_t offx, int incx, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasStpmv)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose trans, clAmdBlasDiag diag, size_t N, const cl_mem AP, size_t offa, cl_mem X, size_t offx, int incx, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasStpsv)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose trans, clAmdBlasDiag diag, size_t N, const cl_mem A, size_t offa, cl_mem X, size_t offx, int incx, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasStrmm)(clAmdBlasOrder order, clAmdBlasSide side, clAmdBlasUplo uplo, clAmdBlasTranspose transA, clAmdBlasDiag diag, size_t M, size_t N, cl_float alpha, const cl_mem A, size_t lda, cl_mem B, size_t ldb, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasStrmmEx)(clAmdBlasOrder order, clAmdBlasSide side, clAmdBlasUplo uplo, clAmdBlasTranspose transA, clAmdBlasDiag diag, size_t M, size_t N, cl_float alpha, const cl_mem A, size_t offA, size_t lda, cl_mem B, size_t offB, size_t ldb, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasStrmv)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose trans, clAmdBlasDiag diag, size_t N, const cl_mem A, size_t offa, size_t lda, cl_mem X, size_t offx, int incx, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasStrsm)(clAmdBlasOrder order, clAmdBlasSide side, clAmdBlasUplo uplo, clAmdBlasTranspose transA, clAmdBlasDiag diag, size_t M, size_t N, cl_float alpha, const cl_mem A, size_t lda, cl_mem B, size_t ldb, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasStrsmEx)(clAmdBlasOrder order, clAmdBlasSide side, clAmdBlasUplo uplo, clAmdBlasTranspose transA, clAmdBlasDiag diag, size_t M, size_t N, cl_float alpha, const cl_mem A, size_t offA, size_t lda, cl_mem B, size_t offB, size_t ldb, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasStrsv)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose trans, clAmdBlasDiag diag, 
size_t N, const cl_mem A, size_t offa, size_t lda, cl_mem X, size_t offx, int incx, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +extern CL_RUNTIME_EXPORT void (*clAmdBlasTeardown)(); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZaxpy)(size_t N, cl_double2 alpha, const cl_mem X, size_t offx, int incx, cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZcopy)(size_t N, const cl_mem X, size_t offx, int incx, cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZdotc)(size_t N, cl_mem dotProduct, size_t offDP, const cl_mem X, size_t offx, int incx, const cl_mem Y, size_t offy, int incy, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZdotu)(size_t N, cl_mem dotProduct, size_t offDP, const cl_mem X, size_t offx, int incx, const cl_mem Y, size_t offy, int incy, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZdrot)(size_t N, cl_mem X, size_t offx, int incx, cl_mem Y, size_t offy, int incy, cl_double C, cl_double S, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZdscal)(size_t N, cl_double alpha, cl_mem X, size_t offx, int incx, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZgbmv)(clAmdBlasOrder order, clAmdBlasTranspose trans, size_t M, size_t N, size_t KL, size_t KU, cl_double2 alpha, const cl_mem A, size_t offa, size_t lda, const cl_mem X, size_t offx, int incx, cl_double2 beta, cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZgemm)(clAmdBlasOrder order, clAmdBlasTranspose transA, clAmdBlasTranspose transB, size_t M, size_t N, size_t K, DoubleComplex alpha, const cl_mem A, size_t lda, const cl_mem B, size_t ldb, DoubleComplex beta, cl_mem C, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZgemmEx)(clAmdBlasOrder order, clAmdBlasTranspose transA, clAmdBlasTranspose transB, size_t M, size_t N, size_t K, DoubleComplex alpha, const cl_mem A, size_t offA, size_t lda, const cl_mem B, size_t offB, size_t ldb, DoubleComplex beta, cl_mem C, size_t offC, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZgemv)(clAmdBlasOrder order, clAmdBlasTranspose transA, size_t M, 
size_t N, DoubleComplex alpha, const cl_mem A, size_t lda, const cl_mem x, size_t offx, int incx, DoubleComplex beta, cl_mem y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZgemvEx)(clAmdBlasOrder order, clAmdBlasTranspose transA, size_t M, size_t N, DoubleComplex alpha, const cl_mem A, size_t offA, size_t lda, const cl_mem x, size_t offx, int incx, DoubleComplex beta, cl_mem y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZgerc)(clAmdBlasOrder order, size_t M, size_t N, cl_double2 alpha, const cl_mem X, size_t offx, int incx, const cl_mem Y, size_t offy, int incy, cl_mem A, size_t offa, size_t lda, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZgeru)(clAmdBlasOrder order, size_t M, size_t N, cl_double2 alpha, const cl_mem X, size_t offx, int incx, const cl_mem Y, size_t offy, int incy, cl_mem A, size_t offa, size_t lda, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZhbmv)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, size_t K, cl_double2 alpha, const cl_mem A, size_t offa, size_t lda, const cl_mem X, size_t offx, int incx, cl_double2 beta, cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZhemm)(clAmdBlasOrder order, clAmdBlasSide side, clAmdBlasUplo uplo, size_t M, size_t N, cl_double2 alpha, const cl_mem A, size_t offa, size_t lda, const cl_mem B, size_t offb, size_t ldb, cl_double2 beta, cl_mem C, size_t offc, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZhemv)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, DoubleComplex alpha, const cl_mem A, size_t offa, size_t lda, const cl_mem X, size_t offx, int incx, DoubleComplex beta, cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZher)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, cl_double alpha, const cl_mem X, size_t offx, int incx, cl_mem A, size_t offa, size_t lda, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZher2)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, cl_double2 alpha, const cl_mem X, size_t offx, int incx, const cl_mem Y, size_t offy, int incy, cl_mem A, size_t offa, size_t lda, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZher2k)(clAmdBlasOrder order, clAmdBlasUplo uplo, 
clAmdBlasTranspose trans, size_t N, size_t K, DoubleComplex alpha, const cl_mem A, size_t offa, size_t lda, const cl_mem B, size_t offb, size_t ldb, cl_double beta, cl_mem C, size_t offc, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZherk)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose transA, size_t N, size_t K, double alpha, const cl_mem A, size_t offa, size_t lda, double beta, cl_mem C, size_t offc, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZhpmv)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, cl_double2 alpha, const cl_mem AP, size_t offa, const cl_mem X, size_t offx, int incx, cl_double2 beta, cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZhpr)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, cl_double alpha, const cl_mem X, size_t offx, int incx, cl_mem AP, size_t offa, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZhpr2)(clAmdBlasOrder order, clAmdBlasUplo uplo, size_t N, cl_double2 alpha, const cl_mem X, size_t offx, int incx, const cl_mem Y, size_t offy, int incy, cl_mem AP, size_t offa, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZrotg)(cl_mem CA, size_t offCA, cl_mem CB, size_t offCB, cl_mem C, size_t offC, cl_mem S, size_t offS, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZscal)(size_t N, cl_double2 alpha, cl_mem X, size_t offx, int incx, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZswap)(size_t N, cl_mem X, size_t offx, int incx, cl_mem Y, size_t offy, int incy, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZsymm)(clAmdBlasOrder order, clAmdBlasSide side, clAmdBlasUplo uplo, size_t M, size_t N, cl_double2 alpha, const cl_mem A, size_t offa, size_t lda, const cl_mem B, size_t offb, size_t ldb, cl_double2 beta, cl_mem C, size_t offc, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZsyr2k)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose transAB, size_t N, size_t K, DoubleComplex alpha, const cl_mem A, size_t lda, const cl_mem B, size_t ldb, DoubleComplex beta, cl_mem C, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus 
(*clAmdBlasZsyr2kEx)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose transAB, size_t N, size_t K, DoubleComplex alpha, const cl_mem A, size_t offA, size_t lda, const cl_mem B, size_t offB, size_t ldb, DoubleComplex beta, cl_mem C, size_t offC, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZsyrk)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose transA, size_t N, size_t K, DoubleComplex alpha, const cl_mem A, size_t lda, DoubleComplex beta, cl_mem C, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZsyrkEx)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose transA, size_t N, size_t K, DoubleComplex alpha, const cl_mem A, size_t offA, size_t lda, DoubleComplex beta, cl_mem C, size_t offC, size_t ldc, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZtbmv)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose trans, clAmdBlasDiag diag, size_t N, size_t K, const cl_mem A, size_t offa, size_t lda, cl_mem X, size_t offx, int incx, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZtbsv)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose trans, clAmdBlasDiag diag, size_t N, size_t K, const cl_mem A, size_t offa, size_t lda, cl_mem X, size_t offx, int incx, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZtpmv)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose trans, clAmdBlasDiag diag, size_t N, const cl_mem AP, size_t offa, cl_mem X, size_t offx, int incx, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZtpsv)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose trans, clAmdBlasDiag diag, size_t N, const cl_mem A, size_t offa, cl_mem X, size_t offx, int incx, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZtrmm)(clAmdBlasOrder order, clAmdBlasSide side, clAmdBlasUplo uplo, clAmdBlasTranspose transA, clAmdBlasDiag diag, size_t M, size_t N, DoubleComplex alpha, const cl_mem A, size_t lda, cl_mem B, size_t ldb, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZtrmmEx)(clAmdBlasOrder order, clAmdBlasSide side, clAmdBlasUplo uplo, clAmdBlasTranspose transA, clAmdBlasDiag diag, size_t M, size_t N, DoubleComplex alpha, const cl_mem A, size_t offA, size_t lda, cl_mem B, size_t offB, size_t ldb, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); 
+//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZtrmv)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose trans, clAmdBlasDiag diag, size_t N, const cl_mem A, size_t offa, size_t lda, cl_mem X, size_t offx, int incx, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZtrsm)(clAmdBlasOrder order, clAmdBlasSide side, clAmdBlasUplo uplo, clAmdBlasTranspose transA, clAmdBlasDiag diag, size_t M, size_t N, DoubleComplex alpha, const cl_mem A, size_t lda, cl_mem B, size_t ldb, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZtrsmEx)(clAmdBlasOrder order, clAmdBlasSide side, clAmdBlasUplo uplo, clAmdBlasTranspose transA, clAmdBlasDiag diag, size_t M, size_t N, DoubleComplex alpha, const cl_mem A, size_t offA, size_t lda, cl_mem B, size_t offB, size_t ldb, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasZtrsv)(clAmdBlasOrder order, clAmdBlasUplo uplo, clAmdBlasTranspose trans, clAmdBlasDiag diag, size_t N, const cl_mem A, size_t offa, size_t lda, cl_mem X, size_t offx, int incx, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasiCamax)(size_t N, cl_mem iMax, size_t offiMax, const cl_mem X, size_t offx, int incx, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasiDamax)(size_t N, cl_mem iMax, size_t offiMax, const cl_mem X, size_t offx, int incx, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasiSamax)(size_t N, cl_mem iMax, size_t offiMax, const cl_mem X, size_t offx, int incx, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); +//extern CL_RUNTIME_EXPORT clAmdBlasStatus (*clAmdBlasiZamax)(size_t N, cl_mem iMax, size_t offiMax, const cl_mem X, size_t offx, int incx, cl_mem scratchBuff, cl_uint numCommandQueues, cl_command_queue* commandQueues, cl_uint numEventsInWaitList, const cl_event* eventWaitList, cl_event* events); diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/autogenerated/opencl_clamdfft.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/autogenerated/opencl_clamdfft.hpp new file mode 100644 index 00000000..1457d7eb --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/autogenerated/opencl_clamdfft.hpp @@ -0,0 +1,142 @@ +// +// AUTOGENERATED, DO NOT EDIT +// +#ifndef OPENCV_CORE_OCL_RUNTIME_CLAMDFFT_HPP +#error "Invalid usage" +#endif + +// generated by parser_clamdfft.py +#define clAmdFftBakePlan clAmdFftBakePlan_ +#define clAmdFftCopyPlan clAmdFftCopyPlan_ +#define clAmdFftCreateDefaultPlan clAmdFftCreateDefaultPlan_ +#define clAmdFftDestroyPlan clAmdFftDestroyPlan_ +#define 
clAmdFftEnqueueTransform clAmdFftEnqueueTransform_ +#define clAmdFftGetLayout clAmdFftGetLayout_ +#define clAmdFftGetPlanBatchSize clAmdFftGetPlanBatchSize_ +#define clAmdFftGetPlanContext clAmdFftGetPlanContext_ +#define clAmdFftGetPlanDim clAmdFftGetPlanDim_ +#define clAmdFftGetPlanDistance clAmdFftGetPlanDistance_ +#define clAmdFftGetPlanInStride clAmdFftGetPlanInStride_ +#define clAmdFftGetPlanLength clAmdFftGetPlanLength_ +#define clAmdFftGetPlanOutStride clAmdFftGetPlanOutStride_ +#define clAmdFftGetPlanPrecision clAmdFftGetPlanPrecision_ +#define clAmdFftGetPlanScale clAmdFftGetPlanScale_ +#define clAmdFftGetPlanTransposeResult clAmdFftGetPlanTransposeResult_ +#define clAmdFftGetResultLocation clAmdFftGetResultLocation_ +#define clAmdFftGetTmpBufSize clAmdFftGetTmpBufSize_ +#define clAmdFftGetVersion clAmdFftGetVersion_ +#define clAmdFftSetLayout clAmdFftSetLayout_ +#define clAmdFftSetPlanBatchSize clAmdFftSetPlanBatchSize_ +#define clAmdFftSetPlanDim clAmdFftSetPlanDim_ +#define clAmdFftSetPlanDistance clAmdFftSetPlanDistance_ +#define clAmdFftSetPlanInStride clAmdFftSetPlanInStride_ +#define clAmdFftSetPlanLength clAmdFftSetPlanLength_ +#define clAmdFftSetPlanOutStride clAmdFftSetPlanOutStride_ +#define clAmdFftSetPlanPrecision clAmdFftSetPlanPrecision_ +#define clAmdFftSetPlanScale clAmdFftSetPlanScale_ +#define clAmdFftSetPlanTransposeResult clAmdFftSetPlanTransposeResult_ +#define clAmdFftSetResultLocation clAmdFftSetResultLocation_ +#define clAmdFftSetup clAmdFftSetup_ +#define clAmdFftTeardown clAmdFftTeardown_ + +#include <clAmdFft.h> + +// generated by parser_clamdfft.py +#undef clAmdFftBakePlan +#define clAmdFftBakePlan clAmdFftBakePlan_pfn +#undef clAmdFftCopyPlan +//#define clAmdFftCopyPlan clAmdFftCopyPlan_pfn +#undef clAmdFftCreateDefaultPlan +#define clAmdFftCreateDefaultPlan clAmdFftCreateDefaultPlan_pfn +#undef clAmdFftDestroyPlan +#define clAmdFftDestroyPlan clAmdFftDestroyPlan_pfn +#undef clAmdFftEnqueueTransform +#define clAmdFftEnqueueTransform clAmdFftEnqueueTransform_pfn +#undef clAmdFftGetLayout +//#define clAmdFftGetLayout clAmdFftGetLayout_pfn +#undef clAmdFftGetPlanBatchSize +//#define clAmdFftGetPlanBatchSize clAmdFftGetPlanBatchSize_pfn +#undef clAmdFftGetPlanContext +//#define clAmdFftGetPlanContext clAmdFftGetPlanContext_pfn +#undef clAmdFftGetPlanDim +//#define clAmdFftGetPlanDim clAmdFftGetPlanDim_pfn +#undef clAmdFftGetPlanDistance +//#define clAmdFftGetPlanDistance clAmdFftGetPlanDistance_pfn +#undef clAmdFftGetPlanInStride +//#define clAmdFftGetPlanInStride clAmdFftGetPlanInStride_pfn +#undef clAmdFftGetPlanLength +//#define clAmdFftGetPlanLength clAmdFftGetPlanLength_pfn +#undef clAmdFftGetPlanOutStride +//#define clAmdFftGetPlanOutStride clAmdFftGetPlanOutStride_pfn +#undef clAmdFftGetPlanPrecision +//#define clAmdFftGetPlanPrecision clAmdFftGetPlanPrecision_pfn +#undef clAmdFftGetPlanScale +//#define clAmdFftGetPlanScale clAmdFftGetPlanScale_pfn +#undef clAmdFftGetPlanTransposeResult +//#define clAmdFftGetPlanTransposeResult clAmdFftGetPlanTransposeResult_pfn +#undef clAmdFftGetResultLocation +//#define clAmdFftGetResultLocation clAmdFftGetResultLocation_pfn +#undef clAmdFftGetTmpBufSize +#define clAmdFftGetTmpBufSize clAmdFftGetTmpBufSize_pfn +#undef clAmdFftGetVersion +#define clAmdFftGetVersion clAmdFftGetVersion_pfn +#undef clAmdFftSetLayout +#define clAmdFftSetLayout clAmdFftSetLayout_pfn +#undef clAmdFftSetPlanBatchSize +#define clAmdFftSetPlanBatchSize clAmdFftSetPlanBatchSize_pfn +#undef clAmdFftSetPlanDim +//#define clAmdFftSetPlanDim
clAmdFftSetPlanDim_pfn +#undef clAmdFftSetPlanDistance +#define clAmdFftSetPlanDistance clAmdFftSetPlanDistance_pfn +#undef clAmdFftSetPlanInStride +#define clAmdFftSetPlanInStride clAmdFftSetPlanInStride_pfn +#undef clAmdFftSetPlanLength +//#define clAmdFftSetPlanLength clAmdFftSetPlanLength_pfn +#undef clAmdFftSetPlanOutStride +#define clAmdFftSetPlanOutStride clAmdFftSetPlanOutStride_pfn +#undef clAmdFftSetPlanPrecision +#define clAmdFftSetPlanPrecision clAmdFftSetPlanPrecision_pfn +#undef clAmdFftSetPlanScale +#define clAmdFftSetPlanScale clAmdFftSetPlanScale_pfn +#undef clAmdFftSetPlanTransposeResult +//#define clAmdFftSetPlanTransposeResult clAmdFftSetPlanTransposeResult_pfn +#undef clAmdFftSetResultLocation +#define clAmdFftSetResultLocation clAmdFftSetResultLocation_pfn +#undef clAmdFftSetup +#define clAmdFftSetup clAmdFftSetup_pfn +#undef clAmdFftTeardown +#define clAmdFftTeardown clAmdFftTeardown_pfn + +// generated by parser_clamdfft.py +extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftBakePlan)(clAmdFftPlanHandle plHandle, cl_uint numQueues, cl_command_queue* commQueueFFT, void (CL_CALLBACK* pfn_notify) (clAmdFftPlanHandle plHandle, void* user_data), void* user_data); +//extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftCopyPlan)(clAmdFftPlanHandle* out_plHandle, cl_context new_context, clAmdFftPlanHandle in_plHandle); +extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftCreateDefaultPlan)(clAmdFftPlanHandle* plHandle, cl_context context, const clAmdFftDim dim, const size_t* clLengths); +extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftDestroyPlan)(clAmdFftPlanHandle* plHandle); +extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftEnqueueTransform)(clAmdFftPlanHandle plHandle, clAmdFftDirection dir, cl_uint numQueuesAndEvents, cl_command_queue* commQueues, cl_uint numWaitEvents, const cl_event* waitEvents, cl_event* outEvents, cl_mem* inputBuffers, cl_mem* outputBuffers, cl_mem tmpBuffer); +//extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftGetLayout)(const clAmdFftPlanHandle plHandle, clAmdFftLayout* iLayout, clAmdFftLayout* oLayout); +//extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftGetPlanBatchSize)(const clAmdFftPlanHandle plHandle, size_t* batchSize); +//extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftGetPlanContext)(const clAmdFftPlanHandle plHandle, cl_context* context); +//extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftGetPlanDim)(const clAmdFftPlanHandle plHandle, clAmdFftDim* dim, cl_uint* size); +//extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftGetPlanDistance)(const clAmdFftPlanHandle plHandle, size_t* iDist, size_t* oDist); +//extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftGetPlanInStride)(const clAmdFftPlanHandle plHandle, const clAmdFftDim dim, size_t* clStrides); +//extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftGetPlanLength)(const clAmdFftPlanHandle plHandle, const clAmdFftDim dim, size_t* clLengths); +//extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftGetPlanOutStride)(const clAmdFftPlanHandle plHandle, const clAmdFftDim dim, size_t* clStrides); +//extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftGetPlanPrecision)(const clAmdFftPlanHandle plHandle, clAmdFftPrecision* precision); +//extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftGetPlanScale)(const clAmdFftPlanHandle plHandle, clAmdFftDirection dir, cl_float* scale); +//extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftGetPlanTransposeResult)(const clAmdFftPlanHandle plHandle, clAmdFftResultTransposed* transposed); +//extern CL_RUNTIME_EXPORT clAmdFftStatus 
(*clAmdFftGetResultLocation)(const clAmdFftPlanHandle plHandle, clAmdFftResultLocation* placeness); +extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftGetTmpBufSize)(const clAmdFftPlanHandle plHandle, size_t* buffersize); +extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftGetVersion)(cl_uint* major, cl_uint* minor, cl_uint* patch); +extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftSetLayout)(clAmdFftPlanHandle plHandle, clAmdFftLayout iLayout, clAmdFftLayout oLayout); +extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftSetPlanBatchSize)(clAmdFftPlanHandle plHandle, size_t batchSize); +//extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftSetPlanDim)(clAmdFftPlanHandle plHandle, const clAmdFftDim dim); +extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftSetPlanDistance)(clAmdFftPlanHandle plHandle, size_t iDist, size_t oDist); +extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftSetPlanInStride)(clAmdFftPlanHandle plHandle, const clAmdFftDim dim, size_t* clStrides); +//extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftSetPlanLength)(clAmdFftPlanHandle plHandle, const clAmdFftDim dim, const size_t* clLengths); +extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftSetPlanOutStride)(clAmdFftPlanHandle plHandle, const clAmdFftDim dim, size_t* clStrides); +extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftSetPlanPrecision)(clAmdFftPlanHandle plHandle, clAmdFftPrecision precision); +extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftSetPlanScale)(clAmdFftPlanHandle plHandle, clAmdFftDirection dir, cl_float scale); +//extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftSetPlanTransposeResult)(clAmdFftPlanHandle plHandle, clAmdFftResultTransposed transposed); +extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftSetResultLocation)(clAmdFftPlanHandle plHandle, clAmdFftResultLocation placeness); +extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftSetup)(const clAmdFftSetupData* setupData); +extern CL_RUNTIME_EXPORT clAmdFftStatus (*clAmdFftTeardown)(); diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/autogenerated/opencl_core.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/autogenerated/opencl_core.hpp new file mode 100644 index 00000000..28618a1f --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/autogenerated/opencl_core.hpp @@ -0,0 +1,371 @@ +// +// AUTOGENERATED, DO NOT EDIT +// +#ifndef OPENCV_CORE_OCL_RUNTIME_OPENCL_CORE_HPP +#error "Invalid usage" +#endif + +// generated by parser_cl.py +#define clBuildProgram clBuildProgram_ +#define clCompileProgram clCompileProgram_ +#define clCreateBuffer clCreateBuffer_ +#define clCreateCommandQueue clCreateCommandQueue_ +#define clCreateContext clCreateContext_ +#define clCreateContextFromType clCreateContextFromType_ +#define clCreateImage clCreateImage_ +#define clCreateImage2D clCreateImage2D_ +#define clCreateImage3D clCreateImage3D_ +#define clCreateKernel clCreateKernel_ +#define clCreateKernelsInProgram clCreateKernelsInProgram_ +#define clCreateProgramWithBinary clCreateProgramWithBinary_ +#define clCreateProgramWithBuiltInKernels clCreateProgramWithBuiltInKernels_ +#define clCreateProgramWithSource clCreateProgramWithSource_ +#define clCreateSampler clCreateSampler_ +#define clCreateSubBuffer clCreateSubBuffer_ +#define clCreateSubDevices clCreateSubDevices_ +#define clCreateUserEvent clCreateUserEvent_ +#define clEnqueueBarrier clEnqueueBarrier_ +#define clEnqueueBarrierWithWaitList clEnqueueBarrierWithWaitList_ +#define clEnqueueCopyBuffer clEnqueueCopyBuffer_ +#define 
clEnqueueCopyBufferRect clEnqueueCopyBufferRect_ +#define clEnqueueCopyBufferToImage clEnqueueCopyBufferToImage_ +#define clEnqueueCopyImage clEnqueueCopyImage_ +#define clEnqueueCopyImageToBuffer clEnqueueCopyImageToBuffer_ +#define clEnqueueFillBuffer clEnqueueFillBuffer_ +#define clEnqueueFillImage clEnqueueFillImage_ +#define clEnqueueMapBuffer clEnqueueMapBuffer_ +#define clEnqueueMapImage clEnqueueMapImage_ +#define clEnqueueMarker clEnqueueMarker_ +#define clEnqueueMarkerWithWaitList clEnqueueMarkerWithWaitList_ +#define clEnqueueMigrateMemObjects clEnqueueMigrateMemObjects_ +#define clEnqueueNDRangeKernel clEnqueueNDRangeKernel_ +#define clEnqueueNativeKernel clEnqueueNativeKernel_ +#define clEnqueueReadBuffer clEnqueueReadBuffer_ +#define clEnqueueReadBufferRect clEnqueueReadBufferRect_ +#define clEnqueueReadImage clEnqueueReadImage_ +#define clEnqueueTask clEnqueueTask_ +#define clEnqueueUnmapMemObject clEnqueueUnmapMemObject_ +#define clEnqueueWaitForEvents clEnqueueWaitForEvents_ +#define clEnqueueWriteBuffer clEnqueueWriteBuffer_ +#define clEnqueueWriteBufferRect clEnqueueWriteBufferRect_ +#define clEnqueueWriteImage clEnqueueWriteImage_ +#define clFinish clFinish_ +#define clFlush clFlush_ +#define clGetCommandQueueInfo clGetCommandQueueInfo_ +#define clGetContextInfo clGetContextInfo_ +#define clGetDeviceIDs clGetDeviceIDs_ +#define clGetDeviceInfo clGetDeviceInfo_ +#define clGetEventInfo clGetEventInfo_ +#define clGetEventProfilingInfo clGetEventProfilingInfo_ +#define clGetExtensionFunctionAddress clGetExtensionFunctionAddress_ +#define clGetExtensionFunctionAddressForPlatform clGetExtensionFunctionAddressForPlatform_ +#define clGetImageInfo clGetImageInfo_ +#define clGetKernelArgInfo clGetKernelArgInfo_ +#define clGetKernelInfo clGetKernelInfo_ +#define clGetKernelWorkGroupInfo clGetKernelWorkGroupInfo_ +#define clGetMemObjectInfo clGetMemObjectInfo_ +#define clGetPlatformIDs clGetPlatformIDs_ +#define clGetPlatformInfo clGetPlatformInfo_ +#define clGetProgramBuildInfo clGetProgramBuildInfo_ +#define clGetProgramInfo clGetProgramInfo_ +#define clGetSamplerInfo clGetSamplerInfo_ +#define clGetSupportedImageFormats clGetSupportedImageFormats_ +#define clLinkProgram clLinkProgram_ +#define clReleaseCommandQueue clReleaseCommandQueue_ +#define clReleaseContext clReleaseContext_ +#define clReleaseDevice clReleaseDevice_ +#define clReleaseEvent clReleaseEvent_ +#define clReleaseKernel clReleaseKernel_ +#define clReleaseMemObject clReleaseMemObject_ +#define clReleaseProgram clReleaseProgram_ +#define clReleaseSampler clReleaseSampler_ +#define clRetainCommandQueue clRetainCommandQueue_ +#define clRetainContext clRetainContext_ +#define clRetainDevice clRetainDevice_ +#define clRetainEvent clRetainEvent_ +#define clRetainKernel clRetainKernel_ +#define clRetainMemObject clRetainMemObject_ +#define clRetainProgram clRetainProgram_ +#define clRetainSampler clRetainSampler_ +#define clSetEventCallback clSetEventCallback_ +#define clSetKernelArg clSetKernelArg_ +#define clSetMemObjectDestructorCallback clSetMemObjectDestructorCallback_ +#define clSetUserEventStatus clSetUserEventStatus_ +#define clUnloadCompiler clUnloadCompiler_ +#define clUnloadPlatformCompiler clUnloadPlatformCompiler_ +#define clWaitForEvents clWaitForEvents_ + +#if defined __APPLE__ +#define CL_SILENCE_DEPRECATION +#include <OpenCL/cl.h> +#else +#include <CL/cl.h> +#endif + +// generated by parser_cl.py +#undef clBuildProgram +#define clBuildProgram clBuildProgram_pfn +#undef clCompileProgram +#define clCompileProgram
clCompileProgram_pfn +#undef clCreateBuffer +#define clCreateBuffer clCreateBuffer_pfn +#undef clCreateCommandQueue +#define clCreateCommandQueue clCreateCommandQueue_pfn +#undef clCreateContext +#define clCreateContext clCreateContext_pfn +#undef clCreateContextFromType +#define clCreateContextFromType clCreateContextFromType_pfn +#undef clCreateImage +#define clCreateImage clCreateImage_pfn +#undef clCreateImage2D +#define clCreateImage2D clCreateImage2D_pfn +#undef clCreateImage3D +#define clCreateImage3D clCreateImage3D_pfn +#undef clCreateKernel +#define clCreateKernel clCreateKernel_pfn +#undef clCreateKernelsInProgram +#define clCreateKernelsInProgram clCreateKernelsInProgram_pfn +#undef clCreateProgramWithBinary +#define clCreateProgramWithBinary clCreateProgramWithBinary_pfn +#undef clCreateProgramWithBuiltInKernels +#define clCreateProgramWithBuiltInKernels clCreateProgramWithBuiltInKernels_pfn +#undef clCreateProgramWithSource +#define clCreateProgramWithSource clCreateProgramWithSource_pfn +#undef clCreateSampler +#define clCreateSampler clCreateSampler_pfn +#undef clCreateSubBuffer +#define clCreateSubBuffer clCreateSubBuffer_pfn +#undef clCreateSubDevices +#define clCreateSubDevices clCreateSubDevices_pfn +#undef clCreateUserEvent +#define clCreateUserEvent clCreateUserEvent_pfn +#undef clEnqueueBarrier +#define clEnqueueBarrier clEnqueueBarrier_pfn +#undef clEnqueueBarrierWithWaitList +#define clEnqueueBarrierWithWaitList clEnqueueBarrierWithWaitList_pfn +#undef clEnqueueCopyBuffer +#define clEnqueueCopyBuffer clEnqueueCopyBuffer_pfn +#undef clEnqueueCopyBufferRect +#define clEnqueueCopyBufferRect clEnqueueCopyBufferRect_pfn +#undef clEnqueueCopyBufferToImage +#define clEnqueueCopyBufferToImage clEnqueueCopyBufferToImage_pfn +#undef clEnqueueCopyImage +#define clEnqueueCopyImage clEnqueueCopyImage_pfn +#undef clEnqueueCopyImageToBuffer +#define clEnqueueCopyImageToBuffer clEnqueueCopyImageToBuffer_pfn +#undef clEnqueueFillBuffer +#define clEnqueueFillBuffer clEnqueueFillBuffer_pfn +#undef clEnqueueFillImage +#define clEnqueueFillImage clEnqueueFillImage_pfn +#undef clEnqueueMapBuffer +#define clEnqueueMapBuffer clEnqueueMapBuffer_pfn +#undef clEnqueueMapImage +#define clEnqueueMapImage clEnqueueMapImage_pfn +#undef clEnqueueMarker +#define clEnqueueMarker clEnqueueMarker_pfn +#undef clEnqueueMarkerWithWaitList +#define clEnqueueMarkerWithWaitList clEnqueueMarkerWithWaitList_pfn +#undef clEnqueueMigrateMemObjects +#define clEnqueueMigrateMemObjects clEnqueueMigrateMemObjects_pfn +#undef clEnqueueNDRangeKernel +#define clEnqueueNDRangeKernel clEnqueueNDRangeKernel_pfn +#undef clEnqueueNativeKernel +#define clEnqueueNativeKernel clEnqueueNativeKernel_pfn +#undef clEnqueueReadBuffer +#define clEnqueueReadBuffer clEnqueueReadBuffer_pfn +#undef clEnqueueReadBufferRect +#define clEnqueueReadBufferRect clEnqueueReadBufferRect_pfn +#undef clEnqueueReadImage +#define clEnqueueReadImage clEnqueueReadImage_pfn +#undef clEnqueueTask +#define clEnqueueTask clEnqueueTask_pfn +#undef clEnqueueUnmapMemObject +#define clEnqueueUnmapMemObject clEnqueueUnmapMemObject_pfn +#undef clEnqueueWaitForEvents +#define clEnqueueWaitForEvents clEnqueueWaitForEvents_pfn +#undef clEnqueueWriteBuffer +#define clEnqueueWriteBuffer clEnqueueWriteBuffer_pfn +#undef clEnqueueWriteBufferRect +#define clEnqueueWriteBufferRect clEnqueueWriteBufferRect_pfn +#undef clEnqueueWriteImage +#define clEnqueueWriteImage clEnqueueWriteImage_pfn +#undef clFinish +#define clFinish clFinish_pfn +#undef clFlush +#define clFlush 
clFlush_pfn +#undef clGetCommandQueueInfo +#define clGetCommandQueueInfo clGetCommandQueueInfo_pfn +#undef clGetContextInfo +#define clGetContextInfo clGetContextInfo_pfn +#undef clGetDeviceIDs +#define clGetDeviceIDs clGetDeviceIDs_pfn +#undef clGetDeviceInfo +#define clGetDeviceInfo clGetDeviceInfo_pfn +#undef clGetEventInfo +#define clGetEventInfo clGetEventInfo_pfn +#undef clGetEventProfilingInfo +#define clGetEventProfilingInfo clGetEventProfilingInfo_pfn +#undef clGetExtensionFunctionAddress +#define clGetExtensionFunctionAddress clGetExtensionFunctionAddress_pfn +#undef clGetExtensionFunctionAddressForPlatform +#define clGetExtensionFunctionAddressForPlatform clGetExtensionFunctionAddressForPlatform_pfn +#undef clGetImageInfo +#define clGetImageInfo clGetImageInfo_pfn +#undef clGetKernelArgInfo +#define clGetKernelArgInfo clGetKernelArgInfo_pfn +#undef clGetKernelInfo +#define clGetKernelInfo clGetKernelInfo_pfn +#undef clGetKernelWorkGroupInfo +#define clGetKernelWorkGroupInfo clGetKernelWorkGroupInfo_pfn +#undef clGetMemObjectInfo +#define clGetMemObjectInfo clGetMemObjectInfo_pfn +#undef clGetPlatformIDs +#define clGetPlatformIDs clGetPlatformIDs_pfn +#undef clGetPlatformInfo +#define clGetPlatformInfo clGetPlatformInfo_pfn +#undef clGetProgramBuildInfo +#define clGetProgramBuildInfo clGetProgramBuildInfo_pfn +#undef clGetProgramInfo +#define clGetProgramInfo clGetProgramInfo_pfn +#undef clGetSamplerInfo +#define clGetSamplerInfo clGetSamplerInfo_pfn +#undef clGetSupportedImageFormats +#define clGetSupportedImageFormats clGetSupportedImageFormats_pfn +#undef clLinkProgram +#define clLinkProgram clLinkProgram_pfn +#undef clReleaseCommandQueue +#define clReleaseCommandQueue clReleaseCommandQueue_pfn +#undef clReleaseContext +#define clReleaseContext clReleaseContext_pfn +#undef clReleaseDevice +#define clReleaseDevice clReleaseDevice_pfn +#undef clReleaseEvent +#define clReleaseEvent clReleaseEvent_pfn +#undef clReleaseKernel +#define clReleaseKernel clReleaseKernel_pfn +#undef clReleaseMemObject +#define clReleaseMemObject clReleaseMemObject_pfn +#undef clReleaseProgram +#define clReleaseProgram clReleaseProgram_pfn +#undef clReleaseSampler +#define clReleaseSampler clReleaseSampler_pfn +#undef clRetainCommandQueue +#define clRetainCommandQueue clRetainCommandQueue_pfn +#undef clRetainContext +#define clRetainContext clRetainContext_pfn +#undef clRetainDevice +#define clRetainDevice clRetainDevice_pfn +#undef clRetainEvent +#define clRetainEvent clRetainEvent_pfn +#undef clRetainKernel +#define clRetainKernel clRetainKernel_pfn +#undef clRetainMemObject +#define clRetainMemObject clRetainMemObject_pfn +#undef clRetainProgram +#define clRetainProgram clRetainProgram_pfn +#undef clRetainSampler +#define clRetainSampler clRetainSampler_pfn +#undef clSetEventCallback +#define clSetEventCallback clSetEventCallback_pfn +#undef clSetKernelArg +#define clSetKernelArg clSetKernelArg_pfn +#undef clSetMemObjectDestructorCallback +#define clSetMemObjectDestructorCallback clSetMemObjectDestructorCallback_pfn +#undef clSetUserEventStatus +#define clSetUserEventStatus clSetUserEventStatus_pfn +#undef clUnloadCompiler +#define clUnloadCompiler clUnloadCompiler_pfn +#undef clUnloadPlatformCompiler +#define clUnloadPlatformCompiler clUnloadPlatformCompiler_pfn +#undef clWaitForEvents +#define clWaitForEvents clWaitForEvents_pfn + +// generated by parser_cl.py +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clBuildProgram)(cl_program, cl_uint, const cl_device_id*, const char*, void (CL_CALLBACK*) 
(cl_program, void*), void*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clCompileProgram)(cl_program, cl_uint, const cl_device_id*, const char*, cl_uint, const cl_program*, const char**, void (CL_CALLBACK*) (cl_program, void*), void*); +extern CL_RUNTIME_EXPORT cl_mem (CL_API_CALL*clCreateBuffer)(cl_context, cl_mem_flags, size_t, void*, cl_int*); +extern CL_RUNTIME_EXPORT cl_command_queue (CL_API_CALL*clCreateCommandQueue)(cl_context, cl_device_id, cl_command_queue_properties, cl_int*); +extern CL_RUNTIME_EXPORT cl_context (CL_API_CALL*clCreateContext)(const cl_context_properties*, cl_uint, const cl_device_id*, void (CL_CALLBACK*) (const char*, const void*, size_t, void*), void*, cl_int*); +extern CL_RUNTIME_EXPORT cl_context (CL_API_CALL*clCreateContextFromType)(const cl_context_properties*, cl_device_type, void (CL_CALLBACK*) (const char*, const void*, size_t, void*), void*, cl_int*); +extern CL_RUNTIME_EXPORT cl_mem (CL_API_CALL*clCreateImage)(cl_context, cl_mem_flags, const cl_image_format*, const cl_image_desc*, void*, cl_int*); +extern CL_RUNTIME_EXPORT cl_mem (CL_API_CALL*clCreateImage2D)(cl_context, cl_mem_flags, const cl_image_format*, size_t, size_t, size_t, void*, cl_int*); +extern CL_RUNTIME_EXPORT cl_mem (CL_API_CALL*clCreateImage3D)(cl_context, cl_mem_flags, const cl_image_format*, size_t, size_t, size_t, size_t, size_t, void*, cl_int*); +extern CL_RUNTIME_EXPORT cl_kernel (CL_API_CALL*clCreateKernel)(cl_program, const char*, cl_int*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clCreateKernelsInProgram)(cl_program, cl_uint, cl_kernel*, cl_uint*); +extern CL_RUNTIME_EXPORT cl_program (CL_API_CALL*clCreateProgramWithBinary)(cl_context, cl_uint, const cl_device_id*, const size_t*, const unsigned char**, cl_int*, cl_int*); +extern CL_RUNTIME_EXPORT cl_program (CL_API_CALL*clCreateProgramWithBuiltInKernels)(cl_context, cl_uint, const cl_device_id*, const char*, cl_int*); +extern CL_RUNTIME_EXPORT cl_program (CL_API_CALL*clCreateProgramWithSource)(cl_context, cl_uint, const char**, const size_t*, cl_int*); +extern CL_RUNTIME_EXPORT cl_sampler (CL_API_CALL*clCreateSampler)(cl_context, cl_bool, cl_addressing_mode, cl_filter_mode, cl_int*); +extern CL_RUNTIME_EXPORT cl_mem (CL_API_CALL*clCreateSubBuffer)(cl_mem, cl_mem_flags, cl_buffer_create_type, const void*, cl_int*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clCreateSubDevices)(cl_device_id, const cl_device_partition_property*, cl_uint, cl_device_id*, cl_uint*); +extern CL_RUNTIME_EXPORT cl_event (CL_API_CALL*clCreateUserEvent)(cl_context, cl_int*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueBarrier)(cl_command_queue); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueBarrierWithWaitList)(cl_command_queue, cl_uint, const cl_event*, cl_event*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueCopyBuffer)(cl_command_queue, cl_mem, cl_mem, size_t, size_t, size_t, cl_uint, const cl_event*, cl_event*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueCopyBufferRect)(cl_command_queue, cl_mem, cl_mem, const size_t*, const size_t*, const size_t*, size_t, size_t, size_t, size_t, cl_uint, const cl_event*, cl_event*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueCopyBufferToImage)(cl_command_queue, cl_mem, cl_mem, size_t, const size_t*, const size_t*, cl_uint, const cl_event*, cl_event*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueCopyImage)(cl_command_queue, cl_mem, cl_mem, const size_t*, const size_t*, const size_t*, cl_uint, const cl_event*, cl_event*); +extern CL_RUNTIME_EXPORT 
cl_int (CL_API_CALL*clEnqueueCopyImageToBuffer)(cl_command_queue, cl_mem, cl_mem, const size_t*, const size_t*, size_t, cl_uint, const cl_event*, cl_event*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueFillBuffer)(cl_command_queue, cl_mem, const void*, size_t, size_t, size_t, cl_uint, const cl_event*, cl_event*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueFillImage)(cl_command_queue, cl_mem, const void*, const size_t*, const size_t*, cl_uint, const cl_event*, cl_event*); +extern CL_RUNTIME_EXPORT void* (CL_API_CALL*clEnqueueMapBuffer)(cl_command_queue, cl_mem, cl_bool, cl_map_flags, size_t, size_t, cl_uint, const cl_event*, cl_event*, cl_int*); +extern CL_RUNTIME_EXPORT void* (CL_API_CALL*clEnqueueMapImage)(cl_command_queue, cl_mem, cl_bool, cl_map_flags, const size_t*, const size_t*, size_t*, size_t*, cl_uint, const cl_event*, cl_event*, cl_int*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueMarker)(cl_command_queue, cl_event*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueMarkerWithWaitList)(cl_command_queue, cl_uint, const cl_event*, cl_event*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueMigrateMemObjects)(cl_command_queue, cl_uint, const cl_mem*, cl_mem_migration_flags, cl_uint, const cl_event*, cl_event*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueNDRangeKernel)(cl_command_queue, cl_kernel, cl_uint, const size_t*, const size_t*, const size_t*, cl_uint, const cl_event*, cl_event*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueNativeKernel)(cl_command_queue, void (CL_CALLBACK*) (void*), void*, size_t, cl_uint, const cl_mem*, const void**, cl_uint, const cl_event*, cl_event*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueReadBuffer)(cl_command_queue, cl_mem, cl_bool, size_t, size_t, void*, cl_uint, const cl_event*, cl_event*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueReadBufferRect)(cl_command_queue, cl_mem, cl_bool, const size_t*, const size_t*, const size_t*, size_t, size_t, size_t, size_t, void*, cl_uint, const cl_event*, cl_event*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueReadImage)(cl_command_queue, cl_mem, cl_bool, const size_t*, const size_t*, size_t, size_t, void*, cl_uint, const cl_event*, cl_event*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueTask)(cl_command_queue, cl_kernel, cl_uint, const cl_event*, cl_event*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueUnmapMemObject)(cl_command_queue, cl_mem, void*, cl_uint, const cl_event*, cl_event*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueWaitForEvents)(cl_command_queue, cl_uint, const cl_event*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueWriteBuffer)(cl_command_queue, cl_mem, cl_bool, size_t, size_t, const void*, cl_uint, const cl_event*, cl_event*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueWriteBufferRect)(cl_command_queue, cl_mem, cl_bool, const size_t*, const size_t*, const size_t*, size_t, size_t, size_t, size_t, const void*, cl_uint, const cl_event*, cl_event*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueWriteImage)(cl_command_queue, cl_mem, cl_bool, const size_t*, const size_t*, size_t, size_t, const void*, cl_uint, const cl_event*, cl_event*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clFinish)(cl_command_queue); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clFlush)(cl_command_queue); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clGetCommandQueueInfo)(cl_command_queue, cl_command_queue_info, size_t, void*, size_t*); +extern 
CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clGetContextInfo)(cl_context, cl_context_info, size_t, void*, size_t*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clGetDeviceIDs)(cl_platform_id, cl_device_type, cl_uint, cl_device_id*, cl_uint*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clGetDeviceInfo)(cl_device_id, cl_device_info, size_t, void*, size_t*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clGetEventInfo)(cl_event, cl_event_info, size_t, void*, size_t*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clGetEventProfilingInfo)(cl_event, cl_profiling_info, size_t, void*, size_t*); +extern CL_RUNTIME_EXPORT void* (CL_API_CALL*clGetExtensionFunctionAddress)(const char*); +extern CL_RUNTIME_EXPORT void* (CL_API_CALL*clGetExtensionFunctionAddressForPlatform)(cl_platform_id, const char*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clGetImageInfo)(cl_mem, cl_image_info, size_t, void*, size_t*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clGetKernelArgInfo)(cl_kernel, cl_uint, cl_kernel_arg_info, size_t, void*, size_t*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clGetKernelInfo)(cl_kernel, cl_kernel_info, size_t, void*, size_t*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clGetKernelWorkGroupInfo)(cl_kernel, cl_device_id, cl_kernel_work_group_info, size_t, void*, size_t*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clGetMemObjectInfo)(cl_mem, cl_mem_info, size_t, void*, size_t*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clGetPlatformIDs)(cl_uint, cl_platform_id*, cl_uint*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clGetPlatformInfo)(cl_platform_id, cl_platform_info, size_t, void*, size_t*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clGetProgramBuildInfo)(cl_program, cl_device_id, cl_program_build_info, size_t, void*, size_t*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clGetProgramInfo)(cl_program, cl_program_info, size_t, void*, size_t*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clGetSamplerInfo)(cl_sampler, cl_sampler_info, size_t, void*, size_t*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clGetSupportedImageFormats)(cl_context, cl_mem_flags, cl_mem_object_type, cl_uint, cl_image_format*, cl_uint*); +extern CL_RUNTIME_EXPORT cl_program (CL_API_CALL*clLinkProgram)(cl_context, cl_uint, const cl_device_id*, const char*, cl_uint, const cl_program*, void (CL_CALLBACK*) (cl_program, void*), void*, cl_int*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clReleaseCommandQueue)(cl_command_queue); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clReleaseContext)(cl_context); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clReleaseDevice)(cl_device_id); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clReleaseEvent)(cl_event); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clReleaseKernel)(cl_kernel); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clReleaseMemObject)(cl_mem); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clReleaseProgram)(cl_program); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clReleaseSampler)(cl_sampler); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clRetainCommandQueue)(cl_command_queue); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clRetainContext)(cl_context); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clRetainDevice)(cl_device_id); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clRetainEvent)(cl_event); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clRetainKernel)(cl_kernel); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clRetainMemObject)(cl_mem); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clRetainProgram)(cl_program); +extern 
CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clRetainSampler)(cl_sampler); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clSetEventCallback)(cl_event, cl_int, void (CL_CALLBACK*) (cl_event, cl_int, void*), void*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clSetKernelArg)(cl_kernel, cl_uint, size_t, const void*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clSetMemObjectDestructorCallback)(cl_mem, void (CL_CALLBACK*) (cl_mem, void*), void*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clSetUserEventStatus)(cl_event, cl_int); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clUnloadCompiler)(); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clUnloadPlatformCompiler)(cl_platform_id); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clWaitForEvents)(cl_uint, const cl_event*); diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/autogenerated/opencl_core_wrappers.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/autogenerated/opencl_core_wrappers.hpp new file mode 100644 index 00000000..216b22b8 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/autogenerated/opencl_core_wrappers.hpp @@ -0,0 +1,272 @@ +// +// AUTOGENERATED, DO NOT EDIT +// +#ifndef OPENCV_CORE_OCL_RUNTIME_OPENCL_WRAPPERS_HPP +#error "Invalid usage" +#endif + +// generated by parser_cl.py +#undef clBuildProgram +#define clBuildProgram clBuildProgram_fn +inline cl_int clBuildProgram(cl_program p0, cl_uint p1, const cl_device_id* p2, const char* p3, void (CL_CALLBACK*p4) (cl_program, void*), void* p5) { return clBuildProgram_pfn(p0, p1, p2, p3, p4, p5); } +#undef clCompileProgram +#define clCompileProgram clCompileProgram_fn +inline cl_int clCompileProgram(cl_program p0, cl_uint p1, const cl_device_id* p2, const char* p3, cl_uint p4, const cl_program* p5, const char** p6, void (CL_CALLBACK*p7) (cl_program, void*), void* p8) { return clCompileProgram_pfn(p0, p1, p2, p3, p4, p5, p6, p7, p8); } +#undef clCreateBuffer +#define clCreateBuffer clCreateBuffer_fn +inline cl_mem clCreateBuffer(cl_context p0, cl_mem_flags p1, size_t p2, void* p3, cl_int* p4) { return clCreateBuffer_pfn(p0, p1, p2, p3, p4); } +#undef clCreateCommandQueue +#define clCreateCommandQueue clCreateCommandQueue_fn +inline cl_command_queue clCreateCommandQueue(cl_context p0, cl_device_id p1, cl_command_queue_properties p2, cl_int* p3) { return clCreateCommandQueue_pfn(p0, p1, p2, p3); } +#undef clCreateContext +#define clCreateContext clCreateContext_fn +inline cl_context clCreateContext(const cl_context_properties* p0, cl_uint p1, const cl_device_id* p2, void (CL_CALLBACK*p3) (const char*, const void*, size_t, void*), void* p4, cl_int* p5) { return clCreateContext_pfn(p0, p1, p2, p3, p4, p5); } +#undef clCreateContextFromType +#define clCreateContextFromType clCreateContextFromType_fn +inline cl_context clCreateContextFromType(const cl_context_properties* p0, cl_device_type p1, void (CL_CALLBACK*p2) (const char*, const void*, size_t, void*), void* p3, cl_int* p4) { return clCreateContextFromType_pfn(p0, p1, p2, p3, p4); } +#undef clCreateImage +#define clCreateImage clCreateImage_fn +inline cl_mem clCreateImage(cl_context p0, cl_mem_flags p1, const cl_image_format* p2, const cl_image_desc* p3, void* p4, cl_int* p5) { return clCreateImage_pfn(p0, p1, p2, p3, p4, p5); } +#undef clCreateImage2D +#define clCreateImage2D clCreateImage2D_fn +inline cl_mem clCreateImage2D(cl_context p0, cl_mem_flags p1, const cl_image_format* p2, size_t p3, size_t p4, size_t p5, void* p6, cl_int* p7) { return 
clCreateImage2D_pfn(p0, p1, p2, p3, p4, p5, p6, p7); } +#undef clCreateImage3D +#define clCreateImage3D clCreateImage3D_fn +inline cl_mem clCreateImage3D(cl_context p0, cl_mem_flags p1, const cl_image_format* p2, size_t p3, size_t p4, size_t p5, size_t p6, size_t p7, void* p8, cl_int* p9) { return clCreateImage3D_pfn(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); } +#undef clCreateKernel +#define clCreateKernel clCreateKernel_fn +inline cl_kernel clCreateKernel(cl_program p0, const char* p1, cl_int* p2) { return clCreateKernel_pfn(p0, p1, p2); } +#undef clCreateKernelsInProgram +#define clCreateKernelsInProgram clCreateKernelsInProgram_fn +inline cl_int clCreateKernelsInProgram(cl_program p0, cl_uint p1, cl_kernel* p2, cl_uint* p3) { return clCreateKernelsInProgram_pfn(p0, p1, p2, p3); } +#undef clCreateProgramWithBinary +#define clCreateProgramWithBinary clCreateProgramWithBinary_fn +inline cl_program clCreateProgramWithBinary(cl_context p0, cl_uint p1, const cl_device_id* p2, const size_t* p3, const unsigned char** p4, cl_int* p5, cl_int* p6) { return clCreateProgramWithBinary_pfn(p0, p1, p2, p3, p4, p5, p6); } +#undef clCreateProgramWithBuiltInKernels +#define clCreateProgramWithBuiltInKernels clCreateProgramWithBuiltInKernels_fn +inline cl_program clCreateProgramWithBuiltInKernels(cl_context p0, cl_uint p1, const cl_device_id* p2, const char* p3, cl_int* p4) { return clCreateProgramWithBuiltInKernels_pfn(p0, p1, p2, p3, p4); } +#undef clCreateProgramWithSource +#define clCreateProgramWithSource clCreateProgramWithSource_fn +inline cl_program clCreateProgramWithSource(cl_context p0, cl_uint p1, const char** p2, const size_t* p3, cl_int* p4) { return clCreateProgramWithSource_pfn(p0, p1, p2, p3, p4); } +#undef clCreateSampler +#define clCreateSampler clCreateSampler_fn +inline cl_sampler clCreateSampler(cl_context p0, cl_bool p1, cl_addressing_mode p2, cl_filter_mode p3, cl_int* p4) { return clCreateSampler_pfn(p0, p1, p2, p3, p4); } +#undef clCreateSubBuffer +#define clCreateSubBuffer clCreateSubBuffer_fn +inline cl_mem clCreateSubBuffer(cl_mem p0, cl_mem_flags p1, cl_buffer_create_type p2, const void* p3, cl_int* p4) { return clCreateSubBuffer_pfn(p0, p1, p2, p3, p4); } +#undef clCreateSubDevices +#define clCreateSubDevices clCreateSubDevices_fn +inline cl_int clCreateSubDevices(cl_device_id p0, const cl_device_partition_property* p1, cl_uint p2, cl_device_id* p3, cl_uint* p4) { return clCreateSubDevices_pfn(p0, p1, p2, p3, p4); } +#undef clCreateUserEvent +#define clCreateUserEvent clCreateUserEvent_fn +inline cl_event clCreateUserEvent(cl_context p0, cl_int* p1) { return clCreateUserEvent_pfn(p0, p1); } +#undef clEnqueueBarrier +#define clEnqueueBarrier clEnqueueBarrier_fn +inline cl_int clEnqueueBarrier(cl_command_queue p0) { return clEnqueueBarrier_pfn(p0); } +#undef clEnqueueBarrierWithWaitList +#define clEnqueueBarrierWithWaitList clEnqueueBarrierWithWaitList_fn +inline cl_int clEnqueueBarrierWithWaitList(cl_command_queue p0, cl_uint p1, const cl_event* p2, cl_event* p3) { return clEnqueueBarrierWithWaitList_pfn(p0, p1, p2, p3); } +#undef clEnqueueCopyBuffer +#define clEnqueueCopyBuffer clEnqueueCopyBuffer_fn +inline cl_int clEnqueueCopyBuffer(cl_command_queue p0, cl_mem p1, cl_mem p2, size_t p3, size_t p4, size_t p5, cl_uint p6, const cl_event* p7, cl_event* p8) { return clEnqueueCopyBuffer_pfn(p0, p1, p2, p3, p4, p5, p6, p7, p8); } +#undef clEnqueueCopyBufferRect +#define clEnqueueCopyBufferRect clEnqueueCopyBufferRect_fn +inline cl_int clEnqueueCopyBufferRect(cl_command_queue p0, 
cl_mem p1, cl_mem p2, const size_t* p3, const size_t* p4, const size_t* p5, size_t p6, size_t p7, size_t p8, size_t p9, cl_uint p10, const cl_event* p11, cl_event* p12) { return clEnqueueCopyBufferRect_pfn(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12); } +#undef clEnqueueCopyBufferToImage +#define clEnqueueCopyBufferToImage clEnqueueCopyBufferToImage_fn +inline cl_int clEnqueueCopyBufferToImage(cl_command_queue p0, cl_mem p1, cl_mem p2, size_t p3, const size_t* p4, const size_t* p5, cl_uint p6, const cl_event* p7, cl_event* p8) { return clEnqueueCopyBufferToImage_pfn(p0, p1, p2, p3, p4, p5, p6, p7, p8); } +#undef clEnqueueCopyImage +#define clEnqueueCopyImage clEnqueueCopyImage_fn +inline cl_int clEnqueueCopyImage(cl_command_queue p0, cl_mem p1, cl_mem p2, const size_t* p3, const size_t* p4, const size_t* p5, cl_uint p6, const cl_event* p7, cl_event* p8) { return clEnqueueCopyImage_pfn(p0, p1, p2, p3, p4, p5, p6, p7, p8); } +#undef clEnqueueCopyImageToBuffer +#define clEnqueueCopyImageToBuffer clEnqueueCopyImageToBuffer_fn +inline cl_int clEnqueueCopyImageToBuffer(cl_command_queue p0, cl_mem p1, cl_mem p2, const size_t* p3, const size_t* p4, size_t p5, cl_uint p6, const cl_event* p7, cl_event* p8) { return clEnqueueCopyImageToBuffer_pfn(p0, p1, p2, p3, p4, p5, p6, p7, p8); } +#undef clEnqueueFillBuffer +#define clEnqueueFillBuffer clEnqueueFillBuffer_fn +inline cl_int clEnqueueFillBuffer(cl_command_queue p0, cl_mem p1, const void* p2, size_t p3, size_t p4, size_t p5, cl_uint p6, const cl_event* p7, cl_event* p8) { return clEnqueueFillBuffer_pfn(p0, p1, p2, p3, p4, p5, p6, p7, p8); } +#undef clEnqueueFillImage +#define clEnqueueFillImage clEnqueueFillImage_fn +inline cl_int clEnqueueFillImage(cl_command_queue p0, cl_mem p1, const void* p2, const size_t* p3, const size_t* p4, cl_uint p5, const cl_event* p6, cl_event* p7) { return clEnqueueFillImage_pfn(p0, p1, p2, p3, p4, p5, p6, p7); } +#undef clEnqueueMapBuffer +#define clEnqueueMapBuffer clEnqueueMapBuffer_fn +inline void* clEnqueueMapBuffer(cl_command_queue p0, cl_mem p1, cl_bool p2, cl_map_flags p3, size_t p4, size_t p5, cl_uint p6, const cl_event* p7, cl_event* p8, cl_int* p9) { return clEnqueueMapBuffer_pfn(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); } +#undef clEnqueueMapImage +#define clEnqueueMapImage clEnqueueMapImage_fn +inline void* clEnqueueMapImage(cl_command_queue p0, cl_mem p1, cl_bool p2, cl_map_flags p3, const size_t* p4, const size_t* p5, size_t* p6, size_t* p7, cl_uint p8, const cl_event* p9, cl_event* p10, cl_int* p11) { return clEnqueueMapImage_pfn(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11); } +#undef clEnqueueMarker +#define clEnqueueMarker clEnqueueMarker_fn +inline cl_int clEnqueueMarker(cl_command_queue p0, cl_event* p1) { return clEnqueueMarker_pfn(p0, p1); } +#undef clEnqueueMarkerWithWaitList +#define clEnqueueMarkerWithWaitList clEnqueueMarkerWithWaitList_fn +inline cl_int clEnqueueMarkerWithWaitList(cl_command_queue p0, cl_uint p1, const cl_event* p2, cl_event* p3) { return clEnqueueMarkerWithWaitList_pfn(p0, p1, p2, p3); } +#undef clEnqueueMigrateMemObjects +#define clEnqueueMigrateMemObjects clEnqueueMigrateMemObjects_fn +inline cl_int clEnqueueMigrateMemObjects(cl_command_queue p0, cl_uint p1, const cl_mem* p2, cl_mem_migration_flags p3, cl_uint p4, const cl_event* p5, cl_event* p6) { return clEnqueueMigrateMemObjects_pfn(p0, p1, p2, p3, p4, p5, p6); } +#undef clEnqueueNDRangeKernel +#define clEnqueueNDRangeKernel clEnqueueNDRangeKernel_fn +inline cl_int clEnqueueNDRangeKernel(cl_command_queue 
p0, cl_kernel p1, cl_uint p2, const size_t* p3, const size_t* p4, const size_t* p5, cl_uint p6, const cl_event* p7, cl_event* p8) { return clEnqueueNDRangeKernel_pfn(p0, p1, p2, p3, p4, p5, p6, p7, p8); } +#undef clEnqueueNativeKernel +#define clEnqueueNativeKernel clEnqueueNativeKernel_fn +inline cl_int clEnqueueNativeKernel(cl_command_queue p0, void (CL_CALLBACK*p1) (void*), void* p2, size_t p3, cl_uint p4, const cl_mem* p5, const void** p6, cl_uint p7, const cl_event* p8, cl_event* p9) { return clEnqueueNativeKernel_pfn(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); } +#undef clEnqueueReadBuffer +#define clEnqueueReadBuffer clEnqueueReadBuffer_fn +inline cl_int clEnqueueReadBuffer(cl_command_queue p0, cl_mem p1, cl_bool p2, size_t p3, size_t p4, void* p5, cl_uint p6, const cl_event* p7, cl_event* p8) { return clEnqueueReadBuffer_pfn(p0, p1, p2, p3, p4, p5, p6, p7, p8); } +#undef clEnqueueReadBufferRect +#define clEnqueueReadBufferRect clEnqueueReadBufferRect_fn +inline cl_int clEnqueueReadBufferRect(cl_command_queue p0, cl_mem p1, cl_bool p2, const size_t* p3, const size_t* p4, const size_t* p5, size_t p6, size_t p7, size_t p8, size_t p9, void* p10, cl_uint p11, const cl_event* p12, cl_event* p13) { return clEnqueueReadBufferRect_pfn(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13); } +#undef clEnqueueReadImage +#define clEnqueueReadImage clEnqueueReadImage_fn +inline cl_int clEnqueueReadImage(cl_command_queue p0, cl_mem p1, cl_bool p2, const size_t* p3, const size_t* p4, size_t p5, size_t p6, void* p7, cl_uint p8, const cl_event* p9, cl_event* p10) { return clEnqueueReadImage_pfn(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10); } +#undef clEnqueueTask +#define clEnqueueTask clEnqueueTask_fn +inline cl_int clEnqueueTask(cl_command_queue p0, cl_kernel p1, cl_uint p2, const cl_event* p3, cl_event* p4) { return clEnqueueTask_pfn(p0, p1, p2, p3, p4); } +#undef clEnqueueUnmapMemObject +#define clEnqueueUnmapMemObject clEnqueueUnmapMemObject_fn +inline cl_int clEnqueueUnmapMemObject(cl_command_queue p0, cl_mem p1, void* p2, cl_uint p3, const cl_event* p4, cl_event* p5) { return clEnqueueUnmapMemObject_pfn(p0, p1, p2, p3, p4, p5); } +#undef clEnqueueWaitForEvents +#define clEnqueueWaitForEvents clEnqueueWaitForEvents_fn +inline cl_int clEnqueueWaitForEvents(cl_command_queue p0, cl_uint p1, const cl_event* p2) { return clEnqueueWaitForEvents_pfn(p0, p1, p2); } +#undef clEnqueueWriteBuffer +#define clEnqueueWriteBuffer clEnqueueWriteBuffer_fn +inline cl_int clEnqueueWriteBuffer(cl_command_queue p0, cl_mem p1, cl_bool p2, size_t p3, size_t p4, const void* p5, cl_uint p6, const cl_event* p7, cl_event* p8) { return clEnqueueWriteBuffer_pfn(p0, p1, p2, p3, p4, p5, p6, p7, p8); } +#undef clEnqueueWriteBufferRect +#define clEnqueueWriteBufferRect clEnqueueWriteBufferRect_fn +inline cl_int clEnqueueWriteBufferRect(cl_command_queue p0, cl_mem p1, cl_bool p2, const size_t* p3, const size_t* p4, const size_t* p5, size_t p6, size_t p7, size_t p8, size_t p9, const void* p10, cl_uint p11, const cl_event* p12, cl_event* p13) { return clEnqueueWriteBufferRect_pfn(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13); } +#undef clEnqueueWriteImage +#define clEnqueueWriteImage clEnqueueWriteImage_fn +inline cl_int clEnqueueWriteImage(cl_command_queue p0, cl_mem p1, cl_bool p2, const size_t* p3, const size_t* p4, size_t p5, size_t p6, const void* p7, cl_uint p8, const cl_event* p9, cl_event* p10) { return clEnqueueWriteImage_pfn(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10); } +#undef clFinish +#define 
clFinish clFinish_fn +inline cl_int clFinish(cl_command_queue p0) { return clFinish_pfn(p0); } +#undef clFlush +#define clFlush clFlush_fn +inline cl_int clFlush(cl_command_queue p0) { return clFlush_pfn(p0); } +#undef clGetCommandQueueInfo +#define clGetCommandQueueInfo clGetCommandQueueInfo_fn +inline cl_int clGetCommandQueueInfo(cl_command_queue p0, cl_command_queue_info p1, size_t p2, void* p3, size_t* p4) { return clGetCommandQueueInfo_pfn(p0, p1, p2, p3, p4); } +#undef clGetContextInfo +#define clGetContextInfo clGetContextInfo_fn +inline cl_int clGetContextInfo(cl_context p0, cl_context_info p1, size_t p2, void* p3, size_t* p4) { return clGetContextInfo_pfn(p0, p1, p2, p3, p4); } +#undef clGetDeviceIDs +#define clGetDeviceIDs clGetDeviceIDs_fn +inline cl_int clGetDeviceIDs(cl_platform_id p0, cl_device_type p1, cl_uint p2, cl_device_id* p3, cl_uint* p4) { return clGetDeviceIDs_pfn(p0, p1, p2, p3, p4); } +#undef clGetDeviceInfo +#define clGetDeviceInfo clGetDeviceInfo_fn +inline cl_int clGetDeviceInfo(cl_device_id p0, cl_device_info p1, size_t p2, void* p3, size_t* p4) { return clGetDeviceInfo_pfn(p0, p1, p2, p3, p4); } +#undef clGetEventInfo +#define clGetEventInfo clGetEventInfo_fn +inline cl_int clGetEventInfo(cl_event p0, cl_event_info p1, size_t p2, void* p3, size_t* p4) { return clGetEventInfo_pfn(p0, p1, p2, p3, p4); } +#undef clGetEventProfilingInfo +#define clGetEventProfilingInfo clGetEventProfilingInfo_fn +inline cl_int clGetEventProfilingInfo(cl_event p0, cl_profiling_info p1, size_t p2, void* p3, size_t* p4) { return clGetEventProfilingInfo_pfn(p0, p1, p2, p3, p4); } +#undef clGetExtensionFunctionAddress +#define clGetExtensionFunctionAddress clGetExtensionFunctionAddress_fn +inline void* clGetExtensionFunctionAddress(const char* p0) { return clGetExtensionFunctionAddress_pfn(p0); } +#undef clGetExtensionFunctionAddressForPlatform +#define clGetExtensionFunctionAddressForPlatform clGetExtensionFunctionAddressForPlatform_fn +inline void* clGetExtensionFunctionAddressForPlatform(cl_platform_id p0, const char* p1) { return clGetExtensionFunctionAddressForPlatform_pfn(p0, p1); } +#undef clGetImageInfo +#define clGetImageInfo clGetImageInfo_fn +inline cl_int clGetImageInfo(cl_mem p0, cl_image_info p1, size_t p2, void* p3, size_t* p4) { return clGetImageInfo_pfn(p0, p1, p2, p3, p4); } +#undef clGetKernelArgInfo +#define clGetKernelArgInfo clGetKernelArgInfo_fn +inline cl_int clGetKernelArgInfo(cl_kernel p0, cl_uint p1, cl_kernel_arg_info p2, size_t p3, void* p4, size_t* p5) { return clGetKernelArgInfo_pfn(p0, p1, p2, p3, p4, p5); } +#undef clGetKernelInfo +#define clGetKernelInfo clGetKernelInfo_fn +inline cl_int clGetKernelInfo(cl_kernel p0, cl_kernel_info p1, size_t p2, void* p3, size_t* p4) { return clGetKernelInfo_pfn(p0, p1, p2, p3, p4); } +#undef clGetKernelWorkGroupInfo +#define clGetKernelWorkGroupInfo clGetKernelWorkGroupInfo_fn +inline cl_int clGetKernelWorkGroupInfo(cl_kernel p0, cl_device_id p1, cl_kernel_work_group_info p2, size_t p3, void* p4, size_t* p5) { return clGetKernelWorkGroupInfo_pfn(p0, p1, p2, p3, p4, p5); } +#undef clGetMemObjectInfo +#define clGetMemObjectInfo clGetMemObjectInfo_fn +inline cl_int clGetMemObjectInfo(cl_mem p0, cl_mem_info p1, size_t p2, void* p3, size_t* p4) { return clGetMemObjectInfo_pfn(p0, p1, p2, p3, p4); } +#undef clGetPlatformIDs +#define clGetPlatformIDs clGetPlatformIDs_fn +inline cl_int clGetPlatformIDs(cl_uint p0, cl_platform_id* p1, cl_uint* p2) { return clGetPlatformIDs_pfn(p0, p1, p2); } +#undef clGetPlatformInfo +#define 
clGetPlatformInfo clGetPlatformInfo_fn +inline cl_int clGetPlatformInfo(cl_platform_id p0, cl_platform_info p1, size_t p2, void* p3, size_t* p4) { return clGetPlatformInfo_pfn(p0, p1, p2, p3, p4); } +#undef clGetProgramBuildInfo +#define clGetProgramBuildInfo clGetProgramBuildInfo_fn +inline cl_int clGetProgramBuildInfo(cl_program p0, cl_device_id p1, cl_program_build_info p2, size_t p3, void* p4, size_t* p5) { return clGetProgramBuildInfo_pfn(p0, p1, p2, p3, p4, p5); } +#undef clGetProgramInfo +#define clGetProgramInfo clGetProgramInfo_fn +inline cl_int clGetProgramInfo(cl_program p0, cl_program_info p1, size_t p2, void* p3, size_t* p4) { return clGetProgramInfo_pfn(p0, p1, p2, p3, p4); } +#undef clGetSamplerInfo +#define clGetSamplerInfo clGetSamplerInfo_fn +inline cl_int clGetSamplerInfo(cl_sampler p0, cl_sampler_info p1, size_t p2, void* p3, size_t* p4) { return clGetSamplerInfo_pfn(p0, p1, p2, p3, p4); } +#undef clGetSupportedImageFormats +#define clGetSupportedImageFormats clGetSupportedImageFormats_fn +inline cl_int clGetSupportedImageFormats(cl_context p0, cl_mem_flags p1, cl_mem_object_type p2, cl_uint p3, cl_image_format* p4, cl_uint* p5) { return clGetSupportedImageFormats_pfn(p0, p1, p2, p3, p4, p5); } +#undef clLinkProgram +#define clLinkProgram clLinkProgram_fn +inline cl_program clLinkProgram(cl_context p0, cl_uint p1, const cl_device_id* p2, const char* p3, cl_uint p4, const cl_program* p5, void (CL_CALLBACK*p6) (cl_program, void*), void* p7, cl_int* p8) { return clLinkProgram_pfn(p0, p1, p2, p3, p4, p5, p6, p7, p8); } +#undef clReleaseCommandQueue +#define clReleaseCommandQueue clReleaseCommandQueue_fn +inline cl_int clReleaseCommandQueue(cl_command_queue p0) { return clReleaseCommandQueue_pfn(p0); } +#undef clReleaseContext +#define clReleaseContext clReleaseContext_fn +inline cl_int clReleaseContext(cl_context p0) { return clReleaseContext_pfn(p0); } +#undef clReleaseDevice +#define clReleaseDevice clReleaseDevice_fn +inline cl_int clReleaseDevice(cl_device_id p0) { return clReleaseDevice_pfn(p0); } +#undef clReleaseEvent +#define clReleaseEvent clReleaseEvent_fn +inline cl_int clReleaseEvent(cl_event p0) { return clReleaseEvent_pfn(p0); } +#undef clReleaseKernel +#define clReleaseKernel clReleaseKernel_fn +inline cl_int clReleaseKernel(cl_kernel p0) { return clReleaseKernel_pfn(p0); } +#undef clReleaseMemObject +#define clReleaseMemObject clReleaseMemObject_fn +inline cl_int clReleaseMemObject(cl_mem p0) { return clReleaseMemObject_pfn(p0); } +#undef clReleaseProgram +#define clReleaseProgram clReleaseProgram_fn +inline cl_int clReleaseProgram(cl_program p0) { return clReleaseProgram_pfn(p0); } +#undef clReleaseSampler +#define clReleaseSampler clReleaseSampler_fn +inline cl_int clReleaseSampler(cl_sampler p0) { return clReleaseSampler_pfn(p0); } +#undef clRetainCommandQueue +#define clRetainCommandQueue clRetainCommandQueue_fn +inline cl_int clRetainCommandQueue(cl_command_queue p0) { return clRetainCommandQueue_pfn(p0); } +#undef clRetainContext +#define clRetainContext clRetainContext_fn +inline cl_int clRetainContext(cl_context p0) { return clRetainContext_pfn(p0); } +#undef clRetainDevice +#define clRetainDevice clRetainDevice_fn +inline cl_int clRetainDevice(cl_device_id p0) { return clRetainDevice_pfn(p0); } +#undef clRetainEvent +#define clRetainEvent clRetainEvent_fn +inline cl_int clRetainEvent(cl_event p0) { return clRetainEvent_pfn(p0); } +#undef clRetainKernel +#define clRetainKernel clRetainKernel_fn +inline cl_int clRetainKernel(cl_kernel p0) { return 
clRetainKernel_pfn(p0); } +#undef clRetainMemObject +#define clRetainMemObject clRetainMemObject_fn +inline cl_int clRetainMemObject(cl_mem p0) { return clRetainMemObject_pfn(p0); } +#undef clRetainProgram +#define clRetainProgram clRetainProgram_fn +inline cl_int clRetainProgram(cl_program p0) { return clRetainProgram_pfn(p0); } +#undef clRetainSampler +#define clRetainSampler clRetainSampler_fn +inline cl_int clRetainSampler(cl_sampler p0) { return clRetainSampler_pfn(p0); } +#undef clSetEventCallback +#define clSetEventCallback clSetEventCallback_fn +inline cl_int clSetEventCallback(cl_event p0, cl_int p1, void (CL_CALLBACK*p2) (cl_event, cl_int, void*), void* p3) { return clSetEventCallback_pfn(p0, p1, p2, p3); } +#undef clSetKernelArg +#define clSetKernelArg clSetKernelArg_fn +inline cl_int clSetKernelArg(cl_kernel p0, cl_uint p1, size_t p2, const void* p3) { return clSetKernelArg_pfn(p0, p1, p2, p3); } +#undef clSetMemObjectDestructorCallback +#define clSetMemObjectDestructorCallback clSetMemObjectDestructorCallback_fn +inline cl_int clSetMemObjectDestructorCallback(cl_mem p0, void (CL_CALLBACK*p1) (cl_mem, void*), void* p2) { return clSetMemObjectDestructorCallback_pfn(p0, p1, p2); } +#undef clSetUserEventStatus +#define clSetUserEventStatus clSetUserEventStatus_fn +inline cl_int clSetUserEventStatus(cl_event p0, cl_int p1) { return clSetUserEventStatus_pfn(p0, p1); } +#undef clUnloadCompiler +#define clUnloadCompiler clUnloadCompiler_fn +inline cl_int clUnloadCompiler() { return clUnloadCompiler_pfn(); } +#undef clUnloadPlatformCompiler +#define clUnloadPlatformCompiler clUnloadPlatformCompiler_fn +inline cl_int clUnloadPlatformCompiler(cl_platform_id p0) { return clUnloadPlatformCompiler_pfn(p0); } +#undef clWaitForEvents +#define clWaitForEvents clWaitForEvents_fn +inline cl_int clWaitForEvents(cl_uint p0, const cl_event* p1) { return clWaitForEvents_pfn(p0, p1); } diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/autogenerated/opencl_gl.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/autogenerated/opencl_gl.hpp new file mode 100644 index 00000000..0b12aed6 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/autogenerated/opencl_gl.hpp @@ -0,0 +1,62 @@ +// +// AUTOGENERATED, DO NOT EDIT +// +#ifndef OPENCV_CORE_OCL_RUNTIME_OPENCL_GL_HPP +#error "Invalid usage" +#endif + +// generated by parser_cl.py +#define clCreateFromGLBuffer clCreateFromGLBuffer_ +#define clCreateFromGLRenderbuffer clCreateFromGLRenderbuffer_ +#define clCreateFromGLTexture clCreateFromGLTexture_ +#define clCreateFromGLTexture2D clCreateFromGLTexture2D_ +#define clCreateFromGLTexture3D clCreateFromGLTexture3D_ +#define clEnqueueAcquireGLObjects clEnqueueAcquireGLObjects_ +#define clEnqueueReleaseGLObjects clEnqueueReleaseGLObjects_ +#define clGetGLContextInfoKHR clGetGLContextInfoKHR_ +#define clGetGLObjectInfo clGetGLObjectInfo_ +#define clGetGLTextureInfo clGetGLTextureInfo_ + +#if defined __APPLE__ +#include <OpenCL/cl_gl.h> +#else +#include <CL/cl_gl.h> +#endif + +// generated by parser_cl.py +#undef clCreateFromGLBuffer +#define clCreateFromGLBuffer clCreateFromGLBuffer_pfn +#undef clCreateFromGLRenderbuffer +#define clCreateFromGLRenderbuffer clCreateFromGLRenderbuffer_pfn +#undef clCreateFromGLTexture +#define clCreateFromGLTexture clCreateFromGLTexture_pfn +#undef clCreateFromGLTexture2D +#define clCreateFromGLTexture2D clCreateFromGLTexture2D_pfn +#undef clCreateFromGLTexture3D +#define clCreateFromGLTexture3D clCreateFromGLTexture3D_pfn +#undef
clEnqueueAcquireGLObjects +#define clEnqueueAcquireGLObjects clEnqueueAcquireGLObjects_pfn +#undef clEnqueueReleaseGLObjects +#define clEnqueueReleaseGLObjects clEnqueueReleaseGLObjects_pfn +#undef clGetGLContextInfoKHR +#define clGetGLContextInfoKHR clGetGLContextInfoKHR_pfn +#undef clGetGLObjectInfo +#define clGetGLObjectInfo clGetGLObjectInfo_pfn +#undef clGetGLTextureInfo +#define clGetGLTextureInfo clGetGLTextureInfo_pfn + +#ifdef cl_khr_gl_sharing + +// generated by parser_cl.py +extern CL_RUNTIME_EXPORT cl_mem (CL_API_CALL*clCreateFromGLBuffer)(cl_context, cl_mem_flags, cl_GLuint, int*); +extern CL_RUNTIME_EXPORT cl_mem (CL_API_CALL*clCreateFromGLRenderbuffer)(cl_context, cl_mem_flags, cl_GLuint, cl_int*); +extern CL_RUNTIME_EXPORT cl_mem (CL_API_CALL*clCreateFromGLTexture)(cl_context, cl_mem_flags, cl_GLenum, cl_GLint, cl_GLuint, cl_int*); +extern CL_RUNTIME_EXPORT cl_mem (CL_API_CALL*clCreateFromGLTexture2D)(cl_context, cl_mem_flags, cl_GLenum, cl_GLint, cl_GLuint, cl_int*); +extern CL_RUNTIME_EXPORT cl_mem (CL_API_CALL*clCreateFromGLTexture3D)(cl_context, cl_mem_flags, cl_GLenum, cl_GLint, cl_GLuint, cl_int*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueAcquireGLObjects)(cl_command_queue, cl_uint, const cl_mem*, cl_uint, const cl_event*, cl_event*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueReleaseGLObjects)(cl_command_queue, cl_uint, const cl_mem*, cl_uint, const cl_event*, cl_event*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clGetGLContextInfoKHR)(const cl_context_properties*, cl_gl_context_info, size_t, void*, size_t*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clGetGLObjectInfo)(cl_mem, cl_gl_object_type*, cl_GLuint*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clGetGLTextureInfo)(cl_mem, cl_gl_texture_info, size_t, void*, size_t*); + +#endif // cl_khr_gl_sharing diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/autogenerated/opencl_gl_wrappers.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/autogenerated/opencl_gl_wrappers.hpp new file mode 100644 index 00000000..12f342b2 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/autogenerated/opencl_gl_wrappers.hpp @@ -0,0 +1,42 @@ +// +// AUTOGENERATED, DO NOT EDIT +// +#ifndef OPENCV_CORE_OCL_RUNTIME_OPENCL_GL_WRAPPERS_HPP +#error "Invalid usage" +#endif + +#ifdef cl_khr_gl_sharing + +// generated by parser_cl.py +#undef clCreateFromGLBuffer +#define clCreateFromGLBuffer clCreateFromGLBuffer_fn +inline cl_mem clCreateFromGLBuffer(cl_context p0, cl_mem_flags p1, cl_GLuint p2, int* p3) { return clCreateFromGLBuffer_pfn(p0, p1, p2, p3); } +#undef clCreateFromGLRenderbuffer +#define clCreateFromGLRenderbuffer clCreateFromGLRenderbuffer_fn +inline cl_mem clCreateFromGLRenderbuffer(cl_context p0, cl_mem_flags p1, cl_GLuint p2, cl_int* p3) { return clCreateFromGLRenderbuffer_pfn(p0, p1, p2, p3); } +#undef clCreateFromGLTexture +#define clCreateFromGLTexture clCreateFromGLTexture_fn +inline cl_mem clCreateFromGLTexture(cl_context p0, cl_mem_flags p1, cl_GLenum p2, cl_GLint p3, cl_GLuint p4, cl_int* p5) { return clCreateFromGLTexture_pfn(p0, p1, p2, p3, p4, p5); } +#undef clCreateFromGLTexture2D +#define clCreateFromGLTexture2D clCreateFromGLTexture2D_fn +inline cl_mem clCreateFromGLTexture2D(cl_context p0, cl_mem_flags p1, cl_GLenum p2, cl_GLint p3, cl_GLuint p4, cl_int* p5) { return clCreateFromGLTexture2D_pfn(p0, p1, p2, p3, p4, p5); } +#undef clCreateFromGLTexture3D +#define clCreateFromGLTexture3D 
clCreateFromGLTexture3D_fn +inline cl_mem clCreateFromGLTexture3D(cl_context p0, cl_mem_flags p1, cl_GLenum p2, cl_GLint p3, cl_GLuint p4, cl_int* p5) { return clCreateFromGLTexture3D_pfn(p0, p1, p2, p3, p4, p5); } +#undef clEnqueueAcquireGLObjects +#define clEnqueueAcquireGLObjects clEnqueueAcquireGLObjects_fn +inline cl_int clEnqueueAcquireGLObjects(cl_command_queue p0, cl_uint p1, const cl_mem* p2, cl_uint p3, const cl_event* p4, cl_event* p5) { return clEnqueueAcquireGLObjects_pfn(p0, p1, p2, p3, p4, p5); } +#undef clEnqueueReleaseGLObjects +#define clEnqueueReleaseGLObjects clEnqueueReleaseGLObjects_fn +inline cl_int clEnqueueReleaseGLObjects(cl_command_queue p0, cl_uint p1, const cl_mem* p2, cl_uint p3, const cl_event* p4, cl_event* p5) { return clEnqueueReleaseGLObjects_pfn(p0, p1, p2, p3, p4, p5); } +#undef clGetGLContextInfoKHR +#define clGetGLContextInfoKHR clGetGLContextInfoKHR_fn +inline cl_int clGetGLContextInfoKHR(const cl_context_properties* p0, cl_gl_context_info p1, size_t p2, void* p3, size_t* p4) { return clGetGLContextInfoKHR_pfn(p0, p1, p2, p3, p4); } +#undef clGetGLObjectInfo +#define clGetGLObjectInfo clGetGLObjectInfo_fn +inline cl_int clGetGLObjectInfo(cl_mem p0, cl_gl_object_type* p1, cl_GLuint* p2) { return clGetGLObjectInfo_pfn(p0, p1, p2); } +#undef clGetGLTextureInfo +#define clGetGLTextureInfo clGetGLTextureInfo_fn +inline cl_int clGetGLTextureInfo(cl_mem p0, cl_gl_texture_info p1, size_t p2, void* p3, size_t* p4) { return clGetGLTextureInfo_pfn(p0, p1, p2, p3, p4); } + +#endif // cl_khr_gl_sharing diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_clamdblas.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_clamdblas.hpp new file mode 100644 index 00000000..2ad8ac0b --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_clamdblas.hpp @@ -0,0 +1,53 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the OpenCV Foundation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_OCL_RUNTIME_CLAMDBLAS_HPP +#define OPENCV_CORE_OCL_RUNTIME_CLAMDBLAS_HPP + +#ifdef HAVE_CLAMDBLAS + +#include "opencl_core.hpp" + +#include "autogenerated/opencl_clamdblas.hpp" + +#endif // HAVE_CLAMDBLAS + +#endif // OPENCV_CORE_OCL_RUNTIME_CLAMDBLAS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_clamdfft.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_clamdfft.hpp new file mode 100644 index 00000000..a328f722 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_clamdfft.hpp @@ -0,0 +1,53 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the OpenCV Foundation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef OPENCV_CORE_OCL_RUNTIME_CLAMDFFT_HPP +#define OPENCV_CORE_OCL_RUNTIME_CLAMDFFT_HPP + +#ifdef HAVE_CLAMDFFT + +#include "opencl_core.hpp" + +#include "autogenerated/opencl_clamdfft.hpp" + +#endif // HAVE_CLAMDFFT + +#endif // OPENCV_CORE_OCL_RUNTIME_CLAMDFFT_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_core.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_core.hpp new file mode 100644 index 00000000..0404b317 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_core.hpp @@ -0,0 +1,84 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the OpenCV Foundation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef OPENCV_CORE_OCL_RUNTIME_OPENCL_CORE_HPP +#define OPENCV_CORE_OCL_RUNTIME_OPENCL_CORE_HPP + +#ifdef HAVE_OPENCL + +#ifndef CL_RUNTIME_EXPORT +#if (defined(BUILD_SHARED_LIBS) || defined(OPENCV_CORE_SHARED)) && (defined _WIN32 || defined WINCE) && \ + !(defined(__OPENCV_BUILD) && defined(OPENCV_MODULE_IS_PART_OF_WORLD)) +#define CL_RUNTIME_EXPORT __declspec(dllimport) +#else +#define CL_RUNTIME_EXPORT +#endif +#endif + +#ifdef HAVE_OPENCL_SVM +#define clSVMAlloc clSVMAlloc_ +#define clSVMFree clSVMFree_ +#define clSetKernelArgSVMPointer clSetKernelArgSVMPointer_ +#define clSetKernelExecInfo clSetKernelExecInfo_ +#define clEnqueueSVMFree clEnqueueSVMFree_ +#define clEnqueueSVMMemcpy clEnqueueSVMMemcpy_ +#define clEnqueueSVMMemFill clEnqueueSVMMemFill_ +#define clEnqueueSVMMap clEnqueueSVMMap_ +#define clEnqueueSVMUnmap clEnqueueSVMUnmap_ +#endif + +#include "autogenerated/opencl_core.hpp" + +#ifndef CL_DEVICE_DOUBLE_FP_CONFIG +#define CL_DEVICE_DOUBLE_FP_CONFIG 0x1032 +#endif + +#ifndef CL_DEVICE_HALF_FP_CONFIG +#define CL_DEVICE_HALF_FP_CONFIG 0x1033 +#endif + +#ifndef CL_VERSION_1_2 +#define CV_REQUIRE_OPENCL_1_2_ERROR CV_Error(cv::Error::OpenCLApiCallError, "OpenCV compiled without OpenCL v1.2 support, so we can't use functionality from OpenCL v1.2") +#endif + +#endif // HAVE_OPENCL + +#endif // OPENCV_CORE_OCL_RUNTIME_OPENCL_CORE_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_core_wrappers.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_core_wrappers.hpp new file mode 100644 index 00000000..38fcae99 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_core_wrappers.hpp @@ -0,0 +1,47 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the OpenCV Foundation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_OCL_RUNTIME_OPENCL_WRAPPERS_HPP +#define OPENCV_CORE_OCL_RUNTIME_OPENCL_WRAPPERS_HPP + +#include "autogenerated/opencl_core_wrappers.hpp" + +#endif // OPENCV_CORE_OCL_RUNTIME_OPENCL_WRAPPERS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_gl.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_gl.hpp new file mode 100644 index 00000000..659c7d80 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_gl.hpp @@ -0,0 +1,53 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the OpenCV Foundation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef OPENCV_CORE_OCL_RUNTIME_OPENCL_GL_HPP +#define OPENCV_CORE_OCL_RUNTIME_OPENCL_GL_HPP + +#if defined HAVE_OPENCL && defined HAVE_OPENGL + +#include "opencl_core.hpp" + +#include "autogenerated/opencl_gl.hpp" + +#endif // defined HAVE_OPENCL && defined HAVE_OPENGL + +#endif // OPENCV_CORE_OCL_RUNTIME_OPENCL_GL_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_gl_wrappers.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_gl_wrappers.hpp new file mode 100644 index 00000000..9700004c --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_gl_wrappers.hpp @@ -0,0 +1,47 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the OpenCV Foundation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef OPENCV_CORE_OCL_RUNTIME_OPENCL_GL_WRAPPERS_HPP +#define OPENCV_CORE_OCL_RUNTIME_OPENCL_GL_WRAPPERS_HPP + +#include "autogenerated/opencl_gl_wrappers.hpp" + +#endif // OPENCV_CORE_OCL_RUNTIME_OPENCL_GL_WRAPPERS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_svm_20.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_svm_20.hpp new file mode 100644 index 00000000..9636b19b --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_svm_20.hpp @@ -0,0 +1,48 @@ +/* See LICENSE file in the root OpenCV directory */ + +#ifndef OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_2_0_HPP +#define OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_2_0_HPP + +#if defined(HAVE_OPENCL_SVM) +#include "opencl_core.hpp" + +#include "opencl_svm_definitions.hpp" + +#undef clSVMAlloc +#define clSVMAlloc clSVMAlloc_pfn +#undef clSVMFree +#define clSVMFree clSVMFree_pfn +#undef clSetKernelArgSVMPointer +#define clSetKernelArgSVMPointer clSetKernelArgSVMPointer_pfn +#undef clSetKernelExecInfo +//#define clSetKernelExecInfo clSetKernelExecInfo_pfn +#undef clEnqueueSVMFree +//#define clEnqueueSVMFree clEnqueueSVMFree_pfn +#undef clEnqueueSVMMemcpy +#define clEnqueueSVMMemcpy clEnqueueSVMMemcpy_pfn +#undef clEnqueueSVMMemFill +#define clEnqueueSVMMemFill clEnqueueSVMMemFill_pfn +#undef clEnqueueSVMMap +#define clEnqueueSVMMap clEnqueueSVMMap_pfn +#undef clEnqueueSVMUnmap +#define clEnqueueSVMUnmap clEnqueueSVMUnmap_pfn + +extern CL_RUNTIME_EXPORT void* (CL_API_CALL *clSVMAlloc)(cl_context context, cl_svm_mem_flags flags, size_t size, unsigned int alignment); +extern CL_RUNTIME_EXPORT void (CL_API_CALL *clSVMFree)(cl_context context, void* svm_pointer); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL *clSetKernelArgSVMPointer)(cl_kernel kernel, cl_uint arg_index, const void* arg_value); +//extern CL_RUNTIME_EXPORT void* (CL_API_CALL *clSetKernelExecInfo)(cl_kernel kernel, cl_kernel_exec_info param_name, size_t param_value_size, const void* param_value); +//extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL *clEnqueueSVMFree)(cl_command_queue command_queue, cl_uint num_svm_pointers, void* svm_pointers[], +// void (CL_CALLBACK *pfn_free_func)(cl_command_queue queue, cl_uint num_svm_pointers, void* svm_pointers[], void* user_data), void* user_data, +// cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL *clEnqueueSVMMemcpy)(cl_command_queue command_queue, cl_bool blocking_copy, void* dst_ptr, const void* src_ptr, size_t size, + cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL *clEnqueueSVMMemFill)(cl_command_queue command_queue, void* svm_ptr, const void* pattern, size_t pattern_size, size_t size, + cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL *clEnqueueSVMMap)(cl_command_queue command_queue, cl_bool blocking_map, cl_map_flags map_flags, void* svm_ptr, size_t size, + cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL *clEnqueueSVMUnmap)(cl_command_queue command_queue, void* svm_ptr, + cl_uint num_events_in_wait_list, const cl_event* event_wait_list, cl_event* event); + +#endif // HAVE_OPENCL_SVM + +#endif // OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_2_0_HPP diff --git 
a/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_svm_definitions.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_svm_definitions.hpp new file mode 100644 index 00000000..97c927b4 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_svm_definitions.hpp @@ -0,0 +1,42 @@ +/* See LICENSE file in the root OpenCV directory */ + +#ifndef OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_DEFINITIONS_HPP +#define OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_DEFINITIONS_HPP + +#if defined(HAVE_OPENCL_SVM) +#if defined(CL_VERSION_2_0) + +// OpenCL 2.0 contains SVM definitions + +#else + +typedef cl_bitfield cl_device_svm_capabilities; +typedef cl_bitfield cl_svm_mem_flags; +typedef cl_uint cl_kernel_exec_info; + +// +// TODO Add real values after OpenCL 2.0 release +// + +#ifndef CL_DEVICE_SVM_CAPABILITIES +#define CL_DEVICE_SVM_CAPABILITIES 0x1053 + +#define CL_DEVICE_SVM_COARSE_GRAIN_BUFFER (1 << 0) +#define CL_DEVICE_SVM_FINE_GRAIN_BUFFER (1 << 1) +#define CL_DEVICE_SVM_FINE_GRAIN_SYSTEM (1 << 2) +#define CL_DEVICE_SVM_ATOMICS (1 << 3) +#endif + +#ifndef CL_MEM_SVM_FINE_GRAIN_BUFFER +#define CL_MEM_SVM_FINE_GRAIN_BUFFER (1 << 10) +#endif + +#ifndef CL_MEM_SVM_ATOMICS +#define CL_MEM_SVM_ATOMICS (1 << 11) +#endif + + +#endif // CL_VERSION_2_0 +#endif // HAVE_OPENCL_SVM + +#endif // OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_DEFINITIONS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_svm_hsa_extension.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_svm_hsa_extension.hpp new file mode 100644 index 00000000..497bc3de --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/opencl/runtime/opencl_svm_hsa_extension.hpp @@ -0,0 +1,166 @@ +/* See LICENSE file in the root OpenCV directory */ + +#ifndef OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_HSA_EXTENSION_HPP +#define OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_HSA_EXTENSION_HPP + +#if defined(HAVE_OPENCL_SVM) +#include "opencl_core.hpp" + +#ifndef CL_DEVICE_SVM_CAPABILITIES_AMD +// +// Part of the file is an extract from the cl_ext.h file from AMD APP SDK package. +// Below is the original copyright. +// +/******************************************************************************* + * Copyright (c) 2008-2013 The Khronos Group Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and/or associated documentation files (the + * "Materials"), to deal in the Materials without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Materials, and to + * permit persons to whom the Materials are furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Materials. + * + * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. 
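The SVM capability typedefs and bit masks defined in opencl_svm_definitions.hpp above mirror the official OpenCL 2.0 names, so client code can probe a device for shared-virtual-memory support in the usual way. A minimal editorial sketch (not part of the vendored header), assuming an OpenCL SDK with these symbols (or the fallback definitions above) and a cl_device_id obtained elsewhere:

#include <CL/cl.h>
#include <cstdio>

// Query the SVM capability bitfield and report which grain levels are available.
void reportSvmCaps(cl_device_id device)
{
    cl_device_svm_capabilities caps = 0;
    cl_int err = clGetDeviceInfo(device, CL_DEVICE_SVM_CAPABILITIES,
                                 sizeof(caps), &caps, NULL);
    if (err != CL_SUCCESS)
    {
        std::printf("SVM capabilities not available (err=%d)\n", (int)err);
        return;
    }
    if (caps & CL_DEVICE_SVM_COARSE_GRAIN_BUFFER)
        std::printf("coarse-grain buffer SVM supported\n");
    if (caps & CL_DEVICE_SVM_FINE_GRAIN_BUFFER)
        std::printf("fine-grain buffer SVM supported\n");
    if (caps & CL_DEVICE_SVM_FINE_GRAIN_SYSTEM)
        std::printf("fine-grain system SVM supported\n");
}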
+ ******************************************************************************/ + +/******************************************* + * Shared Virtual Memory (SVM) extension + *******************************************/ +typedef cl_bitfield cl_device_svm_capabilities_amd; +typedef cl_bitfield cl_svm_mem_flags_amd; +typedef cl_uint cl_kernel_exec_info_amd; + +/* cl_device_info */ +#define CL_DEVICE_SVM_CAPABILITIES_AMD 0x1053 +#define CL_DEVICE_PREFERRED_PLATFORM_ATOMIC_ALIGNMENT_AMD 0x1054 + +/* cl_device_svm_capabilities_amd */ +#define CL_DEVICE_SVM_COARSE_GRAIN_BUFFER_AMD (1 << 0) +#define CL_DEVICE_SVM_FINE_GRAIN_BUFFER_AMD (1 << 1) +#define CL_DEVICE_SVM_FINE_GRAIN_SYSTEM_AMD (1 << 2) +#define CL_DEVICE_SVM_ATOMICS_AMD (1 << 3) + +/* cl_svm_mem_flags_amd */ +#define CL_MEM_SVM_FINE_GRAIN_BUFFER_AMD (1 << 10) +#define CL_MEM_SVM_ATOMICS_AMD (1 << 11) + +/* cl_mem_info */ +#define CL_MEM_USES_SVM_POINTER_AMD 0x1109 + +/* cl_kernel_exec_info_amd */ +#define CL_KERNEL_EXEC_INFO_SVM_PTRS_AMD 0x11B6 +#define CL_KERNEL_EXEC_INFO_SVM_FINE_GRAIN_SYSTEM_AMD 0x11B7 + +/* cl_command_type */ +#define CL_COMMAND_SVM_FREE_AMD 0x1209 +#define CL_COMMAND_SVM_MEMCPY_AMD 0x120A +#define CL_COMMAND_SVM_MEMFILL_AMD 0x120B +#define CL_COMMAND_SVM_MAP_AMD 0x120C +#define CL_COMMAND_SVM_UNMAP_AMD 0x120D + +typedef CL_API_ENTRY void* +(CL_API_CALL * clSVMAllocAMD_fn)( + cl_context /* context */, + cl_svm_mem_flags_amd /* flags */, + size_t /* size */, + unsigned int /* alignment */ +) CL_EXT_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY void +(CL_API_CALL * clSVMFreeAMD_fn)( + cl_context /* context */, + void* /* svm_pointer */ +) CL_EXT_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int +(CL_API_CALL * clEnqueueSVMFreeAMD_fn)( + cl_command_queue /* command_queue */, + cl_uint /* num_svm_pointers */, + void** /* svm_pointers */, + void (CL_CALLBACK *)( /*pfn_free_func*/ + cl_command_queue /* queue */, + cl_uint /* num_svm_pointers */, + void** /* svm_pointers */, + void* /* user_data */), + void* /* user_data */, + cl_uint /* num_events_in_wait_list */, + const cl_event* /* event_wait_list */, + cl_event* /* event */ +) CL_EXT_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int +(CL_API_CALL * clEnqueueSVMMemcpyAMD_fn)( + cl_command_queue /* command_queue */, + cl_bool /* blocking_copy */, + void* /* dst_ptr */, + const void* /* src_ptr */, + size_t /* size */, + cl_uint /* num_events_in_wait_list */, + const cl_event* /* event_wait_list */, + cl_event* /* event */ +) CL_EXT_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int +(CL_API_CALL * clEnqueueSVMMemFillAMD_fn)( + cl_command_queue /* command_queue */, + void* /* svm_ptr */, + const void* /* pattern */, + size_t /* pattern_size */, + size_t /* size */, + cl_uint /* num_events_in_wait_list */, + const cl_event* /* event_wait_list */, + cl_event* /* event */ +) CL_EXT_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int +(CL_API_CALL * clEnqueueSVMMapAMD_fn)( + cl_command_queue /* command_queue */, + cl_bool /* blocking_map */, + cl_map_flags /* map_flags */, + void* /* svm_ptr */, + size_t /* size */, + cl_uint /* num_events_in_wait_list */, + const cl_event* /* event_wait_list */, + cl_event* /* event */ +) CL_EXT_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int +(CL_API_CALL * clEnqueueSVMUnmapAMD_fn)( + cl_command_queue /* command_queue */, + void* /* svm_ptr */, + cl_uint /* num_events_in_wait_list */, + const cl_event* /* event_wait_list */, + cl_event* /* event */ +) CL_EXT_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int +(CL_API_CALL * 
clSetKernelArgSVMPointerAMD_fn)( + cl_kernel /* kernel */, + cl_uint /* arg_index */, + const void * /* arg_value */ +) CL_EXT_SUFFIX__VERSION_1_2; + +typedef CL_API_ENTRY cl_int +(CL_API_CALL * clSetKernelExecInfoAMD_fn)( + cl_kernel /* kernel */, + cl_kernel_exec_info_amd /* param_name */, + size_t /* param_value_size */, + const void * /* param_value */ +) CL_EXT_SUFFIX__VERSION_1_2; + +#endif + +#endif // HAVE_OPENCL_SVM + +#endif // OPENCV_CORE_OCL_RUNTIME_OPENCL_SVM_HSA_EXTENSION_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/opengl.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/opengl.hpp new file mode 100644 index 00000000..a6288beb --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/opengl.hpp @@ -0,0 +1,725 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_OPENGL_HPP +#define OPENCV_CORE_OPENGL_HPP + +#ifndef __cplusplus +# error opengl.hpp header must be compiled as C++ +#endif + +#include "opencv2/core.hpp" +#include "ocl.hpp" + +namespace cv { namespace ogl { + +/** @addtogroup core_opengl +This section describes OpenGL interoperability. + +To enable OpenGL support, configure OpenCV using CMake with WITH_OPENGL=ON . Currently OpenGL is +supported only with WIN32, GTK and Qt backends on Windows and Linux (MacOS and Android are not +supported). For GTK backend gtkglext-1.0 library is required. 
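As an illustrative usage sketch for the module described above (editorial, not part of the vendored header): with OpenCV configured with -DWITH_OPENGL=ON, a highgui window created with the WINDOW_OPENGL flag provides the OpenGL context, after which host data can be pushed to the GPU through the classes declared below. The window name and image contents here are arbitrary.

#include <opencv2/core.hpp>
#include <opencv2/core/opengl.hpp>
#include <opencv2/highgui.hpp>

int main()
{
    // WINDOW_OPENGL asks highgui to create the window with an OpenGL context,
    // which ogl::Texture2D needs before it can allocate a texture.
    cv::namedWindow("gl", cv::WINDOW_OPENGL);

    cv::Mat img(480, 640, CV_8UC3, cv::Scalar(0, 128, 255));
    cv::ogl::Texture2D tex(img);   // uploads the host data to an OpenGL texture

    cv::imshow("gl", tex);         // renders the texture instead of a Mat
    cv::waitKey();
    return 0;
}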
+ +To use OpenGL functionality you should first create OpenGL context (window or frame buffer). You can +do this with namedWindow function or with other OpenGL toolkit (GLUT, for example). +*/ +//! @{ + +/////////////////// OpenGL Objects /////////////////// + +/** @brief Smart pointer for OpenGL buffer object with reference counting. + +Buffer Objects are OpenGL objects that store an array of unformatted memory allocated by the OpenGL +context. These can be used to store vertex data, pixel data retrieved from images or the +framebuffer, and a variety of other things. + +ogl::Buffer has interface similar with Mat interface and represents 2D array memory. + +ogl::Buffer supports memory transfers between host and device and also can be mapped to CUDA memory. + */ +class CV_EXPORTS Buffer +{ +public: + /** @brief The target defines how you intend to use the buffer object. + */ + enum Target + { + ARRAY_BUFFER = 0x8892, //!< The buffer will be used as a source for vertex data + ELEMENT_ARRAY_BUFFER = 0x8893, //!< The buffer will be used for indices (in glDrawElements, for example) + PIXEL_PACK_BUFFER = 0x88EB, //!< The buffer will be used for reading from OpenGL textures + PIXEL_UNPACK_BUFFER = 0x88EC //!< The buffer will be used for writing to OpenGL textures + }; + + enum Access + { + READ_ONLY = 0x88B8, + WRITE_ONLY = 0x88B9, + READ_WRITE = 0x88BA + }; + + /** @brief The constructors. + + Creates empty ogl::Buffer object, creates ogl::Buffer object from existed buffer ( abufId + parameter), allocates memory for ogl::Buffer object or copies from host/device memory. + */ + Buffer(); + + /** @overload + @param arows Number of rows in a 2D array. + @param acols Number of columns in a 2D array. + @param atype Array type ( CV_8UC1, ..., CV_64FC4 ). See Mat for details. + @param abufId Buffer object name. + @param autoRelease Auto release mode (if true, release will be called in object's destructor). + */ + Buffer(int arows, int acols, int atype, unsigned int abufId, bool autoRelease = false); + + /** @overload + @param asize 2D array size. + @param atype Array type ( CV_8UC1, ..., CV_64FC4 ). See Mat for details. + @param abufId Buffer object name. + @param autoRelease Auto release mode (if true, release will be called in object's destructor). + */ + Buffer(Size asize, int atype, unsigned int abufId, bool autoRelease = false); + + /** @overload + @param arows Number of rows in a 2D array. + @param acols Number of columns in a 2D array. + @param atype Array type ( CV_8UC1, ..., CV_64FC4 ). See Mat for details. + @param target Buffer usage. See cv::ogl::Buffer::Target . + @param autoRelease Auto release mode (if true, release will be called in object's destructor). + */ + Buffer(int arows, int acols, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false); + + /** @overload + @param asize 2D array size. + @param atype Array type ( CV_8UC1, ..., CV_64FC4 ). See Mat for details. + @param target Buffer usage. See cv::ogl::Buffer::Target . + @param autoRelease Auto release mode (if true, release will be called in object's destructor). + */ + Buffer(Size asize, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false); + + /** @overload + @param arr Input array (host or device memory, it can be Mat , cuda::GpuMat or std::vector ). + @param target Buffer usage. See cv::ogl::Buffer::Target . + @param autoRelease Auto release mode (if true, release will be called in object's destructor). 
+ */ + explicit Buffer(InputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false); + + /** @brief Allocates memory for ogl::Buffer object. + + @param arows Number of rows in a 2D array. + @param acols Number of columns in a 2D array. + @param atype Array type ( CV_8UC1, ..., CV_64FC4 ). See Mat for details. + @param target Buffer usage. See cv::ogl::Buffer::Target . + @param autoRelease Auto release mode (if true, release will be called in object's destructor). + */ + void create(int arows, int acols, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false); + + /** @overload + @param asize 2D array size. + @param atype Array type ( CV_8UC1, ..., CV_64FC4 ). See Mat for details. + @param target Buffer usage. See cv::ogl::Buffer::Target . + @param autoRelease Auto release mode (if true, release will be called in object's destructor). + */ + void create(Size asize, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false); + + /** @brief Decrements the reference counter and destroys the buffer object if needed. + + The function will call setAutoRelease(true) . + */ + void release(); + + /** @brief Sets auto release mode. + + The lifetime of the OpenGL object is tied to the lifetime of the context. If OpenGL context was + bound to a window it could be released at any time (user can close a window). If object's destructor + is called after destruction of the context it will cause an error. Thus ogl::Buffer doesn't destroy + OpenGL object in destructor by default (all OpenGL resources will be released with OpenGL context). + This function can force ogl::Buffer destructor to destroy OpenGL object. + @param flag Auto release mode (if true, release will be called in object's destructor). + */ + void setAutoRelease(bool flag); + + /** @brief Copies from host/device memory to OpenGL buffer. + @param arr Input array (host or device memory, it can be Mat , cuda::GpuMat or std::vector ). + @param target Buffer usage. See cv::ogl::Buffer::Target . + @param autoRelease Auto release mode (if true, release will be called in object's destructor). + */ + void copyFrom(InputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false); + + /** @overload */ + void copyFrom(InputArray arr, cuda::Stream& stream, Target target = ARRAY_BUFFER, bool autoRelease = false); + + /** @brief Copies from OpenGL buffer to host/device memory or another OpenGL buffer object. + + @param arr Destination array (host or device memory, can be Mat , cuda::GpuMat , std::vector or + ogl::Buffer ). + */ + void copyTo(OutputArray arr) const; + + /** @overload */ + void copyTo(OutputArray arr, cuda::Stream& stream) const; + + /** @brief Creates a full copy of the buffer object and the underlying data. + + @param target Buffer usage for destination buffer. + @param autoRelease Auto release mode for destination buffer. + */ + Buffer clone(Target target = ARRAY_BUFFER, bool autoRelease = false) const; + + /** @brief Binds OpenGL buffer to the specified buffer binding point. + + @param target Binding point. See cv::ogl::Buffer::Target . + */ + void bind(Target target) const; + + /** @brief Unbind any buffers from the specified binding point. + + @param target Binding point. See cv::ogl::Buffer::Target . + */ + static void unbind(Target target); + + /** @brief Maps OpenGL buffer to host memory. + + mapHost maps to the client's address space the entire data store of the buffer object. 
The data can + then be directly read and/or written relative to the returned pointer, depending on the specified + access policy. + + A mapped data store must be unmapped with ogl::Buffer::unmapHost before its buffer object is used. + + This operation can lead to memory transfers between host and device. + + Only one buffer object can be mapped at a time. + @param access Access policy, indicating whether it will be possible to read from, write to, or both + read from and write to the buffer object's mapped data store. The symbolic constant must be + ogl::Buffer::READ_ONLY , ogl::Buffer::WRITE_ONLY or ogl::Buffer::READ_WRITE . + */ + Mat mapHost(Access access); + + /** @brief Unmaps OpenGL buffer. + */ + void unmapHost(); + + //! map to device memory (blocking) + cuda::GpuMat mapDevice(); + void unmapDevice(); + + /** @brief Maps OpenGL buffer to CUDA device memory. + + This operation doesn't copy data. Several buffer objects can be mapped to CUDA memory at a time. + + A mapped data store must be unmapped with ogl::Buffer::unmapDevice before its buffer object is used. + */ + cuda::GpuMat mapDevice(cuda::Stream& stream); + + /** @brief Unmaps OpenGL buffer. + */ + void unmapDevice(cuda::Stream& stream); + + int rows() const; + int cols() const; + Size size() const; + bool empty() const; + + int type() const; + int depth() const; + int channels() const; + int elemSize() const; + int elemSize1() const; + + //! get OpenGL opject id + unsigned int bufId() const; + + class Impl; + +private: + Ptr impl_; + int rows_; + int cols_; + int type_; +}; + +/** @brief Smart pointer for OpenGL 2D texture memory with reference counting. + */ +class CV_EXPORTS Texture2D +{ +public: + /** @brief An Image Format describes the way that the images in Textures store their data. + */ + enum Format + { + NONE = 0, + DEPTH_COMPONENT = 0x1902, //!< Depth + RGB = 0x1907, //!< Red, Green, Blue + RGBA = 0x1908 //!< Red, Green, Blue, Alpha + }; + + /** @brief The constructors. + + Creates empty ogl::Texture2D object, allocates memory for ogl::Texture2D object or copies from + host/device memory. + */ + Texture2D(); + + /** @overload */ + Texture2D(int arows, int acols, Format aformat, unsigned int atexId, bool autoRelease = false); + + /** @overload */ + Texture2D(Size asize, Format aformat, unsigned int atexId, bool autoRelease = false); + + /** @overload + @param arows Number of rows. + @param acols Number of columns. + @param aformat Image format. See cv::ogl::Texture2D::Format . + @param autoRelease Auto release mode (if true, release will be called in object's destructor). + */ + Texture2D(int arows, int acols, Format aformat, bool autoRelease = false); + + /** @overload + @param asize 2D array size. + @param aformat Image format. See cv::ogl::Texture2D::Format . + @param autoRelease Auto release mode (if true, release will be called in object's destructor). + */ + Texture2D(Size asize, Format aformat, bool autoRelease = false); + + /** @overload + @param arr Input array (host or device memory, it can be Mat , cuda::GpuMat or ogl::Buffer ). + @param autoRelease Auto release mode (if true, release will be called in object's destructor). + */ + explicit Texture2D(InputArray arr, bool autoRelease = false); + + /** @brief Allocates memory for ogl::Texture2D object. + + @param arows Number of rows. + @param acols Number of columns. + @param aformat Image format. See cv::ogl::Texture2D::Format . + @param autoRelease Auto release mode (if true, release will be called in object's destructor). 
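A short usage note for the ogl::Buffer interface declared above (an editorial sketch, not vendored code): a buffer can be filled from a Mat, mapped into host memory, edited through the mapping, and unmapped before OpenGL uses it again. This assumes a current OpenGL context already exists.

#include <opencv2/core.hpp>
#include <opencv2/core/opengl.hpp>

void doubleBufferContents()
{
    cv::Mat src(256, 256, CV_32FC1, cv::Scalar(1.0f));

    // Upload host data into an OpenGL buffer object.
    cv::ogl::Buffer buf(src, cv::ogl::Buffer::ARRAY_BUFFER);

    // Map the GL data store into host memory, edit it in place, then unmap.
    cv::Mat view = buf.mapHost(cv::ogl::Buffer::READ_WRITE);
    view *= 2.0f;
    buf.unmapHost();   // required before the buffer object is used by OpenGL again
}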
+ */ + void create(int arows, int acols, Format aformat, bool autoRelease = false); + /** @overload + @param asize 2D array size. + @param aformat Image format. See cv::ogl::Texture2D::Format . + @param autoRelease Auto release mode (if true, release will be called in object's destructor). + */ + void create(Size asize, Format aformat, bool autoRelease = false); + + /** @brief Decrements the reference counter and destroys the texture object if needed. + + The function will call setAutoRelease(true) . + */ + void release(); + + /** @brief Sets auto release mode. + + @param flag Auto release mode (if true, release will be called in object's destructor). + + The lifetime of the OpenGL object is tied to the lifetime of the context. If OpenGL context was + bound to a window it could be released at any time (user can close a window). If object's destructor + is called after destruction of the context it will cause an error. Thus ogl::Texture2D doesn't + destroy OpenGL object in destructor by default (all OpenGL resources will be released with OpenGL + context). This function can force ogl::Texture2D destructor to destroy OpenGL object. + */ + void setAutoRelease(bool flag); + + /** @brief Copies from host/device memory to OpenGL texture. + + @param arr Input array (host or device memory, it can be Mat , cuda::GpuMat or ogl::Buffer ). + @param autoRelease Auto release mode (if true, release will be called in object's destructor). + */ + void copyFrom(InputArray arr, bool autoRelease = false); + + /** @brief Copies from OpenGL texture to host/device memory or another OpenGL texture object. + + @param arr Destination array (host or device memory, can be Mat , cuda::GpuMat , ogl::Buffer or + ogl::Texture2D ). + @param ddepth Destination depth. + @param autoRelease Auto release mode for destination buffer (if arr is OpenGL buffer or texture). + */ + void copyTo(OutputArray arr, int ddepth = CV_32F, bool autoRelease = false) const; + + /** @brief Binds texture to current active texture unit for GL_TEXTURE_2D target. + */ + void bind() const; + + int rows() const; + int cols() const; + Size size() const; + bool empty() const; + + Format format() const; + + //! get OpenGL opject id + unsigned int texId() const; + + class Impl; + +private: + Ptr impl_; + int rows_; + int cols_; + Format format_; +}; + +/** @brief Wrapper for OpenGL Client-Side Vertex arrays. + +ogl::Arrays stores vertex data in ogl::Buffer objects. + */ +class CV_EXPORTS Arrays +{ +public: + /** @brief Default constructor + */ + Arrays(); + + /** @brief Sets an array of vertex coordinates. + @param vertex array with vertex coordinates, can be both host and device memory. + */ + void setVertexArray(InputArray vertex); + + /** @brief Resets vertex coordinates. + */ + void resetVertexArray(); + + /** @brief Sets an array of vertex colors. + @param color array with vertex colors, can be both host and device memory. + */ + void setColorArray(InputArray color); + + /** @brief Resets vertex colors. + */ + void resetColorArray(); + + /** @brief Sets an array of vertex normals. + @param normal array with vertex normals, can be both host and device memory. + */ + void setNormalArray(InputArray normal); + + /** @brief Resets vertex normals. + */ + void resetNormalArray(); + + /** @brief Sets an array of vertex texture coordinates. + @param texCoord array with vertex texture coordinates, can be both host and device memory. + */ + void setTexCoordArray(InputArray texCoord); + + /** @brief Resets vertex texture coordinates. 
+ */ + void resetTexCoordArray(); + + /** @brief Releases all inner buffers. + */ + void release(); + + /** @brief Sets auto release mode all inner buffers. + @param flag Auto release mode. + */ + void setAutoRelease(bool flag); + + /** @brief Binds all vertex arrays. + */ + void bind() const; + + /** @brief Returns the vertex count. + */ + int size() const; + bool empty() const; + +private: + int size_; + Buffer vertex_; + Buffer color_; + Buffer normal_; + Buffer texCoord_; +}; + +/////////////////// Render Functions /////////////////// + +//! render mode +enum RenderModes { + POINTS = 0x0000, + LINES = 0x0001, + LINE_LOOP = 0x0002, + LINE_STRIP = 0x0003, + TRIANGLES = 0x0004, + TRIANGLE_STRIP = 0x0005, + TRIANGLE_FAN = 0x0006, + QUADS = 0x0007, + QUAD_STRIP = 0x0008, + POLYGON = 0x0009 +}; + +/** @brief Render OpenGL texture or primitives. +@param tex Texture to draw. +@param wndRect Region of window, where to draw a texture (normalized coordinates). +@param texRect Region of texture to draw (normalized coordinates). + */ +CV_EXPORTS void render(const Texture2D& tex, + Rect_ wndRect = Rect_(0.0, 0.0, 1.0, 1.0), + Rect_ texRect = Rect_(0.0, 0.0, 1.0, 1.0)); + +/** @overload +@param arr Array of privitives vertices. +@param mode Render mode. One of cv::ogl::RenderModes +@param color Color for all vertices. Will be used if arr doesn't contain color array. +*/ +CV_EXPORTS void render(const Arrays& arr, int mode = POINTS, Scalar color = Scalar::all(255)); + +/** @overload +@param arr Array of privitives vertices. +@param indices Array of vertices indices (host or device memory). +@param mode Render mode. One of cv::ogl::RenderModes +@param color Color for all vertices. Will be used if arr doesn't contain color array. +*/ +CV_EXPORTS void render(const Arrays& arr, InputArray indices, int mode = POINTS, Scalar color = Scalar::all(255)); + +/////////////////// CL-GL Interoperability Functions /////////////////// + +namespace ocl { +using namespace cv::ocl; + +// TODO static functions in the Context class +/** @brief Creates OpenCL context from GL. +@return Returns reference to OpenCL Context + */ +CV_EXPORTS Context& initializeContextFromGL(); + +} // namespace cv::ogl::ocl + +/** @brief Converts InputArray to Texture2D object. +@param src - source InputArray. +@param texture - destination Texture2D object. + */ +CV_EXPORTS void convertToGLTexture2D(InputArray src, Texture2D& texture); + +/** @brief Converts Texture2D object to OutputArray. +@param texture - source Texture2D object. +@param dst - destination OutputArray. + */ +CV_EXPORTS void convertFromGLTexture2D(const Texture2D& texture, OutputArray dst); + +/** @brief Maps Buffer object to process on CL side (convert to UMat). + +Function creates CL buffer from GL one, and then constructs UMat that can be used +to process buffer data with OpenCV functions. Note that in current implementation +UMat constructed this way doesn't own corresponding GL buffer object, so it is +the user responsibility to close down CL/GL buffers relationships by explicitly +calling unmapGLBuffer() function. +@param buffer - source Buffer object. +@param accessFlags - data access flags (ACCESS_READ|ACCESS_WRITE). +@return Returns UMat object + */ +CV_EXPORTS UMat mapGLBuffer(const Buffer& buffer, int accessFlags = ACCESS_READ|ACCESS_WRITE); + +/** @brief Unmaps Buffer object (releases UMat, previously mapped from Buffer). + +Function must be called explicitly by the user for each UMat previously constructed +by the call to mapGLBuffer() function. 
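The mapGLBuffer()/unmapGLBuffer() pair described here is the usual way to run OpenCL-accelerated OpenCV code directly on a GL buffer. An illustrative sketch (not part of the vendored header; assumes a current GL context and an OpenCV build with both OpenCL and OpenGL enabled, and that the buffer holds image-like data):

#include <opencv2/core.hpp>
#include <opencv2/core/opengl.hpp>
#include <opencv2/imgproc.hpp>

void blurSharedBuffer(cv::ogl::Buffer& buf)
{
    // Wrap the GL buffer as a UMat so OpenCL-backed functions can work on it in place.
    cv::UMat u = cv::ogl::mapGLBuffer(buf, cv::ACCESS_READ | cv::ACCESS_WRITE);
    cv::GaussianBlur(u, u, cv::Size(5, 5), 1.5);
    // The UMat does not own the GL buffer, so the CL/GL link must be released explicitly.
    cv::ogl::unmapGLBuffer(u);
}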
+@param u - source UMat, created by mapGLBuffer(). + */ +CV_EXPORTS void unmapGLBuffer(UMat& u); + +//! @} +}} // namespace cv::ogl + +namespace cv { namespace cuda { + +/** @brief Sets a CUDA device and initializes it for the current thread with OpenGL interoperability. + +This function should be explicitly called after OpenGL context creation and before any CUDA calls. +@param device System index of a CUDA device starting with 0. +@ingroup core_opengl + */ +CV_EXPORTS void setGlDevice(int device = 0); + +}} + +//! @cond IGNORED + +//////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////// + +inline +cv::ogl::Buffer::Buffer(int arows, int acols, int atype, Target target, bool autoRelease) : rows_(0), cols_(0), type_(0) +{ + create(arows, acols, atype, target, autoRelease); +} + +inline +cv::ogl::Buffer::Buffer(Size asize, int atype, Target target, bool autoRelease) : rows_(0), cols_(0), type_(0) +{ + create(asize, atype, target, autoRelease); +} + +inline +void cv::ogl::Buffer::create(Size asize, int atype, Target target, bool autoRelease) +{ + create(asize.height, asize.width, atype, target, autoRelease); +} + +inline +int cv::ogl::Buffer::rows() const +{ + return rows_; +} + +inline +int cv::ogl::Buffer::cols() const +{ + return cols_; +} + +inline +cv::Size cv::ogl::Buffer::size() const +{ + return Size(cols_, rows_); +} + +inline +bool cv::ogl::Buffer::empty() const +{ + return rows_ == 0 || cols_ == 0; +} + +inline +int cv::ogl::Buffer::type() const +{ + return type_; +} + +inline +int cv::ogl::Buffer::depth() const +{ + return CV_MAT_DEPTH(type_); +} + +inline +int cv::ogl::Buffer::channels() const +{ + return CV_MAT_CN(type_); +} + +inline +int cv::ogl::Buffer::elemSize() const +{ + return CV_ELEM_SIZE(type_); +} + +inline +int cv::ogl::Buffer::elemSize1() const +{ + return CV_ELEM_SIZE1(type_); +} + +/////// + +inline +cv::ogl::Texture2D::Texture2D(int arows, int acols, Format aformat, bool autoRelease) : rows_(0), cols_(0), format_(NONE) +{ + create(arows, acols, aformat, autoRelease); +} + +inline +cv::ogl::Texture2D::Texture2D(Size asize, Format aformat, bool autoRelease) : rows_(0), cols_(0), format_(NONE) +{ + create(asize, aformat, autoRelease); +} + +inline +void cv::ogl::Texture2D::create(Size asize, Format aformat, bool autoRelease) +{ + create(asize.height, asize.width, aformat, autoRelease); +} + +inline +int cv::ogl::Texture2D::rows() const +{ + return rows_; +} + +inline +int cv::ogl::Texture2D::cols() const +{ + return cols_; +} + +inline +cv::Size cv::ogl::Texture2D::size() const +{ + return Size(cols_, rows_); +} + +inline +bool cv::ogl::Texture2D::empty() const +{ + return rows_ == 0 || cols_ == 0; +} + +inline +cv::ogl::Texture2D::Format cv::ogl::Texture2D::format() const +{ + return format_; +} + +/////// + +inline +cv::ogl::Arrays::Arrays() : size_(0) +{ +} + +inline +int cv::ogl::Arrays::size() const +{ + return size_; +} + +inline +bool cv::ogl::Arrays::empty() const +{ + return size_ == 0; +} + +//! 
@endcond + +#endif /* OPENCV_CORE_OPENGL_HPP */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/operations.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/operations.hpp new file mode 100644 index 00000000..ef1808a8 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/operations.hpp @@ -0,0 +1,573 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Copyright (C) 2015, Itseez Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_OPERATIONS_HPP +#define OPENCV_CORE_OPERATIONS_HPP + +#ifndef __cplusplus +# error operations.hpp header must be compiled as C++ +#endif + +#include + +//! 
@cond IGNORED + +namespace cv +{ + +////////////////////////////// Matx methods depending on core API ///////////////////////////// + +namespace internal +{ + +template struct Matx_FastInvOp +{ + bool operator()(const Matx<_Tp, m, n>& a, Matx<_Tp, n, m>& b, int method) const + { + return invert(a, b, method) != 0; + } +}; + +template struct Matx_FastInvOp<_Tp, m, m> +{ + bool operator()(const Matx<_Tp, m, m>& a, Matx<_Tp, m, m>& b, int method) const + { + if (method == DECOMP_LU || method == DECOMP_CHOLESKY) + { + Matx<_Tp, m, m> temp = a; + + // assume that b is all 0's on input => make it a unity matrix + for (int i = 0; i < m; i++) + b(i, i) = (_Tp)1; + + if (method == DECOMP_CHOLESKY) + return Cholesky(temp.val, m*sizeof(_Tp), m, b.val, m*sizeof(_Tp), m); + + return LU(temp.val, m*sizeof(_Tp), m, b.val, m*sizeof(_Tp), m) != 0; + } + else + { + return invert(a, b, method) != 0; + } + } +}; + +template struct Matx_FastInvOp<_Tp, 2, 2> +{ + bool operator()(const Matx<_Tp, 2, 2>& a, Matx<_Tp, 2, 2>& b, int /*method*/) const + { + _Tp d = (_Tp)determinant(a); + if (d == 0) + return false; + d = 1/d; + b(1,1) = a(0,0)*d; + b(0,0) = a(1,1)*d; + b(0,1) = -a(0,1)*d; + b(1,0) = -a(1,0)*d; + return true; + } +}; + +template struct Matx_FastInvOp<_Tp, 3, 3> +{ + bool operator()(const Matx<_Tp, 3, 3>& a, Matx<_Tp, 3, 3>& b, int /*method*/) const + { + _Tp d = (_Tp)determinant(a); + if (d == 0) + return false; + d = 1/d; + b(0,0) = (a(1,1) * a(2,2) - a(1,2) * a(2,1)) * d; + b(0,1) = (a(0,2) * a(2,1) - a(0,1) * a(2,2)) * d; + b(0,2) = (a(0,1) * a(1,2) - a(0,2) * a(1,1)) * d; + + b(1,0) = (a(1,2) * a(2,0) - a(1,0) * a(2,2)) * d; + b(1,1) = (a(0,0) * a(2,2) - a(0,2) * a(2,0)) * d; + b(1,2) = (a(0,2) * a(1,0) - a(0,0) * a(1,2)) * d; + + b(2,0) = (a(1,0) * a(2,1) - a(1,1) * a(2,0)) * d; + b(2,1) = (a(0,1) * a(2,0) - a(0,0) * a(2,1)) * d; + b(2,2) = (a(0,0) * a(1,1) - a(0,1) * a(1,0)) * d; + return true; + } +}; + + +template struct Matx_FastSolveOp +{ + bool operator()(const Matx<_Tp, m, l>& a, const Matx<_Tp, m, n>& b, + Matx<_Tp, l, n>& x, int method) const + { + return cv::solve(a, b, x, method); + } +}; + +template struct Matx_FastSolveOp<_Tp, m, m, n> +{ + bool operator()(const Matx<_Tp, m, m>& a, const Matx<_Tp, m, n>& b, + Matx<_Tp, m, n>& x, int method) const + { + if (method == DECOMP_LU || method == DECOMP_CHOLESKY) + { + Matx<_Tp, m, m> temp = a; + x = b; + if( method == DECOMP_CHOLESKY ) + return Cholesky(temp.val, m*sizeof(_Tp), m, x.val, n*sizeof(_Tp), n); + + return LU(temp.val, m*sizeof(_Tp), m, x.val, n*sizeof(_Tp), n) != 0; + } + else + { + return cv::solve(a, b, x, method); + } + } +}; + +template struct Matx_FastSolveOp<_Tp, 2, 2, 1> +{ + bool operator()(const Matx<_Tp, 2, 2>& a, const Matx<_Tp, 2, 1>& b, + Matx<_Tp, 2, 1>& x, int) const + { + _Tp d = (_Tp)determinant(a); + if (d == 0) + return false; + d = 1/d; + x(0) = (b(0)*a(1,1) - b(1)*a(0,1))*d; + x(1) = (b(1)*a(0,0) - b(0)*a(1,0))*d; + return true; + } +}; + +template struct Matx_FastSolveOp<_Tp, 3, 3, 1> +{ + bool operator()(const Matx<_Tp, 3, 3>& a, const Matx<_Tp, 3, 1>& b, + Matx<_Tp, 3, 1>& x, int) const + { + _Tp d = (_Tp)determinant(a); + if (d == 0) + return false; + d = 1/d; + x(0) = d*(b(0)*(a(1,1)*a(2,2) - a(1,2)*a(2,1)) - + a(0,1)*(b(1)*a(2,2) - a(1,2)*b(2)) + + a(0,2)*(b(1)*a(2,1) - a(1,1)*b(2))); + + x(1) = d*(a(0,0)*(b(1)*a(2,2) - a(1,2)*b(2)) - + b(0)*(a(1,0)*a(2,2) - a(1,2)*a(2,0)) + + a(0,2)*(a(1,0)*b(2) - b(1)*a(2,0))); + + x(2) = d*(a(0,0)*(a(1,1)*b(2) - b(1)*a(2,1)) - + a(0,1)*(a(1,0)*b(2) - b(1)*a(2,0)) + 
+ b(0)*(a(1,0)*a(2,1) - a(1,1)*a(2,0))); + return true; + } +}; + +} // internal + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::randu(_Tp a, _Tp b) +{ + Matx<_Tp,m,n> M; + cv::randu(M, Scalar(a), Scalar(b)); + return M; +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::randn(_Tp a, _Tp b) +{ + Matx<_Tp,m,n> M; + cv::randn(M, Scalar(a), Scalar(b)); + return M; +} + +template inline +Matx<_Tp, n, m> Matx<_Tp, m, n>::inv(int method, bool *p_is_ok /*= NULL*/) const +{ + Matx<_Tp, n, m> b; + bool ok = cv::internal::Matx_FastInvOp<_Tp, m, n>()(*this, b, method); + if (p_is_ok) *p_is_ok = ok; + return ok ? b : Matx<_Tp, n, m>::zeros(); +} + +template template inline +Matx<_Tp, n, l> Matx<_Tp, m, n>::solve(const Matx<_Tp, m, l>& rhs, int method) const +{ + Matx<_Tp, n, l> x; + bool ok = cv::internal::Matx_FastSolveOp<_Tp, m, n, l>()(*this, rhs, x, method); + return ok ? x : Matx<_Tp, n, l>::zeros(); +} + + + +////////////////////////// Augmenting algebraic & logical operations ////////////////////////// + +#define CV_MAT_AUG_OPERATOR1(op, cvop, A, B) \ + static inline A& operator op (A& a, const B& b) { cvop; return a; } + +#define CV_MAT_AUG_OPERATOR(op, cvop, A, B) \ + CV_MAT_AUG_OPERATOR1(op, cvop, A, B) \ + CV_MAT_AUG_OPERATOR1(op, cvop, const A, B) + +#define CV_MAT_AUG_OPERATOR_T(op, cvop, A, B) \ + template CV_MAT_AUG_OPERATOR1(op, cvop, A, B) \ + template CV_MAT_AUG_OPERATOR1(op, cvop, const A, B) + +#define CV_MAT_AUG_OPERATOR_TN(op, cvop, A) \ + template static inline A& operator op (A& a, const Matx<_Tp,m,n>& b) { cvop; return a; } \ + template static inline const A& operator op (const A& a, const Matx<_Tp,m,n>& b) { cvop; return a; } + +CV_MAT_AUG_OPERATOR (+=, cv::add(a, b, (const Mat&)a), Mat, Mat) +CV_MAT_AUG_OPERATOR (+=, cv::add(a, b, (const Mat&)a), Mat, Scalar) +CV_MAT_AUG_OPERATOR_T(+=, cv::add(a, b, (const Mat&)a), Mat_<_Tp>, Mat) +CV_MAT_AUG_OPERATOR_T(+=, cv::add(a, b, (const Mat&)a), Mat_<_Tp>, Scalar) +CV_MAT_AUG_OPERATOR_T(+=, cv::add(a, b, (const Mat&)a), Mat_<_Tp>, Mat_<_Tp>) +CV_MAT_AUG_OPERATOR_TN(+=, cv::add(a, Mat(b), (const Mat&)a), Mat) +CV_MAT_AUG_OPERATOR_TN(+=, cv::add(a, Mat(b), (const Mat&)a), Mat_<_Tp>) + +CV_MAT_AUG_OPERATOR (-=, cv::subtract(a, b, (const Mat&)a), Mat, Mat) +CV_MAT_AUG_OPERATOR (-=, cv::subtract(a, b, (const Mat&)a), Mat, Scalar) +CV_MAT_AUG_OPERATOR_T(-=, cv::subtract(a, b, (const Mat&)a), Mat_<_Tp>, Mat) +CV_MAT_AUG_OPERATOR_T(-=, cv::subtract(a, b, (const Mat&)a), Mat_<_Tp>, Scalar) +CV_MAT_AUG_OPERATOR_T(-=, cv::subtract(a, b, (const Mat&)a), Mat_<_Tp>, Mat_<_Tp>) +CV_MAT_AUG_OPERATOR_TN(-=, cv::subtract(a, Mat(b), (const Mat&)a), Mat) +CV_MAT_AUG_OPERATOR_TN(-=, cv::subtract(a, Mat(b), (const Mat&)a), Mat_<_Tp>) + +CV_MAT_AUG_OPERATOR (*=, cv::gemm(a, b, 1, Mat(), 0, a, 0), Mat, Mat) +CV_MAT_AUG_OPERATOR_T(*=, cv::gemm(a, b, 1, Mat(), 0, a, 0), Mat_<_Tp>, Mat) +CV_MAT_AUG_OPERATOR_T(*=, cv::gemm(a, b, 1, Mat(), 0, a, 0), Mat_<_Tp>, Mat_<_Tp>) +CV_MAT_AUG_OPERATOR (*=, a.convertTo(a, -1, b), Mat, double) +CV_MAT_AUG_OPERATOR_T(*=, a.convertTo(a, -1, b), Mat_<_Tp>, double) +CV_MAT_AUG_OPERATOR_TN(*=, cv::gemm(a, Mat(b), 1, Mat(), 0, a, 0), Mat) +CV_MAT_AUG_OPERATOR_TN(*=, cv::gemm(a, Mat(b), 1, Mat(), 0, a, 0), Mat_<_Tp>) + +CV_MAT_AUG_OPERATOR (/=, cv::divide(a, b, (const Mat&)a), Mat, Mat) +CV_MAT_AUG_OPERATOR_T(/=, cv::divide(a, b, (const Mat&)a), Mat_<_Tp>, Mat) +CV_MAT_AUG_OPERATOR_T(/=, cv::divide(a, b, (const Mat&)a), Mat_<_Tp>, Mat_<_Tp>) +CV_MAT_AUG_OPERATOR (/=, a.convertTo((Mat&)a, -1, 1./b), Mat, double) 
+CV_MAT_AUG_OPERATOR_T(/=, a.convertTo((Mat&)a, -1, 1./b), Mat_<_Tp>, double) +CV_MAT_AUG_OPERATOR_TN(/=, cv::divide(a, Mat(b), (const Mat&)a), Mat) +CV_MAT_AUG_OPERATOR_TN(/=, cv::divide(a, Mat(b), (const Mat&)a), Mat_<_Tp>) + +CV_MAT_AUG_OPERATOR (&=, cv::bitwise_and(a, b, (const Mat&)a), Mat, Mat) +CV_MAT_AUG_OPERATOR (&=, cv::bitwise_and(a, b, (const Mat&)a), Mat, Scalar) +CV_MAT_AUG_OPERATOR_T(&=, cv::bitwise_and(a, b, (const Mat&)a), Mat_<_Tp>, Mat) +CV_MAT_AUG_OPERATOR_T(&=, cv::bitwise_and(a, b, (const Mat&)a), Mat_<_Tp>, Scalar) +CV_MAT_AUG_OPERATOR_T(&=, cv::bitwise_and(a, b, (const Mat&)a), Mat_<_Tp>, Mat_<_Tp>) +CV_MAT_AUG_OPERATOR_TN(&=, cv::bitwise_and(a, Mat(b), (const Mat&)a), Mat) +CV_MAT_AUG_OPERATOR_TN(&=, cv::bitwise_and(a, Mat(b), (const Mat&)a), Mat_<_Tp>) + +CV_MAT_AUG_OPERATOR (|=, cv::bitwise_or(a, b, (const Mat&)a), Mat, Mat) +CV_MAT_AUG_OPERATOR (|=, cv::bitwise_or(a, b, (const Mat&)a), Mat, Scalar) +CV_MAT_AUG_OPERATOR_T(|=, cv::bitwise_or(a, b, (const Mat&)a), Mat_<_Tp>, Mat) +CV_MAT_AUG_OPERATOR_T(|=, cv::bitwise_or(a, b, (const Mat&)a), Mat_<_Tp>, Scalar) +CV_MAT_AUG_OPERATOR_T(|=, cv::bitwise_or(a, b, (const Mat&)a), Mat_<_Tp>, Mat_<_Tp>) +CV_MAT_AUG_OPERATOR_TN(|=, cv::bitwise_or(a, Mat(b), (const Mat&)a), Mat) +CV_MAT_AUG_OPERATOR_TN(|=, cv::bitwise_or(a, Mat(b), (const Mat&)a), Mat_<_Tp>) + +CV_MAT_AUG_OPERATOR (^=, cv::bitwise_xor(a, b, (const Mat&)a), Mat, Mat) +CV_MAT_AUG_OPERATOR (^=, cv::bitwise_xor(a, b, (const Mat&)a), Mat, Scalar) +CV_MAT_AUG_OPERATOR_T(^=, cv::bitwise_xor(a, b, (const Mat&)a), Mat_<_Tp>, Mat) +CV_MAT_AUG_OPERATOR_T(^=, cv::bitwise_xor(a, b, (const Mat&)a), Mat_<_Tp>, Scalar) +CV_MAT_AUG_OPERATOR_T(^=, cv::bitwise_xor(a, b, (const Mat&)a), Mat_<_Tp>, Mat_<_Tp>) +CV_MAT_AUG_OPERATOR_TN(^=, cv::bitwise_xor(a, Mat(b), (const Mat&)a), Mat) +CV_MAT_AUG_OPERATOR_TN(^=, cv::bitwise_xor(a, Mat(b), (const Mat&)a), Mat_<_Tp>) + +#undef CV_MAT_AUG_OPERATOR_TN +#undef CV_MAT_AUG_OPERATOR_T +#undef CV_MAT_AUG_OPERATOR +#undef CV_MAT_AUG_OPERATOR1 + + + +///////////////////////////////////////////// SVD ///////////////////////////////////////////// + +inline SVD::SVD() {} +inline SVD::SVD( InputArray m, int flags ) { operator ()(m, flags); } +inline void SVD::solveZ( InputArray m, OutputArray _dst ) +{ + Mat mtx = m.getMat(); + SVD svd(mtx, (mtx.rows >= mtx.cols ? 
0 : SVD::FULL_UV)); + _dst.create(svd.vt.cols, 1, svd.vt.type()); + Mat dst = _dst.getMat(); + svd.vt.row(svd.vt.rows-1).reshape(1,svd.vt.cols).copyTo(dst); +} + +template inline void + SVD::compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w, Matx<_Tp, m, nm>& u, Matx<_Tp, n, nm>& vt ) +{ + CV_StaticAssert( nm == MIN(m, n), "Invalid size of output vector."); + Mat _a(a, false), _u(u, false), _w(w, false), _vt(vt, false); + SVD::compute(_a, _w, _u, _vt); + CV_Assert(_w.data == (uchar*)&w.val[0] && _u.data == (uchar*)&u.val[0] && _vt.data == (uchar*)&vt.val[0]); +} + +template inline void +SVD::compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w ) +{ + CV_StaticAssert( nm == MIN(m, n), "Invalid size of output vector."); + Mat _a(a, false), _w(w, false); + SVD::compute(_a, _w); + CV_Assert(_w.data == (uchar*)&w.val[0]); +} + +template inline void +SVD::backSubst( const Matx<_Tp, nm, 1>& w, const Matx<_Tp, m, nm>& u, + const Matx<_Tp, n, nm>& vt, const Matx<_Tp, m, nb>& rhs, + Matx<_Tp, n, nb>& dst ) +{ + CV_StaticAssert( nm == MIN(m, n), "Invalid size of output vector."); + Mat _u(u, false), _w(w, false), _vt(vt, false), _rhs(rhs, false), _dst(dst, false); + SVD::backSubst(_w, _u, _vt, _rhs, _dst); + CV_Assert(_dst.data == (uchar*)&dst.val[0]); +} + + + +/////////////////////////////////// Multiply-with-Carry RNG /////////////////////////////////// + +inline RNG::RNG() { state = 0xffffffff; } +inline RNG::RNG(uint64 _state) { state = _state ? _state : 0xffffffff; } + +inline RNG::operator uchar() { return (uchar)next(); } +inline RNG::operator schar() { return (schar)next(); } +inline RNG::operator ushort() { return (ushort)next(); } +inline RNG::operator short() { return (short)next(); } +inline RNG::operator int() { return (int)next(); } +inline RNG::operator unsigned() { return next(); } +inline RNG::operator float() { return next()*2.3283064365386962890625e-10f; } +inline RNG::operator double() { unsigned t = next(); return (((uint64)t << 32) | next()) * 5.4210108624275221700372640043497e-20; } + +inline unsigned RNG::operator ()(unsigned N) { return (unsigned)uniform(0,N); } +inline unsigned RNG::operator ()() { return next(); } + +inline int RNG::uniform(int a, int b) { return a == b ? a : (int)(next() % (b - a) + a); } +inline float RNG::uniform(float a, float b) { return ((float)*this)*(b - a) + a; } +inline double RNG::uniform(double a, double b) { return ((double)*this)*(b - a) + a; } + +inline bool RNG::operator ==(const RNG& other) const { return state == other.state; } + +inline unsigned RNG::next() +{ + state = (uint64)(unsigned)state* /*CV_RNG_COEFF*/ 4164903690U + (unsigned)(state >> 32); + return (unsigned)state; +} + +//! returns the next uniformly-distributed random number of the specified type +template static inline _Tp randu() +{ + return (_Tp)theRNG(); +} + +///////////////////////////////// Formatted string generation ///////////////////////////////// + +/** @brief Returns a text string formatted using the printf-like expression. + +The function acts like sprintf but forms and returns an STL string. It can be used to form an error +message in the Exception constructor. +@param fmt printf-compatible formatting specifiers. + */ +CV_EXPORTS String format( const char* fmt, ... 
);
+
+///////////////////////////////// Formatted output of cv::Mat /////////////////////////////////
+
+static inline
+Ptr<Formatted> format(InputArray mtx, int fmt)
+{
+    return Formatter::get(fmt)->format(mtx.getMat());
+}
+
+static inline
+int print(Ptr<Formatted> fmtd, FILE* stream = stdout)
+{
+    int written = 0;
+    fmtd->reset();
+    for(const char* str = fmtd->next(); str; str = fmtd->next())
+        written += fputs(str, stream);
+
+    return written;
+}
+
+static inline
+int print(const Mat& mtx, FILE* stream = stdout)
+{
+    return print(Formatter::get()->format(mtx), stream);
+}
+
+static inline
+int print(const UMat& mtx, FILE* stream = stdout)
+{
+    return print(Formatter::get()->format(mtx.getMat(ACCESS_READ)), stream);
+}
+
+template<typename _Tp> static inline
+int print(const std::vector<Point_<_Tp> >& vec, FILE* stream = stdout)
+{
+    return print(Formatter::get()->format(Mat(vec)), stream);
+}
+
+template<typename _Tp> static inline
+int print(const std::vector<Point3_<_Tp> >& vec, FILE* stream = stdout)
+{
+    return print(Formatter::get()->format(Mat(vec)), stream);
+}
+
+template<typename _Tp, int m, int n> static inline
+int print(const Matx<_Tp, m, n>& matx, FILE* stream = stdout)
+{
+    return print(Formatter::get()->format(cv::Mat(matx)), stream);
+}
+
+//! @endcond
+
+/****************************************************************************************\
+* Auxiliary algorithms *
+\****************************************************************************************/
+
+/** @brief Splits an element set into equivalency classes.
+
+The generic function partition implements an \f$O(N^2)\f$ algorithm for splitting a set of \f$N\f$ elements
+into one or more equivalency classes, as described in
+<http://en.wikipedia.org/wiki/Disjoint-set_data_structure> . The function returns the number of
+equivalency classes.
+@param _vec Set of elements stored as a vector.
+@param labels Output vector of labels. It contains as many elements as vec. Each label labels[i] is
+a 0-based cluster index of `vec[i]`.
+@param predicate Equivalence predicate (pointer to a boolean function of two arguments or an
+instance of the class that has the method bool operator()(const _Tp& a, const _Tp& b) ). The
+predicate returns true when the elements are certainly in the same class, and returns false if they
+may or may not be in the same class.
+@ingroup core_cluster +*/ +template int +partition( const std::vector<_Tp>& _vec, std::vector& labels, + _EqPredicate predicate=_EqPredicate()) +{ + int i, j, N = (int)_vec.size(); + const _Tp* vec = &_vec[0]; + + const int PARENT=0; + const int RANK=1; + + std::vector _nodes(N*2); + int (*nodes)[2] = (int(*)[2])&_nodes[0]; + + // The first O(N) pass: create N single-vertex trees + for(i = 0; i < N; i++) + { + nodes[i][PARENT]=-1; + nodes[i][RANK] = 0; + } + + // The main O(N^2) pass: merge connected components + for( i = 0; i < N; i++ ) + { + int root = i; + + // find root + while( nodes[root][PARENT] >= 0 ) + root = nodes[root][PARENT]; + + for( j = 0; j < N; j++ ) + { + if( i == j || !predicate(vec[i], vec[j])) + continue; + int root2 = j; + + while( nodes[root2][PARENT] >= 0 ) + root2 = nodes[root2][PARENT]; + + if( root2 != root ) + { + // unite both trees + int rank = nodes[root][RANK], rank2 = nodes[root2][RANK]; + if( rank > rank2 ) + nodes[root2][PARENT] = root; + else + { + nodes[root][PARENT] = root2; + nodes[root2][RANK] += rank == rank2; + root = root2; + } + CV_Assert( nodes[root][PARENT] < 0 ); + + int k = j, parent; + + // compress the path from node2 to root + while( (parent = nodes[k][PARENT]) >= 0 ) + { + nodes[k][PARENT] = root; + k = parent; + } + + // compress the path from node to root + k = i; + while( (parent = nodes[k][PARENT]) >= 0 ) + { + nodes[k][PARENT] = root; + k = parent; + } + } + } + } + + // Final O(N) pass: enumerate classes + labels.resize(N); + int nclasses = 0; + + for( i = 0; i < N; i++ ) + { + int root = i; + while( nodes[root][PARENT] >= 0 ) + root = nodes[root][PARENT]; + // re-use the rank as the class label + if( nodes[root][RANK] >= 0 ) + nodes[root][RANK] = ~nclasses++; + labels[i] = ~nodes[root][RANK]; + } + + return nclasses; +} + +} // cv + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/optim.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/optim.hpp new file mode 100644 index 00000000..5a094003 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/optim.hpp @@ -0,0 +1,302 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the OpenCV Foundation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_OPTIM_HPP +#define OPENCV_OPTIM_HPP + +#include "opencv2/core.hpp" + +namespace cv +{ + +/** @addtogroup core_optim +The algorithms in this section minimize or maximize function value within specified constraints or +without any constraints. +@{ +*/ + +/** @brief Basic interface for all solvers + */ +class CV_EXPORTS MinProblemSolver : public Algorithm +{ +public: + /** @brief Represents function being optimized + */ + class CV_EXPORTS Function + { + public: + virtual ~Function() {} + virtual int getDims() const = 0; + virtual double getGradientEps() const; + virtual double calc(const double* x) const = 0; + virtual void getGradient(const double* x,double* grad); + }; + + /** @brief Getter for the optimized function. + + The optimized function is represented by Function interface, which requires derivatives to + implement the calc(double*) and getDim() methods to evaluate the function. + + @return Smart-pointer to an object that implements Function interface - it represents the + function that is being optimized. It can be empty, if no function was given so far. + */ + virtual Ptr getFunction() const = 0; + + /** @brief Setter for the optimized function. + + *It should be called at least once before the call to* minimize(), as default value is not usable. + + @param f The new function to optimize. + */ + virtual void setFunction(const Ptr& f) = 0; + + /** @brief Getter for the previously set terminal criteria for this algorithm. + + @return Deep copy of the terminal criteria used at the moment. + */ + virtual TermCriteria getTermCriteria() const = 0; + + /** @brief Set terminal criteria for solver. + + This method *is not necessary* to be called before the first call to minimize(), as the default + value is sensible. + + Algorithm stops when the number of function evaluations done exceeds termcrit.maxCount, when + the function values at the vertices of simplex are within termcrit.epsilon range or simplex + becomes so small that it can enclosed in a box with termcrit.epsilon sides, whatever comes + first. + @param termcrit Terminal criteria to be used, represented as cv::TermCriteria structure. + */ + virtual void setTermCriteria(const TermCriteria& termcrit) = 0; + + /** @brief actually runs the algorithm and performs the minimization. + + The sole input parameter determines the centroid of the starting simplex (roughly, it tells + where to start), all the others (terminal criteria, initial step, function to be minimized) are + supposed to be set via the setters before the call to this method or the default values (not + always sensible) will be used. + + @param x The initial point, that will become a centroid of an initial simplex. 
After the algorithm + will terminate, it will be set to the point where the algorithm stops, the point of possible + minimum. + @return The value of a function at the point found. + */ + virtual double minimize(InputOutputArray x) = 0; +}; + +/** @brief This class is used to perform the non-linear non-constrained minimization of a function, + +defined on an `n`-dimensional Euclidean space, using the **Nelder-Mead method**, also known as +**downhill simplex method**. The basic idea about the method can be obtained from +. + +It should be noted, that this method, although deterministic, is rather a heuristic and therefore +may converge to a local minima, not necessary a global one. It is iterative optimization technique, +which at each step uses an information about the values of a function evaluated only at `n+1` +points, arranged as a *simplex* in `n`-dimensional space (hence the second name of the method). At +each step new point is chosen to evaluate function at, obtained value is compared with previous +ones and based on this information simplex changes it's shape , slowly moving to the local minimum. +Thus this method is using *only* function values to make decision, on contrary to, say, Nonlinear +Conjugate Gradient method (which is also implemented in optim). + +Algorithm stops when the number of function evaluations done exceeds termcrit.maxCount, when the +function values at the vertices of simplex are within termcrit.epsilon range or simplex becomes so +small that it can enclosed in a box with termcrit.epsilon sides, whatever comes first, for some +defined by user positive integer termcrit.maxCount and positive non-integer termcrit.epsilon. + +@note DownhillSolver is a derivative of the abstract interface +cv::MinProblemSolver, which in turn is derived from the Algorithm interface and is used to +encapsulate the functionality, common to all non-linear optimization algorithms in the optim +module. + +@note term criteria should meet following condition: +@code + termcrit.type == (TermCriteria::MAX_ITER + TermCriteria::EPS) && termcrit.epsilon > 0 && termcrit.maxCount > 0 +@endcode + */ +class CV_EXPORTS DownhillSolver : public MinProblemSolver +{ +public: + /** @brief Returns the initial step that will be used in downhill simplex algorithm. + + @param step Initial step that will be used in algorithm. Note, that although corresponding setter + accepts column-vectors as well as row-vectors, this method will return a row-vector. + @see DownhillSolver::setInitStep + */ + virtual void getInitStep(OutputArray step) const=0; + + /** @brief Sets the initial step that will be used in downhill simplex algorithm. + + Step, together with initial point (given in DownhillSolver::minimize) are two `n`-dimensional + vectors that are used to determine the shape of initial simplex. Roughly said, initial point + determines the position of a simplex (it will become simplex's centroid), while step determines the + spread (size in each dimension) of a simplex. To be more precise, if \f$s,x_0\in\mathbb{R}^n\f$ are + the initial step and initial point respectively, the vertices of a simplex will be: + \f$v_0:=x_0-\frac{1}{2} s\f$ and \f$v_i:=x_0+s_i\f$ for \f$i=1,2,\dots,n\f$ where \f$s_i\f$ denotes + projections of the initial step of *n*-th coordinate (the result of projection is treated to be + vector given by \f$s_i:=e_i\cdot\left\f$, where \f$e_i\f$ form canonical basis) + + @param step Initial step that will be used in algorithm. 
Roughly said, it determines the spread + (size in each dimension) of an initial simplex. + */ + virtual void setInitStep(InputArray step)=0; + + /** @brief This function returns the reference to the ready-to-use DownhillSolver object. + + All the parameters are optional, so this procedure can be called even without parameters at + all. In this case, the default values will be used. As default value for terminal criteria are + the only sensible ones, MinProblemSolver::setFunction() and DownhillSolver::setInitStep() + should be called upon the obtained object, if the respective parameters were not given to + create(). Otherwise, the two ways (give parameters to createDownhillSolver() or miss them out + and call the MinProblemSolver::setFunction() and DownhillSolver::setInitStep()) are absolutely + equivalent (and will drop the same errors in the same way, should invalid input be detected). + @param f Pointer to the function that will be minimized, similarly to the one you submit via + MinProblemSolver::setFunction. + @param initStep Initial step, that will be used to construct the initial simplex, similarly to the one + you submit via MinProblemSolver::setInitStep. + @param termcrit Terminal criteria to the algorithm, similarly to the one you submit via + MinProblemSolver::setTermCriteria. + */ + static Ptr create(const Ptr& f=Ptr(), + InputArray initStep=Mat_(1,1,0.0), + TermCriteria termcrit=TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5000,0.000001)); +}; + +/** @brief This class is used to perform the non-linear non-constrained minimization of a function +with known gradient, + +defined on an *n*-dimensional Euclidean space, using the **Nonlinear Conjugate Gradient method**. +The implementation was done based on the beautifully clear explanatory article [An Introduction to +the Conjugate Gradient Method Without the Agonizing +Pain](http://www.cs.cmu.edu/~quake-papers/painless-conjugate-gradient.pdf) by Jonathan Richard +Shewchuk. The method can be seen as an adaptation of a standard Conjugate Gradient method (see, for +example ) for numerically solving the +systems of linear equations. + +It should be noted, that this method, although deterministic, is rather a heuristic method and +therefore may converge to a local minima, not necessary a global one. What is even more disastrous, +most of its behaviour is ruled by gradient, therefore it essentially cannot distinguish between +local minima and maxima. Therefore, if it starts sufficiently near to the local maximum, it may +converge to it. Another obvious restriction is that it should be possible to compute the gradient of +a function at any point, thus it is preferable to have analytic expression for gradient and +computational burden should be born by the user. + +The latter responsibility is accomplished via the getGradient method of a +MinProblemSolver::Function interface (which represents function being optimized). This method takes +point a point in *n*-dimensional space (first argument represents the array of coordinates of that +point) and compute its gradient (it should be stored in the second argument as an array). + +@note class ConjGradSolver thus does not add any new methods to the basic MinProblemSolver interface. 
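+
+A minimal sketch of such a function and of running the solver (illustrative problem:
+minimize \f$f(x,y)=x^2+y^2\f$):
+@code
+    struct Quadratic : public cv::MinProblemSolver::Function
+    {
+        int getDims() const { return 2; }
+        double calc(const double* x) const { return x[0]*x[0] + x[1]*x[1]; }
+        void getGradient(const double* x, double* grad)
+        { grad[0] = 2*x[0]; grad[1] = 2*x[1]; }
+    };
+
+    cv::Ptr<cv::ConjGradSolver> solver = cv::ConjGradSolver::create(cv::makePtr<Quadratic>());
+    cv::Mat x = (cv::Mat_<double>(1, 2) << 3.0, -4.0); // starting point, updated in place
+    double fmin = solver->minimize(x);                  // fmin approaches 0 at the minimum
+@endcode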
+ +@note term criteria should meet following condition: +@code + termcrit.type == (TermCriteria::MAX_ITER + TermCriteria::EPS) && termcrit.epsilon > 0 && termcrit.maxCount > 0 + // or + termcrit.type == TermCriteria::MAX_ITER) && termcrit.maxCount > 0 +@endcode + */ +class CV_EXPORTS ConjGradSolver : public MinProblemSolver +{ +public: + /** @brief This function returns the reference to the ready-to-use ConjGradSolver object. + + All the parameters are optional, so this procedure can be called even without parameters at + all. In this case, the default values will be used. As default value for terminal criteria are + the only sensible ones, MinProblemSolver::setFunction() should be called upon the obtained + object, if the function was not given to create(). Otherwise, the two ways (submit it to + create() or miss it out and call the MinProblemSolver::setFunction()) are absolutely equivalent + (and will drop the same errors in the same way, should invalid input be detected). + @param f Pointer to the function that will be minimized, similarly to the one you submit via + MinProblemSolver::setFunction. + @param termcrit Terminal criteria to the algorithm, similarly to the one you submit via + MinProblemSolver::setTermCriteria. + */ + static Ptr create(const Ptr& f=Ptr(), + TermCriteria termcrit=TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5000,0.000001)); +}; + +//! return codes for cv::solveLP() function +enum SolveLPResult +{ + SOLVELP_UNBOUNDED = -2, //!< problem is unbounded (target function can achieve arbitrary high values) + SOLVELP_UNFEASIBLE = -1, //!< problem is unfeasible (there are no points that satisfy all the constraints imposed) + SOLVELP_SINGLE = 0, //!< there is only one maximum for target function + SOLVELP_MULTI = 1 //!< there are multiple maxima for target function - the arbitrary one is returned +}; + +/** @brief Solve given (non-integer) linear programming problem using the Simplex Algorithm (Simplex Method). + +What we mean here by "linear programming problem" (or LP problem, for short) can be formulated as: + +\f[\mbox{Maximize } c\cdot x\\ + \mbox{Subject to:}\\ + Ax\leq b\\ + x\geq 0\f] + +Where \f$c\f$ is fixed `1`-by-`n` row-vector, \f$A\f$ is fixed `m`-by-`n` matrix, \f$b\f$ is fixed `m`-by-`1` +column vector and \f$x\f$ is an arbitrary `n`-by-`1` column vector, which satisfies the constraints. + +Simplex algorithm is one of many algorithms that are designed to handle this sort of problems +efficiently. Although it is not optimal in theoretical sense (there exist algorithms that can solve +any problem written as above in polynomial time, while simplex method degenerates to exponential +time for some special cases), it is well-studied, easy to implement and is shown to work well for +real-life purposes. + +The particular implementation is taken almost verbatim from **Introduction to Algorithms, third +edition** by T. H. Cormen, C. E. Leiserson, R. L. Rivest and Clifford Stein. In particular, the +Bland's rule is used to prevent cycling. + +@param Func This row-vector corresponds to \f$c\f$ in the LP problem formulation (see above). It should +contain 32- or 64-bit floating point numbers. As a convenience, column-vector may be also submitted, +in the latter case it is understood to correspond to \f$c^T\f$. +@param Constr `m`-by-`n+1` matrix, whose rightmost column corresponds to \f$b\f$ in formulation above +and the remaining to \f$A\f$. It should contain 32- or 64-bit floating point numbers. 
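+
+For illustration, the problem "maximize \f$x+y\f$ subject to \f$x\le1,\ y\le1,\ x,y\ge0\f$"
+could be set up as follows (a minimal sketch with illustrative values):
+@code
+    cv::Mat Func   = (cv::Mat_<double>(1, 2) << 1, 1);    // c
+    cv::Mat Constr = (cv::Mat_<double>(2, 3) << 1, 0, 1,  // [ A | b ]
+                                                 0, 1, 1);
+    cv::Mat z;
+    int res = cv::solveLP(Func, Constr, z);               // expect SOLVELP_SINGLE with z = (1, 1)^T
+@endcode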
+@param z The solution will be returned here as a column-vector - it corresponds to \f$c\f$ in the +formulation above. It will contain 64-bit floating point numbers. +@return One of cv::SolveLPResult + */ +CV_EXPORTS_W int solveLP(const Mat& Func, const Mat& Constr, Mat& z); + +//! @} + +}// cv + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/ovx.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/ovx.hpp new file mode 100644 index 00000000..8bb7d549 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/ovx.hpp @@ -0,0 +1,28 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +// Copyright (C) 2016, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. + +// OpenVX related definitions and declarations + +#pragma once +#ifndef OPENCV_OVX_HPP +#define OPENCV_OVX_HPP + +#include "cvdef.h" + +namespace cv +{ +/// Check if use of OpenVX is possible +CV_EXPORTS_W bool haveOpenVX(); + +/// Check if use of OpenVX is enabled +CV_EXPORTS_W bool useOpenVX(); + +/// Enable/disable use of OpenVX +CV_EXPORTS_W void setUseOpenVX(bool flag); +} // namespace cv + +#endif // OPENCV_OVX_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/persistence.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/persistence.hpp new file mode 100644 index 00000000..e3026e3c --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/persistence.hpp @@ -0,0 +1,1383 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_PERSISTENCE_HPP +#define OPENCV_CORE_PERSISTENCE_HPP + +#ifndef CV_DOXYGEN +/// Define to support persistence legacy formats +#define CV__LEGACY_PERSISTENCE +#endif + +#ifndef __cplusplus +# error persistence.hpp header must be compiled as C++ +#endif + +//! @addtogroup core_c +//! @{ + +/** @brief "black box" representation of the file storage associated with a file on disk. + +Several functions that are described below take CvFileStorage\* as inputs and allow the user to +save or to load hierarchical collections that consist of scalar values, standard CXCore objects +(such as matrices, sequences, graphs), and user-defined objects. + +OpenCV can read and write data in XML (), YAML () or +JSON () formats. Below is an example of 3x3 floating-point identity matrix A, +stored in XML and YAML files +using CXCore functions: +XML: +@code{.xml} + + + + 3 + 3 +
+  <dt>f</dt>
+  <data>
+    1. 0. 0. 0. 1. 0. 0. 0. 1.</data>
+</A>
+</opencv_storage>
+@endcode +YAML: +@code{.yaml} + %YAML:1.0 + A: !!opencv-matrix + rows: 3 + cols: 3 + dt: f + data: [ 1., 0., 0., 0., 1., 0., 0., 0., 1.] +@endcode +As it can be seen from the examples, XML uses nested tags to represent hierarchy, while YAML uses +indentation for that purpose (similar to the Python programming language). + +The same functions can read and write data in both formats; the particular format is determined by +the extension of the opened file, ".xml" for XML files, ".yml" or ".yaml" for YAML and ".json" for +JSON. + */ +typedef struct CvFileStorage CvFileStorage; +typedef struct CvFileNode CvFileNode; +typedef struct CvMat CvMat; +typedef struct CvMatND CvMatND; + +//! @} core_c + +#include "opencv2/core/types.hpp" +#include "opencv2/core/mat.hpp" + +namespace cv { + +/** @addtogroup core_xml + +XML/YAML/JSON file storages. {#xml_storage} +======================= +Writing to a file storage. +-------------------------- +You can store and then restore various OpenCV data structures to/from XML (), +YAML () or JSON () formats. Also, it is possible to store +and load arbitrarily complex data structures, which include OpenCV data structures, as well as +primitive data types (integer and floating-point numbers and text strings) as their elements. + +Use the following procedure to write something to XML, YAML or JSON: +-# Create new FileStorage and open it for writing. It can be done with a single call to +FileStorage::FileStorage constructor that takes a filename, or you can use the default constructor +and then call FileStorage::open. Format of the file (XML, YAML or JSON) is determined from the filename +extension (".xml", ".yml"/".yaml" and ".json", respectively) +-# Write all the data you want using the streaming operator `<<`, just like in the case of STL +streams. +-# Close the file using FileStorage::release. FileStorage destructor also closes the file. + +Here is an example: +@code + #include "opencv2/opencv.hpp" + #include + + using namespace cv; + + int main(int, char** argv) + { + FileStorage fs("test.yml", FileStorage::WRITE); + + fs << "frameCount" << 5; + time_t rawtime; time(&rawtime); + fs << "calibrationDate" << asctime(localtime(&rawtime)); + Mat cameraMatrix = (Mat_(3,3) << 1000, 0, 320, 0, 1000, 240, 0, 0, 1); + Mat distCoeffs = (Mat_(5,1) << 0.1, 0.01, -0.001, 0, 0); + fs << "cameraMatrix" << cameraMatrix << "distCoeffs" << distCoeffs; + fs << "features" << "["; + for( int i = 0; i < 3; i++ ) + { + int x = rand() % 640; + int y = rand() % 480; + uchar lbp = rand() % 256; + + fs << "{:" << "x" << x << "y" << y << "lbp" << "[:"; + for( int j = 0; j < 8; j++ ) + fs << ((lbp >> j) & 1); + fs << "]" << "}"; + } + fs << "]"; + fs.release(); + return 0; + } +@endcode +The sample above stores to YML an integer, a text string (calibration date), 2 matrices, and a custom +structure "feature", which includes feature coordinates and LBP (local binary pattern) value. Here +is output of the sample: +@code{.yaml} +%YAML:1.0 +frameCount: 5 +calibrationDate: "Fri Jun 17 14:09:29 2011\n" +cameraMatrix: !!opencv-matrix + rows: 3 + cols: 3 + dt: d + data: [ 1000., 0., 320., 0., 1000., 240., 0., 0., 1. ] +distCoeffs: !!opencv-matrix + rows: 5 + cols: 1 + dt: d + data: [ 1.0000000000000001e-01, 1.0000000000000000e-02, + -1.0000000000000000e-03, 0., 0. 
] +features: + - { x:167, y:49, lbp:[ 1, 0, 0, 1, 1, 0, 1, 1 ] } + - { x:298, y:130, lbp:[ 0, 0, 0, 1, 0, 0, 1, 1 ] } + - { x:344, y:158, lbp:[ 1, 1, 0, 0, 0, 0, 1, 0 ] } +@endcode + +As an exercise, you can replace ".yml" with ".xml" or ".json" in the sample above and see, how the +corresponding XML file will look like. + +Several things can be noted by looking at the sample code and the output: + +- The produced YAML (and XML/JSON) consists of heterogeneous collections that can be nested. There are + 2 types of collections: named collections (mappings) and unnamed collections (sequences). In mappings + each element has a name and is accessed by name. This is similar to structures and std::map in + C/C++ and dictionaries in Python. In sequences elements do not have names, they are accessed by + indices. This is similar to arrays and std::vector in C/C++ and lists, tuples in Python. + "Heterogeneous" means that elements of each single collection can have different types. + + Top-level collection in YAML/XML/JSON is a mapping. Each matrix is stored as a mapping, and the matrix + elements are stored as a sequence. Then, there is a sequence of features, where each feature is + represented a mapping, and lbp value in a nested sequence. + +- When you write to a mapping (a structure), you write element name followed by its value. When you + write to a sequence, you simply write the elements one by one. OpenCV data structures (such as + cv::Mat) are written in absolutely the same way as simple C data structures - using `<<` + operator. + +- To write a mapping, you first write the special string `{` to the storage, then write the + elements as pairs (`fs << << `) and then write the closing + `}`. + +- To write a sequence, you first write the special string `[`, then write the elements, then + write the closing `]`. + +- In YAML/JSON (but not XML), mappings and sequences can be written in a compact Python-like inline + form. In the sample above matrix elements, as well as each feature, including its lbp value, is + stored in such inline form. To store a mapping/sequence in a compact form, put `:` after the + opening character, e.g. use `{:` instead of `{` and `[:` instead of `[`. When the + data is written to XML, those extra `:` are ignored. + +Reading data from a file storage. +--------------------------------- +To read the previously written XML, YAML or JSON file, do the following: +-# Open the file storage using FileStorage::FileStorage constructor or FileStorage::open method. + In the current implementation the whole file is parsed and the whole representation of file + storage is built in memory as a hierarchy of file nodes (see FileNode) + +-# Read the data you are interested in. Use FileStorage::operator [], FileNode::operator [] + and/or FileNodeIterator. + +-# Close the storage using FileStorage::release. + +Here is how to read the file created by the code sample above: +@code + FileStorage fs2("test.yml", FileStorage::READ); + + // first method: use (type) operator on FileNode. 
+ int frameCount = (int)fs2["frameCount"]; + + String date; + // second method: use FileNode::operator >> + fs2["calibrationDate"] >> date; + + Mat cameraMatrix2, distCoeffs2; + fs2["cameraMatrix"] >> cameraMatrix2; + fs2["distCoeffs"] >> distCoeffs2; + + cout << "frameCount: " << frameCount << endl + << "calibration date: " << date << endl + << "camera matrix: " << cameraMatrix2 << endl + << "distortion coeffs: " << distCoeffs2 << endl; + + FileNode features = fs2["features"]; + FileNodeIterator it = features.begin(), it_end = features.end(); + int idx = 0; + std::vector lbpval; + + // iterate through a sequence using FileNodeIterator + for( ; it != it_end; ++it, idx++ ) + { + cout << "feature #" << idx << ": "; + cout << "x=" << (int)(*it)["x"] << ", y=" << (int)(*it)["y"] << ", lbp: ("; + // you can also easily read numerical arrays using FileNode >> std::vector operator. + (*it)["lbp"] >> lbpval; + for( int i = 0; i < (int)lbpval.size(); i++ ) + cout << " " << (int)lbpval[i]; + cout << ")" << endl; + } + fs2.release(); +@endcode + +Format specification {#format_spec} +-------------------- +`([count]{u|c|w|s|i|f|d})`... where the characters correspond to fundamental C++ types: +- `u` 8-bit unsigned number +- `c` 8-bit signed number +- `w` 16-bit unsigned number +- `s` 16-bit signed number +- `i` 32-bit signed number +- `f` single precision floating-point number +- `d` double precision floating-point number +- `r` pointer, 32 lower bits of which are written as a signed integer. The type can be used to + store structures with links between the elements. + +`count` is the optional counter of values of a given type. For example, `2if` means that each array +element is a structure of 2 integers, followed by a single-precision floating-point number. The +equivalent notations of the above specification are `iif`, `2i1f` and so forth. Other examples: `u` +means that the array consists of bytes, and `2d` means the array consists of pairs of doubles. + +@see @ref samples/cpp/filestorage.cpp +*/ + +//! @{ + +/** @example samples/cpp/filestorage.cpp +A complete example using the FileStorage interface +*/ + +////////////////////////// XML & YAML I/O ////////////////////////// + +class CV_EXPORTS FileNode; +class CV_EXPORTS FileNodeIterator; + +/** @brief XML/YAML/JSON file storage class that encapsulates all the information necessary for writing or +reading data to/from a file. + */ +class CV_EXPORTS_W FileStorage +{ +public: + //! file storage mode + enum Mode + { + READ = 0, //!< value, open the file for reading + WRITE = 1, //!< value, open the file for writing + APPEND = 2, //!< value, open the file for appending + MEMORY = 4, //!< flag, read data from source or write data to the internal buffer (which is + //!< returned by FileStorage::release) + FORMAT_MASK = (7<<3), //!< mask for format flags + FORMAT_AUTO = 0, //!< flag, auto format + FORMAT_XML = (1<<3), //!< flag, XML format + FORMAT_YAML = (2<<3), //!< flag, YAML format + FORMAT_JSON = (3<<3), //!< flag, JSON format + + BASE64 = 64, //!< flag, write rawdata in Base64 by default. (consider using WRITE_BASE64) + WRITE_BASE64 = BASE64 | WRITE, //!< flag, enable both WRITE and BASE64 + }; + enum + { + UNDEFINED = 0, + VALUE_EXPECTED = 1, + NAME_EXPECTED = 2, + INSIDE_MAP = 4 + }; + + /** @brief The constructors. + + The full constructor opens the file. Alternatively you can use the default constructor and then + call FileStorage::open. 
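+
+    A minimal sketch of the second form (illustrative file name):
+    @code
+        cv::FileStorage fs;
+        if (!fs.open("params.yml", cv::FileStorage::READ))
+        {
+            // the file is missing or could not be parsed
+        }
+    @endcode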
+ */ + CV_WRAP FileStorage(); + + /** @overload + @copydoc open() + */ + CV_WRAP FileStorage(const String& filename, int flags, const String& encoding=String()); + + /** @overload */ + FileStorage(CvFileStorage* fs, bool owning=true); + + //! the destructor. calls release() + virtual ~FileStorage(); + + /** @brief Opens a file. + + See description of parameters in FileStorage::FileStorage. The method calls FileStorage::release + before opening the file. + @param filename Name of the file to open or the text string to read the data from. + Extension of the file (.xml, .yml/.yaml or .json) determines its format (XML, YAML or JSON + respectively). Also you can append .gz to work with compressed files, for example myHugeMatrix.xml.gz. If both + FileStorage::WRITE and FileStorage::MEMORY flags are specified, source is used just to specify + the output file format (e.g. mydata.xml, .yml etc.). A file name can also contain parameters. + You can use this format, "*?base64" (e.g. "file.json?base64" (case sensitive)), as an alternative to + FileStorage::BASE64 flag. + @param flags Mode of operation. One of FileStorage::Mode + @param encoding Encoding of the file. Note that UTF-16 XML encoding is not supported currently and + you should use 8-bit encoding instead of it. + */ + CV_WRAP virtual bool open(const String& filename, int flags, const String& encoding=String()); + + /** @brief Checks whether the file is opened. + + @returns true if the object is associated with the current file and false otherwise. It is a + good practice to call this method after you tried to open a file. + */ + CV_WRAP virtual bool isOpened() const; + + /** @brief Closes the file and releases all the memory buffers. + + Call this method after all I/O operations with the storage are finished. + */ + CV_WRAP virtual void release(); + + /** @brief Closes the file and releases all the memory buffers. + + Call this method after all I/O operations with the storage are finished. If the storage was + opened for writing data and FileStorage::WRITE was specified + */ + CV_WRAP virtual String releaseAndGetString(); + + /** @brief Returns the first element of the top-level mapping. + @returns The first element of the top-level mapping. + */ + CV_WRAP FileNode getFirstTopLevelNode() const; + + /** @brief Returns the top-level mapping + @param streamidx Zero-based index of the stream. In most cases there is only one stream in the file. + However, YAML supports multiple streams and so there can be several. + @returns The top-level mapping. + */ + CV_WRAP FileNode root(int streamidx=0) const; + + /** @brief Returns the specified element of the top-level mapping. + @param nodename Name of the file node. + @returns Node with the given name. + */ + FileNode operator[](const String& nodename) const; + + /** @overload */ + CV_WRAP_AS(getNode) FileNode operator[](const char* nodename) const; + + /** @brief Returns the obsolete C FileStorage structure. + @returns Pointer to the underlying C FileStorage structure + */ + CvFileStorage* operator *() { return fs.get(); } + + /** @overload */ + const CvFileStorage* operator *() const { return fs.get(); } + + /** @brief Writes multiple numbers. + + Writes one or more numbers of the specified format to the currently written structure. Usually it is + more convenient to use operator `<<` instead of this method. + @param fmt Specification of each array element, see @ref format_spec "format specification" + @param vec Pointer to the written array. + @param len Number of the uchar elements to write. 
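+
+    A minimal sketch (illustrative data, assuming `fs` is opened for writing): store four
+    ints as one compact sequence.
+    @code
+        int vals[4] = { 1, 2, 3, 4 };
+        fs << "vals" << "[:";
+        fs.writeRaw("i", (const uchar*)vals, sizeof(vals)); // len is given in bytes
+        fs << "]";
+    @endcode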
+ */ + void writeRaw( const String& fmt, const uchar* vec, size_t len ); + + /** @brief Writes the registered C structure (CvMat, CvMatND, CvSeq). + @param name Name of the written object. + @param obj Pointer to the object. + @see cvWrite for details. + */ + void writeObj( const String& name, const void* obj ); + + /** + * @brief Simplified writing API to use with bindings. + * @param name Name of the written object + * @param val Value of the written object + */ + CV_WRAP void write(const String& name, int val); + /// @overload + CV_WRAP void write(const String& name, double val); + /// @overload + CV_WRAP void write(const String& name, const String& val); + /// @overload + CV_WRAP void write(const String& name, InputArray val); + + /** @brief Writes a comment. + + The function writes a comment into file storage. The comments are skipped when the storage is read. + @param comment The written comment, single-line or multi-line + @param append If true, the function tries to put the comment at the end of current line. + Else if the comment is multi-line, or if it does not fit at the end of the current + line, the comment starts a new line. + */ + CV_WRAP void writeComment(const String& comment, bool append = false); + + /** @brief Starts to write a nested structure (sequence or a mapping). + @param name name of the structure (if it's a member of parent mapping, otherwise it should be empty + @param flags type of the structure (FileNode::MAP or FileNode::SEQ (both with optional FileNode::FLOW)). + @param typeName usually an empty string + */ + CV_WRAP void startWriteStruct(const String& name, int flags, const String& typeName=String()); + + /** @brief Finishes writing nested structure (should pair startWriteStruct()) + */ + CV_WRAP void endWriteStruct(); + + /** @brief Returns the normalized object name for the specified name of a file. + @param filename Name of a file + @returns The normalized object name. + */ + static String getDefaultObjectName(const String& filename); + + /** @brief Returns the current format. + * @returns The current format, see FileStorage::Mode + */ + CV_WRAP int getFormat() const; + + Ptr fs; //!< the underlying C FileStorage structure + String elname; //!< the currently written element + std::vector structs; //!< the stack of written structures + int state; //!< the writer state +}; + +template<> CV_EXPORTS void DefaultDeleter::operator ()(CvFileStorage* obj) const; + +/** @brief File Storage Node class. + +The node is used to store each and every element of the file storage opened for reading. When +XML/YAML file is read, it is first parsed and stored in the memory as a hierarchical collection of +nodes. Each node can be a "leaf" that is contain a single number or a string, or be a collection of +other nodes. There can be named collections (mappings) where each element has a name and it is +accessed by a name, and ordered collections (sequences) where elements do not have names but rather +accessed by index. Type of the file node can be determined using FileNode::type method. + +Note that file nodes are only used for navigating file storages opened for reading. When a file +storage is opened for writing, no data is stored in memory after it is written. + */ +class CV_EXPORTS_W_SIMPLE FileNode +{ +public: + //! 
type of the file storage node + enum Type + { + NONE = 0, //!< empty node + INT = 1, //!< an integer + REAL = 2, //!< floating-point number + FLOAT = REAL, //!< synonym or REAL + STR = 3, //!< text string in UTF-8 encoding + STRING = STR, //!< synonym for STR + REF = 4, //!< integer of size size_t. Typically used for storing complex dynamic structures where some elements reference the others + SEQ = 5, //!< sequence + MAP = 6, //!< mapping + TYPE_MASK = 7, + FLOW = 8, //!< compact representation of a sequence or mapping. Used only by YAML writer + USER = 16, //!< a registered object (e.g. a matrix) + EMPTY = 32, //!< empty structure (sequence or mapping) + NAMED = 64 //!< the node has a name (i.e. it is element of a mapping) + }; + /** @brief The constructors. + + These constructors are used to create a default file node, construct it from obsolete structures or + from the another file node. + */ + CV_WRAP FileNode(); + + /** @overload + @param fs Pointer to the obsolete file storage structure. + @param node File node to be used as initialization for the created file node. + */ + FileNode(const CvFileStorage* fs, const CvFileNode* node); + + /** @overload + @param node File node to be used as initialization for the created file node. + */ + FileNode(const FileNode& node); + + FileNode& operator=(const FileNode& node); + + /** @brief Returns element of a mapping node or a sequence node. + @param nodename Name of an element in the mapping node. + @returns Returns the element with the given identifier. + */ + FileNode operator[](const String& nodename) const; + + /** @overload + @param nodename Name of an element in the mapping node. + */ + CV_WRAP_AS(getNode) FileNode operator[](const char* nodename) const; + + /** @overload + @param i Index of an element in the sequence node. + */ + CV_WRAP_AS(at) FileNode operator[](int i) const; + + /** @brief Returns keys of a mapping node. + @returns Keys of a mapping node. + */ + CV_WRAP std::vector keys() const; + + /** @brief Returns type of the node. + @returns Type of the node. See FileNode::Type + */ + CV_WRAP int type() const; + + //! returns true if the node is empty + CV_WRAP bool empty() const; + //! returns true if the node is a "none" object + CV_WRAP bool isNone() const; + //! returns true if the node is a sequence + CV_WRAP bool isSeq() const; + //! returns true if the node is a mapping + CV_WRAP bool isMap() const; + //! returns true if the node is an integer + CV_WRAP bool isInt() const; + //! returns true if the node is a floating-point number + CV_WRAP bool isReal() const; + //! returns true if the node is a text string + CV_WRAP bool isString() const; + //! returns true if the node has a name + CV_WRAP bool isNamed() const; + //! returns the node name or an empty string if the node is nameless + CV_WRAP String name() const; + //! returns the number of elements in the node, if it is a sequence or mapping, or 1 otherwise. + CV_WRAP size_t size() const; + //! returns the node content as an integer. If the node stores floating-point number, it is rounded. + operator int() const; + //! returns the node content as float + operator float() const; + //! returns the node content as double + operator double() const; + //! returns the node content as text string + operator String() const; + operator std::string() const; + + //! returns pointer to the underlying file node + CvFileNode* operator *(); + //! returns pointer to the underlying file node + const CvFileNode* operator* () const; + + //! 
returns iterator pointing to the first node element + FileNodeIterator begin() const; + //! returns iterator pointing to the element following the last node element + FileNodeIterator end() const; + + /** @brief Reads node elements to the buffer with the specified format. + + Usually it is more convenient to use operator `>>` instead of this method. + @param fmt Specification of each array element. See @ref format_spec "format specification" + @param vec Pointer to the destination array. + @param len Number of bytes to read (buffer size limit). If it is greater than number of + remaining elements then all of them will be read. + */ + void readRaw( const String& fmt, uchar* vec, size_t len ) const; + + //! reads the registered object and returns pointer to it + void* readObj() const; + + //! Simplified reading API to use with bindings. + CV_WRAP double real() const; + //! Simplified reading API to use with bindings. + CV_WRAP String string() const; + //! Simplified reading API to use with bindings. + CV_WRAP Mat mat() const; + + // do not use wrapper pointer classes for better efficiency + const CvFileStorage* fs; + const CvFileNode* node; +}; + + +/** @brief used to iterate through sequences and mappings. + +A standard STL notation, with node.begin(), node.end() denoting the beginning and the end of a +sequence, stored in node. See the data reading sample in the beginning of the section. + */ +class CV_EXPORTS FileNodeIterator +{ +public: + /** @brief The constructors. + + These constructors are used to create a default iterator, set it to specific element in a file node + or construct it from another iterator. + */ + FileNodeIterator(); + + /** @overload + @param fs File storage for the iterator. + @param node File node for the iterator. + @param ofs Index of the element in the node. The created iterator will point to this element. + */ + FileNodeIterator(const CvFileStorage* fs, const CvFileNode* node, size_t ofs=0); + + /** @overload + @param it Iterator to be used as initialization for the created iterator. + */ + FileNodeIterator(const FileNodeIterator& it); + + FileNodeIterator& operator=(const FileNodeIterator& it); + + //! returns the currently observed element + FileNode operator *() const; + //! accesses the currently observed element methods + FileNode operator ->() const; + + //! moves iterator to the next node + FileNodeIterator& operator ++ (); + //! moves iterator to the next node + FileNodeIterator operator ++ (int); + //! moves iterator to the previous node + FileNodeIterator& operator -- (); + //! moves iterator to the previous node + FileNodeIterator operator -- (int); + //! moves iterator forward by the specified offset (possibly negative) + FileNodeIterator& operator += (int ofs); + //! moves iterator backward by the specified offset (possibly negative) + FileNodeIterator& operator -= (int ofs); + + /** @brief Reads node elements to the buffer with the specified format. + + Usually it is more convenient to use operator `>>` instead of this method. + @param fmt Specification of each array element. See @ref format_spec "format specification" + @param vec Pointer to the destination array. + @param len Number of bytes to read (buffer size limit). If it is greater than number of + remaining elements then all of them will be read. 
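+
+    A minimal sketch (illustrative node name, assuming `fs` is opened for reading):
+    @code
+        int vals[4];
+        cv::FileNodeIterator it = fs["vals"].begin();
+        it.readRaw("i", (uchar*)vals, sizeof(vals)); // reads at most sizeof(vals) bytes
+    @endcode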
+ + */ + FileNodeIterator& readRaw( const String& fmt, uchar* vec, + size_t len=(size_t)INT_MAX ); + + struct SeqReader + { + int header_size; + void* seq; /* sequence, beign read; CvSeq */ + void* block; /* current block; CvSeqBlock */ + schar* ptr; /* pointer to element be read next */ + schar* block_min; /* pointer to the beginning of block */ + schar* block_max; /* pointer to the end of block */ + int delta_index;/* = seq->first->start_index */ + schar* prev_elem; /* pointer to previous element */ + }; + + const CvFileStorage* fs; + const CvFileNode* container; + SeqReader reader; + size_t remaining; +}; + +//! @} core_xml + +/////////////////// XML & YAML I/O implementation ////////////////// + +//! @relates cv::FileStorage +//! @{ + +CV_EXPORTS void write( FileStorage& fs, const String& name, int value ); +CV_EXPORTS void write( FileStorage& fs, const String& name, float value ); +CV_EXPORTS void write( FileStorage& fs, const String& name, double value ); +CV_EXPORTS void write( FileStorage& fs, const String& name, const String& value ); +CV_EXPORTS void write( FileStorage& fs, const String& name, const Mat& value ); +CV_EXPORTS void write( FileStorage& fs, const String& name, const SparseMat& value ); +#ifdef CV__LEGACY_PERSISTENCE +CV_EXPORTS void write( FileStorage& fs, const String& name, const std::vector& value); +CV_EXPORTS void write( FileStorage& fs, const String& name, const std::vector& value); +#endif + +CV_EXPORTS void writeScalar( FileStorage& fs, int value ); +CV_EXPORTS void writeScalar( FileStorage& fs, float value ); +CV_EXPORTS void writeScalar( FileStorage& fs, double value ); +CV_EXPORTS void writeScalar( FileStorage& fs, const String& value ); + +//! @} + +//! @relates cv::FileNode +//! @{ + +CV_EXPORTS void read(const FileNode& node, int& value, int default_value); +CV_EXPORTS void read(const FileNode& node, float& value, float default_value); +CV_EXPORTS void read(const FileNode& node, double& value, double default_value); +CV_EXPORTS void read(const FileNode& node, String& value, const String& default_value); +CV_EXPORTS void read(const FileNode& node, std::string& value, const std::string& default_value); +CV_EXPORTS void read(const FileNode& node, Mat& mat, const Mat& default_mat = Mat() ); +CV_EXPORTS void read(const FileNode& node, SparseMat& mat, const SparseMat& default_mat = SparseMat() ); +#ifdef CV__LEGACY_PERSISTENCE +CV_EXPORTS void read(const FileNode& node, std::vector& keypoints); +CV_EXPORTS void read(const FileNode& node, std::vector& matches); +#endif +CV_EXPORTS void read(const FileNode& node, KeyPoint& value, const KeyPoint& default_value); +CV_EXPORTS void read(const FileNode& node, DMatch& value, const DMatch& default_value); + +template static inline void read(const FileNode& node, Point_<_Tp>& value, const Point_<_Tp>& default_value) +{ + std::vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 2 ? default_value : Point_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1])); +} + +template static inline void read(const FileNode& node, Point3_<_Tp>& value, const Point3_<_Tp>& default_value) +{ + std::vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 3 ? 
default_value : Point3_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]), + saturate_cast<_Tp>(temp[2])); +} + +template static inline void read(const FileNode& node, Size_<_Tp>& value, const Size_<_Tp>& default_value) +{ + std::vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 2 ? default_value : Size_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1])); +} + +template static inline void read(const FileNode& node, Complex<_Tp>& value, const Complex<_Tp>& default_value) +{ + std::vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 2 ? default_value : Complex<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1])); +} + +template static inline void read(const FileNode& node, Rect_<_Tp>& value, const Rect_<_Tp>& default_value) +{ + std::vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 4 ? default_value : Rect_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]), + saturate_cast<_Tp>(temp[2]), saturate_cast<_Tp>(temp[3])); +} + +template static inline void read(const FileNode& node, Vec<_Tp, cn>& value, const Vec<_Tp, cn>& default_value) +{ + std::vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != cn ? default_value : Vec<_Tp, cn>(&temp[0]); +} + +template static inline void read(const FileNode& node, Scalar_<_Tp>& value, const Scalar_<_Tp>& default_value) +{ + std::vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp; + value = temp.size() != 4 ? default_value : Scalar_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]), + saturate_cast<_Tp>(temp[2]), saturate_cast<_Tp>(temp[3])); +} + +static inline void read(const FileNode& node, Range& value, const Range& default_value) +{ + Point2i temp(value.start, value.end); const Point2i default_temp = Point2i(default_value.start, default_value.end); + read(node, temp, default_temp); + value.start = temp.x; value.end = temp.y; +} + +//! @} + +/** @brief Writes string to a file storage. +@relates cv::FileStorage + */ +CV_EXPORTS FileStorage& operator << (FileStorage& fs, const String& str); + +//! @cond IGNORED + +namespace internal +{ + class CV_EXPORTS WriteStructContext + { + public: + WriteStructContext(FileStorage& _fs, const String& name, int flags, const String& typeName = String()); + ~WriteStructContext(); + private: + FileStorage* fs; + }; + + template class VecWriterProxy + { + public: + VecWriterProxy( FileStorage* _fs ) : fs(_fs) {} + void operator()(const std::vector<_Tp>& vec) const + { + size_t count = vec.size(); + for (size_t i = 0; i < count; i++) + write(*fs, vec[i]); + } + private: + FileStorage* fs; + }; + + template class VecWriterProxy<_Tp, 1> + { + public: + VecWriterProxy( FileStorage* _fs ) : fs(_fs) {} + void operator()(const std::vector<_Tp>& vec) const + { + int _fmt = traits::SafeFmt<_Tp>::fmt; + char fmt[] = { (char)((_fmt >> 8) + '1'), (char)_fmt, '\0' }; + fs->writeRaw(fmt, !vec.empty() ? 
(uchar*)&vec[0] : 0, vec.size() * sizeof(_Tp)); + } + private: + FileStorage* fs; + }; + + template class VecReaderProxy + { + public: + VecReaderProxy( FileNodeIterator* _it ) : it(_it) {} + void operator()(std::vector<_Tp>& vec, size_t count) const + { + count = std::min(count, it->remaining); + vec.resize(count); + for (size_t i = 0; i < count; i++, ++(*it)) + read(**it, vec[i], _Tp()); + } + private: + FileNodeIterator* it; + }; + + template class VecReaderProxy<_Tp, 1> + { + public: + VecReaderProxy( FileNodeIterator* _it ) : it(_it) {} + void operator()(std::vector<_Tp>& vec, size_t count) const + { + size_t remaining = it->remaining; + size_t cn = DataType<_Tp>::channels; + int _fmt = traits::SafeFmt<_Tp>::fmt; + CV_Assert((_fmt >> 8) < 9); + char fmt[] = { (char)((_fmt >> 8)+'1'), (char)_fmt, '\0' }; + CV_Assert((remaining % cn) == 0); + size_t remaining1 = remaining / cn; + count = count < remaining1 ? count : remaining1; + vec.resize(count); + it->readRaw(fmt, !vec.empty() ? (uchar*)&vec[0] : 0, count*sizeof(_Tp)); + } + private: + FileNodeIterator* it; + }; + +} // internal + +//! @endcond + +//! @relates cv::FileStorage +//! @{ + +template static inline +void write(FileStorage& fs, const _Tp& value) +{ + write(fs, String(), value); +} + +template<> inline +void write( FileStorage& fs, const int& value ) +{ + writeScalar(fs, value); +} + +template<> inline +void write( FileStorage& fs, const float& value ) +{ + writeScalar(fs, value); +} + +template<> inline +void write( FileStorage& fs, const double& value ) +{ + writeScalar(fs, value); +} + +template<> inline +void write( FileStorage& fs, const String& value ) +{ + writeScalar(fs, value); +} + +template static inline +void write(FileStorage& fs, const Point_<_Tp>& pt ) +{ + write(fs, pt.x); + write(fs, pt.y); +} + +template static inline +void write(FileStorage& fs, const Point3_<_Tp>& pt ) +{ + write(fs, pt.x); + write(fs, pt.y); + write(fs, pt.z); +} + +template static inline +void write(FileStorage& fs, const Size_<_Tp>& sz ) +{ + write(fs, sz.width); + write(fs, sz.height); +} + +template static inline +void write(FileStorage& fs, const Complex<_Tp>& c ) +{ + write(fs, c.re); + write(fs, c.im); +} + +template static inline +void write(FileStorage& fs, const Rect_<_Tp>& r ) +{ + write(fs, r.x); + write(fs, r.y); + write(fs, r.width); + write(fs, r.height); +} + +template static inline +void write(FileStorage& fs, const Vec<_Tp, cn>& v ) +{ + for(int i = 0; i < cn; i++) + write(fs, v.val[i]); +} + +template static inline +void write(FileStorage& fs, const Scalar_<_Tp>& s ) +{ + write(fs, s.val[0]); + write(fs, s.val[1]); + write(fs, s.val[2]); + write(fs, s.val[3]); +} + +static inline +void write(FileStorage& fs, const Range& r ) +{ + write(fs, r.start); + write(fs, r.end); +} + +template static inline +void write( FileStorage& fs, const std::vector<_Tp>& vec ) +{ + cv::internal::VecWriterProxy<_Tp, traits::SafeFmt<_Tp>::fmt != 0> w(&fs); + w(vec); +} + +template static inline +void write(FileStorage& fs, const String& name, const Point_<_Tp>& pt ) +{ + cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW); + write(fs, pt); +} + +template static inline +void write(FileStorage& fs, const String& name, const Point3_<_Tp>& pt ) +{ + cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW); + write(fs, pt); +} + +template static inline +void write(FileStorage& fs, const String& name, const Size_<_Tp>& sz ) +{ + cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW); 
+ write(fs, sz); +} + +template static inline +void write(FileStorage& fs, const String& name, const Complex<_Tp>& c ) +{ + cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW); + write(fs, c); +} + +template static inline +void write(FileStorage& fs, const String& name, const Rect_<_Tp>& r ) +{ + cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW); + write(fs, r); +} + +template static inline +void write(FileStorage& fs, const String& name, const Vec<_Tp, cn>& v ) +{ + cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW); + write(fs, v); +} + +template static inline +void write(FileStorage& fs, const String& name, const Scalar_<_Tp>& s ) +{ + cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW); + write(fs, s); +} + +static inline +void write(FileStorage& fs, const String& name, const Range& r ) +{ + cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW); + write(fs, r); +} + +static inline +void write(FileStorage& fs, const String& name, const KeyPoint& kpt) +{ + cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW); + write(fs, kpt.pt.x); + write(fs, kpt.pt.y); + write(fs, kpt.size); + write(fs, kpt.angle); + write(fs, kpt.response); + write(fs, kpt.octave); + write(fs, kpt.class_id); +} + +static inline +void write(FileStorage& fs, const String& name, const DMatch& m) +{ + cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+FileNode::FLOW); + write(fs, m.queryIdx); + write(fs, m.trainIdx); + write(fs, m.imgIdx); + write(fs, m.distance); +} + +template static inline +void write( FileStorage& fs, const String& name, const std::vector<_Tp>& vec ) +{ + cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ+(traits::SafeFmt<_Tp>::fmt != 0 ? FileNode::FLOW : 0)); + write(fs, vec); +} + +template static inline +void write( FileStorage& fs, const String& name, const std::vector< std::vector<_Tp> >& vec ) +{ + cv::internal::WriteStructContext ws(fs, name, FileNode::SEQ); + for(size_t i = 0; i < vec.size(); i++) + { + cv::internal::WriteStructContext ws_(fs, name, FileNode::SEQ+(traits::SafeFmt<_Tp>::fmt != 0 ? FileNode::FLOW : 0)); + write(fs, vec[i]); + } +} + +#ifdef CV__LEGACY_PERSISTENCE +// This code is not needed anymore, but it is preserved here to keep source compatibility +// Implementation is similar to templates instantiations +static inline void write(FileStorage& fs, const KeyPoint& kpt) { write(fs, String(), kpt); } +static inline void write(FileStorage& fs, const DMatch& m) { write(fs, String(), m); } +static inline void write(FileStorage& fs, const std::vector& vec) +{ + cv::internal::VecWriterProxy w(&fs); + w(vec); +} +static inline void write(FileStorage& fs, const std::vector& vec) +{ + cv::internal::VecWriterProxy w(&fs); + w(vec); + +} +#endif + +//! @} FileStorage + +//! @relates cv::FileNode +//! 
@{ + +static inline +void read(const FileNode& node, bool& value, bool default_value) +{ + int temp; + read(node, temp, (int)default_value); + value = temp != 0; +} + +static inline +void read(const FileNode& node, uchar& value, uchar default_value) +{ + int temp; + read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline +void read(const FileNode& node, schar& value, schar default_value) +{ + int temp; + read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline +void read(const FileNode& node, ushort& value, ushort default_value) +{ + int temp; + read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline +void read(const FileNode& node, short& value, short default_value) +{ + int temp; + read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +template static inline +void read( FileNodeIterator& it, std::vector<_Tp>& vec, size_t maxCount = (size_t)INT_MAX ) +{ + cv::internal::VecReaderProxy<_Tp, traits::SafeFmt<_Tp>::fmt != 0> r(&it); + r(vec, maxCount); +} + +template static inline +void read( const FileNode& node, std::vector<_Tp>& vec, const std::vector<_Tp>& default_value = std::vector<_Tp>() ) +{ + if(!node.node) + vec = default_value; + else + { + FileNodeIterator it = node.begin(); + read( it, vec ); + } +} + +static inline +void read( const FileNode& node, std::vector& vec, const std::vector& default_value ) +{ + if(!node.node) + vec = default_value; + else + read(node, vec); +} + +static inline +void read( const FileNode& node, std::vector& vec, const std::vector& default_value ) +{ + if(!node.node) + vec = default_value; + else + read(node, vec); +} + +//! @} FileNode + +//! @relates cv::FileStorage +//! @{ + +/** @brief Writes data to a file storage. + */ +template static inline +FileStorage& operator << (FileStorage& fs, const _Tp& value) +{ + if( !fs.isOpened() ) + return fs; + if( fs.state == FileStorage::NAME_EXPECTED + FileStorage::INSIDE_MAP ) + CV_Error( Error::StsError, "No element name has been given" ); + write( fs, fs.elname, value ); + if( fs.state & FileStorage::INSIDE_MAP ) + fs.state = FileStorage::NAME_EXPECTED + FileStorage::INSIDE_MAP; + return fs; +} + +/** @brief Writes data to a file storage. + */ +static inline +FileStorage& operator << (FileStorage& fs, const char* str) +{ + return (fs << String(str)); +} + +/** @brief Writes data to a file storage. + */ +static inline +FileStorage& operator << (FileStorage& fs, char* value) +{ + return (fs << String(value)); +} + +//! @} FileStorage + +//! @relates cv::FileNodeIterator +//! @{ + +/** @brief Reads data from a file storage. + */ +template static inline +FileNodeIterator& operator >> (FileNodeIterator& it, _Tp& value) +{ + read( *it, value, _Tp()); + return ++it; +} + +/** @brief Reads data from a file storage. + */ +template static inline +FileNodeIterator& operator >> (FileNodeIterator& it, std::vector<_Tp>& vec) +{ + cv::internal::VecReaderProxy<_Tp, traits::SafeFmt<_Tp>::fmt != 0> r(&it); + r(vec, (size_t)INT_MAX); + return it; +} + +//! @} FileNodeIterator + +//! @relates cv::FileNode +//! @{ + +/** @brief Reads data from a file storage. + */ +template static inline +void operator >> (const FileNode& n, _Tp& value) +{ + read( n, value, _Tp()); +} + +/** @brief Reads data from a file storage. + */ +template static inline +void operator >> (const FileNode& n, std::vector<_Tp>& vec) +{ + FileNodeIterator it = n.begin(); + it >> vec; +} + +/** @brief Reads KeyPoint from a file storage. 
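+    For example, a minimal round trip (the file name and values are purely illustrative):
+    @code
+        cv::FileStorage out("kp.yml", cv::FileStorage::WRITE);
+        cv::KeyPoint kp(10.f, 20.f, 3.f);
+        cv::write(out, "kp", kp);   // stored as [x, y, size, angle, response, octave, class_id]
+        out.release();
+
+        cv::FileStorage in("kp.yml", cv::FileStorage::READ);
+        cv::KeyPoint kp2;
+        in["kp"] >> kp2;            // parsed by the operator defined below
+    @endcode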
+*/ +//It needs special handling because it contains two types of fields, int & float. +static inline +void operator >> (const FileNode& n, KeyPoint& kpt) +{ + FileNodeIterator it = n.begin(); + it >> kpt.pt.x >> kpt.pt.y >> kpt.size >> kpt.angle >> kpt.response >> kpt.octave >> kpt.class_id; +} + +#ifdef CV__LEGACY_PERSISTENCE +static inline +void operator >> (const FileNode& n, std::vector& vec) +{ + read(n, vec); +} +static inline +void operator >> (const FileNode& n, std::vector& vec) +{ + read(n, vec); +} +#endif + +/** @brief Reads DMatch from a file storage. +*/ +//It needs special handling because it contains two types of fields, int & float. +static inline +void operator >> (const FileNode& n, DMatch& m) +{ + FileNodeIterator it = n.begin(); + it >> m.queryIdx >> m.trainIdx >> m.imgIdx >> m.distance; +} + +//! @} FileNode + +//! @relates cv::FileNodeIterator +//! @{ + +static inline +bool operator == (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return it1.fs == it2.fs && it1.container == it2.container && + it1.reader.ptr == it2.reader.ptr && it1.remaining == it2.remaining; +} + +static inline +bool operator != (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return !(it1 == it2); +} + +static inline +ptrdiff_t operator - (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return it2.remaining - it1.remaining; +} + +static inline +bool operator < (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return it1.remaining > it2.remaining; +} + +//! @} FileNodeIterator + +//! @cond IGNORED + +inline FileNode FileStorage::getFirstTopLevelNode() const { FileNode r = root(); FileNodeIterator it = r.begin(); return it != r.end() ? *it : FileNode(); } +inline FileNode::FileNode() : fs(0), node(0) {} +inline FileNode::FileNode(const CvFileStorage* _fs, const CvFileNode* _node) : fs(_fs), node(_node) {} +inline FileNode::FileNode(const FileNode& _node) : fs(_node.fs), node(_node.node) {} +inline FileNode& FileNode::operator=(const FileNode& _node) { fs = _node.fs; node = _node.node; return *this; } +inline bool FileNode::empty() const { return node == 0; } +inline bool FileNode::isNone() const { return type() == NONE; } +inline bool FileNode::isSeq() const { return type() == SEQ; } +inline bool FileNode::isMap() const { return type() == MAP; } +inline bool FileNode::isInt() const { return type() == INT; } +inline bool FileNode::isReal() const { return type() == REAL; } +inline bool FileNode::isString() const { return type() == STR; } +inline CvFileNode* FileNode::operator *() { return (CvFileNode*)node; } +inline const CvFileNode* FileNode::operator* () const { return node; } +inline FileNode::operator int() const { int value; read(*this, value, 0); return value; } +inline FileNode::operator float() const { float value; read(*this, value, 0.f); return value; } +inline FileNode::operator double() const { double value; read(*this, value, 0.); return value; } +inline FileNode::operator String() const { String value; read(*this, value, value); return value; } +inline double FileNode::real() const { return double(*this); } +inline String FileNode::string() const { return String(*this); } +inline Mat FileNode::mat() const { Mat value; read(*this, value, value); return value; } +inline FileNodeIterator FileNode::begin() const { return FileNodeIterator(fs, node); } +inline FileNodeIterator FileNode::end() const { return FileNodeIterator(fs, node, size()); } +inline void FileNode::readRaw( const String& fmt, uchar* vec, size_t len ) const { 
begin().readRaw( fmt, vec, len ); } +inline FileNode FileNodeIterator::operator *() const { return FileNode(fs, (const CvFileNode*)(const void*)reader.ptr); } +inline FileNode FileNodeIterator::operator ->() const { return FileNode(fs, (const CvFileNode*)(const void*)reader.ptr); } +inline String::String(const FileNode& fn): cstr_(0), len_(0) { read(fn, *this, *this); } + +//! @endcond + + +CV_EXPORTS void cvStartWriteRawData_Base64(::CvFileStorage * fs, const char* name, int len, const char* dt); + +CV_EXPORTS void cvWriteRawData_Base64(::CvFileStorage * fs, const void* _data, int len); + +CV_EXPORTS void cvEndWriteRawData_Base64(::CvFileStorage * fs); + +CV_EXPORTS void cvWriteMat_Base64(::CvFileStorage* fs, const char* name, const ::CvMat* mat); + +CV_EXPORTS void cvWriteMatND_Base64(::CvFileStorage* fs, const char* name, const ::CvMatND* mat); + +} // cv + +#endif // OPENCV_CORE_PERSISTENCE_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/ptr.inl.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/ptr.inl.hpp new file mode 100644 index 00000000..466f6342 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/ptr.inl.hpp @@ -0,0 +1,379 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, NVIDIA Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the copyright holders or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_PTR_INL_HPP +#define OPENCV_CORE_PTR_INL_HPP + +#include + +//! 
@cond IGNORED + +namespace cv { + +template +void DefaultDeleter::operator () (Y* p) const +{ + delete p; +} + +namespace detail +{ + +struct PtrOwner +{ + PtrOwner() : refCount(1) + {} + + void incRef() + { + CV_XADD(&refCount, 1); + } + + void decRef() + { + if (CV_XADD(&refCount, -1) == 1) deleteSelf(); + } + +protected: + /* This doesn't really need to be virtual, since PtrOwner is never deleted + directly, but it doesn't hurt and it helps avoid warnings. */ + virtual ~PtrOwner() + {} + + virtual void deleteSelf() = 0; + +private: + unsigned int refCount; + + // noncopyable + PtrOwner(const PtrOwner&); + PtrOwner& operator = (const PtrOwner&); +}; + +template +struct PtrOwnerImpl CV_FINAL : PtrOwner +{ + PtrOwnerImpl(Y* p, D d) : owned(p), deleter(d) + {} + + void deleteSelf() CV_OVERRIDE + { + deleter(owned); + delete this; + } + +private: + Y* owned; + D deleter; +}; + + +} + +template +Ptr::Ptr() : owner(NULL), stored(NULL) +{} + +template +template +Ptr::Ptr(Y* p) + : owner(p + ? new detail::PtrOwnerImpl >(p, DefaultDeleter()) + : NULL), + stored(p) +{} + +template +template +Ptr::Ptr(Y* p, D d) + : owner(p + ? new detail::PtrOwnerImpl(p, d) + : NULL), + stored(p) +{} + +template +Ptr::Ptr(const Ptr& o) : owner(o.owner), stored(o.stored) +{ + if (owner) owner->incRef(); +} + +template +template +Ptr::Ptr(const Ptr& o) : owner(o.owner), stored(o.stored) +{ + if (owner) owner->incRef(); +} + +template +template +Ptr::Ptr(const Ptr& o, T* p) : owner(o.owner), stored(p) +{ + if (owner) owner->incRef(); +} + +template +Ptr::~Ptr() +{ + release(); +} + +template +Ptr& Ptr::operator = (const Ptr& o) +{ + Ptr(o).swap(*this); + return *this; +} + +template +template +Ptr& Ptr::operator = (const Ptr& o) +{ + Ptr(o).swap(*this); + return *this; +} + +template +void Ptr::release() +{ + if (owner) owner->decRef(); + owner = NULL; + stored = NULL; +} + +template +template +void Ptr::reset(Y* p) +{ + Ptr(p).swap(*this); +} + +template +template +void Ptr::reset(Y* p, D d) +{ + Ptr(p, d).swap(*this); +} + +template +void Ptr::swap(Ptr& o) +{ + std::swap(owner, o.owner); + std::swap(stored, o.stored); +} + +template +T* Ptr::get() const +{ + return stored; +} + +template +typename detail::RefOrVoid::type Ptr::operator * () const +{ + return *stored; +} + +template +T* Ptr::operator -> () const +{ + return stored; +} + +template +Ptr::operator T* () const +{ + return stored; +} + + +template +bool Ptr::empty() const +{ + return !stored; +} + +template +template +Ptr Ptr::staticCast() const +{ + return Ptr(*this, static_cast(stored)); +} + +template +template +Ptr Ptr::constCast() const +{ + return Ptr(*this, const_cast(stored)); +} + +template +template +Ptr Ptr::dynamicCast() const +{ + return Ptr(*this, dynamic_cast(stored)); +} + +#ifdef CV_CXX_MOVE_SEMANTICS + +template +Ptr::Ptr(Ptr&& o) : owner(o.owner), stored(o.stored) +{ + o.owner = NULL; + o.stored = NULL; +} + +template +Ptr& Ptr::operator = (Ptr&& o) +{ + if (this == &o) + return *this; + + release(); + owner = o.owner; + stored = o.stored; + o.owner = NULL; + o.stored = NULL; + return *this; +} + +#endif + + +template +void swap(Ptr& ptr1, Ptr& ptr2){ + ptr1.swap(ptr2); +} + +template +bool operator == (const Ptr& ptr1, const Ptr& ptr2) +{ + return ptr1.get() == ptr2.get(); +} + +template +bool operator != (const Ptr& ptr1, const Ptr& ptr2) +{ + return ptr1.get() != ptr2.get(); +} + +template +Ptr makePtr() +{ + return Ptr(new T()); +} + +template +Ptr makePtr(const A1& a1) +{ + return Ptr(new T(a1)); +} + +template +Ptr makePtr(const 
A1& a1, const A2& a2) +{ + return Ptr(new T(a1, a2)); +} + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3) +{ + return Ptr(new T(a1, a2, a3)); +} + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4) +{ + return Ptr(new T(a1, a2, a3, a4)); +} + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5) +{ + return Ptr(new T(a1, a2, a3, a4, a5)); +} + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6) +{ + return Ptr(new T(a1, a2, a3, a4, a5, a6)); +} + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7) +{ + return Ptr(new T(a1, a2, a3, a4, a5, a6, a7)); +} + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8) +{ + return Ptr(new T(a1, a2, a3, a4, a5, a6, a7, a8)); +} + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9) +{ + return Ptr(new T(a1, a2, a3, a4, a5, a6, a7, a8, a9)); +} + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9, const A10& a10) +{ + return Ptr(new T(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10)); +} + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9, const A10& a10, const A11& a11) +{ + return Ptr(new T(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11)); +} + +template +Ptr makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9, const A10& a10, const A11& a11, const A12& a12) +{ + return Ptr(new T(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12)); +} +} // namespace cv + +//! @endcond + +#endif // OPENCV_CORE_PTR_INL_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/saturate.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/saturate.hpp new file mode 100644 index 00000000..36d31215 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/saturate.hpp @@ -0,0 +1,163 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Copyright (C) 2014, Itseez Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_SATURATE_HPP +#define OPENCV_CORE_SATURATE_HPP + +#include "opencv2/core/cvdef.h" +#include "opencv2/core/fast_math.hpp" + +namespace cv +{ + +//! @addtogroup core_utils +//! @{ + +/////////////// saturate_cast (used in image & signal processing) /////////////////// + +/** @brief Template function for accurate conversion from one primitive type to another. + + The function saturate_cast resembles the standard C++ cast operations, such as static_cast\() + and others. It perform an efficient and accurate conversion from one primitive type to another + (see the introduction chapter). saturate in the name means that when the input value v is out of the + range of the target type, the result is not formed just by taking low bits of the input, but instead + the value is clipped. For example: + @code + uchar a = saturate_cast(-100); // a = 0 (UCHAR_MIN) + short b = saturate_cast(33333.33333); // b = 32767 (SHRT_MAX) + @endcode + Such clipping is done when the target type is unsigned char , signed char , unsigned short or + signed short . For 32-bit integers, no clipping is done. + + When the parameter is a floating-point value and the target type is an integer (8-, 16- or 32-bit), + the floating-point value is first rounded to the nearest integer and then clipped if needed (when + the target type is 8- or 16-bit). + + @param v Function parameter. 
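+    Two more illustrative cases (values chosen only to show the round-then-clip rule
+    and the 32-bit exception described above):
+    @code
+        uchar u = saturate_cast<uchar>(257.5);  // rounded to 258, then clipped to 255
+        int   i = saturate_cast<int>(-3.2);     // rounded to -3; 32-bit targets are not clipped
+    @endcode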
+ @sa add, subtract, multiply, divide, Mat::convertTo + */ +template static inline _Tp saturate_cast(uchar v) { return _Tp(v); } +/** @overload */ +template static inline _Tp saturate_cast(schar v) { return _Tp(v); } +/** @overload */ +template static inline _Tp saturate_cast(ushort v) { return _Tp(v); } +/** @overload */ +template static inline _Tp saturate_cast(short v) { return _Tp(v); } +/** @overload */ +template static inline _Tp saturate_cast(unsigned v) { return _Tp(v); } +/** @overload */ +template static inline _Tp saturate_cast(int v) { return _Tp(v); } +/** @overload */ +template static inline _Tp saturate_cast(float v) { return _Tp(v); } +/** @overload */ +template static inline _Tp saturate_cast(double v) { return _Tp(v); } +/** @overload */ +template static inline _Tp saturate_cast(int64 v) { return _Tp(v); } +/** @overload */ +template static inline _Tp saturate_cast(uint64 v) { return _Tp(v); } + +template<> inline uchar saturate_cast(schar v) { return (uchar)std::max((int)v, 0); } +template<> inline uchar saturate_cast(ushort v) { return (uchar)std::min((unsigned)v, (unsigned)UCHAR_MAX); } +template<> inline uchar saturate_cast(int v) { return (uchar)((unsigned)v <= UCHAR_MAX ? v : v > 0 ? UCHAR_MAX : 0); } +template<> inline uchar saturate_cast(short v) { return saturate_cast((int)v); } +template<> inline uchar saturate_cast(unsigned v) { return (uchar)std::min(v, (unsigned)UCHAR_MAX); } +template<> inline uchar saturate_cast(float v) { int iv = cvRound(v); return saturate_cast(iv); } +template<> inline uchar saturate_cast(double v) { int iv = cvRound(v); return saturate_cast(iv); } +template<> inline uchar saturate_cast(int64 v) { return (uchar)((uint64)v <= (uint64)UCHAR_MAX ? v : v > 0 ? UCHAR_MAX : 0); } +template<> inline uchar saturate_cast(uint64 v) { return (uchar)std::min(v, (uint64)UCHAR_MAX); } + +template<> inline schar saturate_cast(uchar v) { return (schar)std::min((int)v, SCHAR_MAX); } +template<> inline schar saturate_cast(ushort v) { return (schar)std::min((unsigned)v, (unsigned)SCHAR_MAX); } +template<> inline schar saturate_cast(int v) { return (schar)((unsigned)(v-SCHAR_MIN) <= (unsigned)UCHAR_MAX ? v : v > 0 ? SCHAR_MAX : SCHAR_MIN); } +template<> inline schar saturate_cast(short v) { return saturate_cast((int)v); } +template<> inline schar saturate_cast(unsigned v) { return (schar)std::min(v, (unsigned)SCHAR_MAX); } +template<> inline schar saturate_cast(float v) { int iv = cvRound(v); return saturate_cast(iv); } +template<> inline schar saturate_cast(double v) { int iv = cvRound(v); return saturate_cast(iv); } +template<> inline schar saturate_cast(int64 v) { return (schar)((uint64)((int64)v-SCHAR_MIN) <= (uint64)UCHAR_MAX ? v : v > 0 ? SCHAR_MAX : SCHAR_MIN); } +template<> inline schar saturate_cast(uint64 v) { return (schar)std::min(v, (uint64)SCHAR_MAX); } + +template<> inline ushort saturate_cast(schar v) { return (ushort)std::max((int)v, 0); } +template<> inline ushort saturate_cast(short v) { return (ushort)std::max((int)v, 0); } +template<> inline ushort saturate_cast(int v) { return (ushort)((unsigned)v <= (unsigned)USHRT_MAX ? v : v > 0 ? 
USHRT_MAX : 0); } +template<> inline ushort saturate_cast(unsigned v) { return (ushort)std::min(v, (unsigned)USHRT_MAX); } +template<> inline ushort saturate_cast(float v) { int iv = cvRound(v); return saturate_cast(iv); } +template<> inline ushort saturate_cast(double v) { int iv = cvRound(v); return saturate_cast(iv); } +template<> inline ushort saturate_cast(int64 v) { return (ushort)((uint64)v <= (uint64)USHRT_MAX ? v : v > 0 ? USHRT_MAX : 0); } +template<> inline ushort saturate_cast(uint64 v) { return (ushort)std::min(v, (uint64)USHRT_MAX); } + +template<> inline short saturate_cast(ushort v) { return (short)std::min((int)v, SHRT_MAX); } +template<> inline short saturate_cast(int v) { return (short)((unsigned)(v - SHRT_MIN) <= (unsigned)USHRT_MAX ? v : v > 0 ? SHRT_MAX : SHRT_MIN); } +template<> inline short saturate_cast(unsigned v) { return (short)std::min(v, (unsigned)SHRT_MAX); } +template<> inline short saturate_cast(float v) { int iv = cvRound(v); return saturate_cast(iv); } +template<> inline short saturate_cast(double v) { int iv = cvRound(v); return saturate_cast(iv); } +template<> inline short saturate_cast(int64 v) { return (short)((uint64)((int64)v - SHRT_MIN) <= (uint64)USHRT_MAX ? v : v > 0 ? SHRT_MAX : SHRT_MIN); } +template<> inline short saturate_cast(uint64 v) { return (short)std::min(v, (uint64)SHRT_MAX); } + +template<> inline int saturate_cast(unsigned v) { return (int)std::min(v, (unsigned)INT_MAX); } +template<> inline int saturate_cast(int64 v) { return (int)((uint64)(v - INT_MIN) <= (uint64)UINT_MAX ? v : v > 0 ? INT_MAX : INT_MIN); } +template<> inline int saturate_cast(uint64 v) { return (int)std::min(v, (uint64)INT_MAX); } +template<> inline int saturate_cast(float v) { return cvRound(v); } +template<> inline int saturate_cast(double v) { return cvRound(v); } + +template<> inline unsigned saturate_cast(schar v) { return (unsigned)std::max(v, (schar)0); } +template<> inline unsigned saturate_cast(short v) { return (unsigned)std::max(v, (short)0); } +template<> inline unsigned saturate_cast(int v) { return (unsigned)std::max(v, (int)0); } +template<> inline unsigned saturate_cast(int64 v) { return (unsigned)((uint64)v <= (uint64)UINT_MAX ? v : v > 0 ? UINT_MAX : 0); } +template<> inline unsigned saturate_cast(uint64 v) { return (unsigned)std::min(v, (uint64)UINT_MAX); } +// we intentionally do not clip negative numbers, to make -1 become 0xffffffff etc. +template<> inline unsigned saturate_cast(float v) { return static_cast(cvRound(v)); } +template<> inline unsigned saturate_cast(double v) { return static_cast(cvRound(v)); } + +template<> inline uint64 saturate_cast(schar v) { return (uint64)std::max(v, (schar)0); } +template<> inline uint64 saturate_cast(short v) { return (uint64)std::max(v, (short)0); } +template<> inline uint64 saturate_cast(int v) { return (uint64)std::max(v, (int)0); } +template<> inline uint64 saturate_cast(int64 v) { return (uint64)std::max(v, (int64)0); } + +template<> inline int64 saturate_cast(uint64 v) { return (int64)std::min(v, (uint64)LLONG_MAX); } + +//! @} + +} // cv + +#endif // OPENCV_CORE_SATURATE_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/simd_intrinsics.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/simd_intrinsics.hpp new file mode 100644 index 00000000..309202d1 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/simd_intrinsics.hpp @@ -0,0 +1,87 @@ +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#ifndef OPENCV_CORE_SIMD_INTRINSICS_HPP +#define OPENCV_CORE_SIMD_INTRINSICS_HPP + +/** +Helper header to support SIMD intrinsics (universal intrinsics) in user code. +Intrinsics documentation: https://docs.opencv.org/3.4/df/d91/group__core__hal__intrin.html + + +Checks of target CPU instruction set based on compiler definitions don't work well enough. +More reliable solutions require utilization of configuration systems (like CMake). + +So, probably you need to specify your own configuration. + +You can do that via CMake in this way: + add_definitions(/DOPENCV_SIMD_CONFIG_HEADER=opencv_simd_config_custom.hpp) +or + add_definitions(/DOPENCV_SIMD_CONFIG_INCLUDE_DIR=1) + +Additionally you may need to add include directory to your files: + include_directories("${CMAKE_CURRENT_LIST_DIR}/opencv_config_${MYTARGET}") + +These files can be pre-generated for target configurations of your application +or generated by CMake on the fly (use CMAKE_BINARY_DIR for that). + +Notes: +- H/W capability checks are still responsibility of your application +- runtime dispatching is not covered by this helper header +*/ + +#ifdef __OPENCV_BUILD +#error "Use core/hal/intrin.hpp during OpenCV build" +#endif + +#ifdef OPENCV_HAL_INTRIN_HPP +#error "core/simd_intrinsics.hpp must be included before core/hal/intrin.hpp" +#endif + +#include "opencv2/core/cvdef.h" + +#ifdef OPENCV_SIMD_CONFIG_HEADER +#include CVAUX_STR(OPENCV_SIMD_CONFIG_HEADER) +#elif defined(OPENCV_SIMD_CONFIG_INCLUDE_DIR) +#include "opencv_simd_config.hpp" // corresponding directory should be added via -I compiler parameter +#else // custom config headers + +#if (!defined(CV_AVX_512F) || !CV_AVX_512F) && (defined(__AVX512__) || defined(__AVX512F__)) +# include +# undef CV_AVX_512F +# define CV_AVX_512F 1 +# ifndef OPENCV_SIMD_DONT_ASSUME_SKX // Skylake-X with AVX-512F/CD/BW/DQ/VL +# undef CV_AVX512_SKX +# define CV_AVX512_SKX 1 +# undef CV_AVX_512CD +# define CV_AVX_512CD 1 +# undef CV_AVX_512BW +# define CV_AVX_512BW 1 +# undef CV_AVX_512DQ +# define CV_AVX_512DQ 1 +# undef CV_AVX_512VL +# define CV_AVX_512VL 1 +# endif +#endif // AVX512 + +// GCC/Clang: -mavx2 +// MSVC: /arch:AVX2 +#if defined __AVX2__ +# include +# undef CV_AVX2 +# define CV_AVX2 1 +# if defined __F16C__ +# undef CV_FP16 +# define CV_FP16 1 +# endif +#endif + +#endif + +// SSE / NEON / VSX is handled by cv_cpu_dispatch.h compatibility block +#include "cv_cpu_dispatch.h" + +#include "hal/intrin.hpp" + +#endif // OPENCV_CORE_SIMD_INTRINSICS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/softfloat.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/softfloat.hpp new file mode 100644 index 00000000..485e15c4 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/softfloat.hpp @@ -0,0 +1,514 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html + +// This file is based on files from package issued with the following license: + +/*============================================================================ + +This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3c, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#pragma once +#ifndef softfloat_h +#define softfloat_h 1 + +#include "cvdef.h" + +namespace cv +{ + +/** @addtogroup core_utils_softfloat + + [SoftFloat](http://www.jhauser.us/arithmetic/SoftFloat.html) is a software implementation + of floating-point calculations according to IEEE 754 standard. + All calculations are done in integers, that's why they are machine-independent and bit-exact. + This library can be useful in accuracy-critical parts like look-up tables generation, tests, etc. + OpenCV contains a subset of SoftFloat partially rewritten to C++. + + ### Types + + There are two basic types: @ref softfloat and @ref softdouble. + These types are binary compatible with float and double types respectively + and support conversions to/from them. + Other types from original SoftFloat library like fp16 or fp128 were thrown away + as well as quiet/signaling NaN support, on-the-fly rounding mode switch + and exception flags (though exceptions can be implemented in the future). + + ### Operations + + Both types support the following: + - Construction from signed and unsigned 32-bit and 64 integers, + float/double or raw binary representation + - Conversions between each other, to float or double and to int + using @ref cvRound, @ref cvTrunc, @ref cvFloor, @ref cvCeil or a bunch of + saturate_cast functions + - Add, subtract, multiply, divide, remainder, square root, FMA with absolute precision + - Comparison operations + - Explicit sign, exponent and significand manipulation through get/set methods, + number state indicators (isInf, isNan, isSubnormal) + - Type-specific constants like eps, minimum/maximum value, best pi approximation, etc. + - min(), max(), abs(), exp(), log() and pow() functions + +*/ +//! 
@{ + +struct softfloat; +struct softdouble; + +struct CV_EXPORTS softfloat +{ +public: + /** @brief Default constructor */ + softfloat() { v = 0; } + /** @brief Copy constructor */ + softfloat( const softfloat& c) { v = c.v; } + /** @brief Assign constructor */ + softfloat& operator=( const softfloat& c ) + { + if(&c != this) v = c.v; + return *this; + } + /** @brief Construct from raw + + Builds new value from raw binary representation + */ + static const softfloat fromRaw( const uint32_t a ) { softfloat x; x.v = a; return x; } + + /** @brief Construct from integer */ + explicit softfloat( const uint32_t ); + explicit softfloat( const uint64_t ); + explicit softfloat( const int32_t ); + explicit softfloat( const int64_t ); + +#ifdef CV_INT32_T_IS_LONG_INT + // for platforms with int32_t = long int + explicit softfloat( const int a ) { *this = softfloat(static_cast(a)); } +#endif + + /** @brief Construct from float */ + explicit softfloat( const float a ) { Cv32suf s; s.f = a; v = s.u; } + + /** @brief Type casts */ + operator softdouble() const; + operator float() const { Cv32suf s; s.u = v; return s.f; } + + /** @brief Basic arithmetics */ + softfloat operator + (const softfloat&) const; + softfloat operator - (const softfloat&) const; + softfloat operator * (const softfloat&) const; + softfloat operator / (const softfloat&) const; + softfloat operator - () const { softfloat x; x.v = v ^ (1U << 31); return x; } + + /** @brief Remainder operator + + A quote from original SoftFloat manual: + + > The IEEE Standard remainder operation computes the value + > a - n * b, where n is the integer closest to a / b. + > If a / b is exactly halfway between two integers, n is the even integer + > closest to a / b. The IEEE Standard’s remainder operation is always exact and so requires no rounding. + > Depending on the relative magnitudes of the operands, the remainder functions + > can take considerably longer to execute than the other SoftFloat functions. + > This is an inherent characteristic of the remainder operation itself and is not a flaw + > in the SoftFloat implementation. + */ + softfloat operator % (const softfloat&) const; + + softfloat& operator += (const softfloat& a) { *this = *this + a; return *this; } + softfloat& operator -= (const softfloat& a) { *this = *this - a; return *this; } + softfloat& operator *= (const softfloat& a) { *this = *this * a; return *this; } + softfloat& operator /= (const softfloat& a) { *this = *this / a; return *this; } + softfloat& operator %= (const softfloat& a) { *this = *this % a; return *this; } + + /** @brief Comparison operations + + - Any operation with NaN produces false + + The only exception is when x is NaN: x != y for any y. 
+ - Positive and negative zeros are equal + */ + bool operator == ( const softfloat& ) const; + bool operator != ( const softfloat& ) const; + bool operator > ( const softfloat& ) const; + bool operator >= ( const softfloat& ) const; + bool operator < ( const softfloat& ) const; + bool operator <= ( const softfloat& ) const; + + /** @brief NaN state indicator */ + inline bool isNaN() const { return (v & 0x7fffffff) > 0x7f800000; } + /** @brief Inf state indicator */ + inline bool isInf() const { return (v & 0x7fffffff) == 0x7f800000; } + /** @brief Subnormal number indicator */ + inline bool isSubnormal() const { return ((v >> 23) & 0xFF) == 0; } + + /** @brief Get sign bit */ + inline bool getSign() const { return (v >> 31) != 0; } + /** @brief Construct a copy with new sign bit */ + inline softfloat setSign(bool sign) const { softfloat x; x.v = (v & ((1U << 31) - 1)) | ((uint32_t)sign << 31); return x; } + /** @brief Get 0-based exponent */ + inline int getExp() const { return ((v >> 23) & 0xFF) - 127; } + /** @brief Construct a copy with new 0-based exponent */ + inline softfloat setExp(int e) const { softfloat x; x.v = (v & 0x807fffff) | (((e + 127) & 0xFF) << 23 ); return x; } + + /** @brief Get a fraction part + + Returns a number 1 <= x < 2 with the same significand + */ + inline softfloat getFrac() const + { + uint_fast32_t vv = (v & 0x007fffff) | (127 << 23); + return softfloat::fromRaw(vv); + } + /** @brief Construct a copy with provided significand + + Constructs a copy of a number with significand taken from parameter + */ + inline softfloat setFrac(const softfloat& s) const + { + softfloat x; + x.v = (v & 0xff800000) | (s.v & 0x007fffff); + return x; + } + + /** @brief Zero constant */ + static softfloat zero() { return softfloat::fromRaw( 0 ); } + /** @brief Positive infinity constant */ + static softfloat inf() { return softfloat::fromRaw( 0xFF << 23 ); } + /** @brief Default NaN constant */ + static softfloat nan() { return softfloat::fromRaw( 0x7fffffff ); } + /** @brief One constant */ + static softfloat one() { return softfloat::fromRaw( 127 << 23 ); } + /** @brief Smallest normalized value */ + static softfloat min() { return softfloat::fromRaw( 0x01 << 23 ); } + /** @brief Difference between 1 and next representable value */ + static softfloat eps() { return softfloat::fromRaw( (127 - 23) << 23 ); } + /** @brief Biggest finite value */ + static softfloat max() { return softfloat::fromRaw( (0xFF << 23) - 1 ); } + /** @brief Correct pi approximation */ + static softfloat pi() { return softfloat::fromRaw( 0x40490fdb ); } + + uint32_t v; +}; + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ + +struct CV_EXPORTS softdouble +{ +public: + /** @brief Default constructor */ + softdouble() : v(0) { } + /** @brief Copy constructor */ + softdouble( const softdouble& c) { v = c.v; } + /** @brief Assign constructor */ + softdouble& operator=( const softdouble& c ) + { + if(&c != this) v = c.v; + return *this; + } + /** @brief Construct from raw + + Builds new value from raw binary representation + */ + static softdouble fromRaw( const uint64_t a ) { softdouble x; x.v = a; return x; } + + /** @brief Construct from integer */ + explicit softdouble( const uint32_t ); + explicit softdouble( const uint64_t ); + explicit softdouble( const int32_t ); + explicit softdouble( const int64_t ); + +#ifdef CV_INT32_T_IS_LONG_INT + // for platforms with int32_t = long int + explicit 
softdouble( const int a ) { *this = softdouble(static_cast(a)); } +#endif + + /** @brief Construct from double */ + explicit softdouble( const double a ) { Cv64suf s; s.f = a; v = s.u; } + + /** @brief Type casts */ + operator softfloat() const; + operator double() const { Cv64suf s; s.u = v; return s.f; } + + /** @brief Basic arithmetics */ + softdouble operator + (const softdouble&) const; + softdouble operator - (const softdouble&) const; + softdouble operator * (const softdouble&) const; + softdouble operator / (const softdouble&) const; + softdouble operator - () const { softdouble x; x.v = v ^ (1ULL << 63); return x; } + + /** @brief Remainder operator + + A quote from original SoftFloat manual: + + > The IEEE Standard remainder operation computes the value + > a - n * b, where n is the integer closest to a / b. + > If a / b is exactly halfway between two integers, n is the even integer + > closest to a / b. The IEEE Standard’s remainder operation is always exact and so requires no rounding. + > Depending on the relative magnitudes of the operands, the remainder functions + > can take considerably longer to execute than the other SoftFloat functions. + > This is an inherent characteristic of the remainder operation itself and is not a flaw + > in the SoftFloat implementation. + */ + softdouble operator % (const softdouble&) const; + + softdouble& operator += (const softdouble& a) { *this = *this + a; return *this; } + softdouble& operator -= (const softdouble& a) { *this = *this - a; return *this; } + softdouble& operator *= (const softdouble& a) { *this = *this * a; return *this; } + softdouble& operator /= (const softdouble& a) { *this = *this / a; return *this; } + softdouble& operator %= (const softdouble& a) { *this = *this % a; return *this; } + + /** @brief Comparison operations + + - Any operation with NaN produces false + + The only exception is when x is NaN: x != y for any y. 
+ - Positive and negative zeros are equal + */ + bool operator == ( const softdouble& ) const; + bool operator != ( const softdouble& ) const; + bool operator > ( const softdouble& ) const; + bool operator >= ( const softdouble& ) const; + bool operator < ( const softdouble& ) const; + bool operator <= ( const softdouble& ) const; + + /** @brief NaN state indicator */ + inline bool isNaN() const { return (v & 0x7fffffffffffffff) > 0x7ff0000000000000; } + /** @brief Inf state indicator */ + inline bool isInf() const { return (v & 0x7fffffffffffffff) == 0x7ff0000000000000; } + /** @brief Subnormal number indicator */ + inline bool isSubnormal() const { return ((v >> 52) & 0x7FF) == 0; } + + /** @brief Get sign bit */ + inline bool getSign() const { return (v >> 63) != 0; } + /** @brief Construct a copy with new sign bit */ + softdouble setSign(bool sign) const { softdouble x; x.v = (v & ((1ULL << 63) - 1)) | ((uint_fast64_t)(sign) << 63); return x; } + /** @brief Get 0-based exponent */ + inline int getExp() const { return ((v >> 52) & 0x7FF) - 1023; } + /** @brief Construct a copy with new 0-based exponent */ + inline softdouble setExp(int e) const + { + softdouble x; + x.v = (v & 0x800FFFFFFFFFFFFF) | ((uint_fast64_t)((e + 1023) & 0x7FF) << 52); + return x; + } + + /** @brief Get a fraction part + + Returns a number 1 <= x < 2 with the same significand + */ + inline softdouble getFrac() const + { + uint_fast64_t vv = (v & 0x000FFFFFFFFFFFFF) | ((uint_fast64_t)(1023) << 52); + return softdouble::fromRaw(vv); + } + /** @brief Construct a copy with provided significand + + Constructs a copy of a number with significand taken from parameter + */ + inline softdouble setFrac(const softdouble& s) const + { + softdouble x; + x.v = (v & 0xFFF0000000000000) | (s.v & 0x000FFFFFFFFFFFFF); + return x; + } + + /** @brief Zero constant */ + static softdouble zero() { return softdouble::fromRaw( 0 ); } + /** @brief Positive infinity constant */ + static softdouble inf() { return softdouble::fromRaw( (uint_fast64_t)(0x7FF) << 52 ); } + /** @brief Default NaN constant */ + static softdouble nan() { return softdouble::fromRaw( CV_BIG_INT(0x7FFFFFFFFFFFFFFF) ); } + /** @brief One constant */ + static softdouble one() { return softdouble::fromRaw( (uint_fast64_t)( 1023) << 52 ); } + /** @brief Smallest normalized value */ + static softdouble min() { return softdouble::fromRaw( (uint_fast64_t)( 0x01) << 52 ); } + /** @brief Difference between 1 and next representable value */ + static softdouble eps() { return softdouble::fromRaw( (uint_fast64_t)( 1023 - 52 ) << 52 ); } + /** @brief Biggest finite value */ + static softdouble max() { return softdouble::fromRaw( ((uint_fast64_t)(0x7FF) << 52) - 1 ); } + /** @brief Correct pi approximation */ + static softdouble pi() { return softdouble::fromRaw( CV_BIG_INT(0x400921FB54442D18) ); } + + uint64_t v; +}; + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ + +/** @brief Fused Multiplication and Addition + +Computes (a*b)+c with single rounding +*/ +CV_EXPORTS softfloat mulAdd( const softfloat& a, const softfloat& b, const softfloat & c); +CV_EXPORTS softdouble mulAdd( const softdouble& a, const softdouble& b, const softdouble& c); + +/** @brief Square root */ +CV_EXPORTS softfloat sqrt( const softfloat& a ); +CV_EXPORTS softdouble sqrt( const softdouble& a ); +} + +/*---------------------------------------------------------------------------- +| Ported from 
OpenCV and added for usability
+*----------------------------------------------------------------------------*/
+
+/** @brief Truncates number to integer with minimum magnitude */
+CV_EXPORTS int cvTrunc(const cv::softfloat& a);
+CV_EXPORTS int cvTrunc(const cv::softdouble& a);
+
+/** @brief Rounds a number to nearest even integer */
+CV_EXPORTS int cvRound(const cv::softfloat& a);
+CV_EXPORTS int cvRound(const cv::softdouble& a);
+
+/** @brief Rounds a number to nearest even long long integer */
+CV_EXPORTS int64_t cvRound64(const cv::softdouble& a);
+
+/** @brief Rounds a number down to integer */
+CV_EXPORTS int cvFloor(const cv::softfloat& a);
+CV_EXPORTS int cvFloor(const cv::softdouble& a);
+
+/** @brief Rounds number up to integer */
+CV_EXPORTS int cvCeil(const cv::softfloat& a);
+CV_EXPORTS int cvCeil(const cv::softdouble& a);
+
+namespace cv
+{
+/** @brief Saturate casts */
+template<typename _Tp> static inline _Tp saturate_cast(softfloat a) { return _Tp(a); }
+template<typename _Tp> static inline _Tp saturate_cast(softdouble a) { return _Tp(a); }
+
+template<> inline uchar saturate_cast<uchar>(softfloat a) { return (uchar)std::max(std::min(cvRound(a), (int)UCHAR_MAX), 0); }
+template<> inline uchar saturate_cast<uchar>(softdouble a) { return (uchar)std::max(std::min(cvRound(a), (int)UCHAR_MAX), 0); }
+
+template<> inline schar saturate_cast<schar>(softfloat a) { return (schar)std::min(std::max(cvRound(a), (int)SCHAR_MIN), (int)SCHAR_MAX); }
+template<> inline schar saturate_cast<schar>(softdouble a) { return (schar)std::min(std::max(cvRound(a), (int)SCHAR_MIN), (int)SCHAR_MAX); }
+
+template<> inline ushort saturate_cast<ushort>(softfloat a) { return (ushort)std::max(std::min(cvRound(a), (int)USHRT_MAX), 0); }
+template<> inline ushort saturate_cast<ushort>(softdouble a) { return (ushort)std::max(std::min(cvRound(a), (int)USHRT_MAX), 0); }
+
+template<> inline short saturate_cast<short>(softfloat a) { return (short)std::min(std::max(cvRound(a), (int)SHRT_MIN), (int)SHRT_MAX); }
+template<> inline short saturate_cast<short>(softdouble a) { return (short)std::min(std::max(cvRound(a), (int)SHRT_MIN), (int)SHRT_MAX); }
+
+template<> inline int saturate_cast<int>(softfloat a) { return cvRound(a); }
+template<> inline int saturate_cast<int>(softdouble a) { return cvRound(a); }
+
+template<> inline int64_t saturate_cast<int64_t>(softfloat a) { return cvRound(a); }
+template<> inline int64_t saturate_cast<int64_t>(softdouble a) { return cvRound64(a); }
+
+/** @brief Saturate cast to unsigned integer and unsigned long long integer
+We intentionally do not clip negative numbers, to make -1 become 0xffffffff etc.
+*/
+template<> inline unsigned saturate_cast<unsigned>(softfloat a) { return cvRound(a); }
+template<> inline unsigned saturate_cast<unsigned>(softdouble a) { return cvRound(a); }
+
+template<> inline uint64_t saturate_cast<uint64_t>(softfloat a) { return cvRound(a); }
+template<> inline uint64_t saturate_cast<uint64_t>(softdouble a) { return cvRound64(a); }
+
+/** @brief Min and Max functions */
+inline softfloat min(const softfloat& a, const softfloat& b) { return (a > b) ? b : a; }
+inline softdouble min(const softdouble& a, const softdouble& b) { return (a > b) ? b : a; }
+
+inline softfloat max(const softfloat& a, const softfloat& b) { return (a > b) ? a : b; }
+inline softdouble max(const softdouble& a, const softdouble& b) { return (a > b) ?
a : b; } + +/** @brief Absolute value */ +inline softfloat abs( softfloat a) { softfloat x; x.v = a.v & ((1U << 31) - 1); return x; } +inline softdouble abs( softdouble a) { softdouble x; x.v = a.v & ((1ULL << 63) - 1); return x; } + +/** @brief Exponent + +Special cases: +- exp(NaN) is NaN +- exp(-Inf) == 0 +- exp(+Inf) == +Inf +*/ +CV_EXPORTS softfloat exp( const softfloat& a); +CV_EXPORTS softdouble exp( const softdouble& a); + +/** @brief Natural logarithm + +Special cases: +- log(NaN), log(x < 0) are NaN +- log(0) == -Inf +*/ +CV_EXPORTS softfloat log( const softfloat& a ); +CV_EXPORTS softdouble log( const softdouble& a ); + +/** @brief Raising to the power + +Special cases: +- x**NaN is NaN for any x +- ( |x| == 1 )**Inf is NaN +- ( |x| > 1 )**+Inf or ( |x| < 1 )**-Inf is +Inf +- ( |x| > 1 )**-Inf or ( |x| < 1 )**+Inf is 0 +- x ** 0 == 1 for any x +- x ** 1 == 1 for any x +- NaN ** y is NaN for any other y +- Inf**(y < 0) == 0 +- Inf ** y is +Inf for any other y +- (x < 0)**y is NaN for any other y if x can't be correctly rounded to integer +- 0 ** 0 == 1 +- 0 ** (y < 0) is +Inf +- 0 ** (y > 0) is 0 +*/ +CV_EXPORTS softfloat pow( const softfloat& a, const softfloat& b); +CV_EXPORTS softdouble pow( const softdouble& a, const softdouble& b); + +/** @brief Cube root + +Special cases: +- cbrt(NaN) is NaN +- cbrt(+/-Inf) is +/-Inf +*/ +CV_EXPORTS softfloat cbrt( const softfloat& a ); + +/** @brief Sine + +Special cases: +- sin(Inf) or sin(NaN) is NaN +- sin(x) == x when sin(x) is close to zero +*/ +CV_EXPORTS softdouble sin( const softdouble& a ); + +/** @brief Cosine + * +Special cases: +- cos(Inf) or cos(NaN) is NaN +- cos(x) == +/- 1 when cos(x) is close to +/- 1 +*/ +CV_EXPORTS softdouble cos( const softdouble& a ); + +//! @} core_utils_softfloat + +} // cv:: + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/sse_utils.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/sse_utils.hpp new file mode 100644 index 00000000..0906583e --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/sse_utils.hpp @@ -0,0 +1,652 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2015, Itseez Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_SSE_UTILS_HPP +#define OPENCV_CORE_SSE_UTILS_HPP + +#ifndef __cplusplus +# error sse_utils.hpp header must be compiled as C++ +#endif + +#include "opencv2/core/cvdef.h" + +//! @addtogroup core_utils_sse +//! @{ + +#if CV_SSE2 + +inline void _mm_deinterleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1) +{ + __m128i layer1_chunk0 = _mm_unpacklo_epi8(v_r0, v_g0); + __m128i layer1_chunk1 = _mm_unpackhi_epi8(v_r0, v_g0); + __m128i layer1_chunk2 = _mm_unpacklo_epi8(v_r1, v_g1); + __m128i layer1_chunk3 = _mm_unpackhi_epi8(v_r1, v_g1); + + __m128i layer2_chunk0 = _mm_unpacklo_epi8(layer1_chunk0, layer1_chunk2); + __m128i layer2_chunk1 = _mm_unpackhi_epi8(layer1_chunk0, layer1_chunk2); + __m128i layer2_chunk2 = _mm_unpacklo_epi8(layer1_chunk1, layer1_chunk3); + __m128i layer2_chunk3 = _mm_unpackhi_epi8(layer1_chunk1, layer1_chunk3); + + __m128i layer3_chunk0 = _mm_unpacklo_epi8(layer2_chunk0, layer2_chunk2); + __m128i layer3_chunk1 = _mm_unpackhi_epi8(layer2_chunk0, layer2_chunk2); + __m128i layer3_chunk2 = _mm_unpacklo_epi8(layer2_chunk1, layer2_chunk3); + __m128i layer3_chunk3 = _mm_unpackhi_epi8(layer2_chunk1, layer2_chunk3); + + __m128i layer4_chunk0 = _mm_unpacklo_epi8(layer3_chunk0, layer3_chunk2); + __m128i layer4_chunk1 = _mm_unpackhi_epi8(layer3_chunk0, layer3_chunk2); + __m128i layer4_chunk2 = _mm_unpacklo_epi8(layer3_chunk1, layer3_chunk3); + __m128i layer4_chunk3 = _mm_unpackhi_epi8(layer3_chunk1, layer3_chunk3); + + v_r0 = _mm_unpacklo_epi8(layer4_chunk0, layer4_chunk2); + v_r1 = _mm_unpackhi_epi8(layer4_chunk0, layer4_chunk2); + v_g0 = _mm_unpacklo_epi8(layer4_chunk1, layer4_chunk3); + v_g1 = _mm_unpackhi_epi8(layer4_chunk1, layer4_chunk3); +} + +inline void _mm_deinterleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, + __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) +{ + __m128i layer1_chunk0 = _mm_unpacklo_epi8(v_r0, v_g1); + __m128i layer1_chunk1 = _mm_unpackhi_epi8(v_r0, v_g1); + __m128i layer1_chunk2 = _mm_unpacklo_epi8(v_r1, v_b0); + __m128i layer1_chunk3 = _mm_unpackhi_epi8(v_r1, v_b0); + __m128i layer1_chunk4 = _mm_unpacklo_epi8(v_g0, v_b1); + __m128i layer1_chunk5 = _mm_unpackhi_epi8(v_g0, v_b1); + + __m128i layer2_chunk0 = _mm_unpacklo_epi8(layer1_chunk0, layer1_chunk3); + __m128i layer2_chunk1 = _mm_unpackhi_epi8(layer1_chunk0, layer1_chunk3); + __m128i layer2_chunk2 = _mm_unpacklo_epi8(layer1_chunk1, layer1_chunk4); + __m128i layer2_chunk3 = _mm_unpackhi_epi8(layer1_chunk1, layer1_chunk4); + __m128i layer2_chunk4 = _mm_unpacklo_epi8(layer1_chunk2, layer1_chunk5); + __m128i layer2_chunk5 = _mm_unpackhi_epi8(layer1_chunk2, layer1_chunk5); + + __m128i layer3_chunk0 = _mm_unpacklo_epi8(layer2_chunk0, layer2_chunk3); + __m128i 
layer3_chunk1 = _mm_unpackhi_epi8(layer2_chunk0, layer2_chunk3); + __m128i layer3_chunk2 = _mm_unpacklo_epi8(layer2_chunk1, layer2_chunk4); + __m128i layer3_chunk3 = _mm_unpackhi_epi8(layer2_chunk1, layer2_chunk4); + __m128i layer3_chunk4 = _mm_unpacklo_epi8(layer2_chunk2, layer2_chunk5); + __m128i layer3_chunk5 = _mm_unpackhi_epi8(layer2_chunk2, layer2_chunk5); + + __m128i layer4_chunk0 = _mm_unpacklo_epi8(layer3_chunk0, layer3_chunk3); + __m128i layer4_chunk1 = _mm_unpackhi_epi8(layer3_chunk0, layer3_chunk3); + __m128i layer4_chunk2 = _mm_unpacklo_epi8(layer3_chunk1, layer3_chunk4); + __m128i layer4_chunk3 = _mm_unpackhi_epi8(layer3_chunk1, layer3_chunk4); + __m128i layer4_chunk4 = _mm_unpacklo_epi8(layer3_chunk2, layer3_chunk5); + __m128i layer4_chunk5 = _mm_unpackhi_epi8(layer3_chunk2, layer3_chunk5); + + v_r0 = _mm_unpacklo_epi8(layer4_chunk0, layer4_chunk3); + v_r1 = _mm_unpackhi_epi8(layer4_chunk0, layer4_chunk3); + v_g0 = _mm_unpacklo_epi8(layer4_chunk1, layer4_chunk4); + v_g1 = _mm_unpackhi_epi8(layer4_chunk1, layer4_chunk4); + v_b0 = _mm_unpacklo_epi8(layer4_chunk2, layer4_chunk5); + v_b1 = _mm_unpackhi_epi8(layer4_chunk2, layer4_chunk5); +} + +inline void _mm_deinterleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, + __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) +{ + __m128i layer1_chunk0 = _mm_unpacklo_epi8(v_r0, v_b0); + __m128i layer1_chunk1 = _mm_unpackhi_epi8(v_r0, v_b0); + __m128i layer1_chunk2 = _mm_unpacklo_epi8(v_r1, v_b1); + __m128i layer1_chunk3 = _mm_unpackhi_epi8(v_r1, v_b1); + __m128i layer1_chunk4 = _mm_unpacklo_epi8(v_g0, v_a0); + __m128i layer1_chunk5 = _mm_unpackhi_epi8(v_g0, v_a0); + __m128i layer1_chunk6 = _mm_unpacklo_epi8(v_g1, v_a1); + __m128i layer1_chunk7 = _mm_unpackhi_epi8(v_g1, v_a1); + + __m128i layer2_chunk0 = _mm_unpacklo_epi8(layer1_chunk0, layer1_chunk4); + __m128i layer2_chunk1 = _mm_unpackhi_epi8(layer1_chunk0, layer1_chunk4); + __m128i layer2_chunk2 = _mm_unpacklo_epi8(layer1_chunk1, layer1_chunk5); + __m128i layer2_chunk3 = _mm_unpackhi_epi8(layer1_chunk1, layer1_chunk5); + __m128i layer2_chunk4 = _mm_unpacklo_epi8(layer1_chunk2, layer1_chunk6); + __m128i layer2_chunk5 = _mm_unpackhi_epi8(layer1_chunk2, layer1_chunk6); + __m128i layer2_chunk6 = _mm_unpacklo_epi8(layer1_chunk3, layer1_chunk7); + __m128i layer2_chunk7 = _mm_unpackhi_epi8(layer1_chunk3, layer1_chunk7); + + __m128i layer3_chunk0 = _mm_unpacklo_epi8(layer2_chunk0, layer2_chunk4); + __m128i layer3_chunk1 = _mm_unpackhi_epi8(layer2_chunk0, layer2_chunk4); + __m128i layer3_chunk2 = _mm_unpacklo_epi8(layer2_chunk1, layer2_chunk5); + __m128i layer3_chunk3 = _mm_unpackhi_epi8(layer2_chunk1, layer2_chunk5); + __m128i layer3_chunk4 = _mm_unpacklo_epi8(layer2_chunk2, layer2_chunk6); + __m128i layer3_chunk5 = _mm_unpackhi_epi8(layer2_chunk2, layer2_chunk6); + __m128i layer3_chunk6 = _mm_unpacklo_epi8(layer2_chunk3, layer2_chunk7); + __m128i layer3_chunk7 = _mm_unpackhi_epi8(layer2_chunk3, layer2_chunk7); + + __m128i layer4_chunk0 = _mm_unpacklo_epi8(layer3_chunk0, layer3_chunk4); + __m128i layer4_chunk1 = _mm_unpackhi_epi8(layer3_chunk0, layer3_chunk4); + __m128i layer4_chunk2 = _mm_unpacklo_epi8(layer3_chunk1, layer3_chunk5); + __m128i layer4_chunk3 = _mm_unpackhi_epi8(layer3_chunk1, layer3_chunk5); + __m128i layer4_chunk4 = _mm_unpacklo_epi8(layer3_chunk2, layer3_chunk6); + __m128i layer4_chunk5 = _mm_unpackhi_epi8(layer3_chunk2, layer3_chunk6); + __m128i layer4_chunk6 = _mm_unpacklo_epi8(layer3_chunk3, layer3_chunk7); + __m128i layer4_chunk7 = 
_mm_unpackhi_epi8(layer3_chunk3, layer3_chunk7); + + v_r0 = _mm_unpacklo_epi8(layer4_chunk0, layer4_chunk4); + v_r1 = _mm_unpackhi_epi8(layer4_chunk0, layer4_chunk4); + v_g0 = _mm_unpacklo_epi8(layer4_chunk1, layer4_chunk5); + v_g1 = _mm_unpackhi_epi8(layer4_chunk1, layer4_chunk5); + v_b0 = _mm_unpacklo_epi8(layer4_chunk2, layer4_chunk6); + v_b1 = _mm_unpackhi_epi8(layer4_chunk2, layer4_chunk6); + v_a0 = _mm_unpacklo_epi8(layer4_chunk3, layer4_chunk7); + v_a1 = _mm_unpackhi_epi8(layer4_chunk3, layer4_chunk7); +} + +inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1) +{ + __m128i v_mask = _mm_set1_epi16(0x00ff); + + __m128i layer4_chunk0 = _mm_packus_epi16(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask)); + __m128i layer4_chunk2 = _mm_packus_epi16(_mm_srli_epi16(v_r0, 8), _mm_srli_epi16(v_r1, 8)); + __m128i layer4_chunk1 = _mm_packus_epi16(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask)); + __m128i layer4_chunk3 = _mm_packus_epi16(_mm_srli_epi16(v_g0, 8), _mm_srli_epi16(v_g1, 8)); + + __m128i layer3_chunk0 = _mm_packus_epi16(_mm_and_si128(layer4_chunk0, v_mask), _mm_and_si128(layer4_chunk1, v_mask)); + __m128i layer3_chunk2 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk0, 8), _mm_srli_epi16(layer4_chunk1, 8)); + __m128i layer3_chunk1 = _mm_packus_epi16(_mm_and_si128(layer4_chunk2, v_mask), _mm_and_si128(layer4_chunk3, v_mask)); + __m128i layer3_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk2, 8), _mm_srli_epi16(layer4_chunk3, 8)); + + __m128i layer2_chunk0 = _mm_packus_epi16(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask)); + __m128i layer2_chunk2 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk0, 8), _mm_srli_epi16(layer3_chunk1, 8)); + __m128i layer2_chunk1 = _mm_packus_epi16(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); + __m128i layer2_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk2, 8), _mm_srli_epi16(layer3_chunk3, 8)); + + __m128i layer1_chunk0 = _mm_packus_epi16(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); + __m128i layer1_chunk2 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk0, 8), _mm_srli_epi16(layer2_chunk1, 8)); + __m128i layer1_chunk1 = _mm_packus_epi16(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); + __m128i layer1_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk2, 8), _mm_srli_epi16(layer2_chunk3, 8)); + + v_r0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); + v_g0 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk0, 8), _mm_srli_epi16(layer1_chunk1, 8)); + v_r1 = _mm_packus_epi16(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); + v_g1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk2, 8), _mm_srli_epi16(layer1_chunk3, 8)); +} + +inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, + __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) +{ + __m128i v_mask = _mm_set1_epi16(0x00ff); + + __m128i layer4_chunk0 = _mm_packus_epi16(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask)); + __m128i layer4_chunk3 = _mm_packus_epi16(_mm_srli_epi16(v_r0, 8), _mm_srli_epi16(v_r1, 8)); + __m128i layer4_chunk1 = _mm_packus_epi16(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask)); + __m128i layer4_chunk4 = _mm_packus_epi16(_mm_srli_epi16(v_g0, 8), _mm_srli_epi16(v_g1, 8)); + __m128i layer4_chunk2 = _mm_packus_epi16(_mm_and_si128(v_b0, v_mask), _mm_and_si128(v_b1, v_mask)); + __m128i layer4_chunk5 
= _mm_packus_epi16(_mm_srli_epi16(v_b0, 8), _mm_srli_epi16(v_b1, 8)); + + __m128i layer3_chunk0 = _mm_packus_epi16(_mm_and_si128(layer4_chunk0, v_mask), _mm_and_si128(layer4_chunk1, v_mask)); + __m128i layer3_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk0, 8), _mm_srli_epi16(layer4_chunk1, 8)); + __m128i layer3_chunk1 = _mm_packus_epi16(_mm_and_si128(layer4_chunk2, v_mask), _mm_and_si128(layer4_chunk3, v_mask)); + __m128i layer3_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk2, 8), _mm_srli_epi16(layer4_chunk3, 8)); + __m128i layer3_chunk2 = _mm_packus_epi16(_mm_and_si128(layer4_chunk4, v_mask), _mm_and_si128(layer4_chunk5, v_mask)); + __m128i layer3_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk4, 8), _mm_srli_epi16(layer4_chunk5, 8)); + + __m128i layer2_chunk0 = _mm_packus_epi16(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask)); + __m128i layer2_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk0, 8), _mm_srli_epi16(layer3_chunk1, 8)); + __m128i layer2_chunk1 = _mm_packus_epi16(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); + __m128i layer2_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk2, 8), _mm_srli_epi16(layer3_chunk3, 8)); + __m128i layer2_chunk2 = _mm_packus_epi16(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask)); + __m128i layer2_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk4, 8), _mm_srli_epi16(layer3_chunk5, 8)); + + __m128i layer1_chunk0 = _mm_packus_epi16(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); + __m128i layer1_chunk3 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk0, 8), _mm_srli_epi16(layer2_chunk1, 8)); + __m128i layer1_chunk1 = _mm_packus_epi16(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); + __m128i layer1_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk2, 8), _mm_srli_epi16(layer2_chunk3, 8)); + __m128i layer1_chunk2 = _mm_packus_epi16(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask)); + __m128i layer1_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk4, 8), _mm_srli_epi16(layer2_chunk5, 8)); + + v_r0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); + v_g1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk0, 8), _mm_srli_epi16(layer1_chunk1, 8)); + v_r1 = _mm_packus_epi16(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); + v_b0 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk2, 8), _mm_srli_epi16(layer1_chunk3, 8)); + v_g0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask)); + v_b1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk4, 8), _mm_srli_epi16(layer1_chunk5, 8)); +} + +inline void _mm_interleave_epi8(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, + __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) +{ + __m128i v_mask = _mm_set1_epi16(0x00ff); + + __m128i layer4_chunk0 = _mm_packus_epi16(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask)); + __m128i layer4_chunk4 = _mm_packus_epi16(_mm_srli_epi16(v_r0, 8), _mm_srli_epi16(v_r1, 8)); + __m128i layer4_chunk1 = _mm_packus_epi16(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask)); + __m128i layer4_chunk5 = _mm_packus_epi16(_mm_srli_epi16(v_g0, 8), _mm_srli_epi16(v_g1, 8)); + __m128i layer4_chunk2 = _mm_packus_epi16(_mm_and_si128(v_b0, v_mask), _mm_and_si128(v_b1, v_mask)); + __m128i layer4_chunk6 = _mm_packus_epi16(_mm_srli_epi16(v_b0, 8), 
_mm_srli_epi16(v_b1, 8)); + __m128i layer4_chunk3 = _mm_packus_epi16(_mm_and_si128(v_a0, v_mask), _mm_and_si128(v_a1, v_mask)); + __m128i layer4_chunk7 = _mm_packus_epi16(_mm_srli_epi16(v_a0, 8), _mm_srli_epi16(v_a1, 8)); + + __m128i layer3_chunk0 = _mm_packus_epi16(_mm_and_si128(layer4_chunk0, v_mask), _mm_and_si128(layer4_chunk1, v_mask)); + __m128i layer3_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk0, 8), _mm_srli_epi16(layer4_chunk1, 8)); + __m128i layer3_chunk1 = _mm_packus_epi16(_mm_and_si128(layer4_chunk2, v_mask), _mm_and_si128(layer4_chunk3, v_mask)); + __m128i layer3_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk2, 8), _mm_srli_epi16(layer4_chunk3, 8)); + __m128i layer3_chunk2 = _mm_packus_epi16(_mm_and_si128(layer4_chunk4, v_mask), _mm_and_si128(layer4_chunk5, v_mask)); + __m128i layer3_chunk6 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk4, 8), _mm_srli_epi16(layer4_chunk5, 8)); + __m128i layer3_chunk3 = _mm_packus_epi16(_mm_and_si128(layer4_chunk6, v_mask), _mm_and_si128(layer4_chunk7, v_mask)); + __m128i layer3_chunk7 = _mm_packus_epi16(_mm_srli_epi16(layer4_chunk6, 8), _mm_srli_epi16(layer4_chunk7, 8)); + + __m128i layer2_chunk0 = _mm_packus_epi16(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask)); + __m128i layer2_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk0, 8), _mm_srli_epi16(layer3_chunk1, 8)); + __m128i layer2_chunk1 = _mm_packus_epi16(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); + __m128i layer2_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk2, 8), _mm_srli_epi16(layer3_chunk3, 8)); + __m128i layer2_chunk2 = _mm_packus_epi16(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask)); + __m128i layer2_chunk6 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk4, 8), _mm_srli_epi16(layer3_chunk5, 8)); + __m128i layer2_chunk3 = _mm_packus_epi16(_mm_and_si128(layer3_chunk6, v_mask), _mm_and_si128(layer3_chunk7, v_mask)); + __m128i layer2_chunk7 = _mm_packus_epi16(_mm_srli_epi16(layer3_chunk6, 8), _mm_srli_epi16(layer3_chunk7, 8)); + + __m128i layer1_chunk0 = _mm_packus_epi16(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); + __m128i layer1_chunk4 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk0, 8), _mm_srli_epi16(layer2_chunk1, 8)); + __m128i layer1_chunk1 = _mm_packus_epi16(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); + __m128i layer1_chunk5 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk2, 8), _mm_srli_epi16(layer2_chunk3, 8)); + __m128i layer1_chunk2 = _mm_packus_epi16(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask)); + __m128i layer1_chunk6 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk4, 8), _mm_srli_epi16(layer2_chunk5, 8)); + __m128i layer1_chunk3 = _mm_packus_epi16(_mm_and_si128(layer2_chunk6, v_mask), _mm_and_si128(layer2_chunk7, v_mask)); + __m128i layer1_chunk7 = _mm_packus_epi16(_mm_srli_epi16(layer2_chunk6, 8), _mm_srli_epi16(layer2_chunk7, 8)); + + v_r0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); + v_b0 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk0, 8), _mm_srli_epi16(layer1_chunk1, 8)); + v_r1 = _mm_packus_epi16(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); + v_b1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk2, 8), _mm_srli_epi16(layer1_chunk3, 8)); + v_g0 = _mm_packus_epi16(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask)); + v_a0 = 
_mm_packus_epi16(_mm_srli_epi16(layer1_chunk4, 8), _mm_srli_epi16(layer1_chunk5, 8)); + v_g1 = _mm_packus_epi16(_mm_and_si128(layer1_chunk6, v_mask), _mm_and_si128(layer1_chunk7, v_mask)); + v_a1 = _mm_packus_epi16(_mm_srli_epi16(layer1_chunk6, 8), _mm_srli_epi16(layer1_chunk7, 8)); +} + +inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1) +{ + __m128i layer1_chunk0 = _mm_unpacklo_epi16(v_r0, v_g0); + __m128i layer1_chunk1 = _mm_unpackhi_epi16(v_r0, v_g0); + __m128i layer1_chunk2 = _mm_unpacklo_epi16(v_r1, v_g1); + __m128i layer1_chunk3 = _mm_unpackhi_epi16(v_r1, v_g1); + + __m128i layer2_chunk0 = _mm_unpacklo_epi16(layer1_chunk0, layer1_chunk2); + __m128i layer2_chunk1 = _mm_unpackhi_epi16(layer1_chunk0, layer1_chunk2); + __m128i layer2_chunk2 = _mm_unpacklo_epi16(layer1_chunk1, layer1_chunk3); + __m128i layer2_chunk3 = _mm_unpackhi_epi16(layer1_chunk1, layer1_chunk3); + + __m128i layer3_chunk0 = _mm_unpacklo_epi16(layer2_chunk0, layer2_chunk2); + __m128i layer3_chunk1 = _mm_unpackhi_epi16(layer2_chunk0, layer2_chunk2); + __m128i layer3_chunk2 = _mm_unpacklo_epi16(layer2_chunk1, layer2_chunk3); + __m128i layer3_chunk3 = _mm_unpackhi_epi16(layer2_chunk1, layer2_chunk3); + + v_r0 = _mm_unpacklo_epi16(layer3_chunk0, layer3_chunk2); + v_r1 = _mm_unpackhi_epi16(layer3_chunk0, layer3_chunk2); + v_g0 = _mm_unpacklo_epi16(layer3_chunk1, layer3_chunk3); + v_g1 = _mm_unpackhi_epi16(layer3_chunk1, layer3_chunk3); +} + +inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, + __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) +{ + __m128i layer1_chunk0 = _mm_unpacklo_epi16(v_r0, v_g1); + __m128i layer1_chunk1 = _mm_unpackhi_epi16(v_r0, v_g1); + __m128i layer1_chunk2 = _mm_unpacklo_epi16(v_r1, v_b0); + __m128i layer1_chunk3 = _mm_unpackhi_epi16(v_r1, v_b0); + __m128i layer1_chunk4 = _mm_unpacklo_epi16(v_g0, v_b1); + __m128i layer1_chunk5 = _mm_unpackhi_epi16(v_g0, v_b1); + + __m128i layer2_chunk0 = _mm_unpacklo_epi16(layer1_chunk0, layer1_chunk3); + __m128i layer2_chunk1 = _mm_unpackhi_epi16(layer1_chunk0, layer1_chunk3); + __m128i layer2_chunk2 = _mm_unpacklo_epi16(layer1_chunk1, layer1_chunk4); + __m128i layer2_chunk3 = _mm_unpackhi_epi16(layer1_chunk1, layer1_chunk4); + __m128i layer2_chunk4 = _mm_unpacklo_epi16(layer1_chunk2, layer1_chunk5); + __m128i layer2_chunk5 = _mm_unpackhi_epi16(layer1_chunk2, layer1_chunk5); + + __m128i layer3_chunk0 = _mm_unpacklo_epi16(layer2_chunk0, layer2_chunk3); + __m128i layer3_chunk1 = _mm_unpackhi_epi16(layer2_chunk0, layer2_chunk3); + __m128i layer3_chunk2 = _mm_unpacklo_epi16(layer2_chunk1, layer2_chunk4); + __m128i layer3_chunk3 = _mm_unpackhi_epi16(layer2_chunk1, layer2_chunk4); + __m128i layer3_chunk4 = _mm_unpacklo_epi16(layer2_chunk2, layer2_chunk5); + __m128i layer3_chunk5 = _mm_unpackhi_epi16(layer2_chunk2, layer2_chunk5); + + v_r0 = _mm_unpacklo_epi16(layer3_chunk0, layer3_chunk3); + v_r1 = _mm_unpackhi_epi16(layer3_chunk0, layer3_chunk3); + v_g0 = _mm_unpacklo_epi16(layer3_chunk1, layer3_chunk4); + v_g1 = _mm_unpackhi_epi16(layer3_chunk1, layer3_chunk4); + v_b0 = _mm_unpacklo_epi16(layer3_chunk2, layer3_chunk5); + v_b1 = _mm_unpackhi_epi16(layer3_chunk2, layer3_chunk5); +} + +inline void _mm_deinterleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, + __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) +{ + __m128i layer1_chunk0 = _mm_unpacklo_epi16(v_r0, v_b0); + __m128i layer1_chunk1 = _mm_unpackhi_epi16(v_r0, v_b0); + __m128i layer1_chunk2 = 
_mm_unpacklo_epi16(v_r1, v_b1); + __m128i layer1_chunk3 = _mm_unpackhi_epi16(v_r1, v_b1); + __m128i layer1_chunk4 = _mm_unpacklo_epi16(v_g0, v_a0); + __m128i layer1_chunk5 = _mm_unpackhi_epi16(v_g0, v_a0); + __m128i layer1_chunk6 = _mm_unpacklo_epi16(v_g1, v_a1); + __m128i layer1_chunk7 = _mm_unpackhi_epi16(v_g1, v_a1); + + __m128i layer2_chunk0 = _mm_unpacklo_epi16(layer1_chunk0, layer1_chunk4); + __m128i layer2_chunk1 = _mm_unpackhi_epi16(layer1_chunk0, layer1_chunk4); + __m128i layer2_chunk2 = _mm_unpacklo_epi16(layer1_chunk1, layer1_chunk5); + __m128i layer2_chunk3 = _mm_unpackhi_epi16(layer1_chunk1, layer1_chunk5); + __m128i layer2_chunk4 = _mm_unpacklo_epi16(layer1_chunk2, layer1_chunk6); + __m128i layer2_chunk5 = _mm_unpackhi_epi16(layer1_chunk2, layer1_chunk6); + __m128i layer2_chunk6 = _mm_unpacklo_epi16(layer1_chunk3, layer1_chunk7); + __m128i layer2_chunk7 = _mm_unpackhi_epi16(layer1_chunk3, layer1_chunk7); + + __m128i layer3_chunk0 = _mm_unpacklo_epi16(layer2_chunk0, layer2_chunk4); + __m128i layer3_chunk1 = _mm_unpackhi_epi16(layer2_chunk0, layer2_chunk4); + __m128i layer3_chunk2 = _mm_unpacklo_epi16(layer2_chunk1, layer2_chunk5); + __m128i layer3_chunk3 = _mm_unpackhi_epi16(layer2_chunk1, layer2_chunk5); + __m128i layer3_chunk4 = _mm_unpacklo_epi16(layer2_chunk2, layer2_chunk6); + __m128i layer3_chunk5 = _mm_unpackhi_epi16(layer2_chunk2, layer2_chunk6); + __m128i layer3_chunk6 = _mm_unpacklo_epi16(layer2_chunk3, layer2_chunk7); + __m128i layer3_chunk7 = _mm_unpackhi_epi16(layer2_chunk3, layer2_chunk7); + + v_r0 = _mm_unpacklo_epi16(layer3_chunk0, layer3_chunk4); + v_r1 = _mm_unpackhi_epi16(layer3_chunk0, layer3_chunk4); + v_g0 = _mm_unpacklo_epi16(layer3_chunk1, layer3_chunk5); + v_g1 = _mm_unpackhi_epi16(layer3_chunk1, layer3_chunk5); + v_b0 = _mm_unpacklo_epi16(layer3_chunk2, layer3_chunk6); + v_b1 = _mm_unpackhi_epi16(layer3_chunk2, layer3_chunk6); + v_a0 = _mm_unpacklo_epi16(layer3_chunk3, layer3_chunk7); + v_a1 = _mm_unpackhi_epi16(layer3_chunk3, layer3_chunk7); +} + +#if CV_SSE4_1 + +inline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1) +{ + __m128i v_mask = _mm_set1_epi32(0x0000ffff); + + __m128i layer3_chunk0 = _mm_packus_epi32(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask)); + __m128i layer3_chunk2 = _mm_packus_epi32(_mm_srli_epi32(v_r0, 16), _mm_srli_epi32(v_r1, 16)); + __m128i layer3_chunk1 = _mm_packus_epi32(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask)); + __m128i layer3_chunk3 = _mm_packus_epi32(_mm_srli_epi32(v_g0, 16), _mm_srli_epi32(v_g1, 16)); + + __m128i layer2_chunk0 = _mm_packus_epi32(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask)); + __m128i layer2_chunk2 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16)); + __m128i layer2_chunk1 = _mm_packus_epi32(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); + __m128i layer2_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16)); + + __m128i layer1_chunk0 = _mm_packus_epi32(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); + __m128i layer1_chunk2 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16)); + __m128i layer1_chunk1 = _mm_packus_epi32(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); + __m128i layer1_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16)); + + v_r0 = 
_mm_packus_epi32(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); + v_g0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk0, 16), _mm_srli_epi32(layer1_chunk1, 16)); + v_r1 = _mm_packus_epi32(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); + v_g1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk2, 16), _mm_srli_epi32(layer1_chunk3, 16)); +} + +inline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, + __m128i & v_g1, __m128i & v_b0, __m128i & v_b1) +{ + __m128i v_mask = _mm_set1_epi32(0x0000ffff); + + __m128i layer3_chunk0 = _mm_packus_epi32(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask)); + __m128i layer3_chunk3 = _mm_packus_epi32(_mm_srli_epi32(v_r0, 16), _mm_srli_epi32(v_r1, 16)); + __m128i layer3_chunk1 = _mm_packus_epi32(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask)); + __m128i layer3_chunk4 = _mm_packus_epi32(_mm_srli_epi32(v_g0, 16), _mm_srli_epi32(v_g1, 16)); + __m128i layer3_chunk2 = _mm_packus_epi32(_mm_and_si128(v_b0, v_mask), _mm_and_si128(v_b1, v_mask)); + __m128i layer3_chunk5 = _mm_packus_epi32(_mm_srli_epi32(v_b0, 16), _mm_srli_epi32(v_b1, 16)); + + __m128i layer2_chunk0 = _mm_packus_epi32(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask)); + __m128i layer2_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16)); + __m128i layer2_chunk1 = _mm_packus_epi32(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); + __m128i layer2_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16)); + __m128i layer2_chunk2 = _mm_packus_epi32(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask)); + __m128i layer2_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk4, 16), _mm_srli_epi32(layer3_chunk5, 16)); + + __m128i layer1_chunk0 = _mm_packus_epi32(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); + __m128i layer1_chunk3 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16)); + __m128i layer1_chunk1 = _mm_packus_epi32(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); + __m128i layer1_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16)); + __m128i layer1_chunk2 = _mm_packus_epi32(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask)); + __m128i layer1_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk4, 16), _mm_srli_epi32(layer2_chunk5, 16)); + + v_r0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); + v_g1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk0, 16), _mm_srli_epi32(layer1_chunk1, 16)); + v_r1 = _mm_packus_epi32(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); + v_b0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk2, 16), _mm_srli_epi32(layer1_chunk3, 16)); + v_g0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask)); + v_b1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk4, 16), _mm_srli_epi32(layer1_chunk5, 16)); +} + +inline void _mm_interleave_epi16(__m128i & v_r0, __m128i & v_r1, __m128i & v_g0, __m128i & v_g1, + __m128i & v_b0, __m128i & v_b1, __m128i & v_a0, __m128i & v_a1) +{ + __m128i v_mask = _mm_set1_epi32(0x0000ffff); + + __m128i layer3_chunk0 = _mm_packus_epi32(_mm_and_si128(v_r0, v_mask), _mm_and_si128(v_r1, v_mask)); + __m128i layer3_chunk4 = 
_mm_packus_epi32(_mm_srli_epi32(v_r0, 16), _mm_srli_epi32(v_r1, 16)); + __m128i layer3_chunk1 = _mm_packus_epi32(_mm_and_si128(v_g0, v_mask), _mm_and_si128(v_g1, v_mask)); + __m128i layer3_chunk5 = _mm_packus_epi32(_mm_srli_epi32(v_g0, 16), _mm_srli_epi32(v_g1, 16)); + __m128i layer3_chunk2 = _mm_packus_epi32(_mm_and_si128(v_b0, v_mask), _mm_and_si128(v_b1, v_mask)); + __m128i layer3_chunk6 = _mm_packus_epi32(_mm_srli_epi32(v_b0, 16), _mm_srli_epi32(v_b1, 16)); + __m128i layer3_chunk3 = _mm_packus_epi32(_mm_and_si128(v_a0, v_mask), _mm_and_si128(v_a1, v_mask)); + __m128i layer3_chunk7 = _mm_packus_epi32(_mm_srli_epi32(v_a0, 16), _mm_srli_epi32(v_a1, 16)); + + __m128i layer2_chunk0 = _mm_packus_epi32(_mm_and_si128(layer3_chunk0, v_mask), _mm_and_si128(layer3_chunk1, v_mask)); + __m128i layer2_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk0, 16), _mm_srli_epi32(layer3_chunk1, 16)); + __m128i layer2_chunk1 = _mm_packus_epi32(_mm_and_si128(layer3_chunk2, v_mask), _mm_and_si128(layer3_chunk3, v_mask)); + __m128i layer2_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk2, 16), _mm_srli_epi32(layer3_chunk3, 16)); + __m128i layer2_chunk2 = _mm_packus_epi32(_mm_and_si128(layer3_chunk4, v_mask), _mm_and_si128(layer3_chunk5, v_mask)); + __m128i layer2_chunk6 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk4, 16), _mm_srli_epi32(layer3_chunk5, 16)); + __m128i layer2_chunk3 = _mm_packus_epi32(_mm_and_si128(layer3_chunk6, v_mask), _mm_and_si128(layer3_chunk7, v_mask)); + __m128i layer2_chunk7 = _mm_packus_epi32(_mm_srli_epi32(layer3_chunk6, 16), _mm_srli_epi32(layer3_chunk7, 16)); + + __m128i layer1_chunk0 = _mm_packus_epi32(_mm_and_si128(layer2_chunk0, v_mask), _mm_and_si128(layer2_chunk1, v_mask)); + __m128i layer1_chunk4 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk0, 16), _mm_srli_epi32(layer2_chunk1, 16)); + __m128i layer1_chunk1 = _mm_packus_epi32(_mm_and_si128(layer2_chunk2, v_mask), _mm_and_si128(layer2_chunk3, v_mask)); + __m128i layer1_chunk5 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk2, 16), _mm_srli_epi32(layer2_chunk3, 16)); + __m128i layer1_chunk2 = _mm_packus_epi32(_mm_and_si128(layer2_chunk4, v_mask), _mm_and_si128(layer2_chunk5, v_mask)); + __m128i layer1_chunk6 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk4, 16), _mm_srli_epi32(layer2_chunk5, 16)); + __m128i layer1_chunk3 = _mm_packus_epi32(_mm_and_si128(layer2_chunk6, v_mask), _mm_and_si128(layer2_chunk7, v_mask)); + __m128i layer1_chunk7 = _mm_packus_epi32(_mm_srli_epi32(layer2_chunk6, 16), _mm_srli_epi32(layer2_chunk7, 16)); + + v_r0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk0, v_mask), _mm_and_si128(layer1_chunk1, v_mask)); + v_b0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk0, 16), _mm_srli_epi32(layer1_chunk1, 16)); + v_r1 = _mm_packus_epi32(_mm_and_si128(layer1_chunk2, v_mask), _mm_and_si128(layer1_chunk3, v_mask)); + v_b1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk2, 16), _mm_srli_epi32(layer1_chunk3, 16)); + v_g0 = _mm_packus_epi32(_mm_and_si128(layer1_chunk4, v_mask), _mm_and_si128(layer1_chunk5, v_mask)); + v_a0 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk4, 16), _mm_srli_epi32(layer1_chunk5, 16)); + v_g1 = _mm_packus_epi32(_mm_and_si128(layer1_chunk6, v_mask), _mm_and_si128(layer1_chunk7, v_mask)); + v_a1 = _mm_packus_epi32(_mm_srli_epi32(layer1_chunk6, 16), _mm_srli_epi32(layer1_chunk7, 16)); +} + +#endif // CV_SSE4_1 + +inline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1) +{ + __m128 layer1_chunk0 = _mm_unpacklo_ps(v_r0, v_g0); + __m128 layer1_chunk1 = 
_mm_unpackhi_ps(v_r0, v_g0); + __m128 layer1_chunk2 = _mm_unpacklo_ps(v_r1, v_g1); + __m128 layer1_chunk3 = _mm_unpackhi_ps(v_r1, v_g1); + + __m128 layer2_chunk0 = _mm_unpacklo_ps(layer1_chunk0, layer1_chunk2); + __m128 layer2_chunk1 = _mm_unpackhi_ps(layer1_chunk0, layer1_chunk2); + __m128 layer2_chunk2 = _mm_unpacklo_ps(layer1_chunk1, layer1_chunk3); + __m128 layer2_chunk3 = _mm_unpackhi_ps(layer1_chunk1, layer1_chunk3); + + v_r0 = _mm_unpacklo_ps(layer2_chunk0, layer2_chunk2); + v_r1 = _mm_unpackhi_ps(layer2_chunk0, layer2_chunk2); + v_g0 = _mm_unpacklo_ps(layer2_chunk1, layer2_chunk3); + v_g1 = _mm_unpackhi_ps(layer2_chunk1, layer2_chunk3); +} + +inline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, + __m128 & v_g1, __m128 & v_b0, __m128 & v_b1) +{ + __m128 layer1_chunk0 = _mm_unpacklo_ps(v_r0, v_g1); + __m128 layer1_chunk1 = _mm_unpackhi_ps(v_r0, v_g1); + __m128 layer1_chunk2 = _mm_unpacklo_ps(v_r1, v_b0); + __m128 layer1_chunk3 = _mm_unpackhi_ps(v_r1, v_b0); + __m128 layer1_chunk4 = _mm_unpacklo_ps(v_g0, v_b1); + __m128 layer1_chunk5 = _mm_unpackhi_ps(v_g0, v_b1); + + __m128 layer2_chunk0 = _mm_unpacklo_ps(layer1_chunk0, layer1_chunk3); + __m128 layer2_chunk1 = _mm_unpackhi_ps(layer1_chunk0, layer1_chunk3); + __m128 layer2_chunk2 = _mm_unpacklo_ps(layer1_chunk1, layer1_chunk4); + __m128 layer2_chunk3 = _mm_unpackhi_ps(layer1_chunk1, layer1_chunk4); + __m128 layer2_chunk4 = _mm_unpacklo_ps(layer1_chunk2, layer1_chunk5); + __m128 layer2_chunk5 = _mm_unpackhi_ps(layer1_chunk2, layer1_chunk5); + + v_r0 = _mm_unpacklo_ps(layer2_chunk0, layer2_chunk3); + v_r1 = _mm_unpackhi_ps(layer2_chunk0, layer2_chunk3); + v_g0 = _mm_unpacklo_ps(layer2_chunk1, layer2_chunk4); + v_g1 = _mm_unpackhi_ps(layer2_chunk1, layer2_chunk4); + v_b0 = _mm_unpacklo_ps(layer2_chunk2, layer2_chunk5); + v_b1 = _mm_unpackhi_ps(layer2_chunk2, layer2_chunk5); +} + +inline void _mm_deinterleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1, + __m128 & v_b0, __m128 & v_b1, __m128 & v_a0, __m128 & v_a1) +{ + __m128 layer1_chunk0 = _mm_unpacklo_ps(v_r0, v_b0); + __m128 layer1_chunk1 = _mm_unpackhi_ps(v_r0, v_b0); + __m128 layer1_chunk2 = _mm_unpacklo_ps(v_r1, v_b1); + __m128 layer1_chunk3 = _mm_unpackhi_ps(v_r1, v_b1); + __m128 layer1_chunk4 = _mm_unpacklo_ps(v_g0, v_a0); + __m128 layer1_chunk5 = _mm_unpackhi_ps(v_g0, v_a0); + __m128 layer1_chunk6 = _mm_unpacklo_ps(v_g1, v_a1); + __m128 layer1_chunk7 = _mm_unpackhi_ps(v_g1, v_a1); + + __m128 layer2_chunk0 = _mm_unpacklo_ps(layer1_chunk0, layer1_chunk4); + __m128 layer2_chunk1 = _mm_unpackhi_ps(layer1_chunk0, layer1_chunk4); + __m128 layer2_chunk2 = _mm_unpacklo_ps(layer1_chunk1, layer1_chunk5); + __m128 layer2_chunk3 = _mm_unpackhi_ps(layer1_chunk1, layer1_chunk5); + __m128 layer2_chunk4 = _mm_unpacklo_ps(layer1_chunk2, layer1_chunk6); + __m128 layer2_chunk5 = _mm_unpackhi_ps(layer1_chunk2, layer1_chunk6); + __m128 layer2_chunk6 = _mm_unpacklo_ps(layer1_chunk3, layer1_chunk7); + __m128 layer2_chunk7 = _mm_unpackhi_ps(layer1_chunk3, layer1_chunk7); + + v_r0 = _mm_unpacklo_ps(layer2_chunk0, layer2_chunk4); + v_r1 = _mm_unpackhi_ps(layer2_chunk0, layer2_chunk4); + v_g0 = _mm_unpacklo_ps(layer2_chunk1, layer2_chunk5); + v_g1 = _mm_unpackhi_ps(layer2_chunk1, layer2_chunk5); + v_b0 = _mm_unpacklo_ps(layer2_chunk2, layer2_chunk6); + v_b1 = _mm_unpackhi_ps(layer2_chunk2, layer2_chunk6); + v_a0 = _mm_unpacklo_ps(layer2_chunk3, layer2_chunk7); + v_a1 = _mm_unpackhi_ps(layer2_chunk3, layer2_chunk7); +} + +inline void _mm_interleave_ps(__m128 & 
v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1) +{ + enum { mask_lo = _MM_SHUFFLE(2, 0, 2, 0), mask_hi = _MM_SHUFFLE(3, 1, 3, 1) }; + + __m128 layer2_chunk0 = _mm_shuffle_ps(v_r0, v_r1, mask_lo); + __m128 layer2_chunk2 = _mm_shuffle_ps(v_r0, v_r1, mask_hi); + __m128 layer2_chunk1 = _mm_shuffle_ps(v_g0, v_g1, mask_lo); + __m128 layer2_chunk3 = _mm_shuffle_ps(v_g0, v_g1, mask_hi); + + __m128 layer1_chunk0 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_lo); + __m128 layer1_chunk2 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_hi); + __m128 layer1_chunk1 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_lo); + __m128 layer1_chunk3 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_hi); + + v_r0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_lo); + v_g0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_hi); + v_r1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_lo); + v_g1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_hi); +} + +inline void _mm_interleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, + __m128 & v_g1, __m128 & v_b0, __m128 & v_b1) +{ + enum { mask_lo = _MM_SHUFFLE(2, 0, 2, 0), mask_hi = _MM_SHUFFLE(3, 1, 3, 1) }; + + __m128 layer2_chunk0 = _mm_shuffle_ps(v_r0, v_r1, mask_lo); + __m128 layer2_chunk3 = _mm_shuffle_ps(v_r0, v_r1, mask_hi); + __m128 layer2_chunk1 = _mm_shuffle_ps(v_g0, v_g1, mask_lo); + __m128 layer2_chunk4 = _mm_shuffle_ps(v_g0, v_g1, mask_hi); + __m128 layer2_chunk2 = _mm_shuffle_ps(v_b0, v_b1, mask_lo); + __m128 layer2_chunk5 = _mm_shuffle_ps(v_b0, v_b1, mask_hi); + + __m128 layer1_chunk0 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_lo); + __m128 layer1_chunk3 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_hi); + __m128 layer1_chunk1 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_lo); + __m128 layer1_chunk4 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_hi); + __m128 layer1_chunk2 = _mm_shuffle_ps(layer2_chunk4, layer2_chunk5, mask_lo); + __m128 layer1_chunk5 = _mm_shuffle_ps(layer2_chunk4, layer2_chunk5, mask_hi); + + v_r0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_lo); + v_g1 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_hi); + v_r1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_lo); + v_b0 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_hi); + v_g0 = _mm_shuffle_ps(layer1_chunk4, layer1_chunk5, mask_lo); + v_b1 = _mm_shuffle_ps(layer1_chunk4, layer1_chunk5, mask_hi); +} + +inline void _mm_interleave_ps(__m128 & v_r0, __m128 & v_r1, __m128 & v_g0, __m128 & v_g1, + __m128 & v_b0, __m128 & v_b1, __m128 & v_a0, __m128 & v_a1) +{ + enum { mask_lo = _MM_SHUFFLE(2, 0, 2, 0), mask_hi = _MM_SHUFFLE(3, 1, 3, 1) }; + + __m128 layer2_chunk0 = _mm_shuffle_ps(v_r0, v_r1, mask_lo); + __m128 layer2_chunk4 = _mm_shuffle_ps(v_r0, v_r1, mask_hi); + __m128 layer2_chunk1 = _mm_shuffle_ps(v_g0, v_g1, mask_lo); + __m128 layer2_chunk5 = _mm_shuffle_ps(v_g0, v_g1, mask_hi); + __m128 layer2_chunk2 = _mm_shuffle_ps(v_b0, v_b1, mask_lo); + __m128 layer2_chunk6 = _mm_shuffle_ps(v_b0, v_b1, mask_hi); + __m128 layer2_chunk3 = _mm_shuffle_ps(v_a0, v_a1, mask_lo); + __m128 layer2_chunk7 = _mm_shuffle_ps(v_a0, v_a1, mask_hi); + + __m128 layer1_chunk0 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_lo); + __m128 layer1_chunk4 = _mm_shuffle_ps(layer2_chunk0, layer2_chunk1, mask_hi); + __m128 layer1_chunk1 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_lo); + __m128 layer1_chunk5 = _mm_shuffle_ps(layer2_chunk2, layer2_chunk3, mask_hi); + __m128 layer1_chunk2 = _mm_shuffle_ps(layer2_chunk4, layer2_chunk5, 
mask_lo); + __m128 layer1_chunk6 = _mm_shuffle_ps(layer2_chunk4, layer2_chunk5, mask_hi); + __m128 layer1_chunk3 = _mm_shuffle_ps(layer2_chunk6, layer2_chunk7, mask_lo); + __m128 layer1_chunk7 = _mm_shuffle_ps(layer2_chunk6, layer2_chunk7, mask_hi); + + v_r0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_lo); + v_b0 = _mm_shuffle_ps(layer1_chunk0, layer1_chunk1, mask_hi); + v_r1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_lo); + v_b1 = _mm_shuffle_ps(layer1_chunk2, layer1_chunk3, mask_hi); + v_g0 = _mm_shuffle_ps(layer1_chunk4, layer1_chunk5, mask_lo); + v_a0 = _mm_shuffle_ps(layer1_chunk4, layer1_chunk5, mask_hi); + v_g1 = _mm_shuffle_ps(layer1_chunk6, layer1_chunk7, mask_lo); + v_a1 = _mm_shuffle_ps(layer1_chunk6, layer1_chunk7, mask_hi); +} + +#endif // CV_SSE2 + +//! @} + +#endif //OPENCV_CORE_SSE_UTILS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/traits.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/traits.hpp new file mode 100644 index 00000000..6cb10f44 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/traits.hpp @@ -0,0 +1,397 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_TRAITS_HPP +#define OPENCV_CORE_TRAITS_HPP + +#include "opencv2/core/cvdef.h" + +namespace cv +{ + +//#define OPENCV_TRAITS_ENABLE_DEPRECATED + +//! @addtogroup core_basic +//! 
@{
+
+/** @brief Template "trait" class for OpenCV primitive data types.
+
+@note Deprecated. This is replaced by "single purpose" traits: traits::Type and traits::Depth
+
+A primitive OpenCV data type is one of unsigned char, bool, signed char, unsigned short, signed
+short, int, float, double, or a tuple of values of one of these types, where all the values in the
+tuple have the same type. Any primitive type from the list can be defined by an identifier in the
+form CV_\<bit-depth\>{U|S|F}C(\<number_of_channels\>), for example: uchar \~ CV_8UC1, 3-element
+floating-point tuple \~ CV_32FC3, and so on. A universal OpenCV structure that is able to store a
+single instance of such a primitive data type is Vec. Multiple instances of such a type can be
+stored in a std::vector, Mat, Mat_, SparseMat, SparseMat_, or any other container that is able to
+store Vec instances.
+
+The DataType class is basically used to provide a description of such primitive data types without
+adding any fields or methods to the corresponding classes (and it is actually impossible to add
+anything to primitive C/C++ data types). This technique is known in C++ as class traits. It is not
+DataType itself that is used but its specialized versions, such as:
+@code
+    template<> class DataType<uchar>
+    {
+        typedef uchar value_type;
+        typedef int work_type;
+        typedef uchar channel_type;
+        enum { channel_type = CV_8U, channels = 1, fmt='u', type = CV_8U };
+    };
+    ...
+    template<typename _Tp> DataType<std::complex<_Tp> >
+    {
+        typedef std::complex<_Tp> value_type;
+        typedef std::complex<_Tp> work_type;
+        typedef _Tp channel_type;
+        // DataDepth is another helper trait class
+        enum { depth = DataDepth<_Tp>::value, channels=2,
+            fmt=(channels-1)*256+DataDepth<_Tp>::fmt,
+            type=CV_MAKETYPE(depth, channels) };
+    };
+    ...
+@endcode
+The main purpose of this class is to convert compilation-time type information to an
+OpenCV-compatible data type identifier, for example:
+@code
+    // allocates a 30x40 floating-point matrix
+    Mat A(30, 40, DataType<float>::type);
+
+    Mat B = Mat_<std::complex<double> >(3, 3);
+    // the statement below will print 6, 2, that is depth == CV_64F, channels == 2
+    cout << B.depth() << ", " << B.channels() << endl;
+@endcode
+So, such traits are used to tell OpenCV which data type you are working with, even if such a type is
+not native to OpenCV. For example, the matrix B initialization above is compiled because OpenCV
+defines the proper specialized template class DataType\<complex\<double\> \> . This mechanism is also
+useful (and used in OpenCV this way) for generic algorithms implementations.
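[Editor's sketch, not part of the vendored header: a minimal example of how these traits are consumed in user code. The matrix shapes and variable names are illustrative only, and it assumes the OpenCV core module is installed and linked.]

    #include <opencv2/core.hpp>

    int main()
    {
        // DataType<float>::type evaluates to CV_32FC1 at compile time, so the
        // element type of the matrix follows directly from the C++ type.
        cv::Mat A(30, 40, cv::DataType<float>::type);

        // For multi-channel element types the trait encodes depth and channel
        // count together: DataType<cv::Vec3b>::type is CV_8UC3.
        cv::Mat B(3, 3, cv::DataType<cv::Vec3b>::type);

        return (A.type() == CV_32FC1 && B.type() == CV_8UC3) ? 0 : 1;
    }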
+ +@note Default values were dropped to stop confusing developers about using of unsupported types (see #7599) +*/ +template class DataType +{ +public: +#ifdef OPENCV_TRAITS_ENABLE_DEPRECATED + typedef _Tp value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 1, + depth = -1, + channels = 1, + fmt = 0, + type = CV_MAKETYPE(depth, channels) + }; +#endif +}; + +template<> class DataType +{ +public: + typedef bool value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, + depth = CV_8U, + channels = 1, + fmt = (int)'u', + type = CV_MAKETYPE(depth, channels) + }; +}; + +template<> class DataType +{ +public: + typedef uchar value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, + depth = CV_8U, + channels = 1, + fmt = (int)'u', + type = CV_MAKETYPE(depth, channels) + }; +}; + +template<> class DataType +{ +public: + typedef schar value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, + depth = CV_8S, + channels = 1, + fmt = (int)'c', + type = CV_MAKETYPE(depth, channels) + }; +}; + +template<> class DataType +{ +public: + typedef schar value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, + depth = CV_8S, + channels = 1, + fmt = (int)'c', + type = CV_MAKETYPE(depth, channels) + }; +}; + +template<> class DataType +{ +public: + typedef ushort value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, + depth = CV_16U, + channels = 1, + fmt = (int)'w', + type = CV_MAKETYPE(depth, channels) + }; +}; + +template<> class DataType +{ +public: + typedef short value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, + depth = CV_16S, + channels = 1, + fmt = (int)'s', + type = CV_MAKETYPE(depth, channels) + }; +}; + +template<> class DataType +{ +public: + typedef int value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, + depth = CV_32S, + channels = 1, + fmt = (int)'i', + type = CV_MAKETYPE(depth, channels) + }; +}; + +template<> class DataType +{ +public: + typedef float value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, + depth = CV_32F, + channels = 1, + fmt = (int)'f', + type = CV_MAKETYPE(depth, channels) + }; +}; + +template<> class DataType +{ +public: + typedef double value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, + depth = CV_64F, + channels = 1, + fmt = (int)'d', + type = CV_MAKETYPE(depth, channels) + }; +}; + + +/** @brief A helper class for cv::DataType + +The class is specialized for each fundamental numerical data type supported by OpenCV. It provides +DataDepth::value constant. 
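[Editor's sketch, not part of the vendored header: a hedged illustration of what the DataDepth helper yields for two fundamental types. The static_asserts are illustrative and assume a C++11 build against the OpenCV core module.]

    #include <opencv2/core.hpp>

    // DataDepth<T>::value reduces a C++ type to the corresponding OpenCV depth
    // constant at compile time.
    static_assert(cv::DataDepth<unsigned char>::value == CV_8U,  "uchar maps to CV_8U");
    static_assert(cv::DataDepth<float>::value == CV_32F, "float maps to CV_32F");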
+*/ +template class DataDepth +{ +public: + enum + { + value = DataType<_Tp>::depth, + fmt = DataType<_Tp>::fmt + }; +}; + + +#ifdef OPENCV_TRAITS_ENABLE_DEPRECATED + +template class TypeDepth +{ +#ifdef OPENCV_TRAITS_ENABLE_LEGACY_DEFAULTS + enum { depth = CV_USRTYPE1 }; + typedef void value_type; +#endif +}; + +template<> class TypeDepth +{ + enum { depth = CV_8U }; + typedef uchar value_type; +}; + +template<> class TypeDepth +{ + enum { depth = CV_8S }; + typedef schar value_type; +}; + +template<> class TypeDepth +{ + enum { depth = CV_16U }; + typedef ushort value_type; +}; + +template<> class TypeDepth +{ + enum { depth = CV_16S }; + typedef short value_type; +}; + +template<> class TypeDepth +{ + enum { depth = CV_32S }; + typedef int value_type; +}; + +template<> class TypeDepth +{ + enum { depth = CV_32F }; + typedef float value_type; +}; + +template<> class TypeDepth +{ + enum { depth = CV_64F }; + typedef double value_type; +}; + +#endif + +//! @} + +namespace traits { + +namespace internal { +#define CV_CREATE_MEMBER_CHECK(X) \ +template class CheckMember_##X { \ + struct Fallback { int X; }; \ + struct Derived : T, Fallback { }; \ + template struct Check; \ + typedef char CV_NO[1]; \ + typedef char CV_YES[2]; \ + template static CV_NO & func(Check *); \ + template static CV_YES & func(...); \ +public: \ + typedef CheckMember_##X type; \ + enum { value = sizeof(func(0)) == sizeof(CV_YES) }; \ +}; + +CV_CREATE_MEMBER_CHECK(fmt) +CV_CREATE_MEMBER_CHECK(type) + +} // namespace internal + + +template +struct Depth +{ enum { value = DataType::depth }; }; + +template +struct Type +{ enum { value = DataType::type }; }; + +/** Similar to traits::Type but has value = -1 in case of unknown type (instead of compiler error) */ +template >::value > +struct SafeType {}; + +template +struct SafeType +{ enum { value = -1 }; }; + +template +struct SafeType +{ enum { value = Type::value }; }; + + +template >::value > +struct SafeFmt {}; + +template +struct SafeFmt +{ enum { fmt = 0 }; }; + +template +struct SafeFmt +{ enum { fmt = DataType::fmt }; }; + + +} // namespace + +} // cv + +#endif // OPENCV_CORE_TRAITS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/types.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/types.hpp new file mode 100644 index 00000000..0b1d9481 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/types.hpp @@ -0,0 +1,2391 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_TYPES_HPP +#define OPENCV_CORE_TYPES_HPP + +#ifndef __cplusplus +# error types.hpp header must be compiled as C++ +#endif + +#include +#include +#include +#include + +#include "opencv2/core/cvdef.h" +#include "opencv2/core/cvstd.hpp" +#include "opencv2/core/matx.hpp" + +namespace cv +{ + +//! @addtogroup core_basic +//! @{ + +//////////////////////////////// Complex ////////////////////////////// + +/** @brief A complex number class. + + The template class is similar and compatible with std::complex, however it provides slightly + more convenient access to the real and imaginary parts using through the simple field access, as opposite + to std::complex::real() and std::complex::imag(). +*/ +template class Complex +{ +public: + + //! default constructor + Complex(); + Complex( _Tp _re, _Tp _im = 0 ); + + //! conversion to another data type + template operator Complex() const; + //! conjugation + Complex conj() const; + + _Tp re, im; //< the real and the imaginary parts +}; + +typedef Complex Complexf; +typedef Complex Complexd; + +template class DataType< Complex<_Tp> > +{ +public: + typedef Complex<_Tp> value_type; + typedef value_type work_type; + typedef _Tp channel_type; + + enum { generic_type = 0, + channels = 2, + fmt = DataType::fmt + ((channels - 1) << 8) +#ifdef OPENCV_TRAITS_ENABLE_DEPRECATED + ,depth = DataType::depth + ,type = CV_MAKETYPE(depth, channels) +#endif + }; + + typedef Vec vec_type; +}; + +namespace traits { +template +struct Depth< Complex<_Tp> > { enum { value = Depth<_Tp>::value }; }; +template +struct Type< Complex<_Tp> > { enum { value = CV_MAKETYPE(Depth<_Tp>::value, 2) }; }; +} // namespace + + +//////////////////////////////// Point_ //////////////////////////////// + +/** @brief Template class for 2D points specified by its coordinates `x` and `y`. + +An instance of the class is interchangeable with C structures, CvPoint and CvPoint2D32f . There is +also a cast operator to convert point coordinates to the specified type. The conversion from +floating-point coordinates to integer coordinates is done by rounding. Commonly, the conversion +uses this operation for each of the coordinates. 
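[Editor's sketch, not part of the vendored header: the rounding rule described above in a two-line example. The coordinate values are arbitrary and only assume the OpenCV core module.]

    #include <opencv2/core.hpp>

    void point_conversion_example()
    {
        cv::Point2f pf(2.6f, -1.4f);
        cv::Point   pi = pf;   // cvRound() applied per coordinate: pi == cv::Point(3, -1)
        (void)pi;
    }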
Besides the class members listed in the +declaration above, the following operations on points are implemented: +@code + pt1 = pt2 + pt3; + pt1 = pt2 - pt3; + pt1 = pt2 * a; + pt1 = a * pt2; + pt1 = pt2 / a; + pt1 += pt2; + pt1 -= pt2; + pt1 *= a; + pt1 /= a; + double value = norm(pt); // L2 norm + pt1 == pt2; + pt1 != pt2; +@endcode +For your convenience, the following type aliases are defined: +@code + typedef Point_ Point2i; + typedef Point2i Point; + typedef Point_ Point2f; + typedef Point_ Point2d; +@endcode +Example: +@code + Point2f a(0.3f, 0.f), b(0.f, 0.4f); + Point pt = (a + b)*10.f; + cout << pt.x << ", " << pt.y << endl; +@endcode +*/ +template class Point_ +{ +public: + typedef _Tp value_type; + + //! default constructor + Point_(); + Point_(_Tp _x, _Tp _y); + Point_(const Point_& pt); + Point_(const Size_<_Tp>& sz); + Point_(const Vec<_Tp, 2>& v); + + Point_& operator = (const Point_& pt); + //! conversion to another data type + template operator Point_<_Tp2>() const; + + //! conversion to the old-style C structures + operator Vec<_Tp, 2>() const; + + //! dot product + _Tp dot(const Point_& pt) const; + //! dot product computed in double-precision arithmetics + double ddot(const Point_& pt) const; + //! cross-product + double cross(const Point_& pt) const; + //! checks whether the point is inside the specified rectangle + bool inside(const Rect_<_Tp>& r) const; + _Tp x; //!< x coordinate of the point + _Tp y; //!< y coordinate of the point +}; + +typedef Point_ Point2i; +typedef Point_ Point2l; +typedef Point_ Point2f; +typedef Point_ Point2d; +typedef Point2i Point; + +template class DataType< Point_<_Tp> > +{ +public: + typedef Point_<_Tp> value_type; + typedef Point_::work_type> work_type; + typedef _Tp channel_type; + + enum { generic_type = 0, + channels = 2, + fmt = traits::SafeFmt::fmt + ((channels - 1) << 8) +#ifdef OPENCV_TRAITS_ENABLE_DEPRECATED + ,depth = DataType::depth + ,type = CV_MAKETYPE(depth, channels) +#endif + }; + + typedef Vec vec_type; +}; + +namespace traits { +template +struct Depth< Point_<_Tp> > { enum { value = Depth<_Tp>::value }; }; +template +struct Type< Point_<_Tp> > { enum { value = CV_MAKETYPE(Depth<_Tp>::value, 2) }; }; +} // namespace + + +//////////////////////////////// Point3_ //////////////////////////////// + +/** @brief Template class for 3D points specified by its coordinates `x`, `y` and `z`. + +An instance of the class is interchangeable with the C structure CvPoint2D32f . Similarly to +Point_ , the coordinates of 3D points can be converted to another type. The vector arithmetic and +comparison operations are also supported. + +The following Point3_\<\> aliases are available: +@code + typedef Point3_ Point3i; + typedef Point3_ Point3f; + typedef Point3_ Point3d; +@endcode +@see cv::Point3i, cv::Point3f and cv::Point3d +*/ +template class Point3_ +{ +public: + typedef _Tp value_type; + + //! default constructor + Point3_(); + Point3_(_Tp _x, _Tp _y, _Tp _z); + Point3_(const Point3_& pt); + explicit Point3_(const Point_<_Tp>& pt); + Point3_(const Vec<_Tp, 3>& v); + + Point3_& operator = (const Point3_& pt); + //! conversion to another data type + template operator Point3_<_Tp2>() const; + //! conversion to cv::Vec<> +#if OPENCV_ABI_COMPATIBILITY > 300 + template operator Vec<_Tp2, 3>() const; +#else + operator Vec<_Tp, 3>() const; +#endif + + //! dot product + _Tp dot(const Point3_& pt) const; + //! dot product computed in double-precision arithmetics + double ddot(const Point3_& pt) const; + //! 
cross product of the 2 3D points + Point3_ cross(const Point3_& pt) const; + _Tp x; //!< x coordinate of the 3D point + _Tp y; //!< y coordinate of the 3D point + _Tp z; //!< z coordinate of the 3D point +}; + +typedef Point3_ Point3i; +typedef Point3_ Point3f; +typedef Point3_ Point3d; + +template class DataType< Point3_<_Tp> > +{ +public: + typedef Point3_<_Tp> value_type; + typedef Point3_::work_type> work_type; + typedef _Tp channel_type; + + enum { generic_type = 0, + channels = 3, + fmt = traits::SafeFmt::fmt + ((channels - 1) << 8) +#ifdef OPENCV_TRAITS_ENABLE_DEPRECATED + ,depth = DataType::depth + ,type = CV_MAKETYPE(depth, channels) +#endif + }; + + typedef Vec vec_type; +}; + +namespace traits { +template +struct Depth< Point3_<_Tp> > { enum { value = Depth<_Tp>::value }; }; +template +struct Type< Point3_<_Tp> > { enum { value = CV_MAKETYPE(Depth<_Tp>::value, 3) }; }; +} // namespace + +//////////////////////////////// Size_ //////////////////////////////// + +/** @brief Template class for specifying the size of an image or rectangle. + +The class includes two members called width and height. The structure can be converted to and from +the old OpenCV structures CvSize and CvSize2D32f . The same set of arithmetic and comparison +operations as for Point_ is available. + +OpenCV defines the following Size_\<\> aliases: +@code + typedef Size_ Size2i; + typedef Size2i Size; + typedef Size_ Size2f; +@endcode +*/ +template class Size_ +{ +public: + typedef _Tp value_type; + + //! default constructor + Size_(); + Size_(_Tp _width, _Tp _height); + Size_(const Size_& sz); + Size_(const Point_<_Tp>& pt); + + Size_& operator = (const Size_& sz); + //! the area (width*height) + _Tp area() const; + //! true if empty + bool empty() const; + + //! conversion of another data type. + template operator Size_<_Tp2>() const; + + _Tp width; //!< the width + _Tp height; //!< the height +}; + +typedef Size_ Size2i; +typedef Size_ Size2l; +typedef Size_ Size2f; +typedef Size_ Size2d; +typedef Size2i Size; + +template class DataType< Size_<_Tp> > +{ +public: + typedef Size_<_Tp> value_type; + typedef Size_::work_type> work_type; + typedef _Tp channel_type; + + enum { generic_type = 0, + channels = 2, + fmt = DataType::fmt + ((channels - 1) << 8) +#ifdef OPENCV_TRAITS_ENABLE_DEPRECATED + ,depth = DataType::depth + ,type = CV_MAKETYPE(depth, channels) +#endif + }; + + typedef Vec vec_type; +}; + +namespace traits { +template +struct Depth< Size_<_Tp> > { enum { value = Depth<_Tp>::value }; }; +template +struct Type< Size_<_Tp> > { enum { value = CV_MAKETYPE(Depth<_Tp>::value, 2) }; }; +} // namespace + +//////////////////////////////// Rect_ //////////////////////////////// + +/** @brief Template class for 2D rectangles + +described by the following parameters: +- Coordinates of the top-left corner. This is a default interpretation of Rect_::x and Rect_::y + in OpenCV. Though, in your algorithms you may count x and y from the bottom-left corner. +- Rectangle width and height. + +OpenCV typically assumes that the top and left boundary of the rectangle are inclusive, while the +right and bottom boundaries are not. For example, the method Rect_::contains returns true if + +\f[x \leq pt.x < x+width, + y \leq pt.y < y+height\f] + +Virtually every loop over an image ROI in OpenCV (where ROI is specified by Rect_\ ) is +implemented as: +@code + for(int y = roi.y; y < roi.y + roi.height; y++) + for(int x = roi.x; x < roi.x + roi.width; x++) + { + // ... 
+ } +@endcode +In addition to the class members, the following operations on rectangles are implemented: +- \f$\texttt{rect} = \texttt{rect} \pm \texttt{point}\f$ (shifting a rectangle by a certain offset) +- \f$\texttt{rect} = \texttt{rect} \pm \texttt{size}\f$ (expanding or shrinking a rectangle by a + certain amount) +- rect += point, rect -= point, rect += size, rect -= size (augmenting operations) +- rect = rect1 & rect2 (rectangle intersection) +- rect = rect1 | rect2 (minimum area rectangle containing rect1 and rect2 ) +- rect &= rect1, rect |= rect1 (and the corresponding augmenting operations) +- rect == rect1, rect != rect1 (rectangle comparison) + +This is an example how the partial ordering on rectangles can be established (rect1 \f$\subseteq\f$ +rect2): +@code + template inline bool + operator <= (const Rect_<_Tp>& r1, const Rect_<_Tp>& r2) + { + return (r1 & r2) == r1; + } +@endcode +For your convenience, the Rect_\<\> alias is available: cv::Rect +*/ +template class Rect_ +{ +public: + typedef _Tp value_type; + + //! default constructor + Rect_(); + Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height); + Rect_(const Rect_& r); + Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz); + Rect_(const Point_<_Tp>& pt1, const Point_<_Tp>& pt2); + + Rect_& operator = ( const Rect_& r ); + //! the top-left corner + Point_<_Tp> tl() const; + //! the bottom-right corner + Point_<_Tp> br() const; + + //! size (width, height) of the rectangle + Size_<_Tp> size() const; + //! area (width*height) of the rectangle + _Tp area() const; + //! true if empty + bool empty() const; + + //! conversion to another data type + template operator Rect_<_Tp2>() const; + + //! checks whether the rectangle contains the point + bool contains(const Point_<_Tp>& pt) const; + + _Tp x; //!< x coordinate of the top-left corner + _Tp y; //!< y coordinate of the top-left corner + _Tp width; //!< width of the rectangle + _Tp height; //!< height of the rectangle +}; + +typedef Rect_ Rect2i; +typedef Rect_ Rect2f; +typedef Rect_ Rect2d; +typedef Rect2i Rect; + +template class DataType< Rect_<_Tp> > +{ +public: + typedef Rect_<_Tp> value_type; + typedef Rect_::work_type> work_type; + typedef _Tp channel_type; + + enum { generic_type = 0, + channels = 4, + fmt = traits::SafeFmt::fmt + ((channels - 1) << 8) +#ifdef OPENCV_TRAITS_ENABLE_DEPRECATED + ,depth = DataType::depth + ,type = CV_MAKETYPE(depth, channels) +#endif + }; + + typedef Vec vec_type; +}; + +namespace traits { +template +struct Depth< Rect_<_Tp> > { enum { value = Depth<_Tp>::value }; }; +template +struct Type< Rect_<_Tp> > { enum { value = CV_MAKETYPE(Depth<_Tp>::value, 4) }; }; +} // namespace + +///////////////////////////// RotatedRect ///////////////////////////// + +/** @brief The class represents rotated (i.e. not up-right) rectangles on a plane. + +Each rectangle is specified by the center point (mass center), length of each side (represented by +#Size2f structure) and the rotation angle in degrees. + +The sample below demonstrates how to use RotatedRect: +@snippet snippets/core_various.cpp RotatedRect_demo +![image](pics/rotatedrect.png) + +@sa CamShift, fitEllipse, minAreaRect, CvBox2D +*/ +class CV_EXPORTS RotatedRect +{ +public: + //! default constructor + RotatedRect(); + /** full constructor + @param center The rectangle mass center. + @param size Width and height of the rectangle. + @param angle The rotation angle in a clockwise direction. When the angle is 0, 90, 180, 270 etc., + the rectangle becomes an up-right rectangle. 
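[Editor's sketch, not part of the vendored header: a brief usage example for the constructor documented above. The centre, size and angle values are invented for illustration and only the core module is assumed.]

    #include <opencv2/core.hpp>

    void rotated_rect_example()
    {
        // A 100x50 rectangle centred at (200, 150), rotated 30 degrees clockwise.
        cv::RotatedRect rr(cv::Point2f(200.f, 150.f), cv::Size2f(100.f, 50.f), 30.f);

        cv::Point2f corners[4];
        rr.points(corners);                 // bottomLeft, topLeft, topRight, bottomRight
        cv::Rect bbox = rr.boundingRect();  // minimal up-right integer bounding rectangle
        (void)bbox;
    }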
+ */ + RotatedRect(const Point2f& center, const Size2f& size, float angle); + /** + Any 3 end points of the RotatedRect. They must be given in order (either clockwise or + anticlockwise). + */ + RotatedRect(const Point2f& point1, const Point2f& point2, const Point2f& point3); + + /** returns 4 vertices of the rectangle + @param pts The points array for storing rectangle vertices. The order is bottomLeft, topLeft, topRight, bottomRight. + */ + void points(Point2f pts[]) const; + //! returns the minimal up-right integer rectangle containing the rotated rectangle + Rect boundingRect() const; + //! returns the minimal (exact) floating point rectangle containing the rotated rectangle, not intended for use with images + Rect_ boundingRect2f() const; + //! returns the rectangle mass center + Point2f center; + //! returns width and height of the rectangle + Size2f size; + //! returns the rotation angle. When the angle is 0, 90, 180, 270 etc., the rectangle becomes an up-right rectangle. + float angle; +}; + +template<> class DataType< RotatedRect > +{ +public: + typedef RotatedRect value_type; + typedef value_type work_type; + typedef float channel_type; + + enum { generic_type = 0, + channels = (int)sizeof(value_type)/sizeof(channel_type), // 5 + fmt = traits::SafeFmt::fmt + ((channels - 1) << 8) +#ifdef OPENCV_TRAITS_ENABLE_DEPRECATED + ,depth = DataType::depth + ,type = CV_MAKETYPE(depth, channels) +#endif + }; + + typedef Vec vec_type; +}; + +namespace traits { +template<> +struct Depth< RotatedRect > { enum { value = Depth::value }; }; +template<> +struct Type< RotatedRect > { enum { value = CV_MAKETYPE(Depth::value, (int)sizeof(RotatedRect)/sizeof(float)) }; }; +} // namespace + + +//////////////////////////////// Range ///////////////////////////////// + +/** @brief Template class specifying a continuous subsequence (slice) of a sequence. + +The class is used to specify a row or a column span in a matrix ( Mat ) and for many other purposes. +Range(a,b) is basically the same as a:b in Matlab or a..b in Python. As in Python, start is an +inclusive left boundary of the range and end is an exclusive right boundary of the range. Such a +half-opened interval is usually denoted as \f$[start,end)\f$ . + +The static method Range::all() returns a special variable that means "the whole sequence" or "the +whole range", just like " : " in Matlab or " ... " in Python. All the methods and functions in +OpenCV that take Range support this special Range::all() value. But, of course, in case of your own +custom processing, you will probably have to check and handle it explicitly: +@code + void my_function(..., const Range& r, ....) 
+ { + if(r == Range::all()) { + // process all the data + } + else { + // process [r.start, r.end) + } + } +@endcode +*/ +class CV_EXPORTS Range +{ +public: + Range(); + Range(int _start, int _end); + int size() const; + bool empty() const; + static Range all(); + + int start, end; +}; + +template<> class DataType +{ +public: + typedef Range value_type; + typedef value_type work_type; + typedef int channel_type; + + enum { generic_type = 0, + channels = 2, + fmt = traits::SafeFmt::fmt + ((channels - 1) << 8) +#ifdef OPENCV_TRAITS_ENABLE_DEPRECATED + ,depth = DataType::depth + ,type = CV_MAKETYPE(depth, channels) +#endif + }; + + typedef Vec vec_type; +}; + +namespace traits { +template<> +struct Depth< Range > { enum { value = Depth::value }; }; +template<> +struct Type< Range > { enum { value = CV_MAKETYPE(Depth::value, 2) }; }; +} // namespace + + +//////////////////////////////// Scalar_ /////////////////////////////// + +/** @brief Template class for a 4-element vector derived from Vec. + +Being derived from Vec\<_Tp, 4\> , Scalar\_ and Scalar can be used just as typical 4-element +vectors. In addition, they can be converted to/from CvScalar . The type Scalar is widely used in +OpenCV to pass pixel values. +*/ +template class Scalar_ : public Vec<_Tp, 4> +{ +public: + //! default constructor + Scalar_(); + Scalar_(_Tp v0, _Tp v1, _Tp v2=0, _Tp v3=0); + Scalar_(_Tp v0); + + template + Scalar_(const Vec<_Tp2, cn>& v); + + //! returns a scalar with all elements set to v0 + static Scalar_<_Tp> all(_Tp v0); + + //! conversion to another data type + template operator Scalar_() const; + + //! per-element product + Scalar_<_Tp> mul(const Scalar_<_Tp>& a, double scale=1 ) const; + + //! returns (v0, -v1, -v2, -v3) + Scalar_<_Tp> conj() const; + + //! returns true iff v1 == v2 == v3 == 0 + bool isReal() const; +}; + +typedef Scalar_ Scalar; + +template class DataType< Scalar_<_Tp> > +{ +public: + typedef Scalar_<_Tp> value_type; + typedef Scalar_::work_type> work_type; + typedef _Tp channel_type; + + enum { generic_type = 0, + channels = 4, + fmt = traits::SafeFmt::fmt + ((channels - 1) << 8) +#ifdef OPENCV_TRAITS_ENABLE_DEPRECATED + ,depth = DataType::depth + ,type = CV_MAKETYPE(depth, channels) +#endif + }; + + typedef Vec vec_type; +}; + +namespace traits { +template +struct Depth< Scalar_<_Tp> > { enum { value = Depth<_Tp>::value }; }; +template +struct Type< Scalar_<_Tp> > { enum { value = CV_MAKETYPE(Depth<_Tp>::value, 4) }; }; +} // namespace + + +/////////////////////////////// KeyPoint //////////////////////////////// + +/** @brief Data structure for salient point detectors. + +The class instance stores a keypoint, i.e. a point feature found by one of many available keypoint +detectors, such as Harris corner detector, #FAST, %StarDetector, %SURF, %SIFT etc. + +The keypoint is characterized by the 2D position, scale (proportional to the diameter of the +neighborhood that needs to be taken into account), orientation and some other parameters. The +keypoint neighborhood is then analyzed by another algorithm that builds a descriptor (usually +represented as a feature vector). The keypoints representing the same object in different images +can then be matched using %KDTree or another method. +*/ +class CV_EXPORTS_W_SIMPLE KeyPoint +{ +public: + //! 
the default constructor + CV_WRAP KeyPoint(); + /** + @param pt x & y coordinates of the keypoint + @param size keypoint diameter + @param angle keypoint orientation + @param response keypoint detector response on the keypoint (that is, strength of the keypoint) + @param octave pyramid octave in which the keypoint has been detected + @param class_id object id + */ + KeyPoint(Point2f pt, float size, float angle=-1, float response=0, int octave=0, int class_id=-1); + /** + @param x x-coordinate of the keypoint + @param y y-coordinate of the keypoint + @param size keypoint diameter + @param angle keypoint orientation + @param response keypoint detector response on the keypoint (that is, strength of the keypoint) + @param octave pyramid octave in which the keypoint has been detected + @param class_id object id + */ + CV_WRAP KeyPoint(float x, float y, float size, float angle=-1, float response=0, int octave=0, int class_id=-1); + + size_t hash() const; + + /** + This method converts vector of keypoints to vector of points or the reverse, where each keypoint is + assigned the same size and the same orientation. + + @param keypoints Keypoints obtained from any feature detection algorithm like SIFT/SURF/ORB + @param points2f Array of (x,y) coordinates of each keypoint + @param keypointIndexes Array of indexes of keypoints to be converted to points. (Acts like a mask to + convert only specified keypoints) + */ + CV_WRAP static void convert(const std::vector& keypoints, + CV_OUT std::vector& points2f, + const std::vector& keypointIndexes=std::vector()); + /** @overload + @param points2f Array of (x,y) coordinates of each keypoint + @param keypoints Keypoints obtained from any feature detection algorithm like SIFT/SURF/ORB + @param size keypoint diameter + @param response keypoint detector response on the keypoint (that is, strength of the keypoint) + @param octave pyramid octave in which the keypoint has been detected + @param class_id object id + */ + CV_WRAP static void convert(const std::vector& points2f, + CV_OUT std::vector& keypoints, + float size=1, float response=1, int octave=0, int class_id=-1); + + /** + This method computes overlap for pair of keypoints. Overlap is the ratio between area of keypoint + regions' intersection and area of keypoint regions' union (considering keypoint region as circle). + If they don't overlap, we get zero. If they coincide at same location with same size, we get 1. + @param kp1 First keypoint + @param kp2 Second keypoint + */ + CV_WRAP static float overlap(const KeyPoint& kp1, const KeyPoint& kp2); + + CV_PROP_RW Point2f pt; //!< coordinates of the keypoints + CV_PROP_RW float size; //!< diameter of the meaningful keypoint neighborhood + CV_PROP_RW float angle; //!< computed orientation of the keypoint (-1 if not applicable); + //!< it's in [0,360) degrees and measured relative to + //!< image coordinate system, ie in clockwise. + CV_PROP_RW float response; //!< the response by which the most strong keypoints have been selected. 
Can be used for the further sorting or subsampling + CV_PROP_RW int octave; //!< octave (pyramid layer) from which the keypoint has been extracted + CV_PROP_RW int class_id; //!< object class (if the keypoints need to be clustered by an object they belong to) +}; + +#ifdef OPENCV_TRAITS_ENABLE_DEPRECATED +template<> class DataType +{ +public: + typedef KeyPoint value_type; + typedef float work_type; + typedef float channel_type; + + enum { generic_type = 0, + depth = DataType::depth, + channels = (int)(sizeof(value_type)/sizeof(channel_type)), // 7 + fmt = DataType::fmt + ((channels - 1) << 8), + type = CV_MAKETYPE(depth, channels) + }; + + typedef Vec vec_type; +}; +#endif + + +//////////////////////////////// DMatch ///////////////////////////////// + +/** @brief Class for matching keypoint descriptors + +query descriptor index, train descriptor index, train image index, and distance between +descriptors. +*/ +class CV_EXPORTS_W_SIMPLE DMatch +{ +public: + CV_WRAP DMatch(); + CV_WRAP DMatch(int _queryIdx, int _trainIdx, float _distance); + CV_WRAP DMatch(int _queryIdx, int _trainIdx, int _imgIdx, float _distance); + + CV_PROP_RW int queryIdx; //!< query descriptor index + CV_PROP_RW int trainIdx; //!< train descriptor index + CV_PROP_RW int imgIdx; //!< train image index + + CV_PROP_RW float distance; + + // less is better + bool operator<(const DMatch &m) const; +}; + +#ifdef OPENCV_TRAITS_ENABLE_DEPRECATED +template<> class DataType +{ +public: + typedef DMatch value_type; + typedef int work_type; + typedef int channel_type; + + enum { generic_type = 0, + depth = DataType::depth, + channels = (int)(sizeof(value_type)/sizeof(channel_type)), // 4 + fmt = DataType::fmt + ((channels - 1) << 8), + type = CV_MAKETYPE(depth, channels) + }; + + typedef Vec vec_type; +}; +#endif + + +///////////////////////////// TermCriteria ////////////////////////////// + +/** @brief The class defining termination criteria for iterative algorithms. + +You can initialize it by default constructor and then override any parameters, or the structure may +be fully initialized using the advanced variant of the constructor. +*/ +class CV_EXPORTS TermCriteria +{ +public: + /** + Criteria type, can be one of: COUNT, EPS or COUNT + EPS + */ + enum Type + { + COUNT=1, //!< the maximum number of iterations or elements to compute + MAX_ITER=COUNT, //!< ditto + EPS=2 //!< the desired accuracy or change in parameters at which the iterative algorithm stops + }; + + //! default constructor + TermCriteria(); + /** + @param type The type of termination criteria, one of TermCriteria::Type + @param maxCount The maximum number of iterations or elements to compute. + @param epsilon The desired accuracy or change in parameters at which the iterative algorithm stops. + */ + TermCriteria(int type, int maxCount, double epsilon); + + inline bool isValid() const + { + const bool isCount = (type & COUNT) && maxCount > 0; + const bool isEps = (type & EPS) && !cvIsNaN(epsilon); + return isCount || isEps; + } + + int type; //!< the type of termination criteria: COUNT, EPS or COUNT + EPS + int maxCount; //!< the maximum number of iterations/elements + double epsilon; //!< the desired accuracy +}; + + +//! @} core_basic + +///////////////////////// raster image moments ////////////////////////// + +//! @addtogroup imgproc_shape +//! 
@{ + +/** @brief struct returned by cv::moments + +The spatial moments \f$\texttt{Moments::m}_{ji}\f$ are computed as: + +\f[\texttt{m} _{ji}= \sum _{x,y} \left ( \texttt{array} (x,y) \cdot x^j \cdot y^i \right )\f] + +The central moments \f$\texttt{Moments::mu}_{ji}\f$ are computed as: + +\f[\texttt{mu} _{ji}= \sum _{x,y} \left ( \texttt{array} (x,y) \cdot (x - \bar{x} )^j \cdot (y - \bar{y} )^i \right )\f] + +where \f$(\bar{x}, \bar{y})\f$ is the mass center: + +\f[\bar{x} = \frac{\texttt{m}_{10}}{\texttt{m}_{00}} , \; \bar{y} = \frac{\texttt{m}_{01}}{\texttt{m}_{00}}\f] + +The normalized central moments \f$\texttt{Moments::nu}_{ij}\f$ are computed as: + +\f[\texttt{nu} _{ji}= \frac{\texttt{mu}_{ji}}{\texttt{m}_{00}^{(i+j)/2+1}} .\f] + +@note +\f$\texttt{mu}_{00}=\texttt{m}_{00}\f$, \f$\texttt{nu}_{00}=1\f$ +\f$\texttt{nu}_{10}=\texttt{mu}_{10}=\texttt{mu}_{01}=\texttt{mu}_{10}=0\f$ , hence the values are not +stored. + +The moments of a contour are defined in the same way but computed using the Green's formula (see +). So, due to a limited raster resolution, the moments +computed for a contour are slightly different from the moments computed for the same rasterized +contour. + +@note +Since the contour moments are computed using Green formula, you may get seemingly odd results for +contours with self-intersections, e.g. a zero area (m00) for butterfly-shaped contours. + */ +class CV_EXPORTS_W_MAP Moments +{ +public: + //! the default constructor + Moments(); + //! the full constructor + Moments(double m00, double m10, double m01, double m20, double m11, + double m02, double m30, double m21, double m12, double m03 ); + ////! the conversion from CvMoments + //Moments( const CvMoments& moments ); + ////! the conversion to CvMoments + //operator CvMoments() const; + + //! @name spatial moments + //! @{ + CV_PROP_RW double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03; + //! @} + + //! @name central moments + //! @{ + CV_PROP_RW double mu20, mu11, mu02, mu30, mu21, mu12, mu03; + //! @} + + //! @name central normalized moments + //! @{ + CV_PROP_RW double nu20, nu11, nu02, nu30, nu21, nu12, nu03; + //! @} +}; + +template<> class DataType +{ +public: + typedef Moments value_type; + typedef double work_type; + typedef double channel_type; + + enum { generic_type = 0, + channels = (int)(sizeof(value_type)/sizeof(channel_type)), // 24 + fmt = DataType::fmt + ((channels - 1) << 8) +#ifdef OPENCV_TRAITS_ENABLE_DEPRECATED + ,depth = DataType::depth + ,type = CV_MAKETYPE(depth, channels) +#endif + }; + + typedef Vec vec_type; +}; + +namespace traits { +template<> +struct Depth< Moments > { enum { value = Depth::value }; }; +template<> +struct Type< Moments > { enum { value = CV_MAKETYPE(Depth::value, (int)(sizeof(Moments)/sizeof(double))) }; }; +} // namespace + +//! @} imgproc_shape + +//! 
@cond IGNORED + +///////////////////////////////////////////////////////////////////////// +///////////////////////////// Implementation //////////////////////////// +///////////////////////////////////////////////////////////////////////// + +//////////////////////////////// Complex //////////////////////////////// + +template inline +Complex<_Tp>::Complex() + : re(0), im(0) {} + +template inline +Complex<_Tp>::Complex( _Tp _re, _Tp _im ) + : re(_re), im(_im) {} + +template template inline +Complex<_Tp>::operator Complex() const +{ + return Complex(saturate_cast(re), saturate_cast(im)); +} + +template inline +Complex<_Tp> Complex<_Tp>::conj() const +{ + return Complex<_Tp>(re, -im); +} + + +template static inline +bool operator == (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ + return a.re == b.re && a.im == b.im; +} + +template static inline +bool operator != (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ + return a.re != b.re || a.im != b.im; +} + +template static inline +Complex<_Tp> operator + (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ + return Complex<_Tp>( a.re + b.re, a.im + b.im ); +} + +template static inline +Complex<_Tp>& operator += (Complex<_Tp>& a, const Complex<_Tp>& b) +{ + a.re += b.re; a.im += b.im; + return a; +} + +template static inline +Complex<_Tp> operator - (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ + return Complex<_Tp>( a.re - b.re, a.im - b.im ); +} + +template static inline +Complex<_Tp>& operator -= (Complex<_Tp>& a, const Complex<_Tp>& b) +{ + a.re -= b.re; a.im -= b.im; + return a; +} + +template static inline +Complex<_Tp> operator - (const Complex<_Tp>& a) +{ + return Complex<_Tp>(-a.re, -a.im); +} + +template static inline +Complex<_Tp> operator * (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ + return Complex<_Tp>( a.re*b.re - a.im*b.im, a.re*b.im + a.im*b.re ); +} + +template static inline +Complex<_Tp> operator * (const Complex<_Tp>& a, _Tp b) +{ + return Complex<_Tp>( a.re*b, a.im*b ); +} + +template static inline +Complex<_Tp> operator * (_Tp b, const Complex<_Tp>& a) +{ + return Complex<_Tp>( a.re*b, a.im*b ); +} + +template static inline +Complex<_Tp> operator + (const Complex<_Tp>& a, _Tp b) +{ + return Complex<_Tp>( a.re + b, a.im ); +} + +template static inline +Complex<_Tp> operator - (const Complex<_Tp>& a, _Tp b) +{ return Complex<_Tp>( a.re - b, a.im ); } + +template static inline +Complex<_Tp> operator + (_Tp b, const Complex<_Tp>& a) +{ + return Complex<_Tp>( a.re + b, a.im ); +} + +template static inline +Complex<_Tp> operator - (_Tp b, const Complex<_Tp>& a) +{ + return Complex<_Tp>( b - a.re, -a.im ); +} + +template static inline +Complex<_Tp>& operator += (Complex<_Tp>& a, _Tp b) +{ + a.re += b; return a; +} + +template static inline +Complex<_Tp>& operator -= (Complex<_Tp>& a, _Tp b) +{ + a.re -= b; return a; +} + +template static inline +Complex<_Tp>& operator *= (Complex<_Tp>& a, _Tp b) +{ + a.re *= b; a.im *= b; return a; +} + +template static inline +double abs(const Complex<_Tp>& a) +{ + return std::sqrt( (double)a.re*a.re + (double)a.im*a.im); +} + +template static inline +Complex<_Tp> operator / (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ + double t = 1./((double)b.re*b.re + (double)b.im*b.im); + return Complex<_Tp>( (_Tp)((a.re*b.re + a.im*b.im)*t), + (_Tp)((-a.re*b.im + a.im*b.re)*t) ); +} + +template static inline +Complex<_Tp>& operator /= (Complex<_Tp>& a, const Complex<_Tp>& b) +{ + a = a / b; + return a; +} + +template static inline +Complex<_Tp> operator / (const Complex<_Tp>& a, _Tp b) +{ 
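+    // scale both components by the reciprocal so only one division is performed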
+ _Tp t = (_Tp)1/b; + return Complex<_Tp>( a.re*t, a.im*t ); +} + +template static inline +Complex<_Tp> operator / (_Tp b, const Complex<_Tp>& a) +{ + return Complex<_Tp>(b)/a; +} + +template static inline +Complex<_Tp> operator /= (const Complex<_Tp>& a, _Tp b) +{ + _Tp t = (_Tp)1/b; + a.re *= t; a.im *= t; return a; +} + + + +//////////////////////////////// 2D Point /////////////////////////////// + +template inline +Point_<_Tp>::Point_() + : x(0), y(0) {} + +template inline +Point_<_Tp>::Point_(_Tp _x, _Tp _y) + : x(_x), y(_y) {} + +template inline +Point_<_Tp>::Point_(const Point_& pt) + : x(pt.x), y(pt.y) {} + +template inline +Point_<_Tp>::Point_(const Size_<_Tp>& sz) + : x(sz.width), y(sz.height) {} + +template inline +Point_<_Tp>::Point_(const Vec<_Tp,2>& v) + : x(v[0]), y(v[1]) {} + +template inline +Point_<_Tp>& Point_<_Tp>::operator = (const Point_& pt) +{ + x = pt.x; y = pt.y; + return *this; +} + +template template inline +Point_<_Tp>::operator Point_<_Tp2>() const +{ + return Point_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y)); +} + +template inline +Point_<_Tp>::operator Vec<_Tp, 2>() const +{ + return Vec<_Tp, 2>(x, y); +} + +template inline +_Tp Point_<_Tp>::dot(const Point_& pt) const +{ + return saturate_cast<_Tp>(x*pt.x + y*pt.y); +} + +template inline +double Point_<_Tp>::ddot(const Point_& pt) const +{ + return (double)x*(double)(pt.x) + (double)y*(double)(pt.y); +} + +template inline +double Point_<_Tp>::cross(const Point_& pt) const +{ + return (double)x*pt.y - (double)y*pt.x; +} + +template inline bool +Point_<_Tp>::inside( const Rect_<_Tp>& r ) const +{ + return r.contains(*this); +} + + +template static inline +Point_<_Tp>& operator += (Point_<_Tp>& a, const Point_<_Tp>& b) +{ + a.x += b.x; + a.y += b.y; + return a; +} + +template static inline +Point_<_Tp>& operator -= (Point_<_Tp>& a, const Point_<_Tp>& b) +{ + a.x -= b.x; + a.y -= b.y; + return a; +} + +template static inline +Point_<_Tp>& operator *= (Point_<_Tp>& a, int b) +{ + a.x = saturate_cast<_Tp>(a.x * b); + a.y = saturate_cast<_Tp>(a.y * b); + return a; +} + +template static inline +Point_<_Tp>& operator *= (Point_<_Tp>& a, float b) +{ + a.x = saturate_cast<_Tp>(a.x * b); + a.y = saturate_cast<_Tp>(a.y * b); + return a; +} + +template static inline +Point_<_Tp>& operator *= (Point_<_Tp>& a, double b) +{ + a.x = saturate_cast<_Tp>(a.x * b); + a.y = saturate_cast<_Tp>(a.y * b); + return a; +} + +template static inline +Point_<_Tp>& operator /= (Point_<_Tp>& a, int b) +{ + a.x = saturate_cast<_Tp>(a.x / b); + a.y = saturate_cast<_Tp>(a.y / b); + return a; +} + +template static inline +Point_<_Tp>& operator /= (Point_<_Tp>& a, float b) +{ + a.x = saturate_cast<_Tp>(a.x / b); + a.y = saturate_cast<_Tp>(a.y / b); + return a; +} + +template static inline +Point_<_Tp>& operator /= (Point_<_Tp>& a, double b) +{ + a.x = saturate_cast<_Tp>(a.x / b); + a.y = saturate_cast<_Tp>(a.y / b); + return a; +} + +template static inline +double norm(const Point_<_Tp>& pt) +{ + return std::sqrt((double)pt.x*pt.x + (double)pt.y*pt.y); +} + +template static inline +bool operator == (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ + return a.x == b.x && a.y == b.y; +} + +template static inline +bool operator != (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ + return a.x != b.x || a.y != b.y; +} + +template static inline +Point_<_Tp> operator + (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ + return Point_<_Tp>( saturate_cast<_Tp>(a.x + b.x), saturate_cast<_Tp>(a.y + b.y) ); +} + +template static inline +Point_<_Tp> 
operator - (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ + return Point_<_Tp>( saturate_cast<_Tp>(a.x - b.x), saturate_cast<_Tp>(a.y - b.y) ); +} + +template static inline +Point_<_Tp> operator - (const Point_<_Tp>& a) +{ + return Point_<_Tp>( saturate_cast<_Tp>(-a.x), saturate_cast<_Tp>(-a.y) ); +} + +template static inline +Point_<_Tp> operator * (const Point_<_Tp>& a, int b) +{ + return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); +} + +template static inline +Point_<_Tp> operator * (int a, const Point_<_Tp>& b) +{ + return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); +} + +template static inline +Point_<_Tp> operator * (const Point_<_Tp>& a, float b) +{ + return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); +} + +template static inline +Point_<_Tp> operator * (float a, const Point_<_Tp>& b) +{ + return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); +} + +template static inline +Point_<_Tp> operator * (const Point_<_Tp>& a, double b) +{ + return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); +} + +template static inline +Point_<_Tp> operator * (double a, const Point_<_Tp>& b) +{ + return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); +} + +template static inline +Point_<_Tp> operator * (const Matx<_Tp, 2, 2>& a, const Point_<_Tp>& b) +{ + Matx<_Tp, 2, 1> tmp = a * Vec<_Tp,2>(b.x, b.y); + return Point_<_Tp>(tmp.val[0], tmp.val[1]); +} + +template static inline +Point3_<_Tp> operator * (const Matx<_Tp, 3, 3>& a, const Point_<_Tp>& b) +{ + Matx<_Tp, 3, 1> tmp = a * Vec<_Tp,3>(b.x, b.y, 1); + return Point3_<_Tp>(tmp.val[0], tmp.val[1], tmp.val[2]); +} + +template static inline +Point_<_Tp> operator / (const Point_<_Tp>& a, int b) +{ + Point_<_Tp> tmp(a); + tmp /= b; + return tmp; +} + +template static inline +Point_<_Tp> operator / (const Point_<_Tp>& a, float b) +{ + Point_<_Tp> tmp(a); + tmp /= b; + return tmp; +} + +template static inline +Point_<_Tp> operator / (const Point_<_Tp>& a, double b) +{ + Point_<_Tp> tmp(a); + tmp /= b; + return tmp; +} + + +template static inline _AccTp normL2Sqr(const Point_& pt); +template static inline _AccTp normL2Sqr(const Point_& pt); +template static inline _AccTp normL2Sqr(const Point_& pt); +template static inline _AccTp normL2Sqr(const Point_& pt); + +template<> inline int normL2Sqr(const Point_& pt) { return pt.dot(pt); } +template<> inline int64 normL2Sqr(const Point_& pt) { return pt.dot(pt); } +template<> inline float normL2Sqr(const Point_& pt) { return pt.dot(pt); } +template<> inline double normL2Sqr(const Point_& pt) { return pt.dot(pt); } + +template<> inline double normL2Sqr(const Point_& pt) { return pt.ddot(pt); } +template<> inline double normL2Sqr(const Point_& pt) { return pt.ddot(pt); } + + + +//////////////////////////////// 3D Point /////////////////////////////// + +template inline +Point3_<_Tp>::Point3_() + : x(0), y(0), z(0) {} + +template inline +Point3_<_Tp>::Point3_(_Tp _x, _Tp _y, _Tp _z) + : x(_x), y(_y), z(_z) {} + +template inline +Point3_<_Tp>::Point3_(const Point3_& pt) + : x(pt.x), y(pt.y), z(pt.z) {} + +template inline +Point3_<_Tp>::Point3_(const Point_<_Tp>& pt) + : x(pt.x), y(pt.y), z(_Tp()) {} + +template inline +Point3_<_Tp>::Point3_(const Vec<_Tp, 3>& v) + : x(v[0]), y(v[1]), z(v[2]) {} + +template template inline +Point3_<_Tp>::operator Point3_<_Tp2>() const +{ + return Point3_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y), saturate_cast<_Tp2>(z)); +} + +#if 
OPENCV_ABI_COMPATIBILITY > 300 +template template inline +Point3_<_Tp>::operator Vec<_Tp2, 3>() const +{ + return Vec<_Tp2, 3>(x, y, z); +} +#else +template inline +Point3_<_Tp>::operator Vec<_Tp, 3>() const +{ + return Vec<_Tp, 3>(x, y, z); +} +#endif + +template inline +Point3_<_Tp>& Point3_<_Tp>::operator = (const Point3_& pt) +{ + x = pt.x; y = pt.y; z = pt.z; + return *this; +} + +template inline +_Tp Point3_<_Tp>::dot(const Point3_& pt) const +{ + return saturate_cast<_Tp>(x*pt.x + y*pt.y + z*pt.z); +} + +template inline +double Point3_<_Tp>::ddot(const Point3_& pt) const +{ + return (double)x*pt.x + (double)y*pt.y + (double)z*pt.z; +} + +template inline +Point3_<_Tp> Point3_<_Tp>::cross(const Point3_<_Tp>& pt) const +{ + return Point3_<_Tp>(y*pt.z - z*pt.y, z*pt.x - x*pt.z, x*pt.y - y*pt.x); +} + + +template static inline +Point3_<_Tp>& operator += (Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ + a.x += b.x; + a.y += b.y; + a.z += b.z; + return a; +} + +template static inline +Point3_<_Tp>& operator -= (Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ + a.x -= b.x; + a.y -= b.y; + a.z -= b.z; + return a; +} + +template static inline +Point3_<_Tp>& operator *= (Point3_<_Tp>& a, int b) +{ + a.x = saturate_cast<_Tp>(a.x * b); + a.y = saturate_cast<_Tp>(a.y * b); + a.z = saturate_cast<_Tp>(a.z * b); + return a; +} + +template static inline +Point3_<_Tp>& operator *= (Point3_<_Tp>& a, float b) +{ + a.x = saturate_cast<_Tp>(a.x * b); + a.y = saturate_cast<_Tp>(a.y * b); + a.z = saturate_cast<_Tp>(a.z * b); + return a; +} + +template static inline +Point3_<_Tp>& operator *= (Point3_<_Tp>& a, double b) +{ + a.x = saturate_cast<_Tp>(a.x * b); + a.y = saturate_cast<_Tp>(a.y * b); + a.z = saturate_cast<_Tp>(a.z * b); + return a; +} + +template static inline +Point3_<_Tp>& operator /= (Point3_<_Tp>& a, int b) +{ + a.x = saturate_cast<_Tp>(a.x / b); + a.y = saturate_cast<_Tp>(a.y / b); + a.z = saturate_cast<_Tp>(a.z / b); + return a; +} + +template static inline +Point3_<_Tp>& operator /= (Point3_<_Tp>& a, float b) +{ + a.x = saturate_cast<_Tp>(a.x / b); + a.y = saturate_cast<_Tp>(a.y / b); + a.z = saturate_cast<_Tp>(a.z / b); + return a; +} + +template static inline +Point3_<_Tp>& operator /= (Point3_<_Tp>& a, double b) +{ + a.x = saturate_cast<_Tp>(a.x / b); + a.y = saturate_cast<_Tp>(a.y / b); + a.z = saturate_cast<_Tp>(a.z / b); + return a; +} + +template static inline +double norm(const Point3_<_Tp>& pt) +{ + return std::sqrt((double)pt.x*pt.x + (double)pt.y*pt.y + (double)pt.z*pt.z); +} + +template static inline +bool operator == (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ + return a.x == b.x && a.y == b.y && a.z == b.z; +} + +template static inline +bool operator != (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ + return a.x != b.x || a.y != b.y || a.z != b.z; +} + +template static inline +Point3_<_Tp> operator + (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ + return Point3_<_Tp>( saturate_cast<_Tp>(a.x + b.x), saturate_cast<_Tp>(a.y + b.y), saturate_cast<_Tp>(a.z + b.z)); +} + +template static inline +Point3_<_Tp> operator - (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ + return Point3_<_Tp>( saturate_cast<_Tp>(a.x - b.x), saturate_cast<_Tp>(a.y - b.y), saturate_cast<_Tp>(a.z - b.z)); +} + +template static inline +Point3_<_Tp> operator - (const Point3_<_Tp>& a) +{ + return Point3_<_Tp>( saturate_cast<_Tp>(-a.x), saturate_cast<_Tp>(-a.y), saturate_cast<_Tp>(-a.z) ); +} + +template static inline +Point3_<_Tp> operator * (const Point3_<_Tp>& a, int b) +{ + return Point3_<_Tp>( 
saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b), saturate_cast<_Tp>(a.z*b) ); +} + +template static inline +Point3_<_Tp> operator * (int a, const Point3_<_Tp>& b) +{ + return Point3_<_Tp>( saturate_cast<_Tp>(b.x * a), saturate_cast<_Tp>(b.y * a), saturate_cast<_Tp>(b.z * a) ); +} + +template static inline +Point3_<_Tp> operator * (const Point3_<_Tp>& a, float b) +{ + return Point3_<_Tp>( saturate_cast<_Tp>(a.x * b), saturate_cast<_Tp>(a.y * b), saturate_cast<_Tp>(a.z * b) ); +} + +template static inline +Point3_<_Tp> operator * (float a, const Point3_<_Tp>& b) +{ + return Point3_<_Tp>( saturate_cast<_Tp>(b.x * a), saturate_cast<_Tp>(b.y * a), saturate_cast<_Tp>(b.z * a) ); +} + +template static inline +Point3_<_Tp> operator * (const Point3_<_Tp>& a, double b) +{ + return Point3_<_Tp>( saturate_cast<_Tp>(a.x * b), saturate_cast<_Tp>(a.y * b), saturate_cast<_Tp>(a.z * b) ); +} + +template static inline +Point3_<_Tp> operator * (double a, const Point3_<_Tp>& b) +{ + return Point3_<_Tp>( saturate_cast<_Tp>(b.x * a), saturate_cast<_Tp>(b.y * a), saturate_cast<_Tp>(b.z * a) ); +} + +template static inline +Point3_<_Tp> operator * (const Matx<_Tp, 3, 3>& a, const Point3_<_Tp>& b) +{ + Matx<_Tp, 3, 1> tmp = a * Vec<_Tp,3>(b.x, b.y, b.z); + return Point3_<_Tp>(tmp.val[0], tmp.val[1], tmp.val[2]); +} + +template static inline +Matx<_Tp, 4, 1> operator * (const Matx<_Tp, 4, 4>& a, const Point3_<_Tp>& b) +{ + return a * Matx<_Tp, 4, 1>(b.x, b.y, b.z, 1); +} + +template static inline +Point3_<_Tp> operator / (const Point3_<_Tp>& a, int b) +{ + Point3_<_Tp> tmp(a); + tmp /= b; + return tmp; +} + +template static inline +Point3_<_Tp> operator / (const Point3_<_Tp>& a, float b) +{ + Point3_<_Tp> tmp(a); + tmp /= b; + return tmp; +} + +template static inline +Point3_<_Tp> operator / (const Point3_<_Tp>& a, double b) +{ + Point3_<_Tp> tmp(a); + tmp /= b; + return tmp; +} + + + +////////////////////////////////// Size ///////////////////////////////// + +template inline +Size_<_Tp>::Size_() + : width(0), height(0) {} + +template inline +Size_<_Tp>::Size_(_Tp _width, _Tp _height) + : width(_width), height(_height) {} + +template inline +Size_<_Tp>::Size_(const Size_& sz) + : width(sz.width), height(sz.height) {} + +template inline +Size_<_Tp>::Size_(const Point_<_Tp>& pt) + : width(pt.x), height(pt.y) {} + +template template inline +Size_<_Tp>::operator Size_<_Tp2>() const +{ + return Size_<_Tp2>(saturate_cast<_Tp2>(width), saturate_cast<_Tp2>(height)); +} + +template inline +Size_<_Tp>& Size_<_Tp>::operator = (const Size_<_Tp>& sz) +{ + width = sz.width; height = sz.height; + return *this; +} + +template inline +_Tp Size_<_Tp>::area() const +{ + const _Tp result = width * height; + CV_DbgAssert(!std::numeric_limits<_Tp>::is_integer + || width == 0 || result / width == height); // make sure the result fits in the return value + return result; +} + +template inline +bool Size_<_Tp>::empty() const +{ + return width <= 0 || height <= 0; +} + + +template static inline +Size_<_Tp>& operator *= (Size_<_Tp>& a, _Tp b) +{ + a.width *= b; + a.height *= b; + return a; +} + +template static inline +Size_<_Tp> operator * (const Size_<_Tp>& a, _Tp b) +{ + Size_<_Tp> tmp(a); + tmp *= b; + return tmp; +} + +template static inline +Size_<_Tp>& operator /= (Size_<_Tp>& a, _Tp b) +{ + a.width /= b; + a.height /= b; + return a; +} + +template static inline +Size_<_Tp> operator / (const Size_<_Tp>& a, _Tp b) +{ + Size_<_Tp> tmp(a); + tmp /= b; + return tmp; +} + +template static inline +Size_<_Tp>& operator += (Size_<_Tp>& 
a, const Size_<_Tp>& b) +{ + a.width += b.width; + a.height += b.height; + return a; +} + +template static inline +Size_<_Tp> operator + (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ + Size_<_Tp> tmp(a); + tmp += b; + return tmp; +} + +template static inline +Size_<_Tp>& operator -= (Size_<_Tp>& a, const Size_<_Tp>& b) +{ + a.width -= b.width; + a.height -= b.height; + return a; +} + +template static inline +Size_<_Tp> operator - (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ + Size_<_Tp> tmp(a); + tmp -= b; + return tmp; +} + +template static inline +bool operator == (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ + return a.width == b.width && a.height == b.height; +} + +template static inline +bool operator != (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ + return !(a == b); +} + + + +////////////////////////////////// Rect ///////////////////////////////// + +template inline +Rect_<_Tp>::Rect_() + : x(0), y(0), width(0), height(0) {} + +template inline +Rect_<_Tp>::Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height) + : x(_x), y(_y), width(_width), height(_height) {} + +template inline +Rect_<_Tp>::Rect_(const Rect_<_Tp>& r) + : x(r.x), y(r.y), width(r.width), height(r.height) {} + +template inline +Rect_<_Tp>::Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz) + : x(org.x), y(org.y), width(sz.width), height(sz.height) {} + +template inline +Rect_<_Tp>::Rect_(const Point_<_Tp>& pt1, const Point_<_Tp>& pt2) +{ + x = std::min(pt1.x, pt2.x); + y = std::min(pt1.y, pt2.y); + width = std::max(pt1.x, pt2.x) - x; + height = std::max(pt1.y, pt2.y) - y; +} + +template inline +Rect_<_Tp>& Rect_<_Tp>::operator = ( const Rect_<_Tp>& r ) +{ + x = r.x; + y = r.y; + width = r.width; + height = r.height; + return *this; +} + +template inline +Point_<_Tp> Rect_<_Tp>::tl() const +{ + return Point_<_Tp>(x,y); +} + +template inline +Point_<_Tp> Rect_<_Tp>::br() const +{ + return Point_<_Tp>(x + width, y + height); +} + +template inline +Size_<_Tp> Rect_<_Tp>::size() const +{ + return Size_<_Tp>(width, height); +} + +template inline +_Tp Rect_<_Tp>::area() const +{ + const _Tp result = width * height; + CV_DbgAssert(!std::numeric_limits<_Tp>::is_integer + || width == 0 || result / width == height); // make sure the result fits in the return value + return result; +} + +template inline +bool Rect_<_Tp>::empty() const +{ + return width <= 0 || height <= 0; +} + +template template inline +Rect_<_Tp>::operator Rect_<_Tp2>() const +{ + return Rect_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y), saturate_cast<_Tp2>(width), saturate_cast<_Tp2>(height)); +} + +template inline +bool Rect_<_Tp>::contains(const Point_<_Tp>& pt) const +{ + return x <= pt.x && pt.x < x + width && y <= pt.y && pt.y < y + height; +} + + +template static inline +Rect_<_Tp>& operator += ( Rect_<_Tp>& a, const Point_<_Tp>& b ) +{ + a.x += b.x; + a.y += b.y; + return a; +} + +template static inline +Rect_<_Tp>& operator -= ( Rect_<_Tp>& a, const Point_<_Tp>& b ) +{ + a.x -= b.x; + a.y -= b.y; + return a; +} + +template static inline +Rect_<_Tp>& operator += ( Rect_<_Tp>& a, const Size_<_Tp>& b ) +{ + a.width += b.width; + a.height += b.height; + return a; +} + +template static inline +Rect_<_Tp>& operator -= ( Rect_<_Tp>& a, const Size_<_Tp>& b ) +{ + const _Tp width = a.width - b.width; + const _Tp height = a.height - b.height; + CV_DbgAssert(width >= 0 && height >= 0); + a.width = width; + a.height = height; + return a; +} + +template static inline +Rect_<_Tp>& operator &= ( Rect_<_Tp>& a, const Rect_<_Tp>& b ) +{ + _Tp x1 = std::max(a.x, 
b.x); + _Tp y1 = std::max(a.y, b.y); + a.width = std::min(a.x + a.width, b.x + b.width) - x1; + a.height = std::min(a.y + a.height, b.y + b.height) - y1; + a.x = x1; + a.y = y1; + if( a.width <= 0 || a.height <= 0 ) + a = Rect(); + return a; +} + +template static inline +Rect_<_Tp>& operator |= ( Rect_<_Tp>& a, const Rect_<_Tp>& b ) +{ + if (a.empty()) { + a = b; + } + else if (!b.empty()) { + _Tp x1 = std::min(a.x, b.x); + _Tp y1 = std::min(a.y, b.y); + a.width = std::max(a.x + a.width, b.x + b.width) - x1; + a.height = std::max(a.y + a.height, b.y + b.height) - y1; + a.x = x1; + a.y = y1; + } + return a; +} + +template static inline +bool operator == (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + return a.x == b.x && a.y == b.y && a.width == b.width && a.height == b.height; +} + +template static inline +bool operator != (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + return a.x != b.x || a.y != b.y || a.width != b.width || a.height != b.height; +} + +template static inline +Rect_<_Tp> operator + (const Rect_<_Tp>& a, const Point_<_Tp>& b) +{ + return Rect_<_Tp>( a.x + b.x, a.y + b.y, a.width, a.height ); +} + +template static inline +Rect_<_Tp> operator - (const Rect_<_Tp>& a, const Point_<_Tp>& b) +{ + return Rect_<_Tp>( a.x - b.x, a.y - b.y, a.width, a.height ); +} + +template static inline +Rect_<_Tp> operator + (const Rect_<_Tp>& a, const Size_<_Tp>& b) +{ + return Rect_<_Tp>( a.x, a.y, a.width + b.width, a.height + b.height ); +} + +template static inline +Rect_<_Tp> operator - (const Rect_<_Tp>& a, const Size_<_Tp>& b) +{ + const _Tp width = a.width - b.width; + const _Tp height = a.height - b.height; + CV_DbgAssert(width >= 0 && height >= 0); + return Rect_<_Tp>( a.x, a.y, width, height ); +} + +template static inline +Rect_<_Tp> operator & (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + Rect_<_Tp> c = a; + return c &= b; +} + +template static inline +Rect_<_Tp> operator | (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + Rect_<_Tp> c = a; + return c |= b; +} + +/** + * @brief measure dissimilarity between two sample sets + * + * computes the complement of the Jaccard Index as described in . + * For rectangles this reduces to computing the intersection over the union. 
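+ *
+ * A small worked sketch (the rectangles below are assumed for illustration only):
+ * @code
+ *     Rect2d a(0, 0, 10, 10), b(5, 0, 10, 10);
+ *     double d = jaccardDistance(a, b);  // intersection 50, union 150, so d = 1 - 50/150 = 2/3
+ * @endcode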
+ */ +template static inline +double jaccardDistance(const Rect_<_Tp>& a, const Rect_<_Tp>& b) { + _Tp Aa = a.area(); + _Tp Ab = b.area(); + + if ((Aa + Ab) <= std::numeric_limits<_Tp>::epsilon()) { + // jaccard_index = 1 -> distance = 0 + return 0.0; + } + + double Aab = (a & b).area(); + // distance = 1 - jaccard_index + return 1.0 - Aab / (Aa + Ab - Aab); +} + +////////////////////////////// RotatedRect ////////////////////////////// + +inline +RotatedRect::RotatedRect() + : center(), size(), angle(0) {} + +inline +RotatedRect::RotatedRect(const Point2f& _center, const Size2f& _size, float _angle) + : center(_center), size(_size), angle(_angle) {} + + + +///////////////////////////////// Range ///////////////////////////////// + +inline +Range::Range() + : start(0), end(0) {} + +inline +Range::Range(int _start, int _end) + : start(_start), end(_end) {} + +inline +int Range::size() const +{ + return end - start; +} + +inline +bool Range::empty() const +{ + return start == end; +} + +inline +Range Range::all() +{ + return Range(INT_MIN, INT_MAX); +} + + +static inline +bool operator == (const Range& r1, const Range& r2) +{ + return r1.start == r2.start && r1.end == r2.end; +} + +static inline +bool operator != (const Range& r1, const Range& r2) +{ + return !(r1 == r2); +} + +static inline +bool operator !(const Range& r) +{ + return r.start == r.end; +} + +static inline +Range operator & (const Range& r1, const Range& r2) +{ + Range r(std::max(r1.start, r2.start), std::min(r1.end, r2.end)); + r.end = std::max(r.end, r.start); + return r; +} + +static inline +Range& operator &= (Range& r1, const Range& r2) +{ + r1 = r1 & r2; + return r1; +} + +static inline +Range operator + (const Range& r1, int delta) +{ + return Range(r1.start + delta, r1.end + delta); +} + +static inline +Range operator + (int delta, const Range& r1) +{ + return Range(r1.start + delta, r1.end + delta); +} + +static inline +Range operator - (const Range& r1, int delta) +{ + return r1 + (-delta); +} + + + +///////////////////////////////// Scalar //////////////////////////////// + +template inline +Scalar_<_Tp>::Scalar_() +{ + this->val[0] = this->val[1] = this->val[2] = this->val[3] = 0; +} + +template inline +Scalar_<_Tp>::Scalar_(_Tp v0, _Tp v1, _Tp v2, _Tp v3) +{ + this->val[0] = v0; + this->val[1] = v1; + this->val[2] = v2; + this->val[3] = v3; +} + +template template inline +Scalar_<_Tp>::Scalar_(const Vec<_Tp2, cn>& v) +{ + int i; + for( i = 0; i < (cn < 4 ? 
cn : 4); i++ ) + this->val[i] = cv::saturate_cast<_Tp>(v.val[i]); + for( ; i < 4; i++ ) + this->val[i] = 0; +} + +template inline +Scalar_<_Tp>::Scalar_(_Tp v0) +{ + this->val[0] = v0; + this->val[1] = this->val[2] = this->val[3] = 0; +} + +template inline +Scalar_<_Tp> Scalar_<_Tp>::all(_Tp v0) +{ + return Scalar_<_Tp>(v0, v0, v0, v0); +} + + +template inline +Scalar_<_Tp> Scalar_<_Tp>::mul(const Scalar_<_Tp>& a, double scale ) const +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(this->val[0] * a.val[0] * scale), + saturate_cast<_Tp>(this->val[1] * a.val[1] * scale), + saturate_cast<_Tp>(this->val[2] * a.val[2] * scale), + saturate_cast<_Tp>(this->val[3] * a.val[3] * scale)); +} + +template inline +Scalar_<_Tp> Scalar_<_Tp>::conj() const +{ + return Scalar_<_Tp>(saturate_cast<_Tp>( this->val[0]), + saturate_cast<_Tp>(-this->val[1]), + saturate_cast<_Tp>(-this->val[2]), + saturate_cast<_Tp>(-this->val[3])); +} + +template inline +bool Scalar_<_Tp>::isReal() const +{ + return this->val[1] == 0 && this->val[2] == 0 && this->val[3] == 0; +} + + +template template inline +Scalar_<_Tp>::operator Scalar_() const +{ + return Scalar_(saturate_cast(this->val[0]), + saturate_cast(this->val[1]), + saturate_cast(this->val[2]), + saturate_cast(this->val[3])); +} + + +template static inline +Scalar_<_Tp>& operator += (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a.val[0] += b.val[0]; + a.val[1] += b.val[1]; + a.val[2] += b.val[2]; + a.val[3] += b.val[3]; + return a; +} + +template static inline +Scalar_<_Tp>& operator -= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a.val[0] -= b.val[0]; + a.val[1] -= b.val[1]; + a.val[2] -= b.val[2]; + a.val[3] -= b.val[3]; + return a; +} + +template static inline +Scalar_<_Tp>& operator *= ( Scalar_<_Tp>& a, _Tp v ) +{ + a.val[0] *= v; + a.val[1] *= v; + a.val[2] *= v; + a.val[3] *= v; + return a; +} + +template static inline +bool operator == ( const Scalar_<_Tp>& a, const Scalar_<_Tp>& b ) +{ + return a.val[0] == b.val[0] && a.val[1] == b.val[1] && + a.val[2] == b.val[2] && a.val[3] == b.val[3]; +} + +template static inline +bool operator != ( const Scalar_<_Tp>& a, const Scalar_<_Tp>& b ) +{ + return a.val[0] != b.val[0] || a.val[1] != b.val[1] || + a.val[2] != b.val[2] || a.val[3] != b.val[3]; +} + +template static inline +Scalar_<_Tp> operator + (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return Scalar_<_Tp>(a.val[0] + b.val[0], + a.val[1] + b.val[1], + a.val[2] + b.val[2], + a.val[3] + b.val[3]); +} + +template static inline +Scalar_<_Tp> operator - (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] - b.val[0]), + saturate_cast<_Tp>(a.val[1] - b.val[1]), + saturate_cast<_Tp>(a.val[2] - b.val[2]), + saturate_cast<_Tp>(a.val[3] - b.val[3])); +} + +template static inline +Scalar_<_Tp> operator * (const Scalar_<_Tp>& a, _Tp alpha) +{ + return Scalar_<_Tp>(a.val[0] * alpha, + a.val[1] * alpha, + a.val[2] * alpha, + a.val[3] * alpha); +} + +template static inline +Scalar_<_Tp> operator * (_Tp alpha, const Scalar_<_Tp>& a) +{ + return a*alpha; +} + +template static inline +Scalar_<_Tp> operator - (const Scalar_<_Tp>& a) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(-a.val[0]), + saturate_cast<_Tp>(-a.val[1]), + saturate_cast<_Tp>(-a.val[2]), + saturate_cast<_Tp>(-a.val[3])); +} + + +template static inline +Scalar_<_Tp> operator * (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a[0]*b[0] - a[1]*b[1] - a[2]*b[2] - a[3]*b[3]), + saturate_cast<_Tp>(a[0]*b[1] + a[1]*b[0] + 
a[2]*b[3] - a[3]*b[2]), + saturate_cast<_Tp>(a[0]*b[2] - a[1]*b[3] + a[2]*b[0] + a[3]*b[1]), + saturate_cast<_Tp>(a[0]*b[3] + a[1]*b[2] - a[2]*b[1] + a[3]*b[0])); +} + +template static inline +Scalar_<_Tp>& operator *= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a = a * b; + return a; +} + +template static inline +Scalar_<_Tp> operator / (const Scalar_<_Tp>& a, _Tp alpha) +{ + return Scalar_<_Tp>(a.val[0] / alpha, + a.val[1] / alpha, + a.val[2] / alpha, + a.val[3] / alpha); +} + +template static inline +Scalar_ operator / (const Scalar_& a, float alpha) +{ + float s = 1 / alpha; + return Scalar_(a.val[0] * s, a.val[1] * s, a.val[2] * s, a.val[3] * s); +} + +template static inline +Scalar_ operator / (const Scalar_& a, double alpha) +{ + double s = 1 / alpha; + return Scalar_(a.val[0] * s, a.val[1] * s, a.val[2] * s, a.val[3] * s); +} + +template static inline +Scalar_<_Tp>& operator /= (Scalar_<_Tp>& a, _Tp alpha) +{ + a = a / alpha; + return a; +} + +template static inline +Scalar_<_Tp> operator / (_Tp a, const Scalar_<_Tp>& b) +{ + _Tp s = a / (b[0]*b[0] + b[1]*b[1] + b[2]*b[2] + b[3]*b[3]); + return b.conj() * s; +} + +template static inline +Scalar_<_Tp> operator / (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return a * ((_Tp)1 / b); +} + +template static inline +Scalar_<_Tp>& operator /= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a = a / b; + return a; +} + +template static inline +Scalar operator * (const Matx<_Tp, 4, 4>& a, const Scalar& b) +{ + Matx c((Matx)a, b, Matx_MatMulOp()); + return reinterpret_cast(c); +} + +template<> inline +Scalar operator * (const Matx& a, const Scalar& b) +{ + Matx c(a, b, Matx_MatMulOp()); + return reinterpret_cast(c); +} + + + +//////////////////////////////// KeyPoint /////////////////////////////// + +inline +KeyPoint::KeyPoint() + : pt(0,0), size(0), angle(-1), response(0), octave(0), class_id(-1) {} + +inline +KeyPoint::KeyPoint(Point2f _pt, float _size, float _angle, float _response, int _octave, int _class_id) + : pt(_pt), size(_size), angle(_angle), response(_response), octave(_octave), class_id(_class_id) {} + +inline +KeyPoint::KeyPoint(float x, float y, float _size, float _angle, float _response, int _octave, int _class_id) + : pt(x, y), size(_size), angle(_angle), response(_response), octave(_octave), class_id(_class_id) {} + + + +///////////////////////////////// DMatch //////////////////////////////// + +inline +DMatch::DMatch() + : queryIdx(-1), trainIdx(-1), imgIdx(-1), distance(FLT_MAX) {} + +inline +DMatch::DMatch(int _queryIdx, int _trainIdx, float _distance) + : queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(-1), distance(_distance) {} + +inline +DMatch::DMatch(int _queryIdx, int _trainIdx, int _imgIdx, float _distance) + : queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(_imgIdx), distance(_distance) {} + +inline +bool DMatch::operator < (const DMatch &m) const +{ + return distance < m.distance; +} + + + +////////////////////////////// TermCriteria ///////////////////////////// + +inline +TermCriteria::TermCriteria() + : type(0), maxCount(0), epsilon(0) {} + +inline +TermCriteria::TermCriteria(int _type, int _maxCount, double _epsilon) + : type(_type), maxCount(_maxCount), epsilon(_epsilon) {} + +//! 
@endcond + +} // cv + +#endif //OPENCV_CORE_TYPES_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/types_c.h b/third_party/opencv/uos/sw_64/include/opencv2/core/types_c.h new file mode 100644 index 00000000..fda0f058 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/types_c.h @@ -0,0 +1,2145 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef OPENCV_CORE_TYPES_H +#define OPENCV_CORE_TYPES_H + +#ifdef CV__ENABLE_C_API_CTORS // invalid C API ctors (must be removed) +#if defined(_WIN32) && !defined(CV__SKIP_MESSAGE_MALFORMED_C_API_CTORS) +#error "C API ctors don't work on Win32: https://github.com/opencv/opencv/issues/15990" +#endif +#endif + +//#define CV__VALIDATE_UNUNITIALIZED_VARS 1 // C++11 & GCC only + +#ifdef __cplusplus + +#ifdef CV__VALIDATE_UNUNITIALIZED_VARS +#pragma GCC diagnostic ignored "-Wmissing-field-initializers" +#define CV_STRUCT_INITIALIZER {0,} +#else +#if defined(__GNUC__) && __GNUC__ == 4 // GCC 4.x warns on "= {}" initialization, fixed in GCC 5.0 +#pragma GCC diagnostic ignored "-Wmissing-field-initializers" +#endif +#define CV_STRUCT_INITIALIZER {} +#endif + +#else +#define CV_STRUCT_INITIALIZER {0} +#endif + + +#ifdef HAVE_IPL +# ifndef __IPL_H__ +# if defined _WIN32 +# include +# else +# include +# endif +# endif +#elif defined __IPL_H__ +# define HAVE_IPL +#endif + +#include "opencv2/core/cvdef.h" + +#ifndef SKIP_INCLUDES +#include +#include +#include +#include +#endif // SKIP_INCLUDES + +#if defined _WIN32 +# define CV_CDECL __cdecl +# define CV_STDCALL __stdcall +#else +# define CV_CDECL +# define CV_STDCALL +#endif + +#ifndef CV_DEFAULT +# ifdef __cplusplus +# define CV_DEFAULT(val) = val +# else +# define CV_DEFAULT(val) +# endif +#endif + +#ifndef CV_EXTERN_C_FUNCPTR +# ifdef __cplusplus +# define CV_EXTERN_C_FUNCPTR(x) extern "C" { typedef x; } +# else +# define CV_EXTERN_C_FUNCPTR(x) typedef x +# endif +#endif + +#ifndef CVAPI +# define CVAPI(rettype) CV_EXTERN_C CV_EXPORTS rettype CV_CDECL +#endif + +#ifndef CV_IMPL +# define CV_IMPL CV_EXTERN_C +#endif + +#ifdef __cplusplus +# include "opencv2/core.hpp" +#endif + +/** @addtogroup core_c + @{ +*/ + +/** @brief This is the "metatype" used *only* as a function parameter. + +It denotes that the function accepts arrays of multiple types, such as IplImage*, CvMat* or even +CvSeq* sometimes. The particular array type is determined at runtime by analyzing the first 4 +bytes of the header. In C++ interface the role of CvArr is played by InputArray and OutputArray. + */ +typedef void CvArr; + +typedef int CVStatus; + +/** @see cv::Error::Code */ +enum { + CV_StsOk= 0, /**< everything is ok */ + CV_StsBackTrace= -1, /**< pseudo error for back trace */ + CV_StsError= -2, /**< unknown /unspecified error */ + CV_StsInternal= -3, /**< internal error (bad state) */ + CV_StsNoMem= -4, /**< insufficient memory */ + CV_StsBadArg= -5, /**< function arg/param is bad */ + CV_StsBadFunc= -6, /**< unsupported function */ + CV_StsNoConv= -7, /**< iter. 
didn't converge */ + CV_StsAutoTrace= -8, /**< tracing */ + CV_HeaderIsNull= -9, /**< image header is NULL */ + CV_BadImageSize= -10, /**< image size is invalid */ + CV_BadOffset= -11, /**< offset is invalid */ + CV_BadDataPtr= -12, /**/ + CV_BadStep= -13, /**< image step is wrong, this may happen for a non-continuous matrix */ + CV_BadModelOrChSeq= -14, /**/ + CV_BadNumChannels= -15, /**< bad number of channels, for example, some functions accept only single channel matrices */ + CV_BadNumChannel1U= -16, /**/ + CV_BadDepth= -17, /**< input image depth is not supported by the function */ + CV_BadAlphaChannel= -18, /**/ + CV_BadOrder= -19, /**< number of dimensions is out of range */ + CV_BadOrigin= -20, /**< incorrect input origin */ + CV_BadAlign= -21, /**< incorrect input align */ + CV_BadCallBack= -22, /**/ + CV_BadTileSize= -23, /**/ + CV_BadCOI= -24, /**< input COI is not supported */ + CV_BadROISize= -25, /**< incorrect input roi */ + CV_MaskIsTiled= -26, /**/ + CV_StsNullPtr= -27, /**< null pointer */ + CV_StsVecLengthErr= -28, /**< incorrect vector length */ + CV_StsFilterStructContentErr= -29, /**< incorrect filter structure content */ + CV_StsKernelStructContentErr= -30, /**< incorrect transform kernel content */ + CV_StsFilterOffsetErr= -31, /**< incorrect filter offset value */ + CV_StsBadSize= -201, /**< the input/output structure size is incorrect */ + CV_StsDivByZero= -202, /**< division by zero */ + CV_StsInplaceNotSupported= -203, /**< in-place operation is not supported */ + CV_StsObjectNotFound= -204, /**< request can't be completed */ + CV_StsUnmatchedFormats= -205, /**< formats of input/output arrays differ */ + CV_StsBadFlag= -206, /**< flag is wrong or not supported */ + CV_StsBadPoint= -207, /**< bad CvPoint */ + CV_StsBadMask= -208, /**< bad format of mask (neither 8uC1 nor 8sC1)*/ + CV_StsUnmatchedSizes= -209, /**< sizes of input/output structures do not match */ + CV_StsUnsupportedFormat= -210, /**< the data format/type is not supported by the function*/ + CV_StsOutOfRange= -211, /**< some of parameters are out of range */ + CV_StsParseError= -212, /**< invalid syntax/structure of the parsed file */ + CV_StsNotImplemented= -213, /**< the requested function/feature is not implemented */ + CV_StsBadMemBlock= -214, /**< an allocated block has been corrupted */ + CV_StsAssert= -215, /**< assertion failed */ + CV_GpuNotSupported= -216, /**< no CUDA support */ + CV_GpuApiCallError= -217, /**< GPU API call error */ + CV_OpenGlNotSupported= -218, /**< no OpenGL support */ + CV_OpenGlApiCallError= -219, /**< OpenGL API call error */ + CV_OpenCLApiCallError= -220, /**< OpenCL API call error */ + CV_OpenCLDoubleNotSupported= -221, + CV_OpenCLInitError= -222, /**< OpenCL initialization error */ + CV_OpenCLNoAMDBlasFft= -223 +}; + +/****************************************************************************************\ +* Common macros and inline functions * +\****************************************************************************************/ + +#define CV_SWAP(a,b,t) ((t) = (a), (a) = (b), (b) = (t)) + +/** min & max without jumps */ +#define CV_IMIN(a, b) ((a) ^ (((a)^(b)) & (((a) < (b)) - 1))) + +#define CV_IMAX(a, b) ((a) ^ (((a)^(b)) & (((a) > (b)) - 1))) + +/** absolute value without jumps */ +#ifndef __cplusplus +# define CV_IABS(a) (((a) ^ ((a) < 0 ? -1 : 0)) - ((a) < 0 ? 
-1 : 0)) +#else +# define CV_IABS(a) abs(a) +#endif +#define CV_CMP(a,b) (((a) > (b)) - ((a) < (b))) +#define CV_SIGN(a) CV_CMP((a),0) + +#define cvInvSqrt(value) ((float)(1./sqrt(value))) +#define cvSqrt(value) ((float)sqrt(value)) + + +/*************** Random number generation *******************/ + +typedef uint64 CvRNG; + +#define CV_RNG_COEFF 4164903690U + +/** @brief Initializes a random number generator state. + +The function initializes a random number generator and returns the state. The pointer to the state +can be then passed to the cvRandInt, cvRandReal and cvRandArr functions. In the current +implementation a multiply-with-carry generator is used. +@param seed 64-bit value used to initiate a random sequence +@sa the C++ class RNG replaced CvRNG. + */ +CV_INLINE CvRNG cvRNG( int64 seed CV_DEFAULT(-1)) +{ + CvRNG rng = seed ? (uint64)seed : (uint64)(int64)-1; + return rng; +} + +/** @brief Returns a 32-bit unsigned integer and updates RNG. + +The function returns a uniformly-distributed random 32-bit unsigned integer and updates the RNG +state. It is similar to the rand() function from the C runtime library, except that OpenCV functions +always generates a 32-bit random number, regardless of the platform. +@param rng CvRNG state initialized by cvRNG. + */ +CV_INLINE unsigned cvRandInt( CvRNG* rng ) +{ + uint64 temp = *rng; + temp = (uint64)(unsigned)temp*CV_RNG_COEFF + (temp >> 32); + *rng = temp; + return (unsigned)temp; +} + +/** @brief Returns a floating-point random number and updates RNG. + +The function returns a uniformly-distributed random floating-point number between 0 and 1 (1 is not +included). +@param rng RNG state initialized by cvRNG + */ +CV_INLINE double cvRandReal( CvRNG* rng ) +{ + return cvRandInt(rng)*2.3283064365386962890625e-10 /* 2^-32 */; +} + +/****************************************************************************************\ +* Image type (IplImage) * +\****************************************************************************************/ + +#ifndef HAVE_IPL + +/* + * The following definitions (until #endif) + * is an extract from IPL headers. + * Copyright (c) 1995 Intel Corporation. + */ +#define IPL_DEPTH_SIGN 0x80000000 + +#define IPL_DEPTH_1U 1 +#define IPL_DEPTH_8U 8 +#define IPL_DEPTH_16U 16 +#define IPL_DEPTH_32F 32 + +#define IPL_DEPTH_8S (IPL_DEPTH_SIGN| 8) +#define IPL_DEPTH_16S (IPL_DEPTH_SIGN|16) +#define IPL_DEPTH_32S (IPL_DEPTH_SIGN|32) + +#define IPL_DATA_ORDER_PIXEL 0 +#define IPL_DATA_ORDER_PLANE 1 + +#define IPL_ORIGIN_TL 0 +#define IPL_ORIGIN_BL 1 + +#define IPL_ALIGN_4BYTES 4 +#define IPL_ALIGN_8BYTES 8 +#define IPL_ALIGN_16BYTES 16 +#define IPL_ALIGN_32BYTES 32 + +#define IPL_ALIGN_DWORD IPL_ALIGN_4BYTES +#define IPL_ALIGN_QWORD IPL_ALIGN_8BYTES + +#define IPL_BORDER_CONSTANT 0 +#define IPL_BORDER_REPLICATE 1 +#define IPL_BORDER_REFLECT 2 +#define IPL_BORDER_WRAP 3 + +#ifdef __cplusplus +typedef struct _IplImage IplImage; +CV_EXPORTS _IplImage cvIplImage(const cv::Mat& m); +#endif + +/** The IplImage is taken from the Intel Image Processing Library, in which the format is native. OpenCV +only supports a subset of possible IplImage formats, as outlined in the parameter list above. + +In addition to the above restrictions, OpenCV handles ROIs differently. OpenCV functions require +that the image size or ROI size of all source and destination images match exactly. 
On the other +hand, the Intel Image Processing Library processes the area of intersection between the source and +destination images (or ROIs), allowing them to vary independently. +*/ +typedef struct +_IplImage +{ + int nSize; /**< sizeof(IplImage) */ + int ID; /**< version (=0)*/ + int nChannels; /**< Most of OpenCV functions support 1,2,3 or 4 channels */ + int alphaChannel; /**< Ignored by OpenCV */ + int depth; /**< Pixel depth in bits: IPL_DEPTH_8U, IPL_DEPTH_8S, IPL_DEPTH_16S, + IPL_DEPTH_32S, IPL_DEPTH_32F and IPL_DEPTH_64F are supported. */ + char colorModel[4]; /**< Ignored by OpenCV */ + char channelSeq[4]; /**< ditto */ + int dataOrder; /**< 0 - interleaved color channels, 1 - separate color channels. + cvCreateImage can only create interleaved images */ + int origin; /**< 0 - top-left origin, + 1 - bottom-left origin (Windows bitmaps style). */ + int align; /**< Alignment of image rows (4 or 8). + OpenCV ignores it and uses widthStep instead. */ + int width; /**< Image width in pixels. */ + int height; /**< Image height in pixels. */ + struct _IplROI *roi; /**< Image ROI. If NULL, the whole image is selected. */ + struct _IplImage *maskROI; /**< Must be NULL. */ + void *imageId; /**< " " */ + struct _IplTileInfo *tileInfo; /**< " " */ + int imageSize; /**< Image data size in bytes + (==image->height*image->widthStep + in case of interleaved data)*/ + char *imageData; /**< Pointer to aligned image data. */ + int widthStep; /**< Size of aligned image row in bytes. */ + int BorderMode[4]; /**< Ignored by OpenCV. */ + int BorderConst[4]; /**< Ditto. */ + char *imageDataOrigin; /**< Pointer to very origin of image data + (not necessarily aligned) - + needed for correct deallocation */ + +#if defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus) + _IplImage() + { + memset(this, 0, sizeof(*this)); // valid for POD structure + nSize = sizeof(IplImage); + } + _IplImage(const cv::Mat& m) { *this = cvIplImage(m); } +#endif +} +IplImage; + +CV_INLINE IplImage cvIplImage() +{ +#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)) + IplImage self = CV_STRUCT_INITIALIZER; self.nSize = sizeof(IplImage); return self; +#else + return _IplImage(); +#endif +} + +typedef struct _IplTileInfo IplTileInfo; + +typedef struct _IplROI +{ + int coi; /**< 0 - no COI (all channels are selected), 1 - 0th channel is selected ...*/ + int xOffset; + int yOffset; + int width; + int height; +} +IplROI; + +typedef struct _IplConvKernel +{ + int nCols; + int nRows; + int anchorX; + int anchorY; + int *values; + int nShiftR; +} +IplConvKernel; + +typedef struct _IplConvKernelFP +{ + int nCols; + int nRows; + int anchorX; + int anchorY; + float *values; +} +IplConvKernelFP; + +#define IPL_IMAGE_HEADER 1 +#define IPL_IMAGE_DATA 2 +#define IPL_IMAGE_ROI 4 + +#endif/*HAVE_IPL*/ + +/** extra border mode */ +#define IPL_BORDER_REFLECT_101 4 +#define IPL_BORDER_TRANSPARENT 5 + +#define IPL_IMAGE_MAGIC_VAL ((int)sizeof(IplImage)) +#define CV_TYPE_NAME_IMAGE "opencv-image" + +#define CV_IS_IMAGE_HDR(img) \ + ((img) != NULL && ((const IplImage*)(img))->nSize == sizeof(IplImage)) + +#define CV_IS_IMAGE(img) \ + (CV_IS_IMAGE_HDR(img) && ((IplImage*)img)->imageData != NULL) + +/** for storing double-precision + floating point data in IplImage's */ +#define IPL_DEPTH_64F 64 + +/** get reference to pixel at (col,row), + for multi-channel images (col) should be multiplied by number of channels */ +#define CV_IMAGE_ELEM( image, elemtype, row, col ) \ + (((elemtype*)((image)->imageData + (image)->widthStep*(row)))[(col)]) 
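A minimal sketch of how the CV_IMAGE_ELEM accessor defined above is typically used on an interleaved 8-bit, 3-channel IplImage. The helper name below is illustrative and not part of the upstream header; it assumes an image allocated elsewhere with the legacy C API, e.g. cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 3):

/* Fill an interleaved 8-bit BGR image with a solid colour.
   For multi-channel data the column index is col*nChannels + channel,
   exactly as the comment on CV_IMAGE_ELEM above requires. */
static void fill_bgr_example( IplImage* img, uchar b, uchar g, uchar r )
{
    int row, col;
    for( row = 0; row < img->height; row++ )
    {
        for( col = 0; col < img->width; col++ )
        {
            CV_IMAGE_ELEM( img, uchar, row, col*img->nChannels + 0 ) = b;
            CV_IMAGE_ELEM( img, uchar, row, col*img->nChannels + 1 ) = g;
            CV_IMAGE_ELEM( img, uchar, row, col*img->nChannels + 2 ) = r;
        }
    }
}

Because the macro indexes through imageData and widthStep directly, it works regardless of row padding, which is why it is preferred over manual pointer arithmetic on imageData.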
+ +/****************************************************************************************\ +* Matrix type (CvMat) * +\****************************************************************************************/ + +#define CV_AUTO_STEP 0x7fffffff +#define CV_WHOLE_ARR cvSlice( 0, 0x3fffffff ) + +#define CV_MAGIC_MASK 0xFFFF0000 +#define CV_MAT_MAGIC_VAL 0x42420000 +#define CV_TYPE_NAME_MAT "opencv-matrix" + +#ifdef __cplusplus +typedef struct CvMat CvMat; +CV_INLINE CvMat cvMat(const cv::Mat& m); +#endif + +/** Matrix elements are stored row by row. Element (i, j) (i - 0-based row index, j - 0-based column +index) of a matrix can be retrieved or modified using CV_MAT_ELEM macro: + + uchar pixval = CV_MAT_ELEM(grayimg, uchar, i, j) + CV_MAT_ELEM(cameraMatrix, float, 0, 2) = image.width*0.5f; + +To access multiple-channel matrices, you can use +CV_MAT_ELEM(matrix, type, i, j\*nchannels + channel_idx). + +@deprecated CvMat is now obsolete; consider using Mat instead. + */ +typedef struct CvMat +{ + int type; + int step; + + /* for internal use only */ + int* refcount; + int hdr_refcount; + + union + { + uchar* ptr; + short* s; + int* i; + float* fl; + double* db; + } data; + +#ifdef __cplusplus + union + { + int rows; + int height; + }; + + union + { + int cols; + int width; + }; +#else + int rows; + int cols; +#endif + +#if defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus) + CvMat() {} + CvMat(const cv::Mat& m) { *this = cvMat(m); } +#endif +} +CvMat; + + +#define CV_IS_MAT_HDR(mat) \ + ((mat) != NULL && \ + (((const CvMat*)(mat))->type & CV_MAGIC_MASK) == CV_MAT_MAGIC_VAL && \ + ((const CvMat*)(mat))->cols > 0 && ((const CvMat*)(mat))->rows > 0) + +#define CV_IS_MAT_HDR_Z(mat) \ + ((mat) != NULL && \ + (((const CvMat*)(mat))->type & CV_MAGIC_MASK) == CV_MAT_MAGIC_VAL && \ + ((const CvMat*)(mat))->cols >= 0 && ((const CvMat*)(mat))->rows >= 0) + +#define CV_IS_MAT(mat) \ + (CV_IS_MAT_HDR(mat) && ((const CvMat*)(mat))->data.ptr != NULL) + +#define CV_IS_MASK_ARR(mat) \ + (((mat)->type & (CV_MAT_TYPE_MASK & ~CV_8SC1)) == 0) + +#define CV_ARE_TYPES_EQ(mat1, mat2) \ + ((((mat1)->type ^ (mat2)->type) & CV_MAT_TYPE_MASK) == 0) + +#define CV_ARE_CNS_EQ(mat1, mat2) \ + ((((mat1)->type ^ (mat2)->type) & CV_MAT_CN_MASK) == 0) + +#define CV_ARE_DEPTHS_EQ(mat1, mat2) \ + ((((mat1)->type ^ (mat2)->type) & CV_MAT_DEPTH_MASK) == 0) + +#define CV_ARE_SIZES_EQ(mat1, mat2) \ + ((mat1)->rows == (mat2)->rows && (mat1)->cols == (mat2)->cols) + +#define CV_IS_MAT_CONST(mat) \ + (((mat)->rows|(mat)->cols) == 1) + +#define IPL2CV_DEPTH(depth) \ + ((((CV_8U)+(CV_16U<<4)+(CV_32F<<8)+(CV_64F<<16)+(CV_8S<<20)+ \ + (CV_16S<<24)+(CV_32S<<28)) >> ((((depth) & 0xF0) >> 2) + \ + (((depth) & IPL_DEPTH_SIGN) ? 20 : 0))) & 15) + +/** Inline constructor. No data is allocated internally!!! + * (Use together with cvCreateData, or use cvCreateMat instead to + * get a matrix with allocated data): + */ +CV_INLINE CvMat cvMat( int rows, int cols, int type, void* data CV_DEFAULT(NULL)) +{ + CvMat m; + + assert( (unsigned)CV_MAT_DEPTH(type) <= CV_64F ); + type = CV_MAT_TYPE(type); + m.type = CV_MAT_MAGIC_VAL | CV_MAT_CONT_FLAG | type; + m.cols = cols; + m.rows = rows; + m.step = m.cols*CV_ELEM_SIZE(type); + m.data.ptr = (uchar*)data; + m.refcount = NULL; + m.hdr_refcount = 0; + + return m; +} + +#ifdef __cplusplus + +CV_INLINE CvMat cvMat(const cv::Mat& m) +{ + CvMat self; + CV_DbgAssert(m.dims <= 2); + self = cvMat(m.rows, m.dims == 1 ? 
1 : m.cols, m.type(), m.data); + self.step = (int)m.step[0]; + self.type = (self.type & ~cv::Mat::CONTINUOUS_FLAG) | (m.flags & cv::Mat::CONTINUOUS_FLAG); + return self; +} +CV_INLINE CvMat cvMat() +{ +#if !defined(CV__ENABLE_C_API_CTORS) + CvMat self = CV_STRUCT_INITIALIZER; return self; +#else + return CvMat(); +#endif +} +CV_INLINE CvMat cvMat(const CvMat& m) +{ +#if !defined(CV__ENABLE_C_API_CTORS) + CvMat self = CV_STRUCT_INITIALIZER; memcpy(&self, &m, sizeof(self)); return self; +#else + return CvMat(m); +#endif +} + +#endif // __cplusplus + + +#define CV_MAT_ELEM_PTR_FAST( mat, row, col, pix_size ) \ + (assert( (unsigned)(row) < (unsigned)(mat).rows && \ + (unsigned)(col) < (unsigned)(mat).cols ), \ + (mat).data.ptr + (size_t)(mat).step*(row) + (pix_size)*(col)) + +#define CV_MAT_ELEM_PTR( mat, row, col ) \ + CV_MAT_ELEM_PTR_FAST( mat, row, col, CV_ELEM_SIZE((mat).type) ) + +#define CV_MAT_ELEM( mat, elemtype, row, col ) \ + (*(elemtype*)CV_MAT_ELEM_PTR_FAST( mat, row, col, sizeof(elemtype))) + +/** @brief Returns the particular element of single-channel floating-point matrix. + +The function is a fast replacement for cvGetReal2D in the case of single-channel floating-point +matrices. It is faster because it is inline, it does fewer checks for array type and array element +type, and it checks for the row and column ranges only in debug mode. +@param mat Input matrix +@param row The zero-based index of row +@param col The zero-based index of column + */ +CV_INLINE double cvmGet( const CvMat* mat, int row, int col ) +{ + int type; + + type = CV_MAT_TYPE(mat->type); + assert( (unsigned)row < (unsigned)mat->rows && + (unsigned)col < (unsigned)mat->cols ); + + if( type == CV_32FC1 ) + return ((float*)(void*)(mat->data.ptr + (size_t)mat->step*row))[col]; + else + { + assert( type == CV_64FC1 ); + return ((double*)(void*)(mat->data.ptr + (size_t)mat->step*row))[col]; + } +} + +/** @brief Sets a specific element of a single-channel floating-point matrix. + +The function is a fast replacement for cvSetReal2D in the case of single-channel floating-point +matrices. It is faster because it is inline, it does fewer checks for array type and array element +type, and it checks for the row and column ranges only in debug mode. +@param mat The matrix +@param row The zero-based index of row +@param col The zero-based index of column +@param value The new value of the matrix element + */ +CV_INLINE void cvmSet( CvMat* mat, int row, int col, double value ) +{ + int type; + type = CV_MAT_TYPE(mat->type); + assert( (unsigned)row < (unsigned)mat->rows && + (unsigned)col < (unsigned)mat->cols ); + + if( type == CV_32FC1 ) + ((float*)(void*)(mat->data.ptr + (size_t)mat->step*row))[col] = (float)value; + else + { + assert( type == CV_64FC1 ); + ((double*)(void*)(mat->data.ptr + (size_t)mat->step*row))[col] = value; + } +} + + +CV_INLINE int cvIplDepth( int type ) +{ + int depth = CV_MAT_DEPTH(type); + return CV_ELEM_SIZE1(depth)*8 | (depth == CV_8S || depth == CV_16S || + depth == CV_32S ? 
IPL_DEPTH_SIGN : 0); +} + + +/****************************************************************************************\ +* Multi-dimensional dense array (CvMatND) * +\****************************************************************************************/ + +#define CV_MATND_MAGIC_VAL 0x42430000 +#define CV_TYPE_NAME_MATND "opencv-nd-matrix" + +#define CV_MAX_DIM 32 + +#ifdef __cplusplus +typedef struct CvMatND CvMatND; +CV_EXPORTS CvMatND cvMatND(const cv::Mat& m); +#endif + +/** + @deprecated consider using cv::Mat instead + */ +typedef struct +CvMatND +{ + int type; + int dims; + + int* refcount; + int hdr_refcount; + + union + { + uchar* ptr; + float* fl; + double* db; + int* i; + short* s; + } data; + + struct + { + int size; + int step; + } + dim[CV_MAX_DIM]; + +#if defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus) + CvMatND() {} + CvMatND(const cv::Mat& m) { *this = cvMatND(m); } +#endif +} +CvMatND; + + +CV_INLINE CvMatND cvMatND() +{ +#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)) + CvMatND self = CV_STRUCT_INITIALIZER; return self; +#else + return CvMatND(); +#endif +} + +#define CV_IS_MATND_HDR(mat) \ + ((mat) != NULL && (((const CvMatND*)(mat))->type & CV_MAGIC_MASK) == CV_MATND_MAGIC_VAL) + +#define CV_IS_MATND(mat) \ + (CV_IS_MATND_HDR(mat) && ((const CvMatND*)(mat))->data.ptr != NULL) + + +/****************************************************************************************\ +* Multi-dimensional sparse array (CvSparseMat) * +\****************************************************************************************/ + +#define CV_SPARSE_MAT_MAGIC_VAL 0x42440000 +#define CV_TYPE_NAME_SPARSE_MAT "opencv-sparse-matrix" + +struct CvSet; + +typedef struct CvSparseMat +{ + int type; + int dims; + int* refcount; + int hdr_refcount; + + struct CvSet* heap; + void** hashtable; + int hashsize; + int valoffset; + int idxoffset; + int size[CV_MAX_DIM]; + +#ifdef __cplusplus + CV_EXPORTS void copyToSparseMat(cv::SparseMat& m) const; +#endif +} +CvSparseMat; + +#ifdef __cplusplus +CV_EXPORTS CvSparseMat* cvCreateSparseMat(const cv::SparseMat& m); +#endif + +#define CV_IS_SPARSE_MAT_HDR(mat) \ + ((mat) != NULL && \ + (((const CvSparseMat*)(mat))->type & CV_MAGIC_MASK) == CV_SPARSE_MAT_MAGIC_VAL) + +#define CV_IS_SPARSE_MAT(mat) \ + CV_IS_SPARSE_MAT_HDR(mat) + +/**************** iteration through a sparse array *****************/ + +typedef struct CvSparseNode +{ + unsigned hashval; + struct CvSparseNode* next; +} +CvSparseNode; + +typedef struct CvSparseMatIterator +{ + CvSparseMat* mat; + CvSparseNode* node; + int curidx; +} +CvSparseMatIterator; + +#define CV_NODE_VAL(mat,node) ((void*)((uchar*)(node) + (mat)->valoffset)) +#define CV_NODE_IDX(mat,node) ((int*)((uchar*)(node) + (mat)->idxoffset)) + +/****************************************************************************************\ +* Histogram * +\****************************************************************************************/ + +typedef int CvHistType; + +#define CV_HIST_MAGIC_VAL 0x42450000 +#define CV_HIST_UNIFORM_FLAG (1 << 10) + +/** indicates whether bin ranges are set already or not */ +#define CV_HIST_RANGES_FLAG (1 << 11) + +#define CV_HIST_ARRAY 0 +#define CV_HIST_SPARSE 1 +#define CV_HIST_TREE CV_HIST_SPARSE + +/** should be used as a parameter only, + it turns to CV_HIST_UNIFORM_FLAG of hist->type */ +#define CV_HIST_UNIFORM 1 + +typedef struct CvHistogram +{ + int type; + CvArr* bins; + float thresh[CV_MAX_DIM][2]; /**< For uniform histograms. 
*/ + float** thresh2; /**< For non-uniform histograms. */ + CvMatND mat; /**< Embedded matrix header for array histograms. */ +} +CvHistogram; + +#define CV_IS_HIST( hist ) \ + ((hist) != NULL && \ + (((CvHistogram*)(hist))->type & CV_MAGIC_MASK) == CV_HIST_MAGIC_VAL && \ + (hist)->bins != NULL) + +#define CV_IS_UNIFORM_HIST( hist ) \ + (((hist)->type & CV_HIST_UNIFORM_FLAG) != 0) + +#define CV_IS_SPARSE_HIST( hist ) \ + CV_IS_SPARSE_MAT((hist)->bins) + +#define CV_HIST_HAS_RANGES( hist ) \ + (((hist)->type & CV_HIST_RANGES_FLAG) != 0) + +/****************************************************************************************\ +* Other supplementary data type definitions * +\****************************************************************************************/ + +/*************************************** CvRect *****************************************/ +/** @sa Rect_ */ +typedef struct CvRect +{ + int x; + int y; + int width; + int height; + +#ifdef CV__VALIDATE_UNUNITIALIZED_VARS + CvRect() __attribute__(( warning("Non-initialized variable") )) {}; + template CvRect(const std::initializer_list<_Tp> list) + { + CV_Assert(list.size() == 0 || list.size() == 4); + x = y = width = height = 0; + if (list.size() == 4) + { + x = list.begin()[0]; y = list.begin()[1]; width = list.begin()[2]; height = list.begin()[3]; + } + }; +#elif defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus) + CvRect(int _x = 0, int _y = 0, int w = 0, int h = 0): x(_x), y(_y), width(w), height(h) {} + template + CvRect(const cv::Rect_<_Tp>& r): x(cv::saturate_cast(r.x)), y(cv::saturate_cast(r.y)), width(cv::saturate_cast(r.width)), height(cv::saturate_cast(r.height)) {} +#endif +#ifdef __cplusplus + template + operator cv::Rect_<_Tp>() const { return cv::Rect_<_Tp>((_Tp)x, (_Tp)y, (_Tp)width, (_Tp)height); } +#endif +} +CvRect; + +/** constructs CvRect structure. 
*/ +CV_INLINE CvRect cvRect( int x, int y, int width, int height ) +{ +#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)) + CvRect r = {x, y, width, height}; +#else + CvRect r(x, y , width, height); +#endif + return r; +} +#ifdef __cplusplus +CV_INLINE CvRect cvRect(const cv::Rect& rc) { return cvRect(rc.x, rc.y, rc.width, rc.height); } +#endif + +CV_INLINE IplROI cvRectToROI( CvRect rect, int coi ) +{ + IplROI roi; + roi.xOffset = rect.x; + roi.yOffset = rect.y; + roi.width = rect.width; + roi.height = rect.height; + roi.coi = coi; + + return roi; +} + + +CV_INLINE CvRect cvROIToRect( IplROI roi ) +{ + return cvRect( roi.xOffset, roi.yOffset, roi.width, roi.height ); +} + +/*********************************** CvTermCriteria *************************************/ + +#define CV_TERMCRIT_ITER 1 +#define CV_TERMCRIT_NUMBER CV_TERMCRIT_ITER +#define CV_TERMCRIT_EPS 2 + +/** @sa TermCriteria + */ +typedef struct CvTermCriteria +{ + int type; /**< may be combination of + CV_TERMCRIT_ITER + CV_TERMCRIT_EPS */ + int max_iter; + double epsilon; +#if defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus) + CvTermCriteria(int _type = 0, int _iter = 0, double _eps = 0) : type(_type), max_iter(_iter), epsilon(_eps) {} + CvTermCriteria(const cv::TermCriteria& t) : type(t.type), max_iter(t.maxCount), epsilon(t.epsilon) {} +#endif +#ifdef __cplusplus + operator cv::TermCriteria() const { return cv::TermCriteria(type, max_iter, epsilon); } +#endif +} +CvTermCriteria; + +CV_INLINE CvTermCriteria cvTermCriteria( int type, int max_iter, double epsilon ) +{ +#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)) + CvTermCriteria t = { type, max_iter, (float)epsilon}; +#else + CvTermCriteria t(type, max_iter, epsilon); +#endif + return t; +} +#ifdef __cplusplus +CV_INLINE CvTermCriteria cvTermCriteria(const cv::TermCriteria& t) { return cvTermCriteria(t.type, t.maxCount, t.epsilon); } +#endif + + +/******************************* CvPoint and variants ***********************************/ + +typedef struct CvPoint +{ + int x; + int y; + +#ifdef CV__VALIDATE_UNUNITIALIZED_VARS + CvPoint() __attribute__(( warning("Non-initialized variable") )) {} + template CvPoint(const std::initializer_list<_Tp> list) + { + CV_Assert(list.size() == 0 || list.size() == 2); + x = y = 0; + if (list.size() == 2) + { + x = list.begin()[0]; y = list.begin()[1]; + } + }; +#elif defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus) + CvPoint(int _x = 0, int _y = 0): x(_x), y(_y) {} + template + CvPoint(const cv::Point_<_Tp>& pt): x((int)pt.x), y((int)pt.y) {} +#endif +#ifdef __cplusplus + template + operator cv::Point_<_Tp>() const { return cv::Point_<_Tp>(cv::saturate_cast<_Tp>(x), cv::saturate_cast<_Tp>(y)); } +#endif +} +CvPoint; + +/** constructs CvPoint structure. 
*/ +CV_INLINE CvPoint cvPoint( int x, int y ) +{ +#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)) + CvPoint p = {x, y}; +#else + CvPoint p(x, y); +#endif + return p; +} +#ifdef __cplusplus +CV_INLINE CvPoint cvPoint(const cv::Point& pt) { return cvPoint(pt.x, pt.y); } +#endif + +typedef struct CvPoint2D32f +{ + float x; + float y; + +#ifdef CV__VALIDATE_UNUNITIALIZED_VARS + CvPoint2D32f() __attribute__(( warning("Non-initialized variable") )) {} + template CvPoint2D32f(const std::initializer_list<_Tp> list) + { + CV_Assert(list.size() == 0 || list.size() == 2); + x = y = 0; + if (list.size() == 2) + { + x = list.begin()[0]; y = list.begin()[1]; + } + }; +#elif defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus) + CvPoint2D32f(float _x = 0, float _y = 0): x(_x), y(_y) {} + template + CvPoint2D32f(const cv::Point_<_Tp>& pt): x((float)pt.x), y((float)pt.y) {} +#endif +#ifdef __cplusplus + template + operator cv::Point_<_Tp>() const { return cv::Point_<_Tp>(cv::saturate_cast<_Tp>(x), cv::saturate_cast<_Tp>(y)); } +#endif +} +CvPoint2D32f; + +/** constructs CvPoint2D32f structure. */ +CV_INLINE CvPoint2D32f cvPoint2D32f( double x, double y ) +{ +#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)) + CvPoint2D32f p = { (float)x, (float)y }; +#else + CvPoint2D32f p((float)x, (float)y); +#endif + return p; +} + +#ifdef __cplusplus +template +CvPoint2D32f cvPoint2D32f(const cv::Point_<_Tp>& pt) +{ +#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)) + CvPoint2D32f p = { (float)pt.x, (float)pt.y }; +#else + CvPoint2D32f p((float)pt.x, (float)pt.y); +#endif + return p; +} +#endif + +/** converts CvPoint to CvPoint2D32f. */ +CV_INLINE CvPoint2D32f cvPointTo32f( CvPoint point ) +{ + return cvPoint2D32f( (float)point.x, (float)point.y ); +} + +/** converts CvPoint2D32f to CvPoint. */ +CV_INLINE CvPoint cvPointFrom32f( CvPoint2D32f point ) +{ +#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)) + CvPoint ipt = { cvRound(point.x), cvRound(point.y) }; +#else + CvPoint ipt(cvRound(point.x), cvRound(point.y)); +#endif + return ipt; +} + + +typedef struct CvPoint3D32f +{ + float x; + float y; + float z; + +#ifdef CV__VALIDATE_UNUNITIALIZED_VARS + CvPoint3D32f() __attribute__(( warning("Non-initialized variable") )) {} + template CvPoint3D32f(const std::initializer_list<_Tp> list) + { + CV_Assert(list.size() == 0 || list.size() == 3); + x = y = z = 0; + if (list.size() == 3) + { + x = list.begin()[0]; y = list.begin()[1]; z = list.begin()[2]; + } + }; +#elif defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus) + CvPoint3D32f(float _x = 0, float _y = 0, float _z = 0): x(_x), y(_y), z(_z) {} + template + CvPoint3D32f(const cv::Point3_<_Tp>& pt): x((float)pt.x), y((float)pt.y), z((float)pt.z) {} +#endif +#ifdef __cplusplus + template + operator cv::Point3_<_Tp>() const { return cv::Point3_<_Tp>(cv::saturate_cast<_Tp>(x), cv::saturate_cast<_Tp>(y), cv::saturate_cast<_Tp>(z)); } +#endif +} +CvPoint3D32f; + +/** constructs CvPoint3D32f structure. 
*/ +CV_INLINE CvPoint3D32f cvPoint3D32f( double x, double y, double z ) +{ +#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)) + CvPoint3D32f p = { (float)x, (float)y, (float)z }; +#else + CvPoint3D32f p((float)x, (float)y, (float)z); +#endif + return p; +} + +#ifdef __cplusplus +template +CvPoint3D32f cvPoint3D32f(const cv::Point3_<_Tp>& pt) +{ +#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)) + CvPoint3D32f p = { (float)pt.x, (float)pt.y, (float)pt.z }; +#else + CvPoint3D32f p((float)pt.x, (float)pt.y, (float)pt.z); +#endif + return p; +} +#endif + + +typedef struct CvPoint2D64f +{ + double x; + double y; +#ifdef CV__VALIDATE_UNUNITIALIZED_VARS + CvPoint2D64f() __attribute__(( warning("Non-initialized variable") )) {} + template CvPoint2D64f(const std::initializer_list<_Tp> list) + { + CV_Assert(list.size() == 0 || list.size() == 2); + x = y = 0; + if (list.size() == 2) + { + x = list.begin()[0]; y = list.begin()[1]; + } + }; +#endif +} +CvPoint2D64f; + +/** constructs CvPoint2D64f structure.*/ +CV_INLINE CvPoint2D64f cvPoint2D64f( double x, double y ) +{ + CvPoint2D64f p = { x, y }; + return p; +} + + +typedef struct CvPoint3D64f +{ + double x; + double y; + double z; +#ifdef CV__VALIDATE_UNUNITIALIZED_VARS + CvPoint3D64f() __attribute__(( warning("Non-initialized variable") )) {} + template CvPoint3D64f(const std::initializer_list<_Tp> list) + { + CV_Assert(list.size() == 0 || list.size() == 3); + x = y = z = 0; + if (list.size() == 3) + { + x = list.begin()[0]; y = list.begin()[1]; z = list.begin()[2]; + } + }; +#endif +} +CvPoint3D64f; + +/** constructs CvPoint3D64f structure. */ +CV_INLINE CvPoint3D64f cvPoint3D64f( double x, double y, double z ) +{ + CvPoint3D64f p = { x, y, z }; + return p; +} + + +/******************************** CvSize's & CvBox **************************************/ + +typedef struct CvSize +{ + int width; + int height; + +#ifdef CV__VALIDATE_UNUNITIALIZED_VARS + CvSize() __attribute__(( warning("Non-initialized variable") )) {} + template CvSize(const std::initializer_list<_Tp> list) + { + CV_Assert(list.size() == 0 || list.size() == 2); + width = 0; height = 0; + if (list.size() == 2) + { + width = list.begin()[0]; height = list.begin()[1]; + } + }; +#elif defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus) + CvSize(int w = 0, int h = 0): width(w), height(h) {} + template + CvSize(const cv::Size_<_Tp>& sz): width(cv::saturate_cast(sz.width)), height(cv::saturate_cast(sz.height)) {} +#endif +#ifdef __cplusplus + template + operator cv::Size_<_Tp>() const { return cv::Size_<_Tp>(cv::saturate_cast<_Tp>(width), cv::saturate_cast<_Tp>(height)); } +#endif +} +CvSize; + +/** constructs CvSize structure. 
*/ +CV_INLINE CvSize cvSize( int width, int height ) +{ +#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)) + CvSize s = { width, height }; +#else + CvSize s(width, height); +#endif + return s; +} + +#ifdef __cplusplus +CV_INLINE CvSize cvSize(const cv::Size& sz) +{ +#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)) + CvSize s = { sz.width, sz.height }; +#else + CvSize s(sz.width, sz.height); +#endif + return s; +} +#endif + +typedef struct CvSize2D32f +{ + float width; + float height; + +#ifdef CV__VALIDATE_UNUNITIALIZED_VARS + CvSize2D32f() __attribute__(( warning("Non-initialized variable") )) {} + template CvSize2D32f(const std::initializer_list<_Tp> list) + { + CV_Assert(list.size() == 0 || list.size() == 2); + width = 0; height = 0; + if (list.size() == 2) + { + width = list.begin()[0]; height = list.begin()[1]; + } + }; +#elif defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus) + CvSize2D32f(float w = 0, float h = 0): width(w), height(h) {} + template + CvSize2D32f(const cv::Size_<_Tp>& sz): width(cv::saturate_cast(sz.width)), height(cv::saturate_cast(sz.height)) {} +#endif +#ifdef __cplusplus + template + operator cv::Size_<_Tp>() const { return cv::Size_<_Tp>(cv::saturate_cast<_Tp>(width), cv::saturate_cast<_Tp>(height)); } +#endif +} +CvSize2D32f; + +/** constructs CvSize2D32f structure. */ +CV_INLINE CvSize2D32f cvSize2D32f( double width, double height ) +{ +#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)) + CvSize2D32f s = { (float)width, (float)height }; +#else + CvSize2D32f s((float)width, (float)height); +#endif + return s; +} +#ifdef __cplusplus +template +CvSize2D32f cvSize2D32f(const cv::Size_<_Tp>& sz) +{ +#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)) + CvSize2D32f s = { (float)sz.width, (float)sz.height }; +#else + CvSize2D32f s((float)sz.width, (float)sz.height); +#endif + return s; +} +#endif + +/** @sa RotatedRect + */ +typedef struct CvBox2D +{ + CvPoint2D32f center; /**< Center of the box. */ + CvSize2D32f size; /**< Box width and length. */ + float angle; /**< Angle between the horizontal axis */ + /**< and the first side (i.e. 
length) in degrees */ + +#if defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus) + CvBox2D(CvPoint2D32f c = CvPoint2D32f(), CvSize2D32f s = CvSize2D32f(), float a = 0) : center(c), size(s), angle(a) {} + CvBox2D(const cv::RotatedRect& rr) : center(rr.center), size(rr.size), angle(rr.angle) {} +#endif +#ifdef __cplusplus + operator cv::RotatedRect() const { return cv::RotatedRect(center, size, angle); } +#endif +} +CvBox2D; + + +#ifdef __cplusplus +CV_INLINE CvBox2D cvBox2D(CvPoint2D32f c = CvPoint2D32f(), CvSize2D32f s = CvSize2D32f(), float a = 0) +{ + CvBox2D self; + self.center = c; + self.size = s; + self.angle = a; + return self; +} +CV_INLINE CvBox2D cvBox2D(const cv::RotatedRect& rr) +{ + CvBox2D self; + self.center = cvPoint2D32f(rr.center); + self.size = cvSize2D32f(rr.size); + self.angle = rr.angle; + return self; +} +#endif + + +/** Line iterator state: */ +typedef struct CvLineIterator +{ + /** Pointer to the current point: */ + uchar* ptr; + + /* Bresenham algorithm state: */ + int err; + int plus_delta; + int minus_delta; + int plus_step; + int minus_step; +} +CvLineIterator; + + + +/************************************* CvSlice ******************************************/ +#define CV_WHOLE_SEQ_END_INDEX 0x3fffffff +#define CV_WHOLE_SEQ cvSlice(0, CV_WHOLE_SEQ_END_INDEX) + +typedef struct CvSlice +{ + int start_index, end_index; + +#ifdef CV__VALIDATE_UNUNITIALIZED_VARS + CvSlice() __attribute__(( warning("Non-initialized variable") )) {} + template CvSlice(const std::initializer_list<_Tp> list) + { + CV_Assert(list.size() == 0 || list.size() == 2); + start_index = end_index = 0; + if (list.size() == 2) + { + start_index = list.begin()[0]; end_index = list.begin()[1]; + } + }; +#endif +#if defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus) && !defined(__CUDACC__) + CvSlice(int start = 0, int end = 0) : start_index(start), end_index(end) {} + CvSlice(const cv::Range& r) { *this = (r.start != INT_MIN && r.end != INT_MAX) ? CvSlice(r.start, r.end) : CvSlice(0, CV_WHOLE_SEQ_END_INDEX); } + operator cv::Range() const { return (start_index == 0 && end_index == CV_WHOLE_SEQ_END_INDEX ) ? cv::Range::all() : cv::Range(start_index, end_index); } +#endif +} +CvSlice; + +CV_INLINE CvSlice cvSlice( int start, int end ) +{ +#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus) && !defined(__CUDACC__)) + CvSlice slice = { start, end }; +#else + CvSlice slice(start, end); +#endif + return slice; +} + +#if defined(__cplusplus) +CV_INLINE CvSlice cvSlice(const cv::Range& r) +{ + CvSlice slice = (r.start != INT_MIN && r.end != INT_MAX) ? 
cvSlice(r.start, r.end) : cvSlice(0, CV_WHOLE_SEQ_END_INDEX); + return slice; +} +#endif + + +/************************************* CvScalar *****************************************/ +/** @sa Scalar_ + */ +typedef struct CvScalar +{ + double val[4]; + +#ifdef CV__VALIDATE_UNUNITIALIZED_VARS + CvScalar() __attribute__(( warning("Non-initialized variable") )) {} + CvScalar(const std::initializer_list list) + { + CV_Assert(list.size() == 0 || list.size() == 4); + val[0] = val[1] = val[2] = val[3] = 0; + if (list.size() == 4) + { + val[0] = list.begin()[0]; val[1] = list.begin()[1]; val[2] = list.begin()[2]; val[3] = list.begin()[3]; + } + }; +#elif defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus) + CvScalar() {} + CvScalar(double d0, double d1 = 0, double d2 = 0, double d3 = 0) { val[0] = d0; val[1] = d1; val[2] = d2; val[3] = d3; } + template + CvScalar(const cv::Scalar_<_Tp>& s) { val[0] = s.val[0]; val[1] = s.val[1]; val[2] = s.val[2]; val[3] = s.val[3]; } + template + CvScalar(const cv::Vec<_Tp, cn>& v) + { + int i; + for( i = 0; i < (cn < 4 ? cn : 4); i++ ) val[i] = v.val[i]; + for( ; i < 4; i++ ) val[i] = 0; + } +#endif +#ifdef __cplusplus + template + operator cv::Scalar_<_Tp>() const { return cv::Scalar_<_Tp>(cv::saturate_cast<_Tp>(val[0]), cv::saturate_cast<_Tp>(val[1]), cv::saturate_cast<_Tp>(val[2]), cv::saturate_cast<_Tp>(val[3])); } +#endif +} +CvScalar; + +CV_INLINE CvScalar cvScalar( double val0, double val1 CV_DEFAULT(0), + double val2 CV_DEFAULT(0), double val3 CV_DEFAULT(0)) +{ +#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)) + CvScalar scalar = CV_STRUCT_INITIALIZER; +#else + CvScalar scalar; +#endif + scalar.val[0] = val0; scalar.val[1] = val1; + scalar.val[2] = val2; scalar.val[3] = val3; + return scalar; +} + +#ifdef __cplusplus +CV_INLINE CvScalar cvScalar() +{ +#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)) + CvScalar scalar = CV_STRUCT_INITIALIZER; +#else + CvScalar scalar; +#endif + scalar.val[0] = scalar.val[1] = scalar.val[2] = scalar.val[3] = 0; + return scalar; +} +CV_INLINE CvScalar cvScalar(const cv::Scalar& s) +{ +#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)) + CvScalar scalar = CV_STRUCT_INITIALIZER; +#else + CvScalar scalar; +#endif + scalar.val[0] = s.val[0]; + scalar.val[1] = s.val[1]; + scalar.val[2] = s.val[2]; + scalar.val[3] = s.val[3]; + return scalar; +} +#endif + +CV_INLINE CvScalar cvRealScalar( double val0 ) +{ +#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)) + CvScalar scalar = CV_STRUCT_INITIALIZER; +#else + CvScalar scalar; +#endif + scalar.val[0] = val0; + scalar.val[1] = scalar.val[2] = scalar.val[3] = 0; + return scalar; +} + +CV_INLINE CvScalar cvScalarAll( double val0123 ) +{ +#if !(defined(CV__ENABLE_C_API_CTORS) && defined(__cplusplus)) + CvScalar scalar = CV_STRUCT_INITIALIZER; +#else + CvScalar scalar; +#endif + scalar.val[0] = val0123; + scalar.val[1] = val0123; + scalar.val[2] = val0123; + scalar.val[3] = val0123; + return scalar; +} + +/****************************************************************************************\ +* Dynamic Data structures * +\****************************************************************************************/ + +/******************************** Memory storage ****************************************/ + +typedef struct CvMemBlock +{ + struct CvMemBlock* prev; + struct CvMemBlock* next; +} +CvMemBlock; + +#define CV_STORAGE_MAGIC_VAL 0x42890000 + +typedef struct CvMemStorage +{ + int signature; + CvMemBlock* bottom; /**< First 
allocated block. */ + CvMemBlock* top; /**< Current memory block - top of the stack. */ + struct CvMemStorage* parent; /**< We get new blocks from parent as needed. */ + int block_size; /**< Block size. */ + int free_space; /**< Remaining free space in current block. */ +} +CvMemStorage; + +#define CV_IS_STORAGE(storage) \ + ((storage) != NULL && \ + (((CvMemStorage*)(storage))->signature & CV_MAGIC_MASK) == CV_STORAGE_MAGIC_VAL) + + +typedef struct CvMemStoragePos +{ + CvMemBlock* top; + int free_space; +} +CvMemStoragePos; + + +/*********************************** Sequence *******************************************/ + +typedef struct CvSeqBlock +{ + struct CvSeqBlock* prev; /**< Previous sequence block. */ + struct CvSeqBlock* next; /**< Next sequence block. */ + int start_index; /**< Index of the first element in the block + */ + /**< sequence->first->start_index. */ + int count; /**< Number of elements in the block. */ + schar* data; /**< Pointer to the first element of the block. */ +} +CvSeqBlock; + + +#define CV_TREE_NODE_FIELDS(node_type) \ + int flags; /**< Miscellaneous flags. */ \ + int header_size; /**< Size of sequence header. */ \ + struct node_type* h_prev; /**< Previous sequence. */ \ + struct node_type* h_next; /**< Next sequence. */ \ + struct node_type* v_prev; /**< 2nd previous sequence. */ \ + struct node_type* v_next /**< 2nd next sequence. */ + +/** + Read/Write sequence. + Elements can be dynamically inserted to or deleted from the sequence. +*/ +#define CV_SEQUENCE_FIELDS() \ + CV_TREE_NODE_FIELDS(CvSeq); \ + int total; /**< Total number of elements. */ \ + int elem_size; /**< Size of sequence element in bytes. */ \ + schar* block_max; /**< Maximal bound of the last block. */ \ + schar* ptr; /**< Current write pointer. */ \ + int delta_elems; /**< Grow seq this many at a time. */ \ + CvMemStorage* storage; /**< Where the seq is stored. */ \ + CvSeqBlock* free_blocks; /**< Free blocks list. */ \ + CvSeqBlock* first; /**< Pointer to the first sequence block. */ + +typedef struct CvSeq +{ + CV_SEQUENCE_FIELDS() +} +CvSeq; + +#define CV_TYPE_NAME_SEQ "opencv-sequence" +#define CV_TYPE_NAME_SEQ_TREE "opencv-sequence-tree" + +/*************************************** Set ********************************************/ +/** @brief Set + Order is not preserved. There can be gaps between sequence elements. + After the element has been inserted it stays in the same place all the time. + The MSB(most-significant or sign bit) of the first field (flags) is 0 iff the element exists. +*/ +#define CV_SET_ELEM_FIELDS(elem_type) \ + int flags; \ + struct elem_type* next_free; + +typedef struct CvSetElem +{ + CV_SET_ELEM_FIELDS(CvSetElem) +} +CvSetElem; + +#define CV_SET_FIELDS() \ + CV_SEQUENCE_FIELDS() \ + CvSetElem* free_elems; \ + int active_count; + +typedef struct CvSet +{ + CV_SET_FIELDS() +} +CvSet; + + +#define CV_SET_ELEM_IDX_MASK ((1 << 26) - 1) +#define CV_SET_ELEM_FREE_FLAG (1 << (sizeof(int)*8-1)) + +/** Checks whether the element pointed by ptr belongs to a set or not */ +#define CV_IS_SET_ELEM( ptr ) (((CvSetElem*)(ptr))->flags >= 0) + +/************************************* Graph ********************************************/ + +/** @name Graph + +We represent a graph as a set of vertices. Vertices contain their adjacency lists (more exactly, +pointers to first incoming or outcoming edge (or 0 if isolated vertex)). Edges are stored in +another set. There is a singly-linked list of incoming/outcoming edges for each vertex. 
+ +Each edge consists of: + +- Two pointers to the starting and ending vertices (vtx[0] and vtx[1] respectively). + + A graph may be oriented or not. In the latter case, edges between vertex i to vertex j are not +distinguished during search operations. + +- Two pointers to next edges for the starting and ending vertices, where next[0] points to the +next edge in the vtx[0] adjacency list and next[1] points to the next edge in the vtx[1] +adjacency list. + +@see CvGraphEdge, CvGraphVtx, CvGraphVtx2D, CvGraph +@{ +*/ +#define CV_GRAPH_EDGE_FIELDS() \ + int flags; \ + float weight; \ + struct CvGraphEdge* next[2]; \ + struct CvGraphVtx* vtx[2]; + + +#define CV_GRAPH_VERTEX_FIELDS() \ + int flags; \ + struct CvGraphEdge* first; + + +typedef struct CvGraphEdge +{ + CV_GRAPH_EDGE_FIELDS() +} +CvGraphEdge; + +typedef struct CvGraphVtx +{ + CV_GRAPH_VERTEX_FIELDS() +} +CvGraphVtx; + +typedef struct CvGraphVtx2D +{ + CV_GRAPH_VERTEX_FIELDS() + CvPoint2D32f* ptr; +} +CvGraphVtx2D; + +/** + Graph is "derived" from the set (this is set a of vertices) + and includes another set (edges) +*/ +#define CV_GRAPH_FIELDS() \ + CV_SET_FIELDS() \ + CvSet* edges; + +typedef struct CvGraph +{ + CV_GRAPH_FIELDS() +} +CvGraph; + +#define CV_TYPE_NAME_GRAPH "opencv-graph" + +/** @} */ + +/*********************************** Chain/Contour *************************************/ + +typedef struct CvChain +{ + CV_SEQUENCE_FIELDS() + CvPoint origin; +} +CvChain; + +#define CV_CONTOUR_FIELDS() \ + CV_SEQUENCE_FIELDS() \ + CvRect rect; \ + int color; \ + int reserved[3]; + +typedef struct CvContour +{ + CV_CONTOUR_FIELDS() +} +CvContour; + +typedef CvContour CvPoint2DSeq; + +/****************************************************************************************\ +* Sequence types * +\****************************************************************************************/ + +#define CV_SEQ_MAGIC_VAL 0x42990000 + +#define CV_IS_SEQ(seq) \ + ((seq) != NULL && (((CvSeq*)(seq))->flags & CV_MAGIC_MASK) == CV_SEQ_MAGIC_VAL) + +#define CV_SET_MAGIC_VAL 0x42980000 +#define CV_IS_SET(set) \ + ((set) != NULL && (((CvSeq*)(set))->flags & CV_MAGIC_MASK) == CV_SET_MAGIC_VAL) + +#define CV_SEQ_ELTYPE_BITS 12 +#define CV_SEQ_ELTYPE_MASK ((1 << CV_SEQ_ELTYPE_BITS) - 1) + +#define CV_SEQ_ELTYPE_POINT CV_32SC2 /**< (x,y) */ +#define CV_SEQ_ELTYPE_CODE CV_8UC1 /**< freeman code: 0..7 */ +#define CV_SEQ_ELTYPE_GENERIC 0 +#define CV_SEQ_ELTYPE_PTR CV_USRTYPE1 +#define CV_SEQ_ELTYPE_PPOINT CV_SEQ_ELTYPE_PTR /**< &(x,y) */ +#define CV_SEQ_ELTYPE_INDEX CV_32SC1 /**< #(x,y) */ +#define CV_SEQ_ELTYPE_GRAPH_EDGE 0 /**< &next_o, &next_d, &vtx_o, &vtx_d */ +#define CV_SEQ_ELTYPE_GRAPH_VERTEX 0 /**< first_edge, &(x,y) */ +#define CV_SEQ_ELTYPE_TRIAN_ATR 0 /**< vertex of the binary tree */ +#define CV_SEQ_ELTYPE_CONNECTED_COMP 0 /**< connected component */ +#define CV_SEQ_ELTYPE_POINT3D CV_32FC3 /**< (x,y,z) */ + +#define CV_SEQ_KIND_BITS 2 +#define CV_SEQ_KIND_MASK (((1 << CV_SEQ_KIND_BITS) - 1)<<CV_SEQ_ELTYPE_BITS) + +/** types of sequences */ +#define CV_SEQ_KIND_GENERIC (0 << CV_SEQ_ELTYPE_BITS) +#define CV_SEQ_KIND_CURVE (1 << CV_SEQ_ELTYPE_BITS) +#define CV_SEQ_KIND_BIN_TREE (2 << CV_SEQ_ELTYPE_BITS) + +/** types of sparse sequences (sets) */ +#define CV_SEQ_KIND_GRAPH (1 << CV_SEQ_ELTYPE_BITS) +#define CV_SEQ_KIND_SUBDIV2D (2 << CV_SEQ_ELTYPE_BITS) + +#define CV_SEQ_FLAG_SHIFT (CV_SEQ_KIND_BITS + CV_SEQ_ELTYPE_BITS) + +/** flags for curves */ +#define CV_SEQ_FLAG_CLOSED (1 << CV_SEQ_FLAG_SHIFT) +#define CV_SEQ_FLAG_SIMPLE (0 << CV_SEQ_FLAG_SHIFT) +#define CV_SEQ_FLAG_CONVEX (0 << CV_SEQ_FLAG_SHIFT) +#define CV_SEQ_FLAG_HOLE (2 << CV_SEQ_FLAG_SHIFT) + +/** flags for graphs */ +#define CV_GRAPH_FLAG_ORIENTED (1 << CV_SEQ_FLAG_SHIFT) + +#define CV_GRAPH CV_SEQ_KIND_GRAPH +#define CV_ORIENTED_GRAPH (CV_SEQ_KIND_GRAPH|CV_GRAPH_FLAG_ORIENTED) + +/** point sets */ +#define CV_SEQ_POINT_SET (CV_SEQ_KIND_GENERIC| CV_SEQ_ELTYPE_POINT) +#define CV_SEQ_POINT3D_SET (CV_SEQ_KIND_GENERIC| CV_SEQ_ELTYPE_POINT3D) +#define CV_SEQ_POLYLINE (CV_SEQ_KIND_CURVE | CV_SEQ_ELTYPE_POINT) +#define CV_SEQ_POLYGON (CV_SEQ_FLAG_CLOSED | CV_SEQ_POLYLINE ) +#define CV_SEQ_CONTOUR CV_SEQ_POLYGON +#define CV_SEQ_SIMPLE_POLYGON (CV_SEQ_FLAG_SIMPLE | CV_SEQ_POLYGON ) + +/** chain-coded curves */ +#define CV_SEQ_CHAIN (CV_SEQ_KIND_CURVE | CV_SEQ_ELTYPE_CODE) +#define CV_SEQ_CHAIN_CONTOUR (CV_SEQ_FLAG_CLOSED | CV_SEQ_CHAIN) + +/** binary tree for the contour */ +#define CV_SEQ_POLYGON_TREE (CV_SEQ_KIND_BIN_TREE | CV_SEQ_ELTYPE_TRIAN_ATR) + +/** sequence of the connected components */ +#define CV_SEQ_CONNECTED_COMP (CV_SEQ_KIND_GENERIC | CV_SEQ_ELTYPE_CONNECTED_COMP) + +/** sequence of the integer numbers */ +#define CV_SEQ_INDEX (CV_SEQ_KIND_GENERIC | CV_SEQ_ELTYPE_INDEX) + +#define CV_SEQ_ELTYPE( seq ) ((seq)->flags & CV_SEQ_ELTYPE_MASK) +#define CV_SEQ_KIND( seq ) ((seq)->flags & CV_SEQ_KIND_MASK ) + +/** flag checking */ +#define CV_IS_SEQ_INDEX( seq ) ((CV_SEQ_ELTYPE(seq) == CV_SEQ_ELTYPE_INDEX) && \ + (CV_SEQ_KIND(seq) == CV_SEQ_KIND_GENERIC)) + +#define CV_IS_SEQ_CURVE( seq ) (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE) +#define CV_IS_SEQ_CLOSED( seq ) (((seq)->flags & CV_SEQ_FLAG_CLOSED) != 0) +#define CV_IS_SEQ_CONVEX( seq ) 0 +#define CV_IS_SEQ_HOLE( seq ) (((seq)->flags & CV_SEQ_FLAG_HOLE) != 0) +#define CV_IS_SEQ_SIMPLE( seq ) 1 + +/** type checking macros */
+#define CV_IS_SEQ_POINT_SET( seq ) \ + ((CV_SEQ_ELTYPE(seq) == CV_32SC2 || CV_SEQ_ELTYPE(seq) == CV_32FC2)) + +#define CV_IS_SEQ_POINT_SUBSET( seq ) \ + (CV_IS_SEQ_INDEX( seq ) || CV_SEQ_ELTYPE(seq) == CV_SEQ_ELTYPE_PPOINT) + +#define CV_IS_SEQ_POLYLINE( seq ) \ + (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE && CV_IS_SEQ_POINT_SET(seq)) + +#define CV_IS_SEQ_POLYGON( seq ) \ + (CV_IS_SEQ_POLYLINE(seq) && CV_IS_SEQ_CLOSED(seq)) + +#define CV_IS_SEQ_CHAIN( seq ) \ + (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE && (seq)->elem_size == 1) + +#define CV_IS_SEQ_CONTOUR( seq ) \ + (CV_IS_SEQ_CLOSED(seq) && (CV_IS_SEQ_POLYLINE(seq) || CV_IS_SEQ_CHAIN(seq))) + +#define CV_IS_SEQ_CHAIN_CONTOUR( seq ) \ + (CV_IS_SEQ_CHAIN( seq ) && CV_IS_SEQ_CLOSED( seq )) + +#define CV_IS_SEQ_POLYGON_TREE( seq ) \ + (CV_SEQ_ELTYPE (seq) == CV_SEQ_ELTYPE_TRIAN_ATR && \ + CV_SEQ_KIND( seq ) == CV_SEQ_KIND_BIN_TREE ) + +#define CV_IS_GRAPH( seq ) \ + (CV_IS_SET(seq) && CV_SEQ_KIND((CvSet*)(seq)) == CV_SEQ_KIND_GRAPH) + +#define CV_IS_GRAPH_ORIENTED( seq ) \ + (((seq)->flags & CV_GRAPH_FLAG_ORIENTED) != 0) + +#define CV_IS_SUBDIV2D( seq ) \ + (CV_IS_SET(seq) && CV_SEQ_KIND((CvSet*)(seq)) == CV_SEQ_KIND_SUBDIV2D) + +/****************************************************************************************/ +/* Sequence writer & reader */ +/****************************************************************************************/ + +#define CV_SEQ_WRITER_FIELDS() \ + int header_size; \ + CvSeq* seq; /**< the sequence written */ \ + CvSeqBlock* block; /**< current block */ \ + schar* ptr; /**< pointer to free space */ \ + schar* block_min; /**< pointer to the beginning of block*/\ + schar* block_max; /**< pointer to the end of block */ + +typedef struct CvSeqWriter +{ + CV_SEQ_WRITER_FIELDS() +} +CvSeqWriter; + + +#define CV_SEQ_READER_FIELDS() \ + int header_size; \ + CvSeq* seq; /**< sequence, beign read */ \ + CvSeqBlock* block; /**< current block */ \ + schar* ptr; /**< pointer to element be read next */ \ + schar* block_min; /**< pointer to the beginning of block */\ + schar* block_max; /**< pointer to the end of block */ \ + int delta_index;/**< = seq->first->start_index */ \ + schar* prev_elem; /**< pointer to previous element */ + +typedef struct CvSeqReader +{ + CV_SEQ_READER_FIELDS() +} +CvSeqReader; + +/****************************************************************************************/ +/* Operations on sequences */ +/****************************************************************************************/ + +#define CV_SEQ_ELEM( seq, elem_type, index ) \ +/** assert gives some guarantee that parameter is valid */ \ +( assert(sizeof((seq)->first[0]) == sizeof(CvSeqBlock) && \ + (seq)->elem_size == sizeof(elem_type)), \ + (elem_type*)((seq)->first && (unsigned)index < \ + (unsigned)((seq)->first->count) ? 
\ + (seq)->first->data + (index) * sizeof(elem_type) : \ + cvGetSeqElem( (CvSeq*)(seq), (index) ))) +#define CV_GET_SEQ_ELEM( elem_type, seq, index ) CV_SEQ_ELEM( (seq), elem_type, (index) ) + +/** Add element to sequence: */ +#define CV_WRITE_SEQ_ELEM_VAR( elem_ptr, writer ) \ +{ \ + if( (writer).ptr >= (writer).block_max ) \ + { \ + cvCreateSeqBlock( &writer); \ + } \ + memcpy((writer).ptr, elem_ptr, (writer).seq->elem_size);\ + (writer).ptr += (writer).seq->elem_size; \ +} + +#define CV_WRITE_SEQ_ELEM( elem, writer ) \ +{ \ + assert( (writer).seq->elem_size == sizeof(elem)); \ + if( (writer).ptr >= (writer).block_max ) \ + { \ + cvCreateSeqBlock( &writer); \ + } \ + assert( (writer).ptr <= (writer).block_max - sizeof(elem));\ + memcpy((writer).ptr, &(elem), sizeof(elem)); \ + (writer).ptr += sizeof(elem); \ +} + + +/** Move reader position forward: */ +#define CV_NEXT_SEQ_ELEM( elem_size, reader ) \ +{ \ + if( ((reader).ptr += (elem_size)) >= (reader).block_max ) \ + { \ + cvChangeSeqBlock( &(reader), 1 ); \ + } \ +} + + +/** Move reader position backward: */ +#define CV_PREV_SEQ_ELEM( elem_size, reader ) \ +{ \ + if( ((reader).ptr -= (elem_size)) < (reader).block_min ) \ + { \ + cvChangeSeqBlock( &(reader), -1 ); \ + } \ +} + +/** Read element and move read position forward: */ +#define CV_READ_SEQ_ELEM( elem, reader ) \ +{ \ + assert( (reader).seq->elem_size == sizeof(elem)); \ + memcpy( &(elem), (reader).ptr, sizeof((elem))); \ + CV_NEXT_SEQ_ELEM( sizeof(elem), reader ) \ +} + +/** Read element and move read position backward: */ +#define CV_REV_READ_SEQ_ELEM( elem, reader ) \ +{ \ + assert( (reader).seq->elem_size == sizeof(elem)); \ + memcpy(&(elem), (reader).ptr, sizeof((elem))); \ + CV_PREV_SEQ_ELEM( sizeof(elem), reader ) \ +} + + +#define CV_READ_CHAIN_POINT( _pt, reader ) \ +{ \ + (_pt) = (reader).pt; \ + if( (reader).ptr ) \ + { \ + CV_READ_SEQ_ELEM( (reader).code, (reader)); \ + assert( ((reader).code & ~7) == 0 ); \ + (reader).pt.x += (reader).deltas[(int)(reader).code][0]; \ + (reader).pt.y += (reader).deltas[(int)(reader).code][1]; \ + } \ +} + +#define CV_CURRENT_POINT( reader ) (*((CvPoint*)((reader).ptr))) +#define CV_PREV_POINT( reader ) (*((CvPoint*)((reader).prev_elem))) + +#define CV_READ_EDGE( pt1, pt2, reader ) \ +{ \ + assert( sizeof(pt1) == sizeof(CvPoint) && \ + sizeof(pt2) == sizeof(CvPoint) && \ + reader.seq->elem_size == sizeof(CvPoint)); \ + (pt1) = CV_PREV_POINT( reader ); \ + (pt2) = CV_CURRENT_POINT( reader ); \ + (reader).prev_elem = (reader).ptr; \ + CV_NEXT_SEQ_ELEM( sizeof(CvPoint), (reader)); \ +} + +/************ Graph macros ************/ + +/** Return next graph edge for given vertex: */ +#define CV_NEXT_GRAPH_EDGE( edge, vertex ) \ + (assert((edge)->vtx[0] == (vertex) || (edge)->vtx[1] == (vertex)), \ + (edge)->next[(edge)->vtx[1] == (vertex)]) + + + +/****************************************************************************************\ +* Data structures for persistence (a.k.a serialization) functionality * +\****************************************************************************************/ + +/** "black box" file storage */ +typedef struct CvFileStorage CvFileStorage; + +/** Storage flags: */ +#define CV_STORAGE_READ 0 +#define CV_STORAGE_WRITE 1 +#define CV_STORAGE_WRITE_TEXT CV_STORAGE_WRITE +#define CV_STORAGE_WRITE_BINARY CV_STORAGE_WRITE +#define CV_STORAGE_APPEND 2 +#define CV_STORAGE_MEMORY 4 +#define CV_STORAGE_FORMAT_MASK (7<<3) +#define CV_STORAGE_FORMAT_AUTO 0 +#define CV_STORAGE_FORMAT_XML 8 +#define 
 CV_STORAGE_FORMAT_YAML 16 +#define CV_STORAGE_FORMAT_JSON 24 +#define CV_STORAGE_BASE64 64 +#define CV_STORAGE_WRITE_BASE64 (CV_STORAGE_BASE64 | CV_STORAGE_WRITE) + +/** @brief List of attributes. : + +In the current implementation, attributes are used to pass extra parameters when writing user +objects (see cvWrite). XML attributes inside tags are not supported, aside from the object type +specification (type_id attribute). +@see cvAttrList, cvAttrValue + */ +typedef struct CvAttrList +{ + const char** attr; /**< NULL-terminated array of (attribute_name,attribute_value) pairs. */ + struct CvAttrList* next; /**< Pointer to next chunk of the attributes list. */ +} +CvAttrList; + +/** initializes CvAttrList structure */ +CV_INLINE CvAttrList cvAttrList( const char** attr CV_DEFAULT(NULL), + CvAttrList* next CV_DEFAULT(NULL) ) +{ + CvAttrList l; + l.attr = attr; + l.next = next; + + return l; +} + +struct CvTypeInfo; + +#define CV_NODE_NONE 0 +#define CV_NODE_INT 1 +#define CV_NODE_INTEGER CV_NODE_INT +#define CV_NODE_REAL 2 +#define CV_NODE_FLOAT CV_NODE_REAL +#define CV_NODE_STR 3 +#define CV_NODE_STRING CV_NODE_STR +#define CV_NODE_REF 4 /**< not used */ +#define CV_NODE_SEQ 5 +#define CV_NODE_MAP 6 +#define CV_NODE_TYPE_MASK 7 + +#define CV_NODE_TYPE(flags) ((flags) & CV_NODE_TYPE_MASK) + +/** file node flags */ +#define CV_NODE_FLOW 8 /**< Used only for writing structures in YAML format. */ +#define CV_NODE_USER 16 +#define CV_NODE_EMPTY 32 +#define CV_NODE_NAMED 64 + +#define CV_NODE_IS_INT(flags) (CV_NODE_TYPE(flags) == CV_NODE_INT) +#define CV_NODE_IS_REAL(flags) (CV_NODE_TYPE(flags) == CV_NODE_REAL) +#define CV_NODE_IS_STRING(flags) (CV_NODE_TYPE(flags) == CV_NODE_STRING) +#define CV_NODE_IS_SEQ(flags) (CV_NODE_TYPE(flags) == CV_NODE_SEQ) +#define CV_NODE_IS_MAP(flags) (CV_NODE_TYPE(flags) == CV_NODE_MAP) +#define CV_NODE_IS_COLLECTION(flags) (CV_NODE_TYPE(flags) >= CV_NODE_SEQ) +#define CV_NODE_IS_FLOW(flags) (((flags) & CV_NODE_FLOW) != 0) +#define CV_NODE_IS_EMPTY(flags) (((flags) & CV_NODE_EMPTY) != 0) +#define CV_NODE_IS_USER(flags) (((flags) & CV_NODE_USER) != 0) +#define CV_NODE_HAS_NAME(flags) (((flags) & CV_NODE_NAMED) != 0) + +#define CV_NODE_SEQ_SIMPLE 256 +#define CV_NODE_SEQ_IS_SIMPLE(seq) (((seq)->flags & CV_NODE_SEQ_SIMPLE) != 0) + +typedef struct CvString +{ + int len; + char* ptr; +} +CvString; + +/** All the keys (names) of elements in the read file storage + are stored in the hash to speed up the lookup operations: */ +typedef struct CvStringHashNode +{ + unsigned hashval; + CvString str; + struct CvStringHashNode* next; +} +CvStringHashNode; + +typedef struct CvGenericHash CvFileNodeHash; + +/** Basic element of the file storage - scalar or collection: */ +typedef struct CvFileNode +{ + int tag; + struct CvTypeInfo* info; /**< type information + (only for user-defined object, for others it is 0) */ + union + { + double f; /**< scalar floating-point number */ + int i; /**< scalar integer number */ + CvString str; /**< text string */ + CvSeq* seq; /**< sequence (ordered collection of file nodes) */ + CvFileNodeHash* map; /**< map (collection of named file nodes) */ + } data; +} +CvFileNode; + +#ifdef __cplusplus +extern "C" { +#endif +typedef int (CV_CDECL *CvIsInstanceFunc)( const void* struct_ptr ); +typedef void (CV_CDECL *CvReleaseFunc)( void** struct_dblptr ); +typedef void* (CV_CDECL *CvReadFunc)( CvFileStorage* storage, CvFileNode* node ); +typedef void (CV_CDECL *CvWriteFunc)( CvFileStorage* storage, const char* name, + const void* struct_ptr, CvAttrList attributes ); +typedef void* (CV_CDECL *CvCloneFunc)( const void* struct_ptr ); +#ifdef __cplusplus +} +#endif + +/** @brief Type information + +The structure contains information about one of the standard or user-defined types. Instances of the +type may or may not contain a pointer to the corresponding CvTypeInfo structure. In any case, there +is a way to find the type info structure for a given object using the cvTypeOf function.
+Alternatively, type info can be found by type name using cvFindType, which is used when an object +is read from file storage. The user can register a new type with cvRegisterType that adds the type +information structure into the beginning of the type list. Thus, it is possible to create +specialized types from generic standard types and override the basic methods. + */ +typedef struct CvTypeInfo +{ + int flags; /**< not used */ + int header_size; /**< sizeof(CvTypeInfo) */ + struct CvTypeInfo* prev; /**< previous registered type in the list */ + struct CvTypeInfo* next; /**< next registered type in the list */ + const char* type_name; /**< type name, written to file storage */ + CvIsInstanceFunc is_instance; /**< checks if the passed object belongs to the type */ + CvReleaseFunc release; /**< releases object (memory etc.) */ + CvReadFunc read; /**< reads object from file storage */ + CvWriteFunc write; /**< writes object to file storage */ + CvCloneFunc clone; /**< creates a copy of the object */ +} +CvTypeInfo; + + +/**** System data types ******/ + +typedef struct CvPluginFuncInfo +{ + void** func_addr; + void* default_func_addr; + const char* func_names; + int search_modules; + int loaded_from; +} +CvPluginFuncInfo; + +typedef struct CvModuleInfo +{ + struct CvModuleInfo* next; + const char* name; + const char* version; + CvPluginFuncInfo* func_tab; +} +CvModuleInfo; + +/** @} */ + +#endif /*OPENCV_CORE_TYPES_H*/ + +/* End of file. */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/utility.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/utility.hpp new file mode 100644 index 00000000..b97458d2 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/utility.hpp @@ -0,0 +1,1237 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Copyright (C) 2015, Itseez Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_CORE_UTILITY_H +#define OPENCV_CORE_UTILITY_H + +#ifndef __cplusplus +# error utility.hpp header must be compiled as C++ +#endif + +#if defined(check) +# warning Detected Apple 'check' macro definition, it can cause build conflicts. Please, include this header before any Apple headers. +#endif + +#include "opencv2/core.hpp" +#include + +#ifdef CV_CXX11 +#include +#endif + +namespace cv +{ + +//! @addtogroup core_utils +//! @{ + +/** @brief Automatically Allocated Buffer Class + + The class is used for temporary buffers in functions and methods. + If a temporary buffer is usually small (a few K's of memory), + but its size depends on the parameters, it makes sense to create a small + fixed-size array on stack and use it if it's large enough. If the required buffer size + is larger than the fixed size, another buffer of sufficient size is allocated dynamically + and released after the processing. Therefore, in typical cases, when the buffer size is small, + there is no overhead associated with malloc()/free(). + At the same time, there is no limit on the size of processed data. + + This is what AutoBuffer does. The template takes 2 parameters - type of the buffer elements and + the number of stack-allocated elements. Here is how the class is used: + + \code + void my_func(const cv::Mat& m) + { + cv::AutoBuffer buf(1000); // create automatic buffer containing 1000 floats + + buf.allocate(m.rows); // if m.rows <= 1000, the pre-allocated buffer is used, + // otherwise the buffer of "m.rows" floats will be allocated + // dynamically and deallocated in cv::AutoBuffer destructor + ... + } + \endcode +*/ +#ifdef OPENCV_ENABLE_MEMORY_SANITIZER +template class AutoBuffer +#else +template class AutoBuffer +#endif +{ +public: + typedef _Tp value_type; + + //! the default constructor + AutoBuffer(); + //! constructor taking the real buffer size + explicit AutoBuffer(size_t _size); + + //! the copy constructor + AutoBuffer(const AutoBuffer<_Tp, fixed_size>& buf); + //! the assignment operator + AutoBuffer<_Tp, fixed_size>& operator = (const AutoBuffer<_Tp, fixed_size>& buf); + + //! destructor. calls deallocate() + ~AutoBuffer(); + + //! allocates the new buffer of size _size. if the _size is small enough, stack-allocated buffer is used + void allocate(size_t _size); + //! deallocates the buffer if it was dynamically allocated + void deallocate(); + //! resizes the buffer and preserves the content + void resize(size_t _size); + //! returns the current buffer size + size_t size() const; + //! returns pointer to the real buffer, stack-allocated or heap-allocated + inline _Tp* data() { return ptr; } + //! returns read-only pointer to the real buffer, stack-allocated or heap-allocated + inline const _Tp* data() const { return ptr; } + +#if !defined(OPENCV_DISABLE_DEPRECATED_COMPATIBILITY) // use to .data() calls instead + //! returns pointer to the real buffer, stack-allocated or heap-allocated + operator _Tp* () { return ptr; } + //! 
returns read-only pointer to the real buffer, stack-allocated or heap-allocated + operator const _Tp* () const { return ptr; } +#else + //! returns a reference to the element at specified location. No bounds checking is performed in Release builds. + inline _Tp& operator[] (size_t i) { CV_DbgCheckLT(i, sz, "out of range"); return ptr[i]; } + //! returns a reference to the element at specified location. No bounds checking is performed in Release builds. + inline const _Tp& operator[] (size_t i) const { CV_DbgCheckLT(i, sz, "out of range"); return ptr[i]; } +#endif + +protected: + //! pointer to the real buffer, can point to buf if the buffer is small enough + _Tp* ptr; + //! size of the real buffer + size_t sz; + //! pre-allocated buffer. At least 1 element to confirm C++ standard requirements + _Tp buf[(fixed_size > 0) ? fixed_size : 1]; +}; + +/** @brief Sets/resets the break-on-error mode. + +When the break-on-error mode is set, the default error handler issues a hardware exception, which +can make debugging more convenient. + +\return the previous state + */ +CV_EXPORTS bool setBreakOnError(bool flag); + +extern "C" typedef int (*ErrorCallback)( int status, const char* func_name, + const char* err_msg, const char* file_name, + int line, void* userdata ); + + +/** @brief Sets the new error handler and the optional user data. + + The function sets the new error handler, called from cv::error(). + + \param errCallback the new error handler. If NULL, the default error handler is used. + \param userdata the optional user data pointer, passed to the callback. + \param prevUserdata the optional output parameter where the previous user data pointer is stored + + \return the previous error handler +*/ +CV_EXPORTS ErrorCallback redirectError( ErrorCallback errCallback, void* userdata=0, void** prevUserdata=0); + +CV_EXPORTS String tempfile( const char* suffix = 0); +CV_EXPORTS void glob(String pattern, std::vector& result, bool recursive = false); + +/** @brief OpenCV will try to set the number of threads for the next parallel region. + +If threads == 0, OpenCV will disable threading optimizations and run all it's functions +sequentially. Passing threads \< 0 will reset threads number to system default. This function must +be called outside of parallel region. + +OpenCV will try to run its functions with specified threads number, but some behaviour differs from +framework: +- `TBB` - User-defined parallel constructions will run with the same threads number, if + another is not specified. If later on user creates his own scheduler, OpenCV will use it. +- `OpenMP` - No special defined behaviour. +- `Concurrency` - If threads == 1, OpenCV will disable threading optimizations and run its + functions sequentially. +- `GCD` - Supports only values \<= 0. +- `C=` - No special defined behaviour. +@param nthreads Number of threads used by OpenCV. +@sa getNumThreads, getThreadNum + */ +CV_EXPORTS_W void setNumThreads(int nthreads); + +/** @brief Returns the number of threads used by OpenCV for parallel regions. + +Always returns 1 if OpenCV is built without threading support. + +The exact meaning of return value depends on the threading framework used by OpenCV library: +- `TBB` - The number of threads, that OpenCV will try to use for parallel regions. If there is + any tbb::thread_scheduler_init in user code conflicting with OpenCV, then function returns + default number of threads used by TBB library. +- `OpenMP` - An upper bound on the number of threads that could be used to form a new team. 
+- `Concurrency` - The number of threads, that OpenCV will try to use for parallel regions. +- `GCD` - Unsupported; returns the GCD thread pool limit (512) for compatibility. +- `C=` - The number of threads, that OpenCV will try to use for parallel regions, if before + called setNumThreads with threads \> 0, otherwise returns the number of logical CPUs, + available for the process. +@sa setNumThreads, getThreadNum + */ +CV_EXPORTS_W int getNumThreads(); + +/** @brief Returns the index of the currently executed thread within the current parallel region. Always +returns 0 if called outside of parallel region. + +@deprecated Current implementation doesn't corresponding to this documentation. + +The exact meaning of the return value depends on the threading framework used by OpenCV library: +- `TBB` - Unsupported with current 4.1 TBB release. Maybe will be supported in future. +- `OpenMP` - The thread number, within the current team, of the calling thread. +- `Concurrency` - An ID for the virtual processor that the current context is executing on (0 + for master thread and unique number for others, but not necessary 1,2,3,...). +- `GCD` - System calling thread's ID. Never returns 0 inside parallel region. +- `C=` - The index of the current parallel task. +@sa setNumThreads, getNumThreads + */ +CV_EXPORTS_W int getThreadNum(); + +/** @brief Returns full configuration time cmake output. + +Returned value is raw cmake output including version control system revision, compiler version, +compiler flags, enabled modules and third party libraries, etc. Output format depends on target +architecture. + */ +CV_EXPORTS_W const String& getBuildInformation(); + +/** @brief Returns library version string + +For example "3.4.1-dev". + +@sa getMajorVersion, getMinorVersion, getRevisionVersion +*/ +CV_EXPORTS_W String getVersionString(); + +/** @brief Returns major library version */ +CV_EXPORTS_W int getVersionMajor(); + +/** @brief Returns minor library version */ +CV_EXPORTS_W int getVersionMinor(); + +/** @brief Returns revision field of the library version */ +CV_EXPORTS_W int getVersionRevision(); + +/** @brief Returns the number of ticks. + +The function returns the number of ticks after the certain event (for example, when the machine was +turned on). It can be used to initialize RNG or to measure a function execution time by reading the +tick count before and after the function call. +@sa getTickFrequency, TickMeter + */ +CV_EXPORTS_W int64 getTickCount(); + +/** @brief Returns the number of ticks per second. + +The function returns the number of ticks per second. That is, the following code computes the +execution time in seconds: +@code + double t = (double)getTickCount(); + // do something ... + t = ((double)getTickCount() - t)/getTickFrequency(); +@endcode +@sa getTickCount, TickMeter + */ +CV_EXPORTS_W double getTickFrequency(); + +/** @brief a Class to measure passing time. + +The class computes passing time by counting the number of ticks per second. That is, the following code computes the +execution time in seconds: +@snippet snippets/core_various.cpp TickMeter_total + +It is also possible to compute the average time over multiple runs: +@snippet snippets/core_various.cpp TickMeter_average + +@sa getTickCount, getTickFrequency +*/ +class CV_EXPORTS_W TickMeter +{ +public: + //! the default constructor + CV_WRAP TickMeter() + { + reset(); + } + + //! starts counting ticks. + CV_WRAP void start() + { + startTime = cv::getTickCount(); + } + + //! stops counting ticks. 
+ CV_WRAP void stop() + { + int64 time = cv::getTickCount(); + if (startTime == 0) + return; + ++counter; + sumTime += (time - startTime); + startTime = 0; + } + + //! returns counted ticks. + CV_WRAP int64 getTimeTicks() const + { + return sumTime; + } + + //! returns passed time in microseconds. + CV_WRAP double getTimeMicro() const + { + return getTimeMilli()*1e3; + } + + //! returns passed time in milliseconds. + CV_WRAP double getTimeMilli() const + { + return getTimeSec()*1e3; + } + + //! returns passed time in seconds. + CV_WRAP double getTimeSec() const + { + return (double)getTimeTicks() / getTickFrequency(); + } + + //! returns internal counter value. + CV_WRAP int64 getCounter() const + { + return counter; + } + + //! returns average FPS (frames per second) value. + CV_WRAP double getFPS() const + { + const double sec = getTimeSec(); + if (sec < DBL_EPSILON) + return 0.; + return counter / sec; + } + + //! returns average time in seconds + CV_WRAP double getAvgTimeSec() const + { + if (counter <= 0) + return 0.; + return getTimeSec() / counter; + } + + //! returns average time in milliseconds + CV_WRAP double getAvgTimeMilli() const + { + return getAvgTimeSec() * 1e3; + } + + //! resets internal values. + CV_WRAP void reset() + { + startTime = 0; + sumTime = 0; + counter = 0; + } + +private: + int64 counter; + int64 sumTime; + int64 startTime; +}; + +/** @brief output operator +@code +TickMeter tm; +tm.start(); +// do something ... +tm.stop(); +std::cout << tm; +@endcode +*/ + +static inline +std::ostream& operator << (std::ostream& out, const TickMeter& tm) +{ + return out << tm.getTimeSec() << "sec"; +} + +/** @brief Returns the number of CPU ticks. + +The function returns the current number of CPU ticks on some architectures (such as x86, x64, +PowerPC). On other platforms the function is equivalent to getTickCount. It can also be used for +very accurate time measurements, as well as for RNG initialization. Note that in case of multi-CPU +systems a thread, from which getCPUTickCount is called, can be suspended and resumed at another CPU +with its own counter. So, theoretically (and practically) the subsequent calls to the function do +not necessary return the monotonously increasing values. Also, since a modern CPU varies the CPU +frequency depending on the load, the number of CPU clocks spent in some code cannot be directly +converted to time units. Therefore, getTickCount is generally a preferable solution for measuring +execution time. + */ +CV_EXPORTS_W int64 getCPUTickCount(); + +/** @brief Returns true if the specified feature is supported by the host hardware. + +The function returns true if the host hardware supports the specified feature. When user calls +setUseOptimized(false), the subsequent calls to checkHardwareSupport() will return false until +setUseOptimized(true) is called. This way user can dynamically switch on and off the optimized code +in OpenCV. +@param feature The feature of interest, one of cv::CpuFeatures + */ +CV_EXPORTS_W bool checkHardwareSupport(int feature); + +/** @brief Returns feature name by ID + +Returns empty string if feature is not defined +*/ +CV_EXPORTS_W String getHardwareFeatureName(int feature); + +/** @brief Returns list of CPU features enabled during compilation. 
+ +Returned value is a string containing space separated list of CPU features with following markers: + +- no markers - baseline features +- prefix `*` - features enabled in dispatcher +- suffix `?` - features enabled but not available in HW + +Example: `SSE SSE2 SSE3 *SSE4.1 *SSE4.2 *FP16 *AVX *AVX2 *AVX512-SKX?` +*/ +CV_EXPORTS_W std::string getCPUFeaturesLine(); + +/** @brief Returns the number of logical CPUs available for the process. + */ +CV_EXPORTS_W int getNumberOfCPUs(); + + +/** @brief Aligns a pointer to the specified number of bytes. + +The function returns the aligned pointer of the same type as the input pointer: +\f[\texttt{(_Tp*)(((size_t)ptr + n-1) & -n)}\f] +@param ptr Aligned pointer. +@param n Alignment size that must be a power of two. + */ +template static inline _Tp* alignPtr(_Tp* ptr, int n=(int)sizeof(_Tp)) +{ + CV_DbgAssert((n & (n - 1)) == 0); // n is a power of 2 + return (_Tp*)(((size_t)ptr + n-1) & -n); +} + +/** @brief Aligns a buffer size to the specified number of bytes. + +The function returns the minimum number that is greater than or equal to sz and is divisible by n : +\f[\texttt{(sz + n-1) & -n}\f] +@param sz Buffer size to align. +@param n Alignment size that must be a power of two. + */ +static inline size_t alignSize(size_t sz, int n) +{ + CV_DbgAssert((n & (n - 1)) == 0); // n is a power of 2 + return (sz + n-1) & -n; +} + +/** @brief Integer division with result round up. + +Use this function instead of `ceil((float)a / b)` expressions. + +@sa alignSize +*/ +static inline int divUp(int a, unsigned int b) +{ + CV_DbgAssert(a >= 0); + return (a + b - 1) / b; +} +/** @overload */ +static inline size_t divUp(size_t a, unsigned int b) +{ + return (a + b - 1) / b; +} + +/** @brief Round first value up to the nearest multiple of second value. + +Use this function instead of `ceil((float)a / b) * b` expressions. + +@sa divUp +*/ +static inline int roundUp(int a, unsigned int b) +{ + CV_DbgAssert(a >= 0); + return a + b - 1 - (a + b -1) % b; +} +/** @overload */ +static inline size_t roundUp(size_t a, unsigned int b) +{ + return a + b - 1 - (a + b - 1) % b; +} + +/** @brief Alignment check of passed values + +Usage: `isAligned(...)` + +@note Alignment(N) must be a power of 2 (2**k, 2^k) +*/ +template static inline +bool isAligned(const T& data) +{ + CV_StaticAssert((N & (N - 1)) == 0, ""); // power of 2 + return (((size_t)data) & (N - 1)) == 0; +} +/** @overload */ +template static inline +bool isAligned(const void* p1) +{ + return isAligned((size_t)p1); +} +/** @overload */ +template static inline +bool isAligned(const void* p1, const void* p2) +{ + return isAligned(((size_t)p1)|((size_t)p2)); +} +/** @overload */ +template static inline +bool isAligned(const void* p1, const void* p2, const void* p3) +{ + return isAligned(((size_t)p1)|((size_t)p2)|((size_t)p3)); +} +/** @overload */ +template static inline +bool isAligned(const void* p1, const void* p2, const void* p3, const void* p4) +{ + return isAligned(((size_t)p1)|((size_t)p2)|((size_t)p3)|((size_t)p4)); +} + +/** @brief Enables or disables the optimized code. + +The function can be used to dynamically turn on and off optimized dispatched code (code that uses SSE4.2, AVX/AVX2, +and other instructions on the platforms that support it). It sets a global flag that is further +checked by OpenCV functions. 
Since the flag is not checked in the inner OpenCV loops, it is only +safe to call the function on the very top level in your application where you can be sure that no +other OpenCV function is currently executed. + +By default, the optimized code is enabled unless you disable it in CMake. The current status can be +retrieved using useOptimized. +@param onoff The boolean flag specifying whether the optimized code should be used (onoff=true) +or not (onoff=false). + */ +CV_EXPORTS_W void setUseOptimized(bool onoff); + +/** @brief Returns the status of optimized code usage. + +The function returns true if the optimized code is enabled. Otherwise, it returns false. + */ +CV_EXPORTS_W bool useOptimized(); + +static inline size_t getElemSize(int type) { return (size_t)CV_ELEM_SIZE(type); } + +/////////////////////////////// Parallel Primitives ////////////////////////////////// + +/** @brief Base class for parallel data processors +*/ +class CV_EXPORTS ParallelLoopBody +{ +public: + virtual ~ParallelLoopBody(); + virtual void operator() (const Range& range) const = 0; +}; + +/** @brief Parallel data processor +*/ +CV_EXPORTS void parallel_for_(const Range& range, const ParallelLoopBody& body, double nstripes=-1.); + +#ifdef CV_CXX11 +class ParallelLoopBodyLambdaWrapper : public ParallelLoopBody +{ +private: + std::function m_functor; +public: + ParallelLoopBodyLambdaWrapper(std::function functor) : + m_functor(functor) + { } + + virtual void operator() (const cv::Range& range) const CV_OVERRIDE + { + m_functor(range); + } +}; + +inline void parallel_for_(const Range& range, std::function functor, double nstripes=-1.) +{ + parallel_for_(range, ParallelLoopBodyLambdaWrapper(functor), nstripes); +} +#endif + +/////////////////////////////// forEach method of cv::Mat //////////////////////////// +template inline +void Mat::forEach_impl(const Functor& operation) { + if (false) { + operation(*reinterpret_cast<_Tp*>(0), reinterpret_cast(0)); + // If your compiler fails in this line. + // Please check that your functor signature is + // (_Tp&, const int*) <- multi-dimensional + // or (_Tp&, void*) <- in case you don't need current idx. + } + + CV_Assert(!empty()); + CV_Assert(this->total() / this->size[this->dims - 1] <= INT_MAX); + const int LINES = static_cast(this->total() / this->size[this->dims - 1]); + + class PixelOperationWrapper :public ParallelLoopBody + { + public: + PixelOperationWrapper(Mat_<_Tp>* const frame, const Functor& _operation) + : mat(frame), op(_operation) {} + virtual ~PixelOperationWrapper(){} + // ! Overloaded virtual operator + // convert range call to row call. + virtual void operator()(const Range &range) const CV_OVERRIDE + { + const int DIMS = mat->dims; + const int COLS = mat->size[DIMS - 1]; + if (DIMS <= 2) { + for (int row = range.start; row < range.end; ++row) { + this->rowCall2(row, COLS); + } + } else { + std::vector idx(DIMS); /// idx is modified in this->rowCall + idx[DIMS - 2] = range.start - 1; + + for (int line_num = range.start; line_num < range.end; ++line_num) { + idx[DIMS - 2]++; + for (int i = DIMS - 2; i >= 0; --i) { + if (idx[i] >= mat->size[i]) { + idx[i - 1] += idx[i] / mat->size[i]; + idx[i] %= mat->size[i]; + continue; // carry-over; + } + else { + break; + } + } + this->rowCall(&idx[0], COLS, DIMS); + } + } + } + private: + Mat_<_Tp>* const mat; + const Functor op; + // ! Call operator for each elements in this row. 
+ inline void rowCall(int* const idx, const int COLS, const int DIMS) const { + int &col = idx[DIMS - 1]; + col = 0; + _Tp* pixel = &(mat->template at<_Tp>(idx)); + + while (col < COLS) { + op(*pixel, const_cast(idx)); + pixel++; col++; + } + col = 0; + } + // ! Call operator for each elements in this row. 2d mat special version. + inline void rowCall2(const int row, const int COLS) const { + union Index{ + int body[2]; + operator const int*() const { + return reinterpret_cast(this); + } + int& operator[](const int i) { + return body[i]; + } + } idx = {{row, 0}}; + // Special union is needed to avoid + // "error: array subscript is above array bounds [-Werror=array-bounds]" + // when call the functor `op` such that access idx[3]. + + _Tp* pixel = &(mat->template at<_Tp>(idx)); + const _Tp* const pixel_end = pixel + COLS; + while(pixel < pixel_end) { + op(*pixel++, static_cast(idx)); + idx[1]++; + } + } + PixelOperationWrapper& operator=(const PixelOperationWrapper &) { + CV_Assert(false); + // We can not remove this implementation because Visual Studio warning C4822. + return *this; + } + }; + + parallel_for_(cv::Range(0, LINES), PixelOperationWrapper(reinterpret_cast*>(this), operation)); +} + +/////////////////////////// Synchronization Primitives /////////////////////////////// + +class CV_EXPORTS Mutex +{ +public: + Mutex(); + ~Mutex(); + Mutex(const Mutex& m); + Mutex& operator = (const Mutex& m); + + void lock(); + bool trylock(); + void unlock(); + + struct Impl; +protected: + Impl* impl; +}; + +class CV_EXPORTS AutoLock +{ +public: + AutoLock(Mutex& m) : mutex(&m) { mutex->lock(); } + ~AutoLock() { mutex->unlock(); } +protected: + Mutex* mutex; +private: + AutoLock(const AutoLock&); + AutoLock& operator = (const AutoLock&); +}; + + +/** @brief Designed for command line parsing + +The sample below demonstrates how to use CommandLineParser: +@code + CommandLineParser parser(argc, argv, keys); + parser.about("Application name v1.0.0"); + + if (parser.has("help")) + { + parser.printMessage(); + return 0; + } + + int N = parser.get("N"); + double fps = parser.get("fps"); + String path = parser.get("path"); + + use_time_stamp = parser.has("timestamp"); + + String img1 = parser.get(0); + String img2 = parser.get(1); + + int repeat = parser.get(2); + + if (!parser.check()) + { + parser.printErrors(); + return 0; + } +@endcode + +### Keys syntax + +The keys parameter is a string containing several blocks, each one is enclosed in curly braces and +describes one argument. Each argument contains three parts separated by the `|` symbol: + +-# argument names is a space-separated list of option synonyms (to mark argument as positional, prefix it with the `@` symbol) +-# default value will be used if the argument was not provided (can be empty) +-# help message (can be empty) + +For example: + +@code{.cpp} + const String keys = + "{help h usage ? | | print this message }" + "{@image1 | | image1 for compare }" + "{@image2 || image2 for compare }" + "{@repeat |1 | number }" + "{path |. | path to file }" + "{fps | -1.0 | fps for output video }" + "{N count |100 | count of objects }" + "{ts timestamp | | use time stamp }" + ; +} +@endcode + +Note that there are no default values for `help` and `timestamp` so we can check their presence using the `has()` method. +Arguments with default values are considered to be always present. Use the `get()` method in these cases to check their +actual value instead. 
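For instance, a caller might treat the two kinds of keys like this (an illustrative sketch; the key names are the ones from the example table above):

@code{.cpp}
    bool use_time_stamp = parser.has("timestamp");  // no default value: test presence only
    double fps = parser.get<double>("fps");         // has a default (-1.0): always present, read its value
@endcode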
+ +String keys like `get("@image1")` return the empty string `""` by default - even with an empty default value. +Use the special `` default value to enforce that the returned string must not be empty. (like in `get("@image2")`) + +### Usage + +For the described keys: + +@code{.sh} + # Good call (3 positional parameters: image1, image2 and repeat; N is 200, ts is true) + $ ./app -N=200 1.png 2.jpg 19 -ts + + # Bad call + $ ./app -fps=aaa + ERRORS: + Parameter 'fps': can not convert: [aaa] to [double] +@endcode + */ +class CV_EXPORTS CommandLineParser +{ +public: + + /** @brief Constructor + + Initializes command line parser object + + @param argc number of command line arguments (from main()) + @param argv array of command line arguments (from main()) + @param keys string describing acceptable command line parameters (see class description for syntax) + */ + CommandLineParser(int argc, const char* const argv[], const String& keys); + + /** @brief Copy constructor */ + CommandLineParser(const CommandLineParser& parser); + + /** @brief Assignment operator */ + CommandLineParser& operator = (const CommandLineParser& parser); + + /** @brief Destructor */ + ~CommandLineParser(); + + /** @brief Returns application path + + This method returns the path to the executable from the command line (`argv[0]`). + + For example, if the application has been started with such a command: + @code{.sh} + $ ./bin/my-executable + @endcode + this method will return `./bin`. + */ + String getPathToApplication() const; + + /** @brief Access arguments by name + + Returns argument converted to selected type. If the argument is not known or can not be + converted to selected type, the error flag is set (can be checked with @ref check). + + For example, define: + @code{.cpp} + String keys = "{N count||}"; + @endcode + + Call: + @code{.sh} + $ ./my-app -N=20 + # or + $ ./my-app --count=20 + @endcode + + Access: + @code{.cpp} + int N = parser.get("N"); + @endcode + + @param name name of the argument + @param space_delete remove spaces from the left and right of the string + @tparam T the argument will be converted to this type if possible + + @note You can access positional arguments by their `@`-prefixed name: + @code{.cpp} + parser.get("@image"); + @endcode + */ + template + T get(const String& name, bool space_delete = true) const + { + T val = T(); + getByName(name, space_delete, ParamType::type, (void*)&val); + return val; + } + + /** @brief Access positional arguments by index + + Returns argument converted to selected type. Indexes are counted from zero. + + For example, define: + @code{.cpp} + String keys = "{@arg1||}{@arg2||}" + @endcode + + Call: + @code{.sh} + ./my-app abc qwe + @endcode + + Access arguments: + @code{.cpp} + String val_1 = parser.get(0); // returns "abc", arg1 + String val_2 = parser.get(1); // returns "qwe", arg2 + @endcode + + @param index index of the argument + @param space_delete remove spaces from the left and right of the string + @tparam T the argument will be converted to this type if possible + */ + template + T get(int index, bool space_delete = true) const + { + T val = T(); + getByIndex(index, space_delete, ParamType::type, (void*)&val); + return val; + } + + /** @brief Check if field was provided in the command line + + @param name argument name to check + */ + bool has(const String& name) const; + + /** @brief Check for parsing errors + + Returns false if error occurred while accessing the parameters (bad conversion, missing arguments, + etc.). 
Call @ref printErrors to print error messages list. + */ + bool check() const; + + /** @brief Set the about message + + The about message will be shown when @ref printMessage is called, right before arguments table. + */ + void about(const String& message); + + /** @brief Print help message + + This method will print standard help message containing the about message and arguments description. + + @sa about + */ + void printMessage() const; + + /** @brief Print list of errors occurred + + @sa check + */ + void printErrors() const; + +protected: + void getByName(const String& name, bool space_delete, int type, void* dst) const; + void getByIndex(int index, bool space_delete, int type, void* dst) const; + + struct Impl; + Impl* impl; +}; + +//! @} core_utils + +//! @cond IGNORED + +/////////////////////////////// AutoBuffer implementation //////////////////////////////////////// + +template inline +AutoBuffer<_Tp, fixed_size>::AutoBuffer() +{ + ptr = buf; + sz = fixed_size; +} + +template inline +AutoBuffer<_Tp, fixed_size>::AutoBuffer(size_t _size) +{ + ptr = buf; + sz = fixed_size; + allocate(_size); +} + +template inline +AutoBuffer<_Tp, fixed_size>::AutoBuffer(const AutoBuffer<_Tp, fixed_size>& abuf ) +{ + ptr = buf; + sz = fixed_size; + allocate(abuf.size()); + for( size_t i = 0; i < sz; i++ ) + ptr[i] = abuf.ptr[i]; +} + +template inline AutoBuffer<_Tp, fixed_size>& +AutoBuffer<_Tp, fixed_size>::operator = (const AutoBuffer<_Tp, fixed_size>& abuf) +{ + if( this != &abuf ) + { + deallocate(); + allocate(abuf.size()); + for( size_t i = 0; i < sz; i++ ) + ptr[i] = abuf.ptr[i]; + } + return *this; +} + +template inline +AutoBuffer<_Tp, fixed_size>::~AutoBuffer() +{ deallocate(); } + +template inline void +AutoBuffer<_Tp, fixed_size>::allocate(size_t _size) +{ + if(_size <= sz) + { + sz = _size; + return; + } + deallocate(); + sz = _size; + if(_size > fixed_size) + { + ptr = new _Tp[_size]; + } +} + +template inline void +AutoBuffer<_Tp, fixed_size>::deallocate() +{ + if( ptr != buf ) + { + delete[] ptr; + ptr = buf; + sz = fixed_size; + } +} + +template inline void +AutoBuffer<_Tp, fixed_size>::resize(size_t _size) +{ + if(_size <= sz) + { + sz = _size; + return; + } + size_t i, prevsize = sz, minsize = MIN(prevsize, _size); + _Tp* prevptr = ptr; + + ptr = _size > fixed_size ? new _Tp[_size] : buf; + sz = _size; + + if( ptr != prevptr ) + for( i = 0; i < minsize; i++ ) + ptr[i] = prevptr[i]; + for( i = prevsize; i < _size; i++ ) + ptr[i] = _Tp(); + + if( prevptr != buf ) + delete[] prevptr; +} + +template inline size_t +AutoBuffer<_Tp, fixed_size>::size() const +{ return sz; } + +template<> inline std::string CommandLineParser::get(int index, bool space_delete) const +{ + return get(index, space_delete); +} +template<> inline std::string CommandLineParser::get(const String& name, bool space_delete) const +{ + return get(name, space_delete); +} + +//! 
@endcond + + +// Basic Node class for tree building +template +class CV_EXPORTS Node +{ +public: + Node() + { + m_pParent = 0; + } + Node(OBJECT& payload) : m_payload(payload) + { + m_pParent = 0; + } + ~Node() + { + removeChilds(); + if (m_pParent) + { + int idx = m_pParent->findChild(this); + if (idx >= 0) + m_pParent->m_childs.erase(m_pParent->m_childs.begin() + idx); + } + } + + Node* findChild(OBJECT& payload) const + { + for(size_t i = 0; i < this->m_childs.size(); i++) + { + if(this->m_childs[i]->m_payload == payload) + return this->m_childs[i]; + } + return NULL; + } + + int findChild(Node *pNode) const + { + for (size_t i = 0; i < this->m_childs.size(); i++) + { + if(this->m_childs[i] == pNode) + return (int)i; + } + return -1; + } + + void addChild(Node *pNode) + { + if(!pNode) + return; + + CV_Assert(pNode->m_pParent == 0); + pNode->m_pParent = this; + this->m_childs.push_back(pNode); + } + + void removeChilds() + { + for(size_t i = 0; i < m_childs.size(); i++) + { + m_childs[i]->m_pParent = 0; // avoid excessive parent vector trimming + delete m_childs[i]; + } + m_childs.clear(); + } + + int getDepth() + { + int count = 0; + Node *pParent = m_pParent; + while(pParent) count++, pParent = pParent->m_pParent; + return count; + } + +public: + OBJECT m_payload; + Node* m_pParent; + std::vector*> m_childs; +}; + + +namespace samples { + +//! @addtogroup core_utils_samples +// This section describes utility functions for OpenCV samples. +// +// @note Implementation of these utilities is not thread-safe. +// +//! @{ + +/** @brief Try to find requested data file + +Search directories: + +1. Directories passed via `addSamplesDataSearchPath()` +2. OPENCV_SAMPLES_DATA_PATH_HINT environment variable +3. OPENCV_SAMPLES_DATA_PATH environment variable + If parameter value is not empty and nothing is found then stop searching. +4. Detects build/install path based on: + a. current working directory (CWD) + b. and/or binary module location (opencv_core/opencv_world, doesn't work with static linkage) +5. Scan `/{,data,samples/data}` directories if build directory is detected or the current directory is in source tree. +6. Scan `/share/OpenCV` directory if install directory is detected. + +@see cv::utils::findDataFile + +@param relative_path Relative path to data file +@param required Specify "file not found" handling. + If true, function prints information message and raises cv::Exception. + If false, function returns empty result +@param silentMode Disables messages +@return Returns path (absolute or relative to the current directory) or empty string if file is not found +*/ +CV_EXPORTS_W cv::String findFile(const cv::String& relative_path, bool required = true, bool silentMode = false); + +CV_EXPORTS_W cv::String findFileOrKeep(const cv::String& relative_path, bool silentMode = false); + +inline cv::String findFileOrKeep(const cv::String& relative_path, bool silentMode) +{ + cv::String res = findFile(relative_path, false, silentMode); + if (res.empty()) + return relative_path; + return res; +} + +/** @brief Override search data path by adding new search location + +Use this only to override default behavior +Passed paths are used in LIFO order. + +@param path Path to used samples data +*/ +CV_EXPORTS_W void addSamplesDataSearchPath(const cv::String& path); + +/** @brief Append samples search data sub directory + +General usage is to add OpenCV modules name (`/modules//samples/data` -> `/samples/data` + `modules//samples/data`). +Passed subdirectories are used in LIFO order. 
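A minimal sketch of how the samples search helpers in this namespace fit together (the directory, sub-directory and file names are placeholders, not values mandated by these headers):

@code{.cpp}
    cv::samples::addSamplesDataSearchPath("/opt/my_data");        // consulted first (LIFO order)
    cv::samples::addSamplesDataSearchSubDirectory("testdata");    // additional sub-directory to scan
    cv::String img = cv::samples::findFile("lena.jpg", false);    // returns "" if nothing is found
    if (img.empty())
        img = cv::samples::findFileOrKeep("lena.jpg");            // falls back to the relative path
@endcode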
+ +@param subdir samples data sub directory +*/ +CV_EXPORTS_W void addSamplesDataSearchSubDirectory(const cv::String& subdir); + +//! @} +} // namespace samples + +namespace utils { + +CV_EXPORTS int getThreadID(); + +} // namespace + +} //namespace cv + +#ifdef CV_COLLECT_IMPL_DATA +#include "opencv2/core/utils/instrumentation.hpp" +#else +/// Collect implementation data on OpenCV function call. Requires ENABLE_IMPL_COLLECTION build option. +#define CV_IMPL_ADD(impl) +#endif + +#ifndef DISABLE_OPENCV_24_COMPATIBILITY +#include "opencv2/core/core_c.h" +#endif + +#endif //OPENCV_CORE_UTILITY_H diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/utils/allocator_stats.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/utils/allocator_stats.hpp new file mode 100644 index 00000000..79e93382 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/utils/allocator_stats.hpp @@ -0,0 +1,29 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#ifndef OPENCV_CORE_ALLOCATOR_STATS_HPP +#define OPENCV_CORE_ALLOCATOR_STATS_HPP + +#include "../cvdef.h" + +namespace cv { namespace utils { + +class AllocatorStatisticsInterface +{ +protected: + AllocatorStatisticsInterface() {} + virtual ~AllocatorStatisticsInterface() {} +public: + virtual uint64_t getCurrentUsage() const = 0; + virtual uint64_t getTotalUsage() const = 0; + virtual uint64_t getNumberOfAllocations() const = 0; + virtual uint64_t getPeakUsage() const = 0; + + /** set peak usage = current usage */ + virtual void resetPeakUsage() = 0; +}; + +}} // namespace + +#endif // OPENCV_CORE_ALLOCATOR_STATS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/utils/allocator_stats.impl.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/utils/allocator_stats.impl.hpp new file mode 100644 index 00000000..eb5ecde1 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/utils/allocator_stats.impl.hpp @@ -0,0 +1,158 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
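To make the interface above and the implementation below concrete, here is a rough sketch of how an allocator might drive the statistics object (illustrative only; inside OpenCV these calls are made by the library's own memory management, not by user code):

@code{.cpp}
    cv::utils::AllocatorStatistics stats;               // concrete class defined in this impl header
    stats.onAllocate(1024);                             // report a 1 KiB allocation
    stats.onAllocate(4096);
    stats.onFree(1024);
    uint64_t current = stats.getCurrentUsage();         // 4096
    uint64_t peak    = stats.getPeakUsage();            // 5120
    uint64_t count   = stats.getNumberOfAllocations();  // 2
    stats.resetPeakUsage();                             // peak := current usage
@endcode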
+ +#ifndef OPENCV_CORE_ALLOCATOR_STATS_IMPL_HPP +#define OPENCV_CORE_ALLOCATOR_STATS_IMPL_HPP + +#include "./allocator_stats.hpp" + +//#define OPENCV_DISABLE_ALLOCATOR_STATS + +#ifdef CV_CXX11 + +#include + +#ifndef OPENCV_ALLOCATOR_STATS_COUNTER_TYPE +#if defined(__GNUC__) && (\ + (defined(__SIZEOF_POINTER__) && __SIZEOF_POINTER__ == 4) || \ + (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) && !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)) \ + ) +#define OPENCV_ALLOCATOR_STATS_COUNTER_TYPE int +#endif +#endif + +#ifndef OPENCV_ALLOCATOR_STATS_COUNTER_TYPE +#define OPENCV_ALLOCATOR_STATS_COUNTER_TYPE long long +#endif + +#else // CV_CXX11 + +#ifndef OPENCV_ALLOCATOR_STATS_COUNTER_TYPE +#define OPENCV_ALLOCATOR_STATS_COUNTER_TYPE int // CV_XADD supports int only +#endif + +#endif // CV_CXX11 + +namespace cv { namespace utils { + +#ifdef CV__ALLOCATOR_STATS_LOG +namespace { +#endif + +class AllocatorStatistics : public AllocatorStatisticsInterface +{ +#ifdef OPENCV_DISABLE_ALLOCATOR_STATS + +public: + AllocatorStatistics() {} + ~AllocatorStatistics() CV_OVERRIDE {} + + uint64_t getCurrentUsage() const CV_OVERRIDE { return 0; } + uint64_t getTotalUsage() const CV_OVERRIDE { return 0; } + uint64_t getNumberOfAllocations() const CV_OVERRIDE { return 0; } + uint64_t getPeakUsage() const CV_OVERRIDE { return 0; } + + /** set peak usage = current usage */ + void resetPeakUsage() CV_OVERRIDE {}; + + void onAllocate(size_t /*sz*/) {} + void onFree(size_t /*sz*/) {} + +#elif defined(CV_CXX11) + +protected: + typedef OPENCV_ALLOCATOR_STATS_COUNTER_TYPE counter_t; + std::atomic curr, total, total_allocs, peak; +public: + AllocatorStatistics() {} + ~AllocatorStatistics() CV_OVERRIDE {} + + uint64_t getCurrentUsage() const CV_OVERRIDE { return (uint64_t)curr.load(); } + uint64_t getTotalUsage() const CV_OVERRIDE { return (uint64_t)total.load(); } + uint64_t getNumberOfAllocations() const CV_OVERRIDE { return (uint64_t)total_allocs.load(); } + uint64_t getPeakUsage() const CV_OVERRIDE { return (uint64_t)peak.load(); } + + /** set peak usage = current usage */ + void resetPeakUsage() CV_OVERRIDE { peak.store(curr.load()); } + + // Controller interface + void onAllocate(size_t sz) + { +#ifdef CV__ALLOCATOR_STATS_LOG + CV__ALLOCATOR_STATS_LOG(cv::format("allocate: %lld (curr=%lld)", (long long int)sz, (long long int)curr.load())); +#endif + + counter_t new_curr = curr.fetch_add((counter_t)sz) + (counter_t)sz; + + // peak = std::max((uint64_t)peak, new_curr); + auto prev_peak = peak.load(); + while (prev_peak < new_curr) + { + if (peak.compare_exchange_weak(prev_peak, new_curr)) + break; + } + // end of peak = max(...) 
+ + total += (counter_t)sz; + total_allocs++; + } + void onFree(size_t sz) + { +#ifdef CV__ALLOCATOR_STATS_LOG + CV__ALLOCATOR_STATS_LOG(cv::format("free: %lld (curr=%lld)", (long long int)sz, (long long int)curr.load())); +#endif + curr -= (counter_t)sz; + } + +#else // non C++11 + +protected: + typedef OPENCV_ALLOCATOR_STATS_COUNTER_TYPE counter_t; + volatile counter_t curr, total, total_allocs, peak; // overflow is possible, CV_XADD operates with 'int' only +public: + AllocatorStatistics() + : curr(0), total(0), total_allocs(0), peak(0) + {} + ~AllocatorStatistics() CV_OVERRIDE {} + + uint64_t getCurrentUsage() const CV_OVERRIDE { return (uint64_t)curr; } + uint64_t getTotalUsage() const CV_OVERRIDE { return (uint64_t)total; } + uint64_t getNumberOfAllocations() const CV_OVERRIDE { return (uint64_t)total_allocs; } + uint64_t getPeakUsage() const CV_OVERRIDE { return (uint64_t)peak; } + + void resetPeakUsage() CV_OVERRIDE { peak = curr; } + + // Controller interface + void onAllocate(size_t sz) + { +#ifdef CV__ALLOCATOR_STATS_LOG + CV__ALLOCATOR_STATS_LOG(cv::format("allocate: %lld (curr=%lld)", (long long int)sz, (long long int)curr)); +#endif + + counter_t new_curr = (counter_t)CV_XADD(&curr, (counter_t)sz) + (counter_t)sz; + + peak = std::max((counter_t)peak, new_curr); // non-thread safe + + //CV_XADD(&total, (uint64_t)sz); // overflow with int, non-reliable... + total += sz; + + CV_XADD(&total_allocs, (counter_t)1); + } + void onFree(size_t sz) + { +#ifdef CV__ALLOCATOR_STATS_LOG + CV__ALLOCATOR_STATS_LOG(cv::format("free: %lld (curr=%lld)", (long long int)sz, (long long int)curr)); +#endif + CV_XADD(&curr, (counter_t)-sz); + } +#endif +}; + +#ifdef CV__ALLOCATOR_STATS_LOG +} // namespace +#endif + +}} // namespace + +#endif // OPENCV_CORE_ALLOCATOR_STATS_IMPL_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/utils/filesystem.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/utils/filesystem.hpp new file mode 100644 index 00000000..00b0dd1c --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/utils/filesystem.hpp @@ -0,0 +1,78 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#ifndef OPENCV_UTILS_FILESYSTEM_HPP +#define OPENCV_UTILS_FILESYSTEM_HPP + +namespace cv { namespace utils { namespace fs { + + +CV_EXPORTS bool exists(const cv::String& path); +CV_EXPORTS bool isDirectory(const cv::String& path); + +CV_EXPORTS void remove_all(const cv::String& path); + + +CV_EXPORTS cv::String getcwd(); + +/** @brief Converts path p to a canonical absolute path + * Symlinks are processed if there is support for them on running platform. + * + * @param path input path. Target file/directory should exist. + */ +CV_EXPORTS cv::String canonical(const cv::String& path); + +/** Join path components */ +CV_EXPORTS cv::String join(const cv::String& base, const cv::String& path); + +/** + * Generate a list of all files that match the globbing pattern. + * + * Result entries are prefixed by base directory path. + * + * @param directory base directory + * @param pattern filter pattern (based on '*'/'?' symbols). Use empty string to disable filtering and return all results + * @param[out] result result of globing. 
+ * @param recursive scan nested directories too + * @param includeDirectories include directories into results list + */ +CV_EXPORTS void glob(const cv::String& directory, const cv::String& pattern, + CV_OUT std::vector& result, + bool recursive = false, bool includeDirectories = false); + +/** + * Generate a list of all files that match the globbing pattern. + * + * @param directory base directory + * @param pattern filter pattern (based on '*'/'?' symbols). Use empty string to disable filtering and return all results + * @param[out] result globbing result with relative paths from base directory + * @param recursive scan nested directories too + * @param includeDirectories include directories into results list + */ +CV_EXPORTS void glob_relative(const cv::String& directory, const cv::String& pattern, + CV_OUT std::vector& result, + bool recursive = false, bool includeDirectories = false); + + +CV_EXPORTS bool createDirectory(const cv::String& path); +CV_EXPORTS bool createDirectories(const cv::String& path); + +#ifdef __OPENCV_BUILD +// TODO +//CV_EXPORTS cv::String getTempDirectory(); + +/** + * @brief Returns directory to store OpenCV cache files + * Create sub-directory in common OpenCV cache directory if it doesn't exist. + * @param sub_directory_name name of sub-directory. NULL or "" value asks to return root cache directory. + * @param configuration_name optional name of configuration parameter name which overrides default behavior. + * @return Path to cache directory. Returns empty string if cache directories support is not available. Returns "disabled" if cache disabled by user. + */ +CV_EXPORTS cv::String getCacheDirectory(const char* sub_directory_name, const char* configuration_name = NULL); + +#endif + +}}} // namespace + +#endif // OPENCV_UTILS_FILESYSTEM_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/utils/instrumentation.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/utils/instrumentation.hpp new file mode 100644 index 00000000..36398670 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/utils/instrumentation.hpp @@ -0,0 +1,125 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#ifndef OPENCV_UTILS_INSTR_HPP +#define OPENCV_UTILS_INSTR_HPP + +#include +#include + +namespace cv { + +//! @addtogroup core_utils +//! 
@{ + +#ifdef CV_COLLECT_IMPL_DATA +CV_EXPORTS void setImpl(int flags); // set implementation flags and reset storage arrays +CV_EXPORTS void addImpl(int flag, const char* func = 0); // add implementation and function name to storage arrays +// Get stored implementation flags and functions names arrays +// Each implementation entry correspond to function name entry, so you can find which implementation was executed in which function +CV_EXPORTS int getImpl(std::vector &impl, std::vector &funName); + +CV_EXPORTS bool useCollection(); // return implementation collection state +CV_EXPORTS void setUseCollection(bool flag); // set implementation collection state + +#define CV_IMPL_PLAIN 0x01 // native CPU OpenCV implementation +#define CV_IMPL_OCL 0x02 // OpenCL implementation +#define CV_IMPL_IPP 0x04 // IPP implementation +#define CV_IMPL_MT 0x10 // multithreaded implementation + +#undef CV_IMPL_ADD +#define CV_IMPL_ADD(impl) \ + if(cv::useCollection()) \ + { \ + cv::addImpl(impl, CV_Func); \ + } +#endif + +// Instrumentation external interface +namespace instr +{ + +#if !defined OPENCV_ABI_CHECK + +enum TYPE +{ + TYPE_GENERAL = 0, // OpenCV API function, e.g. exported function + TYPE_MARKER, // Information marker + TYPE_WRAPPER, // Wrapper function for implementation + TYPE_FUN, // Simple function call +}; + +enum IMPL +{ + IMPL_PLAIN = 0, + IMPL_IPP, + IMPL_OPENCL, +}; + +struct NodeDataTls +{ + NodeDataTls() + { + m_ticksTotal = 0; + } + uint64 m_ticksTotal; +}; + +class CV_EXPORTS NodeData +{ +public: + NodeData(const char* funName = 0, const char* fileName = NULL, int lineNum = 0, void* retAddress = NULL, bool alwaysExpand = false, cv::instr::TYPE instrType = TYPE_GENERAL, cv::instr::IMPL implType = IMPL_PLAIN); + NodeData(NodeData &ref); + ~NodeData(); + NodeData& operator=(const NodeData&); + + cv::String m_funName; + cv::instr::TYPE m_instrType; + cv::instr::IMPL m_implType; + const char* m_fileName; + int m_lineNum; + void* m_retAddress; + bool m_alwaysExpand; + bool m_funError; + + volatile int m_counter; + volatile uint64 m_ticksTotal; + TLSDataAccumulator m_tls; + int m_threads; + + // No synchronization + double getTotalMs() const { return ((double)m_ticksTotal / cv::getTickFrequency()) * 1000; } + double getMeanMs() const { return (((double)m_ticksTotal/m_counter) / cv::getTickFrequency()) * 1000; } +}; +bool operator==(const NodeData& lhs, const NodeData& rhs); + +typedef Node InstrNode; + +CV_EXPORTS InstrNode* getTrace(); + +#endif // !defined OPENCV_ABI_CHECK + + +CV_EXPORTS bool useInstrumentation(); +CV_EXPORTS void setUseInstrumentation(bool flag); +CV_EXPORTS void resetTrace(); + +enum FLAGS +{ + FLAGS_NONE = 0, + FLAGS_MAPPING = 0x01, + FLAGS_EXPAND_SAME_NAMES = 0x02, +}; + +CV_EXPORTS void setFlags(FLAGS modeFlags); +static inline void setFlags(int modeFlags) { setFlags((FLAGS)modeFlags); } +CV_EXPORTS FLAGS getFlags(); + +} // namespace instr + +//! @} + +} // namespace + +#endif // OPENCV_UTILS_TLS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/utils/logger.defines.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/utils/logger.defines.hpp new file mode 100644 index 00000000..b2dfc417 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/utils/logger.defines.hpp @@ -0,0 +1,22 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
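Connecting the instrumentation declarations above with the Node<> tree from utility.hpp, a trace dump could look roughly like the sketch below (it assumes OpenCV was built with instrumentation support; otherwise these calls collect nothing useful):

@code{.cpp}
    void dumpTrace(cv::instr::InstrNode* node, int depth = 0)
    {
        if (!node) return;
        const cv::instr::NodeData& d = node->m_payload;
        std::cout << std::string(depth * 2, ' ') << d.m_funName
                  << " calls=" << d.m_counter
                  << " mean=" << d.getMeanMs() << " ms" << std::endl;
        for (size_t i = 0; i < node->m_childs.size(); i++)
            dumpTrace(node->m_childs[i], depth + 1);
    }

    // in application code:
    cv::instr::setUseInstrumentation(true);
    // ... run the OpenCV calls to be profiled ...
    dumpTrace(cv::instr::getTrace());
    cv::instr::resetTrace();
@endcode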
+ +#ifndef OPENCV_LOGGER_DEFINES_HPP +#define OPENCV_LOGGER_DEFINES_HPP + +//! @addtogroup core_logging +//! @{ + +// Supported logging levels and their semantic +#define CV_LOG_LEVEL_SILENT 0 //!< for using in setLogLevel() call +#define CV_LOG_LEVEL_FATAL 1 //!< Fatal (critical) error (unrecoverable internal error) +#define CV_LOG_LEVEL_ERROR 2 //!< Error message +#define CV_LOG_LEVEL_WARN 3 //!< Warning message +#define CV_LOG_LEVEL_INFO 4 //!< Info message +#define CV_LOG_LEVEL_DEBUG 5 //!< Debug message. Disabled in the "Release" build. +#define CV_LOG_LEVEL_VERBOSE 6 //!< Verbose (trace) messages. Requires verbosity level. Disabled in the "Release" build. + +//! @} + +#endif // OPENCV_LOGGER_DEFINES_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/utils/logger.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/utils/logger.hpp new file mode 100644 index 00000000..501d1404 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/utils/logger.hpp @@ -0,0 +1,159 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#ifndef OPENCV_LOGGER_HPP +#define OPENCV_LOGGER_HPP + +#include +#include +#include // INT_MAX + +#include "logger.defines.hpp" + +namespace cv { +namespace utils { +namespace logging { + +//! @addtogroup core_logging +//! @{ + +//! Supported logging levels and their semantic +enum LogLevel { + LOG_LEVEL_SILENT = 0, //!< for using in setLogVevel() call + LOG_LEVEL_FATAL = 1, //!< Fatal (critical) error (unrecoverable internal error) + LOG_LEVEL_ERROR = 2, //!< Error message + LOG_LEVEL_WARNING = 3, //!< Warning message + LOG_LEVEL_INFO = 4, //!< Info message + LOG_LEVEL_DEBUG = 5, //!< Debug message. Disabled in the "Release" build. + LOG_LEVEL_VERBOSE = 6, //!< Verbose (trace) messages. Requires verbosity level. Disabled in the "Release" build. +#ifndef CV_DOXYGEN + ENUM_LOG_LEVEL_FORCE_INT = INT_MAX +#endif +}; + +/** Set global logging level +@return previous logging level +*/ +CV_EXPORTS LogLevel setLogLevel(LogLevel logLevel); +/** Get global logging level */ +CV_EXPORTS LogLevel getLogLevel(); + +namespace internal { +/** Write log message */ +CV_EXPORTS void writeLogMessage(LogLevel logLevel, const char* message); +} // namespace + +/** + * \def CV_LOG_STRIP_LEVEL + * + * Define CV_LOG_STRIP_LEVEL=CV_LOG_LEVEL_[DEBUG|INFO|WARN|ERROR|FATAL|DISABLED] to compile out anything at that and before that logging level + */ +#ifndef CV_LOG_STRIP_LEVEL +# if defined NDEBUG +# define CV_LOG_STRIP_LEVEL CV_LOG_LEVEL_DEBUG +# else +# define CV_LOG_STRIP_LEVEL CV_LOG_LEVEL_VERBOSE +# endif +#endif + +#define CV_LOG_WITH_TAG(tag, msgLevel, extra_check0, extra_check1, msg_prefix, ...) \ + for(;;) { \ + extra_check0; \ + if (cv::utils::logging::getLogLevel() < msgLevel) break; \ + extra_check1; \ + std::stringstream ss; ss msg_prefix << __VA_ARGS__; \ + cv::utils::logging::internal::writeLogMessage(msgLevel, ss.str().c_str()); \ + break; \ + } + +#define CV_LOG_FATAL(tag, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_FATAL, , , , __VA_ARGS__) +#define CV_LOG_ERROR(tag, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_ERROR, , , , __VA_ARGS__) +#define CV_LOG_WARNING(tag, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_WARNING, , , , __VA_ARGS__) +#define CV_LOG_INFO(tag, ...) 
CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_INFO, , , , __VA_ARGS__) +#define CV_LOG_DEBUG(tag, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_DEBUG, , , , __VA_ARGS__) +#define CV_LOG_VERBOSE(tag, v, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_VERBOSE, , , << "[VERB" << v << ":" << cv::utils::getThreadID() << "] ", __VA_ARGS__) + +#if CV_LOG_STRIP_LEVEL <= CV_LOG_LEVEL_INFO +#undef CV_LOG_INFO +#define CV_LOG_INFO(tag, ...) +#endif + +#if CV_LOG_STRIP_LEVEL <= CV_LOG_LEVEL_DEBUG +#undef CV_LOG_DEBUG +#define CV_LOG_DEBUG(tag, ...) +#endif + +#if CV_LOG_STRIP_LEVEL <= CV_LOG_LEVEL_VERBOSE +#undef CV_LOG_VERBOSE +#define CV_LOG_VERBOSE(tag, v, ...) +#endif + +//! @cond IGNORED +#define CV__LOG_ONCE_CHECK_PRE \ + static bool _cv_log_once_ ## __LINE__ = false; \ + if (_cv_log_once_ ## __LINE__) break; + +#define CV__LOG_ONCE_CHECK_POST \ + _cv_log_once_ ## __LINE__ = true; + +#define CV__LOG_IF_CHECK(logging_cond) \ + if (!(logging_cond)) break; + +//! @endcond + + +// CV_LOG_ONCE_XXX macros + +#define CV_LOG_ONCE_ERROR(tag, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_ERROR, CV__LOG_ONCE_CHECK_PRE, CV__LOG_ONCE_CHECK_POST, , __VA_ARGS__) +#define CV_LOG_ONCE_WARNING(tag, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_WARNING, CV__LOG_ONCE_CHECK_PRE, CV__LOG_ONCE_CHECK_POST, , __VA_ARGS__) +#define CV_LOG_ONCE_INFO(tag, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_INFO, CV__LOG_ONCE_CHECK_PRE, CV__LOG_ONCE_CHECK_POST, , __VA_ARGS__) +#define CV_LOG_ONCE_DEBUG(tag, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_DEBUG, CV__LOG_ONCE_CHECK_PRE, CV__LOG_ONCE_CHECK_POST, , __VA_ARGS__) +#define CV_LOG_ONCE_VERBOSE(tag, v, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_VERBOSE, CV__LOG_ONCE_CHECK_PRE, CV__LOG_ONCE_CHECK_POST, << "[VERB" << v << ":" << cv::utils::getThreadID() << "] ", __VA_ARGS__) + +#if CV_LOG_STRIP_LEVEL <= CV_LOG_LEVEL_INFO +#undef CV_LOG_ONCE_INFO +#define CV_LOG_ONCE_INFO(tag, ...) +#endif + +#if CV_LOG_STRIP_LEVEL <= CV_LOG_LEVEL_DEBUG +#undef CV_LOG_ONCE_DEBUG +#define CV_LOG_ONCE_DEBUG(tag, ...) +#endif + +#if CV_LOG_STRIP_LEVEL <= CV_LOG_LEVEL_VERBOSE +#undef CV_LOG_ONCE_VERBOSE +#define CV_LOG_ONCE_VERBOSE(tag, v, ...) +#endif + + +// CV_LOG_IF_XXX macros + +#define CV_LOG_IF_FATAL(tag, logging_cond, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_FATAL, , CV__LOG_IF_CHECK(logging_cond), , __VA_ARGS__) +#define CV_LOG_IF_ERROR(tag, logging_cond, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_ERROR, , CV__LOG_IF_CHECK(logging_cond), , __VA_ARGS__) +#define CV_LOG_IF_WARNING(tag, logging_cond, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_WARNING, , CV__LOG_IF_CHECK(logging_cond), , __VA_ARGS__) +#define CV_LOG_IF_INFO(tag, logging_cond, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_INFO, , CV__LOG_IF_CHECK(logging_cond), , __VA_ARGS__) +#define CV_LOG_IF_DEBUG(tag, logging_cond, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_DEBUG, , CV__LOG_IF_CHECK(logging_cond), , __VA_ARGS__) +#define CV_LOG_IF_VERBOSE(tag, v, logging_cond, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_VERBOSE, , CV__LOG_IF_CHECK(logging_cond), << "[VERB" << v << ":" << cv::utils::getThreadID() << "] ", __VA_ARGS__) + +#if CV_LOG_STRIP_LEVEL <= CV_LOG_LEVEL_INFO +#undef CV_LOG_IF_INFO +#define CV_LOG_IF_INFO(tag, logging_cond, ...) +#endif + +#if CV_LOG_STRIP_LEVEL <= CV_LOG_LEVEL_DEBUG +#undef CV_LOG_IF_DEBUG +#define CV_LOG_IF_DEBUG(tag, logging_cond, ...) 
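A short sketch of how the logging macros above are typically used (the tag argument is ignored by this implementation and is conventionally passed as NULL; the variable and messages are placeholders):

@code{.cpp}
    cv::utils::logging::setLogLevel(cv::utils::logging::LOG_LEVEL_DEBUG);

    int frames = 42;
    CV_LOG_INFO(NULL, "processed " << frames << " frames");         // arguments are streamed into a std::stringstream
    CV_LOG_IF_WARNING(NULL, frames > 40, "unusually many frames");  // emitted only when the extra condition holds
@endcode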
+#endif + +#if CV_LOG_STRIP_LEVEL <= CV_LOG_LEVEL_VERBOSE +#undef CV_LOG_IF_VERBOSE +#define CV_LOG_IF_VERBOSE(tag, v, logging_cond, ...) +#endif + + +//! @} + +}}} // namespace + +#endif // OPENCV_LOGGER_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/utils/tls.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/utils/tls.hpp new file mode 100644 index 00000000..c0d2962c --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/utils/tls.hpp @@ -0,0 +1,239 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#ifndef OPENCV_UTILS_TLS_HPP +#define OPENCV_UTILS_TLS_HPP + +#ifndef OPENCV_CORE_UTILITY_H +#error "tls.hpp must be included after opencv2/core/utility.hpp or opencv2/core.hpp" +#endif + +namespace cv { + +//! @addtogroup core_utils +//! @{ + +namespace details { class TlsStorage; } + +/** TLS container base implementation + * + * Don't use directly. + * + * @sa TLSData, TLSDataAccumulator templates + */ +class CV_EXPORTS TLSDataContainer +{ +protected: + TLSDataContainer(); + virtual ~TLSDataContainer(); + + /// @deprecated use detachData() instead + void gatherData(std::vector &data) const; + /// get TLS data and detach all data from threads (similar to cleanup() call) + void detachData(std::vector& data); + + void* getData() const; + void release(); + +protected: + virtual void* createDataInstance() const = 0; + virtual void deleteDataInstance(void* pData) const = 0; + +#if OPENCV_ABI_COMPATIBILITY > 300 +private: +#else +public: +#endif + int key_; + + friend class cv::details::TlsStorage; // core/src/system.cpp + +public: + void cleanup(); //!< Release created TLS data container objects. It is similar to release() call, but it keeps TLS container valid. 
+ +private: + // Disable copy/assign (noncopyable pattern) + TLSDataContainer(TLSDataContainer &); + TLSDataContainer& operator =(const TLSDataContainer &); +}; + + +/** @brief Simple TLS data class + * + * @sa TLSDataAccumulator + */ +template +class TLSData : protected TLSDataContainer +{ +public: + inline TLSData() {} + inline ~TLSData() { release(); } + + inline T* get() const { return (T*)getData(); } //!< Get data associated with key + inline T& getRef() const { T* ptr = (T*)getData(); CV_DbgAssert(ptr); return *ptr; } //!< Get data associated with key + + /// Release associated thread data + inline void cleanup() + { + TLSDataContainer::cleanup(); + } + +protected: + /// Wrapper to allocate data by template + virtual void* createDataInstance() const CV_OVERRIDE { return new T; } + /// Wrapper to release data by template + virtual void deleteDataInstance(void* pData) const CV_OVERRIDE { delete (T*)pData; } +}; + + +/// TLS data accumulator with gathering methods +template +class TLSDataAccumulator : public TLSData +{ + mutable cv::Mutex mutex; + mutable std::vector dataFromTerminatedThreads; + std::vector detachedData; + bool cleanupMode; +public: + TLSDataAccumulator() : cleanupMode(false) {} + ~TLSDataAccumulator() + { + release(); + } + + /** @brief Get data from all threads + * @deprecated replaced by detachData() + * + * Lifetime of vector data is valid until next detachData()/cleanup()/release() calls + * + * @param[out] data result buffer (should be empty) + */ + void gather(std::vector &data) const + { + CV_Assert(cleanupMode == false); // state is not valid + CV_Assert(data.empty()); + { + std::vector &dataVoid = reinterpret_cast&>(data); + TLSDataContainer::gatherData(dataVoid); + } + { + AutoLock lock(mutex); + data.reserve(data.size() + dataFromTerminatedThreads.size()); + for (typename std::vector::const_iterator i = dataFromTerminatedThreads.begin(); i != dataFromTerminatedThreads.end(); ++i) + { + data.push_back((T*)*i); + } + } + } + + /** @brief Get and detach data from all threads + * + * Call cleanupDetachedData() when returned vector is not needed anymore. + * + * @return Vector with associated data. 
Content is preserved (including lifetime of attached data pointers) until next detachData()/cleanupDetachedData()/cleanup()/release() calls + */ + std::vector& detachData() + { + CV_Assert(cleanupMode == false); // state is not valid + std::vector dataVoid; + { + TLSDataContainer::detachData(dataVoid); + } + { + AutoLock lock(mutex); + detachedData.reserve(dataVoid.size() + dataFromTerminatedThreads.size()); + for (typename std::vector::const_iterator i = dataFromTerminatedThreads.begin(); i != dataFromTerminatedThreads.end(); ++i) + { + detachedData.push_back((T*)*i); + } + dataFromTerminatedThreads.clear(); + for (typename std::vector::const_iterator i = dataVoid.begin(); i != dataVoid.end(); ++i) + { + detachedData.push_back((T*)(void*)*i); + } + } + dataVoid.clear(); + return detachedData; + } + + /// Release associated thread data returned by detachData() call + void cleanupDetachedData() + { + AutoLock lock(mutex); + cleanupMode = true; + _cleanupDetachedData(); + cleanupMode = false; + } + + /// Release associated thread data + void cleanup() + { + cleanupMode = true; + TLSDataContainer::cleanup(); + + AutoLock lock(mutex); + _cleanupDetachedData(); + _cleanupTerminatedData(); + cleanupMode = false; + } + + /// Release associated thread data and free TLS key + void release() + { + cleanupMode = true; + TLSDataContainer::release(); + { + AutoLock lock(mutex); + _cleanupDetachedData(); + _cleanupTerminatedData(); + } + } + +protected: + // synchronized + void _cleanupDetachedData() + { + for (typename std::vector::iterator i = detachedData.begin(); i != detachedData.end(); ++i) + { + deleteDataInstance((T*)*i); + } + detachedData.clear(); + } + + // synchronized + void _cleanupTerminatedData() + { + for (typename std::vector::iterator i = dataFromTerminatedThreads.begin(); i != dataFromTerminatedThreads.end(); ++i) + { + deleteDataInstance((T*)*i); + } + dataFromTerminatedThreads.clear(); + } + +protected: + virtual void* createDataInstance() const CV_OVERRIDE + { + // Note: we can collect all allocated data here, but this would require raced mutex locks + return new T; + } + virtual void deleteDataInstance(void* pData) const CV_OVERRIDE + { + if (cleanupMode) + { + delete (T*)pData; + } + else + { + AutoLock lock(mutex); + dataFromTerminatedThreads.push_back((T*)pData); + } + } +}; + + +//! @} + +} // namespace + +#endif // OPENCV_UTILS_TLS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/utils/trace.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/utils/trace.hpp new file mode 100644 index 00000000..ef5d35b4 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/utils/trace.hpp @@ -0,0 +1,252 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#ifndef OPENCV_TRACE_HPP +#define OPENCV_TRACE_HPP + +#include + +namespace cv { +namespace utils { +namespace trace { + +//! @addtogroup core_logging +//! @{ + +//! Macro to trace function +#define CV_TRACE_FUNCTION() + +#define CV_TRACE_FUNCTION_SKIP_NESTED() + +//! Trace code scope. +//! @note Dynamic names are not supported in this macro (on stack or heap). Use string literals here only, like "initialize". +#define CV_TRACE_REGION(name_as_static_string_literal) +//! mark completed of the current opened region and create new one +//! @note Dynamic names are not supported in this macro (on stack or heap). Use string literals here only, like "step1". 
+#define CV_TRACE_REGION_NEXT(name_as_static_string_literal) + +//! Macro to trace argument value +#define CV_TRACE_ARG(arg_id) + +//! Macro to trace argument value (expanded version) +#define CV_TRACE_ARG_VALUE(arg_id, arg_name, value) + +//! @cond IGNORED +#define CV_TRACE_NS cv::utils::trace + +#if !defined(OPENCV_DISABLE_TRACE) && defined(__EMSCRIPTEN__) +#define OPENCV_DISABLE_TRACE 1 +#endif + +namespace details { + +#ifndef __OPENCV_TRACE +# if defined __OPENCV_BUILD && !defined __OPENCV_TESTS && !defined __OPENCV_APPS +# define __OPENCV_TRACE 1 +# else +# define __OPENCV_TRACE 0 +# endif +#endif + +#ifndef CV_TRACE_FILENAME +# define CV_TRACE_FILENAME __FILE__ +#endif + +#ifndef CV__TRACE_FUNCTION +# if defined _MSC_VER +# define CV__TRACE_FUNCTION __FUNCSIG__ +# elif defined __GNUC__ +# define CV__TRACE_FUNCTION __PRETTY_FUNCTION__ +# else +# define CV__TRACE_FUNCTION "" +# endif +#endif + +//! Thread-local instance (usually allocated on stack) +class CV_EXPORTS Region +{ +public: + struct LocationExtraData; + struct LocationStaticStorage + { + LocationExtraData** ppExtra; //< implementation specific data + const char* name; //< region name (function name or other custom name) + const char* filename; //< source code filename + int line; //< source code line + int flags; //< flags (implementation code path: Plain, IPP, OpenCL) + }; + + Region(const LocationStaticStorage& location); + inline ~Region() + { + if (implFlags != 0) + destroy(); + CV_DbgAssert(implFlags == 0); + CV_DbgAssert(pImpl == NULL); + } + + class Impl; + Impl* pImpl; // NULL if current region is not active + int implFlags; // see RegionFlag, 0 if region is ignored + + bool isActive() const { return pImpl != NULL; } + + void destroy(); +private: + Region(const Region&); // disabled + Region& operator= (const Region&); // disabled +}; + +//! Specify region flags +enum RegionLocationFlag { + REGION_FLAG_FUNCTION = (1 << 0), //< region is function (=1) / nested named region (=0) + REGION_FLAG_APP_CODE = (1 << 1), //< region is Application code (=1) / OpenCV library code (=0) + REGION_FLAG_SKIP_NESTED = (1 << 2), //< avoid processing of nested regions + + REGION_FLAG_IMPL_IPP = (1 << 16), //< region is part of IPP code path + REGION_FLAG_IMPL_OPENCL = (2 << 16), //< region is part of OpenCL code path + REGION_FLAG_IMPL_OPENVX = (3 << 16), //< region is part of OpenVX code path + + REGION_FLAG_IMPL_MASK = (15 << 16), + + REGION_FLAG_REGION_FORCE = (1 << 30), + REGION_FLAG_REGION_NEXT = (1 << 31), //< close previous region (see #CV_TRACE_REGION_NEXT macro) + + ENUM_REGION_FLAG_FORCE_INT = INT_MAX +}; + +struct CV_EXPORTS TraceArg { +public: + struct ExtraData; + ExtraData** ppExtra; + const char* name; + int flags; +}; +/** @brief Add meta information to current region (function) + * See CV_TRACE_ARG macro + * @param arg argument information structure (global static cache) + * @param value argument value (can by dynamic string literal in case of string, static allocation is not required) + */ +CV_EXPORTS void traceArg(const TraceArg& arg, const char* value); +//! @overload +CV_EXPORTS void traceArg(const TraceArg& arg, int value); +//! @overload +CV_EXPORTS void traceArg(const TraceArg& arg, int64 value); +//! 
@overload +CV_EXPORTS void traceArg(const TraceArg& arg, double value); + +#define CV__TRACE_LOCATION_VARNAME(loc_id) CVAUX_CONCAT(CVAUX_CONCAT(__cv_trace_location_, loc_id), __LINE__) +#define CV__TRACE_LOCATION_EXTRA_VARNAME(loc_id) CVAUX_CONCAT(CVAUX_CONCAT(__cv_trace_location_extra_, loc_id) , __LINE__) + +#define CV__TRACE_DEFINE_LOCATION_(loc_id, name, flags) \ + static CV_TRACE_NS::details::Region::LocationExtraData* CV__TRACE_LOCATION_EXTRA_VARNAME(loc_id) = 0; \ + static const CV_TRACE_NS::details::Region::LocationStaticStorage \ + CV__TRACE_LOCATION_VARNAME(loc_id) = { &(CV__TRACE_LOCATION_EXTRA_VARNAME(loc_id)), name, CV_TRACE_FILENAME, __LINE__, flags}; + +#define CV__TRACE_DEFINE_LOCATION_FN(name, flags) CV__TRACE_DEFINE_LOCATION_(fn, name, ((flags) | CV_TRACE_NS::details::REGION_FLAG_FUNCTION)) + + +#define CV__TRACE_OPENCV_FUNCTION() \ + CV__TRACE_DEFINE_LOCATION_FN(CV__TRACE_FUNCTION, 0); \ + const CV_TRACE_NS::details::Region __region_fn(CV__TRACE_LOCATION_VARNAME(fn)); + +#define CV__TRACE_OPENCV_FUNCTION_NAME(name) \ + CV__TRACE_DEFINE_LOCATION_FN(name, 0); \ + const CV_TRACE_NS::details::Region __region_fn(CV__TRACE_LOCATION_VARNAME(fn)); + +#define CV__TRACE_APP_FUNCTION() \ + CV__TRACE_DEFINE_LOCATION_FN(CV__TRACE_FUNCTION, CV_TRACE_NS::details::REGION_FLAG_APP_CODE); \ + const CV_TRACE_NS::details::Region __region_fn(CV__TRACE_LOCATION_VARNAME(fn)); + +#define CV__TRACE_APP_FUNCTION_NAME(name) \ + CV__TRACE_DEFINE_LOCATION_FN(name, CV_TRACE_NS::details::REGION_FLAG_APP_CODE); \ + const CV_TRACE_NS::details::Region __region_fn(CV__TRACE_LOCATION_VARNAME(fn)); + + +#define CV__TRACE_OPENCV_FUNCTION_SKIP_NESTED() \ + CV__TRACE_DEFINE_LOCATION_FN(CV__TRACE_FUNCTION, CV_TRACE_NS::details::REGION_FLAG_SKIP_NESTED); \ + const CV_TRACE_NS::details::Region __region_fn(CV__TRACE_LOCATION_VARNAME(fn)); + +#define CV__TRACE_OPENCV_FUNCTION_NAME_SKIP_NESTED(name) \ + CV__TRACE_DEFINE_LOCATION_FN(name, CV_TRACE_NS::details::REGION_FLAG_SKIP_NESTED); \ + const CV_TRACE_NS::details::Region __region_fn(CV__TRACE_LOCATION_VARNAME(fn)); + +#define CV__TRACE_APP_FUNCTION_SKIP_NESTED() \ + CV__TRACE_DEFINE_LOCATION_FN(CV__TRACE_FUNCTION, CV_TRACE_NS::details::REGION_FLAG_SKIP_NESTED | CV_TRACE_NS::details::REGION_FLAG_APP_CODE); \ + const CV_TRACE_NS::details::Region __region_fn(CV__TRACE_LOCATION_VARNAME(fn)); + + +#define CV__TRACE_REGION_(name_as_static_string_literal, flags) \ + CV__TRACE_DEFINE_LOCATION_(region, name_as_static_string_literal, flags); \ + CV_TRACE_NS::details::Region CVAUX_CONCAT(__region_, __LINE__)(CV__TRACE_LOCATION_VARNAME(region)); + +#define CV__TRACE_REGION(name_as_static_string_literal) CV__TRACE_REGION_(name_as_static_string_literal, 0) +#define CV__TRACE_REGION_NEXT(name_as_static_string_literal) CV__TRACE_REGION_(name_as_static_string_literal, CV_TRACE_NS::details::REGION_FLAG_REGION_NEXT) + +#define CV__TRACE_ARG_VARNAME(arg_id) CVAUX_CONCAT(__cv_trace_arg_ ## arg_id, __LINE__) +#define CV__TRACE_ARG_EXTRA_VARNAME(arg_id) CVAUX_CONCAT(__cv_trace_arg_extra_ ## arg_id, __LINE__) + +#define CV__TRACE_DEFINE_ARG_(arg_id, name, flags) \ + static CV_TRACE_NS::details::TraceArg::ExtraData* CV__TRACE_ARG_EXTRA_VARNAME(arg_id) = 0; \ + static const CV_TRACE_NS::details::TraceArg \ + CV__TRACE_ARG_VARNAME(arg_id) = { &(CV__TRACE_ARG_EXTRA_VARNAME(arg_id)), name, flags }; + +#define CV__TRACE_ARG_VALUE(arg_id, arg_name, value) \ + CV__TRACE_DEFINE_ARG_(arg_id, arg_name, 0); \ + CV_TRACE_NS::details::traceArg((CV__TRACE_ARG_VARNAME(arg_id)), value); + +#define 
CV__TRACE_ARG(arg_id) CV_TRACE_ARG_VALUE(arg_id, #arg_id, (arg_id)) + +} // namespace + +#ifndef OPENCV_DISABLE_TRACE +#undef CV_TRACE_FUNCTION +#undef CV_TRACE_FUNCTION_SKIP_NESTED +#if __OPENCV_TRACE +#define CV_TRACE_FUNCTION CV__TRACE_OPENCV_FUNCTION +#define CV_TRACE_FUNCTION_SKIP_NESTED CV__TRACE_OPENCV_FUNCTION_SKIP_NESTED +#else +#define CV_TRACE_FUNCTION CV__TRACE_APP_FUNCTION +#define CV_TRACE_FUNCTION_SKIP_NESTED CV__TRACE_APP_FUNCTION_SKIP_NESTED +#endif + +#undef CV_TRACE_REGION +#define CV_TRACE_REGION CV__TRACE_REGION + +#undef CV_TRACE_REGION_NEXT +#define CV_TRACE_REGION_NEXT CV__TRACE_REGION_NEXT + +#undef CV_TRACE_ARG_VALUE +#define CV_TRACE_ARG_VALUE(arg_id, arg_name, value) \ + if (__region_fn.isActive()) \ + { \ + CV__TRACE_ARG_VALUE(arg_id, arg_name, value); \ + } + +#undef CV_TRACE_ARG +#define CV_TRACE_ARG CV__TRACE_ARG + +#endif // OPENCV_DISABLE_TRACE + +#ifdef OPENCV_TRACE_VERBOSE +#define CV_TRACE_FUNCTION_VERBOSE CV_TRACE_FUNCTION +#define CV_TRACE_REGION_VERBOSE CV_TRACE_REGION +#define CV_TRACE_REGION_NEXT_VERBOSE CV_TRACE_REGION_NEXT +#define CV_TRACE_ARG_VALUE_VERBOSE CV_TRACE_ARG_VALUE +#define CV_TRACE_ARG_VERBOSE CV_TRACE_ARG +#else +#define CV_TRACE_FUNCTION_VERBOSE(...) +#define CV_TRACE_REGION_VERBOSE(...) +#define CV_TRACE_REGION_NEXT_VERBOSE(...) +#define CV_TRACE_ARG_VALUE_VERBOSE(...) +#define CV_TRACE_ARG_VERBOSE(...) +#endif + +//! @endcond + +//! @} + +}}} // namespace + +#endif // OPENCV_TRACE_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/va_intel.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/va_intel.hpp new file mode 100644 index 00000000..f6654709 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/va_intel.hpp @@ -0,0 +1,78 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +// Copyright (C) 2015, Itseez, Inc., all rights reserved. +// Third party copyrights are property of their respective owners. + +#ifndef OPENCV_CORE_VA_INTEL_HPP +#define OPENCV_CORE_VA_INTEL_HPP + +#ifndef __cplusplus +# error va_intel.hpp header must be compiled as C++ +#endif + +#include "opencv2/core.hpp" +#include "ocl.hpp" + +#if defined(HAVE_VA) +# include "va/va.h" +#else // HAVE_VA +# if !defined(_VA_H_) + typedef void* VADisplay; + typedef unsigned int VASurfaceID; +# endif // !_VA_H_ +#endif // HAVE_VA + +namespace cv { namespace va_intel { + +/** @addtogroup core_va_intel +This section describes Intel VA-API/OpenCL (CL-VA) interoperability. + +To enable CL-VA interoperability support, configure OpenCV using CMake with WITH_VA_INTEL=ON . Currently VA-API is +supported on Linux only. You should also install Intel Media Server Studio (MSS) to use this feature. You may +have to specify the path(s) to MSS components for cmake in environment variables: + +- VA_INTEL_IOCL_ROOT for Intel OpenCL (default is "/opt/intel/opencl"). + +To use CL-VA interoperability you should first create VADisplay (libva), and then call initializeContextFromVA() +function to create OpenCL context and set up interoperability. +*/ +//! @{ + +/////////////////// CL-VA Interoperability Functions /////////////////// + +namespace ocl { +using namespace cv::ocl; + +// TODO static functions in the Context class +/** @brief Creates OpenCL context from VA. +@param display - VADisplay for which CL interop should be established. 
+@param tryInterop - try to set up for interoperability, if true; set up for use slow copy if false. +@return Returns reference to OpenCL Context + */ +CV_EXPORTS Context& initializeContextFromVA(VADisplay display, bool tryInterop = true); + +} // namespace cv::va_intel::ocl + +/** @brief Converts InputArray to VASurfaceID object. +@param display - VADisplay object. +@param src - source InputArray. +@param surface - destination VASurfaceID object. +@param size - size of image represented by VASurfaceID object. + */ +CV_EXPORTS void convertToVASurface(VADisplay display, InputArray src, VASurfaceID surface, Size size); + +/** @brief Converts VASurfaceID object to OutputArray. +@param display - VADisplay object. +@param surface - source VASurfaceID object. +@param size - size of image represented by VASurfaceID object. +@param dst - destination OutputArray. + */ +CV_EXPORTS void convertFromVASurface(VADisplay display, VASurfaceID surface, Size size, OutputArray dst); + +//! @} + +}} // namespace cv::va_intel + +#endif /* OPENCV_CORE_VA_INTEL_HPP */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/version.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/version.hpp new file mode 100644 index 00000000..d05f5219 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/version.hpp @@ -0,0 +1,26 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#ifndef OPENCV_VERSION_HPP +#define OPENCV_VERSION_HPP + +#define CV_VERSION_MAJOR 3 +#define CV_VERSION_MINOR 4 +#define CV_VERSION_REVISION 16 +#define CV_VERSION_STATUS "" + +#define CVAUX_STR_EXP(__A) #__A +#define CVAUX_STR(__A) CVAUX_STR_EXP(__A) + +#define CVAUX_STRW_EXP(__A) L ## #__A +#define CVAUX_STRW(__A) CVAUX_STRW_EXP(__A) + +#define CV_VERSION CVAUX_STR(CV_VERSION_MAJOR) "." CVAUX_STR(CV_VERSION_MINOR) "." CVAUX_STR(CV_VERSION_REVISION) CV_VERSION_STATUS + +/* old style version constants*/ +#define CV_MAJOR_VERSION CV_VERSION_MAJOR +#define CV_MINOR_VERSION CV_VERSION_MINOR +#define CV_SUBMINOR_VERSION CV_VERSION_REVISION + +#endif // OPENCV_VERSION_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/vsx_utils.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/vsx_utils.hpp new file mode 100644 index 00000000..68863ffb --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/vsx_utils.hpp @@ -0,0 +1,1042 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html + +#ifndef OPENCV_HAL_VSX_UTILS_HPP +#define OPENCV_HAL_VSX_UTILS_HPP + +#include "opencv2/core/cvdef.h" + +#ifndef SKIP_INCLUDES +# include +#endif + +//! @addtogroup core_utils_vsx +//! @{ +#if CV_VSX + +#define __VSX_S16__(c, v) (c){v, v, v, v, v, v, v, v, v, v, v, v, v, v, v, v} +#define __VSX_S8__(c, v) (c){v, v, v, v, v, v, v, v} +#define __VSX_S4__(c, v) (c){v, v, v, v} +#define __VSX_S2__(c, v) (c){v, v} + +typedef __vector unsigned char vec_uchar16; +#define vec_uchar16_set(...) (vec_uchar16){__VA_ARGS__} +#define vec_uchar16_sp(c) (__VSX_S16__(vec_uchar16, (unsigned char)c)) +#define vec_uchar16_c(v) ((vec_uchar16)(v)) +#define vec_uchar16_z vec_uchar16_sp(0) + +typedef __vector signed char vec_char16; +#define vec_char16_set(...) 
(vec_char16){__VA_ARGS__} +#define vec_char16_sp(c) (__VSX_S16__(vec_char16, (signed char)c)) +#define vec_char16_c(v) ((vec_char16)(v)) +#define vec_char16_z vec_char16_sp(0) + +typedef __vector unsigned short vec_ushort8; +#define vec_ushort8_set(...) (vec_ushort8){__VA_ARGS__} +#define vec_ushort8_sp(c) (__VSX_S8__(vec_ushort8, (unsigned short)c)) +#define vec_ushort8_c(v) ((vec_ushort8)(v)) +#define vec_ushort8_z vec_ushort8_sp(0) + +typedef __vector signed short vec_short8; +#define vec_short8_set(...) (vec_short8){__VA_ARGS__} +#define vec_short8_sp(c) (__VSX_S8__(vec_short8, (signed short)c)) +#define vec_short8_c(v) ((vec_short8)(v)) +#define vec_short8_z vec_short8_sp(0) + +typedef __vector unsigned int vec_uint4; +#define vec_uint4_set(...) (vec_uint4){__VA_ARGS__} +#define vec_uint4_sp(c) (__VSX_S4__(vec_uint4, (unsigned int)c)) +#define vec_uint4_c(v) ((vec_uint4)(v)) +#define vec_uint4_z vec_uint4_sp(0) + +typedef __vector signed int vec_int4; +#define vec_int4_set(...) (vec_int4){__VA_ARGS__} +#define vec_int4_sp(c) (__VSX_S4__(vec_int4, (signed int)c)) +#define vec_int4_c(v) ((vec_int4)(v)) +#define vec_int4_z vec_int4_sp(0) + +typedef __vector float vec_float4; +#define vec_float4_set(...) (vec_float4){__VA_ARGS__} +#define vec_float4_sp(c) (__VSX_S4__(vec_float4, c)) +#define vec_float4_c(v) ((vec_float4)(v)) +#define vec_float4_z vec_float4_sp(0) + +typedef __vector unsigned long long vec_udword2; +#define vec_udword2_set(...) (vec_udword2){__VA_ARGS__} +#define vec_udword2_sp(c) (__VSX_S2__(vec_udword2, (unsigned long long)c)) +#define vec_udword2_c(v) ((vec_udword2)(v)) +#define vec_udword2_z vec_udword2_sp(0) + +typedef __vector signed long long vec_dword2; +#define vec_dword2_set(...) (vec_dword2){__VA_ARGS__} +#define vec_dword2_sp(c) (__VSX_S2__(vec_dword2, (signed long long)c)) +#define vec_dword2_c(v) ((vec_dword2)(v)) +#define vec_dword2_z vec_dword2_sp(0) + +typedef __vector double vec_double2; +#define vec_double2_set(...) (vec_double2){__VA_ARGS__} +#define vec_double2_c(v) ((vec_double2)(v)) +#define vec_double2_sp(c) (__VSX_S2__(vec_double2, c)) +#define vec_double2_z vec_double2_sp(0) + +#define vec_bchar16 __vector __bool char +#define vec_bchar16_set(...) (vec_bchar16){__VA_ARGS__} +#define vec_bchar16_c(v) ((vec_bchar16)(v)) + +#define vec_bshort8 __vector __bool short +#define vec_bshort8_set(...) (vec_bshort8){__VA_ARGS__} +#define vec_bshort8_c(v) ((vec_bshort8)(v)) + +#define vec_bint4 __vector __bool int +#define vec_bint4_set(...) (vec_bint4){__VA_ARGS__} +#define vec_bint4_c(v) ((vec_bint4)(v)) + +#define vec_bdword2 __vector __bool long long +#define vec_bdword2_set(...) 
(vec_bdword2){__VA_ARGS__} +#define vec_bdword2_c(v) ((vec_bdword2)(v)) + +#define VSX_FINLINE(tp) extern inline tp __attribute__((always_inline)) + +#define VSX_REDIRECT_1RG(rt, rg, fnm, fn2) \ +VSX_FINLINE(rt) fnm(const rg& a) { return fn2(a); } + +#define VSX_REDIRECT_2RG(rt, rg, fnm, fn2) \ +VSX_FINLINE(rt) fnm(const rg& a, const rg& b) { return fn2(a, b); } + +/* + * GCC VSX compatibility +**/ +#if defined(__GNUG__) && !defined(__clang__) + +// inline asm helper +#define VSX_IMPL_1RG(rt, rg, opc, fnm) \ +VSX_FINLINE(rt) fnm(const rg& a) \ +{ rt rs; __asm__ __volatile__(#opc" %x0,%x1" : "=wa" (rs) : "wa" (a)); return rs; } + +#define VSX_IMPL_1VRG(rt, rg, opc, fnm) \ +VSX_FINLINE(rt) fnm(const rg& a) \ +{ rt rs; __asm__ __volatile__(#opc" %0,%1" : "=v" (rs) : "v" (a)); return rs; } + +#define VSX_IMPL_2VRG_F(rt, rg, fopc, fnm) \ +VSX_FINLINE(rt) fnm(const rg& a, const rg& b) \ +{ rt rs; __asm__ __volatile__(fopc : "=v" (rs) : "v" (a), "v" (b)); return rs; } + +#define VSX_IMPL_2VRG(rt, rg, opc, fnm) VSX_IMPL_2VRG_F(rt, rg, #opc" %0,%1,%2", fnm) + +#if __GNUG__ < 8 + + // Support for int4 -> dword2 expanding multiply was added in GCC 8. + #ifdef vec_mule + #undef vec_mule + #endif + #ifdef vec_mulo + #undef vec_mulo + #endif + + VSX_REDIRECT_2RG(vec_ushort8, vec_uchar16, vec_mule, __builtin_vec_mule) + VSX_REDIRECT_2RG(vec_short8, vec_char16, vec_mule, __builtin_vec_mule) + VSX_REDIRECT_2RG(vec_int4, vec_short8, vec_mule, __builtin_vec_mule) + VSX_REDIRECT_2RG(vec_uint4, vec_ushort8, vec_mule, __builtin_vec_mule) + VSX_REDIRECT_2RG(vec_ushort8, vec_uchar16, vec_mulo, __builtin_vec_mulo) + VSX_REDIRECT_2RG(vec_short8, vec_char16, vec_mulo, __builtin_vec_mulo) + VSX_REDIRECT_2RG(vec_int4, vec_short8, vec_mulo, __builtin_vec_mulo) + VSX_REDIRECT_2RG(vec_uint4, vec_ushort8, vec_mulo, __builtin_vec_mulo) + + // dword2 support arrived in ISA 2.07 and GCC 8+ + VSX_IMPL_2VRG(vec_dword2, vec_int4, vmulosw, vec_mule) + VSX_IMPL_2VRG(vec_udword2, vec_uint4, vmulouw, vec_mule) + VSX_IMPL_2VRG(vec_dword2, vec_int4, vmulesw, vec_mulo) + VSX_IMPL_2VRG(vec_udword2, vec_uint4, vmuleuw, vec_mulo) + +#endif + +#if __GNUG__ < 7 +// up to GCC 6 vec_mul only supports precisions and llong +# ifdef vec_mul +# undef vec_mul +# endif +/* + * there's no a direct instruction for supporting 8-bit, 16-bit multiplication in ISA 2.07, + * XLC Implement it by using instruction "multiply even", "multiply odd" and "permute" +**/ +# define VSX_IMPL_MULH(Tvec, cperm) \ + VSX_FINLINE(Tvec) vec_mul(const Tvec& a, const Tvec& b) \ + { \ + static const vec_uchar16 ev_od = {cperm}; \ + return vec_perm((Tvec)vec_mule(a, b), (Tvec)vec_mulo(a, b), ev_od); \ + } + #define VSX_IMPL_MULH_P16 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30 + VSX_IMPL_MULH(vec_char16, VSX_IMPL_MULH_P16) + VSX_IMPL_MULH(vec_uchar16, VSX_IMPL_MULH_P16) + #define VSX_IMPL_MULH_P8 0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29 + VSX_IMPL_MULH(vec_short8, VSX_IMPL_MULH_P8) + VSX_IMPL_MULH(vec_ushort8, VSX_IMPL_MULH_P8) + // vmuluwm can be used for unsigned or signed integers, that's what they said + VSX_IMPL_2VRG(vec_int4, vec_int4, vmuluwm, vec_mul) + VSX_IMPL_2VRG(vec_uint4, vec_uint4, vmuluwm, vec_mul) + // redirect to GCC builtin vec_mul, since it already supports precisions and llong + VSX_REDIRECT_2RG(vec_float4, vec_float4, vec_mul, __builtin_vec_mul) + VSX_REDIRECT_2RG(vec_double2, vec_double2, vec_mul, __builtin_vec_mul) + VSX_REDIRECT_2RG(vec_dword2, vec_dword2, vec_mul, __builtin_vec_mul) + VSX_REDIRECT_2RG(vec_udword2, 
vec_udword2, vec_mul, __builtin_vec_mul) +#endif // __GNUG__ < 7 + +#if __GNUG__ < 6 +/* + * Instruction "compare greater than or equal" in ISA 2.07 only supports single + * and double precision. + * In XLC and new versions of GCC implement integers by using instruction "greater than" and NOR. +**/ +# ifdef vec_cmpge +# undef vec_cmpge +# endif +# ifdef vec_cmple +# undef vec_cmple +# endif +# define vec_cmple(a, b) vec_cmpge(b, a) +# define VSX_IMPL_CMPGE(rt, rg, opc, fnm) \ + VSX_IMPL_2VRG_F(rt, rg, #opc" %0,%2,%1\n\t xxlnor %x0,%x0,%x0", fnm) + + VSX_IMPL_CMPGE(vec_bchar16, vec_char16, vcmpgtsb, vec_cmpge) + VSX_IMPL_CMPGE(vec_bchar16, vec_uchar16, vcmpgtub, vec_cmpge) + VSX_IMPL_CMPGE(vec_bshort8, vec_short8, vcmpgtsh, vec_cmpge) + VSX_IMPL_CMPGE(vec_bshort8, vec_ushort8, vcmpgtuh, vec_cmpge) + VSX_IMPL_CMPGE(vec_bint4, vec_int4, vcmpgtsw, vec_cmpge) + VSX_IMPL_CMPGE(vec_bint4, vec_uint4, vcmpgtuw, vec_cmpge) + VSX_IMPL_CMPGE(vec_bdword2, vec_dword2, vcmpgtsd, vec_cmpge) + VSX_IMPL_CMPGE(vec_bdword2, vec_udword2, vcmpgtud, vec_cmpge) + +// redirect to GCC builtin cmpge, since it already supports precisions + VSX_REDIRECT_2RG(vec_bint4, vec_float4, vec_cmpge, __builtin_vec_cmpge) + VSX_REDIRECT_2RG(vec_bdword2, vec_double2, vec_cmpge, __builtin_vec_cmpge) + +// up to gcc5 vec_nor doesn't support bool long long +# undef vec_nor + template + VSX_REDIRECT_2RG(T, T, vec_nor, __builtin_vec_nor) + + VSX_FINLINE(vec_bdword2) vec_nor(const vec_bdword2& a, const vec_bdword2& b) + { return vec_bdword2_c(__builtin_vec_nor(vec_dword2_c(a), vec_dword2_c(b))); } + +// vec_packs doesn't support double words in gcc4 and old versions of gcc5 +# undef vec_packs + VSX_REDIRECT_2RG(vec_char16, vec_short8, vec_packs, __builtin_vec_packs) + VSX_REDIRECT_2RG(vec_uchar16, vec_ushort8, vec_packs, __builtin_vec_packs) + VSX_REDIRECT_2RG(vec_short8, vec_int4, vec_packs, __builtin_vec_packs) + VSX_REDIRECT_2RG(vec_ushort8, vec_uint4, vec_packs, __builtin_vec_packs) + + VSX_IMPL_2VRG_F(vec_int4, vec_dword2, "vpksdss %0,%2,%1", vec_packs) + VSX_IMPL_2VRG_F(vec_uint4, vec_udword2, "vpkudus %0,%2,%1", vec_packs) +#endif // __GNUG__ < 6 + +#if __GNUG__ < 5 +// vec_xxpermdi in gcc4 missing little-endian supports just like clang +# define vec_permi(a, b, c) vec_xxpermdi(b, a, (3 ^ (((c) & 1) << 1 | (c) >> 1))) +// same as vec_xxpermdi +# undef vec_vbpermq + VSX_IMPL_2VRG(vec_udword2, vec_uchar16, vbpermq, vec_vbpermq) + VSX_IMPL_2VRG(vec_dword2, vec_char16, vbpermq, vec_vbpermq) +#else +# define vec_permi vec_xxpermdi +#endif // __GNUG__ < 5 + +// shift left double by word immediate +#ifndef vec_sldw +# define vec_sldw __builtin_vsx_xxsldwi +#endif + +// vector population count +VSX_IMPL_1VRG(vec_uchar16, vec_uchar16, vpopcntb, vec_popcntu) +VSX_IMPL_1VRG(vec_uchar16, vec_char16, vpopcntb, vec_popcntu) +VSX_IMPL_1VRG(vec_ushort8, vec_ushort8, vpopcnth, vec_popcntu) +VSX_IMPL_1VRG(vec_ushort8, vec_short8, vpopcnth, vec_popcntu) +VSX_IMPL_1VRG(vec_uint4, vec_uint4, vpopcntw, vec_popcntu) +VSX_IMPL_1VRG(vec_uint4, vec_int4, vpopcntw, vec_popcntu) +VSX_IMPL_1VRG(vec_udword2, vec_udword2, vpopcntd, vec_popcntu) +VSX_IMPL_1VRG(vec_udword2, vec_dword2, vpopcntd, vec_popcntu) + +// converts between single and double-precision +VSX_REDIRECT_1RG(vec_float4, vec_double2, vec_cvfo, __builtin_vsx_xvcvdpsp) +VSX_REDIRECT_1RG(vec_double2, vec_float4, vec_cvfo, __builtin_vsx_xvcvspdp) + +// converts word and doubleword to double-precision +#undef vec_ctd +VSX_IMPL_1RG(vec_double2, vec_int4, xvcvsxwdp, vec_ctdo) 
+VSX_IMPL_1RG(vec_double2, vec_uint4, xvcvuxwdp, vec_ctdo) +VSX_IMPL_1RG(vec_double2, vec_dword2, xvcvsxddp, vec_ctd) +VSX_IMPL_1RG(vec_double2, vec_udword2, xvcvuxddp, vec_ctd) + +// converts word and doubleword to single-precision +#undef vec_ctf +VSX_IMPL_1RG(vec_float4, vec_int4, xvcvsxwsp, vec_ctf) +VSX_IMPL_1RG(vec_float4, vec_uint4, xvcvuxwsp, vec_ctf) +VSX_IMPL_1RG(vec_float4, vec_dword2, xvcvsxdsp, vec_ctfo) +VSX_IMPL_1RG(vec_float4, vec_udword2, xvcvuxdsp, vec_ctfo) + +// converts single and double precision to signed word +#undef vec_cts +VSX_IMPL_1RG(vec_int4, vec_double2, xvcvdpsxws, vec_ctso) +VSX_IMPL_1RG(vec_int4, vec_float4, xvcvspsxws, vec_cts) + +// converts single and double precision to unsigned word +#undef vec_ctu +VSX_IMPL_1RG(vec_uint4, vec_double2, xvcvdpuxws, vec_ctuo) +VSX_IMPL_1RG(vec_uint4, vec_float4, xvcvspuxws, vec_ctu) + +// converts single and double precision to signed doubleword +#undef vec_ctsl +VSX_IMPL_1RG(vec_dword2, vec_double2, xvcvdpsxds, vec_ctsl) +VSX_IMPL_1RG(vec_dword2, vec_float4, xvcvspsxds, vec_ctslo) + +// converts single and double precision to unsigned doubleword +#undef vec_ctul +VSX_IMPL_1RG(vec_udword2, vec_double2, xvcvdpuxds, vec_ctul) +VSX_IMPL_1RG(vec_udword2, vec_float4, xvcvspuxds, vec_ctulo) + +// just in case if GCC doesn't define it +#ifndef vec_xl +# define vec_xl vec_vsx_ld +# define vec_xst vec_vsx_st +#endif + +#endif // GCC VSX compatibility + +/* + * CLANG VSX compatibility +**/ +#if defined(__clang__) && !defined(__IBMCPP__) + +/* + * CLANG doesn't support %x in the inline asm template which fixes register number + * when using any of the register constraints wa, wd, wf + * + * For more explanation checkout PowerPC and IBM RS6000 in https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html + * Also there's already an open bug https://bugs.llvm.org/show_bug.cgi?id=31837 + * + * So we're not able to use inline asm and only use built-in functions that CLANG supports + * and use __builtin_convertvector if clang missing any of vector conversions built-in functions + * + * todo: clang asm template bug is fixed, need to reconsider the current workarounds. 
+*/ + +// convert vector helper +#define VSX_IMPL_CONVERT(rt, rg, fnm) \ +VSX_FINLINE(rt) fnm(const rg& a) { return __builtin_convertvector(a, rt); } + +#if __clang_major__ < 5 +// implement vec_permi in a dirty way +# define VSX_IMPL_CLANG_4_PERMI(Tvec) \ + VSX_FINLINE(Tvec) vec_permi(const Tvec& a, const Tvec& b, unsigned const char c) \ + { \ + switch (c) \ + { \ + case 0: \ + return vec_mergeh(a, b); \ + case 1: \ + return vec_mergel(vec_mergeh(a, a), b); \ + case 2: \ + return vec_mergeh(vec_mergel(a, a), b); \ + default: \ + return vec_mergel(a, b); \ + } \ + } + VSX_IMPL_CLANG_4_PERMI(vec_udword2) + VSX_IMPL_CLANG_4_PERMI(vec_dword2) + VSX_IMPL_CLANG_4_PERMI(vec_double2) + +// vec_xxsldwi is missing in clang 4 +# define vec_xxsldwi(a, b, c) vec_sld(a, b, (c) * 4) +#else +// vec_xxpermdi is missing little-endian supports in clang 4 just like gcc4 +# define vec_permi(a, b, c) vec_xxpermdi(b, a, (3 ^ (((c) & 1) << 1 | (c) >> 1))) +#endif // __clang_major__ < 5 + +// shift left double by word immediate +#ifndef vec_sldw +# define vec_sldw vec_xxsldwi +#endif + +// Implement vec_rsqrt since clang only supports vec_rsqrte +#ifndef vec_rsqrt + VSX_FINLINE(vec_float4) vec_rsqrt(const vec_float4& a) + { return vec_div(vec_float4_sp(1), vec_sqrt(a)); } + + VSX_FINLINE(vec_double2) vec_rsqrt(const vec_double2& a) + { return vec_div(vec_double2_sp(1), vec_sqrt(a)); } +#endif + +// vec_promote missing support for doubleword +VSX_FINLINE(vec_dword2) vec_promote(long long a, int b) +{ + vec_dword2 ret = vec_dword2_z; + ret[b & 1] = a; + return ret; +} + +VSX_FINLINE(vec_udword2) vec_promote(unsigned long long a, int b) +{ + vec_udword2 ret = vec_udword2_z; + ret[b & 1] = a; + return ret; +} + +// vec_popcnt should return unsigned but clang has different thought just like gcc in vec_vpopcnt +#define VSX_IMPL_POPCNTU(Tvec, Tvec2, ucast) \ +VSX_FINLINE(Tvec) vec_popcntu(const Tvec2& a) \ +{ return ucast(vec_popcnt(a)); } +VSX_IMPL_POPCNTU(vec_uchar16, vec_char16, vec_uchar16_c); +VSX_IMPL_POPCNTU(vec_ushort8, vec_short8, vec_ushort8_c); +VSX_IMPL_POPCNTU(vec_uint4, vec_int4, vec_uint4_c); +VSX_IMPL_POPCNTU(vec_udword2, vec_dword2, vec_udword2_c); +// redirect unsigned types +VSX_REDIRECT_1RG(vec_uchar16, vec_uchar16, vec_popcntu, vec_popcnt) +VSX_REDIRECT_1RG(vec_ushort8, vec_ushort8, vec_popcntu, vec_popcnt) +VSX_REDIRECT_1RG(vec_uint4, vec_uint4, vec_popcntu, vec_popcnt) +VSX_REDIRECT_1RG(vec_udword2, vec_udword2, vec_popcntu, vec_popcnt) + +// converts between single and double precision +VSX_REDIRECT_1RG(vec_float4, vec_double2, vec_cvfo, __builtin_vsx_xvcvdpsp) +VSX_REDIRECT_1RG(vec_double2, vec_float4, vec_cvfo, __builtin_vsx_xvcvspdp) + +// converts word and doubleword to double-precision +#ifdef vec_ctd +# undef vec_ctd +#endif +VSX_REDIRECT_1RG(vec_double2, vec_int4, vec_ctdo, __builtin_vsx_xvcvsxwdp) +VSX_REDIRECT_1RG(vec_double2, vec_uint4, vec_ctdo, __builtin_vsx_xvcvuxwdp) + +VSX_IMPL_CONVERT(vec_double2, vec_dword2, vec_ctd) +VSX_IMPL_CONVERT(vec_double2, vec_udword2, vec_ctd) + +// converts word and doubleword to single-precision +#if __clang_major__ > 4 +# undef vec_ctf +#endif +VSX_IMPL_CONVERT(vec_float4, vec_int4, vec_ctf) +VSX_IMPL_CONVERT(vec_float4, vec_uint4, vec_ctf) +VSX_REDIRECT_1RG(vec_float4, vec_dword2, vec_ctfo, __builtin_vsx_xvcvsxdsp) +VSX_REDIRECT_1RG(vec_float4, vec_udword2, vec_ctfo, __builtin_vsx_xvcvuxdsp) + +// converts single and double precision to signed word +#if __clang_major__ > 4 +# undef vec_cts +#endif +VSX_REDIRECT_1RG(vec_int4, vec_double2, vec_ctso, 
__builtin_vsx_xvcvdpsxws) +VSX_IMPL_CONVERT(vec_int4, vec_float4, vec_cts) + +// converts single and double precision to unsigned word +#if __clang_major__ > 4 +# undef vec_ctu +#endif +VSX_REDIRECT_1RG(vec_uint4, vec_double2, vec_ctuo, __builtin_vsx_xvcvdpuxws) +VSX_IMPL_CONVERT(vec_uint4, vec_float4, vec_ctu) + +// converts single and double precision to signed doubleword +#ifdef vec_ctsl +# undef vec_ctsl +#endif +VSX_IMPL_CONVERT(vec_dword2, vec_double2, vec_ctsl) +// __builtin_convertvector unable to convert, xvcvspsxds is missing on it +VSX_FINLINE(vec_dword2) vec_ctslo(const vec_float4& a) +{ return vec_ctsl(vec_cvfo(a)); } + +// converts single and double precision to unsigned doubleword +#ifdef vec_ctul +# undef vec_ctul +#endif +VSX_IMPL_CONVERT(vec_udword2, vec_double2, vec_ctul) +// __builtin_convertvector unable to convert, xvcvspuxds is missing on it +VSX_FINLINE(vec_udword2) vec_ctulo(const vec_float4& a) +{ return vec_ctul(vec_cvfo(a)); } + +#endif // CLANG VSX compatibility + +/* + * Common GCC, CLANG compatibility +**/ +#if defined(__GNUG__) && !defined(__IBMCPP__) + +#ifdef vec_cvf +# undef vec_cvf +#endif + +#define VSX_IMPL_CONV_EVEN_4_2(rt, rg, fnm, fn2) \ +VSX_FINLINE(rt) fnm(const rg& a) \ +{ return fn2(vec_sldw(a, a, 1)); } + +VSX_IMPL_CONV_EVEN_4_2(vec_double2, vec_float4, vec_cvf, vec_cvfo) +VSX_IMPL_CONV_EVEN_4_2(vec_double2, vec_int4, vec_ctd, vec_ctdo) +VSX_IMPL_CONV_EVEN_4_2(vec_double2, vec_uint4, vec_ctd, vec_ctdo) + +VSX_IMPL_CONV_EVEN_4_2(vec_dword2, vec_float4, vec_ctsl, vec_ctslo) +VSX_IMPL_CONV_EVEN_4_2(vec_udword2, vec_float4, vec_ctul, vec_ctulo) + +#define VSX_IMPL_CONV_EVEN_2_4(rt, rg, fnm, fn2) \ +VSX_FINLINE(rt) fnm(const rg& a) \ +{ \ + rt v4 = fn2(a); \ + return vec_sldw(v4, v4, 3); \ +} + +VSX_IMPL_CONV_EVEN_2_4(vec_float4, vec_double2, vec_cvf, vec_cvfo) +VSX_IMPL_CONV_EVEN_2_4(vec_float4, vec_dword2, vec_ctf, vec_ctfo) +VSX_IMPL_CONV_EVEN_2_4(vec_float4, vec_udword2, vec_ctf, vec_ctfo) + +VSX_IMPL_CONV_EVEN_2_4(vec_int4, vec_double2, vec_cts, vec_ctso) +VSX_IMPL_CONV_EVEN_2_4(vec_uint4, vec_double2, vec_ctu, vec_ctuo) + +// Only for Eigen! 
+/* + * changing behavior of conversion intrinsics for gcc has effect on Eigen + * so we redefine old behavior again only on gcc, clang +*/ +#if !defined(__clang__) || __clang_major__ > 4 + // ignoring second arg since Eigen only truncates toward zero +# define VSX_IMPL_CONV_2VARIANT(rt, rg, fnm, fn2) \ + VSX_FINLINE(rt) fnm(const rg& a, int only_truncate) \ + { \ + assert(only_truncate == 0); \ + CV_UNUSED(only_truncate); \ + return fn2(a); \ + } + VSX_IMPL_CONV_2VARIANT(vec_int4, vec_float4, vec_cts, vec_cts) + VSX_IMPL_CONV_2VARIANT(vec_uint4, vec_float4, vec_ctu, vec_ctu) + VSX_IMPL_CONV_2VARIANT(vec_float4, vec_int4, vec_ctf, vec_ctf) + VSX_IMPL_CONV_2VARIANT(vec_float4, vec_uint4, vec_ctf, vec_ctf) + // define vec_cts for converting double precision to signed doubleword + // which isn't compatible with xlc but its okay since Eigen only uses it for gcc + VSX_IMPL_CONV_2VARIANT(vec_dword2, vec_double2, vec_cts, vec_ctsl) +#endif // Eigen + +#endif // Common GCC, CLANG compatibility + +/* + * XLC VSX compatibility +**/ +#if defined(__IBMCPP__) + +// vector population count +#define vec_popcntu vec_popcnt + +// overload and redirect with setting second arg to zero +// since we only support conversions without the second arg +#define VSX_IMPL_OVERLOAD_Z2(rt, rg, fnm) \ +VSX_FINLINE(rt) fnm(const rg& a) { return fnm(a, 0); } + +VSX_IMPL_OVERLOAD_Z2(vec_double2, vec_int4, vec_ctd) +VSX_IMPL_OVERLOAD_Z2(vec_double2, vec_uint4, vec_ctd) +VSX_IMPL_OVERLOAD_Z2(vec_double2, vec_dword2, vec_ctd) +VSX_IMPL_OVERLOAD_Z2(vec_double2, vec_udword2, vec_ctd) + +VSX_IMPL_OVERLOAD_Z2(vec_float4, vec_int4, vec_ctf) +VSX_IMPL_OVERLOAD_Z2(vec_float4, vec_uint4, vec_ctf) +VSX_IMPL_OVERLOAD_Z2(vec_float4, vec_dword2, vec_ctf) +VSX_IMPL_OVERLOAD_Z2(vec_float4, vec_udword2, vec_ctf) + +VSX_IMPL_OVERLOAD_Z2(vec_int4, vec_double2, vec_cts) +VSX_IMPL_OVERLOAD_Z2(vec_int4, vec_float4, vec_cts) + +VSX_IMPL_OVERLOAD_Z2(vec_uint4, vec_double2, vec_ctu) +VSX_IMPL_OVERLOAD_Z2(vec_uint4, vec_float4, vec_ctu) + +VSX_IMPL_OVERLOAD_Z2(vec_dword2, vec_double2, vec_ctsl) +VSX_IMPL_OVERLOAD_Z2(vec_dword2, vec_float4, vec_ctsl) + +VSX_IMPL_OVERLOAD_Z2(vec_udword2, vec_double2, vec_ctul) +VSX_IMPL_OVERLOAD_Z2(vec_udword2, vec_float4, vec_ctul) + +// fixme: implement conversions of odd-numbered elements in a dirty way +// since xlc doesn't support VSX registers operand in inline asm. 
+#define VSX_IMPL_CONV_ODD_4_2(rt, rg, fnm, fn2) \ +VSX_FINLINE(rt) fnm(const rg& a) { return fn2(vec_sldw(a, a, 3)); } + +VSX_IMPL_CONV_ODD_4_2(vec_double2, vec_float4, vec_cvfo, vec_cvf) +VSX_IMPL_CONV_ODD_4_2(vec_double2, vec_int4, vec_ctdo, vec_ctd) +VSX_IMPL_CONV_ODD_4_2(vec_double2, vec_uint4, vec_ctdo, vec_ctd) + +VSX_IMPL_CONV_ODD_4_2(vec_dword2, vec_float4, vec_ctslo, vec_ctsl) +VSX_IMPL_CONV_ODD_4_2(vec_udword2, vec_float4, vec_ctulo, vec_ctul) + +#define VSX_IMPL_CONV_ODD_2_4(rt, rg, fnm, fn2) \ +VSX_FINLINE(rt) fnm(const rg& a) \ +{ \ + rt v4 = fn2(a); \ + return vec_sldw(v4, v4, 1); \ +} + +VSX_IMPL_CONV_ODD_2_4(vec_float4, vec_double2, vec_cvfo, vec_cvf) +VSX_IMPL_CONV_ODD_2_4(vec_float4, vec_dword2, vec_ctfo, vec_ctf) +VSX_IMPL_CONV_ODD_2_4(vec_float4, vec_udword2, vec_ctfo, vec_ctf) + +VSX_IMPL_CONV_ODD_2_4(vec_int4, vec_double2, vec_ctso, vec_cts) +VSX_IMPL_CONV_ODD_2_4(vec_uint4, vec_double2, vec_ctuo, vec_ctu) + +#endif // XLC VSX compatibility + +// ignore GCC warning that caused by -Wunused-but-set-variable in rare cases +#if defined(__GNUG__) && !defined(__clang__) +# define VSX_UNUSED(Tvec) Tvec __attribute__((__unused__)) +#else // CLANG, XLC +# define VSX_UNUSED(Tvec) Tvec +#endif + +// gcc can find his way in casting log int and XLC, CLANG ambiguous +#if defined(__clang__) || defined(__IBMCPP__) + VSX_FINLINE(vec_udword2) vec_splats(uint64 v) + { return vec_splats((unsigned long long) v); } + + VSX_FINLINE(vec_dword2) vec_splats(int64 v) + { return vec_splats((long long) v); } + + VSX_FINLINE(vec_udword2) vec_promote(uint64 a, int b) + { return vec_promote((unsigned long long) a, b); } + + VSX_FINLINE(vec_dword2) vec_promote(int64 a, int b) + { return vec_promote((long long) a, b); } +#endif + +/* + * implement vsx_ld(offset, pointer), vsx_st(vector, offset, pointer) + * load and set using offset depend on the pointer type + * + * implement vsx_ldf(offset, pointer), vsx_stf(vector, offset, pointer) + * load and set using offset depend on fixed bytes size + * + * Note: In clang vec_xl and vec_xst fails to load unaligned addresses + * so we are using vec_vsx_ld, vec_vsx_st instead +*/ + +#if defined(__clang__) && !defined(__IBMCPP__) +# define vsx_ldf vec_vsx_ld +# define vsx_stf vec_vsx_st +#else // GCC , XLC +# define vsx_ldf vec_xl +# define vsx_stf vec_xst +#endif + +#define VSX_OFFSET(o, p) ((o) * sizeof(*(p))) +#define vsx_ld(o, p) vsx_ldf(VSX_OFFSET(o, p), p) +#define vsx_st(v, o, p) vsx_stf(v, VSX_OFFSET(o, p), p) + +/* + * implement vsx_ld2(offset, pointer), vsx_st2(vector, offset, pointer) to load and store double words + * In GCC vec_xl and vec_xst it maps to vec_vsx_ld, vec_vsx_st which doesn't support long long + * and in CLANG we are using vec_vsx_ld, vec_vsx_st because vec_xl, vec_xst fails to load unaligned addresses + * + * In XLC vec_xl and vec_xst fail to cast int64(long int) to long long +*/ +#if (defined(__GNUG__) || defined(__clang__)) && !defined(__IBMCPP__) + VSX_FINLINE(vec_udword2) vsx_ld2(long o, const uint64* p) + { return vec_udword2_c(vsx_ldf(VSX_OFFSET(o, p), (unsigned int*)p)); } + + VSX_FINLINE(vec_dword2) vsx_ld2(long o, const int64* p) + { return vec_dword2_c(vsx_ldf(VSX_OFFSET(o, p), (int*)p)); } + + VSX_FINLINE(void) vsx_st2(const vec_udword2& vec, long o, uint64* p) + { vsx_stf(vec_uint4_c(vec), VSX_OFFSET(o, p), (unsigned int*)p); } + + VSX_FINLINE(void) vsx_st2(const vec_dword2& vec, long o, int64* p) + { vsx_stf(vec_int4_c(vec), VSX_OFFSET(o, p), (int*)p); } +#else // XLC + VSX_FINLINE(vec_udword2) vsx_ld2(long o, const 
uint64* p) + { return vsx_ldf(VSX_OFFSET(o, p), (unsigned long long*)p); } + + VSX_FINLINE(vec_dword2) vsx_ld2(long o, const int64* p) + { return vsx_ldf(VSX_OFFSET(o, p), (long long*)p); } + + VSX_FINLINE(void) vsx_st2(const vec_udword2& vec, long o, uint64* p) + { vsx_stf(vec, VSX_OFFSET(o, p), (unsigned long long*)p); } + + VSX_FINLINE(void) vsx_st2(const vec_dword2& vec, long o, int64* p) + { vsx_stf(vec, VSX_OFFSET(o, p), (long long*)p); } +#endif + +// Store lower 8 byte +#define vec_st_l8(v, p) *((uint64*)(p)) = vec_extract(vec_udword2_c(v), 0) + +// Store higher 8 byte +#define vec_st_h8(v, p) *((uint64*)(p)) = vec_extract(vec_udword2_c(v), 1) + +// Load 64-bits of integer data to lower part +#define VSX_IMPL_LOAD_L8(Tvec, Tp) \ +VSX_FINLINE(Tvec) vec_ld_l8(const Tp *p) \ +{ return ((Tvec)vec_promote(*((uint64*)p), 0)); } + +VSX_IMPL_LOAD_L8(vec_uchar16, uchar) +VSX_IMPL_LOAD_L8(vec_char16, schar) +VSX_IMPL_LOAD_L8(vec_ushort8, ushort) +VSX_IMPL_LOAD_L8(vec_short8, short) +VSX_IMPL_LOAD_L8(vec_uint4, uint) +VSX_IMPL_LOAD_L8(vec_int4, int) +VSX_IMPL_LOAD_L8(vec_float4, float) +VSX_IMPL_LOAD_L8(vec_udword2, uint64) +VSX_IMPL_LOAD_L8(vec_dword2, int64) +VSX_IMPL_LOAD_L8(vec_double2, double) + +// logical not +#define vec_not(a) vec_nor(a, a) + +// power9 yaya +// not equal +#ifndef vec_cmpne +# define vec_cmpne(a, b) vec_not(vec_cmpeq(a, b)) +#endif + +// absolute difference +#ifndef vec_absd +# define vec_absd(a, b) vec_sub(vec_max(a, b), vec_min(a, b)) +#endif + +/* + * Implement vec_unpacklu and vec_unpackhu + * since vec_unpackl, vec_unpackh only support signed integers +**/ +#define VSX_IMPL_UNPACKU(rt, rg, zero) \ +VSX_FINLINE(rt) vec_unpacklu(const rg& a) \ +{ return (rt)(vec_mergel(a, zero)); } \ +VSX_FINLINE(rt) vec_unpackhu(const rg& a) \ +{ return (rt)(vec_mergeh(a, zero)); } + +VSX_IMPL_UNPACKU(vec_ushort8, vec_uchar16, vec_uchar16_z) +VSX_IMPL_UNPACKU(vec_uint4, vec_ushort8, vec_ushort8_z) +VSX_IMPL_UNPACKU(vec_udword2, vec_uint4, vec_uint4_z) + +/* + * Implement vec_mergesqe and vec_mergesqo + * Merges the sequence values of even and odd elements of two vectors +*/ +#define VSX_IMPL_PERM(rt, fnm, ...) 
\ +VSX_FINLINE(rt) fnm(const rt& a, const rt& b) \ +{ static const vec_uchar16 perm = {__VA_ARGS__}; return vec_perm(a, b, perm); } + +// 16 +#define perm16_mergesqe 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30 +#define perm16_mergesqo 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 +VSX_IMPL_PERM(vec_uchar16, vec_mergesqe, perm16_mergesqe) +VSX_IMPL_PERM(vec_uchar16, vec_mergesqo, perm16_mergesqo) +VSX_IMPL_PERM(vec_char16, vec_mergesqe, perm16_mergesqe) +VSX_IMPL_PERM(vec_char16, vec_mergesqo, perm16_mergesqo) +// 8 +#define perm8_mergesqe 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 +#define perm8_mergesqo 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 +VSX_IMPL_PERM(vec_ushort8, vec_mergesqe, perm8_mergesqe) +VSX_IMPL_PERM(vec_ushort8, vec_mergesqo, perm8_mergesqo) +VSX_IMPL_PERM(vec_short8, vec_mergesqe, perm8_mergesqe) +VSX_IMPL_PERM(vec_short8, vec_mergesqo, perm8_mergesqo) +// 4 +#define perm4_mergesqe 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27 +#define perm4_mergesqo 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 +VSX_IMPL_PERM(vec_uint4, vec_mergesqe, perm4_mergesqe) +VSX_IMPL_PERM(vec_uint4, vec_mergesqo, perm4_mergesqo) +VSX_IMPL_PERM(vec_int4, vec_mergesqe, perm4_mergesqe) +VSX_IMPL_PERM(vec_int4, vec_mergesqo, perm4_mergesqo) +VSX_IMPL_PERM(vec_float4, vec_mergesqe, perm4_mergesqe) +VSX_IMPL_PERM(vec_float4, vec_mergesqo, perm4_mergesqo) +// 2 +VSX_REDIRECT_2RG(vec_double2, vec_double2, vec_mergesqe, vec_mergeh) +VSX_REDIRECT_2RG(vec_double2, vec_double2, vec_mergesqo, vec_mergel) +VSX_REDIRECT_2RG(vec_dword2, vec_dword2, vec_mergesqe, vec_mergeh) +VSX_REDIRECT_2RG(vec_dword2, vec_dword2, vec_mergesqo, vec_mergel) +VSX_REDIRECT_2RG(vec_udword2, vec_udword2, vec_mergesqe, vec_mergeh) +VSX_REDIRECT_2RG(vec_udword2, vec_udword2, vec_mergesqo, vec_mergel) + +/* + * Implement vec_mergesqh and vec_mergesql + * Merges the sequence most and least significant halves of two vectors +*/ +#define VSX_IMPL_MERGESQHL(Tvec) \ +VSX_FINLINE(Tvec) vec_mergesqh(const Tvec& a, const Tvec& b) \ +{ return (Tvec)vec_mergeh(vec_udword2_c(a), vec_udword2_c(b)); } \ +VSX_FINLINE(Tvec) vec_mergesql(const Tvec& a, const Tvec& b) \ +{ return (Tvec)vec_mergel(vec_udword2_c(a), vec_udword2_c(b)); } +VSX_IMPL_MERGESQHL(vec_uchar16) +VSX_IMPL_MERGESQHL(vec_char16) +VSX_IMPL_MERGESQHL(vec_ushort8) +VSX_IMPL_MERGESQHL(vec_short8) +VSX_IMPL_MERGESQHL(vec_uint4) +VSX_IMPL_MERGESQHL(vec_int4) +VSX_IMPL_MERGESQHL(vec_float4) +VSX_REDIRECT_2RG(vec_udword2, vec_udword2, vec_mergesqh, vec_mergeh) +VSX_REDIRECT_2RG(vec_udword2, vec_udword2, vec_mergesql, vec_mergel) +VSX_REDIRECT_2RG(vec_dword2, vec_dword2, vec_mergesqh, vec_mergeh) +VSX_REDIRECT_2RG(vec_dword2, vec_dword2, vec_mergesql, vec_mergel) +VSX_REDIRECT_2RG(vec_double2, vec_double2, vec_mergesqh, vec_mergeh) +VSX_REDIRECT_2RG(vec_double2, vec_double2, vec_mergesql, vec_mergel) + + +// 2 and 4 channels interleave for all types except 2 lanes +#define VSX_IMPL_ST_INTERLEAVE(Tp, Tvec) \ +VSX_FINLINE(void) vec_st_interleave(const Tvec& a, const Tvec& b, Tp* ptr) \ +{ \ + vsx_stf(vec_mergeh(a, b), 0, ptr); \ + vsx_stf(vec_mergel(a, b), 16, ptr); \ +} \ +VSX_FINLINE(void) vec_st_interleave(const Tvec& a, const Tvec& b, \ + const Tvec& c, const Tvec& d, Tp* ptr) \ +{ \ + Tvec ac = vec_mergeh(a, c); \ + Tvec bd = vec_mergeh(b, d); \ + vsx_stf(vec_mergeh(ac, bd), 0, ptr); \ + vsx_stf(vec_mergel(ac, bd), 16, ptr); \ + ac = vec_mergel(a, c); \ + bd = vec_mergel(b, d); \ + vsx_stf(vec_mergeh(ac, 
bd), 32, ptr); \ + vsx_stf(vec_mergel(ac, bd), 48, ptr); \ +} +VSX_IMPL_ST_INTERLEAVE(uchar, vec_uchar16) +VSX_IMPL_ST_INTERLEAVE(schar, vec_char16) +VSX_IMPL_ST_INTERLEAVE(ushort, vec_ushort8) +VSX_IMPL_ST_INTERLEAVE(short, vec_short8) +VSX_IMPL_ST_INTERLEAVE(uint, vec_uint4) +VSX_IMPL_ST_INTERLEAVE(int, vec_int4) +VSX_IMPL_ST_INTERLEAVE(float, vec_float4) + +// 2 and 4 channels deinterleave for 16 lanes +#define VSX_IMPL_ST_DINTERLEAVE_8(Tp, Tvec) \ +VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b) \ +{ \ + Tvec v0 = vsx_ld(0, ptr); \ + Tvec v1 = vsx_ld(16, ptr); \ + a = vec_mergesqe(v0, v1); \ + b = vec_mergesqo(v0, v1); \ +} \ +VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b, \ + Tvec& c, Tvec& d) \ +{ \ + Tvec v0 = vsx_ld(0, ptr); \ + Tvec v1 = vsx_ld(16, ptr); \ + Tvec v2 = vsx_ld(32, ptr); \ + Tvec v3 = vsx_ld(48, ptr); \ + Tvec m0 = vec_mergesqe(v0, v1); \ + Tvec m1 = vec_mergesqe(v2, v3); \ + a = vec_mergesqe(m0, m1); \ + c = vec_mergesqo(m0, m1); \ + m0 = vec_mergesqo(v0, v1); \ + m1 = vec_mergesqo(v2, v3); \ + b = vec_mergesqe(m0, m1); \ + d = vec_mergesqo(m0, m1); \ +} +VSX_IMPL_ST_DINTERLEAVE_8(uchar, vec_uchar16) +VSX_IMPL_ST_DINTERLEAVE_8(schar, vec_char16) + +// 2 and 4 channels deinterleave for 8 lanes +#define VSX_IMPL_ST_DINTERLEAVE_16(Tp, Tvec) \ +VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b) \ +{ \ + Tvec v0 = vsx_ld(0, ptr); \ + Tvec v1 = vsx_ld(8, ptr); \ + a = vec_mergesqe(v0, v1); \ + b = vec_mergesqo(v0, v1); \ +} \ +VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b, \ + Tvec& c, Tvec& d) \ +{ \ + Tvec v0 = vsx_ld(0, ptr); \ + Tvec v1 = vsx_ld(8, ptr); \ + Tvec m0 = vec_mergeh(v0, v1); \ + Tvec m1 = vec_mergel(v0, v1); \ + Tvec ab0 = vec_mergeh(m0, m1); \ + Tvec cd0 = vec_mergel(m0, m1); \ + v0 = vsx_ld(16, ptr); \ + v1 = vsx_ld(24, ptr); \ + m0 = vec_mergeh(v0, v1); \ + m1 = vec_mergel(v0, v1); \ + Tvec ab1 = vec_mergeh(m0, m1); \ + Tvec cd1 = vec_mergel(m0, m1); \ + a = vec_mergesqh(ab0, ab1); \ + b = vec_mergesql(ab0, ab1); \ + c = vec_mergesqh(cd0, cd1); \ + d = vec_mergesql(cd0, cd1); \ +} +VSX_IMPL_ST_DINTERLEAVE_16(ushort, vec_ushort8) +VSX_IMPL_ST_DINTERLEAVE_16(short, vec_short8) + +// 2 and 4 channels deinterleave for 4 lanes +#define VSX_IMPL_ST_DINTERLEAVE_32(Tp, Tvec) \ +VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b) \ +{ \ + a = vsx_ld(0, ptr); \ + b = vsx_ld(4, ptr); \ + Tvec m0 = vec_mergeh(a, b); \ + Tvec m1 = vec_mergel(a, b); \ + a = vec_mergeh(m0, m1); \ + b = vec_mergel(m0, m1); \ +} \ +VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b, \ + Tvec& c, Tvec& d) \ +{ \ + Tvec v0 = vsx_ld(0, ptr); \ + Tvec v1 = vsx_ld(4, ptr); \ + Tvec v2 = vsx_ld(8, ptr); \ + Tvec v3 = vsx_ld(12, ptr); \ + Tvec m0 = vec_mergeh(v0, v2); \ + Tvec m1 = vec_mergeh(v1, v3); \ + a = vec_mergeh(m0, m1); \ + b = vec_mergel(m0, m1); \ + m0 = vec_mergel(v0, v2); \ + m1 = vec_mergel(v1, v3); \ + c = vec_mergeh(m0, m1); \ + d = vec_mergel(m0, m1); \ +} +VSX_IMPL_ST_DINTERLEAVE_32(uint, vec_uint4) +VSX_IMPL_ST_DINTERLEAVE_32(int, vec_int4) +VSX_IMPL_ST_DINTERLEAVE_32(float, vec_float4) + +// 2 and 4 channels interleave and deinterleave for 2 lanes +#define VSX_IMPL_ST_D_INTERLEAVE_64(Tp, Tvec, ld_func, st_func) \ +VSX_FINLINE(void) vec_st_interleave(const Tvec& a, const Tvec& b, Tp* ptr) \ +{ \ + st_func(vec_mergeh(a, b), 0, ptr); \ + st_func(vec_mergel(a, b), 2, ptr); \ +} \ +VSX_FINLINE(void) vec_st_interleave(const Tvec& a, const Tvec& b, \ + 
const Tvec& c, const Tvec& d, Tp* ptr) \ +{ \ + st_func(vec_mergeh(a, b), 0, ptr); \ + st_func(vec_mergeh(c, d), 2, ptr); \ + st_func(vec_mergel(a, b), 4, ptr); \ + st_func(vec_mergel(c, d), 6, ptr); \ +} \ +VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b) \ +{ \ + Tvec m0 = ld_func(0, ptr); \ + Tvec m1 = ld_func(2, ptr); \ + a = vec_mergeh(m0, m1); \ + b = vec_mergel(m0, m1); \ +} \ +VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b, \ + Tvec& c, Tvec& d) \ +{ \ + Tvec v0 = ld_func(0, ptr); \ + Tvec v1 = ld_func(2, ptr); \ + Tvec v2 = ld_func(4, ptr); \ + Tvec v3 = ld_func(6, ptr); \ + a = vec_mergeh(v0, v2); \ + b = vec_mergel(v0, v2); \ + c = vec_mergeh(v1, v3); \ + d = vec_mergel(v1, v3); \ +} +VSX_IMPL_ST_D_INTERLEAVE_64(int64, vec_dword2, vsx_ld2, vsx_st2) +VSX_IMPL_ST_D_INTERLEAVE_64(uint64, vec_udword2, vsx_ld2, vsx_st2) +VSX_IMPL_ST_D_INTERLEAVE_64(double, vec_double2, vsx_ld, vsx_st) + +/* 3 channels */ +#define VSX_IMPL_ST_INTERLEAVE_3CH_16(Tp, Tvec) \ +VSX_FINLINE(void) vec_st_interleave(const Tvec& a, const Tvec& b, \ + const Tvec& c, Tp* ptr) \ +{ \ + static const vec_uchar16 a12 = {0, 16, 0, 1, 17, 0, 2, 18, 0, 3, 19, 0, 4, 20, 0, 5}; \ + static const vec_uchar16 a123 = {0, 1, 16, 3, 4, 17, 6, 7, 18, 9, 10, 19, 12, 13, 20, 15}; \ + vsx_st(vec_perm(vec_perm(a, b, a12), c, a123), 0, ptr); \ + static const vec_uchar16 b12 = {21, 0, 6, 22, 0, 7, 23, 0, 8, 24, 0, 9, 25, 0, 10, 26}; \ + static const vec_uchar16 b123 = {0, 21, 2, 3, 22, 5, 6, 23, 8, 9, 24, 11, 12, 25, 14, 15}; \ + vsx_st(vec_perm(vec_perm(a, b, b12), c, b123), 16, ptr); \ + static const vec_uchar16 c12 = {0, 11, 27, 0, 12, 28, 0, 13, 29, 0, 14, 30, 0, 15, 31, 0}; \ + static const vec_uchar16 c123 = {26, 1, 2, 27, 4, 5, 28, 7, 8, 29, 10, 11, 30, 13, 14, 31}; \ + vsx_st(vec_perm(vec_perm(a, b, c12), c, c123), 32, ptr); \ +} \ +VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b, Tvec& c) \ +{ \ + Tvec v1 = vsx_ld(0, ptr); \ + Tvec v2 = vsx_ld(16, ptr); \ + Tvec v3 = vsx_ld(32, ptr); \ + static const vec_uchar16 a12_perm = {0, 3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 0, 0, 0, 0, 0}; \ + static const vec_uchar16 a123_perm = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 17, 20, 23, 26, 29}; \ + a = vec_perm(vec_perm(v1, v2, a12_perm), v3, a123_perm); \ + static const vec_uchar16 b12_perm = {1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 31, 0, 0, 0, 0, 0}; \ + static const vec_uchar16 b123_perm = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 18, 21, 24, 27, 30}; \ + b = vec_perm(vec_perm(v1, v2, b12_perm), v3, b123_perm); \ + static const vec_uchar16 c12_perm = {2, 5, 8, 11, 14, 17, 20, 23, 26, 29, 0, 0, 0, 0, 0, 0}; \ + static const vec_uchar16 c123_perm = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16, 19, 22, 25, 28, 31}; \ + c = vec_perm(vec_perm(v1, v2, c12_perm), v3, c123_perm); \ +} +VSX_IMPL_ST_INTERLEAVE_3CH_16(uchar, vec_uchar16) +VSX_IMPL_ST_INTERLEAVE_3CH_16(schar, vec_char16) + +#define VSX_IMPL_ST_INTERLEAVE_3CH_8(Tp, Tvec) \ +VSX_FINLINE(void) vec_st_interleave(const Tvec& a, const Tvec& b, \ + const Tvec& c, Tp* ptr) \ +{ \ + static const vec_uchar16 a12 = {0, 1, 16, 17, 0, 0, 2, 3, 18, 19, 0, 0, 4, 5, 20, 21}; \ + static const vec_uchar16 a123 = {0, 1, 2, 3, 16, 17, 6, 7, 8, 9, 18, 19, 12, 13, 14, 15}; \ + vsx_st(vec_perm(vec_perm(a, b, a12), c, a123), 0, ptr); \ + static const vec_uchar16 b12 = {0, 0, 6, 7, 22, 23, 0, 0, 8, 9, 24, 25, 0, 0, 10, 11}; \ + static const vec_uchar16 b123 = {20, 21, 2, 3, 4, 5, 22, 23, 8, 9, 10, 11, 24, 25, 14, 15}; \ + vsx_st(vec_perm(vec_perm(a, b, b12), c, 
b123), 8, ptr); \ + static const vec_uchar16 c12 = {26, 27, 0, 0, 12, 13, 28, 29, 0, 0, 14, 15, 30, 31, 0, 0}; \ + static const vec_uchar16 c123 = {0, 1, 26, 27, 4, 5, 6, 7, 28, 29, 10, 11, 12, 13, 30, 31}; \ + vsx_st(vec_perm(vec_perm(a, b, c12), c, c123), 16, ptr); \ +} \ +VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b, Tvec& c) \ +{ \ + Tvec v1 = vsx_ld(0, ptr); \ + Tvec v2 = vsx_ld(8, ptr); \ + Tvec v3 = vsx_ld(16, ptr); \ + static const vec_uchar16 a12_perm = {0, 1, 6, 7, 12, 13, 18, 19, 24, 25, 30, 31, 0, 0, 0, 0}; \ + static const vec_uchar16 a123_perm = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 20, 21, 26, 27}; \ + a = vec_perm(vec_perm(v1, v2, a12_perm), v3, a123_perm); \ + static const vec_uchar16 b12_perm = {2, 3, 8, 9, 14, 15, 20, 21, 26, 27, 0, 0, 0, 0, 0, 0}; \ + static const vec_uchar16 b123_perm = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16, 17, 22, 23, 28, 29}; \ + b = vec_perm(vec_perm(v1, v2, b12_perm), v3, b123_perm); \ + static const vec_uchar16 c12_perm = {4, 5, 10, 11, 16, 17, 22, 23, 28, 29, 0, 0, 0, 0, 0, 0}; \ + static const vec_uchar16 c123_perm = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 18, 19, 24, 25, 30, 31}; \ + c = vec_perm(vec_perm(v1, v2, c12_perm), v3, c123_perm); \ +} +VSX_IMPL_ST_INTERLEAVE_3CH_8(ushort, vec_ushort8) +VSX_IMPL_ST_INTERLEAVE_3CH_8(short, vec_short8) + +#define VSX_IMPL_ST_INTERLEAVE_3CH_4(Tp, Tvec) \ +VSX_FINLINE(void) vec_st_interleave(const Tvec& a, const Tvec& b, \ + const Tvec& c, Tp* ptr) \ +{ \ + Tvec hbc = vec_mergeh(b, c); \ + static const vec_uchar16 ahbc = {0, 1, 2, 3, 16, 17, 18, 19, 20, 21, 22, 23, 4, 5, 6, 7}; \ + vsx_st(vec_perm(a, hbc, ahbc), 0, ptr); \ + Tvec lab = vec_mergel(a, b); \ + vsx_st(vec_sld(lab, hbc, 8), 4, ptr); \ + static const vec_uchar16 clab = {8, 9, 10, 11, 24, 25, 26, 27, 28, 29, 30, 31, 12, 13, 14, 15};\ + vsx_st(vec_perm(c, lab, clab), 8, ptr); \ +} \ +VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, Tvec& b, Tvec& c) \ +{ \ + Tvec v1 = vsx_ld(0, ptr); \ + Tvec v2 = vsx_ld(4, ptr); \ + Tvec v3 = vsx_ld(8, ptr); \ + static const vec_uchar16 flp = {0, 1, 2, 3, 12, 13, 14, 15, 16, 17, 18, 19, 28, 29, 30, 31}; \ + a = vec_perm(v1, vec_sld(v3, v2, 8), flp); \ + static const vec_uchar16 flp2 = {28, 29, 30, 31, 0, 1, 2, 3, 12, 13, 14, 15, 16, 17, 18, 19}; \ + b = vec_perm(v2, vec_sld(v1, v3, 8), flp2); \ + c = vec_perm(vec_sld(v2, v1, 8), v3, flp); \ +} +VSX_IMPL_ST_INTERLEAVE_3CH_4(uint, vec_uint4) +VSX_IMPL_ST_INTERLEAVE_3CH_4(int, vec_int4) +VSX_IMPL_ST_INTERLEAVE_3CH_4(float, vec_float4) + +#define VSX_IMPL_ST_INTERLEAVE_3CH_2(Tp, Tvec, ld_func, st_func) \ +VSX_FINLINE(void) vec_st_interleave(const Tvec& a, const Tvec& b, \ + const Tvec& c, Tp* ptr) \ +{ \ + st_func(vec_mergeh(a, b), 0, ptr); \ + st_func(vec_permi(c, a, 1), 2, ptr); \ + st_func(vec_mergel(b, c), 4, ptr); \ +} \ +VSX_FINLINE(void) vec_ld_deinterleave(const Tp* ptr, Tvec& a, \ + Tvec& b, Tvec& c) \ +{ \ + Tvec v1 = ld_func(0, ptr); \ + Tvec v2 = ld_func(2, ptr); \ + Tvec v3 = ld_func(4, ptr); \ + a = vec_permi(v1, v2, 1); \ + b = vec_permi(v1, v3, 2); \ + c = vec_permi(v2, v3, 1); \ +} +VSX_IMPL_ST_INTERLEAVE_3CH_2(int64, vec_dword2, vsx_ld2, vsx_st2) +VSX_IMPL_ST_INTERLEAVE_3CH_2(uint64, vec_udword2, vsx_ld2, vsx_st2) +VSX_IMPL_ST_INTERLEAVE_3CH_2(double, vec_double2, vsx_ld, vsx_st) + +#endif // CV_VSX + +//! 
@} + +#endif // OPENCV_HAL_VSX_UTILS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/core/wimage.hpp b/third_party/opencv/uos/sw_64/include/opencv2/core/wimage.hpp new file mode 100644 index 00000000..c7b6efa0 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/core/wimage.hpp @@ -0,0 +1,603 @@ +/*M////////////////////////////////////////////////////////////////////////////// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to +// this license. If you do not agree to this license, do not download, +// install, copy or use the software. +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2008, Google, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation or contributors may not be used to endorse +// or promote products derived from this software without specific +// prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" +// and any express or implied warranties, including, but not limited to, the +// implied warranties of merchantability and fitness for a particular purpose +// are disclaimed. In no event shall the Intel Corporation or contributors be +// liable for any direct, indirect, incidental, special, exemplary, or +// consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +///////////////////////////////////////////////////////////////////////////////// +//M*/ + +#ifndef OPENCV_CORE_WIMAGE_HPP +#define OPENCV_CORE_WIMAGE_HPP + +#include "opencv2/core/core_c.h" + +#ifdef __cplusplus + +namespace cv { + +//! @addtogroup core +//! @{ + +template class WImage; +template class WImageBuffer; +template class WImageView; + +template class WImageC; +template class WImageBufferC; +template class WImageViewC; + +// Commonly used typedefs. 
+typedef WImage<uchar> WImage_b;
+typedef WImageView<uchar> WImageView_b;
+typedef WImageBuffer<uchar> WImageBuffer_b;
+
+typedef WImageC<uchar, 1> WImage1_b;
+typedef WImageViewC<uchar, 1> WImageView1_b;
+typedef WImageBufferC<uchar, 1> WImageBuffer1_b;
+
+typedef WImageC<uchar, 3> WImage3_b;
+typedef WImageViewC<uchar, 3> WImageView3_b;
+typedef WImageBufferC<uchar, 3> WImageBuffer3_b;
+
+typedef WImage<float> WImage_f;
+typedef WImageView<float> WImageView_f;
+typedef WImageBuffer<float> WImageBuffer_f;
+
+typedef WImageC<float, 1> WImage1_f;
+typedef WImageViewC<float, 1> WImageView1_f;
+typedef WImageBufferC<float, 1> WImageBuffer1_f;
+
+typedef WImageC<float, 3> WImage3_f;
+typedef WImageViewC<float, 3> WImageView3_f;
+typedef WImageBufferC<float, 3> WImageBuffer3_f;
+
+// There isn't a standard for signed and unsigned short so be more
+// explicit in the typename for these cases.
+typedef WImage<short> WImage_16s;
+typedef WImageView<short> WImageView_16s;
+typedef WImageBuffer<short> WImageBuffer_16s;
+
+typedef WImageC<short, 1> WImage1_16s;
+typedef WImageViewC<short, 1> WImageView1_16s;
+typedef WImageBufferC<short, 1> WImageBuffer1_16s;
+
+typedef WImageC<short, 3> WImage3_16s;
+typedef WImageViewC<short, 3> WImageView3_16s;
+typedef WImageBufferC<short, 3> WImageBuffer3_16s;
+
+typedef WImage<ushort> WImage_16u;
+typedef WImageView<ushort> WImageView_16u;
+typedef WImageBuffer<ushort> WImageBuffer_16u;
+
+typedef WImageC<ushort, 1> WImage1_16u;
+typedef WImageViewC<ushort, 1> WImageView1_16u;
+typedef WImageBufferC<ushort, 1> WImageBuffer1_16u;
+
+typedef WImageC<ushort, 3> WImage3_16u;
+typedef WImageViewC<ushort, 3> WImageView3_16u;
+typedef WImageBufferC<ushort, 3> WImageBuffer3_16u;
+
+/** @brief Image class which provides a thin layer around an IplImage.
+
+The goals of the class design are:
+
+    -# All the data has explicit ownership to avoid memory leaks
+    -# No hidden allocations or copies for performance.
+    -# Easy access to OpenCV methods (which will access IPP if available)
+    -# Can easily treat external data as an image
+    -# Easy to create images which are subsets of other images
+    -# Fast pixel access which can take advantage of number of channels if known at compile time.
+
+The WImage class is the image class which provides the data accessors. The 'W' comes from the fact
+that it is also a wrapper around the popular but inconvenient IplImage class. A WImage can be
+constructed either using a WImageBuffer class which allocates and frees the data, or using a
+WImageView class which constructs a subimage or a view into external data. The view class does no
+memory management. Each class actually has two versions, one when the number of channels is known
+at compile time and one when it isn't. Using the one with the number of channels specified can
+provide some compile time optimizations by using the fact that the number of channels is a
+constant.
+
+We use the convention (c,r) to refer to column c and row r with (0,0) being the upper left corner.
+This is similar to standard Euclidean coordinates with the first coordinate varying in the
+horizontal direction and the second coordinate varying in the vertical direction. Thus (c,r) is
+usually in the domain [0, width) X [0, height)
+
+Example usage:
+@code
+WImageBuffer3_b im(5,7); // Make a 5X7 3 channel image of type uchar
+WImageView3_b sub_im(im, 2,2, 3,3); // 3X3 submatrix
+vector<float> vec(10, 3.0f);
+WImageView1_f user_im(&vec[0], 2, 5); // 2X5 image w/ supplied data
+
+im.SetZero(); // same as cvSetZero(im.Ipl())
+*im(2, 3) = 15; // Modify the element at column 2, row 3
+MySetRand(&sub_im);
+
+// Copy the second row into the first. This can be done with no memory
+// allocation and will use SSE if IPP is available.
+int w = im.Width(); +im.View(0,0, w,1).CopyFrom(im.View(0,1, w,1)); + +// Doesn't care about source of data since using WImage +void MySetRand(WImage_b* im) { // Works with any number of channels +for (int r = 0; r < im->Height(); ++r) { + float* row = im->Row(r); + for (int c = 0; c < im->Width(); ++c) { + for (int ch = 0; ch < im->Channels(); ++ch, ++row) { + *row = uchar(rand() & 255); + } + } +} +} +@endcode + +Functions that are not part of the basic image allocation, viewing, and access should come from +OpenCV, except some useful functions that are not part of OpenCV can be found in wimage_util.h +*/ +template +class WImage +{ +public: + typedef T BaseType; + + // WImage is an abstract class with no other virtual methods so make the + // destructor virtual. + virtual ~WImage() = 0; + + // Accessors + IplImage* Ipl() {return image_; } + const IplImage* Ipl() const {return image_; } + T* ImageData() { return reinterpret_cast(image_->imageData); } + const T* ImageData() const { + return reinterpret_cast(image_->imageData); + } + + int Width() const {return image_->width; } + int Height() const {return image_->height; } + + // WidthStep is the number of bytes to go to the pixel with the next y coord + int WidthStep() const {return image_->widthStep; } + + int Channels() const {return image_->nChannels; } + int ChannelSize() const {return sizeof(T); } // number of bytes per channel + + // Number of bytes per pixel + int PixelSize() const {return Channels() * ChannelSize(); } + + // Return depth type (e.g. IPL_DEPTH_8U, IPL_DEPTH_32F) which is the number + // of bits per channel and with the signed bit set. + // This is known at compile time using specializations. + int Depth() const; + + inline const T* Row(int r) const { + return reinterpret_cast(image_->imageData + r*image_->widthStep); + } + + inline T* Row(int r) { + return reinterpret_cast(image_->imageData + r*image_->widthStep); + } + + // Pixel accessors which returns a pointer to the start of the channel + inline T* operator() (int c, int r) { + return reinterpret_cast(image_->imageData + r*image_->widthStep) + + c*Channels(); + } + + inline const T* operator() (int c, int r) const { + return reinterpret_cast(image_->imageData + r*image_->widthStep) + + c*Channels(); + } + + // Copy the contents from another image which is just a convenience to cvCopy + void CopyFrom(const WImage& src) { cvCopy(src.Ipl(), image_); } + + // Set contents to zero which is just a convenient to cvSetZero + void SetZero() { cvSetZero(image_); } + + // Construct a view into a region of this image + WImageView View(int c, int r, int width, int height); + +protected: + // Disallow copy and assignment + WImage(const WImage&); + void operator=(const WImage&); + + explicit WImage(IplImage* img) : image_(img) { + assert(!img || img->depth == Depth()); + } + + void SetIpl(IplImage* image) { + assert(!image || image->depth == Depth()); + image_ = image; + } + + IplImage* image_; +}; + + +/** Image class when both the pixel type and number of channels +are known at compile time. This wrapper will speed up some of the operations +like accessing individual pixels using the () operator. 
+*/ +template +class WImageC : public WImage +{ +public: + typedef typename WImage::BaseType BaseType; + enum { kChannels = C }; + + explicit WImageC(IplImage* img) : WImage(img) { + assert(!img || img->nChannels == Channels()); + } + + // Construct a view into a region of this image + WImageViewC View(int c, int r, int width, int height); + + // Copy the contents from another image which is just a convenience to cvCopy + void CopyFrom(const WImageC& src) { + cvCopy(src.Ipl(), WImage::image_); + } + + // WImageC is an abstract class with no other virtual methods so make the + // destructor virtual. + virtual ~WImageC() = 0; + + int Channels() const {return C; } + +protected: + // Disallow copy and assignment + WImageC(const WImageC&); + void operator=(const WImageC&); + + void SetIpl(IplImage* image) { + assert(!image || image->depth == WImage::Depth()); + WImage::SetIpl(image); + } +}; + +/** Image class which owns the data, so it can be allocated and is always +freed. It cannot be copied but can be explicitly cloned. +*/ +template +class WImageBuffer : public WImage +{ +public: + typedef typename WImage::BaseType BaseType; + + // Default constructor which creates an object that can be + WImageBuffer() : WImage(0) {} + + WImageBuffer(int width, int height, int nchannels) : WImage(0) { + Allocate(width, height, nchannels); + } + + // Constructor which takes ownership of a given IplImage so releases + // the image on destruction. + explicit WImageBuffer(IplImage* img) : WImage(img) {} + + // Allocate an image. Does nothing if current size is the same as + // the new size. + void Allocate(int width, int height, int nchannels); + + // Set the data to point to an image, releasing the old data + void SetIpl(IplImage* img) { + ReleaseImage(); + WImage::SetIpl(img); + } + + // Clone an image which reallocates the image if of a different dimension. + void CloneFrom(const WImage& src) { + Allocate(src.Width(), src.Height(), src.Channels()); + CopyFrom(src); + } + + ~WImageBuffer() { + ReleaseImage(); + } + + // Release the image if it isn't null. + void ReleaseImage() { + if (WImage::image_) { + IplImage* image = WImage::image_; + cvReleaseImage(&image); + WImage::SetIpl(0); + } + } + + bool IsNull() const {return WImage::image_ == NULL; } + +private: + // Disallow copy and assignment + WImageBuffer(const WImageBuffer&); + void operator=(const WImageBuffer&); +}; + +/** Like a WImageBuffer class but when the number of channels is known at compile time. +*/ +template +class WImageBufferC : public WImageC +{ +public: + typedef typename WImage::BaseType BaseType; + enum { kChannels = C }; + + // Default constructor which creates an object that can be + WImageBufferC() : WImageC(0) {} + + WImageBufferC(int width, int height) : WImageC(0) { + Allocate(width, height); + } + + // Constructor which takes ownership of a given IplImage so releases + // the image on destruction. + explicit WImageBufferC(IplImage* img) : WImageC(img) {} + + // Allocate an image. Does nothing if current size is the same as + // the new size. + void Allocate(int width, int height); + + // Set the data to point to an image, releasing the old data + void SetIpl(IplImage* img) { + ReleaseImage(); + WImageC::SetIpl(img); + } + + // Clone an image which reallocates the image if of a different dimension. + void CloneFrom(const WImageC& src) { + Allocate(src.Width(), src.Height()); + CopyFrom(src); + } + + ~WImageBufferC() { + ReleaseImage(); + } + + // Release the image if it isn't null. 
+ void ReleaseImage() { + if (WImage::image_) { + IplImage* image = WImage::image_; + cvReleaseImage(&image); + WImageC::SetIpl(0); + } + } + + bool IsNull() const {return WImage::image_ == NULL; } + +private: + // Disallow copy and assignment + WImageBufferC(const WImageBufferC&); + void operator=(const WImageBufferC&); +}; + +/** View into an image class which allows treating a subimage as an image or treating external data +as an image +*/ +template class WImageView : public WImage +{ +public: + typedef typename WImage::BaseType BaseType; + + // Construct a subimage. No checks are done that the subimage lies + // completely inside the original image. + WImageView(WImage* img, int c, int r, int width, int height); + + // Refer to external data. + // If not given width_step assumed to be same as width. + WImageView(T* data, int width, int height, int channels, int width_step = -1); + + // Refer to external data. This does NOT take ownership + // of the supplied IplImage. + WImageView(IplImage* img) : WImage(img) {} + + // Copy constructor + WImageView(const WImage& img) : WImage(0) { + header_ = *(img.Ipl()); + WImage::SetIpl(&header_); + } + + WImageView& operator=(const WImage& img) { + header_ = *(img.Ipl()); + WImage::SetIpl(&header_); + return *this; + } + +protected: + IplImage header_; +}; + + +template +class WImageViewC : public WImageC +{ +public: + typedef typename WImage::BaseType BaseType; + enum { kChannels = C }; + + // Default constructor needed for vectors of views. + WImageViewC(); + + virtual ~WImageViewC() {} + + // Construct a subimage. No checks are done that the subimage lies + // completely inside the original image. + WImageViewC(WImageC* img, + int c, int r, int width, int height); + + // Refer to external data + WImageViewC(T* data, int width, int height, int width_step = -1); + + // Refer to external data. This does NOT take ownership + // of the supplied IplImage. + WImageViewC(IplImage* img) : WImageC(img) {} + + // Copy constructor which does a shallow copy to allow multiple views + // of same data. gcc-4.1.1 gets confused if both versions of + // the constructor and assignment operator are not provided. 
+ WImageViewC(const WImageC& img) : WImageC(0) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + } + WImageViewC(const WImageViewC& img) : WImageC(0) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + } + + WImageViewC& operator=(const WImageC& img) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + return *this; + } + WImageViewC& operator=(const WImageViewC& img) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + return *this; + } + +protected: + IplImage header_; +}; + + +// Specializations for depth +template<> +inline int WImage::Depth() const {return IPL_DEPTH_8U; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_8S; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_16S; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_16U; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_32S; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_32F; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_64F; } + +template inline WImage::~WImage() {} +template inline WImageC::~WImageC() {} + +template +inline void WImageBuffer::Allocate(int width, int height, int nchannels) +{ + if (IsNull() || WImage::Width() != width || + WImage::Height() != height || WImage::Channels() != nchannels) { + ReleaseImage(); + WImage::image_ = cvCreateImage(cvSize(width, height), + WImage::Depth(), nchannels); + } +} + +template +inline void WImageBufferC::Allocate(int width, int height) +{ + if (IsNull() || WImage::Width() != width || WImage::Height() != height) { + ReleaseImage(); + WImageC::SetIpl(cvCreateImage(cvSize(width, height),WImage::Depth(), C)); + } +} + +template +WImageView::WImageView(WImage* img, int c, int r, int width, int height) + : WImage(0) +{ + header_ = *(img->Ipl()); + header_.imageData = reinterpret_cast((*img)(c, r)); + header_.width = width; + header_.height = height; + WImage::SetIpl(&header_); +} + +template +WImageView::WImageView(T* data, int width, int height, int nchannels, int width_step) + : WImage(0) +{ + cvInitImageHeader(&header_, cvSize(width, height), WImage::Depth(), nchannels); + header_.imageData = reinterpret_cast(data); + if (width_step > 0) { + header_.widthStep = width_step; + } + WImage::SetIpl(&header_); +} + +template +WImageViewC::WImageViewC(WImageC* img, int c, int r, int width, int height) + : WImageC(0) +{ + header_ = *(img->Ipl()); + header_.imageData = reinterpret_cast((*img)(c, r)); + header_.width = width; + header_.height = height; + WImageC::SetIpl(&header_); +} + +template +WImageViewC::WImageViewC() : WImageC(0) { + cvInitImageHeader(&header_, cvSize(0, 0), WImage::Depth(), C); + header_.imageData = reinterpret_cast(0); + WImageC::SetIpl(&header_); +} + +template +WImageViewC::WImageViewC(T* data, int width, int height, int width_step) + : WImageC(0) +{ + cvInitImageHeader(&header_, cvSize(width, height), WImage::Depth(), C); + header_.imageData = reinterpret_cast(data); + if (width_step > 0) { + header_.widthStep = width_step; + } + WImageC::SetIpl(&header_); +} + +// Construct a view into a region of an image +template +WImageView WImage::View(int c, int r, int width, int height) { + return WImageView(this, c, r, width, height); +} + +template +WImageViewC WImageC::View(int c, int r, int width, int height) { + return WImageViewC(this, c, r, width, height); +} + +//! 
@} core + +} // end of namespace + +#endif // __cplusplus + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/cvconfig.h b/third_party/opencv/uos/sw_64/include/opencv2/cvconfig.h new file mode 100644 index 00000000..250cc33d --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/cvconfig.h @@ -0,0 +1,250 @@ +#ifndef OPENCV_CVCONFIG_H_INCLUDED +#define OPENCV_CVCONFIG_H_INCLUDED + +/* OpenCV compiled as static or dynamic libs */ +/* #undef BUILD_SHARED_LIBS */ + +/* OpenCV intrinsics optimized code */ +#define CV_ENABLE_INTRINSICS + +/* OpenCV additional optimized code */ +/* #undef CV_DISABLE_OPTIMIZATION */ + +/* Compile for 'real' NVIDIA GPU architectures */ +#define CUDA_ARCH_BIN "" + +/* NVIDIA GPU features are used */ +#define CUDA_ARCH_FEATURES "" + +/* Compile for 'virtual' NVIDIA PTX architectures */ +#define CUDA_ARCH_PTX "" + +/* AVFoundation video libraries */ +/* #undef HAVE_AVFOUNDATION */ + +/* V4L capturing support */ +/* #undef HAVE_CAMV4L */ + +/* V4L2 capturing support */ +#define HAVE_CAMV4L2 + +/* Carbon windowing environment */ +/* #undef HAVE_CARBON */ + +/* AMD's Basic Linear Algebra Subprograms Library*/ +/* #undef HAVE_CLAMDBLAS */ + +/* AMD's OpenCL Fast Fourier Transform Library*/ +/* #undef HAVE_CLAMDFFT */ + +/* Clp support */ +/* #undef HAVE_CLP */ + +/* Cocoa API */ +/* #undef HAVE_COCOA */ + +/* C= */ +/* #undef HAVE_CSTRIPES */ + +/* NVIDIA CUDA Basic Linear Algebra Subprograms (BLAS) API*/ +/* #undef HAVE_CUBLAS */ + +/* NVIDIA CUDA Runtime API*/ +/* #undef HAVE_CUDA */ + +/* NVIDIA CUDA Fast Fourier Transform (FFT) API*/ +/* #undef HAVE_CUFFT */ + +/* IEEE1394 capturing support */ +/* #undef HAVE_DC1394 */ + +/* IEEE1394 capturing support - libdc1394 v2.x */ +/* #undef HAVE_DC1394_2 */ + +/* DirectX */ +/* #undef HAVE_DIRECTX */ +/* #undef HAVE_DIRECTX_NV12 */ +/* #undef HAVE_D3D11 */ +/* #undef HAVE_D3D10 */ +/* #undef HAVE_D3D9 */ + +/* DirectShow Video Capture library */ +/* #undef HAVE_DSHOW */ + +/* Eigen Matrix & Linear Algebra Library */ +/* #undef HAVE_EIGEN */ + +/* FFMpeg video library */ +/* #undef HAVE_FFMPEG */ + +/* Geospatial Data Abstraction Library */ +/* #undef HAVE_GDAL */ + +/* GStreamer multimedia framework */ +/* #undef HAVE_GSTREAMER */ + +/* GTK+ 2.0 Thread support */ +/* #undef HAVE_GTHREAD */ + +/* GTK+ 2.x toolkit */ +/* #undef HAVE_GTK */ + +/* Halide support */ +/* #undef HAVE_HALIDE */ + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_INTTYPES_H 1 + +/* Intel Perceptual Computing SDK library */ +/* #undef HAVE_INTELPERC */ + +/* Intel Integrated Performance Primitives */ +/* #undef HAVE_IPP */ +/* #undef HAVE_IPP_ICV */ +/* #undef HAVE_IPP_IW */ +/* #undef HAVE_IPP_IW_LL */ + +/* JPEG-2000 codec */ +#define HAVE_JASPER + +/* IJG JPEG codec */ +#define HAVE_JPEG + +/* libpng/png.h needs to be included */ +/* #undef HAVE_LIBPNG_PNG_H */ + +/* GDCM DICOM codec */ +/* #undef HAVE_GDCM */ + +/* V4L/V4L2 capturing support via libv4l */ +/* #undef HAVE_LIBV4L */ + +/* Microsoft Media Foundation Capture library */ +/* #undef HAVE_MSMF */ + +/* NVIDIA Video Decoding API*/ +/* #undef HAVE_NVCUVID */ +/* #undef HAVE_NVCUVID_HEADER */ +/* #undef HAVE_DYNLINK_NVCUVID_HEADER */ + +/* NVIDIA Video Encoding API*/ +/* #undef HAVE_NVCUVENC */ + +/* OpenCL Support */ +#define HAVE_OPENCL +/* #undef HAVE_OPENCL_STATIC */ +/* #undef HAVE_OPENCL_SVM */ + +/* NVIDIA OpenCL D3D Extensions support */ +/* #undef HAVE_OPENCL_D3D11_NV */ + +/* OpenEXR codec */ +#define HAVE_OPENEXR + +/* OpenGL support*/ +/* #undef HAVE_OPENGL */ + +/* OpenNI library */ +/* #undef HAVE_OPENNI */ + +/* OpenNI library */ +/* #undef HAVE_OPENNI2 */ + +/* PNG codec */ +#define HAVE_PNG + +/* Posix threads (pthreads) */ +#define HAVE_PTHREAD + +/* parallel_for with pthreads */ +#define HAVE_PTHREADS_PF + +/* Qt support */ +/* #undef HAVE_QT */ + +/* Qt OpenGL support */ +/* #undef HAVE_QT_OPENGL */ + +/* QuickTime video libraries */ +/* #undef HAVE_QUICKTIME */ + +/* QTKit video libraries */ +/* #undef HAVE_QTKIT */ + +/* Intel Threading Building Blocks */ +/* #undef HAVE_TBB */ + +/* TIFF codec */ +#define HAVE_TIFF + +/* Unicap video capture library */ +/* #undef HAVE_UNICAP */ + +/* Video for Windows support */ +/* #undef HAVE_VFW */ + +/* V4L2 capturing support in videoio.h */ +/* #undef HAVE_VIDEOIO */ + +/* Win32 UI */ +/* #undef HAVE_WIN32UI */ + +/* XIMEA camera support */ +/* #undef HAVE_XIMEA */ + +/* Xine video library */ +/* #undef HAVE_XINE */ + +/* Define if your processor stores words with the most significant byte + first (like Motorola and SPARC, unlike Intel and VAX). 
*/ +/* #undef WORDS_BIGENDIAN */ + +/* gPhoto2 library */ +/* #undef HAVE_GPHOTO2 */ + +/* VA library (libva) */ +/* #undef HAVE_VA */ + +/* Intel VA-API/OpenCL */ +/* #undef HAVE_VA_INTEL */ + +/* Intel Media SDK */ +/* #undef HAVE_MFX */ + +/* Lapack */ +/* #undef HAVE_LAPACK */ + +/* Library was compiled with functions instrumentation */ +/* #undef ENABLE_INSTRUMENTATION */ + +/* OpenVX */ +/* #undef HAVE_OPENVX */ + +#if defined(HAVE_XINE) || \ + defined(HAVE_GSTREAMER) || \ + defined(HAVE_QUICKTIME) || \ + defined(HAVE_QTKIT) || \ + defined(HAVE_AVFOUNDATION) || \ + /*defined(HAVE_OPENNI) || too specialized */ \ + defined(HAVE_FFMPEG) || \ + defined(HAVE_MSMF) +#define HAVE_VIDEO_INPUT +#endif + +#if /*defined(HAVE_XINE) || */\ + defined(HAVE_GSTREAMER) || \ + defined(HAVE_QUICKTIME) || \ + defined(HAVE_QTKIT) || \ + defined(HAVE_AVFOUNDATION) || \ + defined(HAVE_FFMPEG) || \ + defined(HAVE_MSMF) +#define HAVE_VIDEO_OUTPUT +#endif + +/* OpenCV trace utilities */ +#define OPENCV_TRACE + +/* Library QR-code decoding */ +#define HAVE_QUIRC + +#endif // OPENCV_CVCONFIG_H_INCLUDED diff --git a/third_party/opencv/uos/sw_64/include/opencv2/dnn.hpp b/third_party/opencv/uos/sw_64/include/opencv2/dnn.hpp new file mode 100644 index 00000000..97f2fe3f --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/dnn.hpp @@ -0,0 +1,78 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_DNN_HPP +#define OPENCV_DNN_HPP + +// This is an umbrella header to include into you project. 
+// We are free to change headers layout in dnn subfolder, so please include +// this header for future compatibility + + +/** @defgroup dnn Deep Neural Network module + @{ + This module contains: + - API for new layers creation, layers are building bricks of neural networks; + - set of built-in most-useful Layers; + - API to construct and modify comprehensive neural networks from layers; + - functionality for loading serialized networks models from different frameworks. + + Functionality of this module is designed only for forward pass computations (i.e. network testing). + A network training is in principle not supported. + @} +*/ +/** @example samples/dnn/classification.cpp +Check @ref tutorial_dnn_googlenet "the corresponding tutorial" for more details +*/ +/** @example samples/dnn/colorization.cpp +*/ +/** @example samples/dnn/object_detection.cpp +Check @ref tutorial_dnn_yolo "the corresponding tutorial" for more details +*/ +/** @example samples/dnn/openpose.cpp +*/ +/** @example samples/dnn/segmentation.cpp +*/ +/** @example samples/dnn/text_detection.cpp +*/ +#include + +#endif /* OPENCV_DNN_HPP */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/dnn/all_layers.hpp b/third_party/opencv/uos/sw_64/include/opencv2/dnn/all_layers.hpp new file mode 100644 index 00000000..e92ce2f5 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/dnn/all_layers.hpp @@ -0,0 +1,695 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef OPENCV_DNN_DNN_ALL_LAYERS_HPP +#define OPENCV_DNN_DNN_ALL_LAYERS_HPP +#include + +namespace cv { +namespace dnn { +CV__DNN_EXPERIMENTAL_NS_BEGIN +//! @addtogroup dnn +//! @{ + +/** @defgroup dnnLayerList Partial List of Implemented Layers + @{ + This subsection of dnn module contains information about built-in layers and their descriptions. + + Classes listed here, in fact, provides C++ API for creating instances of built-in layers. + In addition to this way of layers instantiation, there is a more common factory API (see @ref dnnLayerFactory), it allows to create layers dynamically (by name) and register new ones. + You can use both API, but factory API is less convenient for native C++ programming and basically designed for use inside importers (see @ref readNetFromCaffe(), @ref readNetFromTorch(), @ref readNetFromTensorflow()). + + Built-in layers partially reproduce functionality of corresponding Caffe and Torch7 layers. + In particular, the following layers and Caffe importer were tested to reproduce Caffe functionality: + - Convolution + - Deconvolution + - Pooling + - InnerProduct + - TanH, ReLU, Sigmoid, BNLL, Power, AbsVal + - Softmax + - Reshape, Flatten, Slice, Split + - LRN + - MVN + - Dropout (since it does nothing on forward pass -)) +*/ + + class CV_EXPORTS BlankLayer : public Layer + { + public: + static Ptr create(const LayerParams ¶ms); + }; + + /** + * Constant layer produces the same data blob at an every forward pass. + */ + class CV_EXPORTS ConstLayer : public Layer + { + public: + static Ptr create(const LayerParams ¶ms); + }; + + //! LSTM recurrent layer + class CV_EXPORTS LSTMLayer : public Layer + { + public: + /** Creates instance of LSTM layer */ + static Ptr create(const LayerParams& params); + + /** @deprecated Use LayerParams::blobs instead. + @brief Set trained weights for LSTM layer. + + LSTM behavior on each step is defined by current input, previous output, previous cell state and learned weights. + + Let @f$x_t@f$ be current input, @f$h_t@f$ be current output, @f$c_t@f$ be current state. + Than current output and current cell state is computed as follows: + @f{eqnarray*}{ + h_t &= o_t \odot tanh(c_t), \\ + c_t &= f_t \odot c_{t-1} + i_t \odot g_t, \\ + @f} + where @f$\odot@f$ is per-element multiply operation and @f$i_t, f_t, o_t, g_t@f$ is internal gates that are computed using learned weights. + + Gates are computed as follows: + @f{eqnarray*}{ + i_t &= sigmoid&(W_{xi} x_t + W_{hi} h_{t-1} + b_i), \\ + f_t &= sigmoid&(W_{xf} x_t + W_{hf} h_{t-1} + b_f), \\ + o_t &= sigmoid&(W_{xo} x_t + W_{ho} h_{t-1} + b_o), \\ + g_t &= tanh &(W_{xg} x_t + W_{hg} h_{t-1} + b_g), \\ + @f} + where @f$W_{x?}@f$, @f$W_{h?}@f$ and @f$b_{?}@f$ are learned weights represented as matrices: + @f$W_{x?} \in R^{N_h \times N_x}@f$, @f$W_{h?} \in R^{N_h \times N_h}@f$, @f$b_? \in R^{N_h}@f$. + + For simplicity and performance purposes we use @f$ W_x = [W_{xi}; W_{xf}; W_{xo}, W_{xg}] @f$ + (i.e. @f$W_x@f$ is vertical concatenation of @f$ W_{x?} @f$), @f$ W_x \in R^{4N_h \times N_x} @f$. + The same for @f$ W_h = [W_{hi}; W_{hf}; W_{ho}, W_{hg}], W_h \in R^{4N_h \times N_h} @f$ + and for @f$ b = [b_i; b_f, b_o, b_g]@f$, @f$b \in R^{4N_h} @f$. + + @param Wh is matrix defining how previous output is transformed to internal gates (i.e. according to above mentioned notation is @f$ W_h @f$) + @param Wx is matrix defining how current input is transformed to internal gates (i.e. according to above mentioned notation is @f$ W_x @f$) + @param b is bias vector (i.e. 
according to above mentioned notation is @f$ b @f$) + */ + CV_DEPRECATED virtual void setWeights(const Mat &Wh, const Mat &Wx, const Mat &b) = 0; + + /** @brief Specifies shape of output blob which will be [[`T`], `N`] + @p outTailShape. + * @details If this parameter is empty or unset then @p outTailShape = [`Wh`.size(0)] will be used, + * where `Wh` is parameter from setWeights(). + */ + virtual void setOutShape(const MatShape &outTailShape = MatShape()) = 0; + + /** @deprecated Use flag `produce_cell_output` in LayerParams. + * @brief Specifies either interpret first dimension of input blob as timestamp dimension either as sample. + * + * If flag is set to true then shape of input blob will be interpreted as [`T`, `N`, `[data dims]`] where `T` specifies number of timestamps, `N` is number of independent streams. + * In this case each forward() call will iterate through `T` timestamps and update layer's state `T` times. + * + * If flag is set to false then shape of input blob will be interpreted as [`N`, `[data dims]`]. + * In this case each forward() call will make one iteration and produce one timestamp with shape [`N`, `[out dims]`]. + */ + CV_DEPRECATED virtual void setUseTimstampsDim(bool use = true) = 0; + + /** @deprecated Use flag `use_timestamp_dim` in LayerParams. + * @brief If this flag is set to true then layer will produce @f$ c_t @f$ as second output. + * @details Shape of the second output is the same as first output. + */ + CV_DEPRECATED virtual void setProduceCellOutput(bool produce = false) = 0; + + /* In common case it use single input with @f$x_t@f$ values to compute output(s) @f$h_t@f$ (and @f$c_t@f$). + * @param input should contain packed values @f$x_t@f$ + * @param output contains computed outputs: @f$h_t@f$ (and @f$c_t@f$ if setProduceCellOutput() flag was set to true). + * + * If setUseTimstampsDim() is set to true then @p input[0] should has at least two dimensions with the following shape: [`T`, `N`, `[data dims]`], + * where `T` specifies number of timestamps, `N` is number of independent streams (i.e. @f$ x_{t_0 + t}^{stream} @f$ is stored inside @p input[0][t, stream, ...]). + * + * If setUseTimstampsDim() is set to false then @p input[0] should contain single timestamp, its shape should has form [`N`, `[data dims]`] with at least one dimension. + * (i.e. @f$ x_{t}^{stream} @f$ is stored inside @p input[0][stream, ...]). + */ + + int inputNameToIndex(String inputName) CV_OVERRIDE; + int outputNameToIndex(const String& outputName) CV_OVERRIDE; + }; + + /** @brief Classical recurrent layer + + Accepts two inputs @f$x_t@f$ and @f$h_{t-1}@f$ and compute two outputs @f$o_t@f$ and @f$h_t@f$. + + - input: should contain packed input @f$x_t@f$. + - output: should contain output @f$o_t@f$ (and @f$h_t@f$ if setProduceHiddenOutput() is set to true). + + input[0] should have shape [`T`, `N`, `data_dims`] where `T` and `N` is number of timestamps and number of independent samples of @f$x_t@f$ respectively. + + output[0] will have shape [`T`, `N`, @f$N_o@f$], where @f$N_o@f$ is number of rows in @f$ W_{xo} @f$ matrix. + + If setProduceHiddenOutput() is set to true then @p output[1] will contain a Mat with shape [`T`, `N`, @f$N_h@f$], where @f$N_h@f$ is number of rows in @f$ W_{hh} @f$ matrix. + */ + class CV_EXPORTS RNNLayer : public Layer + { + public: + /** Creates instance of RNNLayer */ + static Ptr create(const LayerParams& params); + + /** Setups learned weights. 
+ + Recurrent-layer behavior on each step is defined by current input @f$ x_t @f$, previous state @f$ h_t @f$ and learned weights as follows: + @f{eqnarray*}{ + h_t &= tanh&(W_{hh} h_{t-1} + W_{xh} x_t + b_h), \\ + o_t &= tanh&(W_{ho} h_t + b_o), + @f} + + @param Wxh is @f$ W_{xh} @f$ matrix + @param bh is @f$ b_{h} @f$ vector + @param Whh is @f$ W_{hh} @f$ matrix + @param Who is @f$ W_{xo} @f$ matrix + @param bo is @f$ b_{o} @f$ vector + */ + virtual void setWeights(const Mat &Wxh, const Mat &bh, const Mat &Whh, const Mat &Who, const Mat &bo) = 0; + + /** @brief If this flag is set to true then layer will produce @f$ h_t @f$ as second output. + * @details Shape of the second output is the same as first output. + */ + virtual void setProduceHiddenOutput(bool produce = false) = 0; + + }; + + class CV_EXPORTS BaseConvolutionLayer : public Layer + { + public: + CV_DEPRECATED_EXTERNAL Size kernel, stride, pad, dilation, adjustPad; + std::vector adjust_pads; + std::vector kernel_size, strides, dilations; + std::vector pads_begin, pads_end; + String padMode; + int numOutput; + }; + + class CV_EXPORTS ConvolutionLayer : public BaseConvolutionLayer + { + public: + static Ptr create(const LayerParams& params); + }; + + class CV_EXPORTS DeconvolutionLayer : public BaseConvolutionLayer + { + public: + static Ptr create(const LayerParams& params); + }; + + class CV_EXPORTS LRNLayer : public Layer + { + public: + int type; + + int size; + float alpha, beta, bias; + bool normBySize; + + static Ptr create(const LayerParams& params); + }; + + class CV_EXPORTS PoolingLayer : public Layer + { + public: + int type; + std::vector kernel_size, strides; + std::vector pads_begin, pads_end; + bool globalPooling; //!< Flag is true if at least one of the axes is global pooled. + std::vector isGlobalPooling; + bool computeMaxIdx; + String padMode; + bool ceilMode; + // If true for average pooling with padding, divide an every output region + // by a whole kernel area. Otherwise exclude zero padded values and divide + // by number of real values. + bool avePoolPaddedArea; + // ROIPooling parameters. + Size pooledSize; + float spatialScale; + // PSROIPooling parameters. + int psRoiOutChannels; + + static Ptr create(const LayerParams& params); + }; + + class CV_EXPORTS SoftmaxLayer : public Layer + { + public: + bool logSoftMax; + + static Ptr create(const LayerParams& params); + }; + + class CV_EXPORTS InnerProductLayer : public Layer + { + public: + int axis; + static Ptr create(const LayerParams& params); + }; + + class CV_EXPORTS MVNLayer : public Layer + { + public: + float eps; + bool normVariance, acrossChannels; + + static Ptr create(const LayerParams& params); + }; + + /* Reshaping */ + + class CV_EXPORTS ReshapeLayer : public Layer + { + public: + MatShape newShapeDesc; + Range newShapeRange; + + static Ptr create(const LayerParams& params); + }; + + class CV_EXPORTS FlattenLayer : public Layer + { + public: + static Ptr create(const LayerParams ¶ms); + }; + + class CV_EXPORTS ConcatLayer : public Layer + { + public: + int axis; + /** + * @brief Add zero padding in case of concatenation of blobs with different + * spatial sizes. + * + * Details: https://github.com/torch/nn/blob/master/doc/containers.md#depthconcat + */ + bool padding; + + static Ptr create(const LayerParams ¶ms); + }; + + class CV_EXPORTS SplitLayer : public Layer + { + public: + int outputsCount; //!< Number of copies that will be produced (is ignored when negative). 
+ + static Ptr create(const LayerParams ¶ms); + }; + + /** + * Slice layer has several modes: + * 1. Caffe mode + * @param[in] axis Axis of split operation + * @param[in] slice_point Array of split points + * + * Number of output blobs equals to number of split points plus one. The + * first blob is a slice on input from 0 to @p slice_point[0] - 1 by @p axis, + * the second output blob is a slice of input from @p slice_point[0] to + * @p slice_point[1] - 1 by @p axis and the last output blob is a slice of + * input from @p slice_point[-1] up to the end of @p axis size. + * + * 2. TensorFlow mode + * @param begin Vector of start indices + * @param size Vector of sizes + * + * More convenient numpy-like slice. One and only output blob + * is a slice `input[begin[0]:begin[0]+size[0], begin[1]:begin[1]+size[1], ...]` + * + * 3. Torch mode + * @param axis Axis of split operation + * + * Split input blob on the equal parts by @p axis. + */ + class CV_EXPORTS SliceLayer : public Layer + { + public: + /** + * @brief Vector of slice ranges. + * + * The first dimension equals number of output blobs. + * Inner vector has slice ranges for the first number of input dimensions. + */ + std::vector > sliceRanges; + std::vector > sliceSteps; + int axis; + int num_split; + + static Ptr create(const LayerParams ¶ms); + }; + + class CV_EXPORTS PermuteLayer : public Layer + { + public: + static Ptr create(const LayerParams& params); + }; + + /** + * Permute channels of 4-dimensional input blob. + * @param group Number of groups to split input channels and pick in turns + * into output blob. + * + * \f[ groupSize = \frac{number\ of\ channels}{group} \f] + * \f[ output(n, c, h, w) = input(n, groupSize \times (c \% group) + \lfloor \frac{c}{group} \rfloor, h, w) \f] + * Read more at https://arxiv.org/pdf/1707.01083.pdf + */ + class CV_EXPORTS ShuffleChannelLayer : public Layer + { + public: + static Ptr create(const LayerParams& params); + + int group; + }; + + /** + * @brief Adds extra values for specific axes. + * @param paddings Vector of paddings in format + * @code + * [ pad_before, pad_after, // [0]th dimension + * pad_before, pad_after, // [1]st dimension + * ... + * pad_before, pad_after ] // [n]th dimension + * @endcode + * that represents number of padded values at every dimension + * starting from the first one. The rest of dimensions won't + * be padded. + * @param value Value to be padded. Defaults to zero. + * @param type Padding type: 'constant', 'reflect' + * @param input_dims Torch's parameter. If @p input_dims is not equal to the + * actual input dimensionality then the `[0]th` dimension + * is considered as a batch dimension and @p paddings are shifted + * to a one dimension. Defaults to `-1` that means padding + * corresponding to @p paddings. 
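+ *
+ * A hypothetical construction sketch, assuming the parameter names documented
+ * above are passed through LayerParams (the surrounding network setup is omitted):
+ * @code
+ * int pads[] = { 1, 1, 2, 2 };   // pad_before, pad_after for dimensions 0 and 1
+ * LayerParams lp;
+ * lp.set("paddings", DictValue::arrayInt(pads, 4));
+ * lp.set("value", 0.);
+ * lp.set("type", String("constant"));
+ * Ptr<PaddingLayer> pad = PaddingLayer::create(lp);
+ * @endcode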
+ */ + class CV_EXPORTS PaddingLayer : public Layer + { + public: + static Ptr create(const LayerParams& params); + }; + + /* Activations */ + class CV_EXPORTS ActivationLayer : public Layer + { + public: + virtual void forwardSlice(const float* src, float* dst, int len, + size_t outPlaneSize, int cn0, int cn1) const = 0; + }; + + class CV_EXPORTS ReLULayer : public ActivationLayer + { + public: + float negativeSlope; + + static Ptr create(const LayerParams ¶ms); + }; + + class CV_EXPORTS ReLU6Layer : public ActivationLayer + { + public: + float minValue, maxValue; + + static Ptr create(const LayerParams ¶ms); + }; + + class CV_EXPORTS ChannelsPReLULayer : public ActivationLayer + { + public: + static Ptr create(const LayerParams& params); + }; + + class CV_EXPORTS ELULayer : public ActivationLayer + { + public: + static Ptr create(const LayerParams ¶ms); + }; + + class CV_EXPORTS TanHLayer : public ActivationLayer + { + public: + static Ptr create(const LayerParams ¶ms); + }; + + class CV_EXPORTS SwishLayer : public ActivationLayer + { + public: + static Ptr create(const LayerParams ¶ms); + }; + + class CV_EXPORTS MishLayer : public ActivationLayer + { + public: + static Ptr create(const LayerParams ¶ms); + }; + + class CV_EXPORTS SigmoidLayer : public ActivationLayer + { + public: + static Ptr create(const LayerParams ¶ms); + }; + + class CV_EXPORTS BNLLLayer : public ActivationLayer + { + public: + static Ptr create(const LayerParams ¶ms); + }; + + class CV_EXPORTS AbsLayer : public ActivationLayer + { + public: + static Ptr create(const LayerParams ¶ms); + }; + + class CV_EXPORTS PowerLayer : public ActivationLayer + { + public: + float power, scale, shift; + + static Ptr create(const LayerParams ¶ms); + }; + + class CV_EXPORTS ExpLayer : public ActivationLayer + { + public: + float base, scale, shift; + + static Ptr create(const LayerParams ¶ms); + }; + + /* Layers used in semantic segmentation */ + + class CV_EXPORTS CropLayer : public Layer + { + public: + static Ptr create(const LayerParams ¶ms); + }; + + /** @brief Element wise operation on inputs + + Extra optional parameters: + - "operation" as string. Values are "sum" (default), "prod", "max", "div" + - "coeff" as float array. Specify weights of inputs for SUM operation + - "output_channels_mode" as string. 
Values are "same" (default, all input must have the same layout), "input_0", "input_0_truncate", "max_input_channels" + */ + class CV_EXPORTS EltwiseLayer : public Layer + { + public: + static Ptr create(const LayerParams ¶ms); + }; + + class CV_EXPORTS BatchNormLayer : public ActivationLayer + { + public: + bool hasWeights, hasBias; + float epsilon; + + static Ptr create(const LayerParams ¶ms); + }; + + class CV_EXPORTS MaxUnpoolLayer : public Layer + { + public: + Size poolKernel; + Size poolPad; + Size poolStride; + + static Ptr create(const LayerParams ¶ms); + }; + + class CV_EXPORTS ScaleLayer : public Layer + { + public: + bool hasBias; + int axis; + + static Ptr create(const LayerParams& params); + }; + + class CV_EXPORTS ShiftLayer : public Layer + { + public: + static Ptr create(const LayerParams& params); + }; + + class CV_EXPORTS DataAugmentationLayer : public Layer + { + public: + static Ptr create(const LayerParams& params); + }; + + class CV_EXPORTS CorrelationLayer : public Layer + { + public: + static Ptr create(const LayerParams& params); + }; + + class CV_EXPORTS AccumLayer : public Layer + { + public: + static Ptr create(const LayerParams& params); + }; + + class CV_EXPORTS FlowWarpLayer : public Layer + { + public: + static Ptr create(const LayerParams& params); + }; + + class CV_EXPORTS PriorBoxLayer : public Layer + { + public: + static Ptr create(const LayerParams& params); + }; + + class CV_EXPORTS ReorgLayer : public Layer + { + public: + static Ptr create(const LayerParams& params); + }; + + class CV_EXPORTS RegionLayer : public Layer + { + public: + static Ptr create(const LayerParams& params); + }; + + /** + * @brief Detection output layer. + * + * The layer size is: @f$ (1 \times 1 \times N \times 7) @f$ + * where N is [keep_top_k] parameter multiplied by batch size. Each row is: + * [image_id, label, confidence, xmin, ymin, xmax, ymax] + * where image_id is the index of image input in the batch. + */ + class CV_EXPORTS DetectionOutputLayer : public Layer + { + public: + static Ptr create(const LayerParams& params); + }; + + /** + * @brief \f$ L_p \f$ - normalization layer. + * @param p Normalization factor. The most common `p = 1` for \f$ L_1 \f$ - + * normalization or `p = 2` for \f$ L_2 \f$ - normalization or a custom one. + * @param eps Parameter \f$ \epsilon \f$ to prevent a division by zero. + * @param across_spatial If true, normalize an input across all non-batch dimensions. + * Otherwise normalize an every channel separately. + * + * Across spatial: + * @f[ + * norm = \sqrt[p]{\epsilon + \sum_{x, y, c} |src(x, y, c)|^p } \\ + * dst(x, y, c) = \frac{ src(x, y, c) }{norm} + * @f] + * + * Channel wise normalization: + * @f[ + * norm(c) = \sqrt[p]{\epsilon + \sum_{x, y} |src(x, y, c)|^p } \\ + * dst(x, y, c) = \frac{ src(x, y, c) }{norm(c)} + * @f] + * + * Where `x, y` - spatial coordinates, `c` - channel. + * + * An every sample in the batch is normalized separately. Optionally, + * output is scaled by the trained parameters. + */ + class CV_EXPORTS NormalizeBBoxLayer : public Layer + { + public: + float pnorm, epsilon; + CV_DEPRECATED_EXTERNAL bool acrossSpatial; + + static Ptr create(const LayerParams& params); + }; + + /** + * @brief Resize input 4-dimensional blob by nearest neighbor or bilinear strategy. + * + * Layer is used to support TensorFlow's resize_nearest_neighbor and resize_bilinear ops. 
+ */ + class CV_EXPORTS ResizeLayer : public Layer + { + public: + static Ptr create(const LayerParams& params); + }; + + /** + * @brief Bilinear resize layer from https://github.com/cdmh/deeplab-public-ver2 + * + * It differs from @ref ResizeLayer in output shape and resize scales computations. + */ + class CV_EXPORTS InterpLayer : public Layer + { + public: + static Ptr create(const LayerParams& params); + }; + + class CV_EXPORTS ProposalLayer : public Layer + { + public: + static Ptr create(const LayerParams& params); + }; + + class CV_EXPORTS CropAndResizeLayer : public Layer + { + public: + static Ptr create(const LayerParams& params); + }; + +//! @} +//! @} +CV__DNN_EXPERIMENTAL_NS_END +} +} +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/dnn/dict.hpp b/third_party/opencv/uos/sw_64/include/opencv2/dnn/dict.hpp new file mode 100644 index 00000000..60c2aa5d --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/dnn/dict.hpp @@ -0,0 +1,160 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include +#include +#include + +#include + +#ifndef OPENCV_DNN_DNN_DICT_HPP +#define OPENCV_DNN_DNN_DICT_HPP + +namespace cv { +namespace dnn { +CV__DNN_EXPERIMENTAL_NS_BEGIN +//! @addtogroup dnn +//! @{ + +/** @brief This struct stores the scalar value (or array) of one of the following type: double, cv::String or int64. + * @todo Maybe int64 is useless because double type exactly stores at least 2^52 integers. 
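+ *
+ * A minimal usage sketch (all names are declared below in this header):
+ * @code
+ * DictValue scale(0.5);                // stores a double
+ * DictValue name(String("relu"));      // stores a string
+ * double v = scale.get<double>();      // 0.5
+ * bool isNumber = scale.isReal();      // true
+ * @endcode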
+ */ +struct CV_EXPORTS_W DictValue +{ + DictValue(const DictValue &r); + DictValue(bool i) : type(Param::INT), pi(new AutoBuffer) { (*pi)[0] = i ? 1 : 0; } //!< Constructs integer scalar + DictValue(int64 i = 0) : type(Param::INT), pi(new AutoBuffer) { (*pi)[0] = i; } //!< Constructs integer scalar + CV_WRAP DictValue(int i) : type(Param::INT), pi(new AutoBuffer) { (*pi)[0] = i; } //!< Constructs integer scalar + DictValue(unsigned p) : type(Param::INT), pi(new AutoBuffer) { (*pi)[0] = p; } //!< Constructs integer scalar + CV_WRAP DictValue(double p) : type(Param::REAL), pd(new AutoBuffer) { (*pd)[0] = p; } //!< Constructs floating point scalar + CV_WRAP DictValue(const String &s) : type(Param::STRING), ps(new AutoBuffer) { (*ps)[0] = s; } //!< Constructs string scalar + DictValue(const char *s) : type(Param::STRING), ps(new AutoBuffer) { (*ps)[0] = s; } //!< @overload + + template + static DictValue arrayInt(TypeIter begin, int size); //!< Constructs integer array + template + static DictValue arrayReal(TypeIter begin, int size); //!< Constructs floating point array + template + static DictValue arrayString(TypeIter begin, int size); //!< Constructs array of strings + + template + T get(int idx = -1) const; //!< Tries to convert array element with specified index to requested type and returns its. + + int size() const; + + CV_WRAP bool isInt() const; + CV_WRAP bool isString() const; + CV_WRAP bool isReal() const; + + CV_WRAP int getIntValue(int idx = -1) const; + CV_WRAP double getRealValue(int idx = -1) const; + CV_WRAP String getStringValue(int idx = -1) const; + + DictValue &operator=(const DictValue &r); + + friend std::ostream &operator<<(std::ostream &stream, const DictValue &dictv); + + ~DictValue(); + +private: + + int type; + + union + { + AutoBuffer *pi; + AutoBuffer *pd; + AutoBuffer *ps; + void *pv; + }; + + DictValue(int _type, void *_p) : type(_type), pv(_p) {} + void release(); +}; + +/** @brief This class implements name-value dictionary, values are instances of DictValue. */ +class CV_EXPORTS Dict +{ + typedef std::map _Dict; + _Dict dict; + +public: + + //! Checks a presence of the @p key in the dictionary. + bool has(const String &key) const; + + //! If the @p key in the dictionary then returns pointer to its value, else returns NULL. + DictValue *ptr(const String &key); + + /** @overload */ + const DictValue *ptr(const String &key) const; + + //! If the @p key in the dictionary then returns its value, else an error will be generated. + const DictValue &get(const String &key) const; + + /** @overload */ + template + T get(const String &key) const; + + //! If the @p key in the dictionary then returns its value, else returns @p defaultValue. + template + T get(const String &key, const T &defaultValue) const; + + //! Sets new @p value for the @p key, or adds new key-value pair into the dictionary. + template + const T &set(const String &key, const T &value); + + //! Erase @p key from the dictionary. + void erase(const String &key); + + friend std::ostream &operator<<(std::ostream &stream, const Dict &dict); + + std::map::const_iterator begin() const; + + std::map::const_iterator end() const; +}; + +//! 
@} +CV__DNN_EXPERIMENTAL_NS_END +} +} + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/dnn/dnn.hpp b/third_party/opencv/uos/sw_64/include/opencv2/dnn/dnn.hpp new file mode 100644 index 00000000..a2a48b7f --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/dnn/dnn.hpp @@ -0,0 +1,1072 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_DNN_DNN_HPP +#define OPENCV_DNN_DNN_HPP + +#include +#include +#include "opencv2/core/async.hpp" + +#if !defined CV_DOXYGEN && !defined CV_STATIC_ANALYSIS && !defined CV_DNN_DONT_ADD_EXPERIMENTAL_NS +#define CV__DNN_EXPERIMENTAL_NS_BEGIN namespace experimental_dnn_34_v23 { +#define CV__DNN_EXPERIMENTAL_NS_END } +namespace cv { namespace dnn { namespace experimental_dnn_34_v23 { } using namespace experimental_dnn_34_v23; }} +#else +#define CV__DNN_EXPERIMENTAL_NS_BEGIN +#define CV__DNN_EXPERIMENTAL_NS_END +#endif + +#include + +namespace cv { +namespace dnn { +CV__DNN_EXPERIMENTAL_NS_BEGIN +//! @addtogroup dnn +//! @{ + + typedef std::vector MatShape; + + /** + * @brief Enum of computation backends supported by layers. + * @see Net::setPreferableBackend + */ + enum Backend + { + //! DNN_BACKEND_DEFAULT equals to DNN_BACKEND_INFERENCE_ENGINE if + //! OpenCV is built with Intel's Inference Engine library or + //! DNN_BACKEND_OPENCV otherwise. 
+ DNN_BACKEND_DEFAULT = 0, + DNN_BACKEND_HALIDE, + DNN_BACKEND_INFERENCE_ENGINE, //!< Intel's Inference Engine computational backend + //!< @sa setInferenceEngineBackendType + DNN_BACKEND_OPENCV, + // OpenCV 4.x: DNN_BACKEND_VKCOM, + // OpenCV 4.x: DNN_BACKEND_CUDA, + +#ifdef __OPENCV_BUILD + DNN_BACKEND_INFERENCE_ENGINE_NGRAPH = 1000000, // internal - use DNN_BACKEND_INFERENCE_ENGINE + setInferenceEngineBackendType() + DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, // internal - use DNN_BACKEND_INFERENCE_ENGINE + setInferenceEngineBackendType() +#endif + }; + + /** + * @brief Enum of target devices for computations. + * @see Net::setPreferableTarget + */ + enum Target + { + DNN_TARGET_CPU = 0, + DNN_TARGET_OPENCL, + DNN_TARGET_OPENCL_FP16, + DNN_TARGET_MYRIAD, + DNN_TARGET_FPGA //!< FPGA device with CPU fallbacks using Inference Engine's Heterogeneous plugin. + }; + + CV_EXPORTS std::vector< std::pair > getAvailableBackends(); + CV_EXPORTS_W std::vector getAvailableTargets(dnn::Backend be); + + /** @brief This class provides all data needed to initialize layer. + * + * It includes dictionary with scalar params (which can be read by using Dict interface), + * blob params #blobs and optional meta information: #name and #type of layer instance. + */ + class CV_EXPORTS LayerParams : public Dict + { + public: + //TODO: Add ability to name blob params + std::vector blobs; //!< List of learned parameters stored as blobs. + + String name; //!< Name of the layer instance (optional, can be used internal purposes). + String type; //!< Type name which was used for creating layer by layer factory (optional). + }; + + /** + * @brief Derivatives of this class encapsulates functions of certain backends. + */ + class BackendNode + { + public: + BackendNode(int backendId); + + virtual ~BackendNode(); //!< Virtual destructor to make polymorphism. + + int backendId; //!< Backend identifier. + }; + + /** + * @brief Derivatives of this class wraps cv::Mat for different backends and targets. + */ + class BackendWrapper + { + public: + BackendWrapper(int backendId, int targetId); + + /** + * @brief Wrap cv::Mat for specific backend and target. + * @param[in] targetId Target identifier. + * @param[in] m cv::Mat for wrapping. + * + * Make CPU->GPU data transfer if it's require for the target. + */ + BackendWrapper(int targetId, const cv::Mat& m); + + /** + * @brief Make wrapper for reused cv::Mat. + * @param[in] base Wrapper of cv::Mat that will be reused. + * @param[in] shape Specific shape. + * + * Initialize wrapper from another one. It'll wrap the same host CPU + * memory and mustn't allocate memory on device(i.e. GPU). It might + * has different shape. Use in case of CPU memory reusing for reuse + * associated memory on device too. + */ + BackendWrapper(const Ptr& base, const MatShape& shape); + + virtual ~BackendWrapper(); //!< Virtual destructor to make polymorphism. + + /** + * @brief Transfer data to CPU host memory. + */ + virtual void copyToHost() = 0; + + /** + * @brief Indicate that an actual data is on CPU. + */ + virtual void setHostDirty() = 0; + + int backendId; //!< Backend identifier. + int targetId; //!< Target identifier. + }; + + class CV_EXPORTS ActivationLayer; + + /** @brief This interface class allows to build new Layers - are building blocks of networks. + * + * Each class, derived from Layer, must implement allocate() methods to declare own outputs and forward() to compute outputs. 
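+ *
+ * A bare-bones skeleton of such a class, for illustration only (the name
+ * MyLayer is hypothetical; method bodies and optional overrides are omitted,
+ * and the signatures follow the virtual methods declared below):
+ * @code
+ * class MyLayer : public Layer
+ * {
+ * public:
+ *     static Ptr<Layer> create(const LayerParams &params);
+ *     virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
+ *                                  const int requiredOutputs,
+ *                                  std::vector<MatShape> &outputs,
+ *                                  std::vector<MatShape> &internals) const CV_OVERRIDE;
+ *     virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
+ *                          OutputArrayOfArrays internals) CV_OVERRIDE;
+ * };
+ * @endcode
+ *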
+ * Also before using the new layer into networks you must register your layer by using one of @ref dnnLayerFactory "LayerFactory" macros. + */ + class CV_EXPORTS_W Layer : public Algorithm + { + public: + + //! List of learned parameters must be stored here to allow read them by using Net::getParam(). + CV_PROP_RW std::vector blobs; + + /** @brief Computes and sets internal parameters according to inputs, outputs and blobs. + * @deprecated Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead + * @param[in] input vector of already allocated input blobs + * @param[out] output vector of already allocated output blobs + * + * If this method is called after network has allocated all memory for input and output blobs + * and before inferencing. + */ + CV_DEPRECATED_EXTERNAL + virtual void finalize(const std::vector &input, std::vector &output); + + /** @brief Computes and sets internal parameters according to inputs, outputs and blobs. + * @param[in] inputs vector of already allocated input blobs + * @param[out] outputs vector of already allocated output blobs + * + * If this method is called after network has allocated all memory for input and output blobs + * and before inferencing. + */ + CV_WRAP virtual void finalize(InputArrayOfArrays inputs, OutputArrayOfArrays outputs); + + /** @brief Given the @p input blobs, computes the output @p blobs. + * @deprecated Use Layer::forward(InputArrayOfArrays, OutputArrayOfArrays, OutputArrayOfArrays) instead + * @param[in] input the input blobs. + * @param[out] output allocated output blobs, which will store results of the computation. + * @param[out] internals allocated internal blobs + */ + CV_DEPRECATED_EXTERNAL + virtual void forward(std::vector &input, std::vector &output, std::vector &internals); + + /** @brief Given the @p input blobs, computes the output @p blobs. + * @param[in] inputs the input blobs. + * @param[out] outputs allocated output blobs, which will store results of the computation. + * @param[out] internals allocated internal blobs + */ + virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs, OutputArrayOfArrays internals); + + /** @brief Given the @p input blobs, computes the output @p blobs. + * @param[in] inputs the input blobs. + * @param[out] outputs allocated output blobs, which will store results of the computation. + * @param[out] internals allocated internal blobs + */ + void forward_fallback(InputArrayOfArrays inputs, OutputArrayOfArrays outputs, OutputArrayOfArrays internals); + + /** @brief + * @overload + * @deprecated Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead + */ + CV_DEPRECATED_EXTERNAL + void finalize(const std::vector &inputs, CV_OUT std::vector &outputs); + + /** @brief + * @overload + * @deprecated Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead + */ + CV_DEPRECATED std::vector finalize(const std::vector &inputs); + + /** @brief Allocates layer and computes output. + * @deprecated This method will be removed in the future release. + */ + CV_DEPRECATED CV_WRAP void run(const std::vector &inputs, CV_OUT std::vector &outputs, + CV_IN_OUT std::vector &internals); + + /** @brief Returns index of input blob into the input array. + * @param inputName label of input blob + * + * Each layer input and output can be labeled to easily identify them using "%[.output_name]" notation. + * This method maps label of input blob to its index into input vector. 
+ */ + virtual int inputNameToIndex(String inputName); + /** @brief Returns index of output blob in output array. + * @see inputNameToIndex() + */ + CV_WRAP virtual int outputNameToIndex(const String& outputName); + + /** + * @brief Ask layer if it support specific backend for doing computations. + * @param[in] backendId computation backend identifier. + * @see Backend + */ + virtual bool supportBackend(int backendId); + + /** + * @brief Returns Halide backend node. + * @param[in] inputs Input Halide buffers. + * @see BackendNode, BackendWrapper + * + * Input buffers should be exactly the same that will be used in forward invocations. + * Despite we can use Halide::ImageParam based on input shape only, + * it helps prevent some memory management issues (if something wrong, + * Halide tests will be failed). + */ + virtual Ptr initHalide(const std::vector > &inputs); + + virtual Ptr initInfEngine(const std::vector > &inputs); + + virtual Ptr initNgraph(const std::vector > &inputs, const std::vector >& nodes); + + /** + * @brief Automatic Halide scheduling based on layer hyper-parameters. + * @param[in] node Backend node with Halide functions. + * @param[in] inputs Blobs that will be used in forward invocations. + * @param[in] outputs Blobs that will be used in forward invocations. + * @param[in] targetId Target identifier + * @see BackendNode, Target + * + * Layer don't use own Halide::Func members because we can have applied + * layers fusing. In this way the fused function should be scheduled. + */ + virtual void applyHalideScheduler(Ptr& node, + const std::vector &inputs, + const std::vector &outputs, + int targetId) const; + + /** + * @brief Implement layers fusing. + * @param[in] node Backend node of bottom layer. + * @see BackendNode + * + * Actual for graph-based backends. If layer attached successfully, + * returns non-empty cv::Ptr to node of the same backend. + * Fuse only over the last function. + */ + virtual Ptr tryAttach(const Ptr& node); + + /** + * @brief Tries to attach to the layer the subsequent activation layer, i.e. do the layer fusion in a partial case. + * @param[in] layer The subsequent activation layer. + * + * Returns true if the activation layer has been attached successfully. + */ + virtual bool setActivation(const Ptr& layer); + + /** + * @brief Try to fuse current layer with a next one + * @param[in] top Next layer to be fused. + * @returns True if fusion was performed. + */ + virtual bool tryFuse(Ptr& top); + + /** + * @brief Returns parameters of layers with channel-wise multiplication and addition. + * @param[out] scale Channel-wise multipliers. Total number of values should + * be equal to number of channels. + * @param[out] shift Channel-wise offsets. Total number of values should + * be equal to number of channels. + * + * Some layers can fuse their transformations with further layers. + * In example, convolution + batch normalization. This way base layer + * use weights from layer after it. Fused layer is skipped. + * By default, @p scale and @p shift are empty that means layer has no + * element-wise multiplications or additions. + */ + virtual void getScaleShift(Mat& scale, Mat& shift) const; + + /** + * @brief "Deattaches" all the layers, attached to particular layer. 
+ */ + virtual void unsetAttached(); + + virtual bool getMemoryShapes(const std::vector &inputs, + const int requiredOutputs, + std::vector &outputs, + std::vector &internals) const; + + virtual int64 getFLOPS(const std::vector &inputs, + const std::vector &outputs) const {CV_UNUSED(inputs); CV_UNUSED(outputs); return 0;} + + virtual bool updateMemoryShapes(const std::vector &inputs); + + CV_PROP String name; //!< Name of the layer instance, can be used for logging or other internal purposes. + CV_PROP String type; //!< Type name which was used for creating layer by layer factory. + CV_PROP int preferableTarget; //!< prefer target for layer forwarding + + Layer(); + explicit Layer(const LayerParams ¶ms); //!< Initializes only #name, #type and #blobs fields. + void setParamsFrom(const LayerParams ¶ms); //!< Initializes only #name, #type and #blobs fields. + virtual ~Layer(); + }; + + /** @brief This class allows to create and manipulate comprehensive artificial neural networks. + * + * Neural network is presented as directed acyclic graph (DAG), where vertices are Layer instances, + * and edges specify relationships between layers inputs and outputs. + * + * Each network layer has unique integer id and unique string name inside its network. + * LayerId can store either layer name or layer id. + * + * This class supports reference counting of its instances, i. e. copies point to the same instance. + */ + class CV_EXPORTS_W_SIMPLE Net + { + public: + + CV_WRAP Net(); //!< Default constructor. + CV_WRAP ~Net(); //!< Destructor frees the net only if there aren't references to the net anymore. + + /** @brief Create a network from Intel's Model Optimizer intermediate representation (IR). + * @param[in] xml XML configuration file with network's topology. + * @param[in] bin Binary file with trained weights. + * Networks imported from Intel's Model Optimizer are launched in Intel's Inference Engine + * backend. + */ + CV_WRAP static Net readFromModelOptimizer(const String& xml, const String& bin); + + /** @brief Create a network from Intel's Model Optimizer in-memory buffers with intermediate representation (IR). + * @param[in] bufferModelConfig buffer with model's configuration. + * @param[in] bufferWeights buffer with model's trained weights. + * @returns Net object. + */ + CV_WRAP static + Net readFromModelOptimizer(const std::vector& bufferModelConfig, const std::vector& bufferWeights); + + /** @brief Create a network from Intel's Model Optimizer in-memory buffers with intermediate representation (IR). + * @param[in] bufferModelConfigPtr buffer pointer of model's configuration. + * @param[in] bufferModelConfigSize buffer size of model's configuration. + * @param[in] bufferWeightsPtr buffer pointer of model's trained weights. + * @param[in] bufferWeightsSize buffer size of model's trained weights. + * @returns Net object. + */ + static + Net readFromModelOptimizer(const uchar* bufferModelConfigPtr, size_t bufferModelConfigSize, + const uchar* bufferWeightsPtr, size_t bufferWeightsSize); + + /** Returns true if there are no layers in the network. */ + CV_WRAP bool empty() const; + + /** @brief Dump net to String + * @returns String with structure, hyperparameters, backend, target and fusion + * Call method after setInput(). To see correct backend, target and fusion run after forward(). 
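+ *
+ * An illustrative usage sketch (not part of the upstream header); the file names and the
+ * "data" input name are hypothetical, and img stands for a previously loaded Mat:
+ * @code
+ *     Net net = readNetFromCaffe("model.prototxt", "model.caffemodel");
+ *     net.setInput(blobFromImage(img), "data");
+ *     net.forward();
+ *     String info = net.dump(); // structure, hyperparameters, backend, target and fusion
+ * @endcode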
+ */
+ CV_WRAP String dump();
+ /** @brief Dump net structure, hyperparameters, backend, target and fusion to dot file
+ * @param path path to output file with .dot extension
+ * @see dump()
+ */
+ CV_WRAP void dumpToFile(const String& path);
+ /** @brief Adds new layer to the net.
+ * @param name unique name of the layer being added.
+ * @param type typename of the layer being added (the type must be registered in LayerFactory).
+ * @param params parameters which will be used to initialize the created layer.
+ * @returns unique identifier of the created layer, or -1 on failure.
+ */
+ int addLayer(const String &name, const String &type, LayerParams &params);
+ /** @brief Adds a new layer and connects its first input to the first output of the previously added layer.
+ * @see addLayer()
+ */
+ int addLayerToPrev(const String &name, const String &type, LayerParams &params);
+
+ /** @brief Converts string name of the layer to the integer identifier.
+ * @returns id of the layer, or -1 if the layer wasn't found.
+ */
+ CV_WRAP int getLayerId(const String &layer);
+
+ CV_WRAP std::vector<String> getLayerNames() const;
+
+ /** @brief Container for strings and integers. */
+ typedef DictValue LayerId;
+
+ /** @brief Returns a pointer to the layer with the specified id or name which the network uses. */
+ CV_WRAP Ptr<Layer> getLayer(LayerId layerId);
+
+ /** @brief Returns pointers to the input layers of the specified layer. */
+ std::vector<Ptr<Layer> > getLayerInputs(LayerId layerId); // FIXIT: CV_WRAP
+
+ /** @brief Connects output of the first layer to input of the second layer.
+ * @param outPin descriptor of the first layer output.
+ * @param inpPin descriptor of the second layer input.
+ *
+ * Descriptors have the following template <layer_name>[.input_number]:
+ * - the first part of the template layer_name is the string name of the added layer.
+ * If this part is empty then the network input pseudo layer will be used;
+ * - the second optional part of the template input_number
+ * is either the number of the layer input or its label.
+ * If this part is omitted then the first layer input will be used.
+ *
+ * @see setNetInputs(), Layer::inputNameToIndex(), Layer::outputNameToIndex()
+ */
+ CV_WRAP void connect(String outPin, String inpPin);
+
+ /** @brief Connects #@p outNum output of the first layer to #@p inNum input of the second layer.
+ * @param outLayerId identifier of the first layer
+ * @param outNum number of the first layer output
+ * @param inpLayerId identifier of the second layer
+ * @param inpNum number of the second layer input
+ */
+ void connect(int outLayerId, int outNum, int inpLayerId, int inpNum);
+
+ /** @brief Sets output names of the network input pseudo layer.
+ *
+ * Each net always has its own special network input pseudo layer with id=0.
+ * This layer stores the user blobs only and doesn't make any computations.
+ * In fact, this layer provides the only way to pass user data into the network.
+ * As with any other layer, this layer can label its outputs and this function provides an easy way to do this.
+ */
+ CV_WRAP void setInputsNames(const std::vector<String> &inputBlobNames);
+
+ /** @brief Specify shape of network input.
+ */
+ CV_WRAP void setInputShape(const String &inputName, const MatShape& shape);
+
+ /** @brief Runs forward pass to compute output of layer with name @p outputName.
+ * @param outputName name of the layer whose output is needed
+ * @return blob for the first output of the specified layer.
+ * @details By default runs forward pass for the whole network.
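+ *
+ * A minimal sketch (not part of the upstream header); "prob" stands for any layer name
+ * that exists in the loaded model:
+ * @code
+ *     Mat out = net.forward();        // runs the whole network, returns its default output
+ *     Mat prob = net.forward("prob"); // first output of the layer named "prob"
+ * @endcode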
+ */ + CV_WRAP Mat forward(const String& outputName = String()); + + /** @brief Runs forward pass to compute output of layer with name @p outputName. + * @param outputName name for layer which output is needed to get + * @details By default runs forward pass for the whole network. + * + * This is an asynchronous version of forward(const String&). + * dnn::DNN_BACKEND_INFERENCE_ENGINE backend is required. + */ + CV_WRAP AsyncArray forwardAsync(const String& outputName = String()); + + /** @brief Runs forward pass to compute output of layer with name @p outputName. + * @param outputBlobs contains all output blobs for specified layer. + * @param outputName name for layer which output is needed to get + * @details If @p outputName is empty, runs forward pass for the whole network. + */ + CV_WRAP void forward(OutputArrayOfArrays outputBlobs, const String& outputName = String()); + + /** @brief Runs forward pass to compute outputs of layers listed in @p outBlobNames. + * @param outputBlobs contains blobs for first outputs of specified layers. + * @param outBlobNames names for layers which outputs are needed to get + */ + CV_WRAP void forward(OutputArrayOfArrays outputBlobs, + const std::vector& outBlobNames); + + /** @brief Runs forward pass to compute outputs of layers listed in @p outBlobNames. + * @param outputBlobs contains all output blobs for each layer specified in @p outBlobNames. + * @param outBlobNames names for layers which outputs are needed to get + */ + CV_WRAP_AS(forwardAndRetrieve) void forward(CV_OUT std::vector >& outputBlobs, + const std::vector& outBlobNames); + + /** + * @brief Compile Halide layers. + * @param[in] scheduler Path to YAML file with scheduling directives. + * @see setPreferableBackend + * + * Schedule layers that support Halide backend. Then compile them for + * specific target. For layers that not represented in scheduling file + * or if no manual scheduling used at all, automatic scheduling will be applied. + */ + CV_WRAP void setHalideScheduler(const String& scheduler); + + /** + * @brief Ask network to use specific computation backend where it supported. + * @param[in] backendId backend identifier. + * @see Backend + * + * If OpenCV is compiled with Intel's Inference Engine library, DNN_BACKEND_DEFAULT + * means DNN_BACKEND_INFERENCE_ENGINE. Otherwise it equals to DNN_BACKEND_OPENCV. + */ + CV_WRAP void setPreferableBackend(int backendId); + + /** + * @brief Ask network to make computations on specific target device. + * @param[in] targetId target identifier. + * @see Target + * + * List of supported combinations backend / target: + * | | DNN_BACKEND_OPENCV | DNN_BACKEND_INFERENCE_ENGINE | DNN_BACKEND_HALIDE | + * |------------------------|--------------------|------------------------------|--------------------| + * | DNN_TARGET_CPU | + | + | + | + * | DNN_TARGET_OPENCL | + | + | + | + * | DNN_TARGET_OPENCL_FP16 | + | + | | + * | DNN_TARGET_MYRIAD | | + | | + * | DNN_TARGET_FPGA | | + | | + */ + CV_WRAP void setPreferableTarget(int targetId); + + /** @brief Sets the new input value for the network + * @param blob A new blob. Should have CV_32F or CV_8U depth. + * @param name A name of input layer. + * @param scalefactor An optional normalization scale. + * @param mean An optional mean subtraction values. + * @see connect(String, String) to know format of the descriptor. 
+ * + * If scale or mean values are specified, a final input blob is computed + * as: + * \f[input(n,c,h,w) = scalefactor \times (blob(n,c,h,w) - mean_c)\f] + */ + CV_WRAP void setInput(InputArray blob, const String& name = "", + double scalefactor = 1.0, const Scalar& mean = Scalar()); + + /** @brief Sets the new value for the learned param of the layer. + * @param layer name or id of the layer. + * @param numParam index of the layer parameter in the Layer::blobs array. + * @param blob the new value. + * @see Layer::blobs + * @note If shape of the new blob differs from the previous shape, + * then the following forward pass may fail. + */ + CV_WRAP void setParam(LayerId layer, int numParam, const Mat &blob); + + /** @brief Returns parameter blob of the layer. + * @param layer name or id of the layer. + * @param numParam index of the layer parameter in the Layer::blobs array. + * @see Layer::blobs + */ + CV_WRAP Mat getParam(LayerId layer, int numParam = 0); + + /** @brief Returns indexes of layers with unconnected outputs. + */ + CV_WRAP std::vector getUnconnectedOutLayers() const; + + /** @brief Returns names of layers with unconnected outputs. + */ + CV_WRAP std::vector getUnconnectedOutLayersNames() const; + + /** @brief Returns input and output shapes for all layers in loaded model; + * preliminary inferencing isn't necessary. + * @param netInputShapes shapes for all input blobs in net input layer. + * @param layersIds output parameter for layer IDs. + * @param inLayersShapes output parameter for input layers shapes; + * order is the same as in layersIds + * @param outLayersShapes output parameter for output layers shapes; + * order is the same as in layersIds + */ + CV_WRAP void getLayersShapes(const std::vector& netInputShapes, + CV_OUT std::vector& layersIds, + CV_OUT std::vector >& inLayersShapes, + CV_OUT std::vector >& outLayersShapes) const; + + /** @overload */ + CV_WRAP void getLayersShapes(const MatShape& netInputShape, + CV_OUT std::vector& layersIds, + CV_OUT std::vector >& inLayersShapes, + CV_OUT std::vector >& outLayersShapes) const; + + /** @brief Returns input and output shapes for layer with specified + * id in loaded model; preliminary inferencing isn't necessary. + * @param netInputShape shape input blob in net input layer. + * @param layerId id for layer. + * @param inLayerShapes output parameter for input layers shapes; + * order is the same as in layersIds + * @param outLayerShapes output parameter for output layers shapes; + * order is the same as in layersIds + */ + void getLayerShapes(const MatShape& netInputShape, + const int layerId, + CV_OUT std::vector& inLayerShapes, + CV_OUT std::vector& outLayerShapes) const; // FIXIT: CV_WRAP + + /** @overload */ + void getLayerShapes(const std::vector& netInputShapes, + const int layerId, + CV_OUT std::vector& inLayerShapes, + CV_OUT std::vector& outLayerShapes) const; // FIXIT: CV_WRAP + + /** @brief Computes FLOP for whole loaded model with specified input shapes. + * @param netInputShapes vector of shapes for all net inputs. + * @returns computed FLOP. + */ + CV_WRAP int64 getFLOPS(const std::vector& netInputShapes) const; + /** @overload */ + CV_WRAP int64 getFLOPS(const MatShape& netInputShape) const; + /** @overload */ + CV_WRAP int64 getFLOPS(const int layerId, + const std::vector& netInputShapes) const; + /** @overload */ + CV_WRAP int64 getFLOPS(const int layerId, + const MatShape& netInputShape) const; + + /** @brief Returns list of types for layer used in model. 
+ * @param layersTypes output parameter for returning types. + */ + CV_WRAP void getLayerTypes(CV_OUT std::vector& layersTypes) const; + + /** @brief Returns count of layers of specified type. + * @param layerType type. + * @returns count of layers + */ + CV_WRAP int getLayersCount(const String& layerType) const; + + /** @brief Computes bytes number which are required to store + * all weights and intermediate blobs for model. + * @param netInputShapes vector of shapes for all net inputs. + * @param weights output parameter to store resulting bytes for weights. + * @param blobs output parameter to store resulting bytes for intermediate blobs. + */ + void getMemoryConsumption(const std::vector& netInputShapes, + CV_OUT size_t& weights, CV_OUT size_t& blobs) const; // FIXIT: CV_WRAP + /** @overload */ + CV_WRAP void getMemoryConsumption(const MatShape& netInputShape, + CV_OUT size_t& weights, CV_OUT size_t& blobs) const; + /** @overload */ + CV_WRAP void getMemoryConsumption(const int layerId, + const std::vector& netInputShapes, + CV_OUT size_t& weights, CV_OUT size_t& blobs) const; + /** @overload */ + CV_WRAP void getMemoryConsumption(const int layerId, + const MatShape& netInputShape, + CV_OUT size_t& weights, CV_OUT size_t& blobs) const; + + /** @brief Computes bytes number which are required to store + * all weights and intermediate blobs for each layer. + * @param netInputShapes vector of shapes for all net inputs. + * @param layerIds output vector to save layer IDs. + * @param weights output parameter to store resulting bytes for weights. + * @param blobs output parameter to store resulting bytes for intermediate blobs. + */ + void getMemoryConsumption(const std::vector& netInputShapes, + CV_OUT std::vector& layerIds, + CV_OUT std::vector& weights, + CV_OUT std::vector& blobs) const; // FIXIT: CV_WRAP + /** @overload */ + void getMemoryConsumption(const MatShape& netInputShape, + CV_OUT std::vector& layerIds, + CV_OUT std::vector& weights, + CV_OUT std::vector& blobs) const; // FIXIT: CV_WRAP + + /** @brief Enables or disables layer fusion in the network. + * @param fusion true to enable the fusion, false to disable. The fusion is enabled by default. + */ + CV_WRAP void enableFusion(bool fusion); + + /** @brief Returns overall time for inference and timings (in ticks) for layers. + * + * Indexes in returned vector correspond to layers ids. Some layers can be fused with others, + * in this case zero ticks count will be return for that skipped layers. Supported by DNN_BACKEND_OPENCV on DNN_TARGET_CPU only. + * + * @param[out] timings vector for tick timings for all layers. + * @return overall ticks for model inference. + */ + CV_WRAP int64 getPerfProfile(CV_OUT std::vector& timings); + + private: + struct Impl; + Ptr impl; + }; + + /** @brief Reads a network model stored in Darknet model files. + * @param cfgFile path to the .cfg file with text description of the network architecture. + * @param darknetModel path to the .weights file with learned network. + * @returns Network object that ready to do forward, throw an exception in failure cases. + * @returns Net object. + */ + CV_EXPORTS_W Net readNetFromDarknet(const String &cfgFile, const String &darknetModel = String()); + + /** @brief Reads a network model stored in Darknet model files. + * @param bufferCfg A buffer contains a content of .cfg file with text description of the network architecture. + * @param bufferModel A buffer contains a content of .weights file with learned network. + * @returns Net object. 
+ */ + CV_EXPORTS_W Net readNetFromDarknet(const std::vector& bufferCfg, + const std::vector& bufferModel = std::vector()); + + /** @brief Reads a network model stored in Darknet model files. + * @param bufferCfg A buffer contains a content of .cfg file with text description of the network architecture. + * @param lenCfg Number of bytes to read from bufferCfg + * @param bufferModel A buffer contains a content of .weights file with learned network. + * @param lenModel Number of bytes to read from bufferModel + * @returns Net object. + */ + CV_EXPORTS Net readNetFromDarknet(const char *bufferCfg, size_t lenCfg, + const char *bufferModel = NULL, size_t lenModel = 0); + + /** @brief Reads a network model stored in Caffe framework's format. + * @param prototxt path to the .prototxt file with text description of the network architecture. + * @param caffeModel path to the .caffemodel file with learned network. + * @returns Net object. + */ + CV_EXPORTS_W Net readNetFromCaffe(const String &prototxt, const String &caffeModel = String()); + + /** @brief Reads a network model stored in Caffe model in memory. + * @param bufferProto buffer containing the content of the .prototxt file + * @param bufferModel buffer containing the content of the .caffemodel file + * @returns Net object. + */ + CV_EXPORTS_W Net readNetFromCaffe(const std::vector& bufferProto, + const std::vector& bufferModel = std::vector()); + + /** @brief Reads a network model stored in Caffe model in memory. + * @details This is an overloaded member function, provided for convenience. + * It differs from the above function only in what argument(s) it accepts. + * @param bufferProto buffer containing the content of the .prototxt file + * @param lenProto length of bufferProto + * @param bufferModel buffer containing the content of the .caffemodel file + * @param lenModel length of bufferModel + * @returns Net object. + */ + CV_EXPORTS Net readNetFromCaffe(const char *bufferProto, size_t lenProto, + const char *bufferModel = NULL, size_t lenModel = 0); + + /** @brief Reads a network model stored in TensorFlow framework's format. + * @param model path to the .pb file with binary protobuf description of the network architecture + * @param config path to the .pbtxt file that contains text graph definition in protobuf format. + * Resulting Net object is built by text graph using weights from a binary one that + * let us make it more flexible. + * @returns Net object. + */ + CV_EXPORTS_W Net readNetFromTensorflow(const String &model, const String &config = String()); + + /** @brief Reads a network model stored in TensorFlow framework's format. + * @param bufferModel buffer containing the content of the pb file + * @param bufferConfig buffer containing the content of the pbtxt file + * @returns Net object. + */ + CV_EXPORTS_W Net readNetFromTensorflow(const std::vector& bufferModel, + const std::vector& bufferConfig = std::vector()); + + /** @brief Reads a network model stored in TensorFlow framework's format. + * @details This is an overloaded member function, provided for convenience. + * It differs from the above function only in what argument(s) it accepts. 
+ * @param bufferModel buffer containing the content of the pb file
+ * @param lenModel length of bufferModel
+ * @param bufferConfig buffer containing the content of the pbtxt file
+ * @param lenConfig length of bufferConfig
+ */
+ CV_EXPORTS Net readNetFromTensorflow(const char *bufferModel, size_t lenModel,
+ const char *bufferConfig = NULL, size_t lenConfig = 0);
+
+ /**
+ * @brief Reads a network model stored in Torch7 framework's format.
+ * @param model path to the file dumped from Torch by using the torch.save() function.
+ * @param isBinary specifies whether the network was serialized in ASCII or binary mode.
+ * @param evaluate specifies the testing phase of the network. If true, it's similar to the evaluate() method in Torch.
+ * @returns Net object.
+ *
+ * @note ASCII mode of the Torch serializer is preferable, because binary mode extensively uses the C `long` type,
+ * whose bit length varies across systems.
+ *
+ * The file to load must contain a serialized nn.Module object
+ * with the network to import. Try to eliminate custom objects from the serialized data to avoid import errors.
+ *
+ * List of supported layers (i.e. object instances derived from Torch nn.Module class):
+ * - nn.Sequential
+ * - nn.Parallel
+ * - nn.Concat
+ * - nn.Linear
+ * - nn.SpatialConvolution
+ * - nn.SpatialMaxPooling, nn.SpatialAveragePooling
+ * - nn.ReLU, nn.TanH, nn.Sigmoid
+ * - nn.Reshape
+ * - nn.SoftMax, nn.LogSoftMax
+ *
+ * Also some equivalents of these classes from cunn, cudnn, and fbcunn may be successfully imported.
+ */
+ CV_EXPORTS_W Net readNetFromTorch(const String &model, bool isBinary = true, bool evaluate = true);
+
+ /**
+ * @brief Read deep learning network represented in one of the supported formats.
+ * @param[in] model Binary file containing trained weights. The following file
+ * extensions are expected for models from different frameworks:
+ * * `*.caffemodel` (Caffe, http://caffe.berkeleyvision.org/)
+ * * `*.pb` (TensorFlow, https://www.tensorflow.org/)
+ * * `*.t7` | `*.net` (Torch, http://torch.ch/)
+ * * `*.weights` (Darknet, https://pjreddie.com/darknet/)
+ * * `*.bin` (DLDT, https://software.intel.com/openvino-toolkit)
+ * * `*.onnx` (ONNX, https://onnx.ai/)
+ * @param[in] config Text file containing the network configuration. It could be a
+ * file with the following extensions:
+ * * `*.prototxt` (Caffe, http://caffe.berkeleyvision.org/)
+ * * `*.pbtxt` (TensorFlow, https://www.tensorflow.org/)
+ * * `*.cfg` (Darknet, https://pjreddie.com/darknet/)
+ * * `*.xml` (DLDT, https://software.intel.com/openvino-toolkit)
+ * @param[in] framework Explicit framework name tag to determine a format.
+ * @returns Net object.
+ *
+ * This function automatically detects the origin framework of the trained model
+ * and calls an appropriate function such as @ref readNetFromCaffe, @ref readNetFromTensorflow,
+ * @ref readNetFromTorch or @ref readNetFromDarknet. The order of the @p model and @p config
+ * arguments does not matter.
+ */
+ CV_EXPORTS_W Net readNet(const String& model, const String& config = "", const String& framework = "");
+
+ /**
+ * @brief Read deep learning network represented in one of the supported formats.
+ * @details This is an overloaded member function, provided for convenience.
+ * It differs from the above function only in what argument(s) it accepts.
+ * @param[in] framework Name of the origin framework.
+ * @param[in] bufferModel A buffer with a content of binary file with weights + * @param[in] bufferConfig A buffer with a content of text file contains network configuration. + * @returns Net object. + */ + CV_EXPORTS_W Net readNet(const String& framework, const std::vector& bufferModel, + const std::vector& bufferConfig = std::vector()); + + /** @brief Loads blob which was serialized as torch.Tensor object of Torch7 framework. + * @warning This function has the same limitations as readNetFromTorch(). + */ + CV_EXPORTS_W Mat readTorchBlob(const String &filename, bool isBinary = true); + + /** @brief Load a network from Intel's Model Optimizer intermediate representation. + * @param[in] xml XML configuration file with network's topology. + * @param[in] bin Binary file with trained weights. + * @returns Net object. + * Networks imported from Intel's Model Optimizer are launched in Intel's Inference Engine + * backend. + */ + CV_EXPORTS_W + Net readNetFromModelOptimizer(const String &xml, const String &bin); + + /** @brief Load a network from Intel's Model Optimizer intermediate representation. + * @param[in] bufferModelConfig Buffer contains XML configuration with network's topology. + * @param[in] bufferWeights Buffer contains binary data with trained weights. + * @returns Net object. + * Networks imported from Intel's Model Optimizer are launched in Intel's Inference Engine + * backend. + */ + CV_EXPORTS_W + Net readNetFromModelOptimizer(const std::vector& bufferModelConfig, const std::vector& bufferWeights); + + /** @brief Load a network from Intel's Model Optimizer intermediate representation. + * @param[in] bufferModelConfigPtr Pointer to buffer which contains XML configuration with network's topology. + * @param[in] bufferModelConfigSize Binary size of XML configuration data. + * @param[in] bufferWeightsPtr Pointer to buffer which contains binary data with trained weights. + * @param[in] bufferWeightsSize Binary size of trained weights data. + * @returns Net object. + * Networks imported from Intel's Model Optimizer are launched in Intel's Inference Engine + * backend. + */ + CV_EXPORTS + Net readNetFromModelOptimizer(const uchar* bufferModelConfigPtr, size_t bufferModelConfigSize, + const uchar* bufferWeightsPtr, size_t bufferWeightsSize); + + /** @brief Reads a network model ONNX. + * @param onnxFile path to the .onnx file with text description of the network architecture. + * @returns Network object that ready to do forward, throw an exception in failure cases. + */ + CV_EXPORTS_W Net readNetFromONNX(const String &onnxFile); + + /** @brief Reads a network model from ONNX + * in-memory buffer. + * @param buffer memory address of the first byte of the buffer. + * @param sizeBuffer size of the buffer. + * @returns Network object that ready to do forward, throw an exception + * in failure cases. + */ + CV_EXPORTS Net readNetFromONNX(const char* buffer, size_t sizeBuffer); + + /** @brief Reads a network model from ONNX + * in-memory buffer. + * @param buffer in-memory buffer that stores the ONNX model bytes. + * @returns Network object that ready to do forward, throw an exception + * in failure cases. + */ + CV_EXPORTS_W Net readNetFromONNX(const std::vector& buffer); + + /** @brief Creates blob from .pb file. + * @param path to the .pb file with input tensor. + * @returns Mat. + */ + CV_EXPORTS_W Mat readTensorFromONNX(const String& path); + + /** @brief Creates 4-dimensional blob from image. 
Optionally resizes and crops @p image from center, + * subtract @p mean values, scales values by @p scalefactor, swap Blue and Red channels. + * @param image input image (with 1-, 3- or 4-channels). + * @param size spatial size for output image + * @param mean scalar with mean values which are subtracted from channels. Values are intended + * to be in (mean-R, mean-G, mean-B) order if @p image has BGR ordering and @p swapRB is true. + * @param scalefactor multiplier for @p image values. + * @param swapRB flag which indicates that swap first and last channels + * in 3-channel image is necessary. + * @param crop flag which indicates whether image will be cropped after resize or not + * @param ddepth Depth of output blob. Choose CV_32F or CV_8U. + * @details if @p crop is true, input image is resized so one side after resize is equal to corresponding + * dimension in @p size and another one is equal or larger. Then, crop from the center is performed. + * If @p crop is false, direct resize without cropping and preserving aspect ratio is performed. + * @returns 4-dimensional Mat with NCHW dimensions order. + */ + CV_EXPORTS_W Mat blobFromImage(InputArray image, double scalefactor=1.0, const Size& size = Size(), + const Scalar& mean = Scalar(), bool swapRB=false, bool crop=false, + int ddepth=CV_32F); + + /** @brief Creates 4-dimensional blob from image. + * @details This is an overloaded member function, provided for convenience. + * It differs from the above function only in what argument(s) it accepts. + */ + CV_EXPORTS void blobFromImage(InputArray image, OutputArray blob, double scalefactor=1.0, + const Size& size = Size(), const Scalar& mean = Scalar(), + bool swapRB=false, bool crop=false, int ddepth=CV_32F); + + + /** @brief Creates 4-dimensional blob from series of images. Optionally resizes and + * crops @p images from center, subtract @p mean values, scales values by @p scalefactor, + * swap Blue and Red channels. + * @param images input images (all with 1-, 3- or 4-channels). + * @param size spatial size for output image + * @param mean scalar with mean values which are subtracted from channels. Values are intended + * to be in (mean-R, mean-G, mean-B) order if @p image has BGR ordering and @p swapRB is true. + * @param scalefactor multiplier for @p images values. + * @param swapRB flag which indicates that swap first and last channels + * in 3-channel image is necessary. + * @param crop flag which indicates whether image will be cropped after resize or not + * @param ddepth Depth of output blob. Choose CV_32F or CV_8U. + * @details if @p crop is true, input image is resized so one side after resize is equal to corresponding + * dimension in @p size and another one is equal or larger. Then, crop from the center is performed. + * If @p crop is false, direct resize without cropping and preserving aspect ratio is performed. + * @returns 4-dimensional Mat with NCHW dimensions order. + */ + CV_EXPORTS_W Mat blobFromImages(InputArrayOfArrays images, double scalefactor=1.0, + Size size = Size(), const Scalar& mean = Scalar(), bool swapRB=false, bool crop=false, + int ddepth=CV_32F); + + /** @brief Creates 4-dimensional blob from series of images. + * @details This is an overloaded member function, provided for convenience. + * It differs from the above function only in what argument(s) it accepts. 
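+ *
+ * An illustrative sketch (not part of the upstream header); img1 and img2 stand for
+ * previously loaded images, and the size, mean and flag values are only an example:
+ * @code
+ *     std::vector<Mat> imgs = { img1, img2 };
+ *     Mat batch = blobFromImages(imgs, 1.0, Size(224, 224), Scalar(104, 117, 123),
+ *                                true, false); // swapRB=true, crop=false
+ * @endcode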
+ */ + CV_EXPORTS void blobFromImages(InputArrayOfArrays images, OutputArray blob, + double scalefactor=1.0, Size size = Size(), + const Scalar& mean = Scalar(), bool swapRB=false, bool crop=false, + int ddepth=CV_32F); + + /** @brief Parse a 4D blob and output the images it contains as 2D arrays through a simpler data structure + * (std::vector). + * @param[in] blob_ 4 dimensional array (images, channels, height, width) in floating point precision (CV_32F) from + * which you would like to extract the images. + * @param[out] images_ array of 2D Mat containing the images extracted from the blob in floating point precision + * (CV_32F). They are non normalized neither mean added. The number of returned images equals the first dimension + * of the blob (batch size). Every image has a number of channels equals to the second dimension of the blob (depth). + */ + CV_EXPORTS_W void imagesFromBlob(const cv::Mat& blob_, OutputArrayOfArrays images_); + + /** @brief Convert all weights of Caffe network to half precision floating point. + * @param src Path to origin model from Caffe framework contains single + * precision floating point weights (usually has `.caffemodel` extension). + * @param dst Path to destination model with updated weights. + * @param layersTypes Set of layers types which parameters will be converted. + * By default, converts only Convolutional and Fully-Connected layers' + * weights. + * + * @note Shrinked model has no origin float32 weights so it can't be used + * in origin Caffe framework anymore. However the structure of data + * is taken from NVidia's Caffe fork: https://github.com/NVIDIA/caffe. + * So the resulting model may be used there. + */ + CV_EXPORTS_W void shrinkCaffeModel(const String& src, const String& dst, + const std::vector& layersTypes = std::vector()); + + /** @brief Create a text representation for a binary network stored in protocol buffer format. + * @param[in] model A path to binary network. + * @param[in] output A path to output text file to be created. + * + * @note To reduce output file size, trained weights are not included. + */ + CV_EXPORTS_W void writeTextGraph(const String& model, const String& output); + + /** @brief Performs non maximum suppression given boxes and corresponding scores. + + * @param bboxes a set of bounding boxes to apply NMS. + * @param scores a set of corresponding confidences. + * @param score_threshold a threshold used to filter boxes by score. + * @param nms_threshold a threshold used in non maximum suppression. + * @param indices the kept indices of bboxes after NMS. + * @param eta a coefficient in adaptive threshold formula: \f$nms\_threshold_{i+1}=eta\cdot nms\_threshold_i\f$. + * @param top_k if `>0`, keep at most @p top_k picked indices. + */ + CV_EXPORTS void NMSBoxes(const std::vector& bboxes, const std::vector& scores, + const float score_threshold, const float nms_threshold, + CV_OUT std::vector& indices, + const float eta = 1.f, const int top_k = 0); + + CV_EXPORTS_W void NMSBoxes(const std::vector& bboxes, const std::vector& scores, + const float score_threshold, const float nms_threshold, + CV_OUT std::vector& indices, + const float eta = 1.f, const int top_k = 0); + + CV_EXPORTS_AS(NMSBoxesRotated) void NMSBoxes(const std::vector& bboxes, const std::vector& scores, + const float score_threshold, const float nms_threshold, + CV_OUT std::vector& indices, + const float eta = 1.f, const int top_k = 0); + +//! 
@} +CV__DNN_EXPERIMENTAL_NS_END +} +} + +#include +#include + +/// @deprecated Include this header directly from application. Automatic inclusion will be removed +#include + +#endif /* OPENCV_DNN_DNN_HPP */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/dnn/dnn.inl.hpp b/third_party/opencv/uos/sw_64/include/opencv2/dnn/dnn.inl.hpp new file mode 100644 index 00000000..17d4c200 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/dnn/dnn.inl.hpp @@ -0,0 +1,395 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef OPENCV_DNN_DNN_INL_HPP +#define OPENCV_DNN_DNN_INL_HPP + +#include + +namespace cv { +namespace dnn { +CV__DNN_EXPERIMENTAL_NS_BEGIN + +template +DictValue DictValue::arrayInt(TypeIter begin, int size) +{ + DictValue res(Param::INT, new AutoBuffer(size)); + for (int j = 0; j < size; begin++, j++) + (*res.pi)[j] = *begin; + return res; +} + +template +DictValue DictValue::arrayReal(TypeIter begin, int size) +{ + DictValue res(Param::REAL, new AutoBuffer(size)); + for (int j = 0; j < size; begin++, j++) + (*res.pd)[j] = *begin; + return res; +} + +template +DictValue DictValue::arrayString(TypeIter begin, int size) +{ + DictValue res(Param::STRING, new AutoBuffer(size)); + for (int j = 0; j < size; begin++, j++) + (*res.ps)[j] = *begin; + return res; +} + +template<> +inline DictValue DictValue::get(int idx) const +{ + CV_Assert(idx == -1); + return *this; +} + +template<> +inline int64 DictValue::get(int idx) const +{ + CV_Assert((idx == -1 && size() == 1) || (idx >= 0 && idx < size())); + idx = (idx == -1) ? 0 : idx; + + if (type == Param::INT) + { + return (*pi)[idx]; + } + else if (type == Param::REAL) + { + double doubleValue = (*pd)[idx]; + + double fracpart, intpart; + fracpart = std::modf(doubleValue, &intpart); + CV_Assert(fracpart == 0.0); + + return (int64)doubleValue; + } + else if (type == Param::STRING) + { + return std::atoi((*ps)[idx].c_str()); + } + else + { + CV_Assert(isInt() || isReal() || isString()); + return 0; + } +} + +template<> +inline int DictValue::get(int idx) const +{ + return (int)get(idx); +} + +inline int DictValue::getIntValue(int idx) const +{ + return (int)get(idx); +} + +template<> +inline unsigned DictValue::get(int idx) const +{ + return (unsigned)get(idx); +} + +template<> +inline bool DictValue::get(int idx) const +{ + return (get(idx) != 0); +} + +template<> +inline double DictValue::get(int idx) const +{ + CV_Assert((idx == -1 && size() == 1) || (idx >= 0 && idx < size())); + idx = (idx == -1) ? 0 : idx; + + if (type == Param::REAL) + { + return (*pd)[idx]; + } + else if (type == Param::INT) + { + return (double)(*pi)[idx]; + } + else if (type == Param::STRING) + { + return std::atof((*ps)[idx].c_str()); + } + else + { + CV_Assert(isReal() || isInt() || isString()); + return 0; + } +} + +inline double DictValue::getRealValue(int idx) const +{ + return get(idx); +} + +template<> +inline float DictValue::get(int idx) const +{ + return (float)get(idx); +} + +template<> +inline String DictValue::get(int idx) const +{ + CV_Assert(isString()); + CV_Assert((idx == -1 && ps->size() == 1) || (idx >= 0 && idx < (int)ps->size())); + return (*ps)[(idx == -1) ? 
0 : idx]; +} + + +inline String DictValue::getStringValue(int idx) const +{ + return get(idx); +} + +inline void DictValue::release() +{ + switch (type) + { + case Param::INT: + delete pi; + break; + case Param::STRING: + delete ps; + break; + case Param::REAL: + delete pd; + break; + } +} + +inline DictValue::~DictValue() +{ + release(); +} + +inline DictValue & DictValue::operator=(const DictValue &r) +{ + if (&r == this) + return *this; + + if (r.type == Param::INT) + { + AutoBuffer *tmp = new AutoBuffer(*r.pi); + release(); + pi = tmp; + } + else if (r.type == Param::STRING) + { + AutoBuffer *tmp = new AutoBuffer(*r.ps); + release(); + ps = tmp; + } + else if (r.type == Param::REAL) + { + AutoBuffer *tmp = new AutoBuffer(*r.pd); + release(); + pd = tmp; + } + + type = r.type; + + return *this; +} + +inline DictValue::DictValue(const DictValue &r) +{ + type = r.type; + + if (r.type == Param::INT) + pi = new AutoBuffer(*r.pi); + else if (r.type == Param::STRING) + ps = new AutoBuffer(*r.ps); + else if (r.type == Param::REAL) + pd = new AutoBuffer(*r.pd); +} + +inline bool DictValue::isString() const +{ + return (type == Param::STRING); +} + +inline bool DictValue::isInt() const +{ + return (type == Param::INT); +} + +inline bool DictValue::isReal() const +{ + return (type == Param::REAL || type == Param::INT); +} + +inline int DictValue::size() const +{ + switch (type) + { + case Param::INT: + return (int)pi->size(); + case Param::STRING: + return (int)ps->size(); + case Param::REAL: + return (int)pd->size(); + } +#ifdef __OPENCV_BUILD + CV_Error(Error::StsInternal, ""); +#else + CV_ErrorNoReturn(Error::StsInternal, ""); +#endif +} + +inline std::ostream &operator<<(std::ostream &stream, const DictValue &dictv) +{ + int i; + + if (dictv.isInt()) + { + for (i = 0; i < dictv.size() - 1; i++) + stream << dictv.get(i) << ", "; + stream << dictv.get(i); + } + else if (dictv.isReal()) + { + for (i = 0; i < dictv.size() - 1; i++) + stream << dictv.get(i) << ", "; + stream << dictv.get(i); + } + else if (dictv.isString()) + { + for (i = 0; i < dictv.size() - 1; i++) + stream << "\"" << dictv.get(i) << "\", "; + stream << dictv.get(i); + } + + return stream; +} + +///////////////////////////////////////////////////////////////// + +inline bool Dict::has(const String &key) const +{ + return dict.count(key) != 0; +} + +inline DictValue *Dict::ptr(const String &key) +{ + _Dict::iterator i = dict.find(key); + return (i == dict.end()) ? NULL : &i->second; +} + +inline const DictValue *Dict::ptr(const String &key) const +{ + _Dict::const_iterator i = dict.find(key); + return (i == dict.end()) ? 
NULL : &i->second; +} + +inline const DictValue &Dict::get(const String &key) const +{ + _Dict::const_iterator i = dict.find(key); + if (i == dict.end()) + CV_Error(Error::StsObjectNotFound, "Required argument \"" + key + "\" not found into dictionary"); + return i->second; +} + +template +inline T Dict::get(const String &key) const +{ + return this->get(key).get(); +} + +template +inline T Dict::get(const String &key, const T &defaultValue) const +{ + _Dict::const_iterator i = dict.find(key); + + if (i != dict.end()) + return i->second.get(); + else + return defaultValue; +} + +template +inline const T &Dict::set(const String &key, const T &value) +{ + _Dict::iterator i = dict.find(key); + + if (i != dict.end()) + i->second = DictValue(value); + else + dict.insert(std::make_pair(key, DictValue(value))); + + return value; +} + +inline void Dict::erase(const String &key) +{ + dict.erase(key); +} + +inline std::ostream &operator<<(std::ostream &stream, const Dict &dict) +{ + Dict::_Dict::const_iterator it; + for (it = dict.dict.begin(); it != dict.dict.end(); it++) + stream << it->first << " : " << it->second << "\n"; + + return stream; +} + +inline std::map::const_iterator Dict::begin() const +{ + return dict.begin(); +} + +inline std::map::const_iterator Dict::end() const +{ + return dict.end(); +} + +CV__DNN_EXPERIMENTAL_NS_END +} +} + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/dnn/layer.details.hpp b/third_party/opencv/uos/sw_64/include/opencv2/dnn/layer.details.hpp new file mode 100644 index 00000000..619514e8 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/dnn/layer.details.hpp @@ -0,0 +1,78 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +#ifndef OPENCV_DNN_LAYER_DETAILS_HPP +#define OPENCV_DNN_LAYER_DETAILS_HPP + +#include + +namespace cv { +namespace dnn { +CV__DNN_EXPERIMENTAL_NS_BEGIN + +/** @brief Registers layer constructor in runtime. +* @param type string, containing type name of the layer. +* @param constructorFunc pointer to the function of type LayerRegister::Constructor, which creates the layer. +* @details This macros must be placed inside the function code. +*/ +#define CV_DNN_REGISTER_LAYER_FUNC(type, constructorFunc) \ + cv::dnn::LayerFactory::registerLayer(#type, constructorFunc); + +/** @brief Registers layer class in runtime. + * @param type string, containing type name of the layer. + * @param class C++ class, derived from Layer. + * @details This macros must be placed inside the function code. + */ +#define CV_DNN_REGISTER_LAYER_CLASS(type, class) \ + cv::dnn::LayerFactory::registerLayer(#type, cv::dnn::details::_layerDynamicRegisterer); + +/** @brief Registers layer constructor on module load time. +* @param type string, containing type name of the layer. +* @param constructorFunc pointer to the function of type LayerRegister::Constructor, which creates the layer. +* @details This macros must be placed outside the function code. +*/ +#define CV_DNN_REGISTER_LAYER_FUNC_STATIC(type, constructorFunc) \ +static cv::dnn::details::_LayerStaticRegisterer __LayerStaticRegisterer_##type(#type, constructorFunc); + +/** @brief Registers layer class on module load time. + * @param type string, containing type name of the layer. + * @param class C++ class, derived from Layer. + * @details This macros must be placed outside the function code. 
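+*
+* An illustrative sketch (not part of the upstream header); MyType and MyLayer are hypothetical,
+* MyLayer being a cv::dnn::Layer subclass constructible from LayerParams:
+* @code
+*     // in a source file, at namespace scope:
+*     CV_DNN_REGISTER_LAYER_CLASS_STATIC(MyType, MyLayer)
+* @endcode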
+ */ +#define CV_DNN_REGISTER_LAYER_CLASS_STATIC(type, class) \ +Ptr __LayerStaticRegisterer_func_##type(LayerParams ¶ms) \ + { return Ptr(new class(params)); } \ +static cv::dnn::details::_LayerStaticRegisterer __LayerStaticRegisterer_##type(#type, __LayerStaticRegisterer_func_##type); + +namespace details { + +template +Ptr _layerDynamicRegisterer(LayerParams ¶ms) +{ + return Ptr(LayerClass::create(params)); +} + +//allows automatically register created layer on module load time +class _LayerStaticRegisterer +{ + String type; +public: + + _LayerStaticRegisterer(const String &layerType, LayerFactory::Constructor layerConstructor) + { + this->type = layerType; + LayerFactory::registerLayer(layerType, layerConstructor); + } + + ~_LayerStaticRegisterer() + { + LayerFactory::unregisterLayer(type); + } +}; + +} // namespace +CV__DNN_EXPERIMENTAL_NS_END +}} // namespace + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/dnn/layer.hpp b/third_party/opencv/uos/sw_64/include/opencv2/dnn/layer.hpp new file mode 100644 index 00000000..c4712b8e --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/dnn/layer.hpp @@ -0,0 +1,85 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_DNN_LAYER_HPP +#define OPENCV_DNN_LAYER_HPP +#include + +namespace cv { +namespace dnn { +CV__DNN_EXPERIMENTAL_NS_BEGIN +//! @addtogroup dnn +//! @{ +//! +//! @defgroup dnnLayerFactory Utilities for New Layers Registration +//! @{ + +/** @brief %Layer factory allows to create instances of registered layers. 
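+ *
+ * An illustrative sketch (not part of the upstream header); createMyLayer is a hypothetical
+ * function matching LayerFactory::Constructor:
+ * @code
+ *     cv::dnn::LayerFactory::registerLayer("MyType", createMyLayer);
+ *     cv::dnn::LayerParams params;
+ *     cv::Ptr<cv::dnn::Layer> layer = cv::dnn::LayerFactory::createLayerInstance("MyType", params);
+ * @endcode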
*/ +class CV_EXPORTS LayerFactory +{ +public: + + //! Each Layer class must provide this function to the factory + typedef Ptr(*Constructor)(LayerParams ¶ms); + + //! Registers the layer class with typename @p type and specified @p constructor. Thread-safe. + static void registerLayer(const String &type, Constructor constructor); + + //! Unregisters registered layer with specified type name. Thread-safe. + static void unregisterLayer(const String &type); + + /** @brief Creates instance of registered layer. + * @param type type name of creating layer. + * @param params parameters which will be used for layer initialization. + * @note Thread-safe. + */ + static Ptr createLayerInstance(const String &type, LayerParams& params); + +private: + LayerFactory(); +}; + +//! @} +//! @} +CV__DNN_EXPERIMENTAL_NS_END +} +} +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/dnn/shape_utils.hpp b/third_party/opencv/uos/sw_64/include/opencv2/dnn/shape_utils.hpp new file mode 100644 index 00000000..b77333bd --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/dnn/shape_utils.hpp @@ -0,0 +1,258 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef OPENCV_DNN_DNN_SHAPE_UTILS_HPP +#define OPENCV_DNN_DNN_SHAPE_UTILS_HPP + +#include +#include // CV_MAX_DIM +#include +#include +#include + +namespace cv { +namespace dnn { +CV__DNN_EXPERIMENTAL_NS_BEGIN + +//Slicing + +struct _Range : public cv::Range +{ + _Range(const Range &r) : cv::Range(r) {} + _Range(int start_, int size_ = 1) : cv::Range(start_, start_ + size_) {} +}; + +static inline Mat slice(const Mat &m, const _Range &r0) +{ + Range ranges[CV_MAX_DIM]; + for (int i = 1; i < m.dims; i++) + ranges[i] = Range::all(); + ranges[0] = r0; + return m(&ranges[0]); +} + +static inline Mat slice(const Mat &m, const _Range &r0, const _Range &r1) +{ + CV_Assert(m.dims >= 2); + Range ranges[CV_MAX_DIM]; + for (int i = 2; i < m.dims; i++) + ranges[i] = Range::all(); + ranges[0] = r0; + ranges[1] = r1; + return m(&ranges[0]); +} + +static inline Mat slice(const Mat &m, const _Range &r0, const _Range &r1, const _Range &r2) +{ + CV_Assert(m.dims >= 3); + Range ranges[CV_MAX_DIM]; + for (int i = 3; i < m.dims; i++) + ranges[i] = Range::all(); + ranges[0] = r0; + ranges[1] = r1; + ranges[2] = r2; + return m(&ranges[0]); +} + +static inline Mat slice(const Mat &m, const _Range &r0, const _Range &r1, const _Range &r2, const _Range &r3) +{ + CV_Assert(m.dims >= 4); + Range ranges[CV_MAX_DIM]; + for (int i = 4; i < m.dims; i++) + ranges[i] = Range::all(); + ranges[0] = r0; + ranges[1] = r1; + ranges[2] = r2; + ranges[3] = r3; + return m(&ranges[0]); +} + +static inline Mat getPlane(const Mat &m, int n, int cn) +{ + CV_Assert(m.dims > 2); + int sz[CV_MAX_DIM]; + for(int i = 2; i < m.dims; i++) + { + sz[i-2] = m.size.p[i]; + } + return Mat(m.dims - 2, sz, m.type(), (void*)m.ptr(n, cn)); +} + +static inline MatShape shape(const int* dims, const int n) +{ + MatShape shape; + shape.assign(dims, dims + n); + return shape; +} + +static inline MatShape shape(const Mat& mat) +{ + return shape(mat.size.p, mat.dims); +} + +static inline MatShape shape(const MatSize& sz) +{ + return shape(sz.p, sz.dims()); +} + +static inline MatShape shape(const UMat& mat) +{ + return shape(mat.size.p, mat.dims); +} + +#if 0 // issues with MatExpr wrapped into InputArray +static inline +MatShape shape(InputArray input) +{ + int sz[CV_MAX_DIM]; + int ndims = input.sizend(sz); + return shape(sz, ndims); +} +#endif + +namespace {inline bool is_neg(int i) { return i < 0; }} + +static inline MatShape shape(int a0, int a1=-1, int a2=-1, int a3=-1) +{ + int dims[] = {a0, a1, a2, a3}; + MatShape s = shape(dims, 4); + s.erase(std::remove_if(s.begin(), s.end(), is_neg), s.end()); + return s; +} + +static inline int total(const MatShape& shape, int start = -1, int end = -1) +{ + if (start == -1) start = 0; + if (end == -1) end = (int)shape.size(); + + if (shape.empty()) + return 0; + + int elems = 1; + CV_Assert(start <= (int)shape.size() && end <= (int)shape.size() && + start <= end); + for(int i = start; i < end; i++) + { + elems *= shape[i]; + } + return elems; +} + +static inline MatShape concat(const MatShape& a, const MatShape& b) +{ + MatShape c = a; + c.insert(c.end(), b.begin(), b.end()); + + return c; +} + +static inline std::string toString(const MatShape& shape, const String& name = "") +{ + std::ostringstream ss; + if (!name.empty()) + ss << name << ' '; + ss << '['; + for(size_t i = 0, n = shape.size(); i < n; ++i) + ss << ' ' << shape[i]; + ss << " ]"; + return ss.str(); +} +static inline void print(const MatShape& shape, const String& name = "") +{ + std::cout << toString(shape, name) << std::endl; +} 
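+/* Illustrative sketch (not part of the upstream header): typical use of the shape helpers
+   defined above, assuming `blob` is a 4-D NCHW Mat such as one produced by blobFromImage.
+
+       MatShape s = shape(blob);             // e.g. [ 1 3 224 224 ]
+       int spatial = total(s, 2, 4);         // product of dims [2, 4), here 224 * 224
+       std::cout << toString(s, "input") << std::endl;
+*/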
+static inline std::ostream& operator<<(std::ostream &out, const MatShape& shape) +{ + out << toString(shape); + return out; +} + +/// @brief Converts axis from `[-dims; dims)` (similar to Python's slice notation) to `[0; dims)` range. +static inline +int normalize_axis(int axis, int dims) +{ + CV_Check(axis, axis >= -dims && axis < dims, ""); + axis = (axis < 0) ? (dims + axis) : axis; + CV_DbgCheck(axis, axis >= 0 && axis < dims, ""); + return axis; +} + +static inline +int normalize_axis(int axis, const MatShape& shape) +{ + return normalize_axis(axis, (int)shape.size()); +} + +static inline +Range normalize_axis_range(const Range& r, int axisSize) +{ + if (r == Range::all()) + return Range(0, axisSize); + CV_CheckGE(r.start, 0, ""); + Range clamped(r.start, + r.end > 0 ? std::min(r.end, axisSize) : axisSize + r.end + 1); + CV_DbgCheckGE(clamped.start, 0, ""); + CV_CheckLT(clamped.start, clamped.end, ""); + CV_CheckLE(clamped.end, axisSize, ""); + return clamped; +} + +static inline +bool isAllOnes(const MatShape &inputShape, int startPos, int endPos) +{ + CV_Assert(!inputShape.empty()); + + CV_CheckGE((int) inputShape.size(), startPos, ""); + CV_CheckGE(startPos, 0, ""); + CV_CheckLE(startPos, endPos, ""); + CV_CheckLE((size_t)endPos, inputShape.size(), ""); + + for (size_t i = startPos; i < endPos; i++) + { + if (inputShape[i] != 1) + return false; + } + return true; +} +CV__DNN_EXPERIMENTAL_NS_END +} +} +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/dnn/utils/inference_engine.hpp b/third_party/opencv/uos/sw_64/include/opencv2/dnn/utils/inference_engine.hpp new file mode 100644 index 00000000..4a7e9e07 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/dnn/utils/inference_engine.hpp @@ -0,0 +1,71 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2018-2019, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. + +#ifndef OPENCV_DNN_UTILS_INF_ENGINE_HPP +#define OPENCV_DNN_UTILS_INF_ENGINE_HPP + +#include "../dnn.hpp" + +namespace cv { namespace dnn { +CV__DNN_EXPERIMENTAL_NS_BEGIN + + +/* Values for 'OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE' parameter */ +#define CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API "NN_BUILDER" +#define CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH "NGRAPH" + +/** @brief Returns Inference Engine internal backend API. + * + * See values of `CV_DNN_BACKEND_INFERENCE_ENGINE_*` macros. + * + * Default value is controlled through `OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE` runtime parameter (environment variable). + */ +CV_EXPORTS_W cv::String getInferenceEngineBackendType(); + +/** @brief Specify Inference Engine internal backend API. + * + * See values of `CV_DNN_BACKEND_INFERENCE_ENGINE_*` macros. + * + * @returns previous value of internal backend API + */ +CV_EXPORTS_W cv::String setInferenceEngineBackendType(const cv::String& newBackendType); + + +/** @brief Release a Myriad device (binded by OpenCV). + * + * Single Myriad device cannot be shared across multiple processes which uses + * Inference Engine's Myriad plugin. 
+ */ +CV_EXPORTS_W void resetMyriadDevice(); + + +/* Values for 'OPENCV_DNN_IE_VPU_TYPE' parameter */ +#define CV_DNN_INFERENCE_ENGINE_VPU_TYPE_UNSPECIFIED "" +/// Intel(R) Movidius(TM) Neural Compute Stick, NCS (USB 03e7:2150), Myriad2 (https://software.intel.com/en-us/movidius-ncs) +#define CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2 "Myriad2" +/// Intel(R) Neural Compute Stick 2, NCS2 (USB 03e7:2485), MyriadX (https://software.intel.com/ru-ru/neural-compute-stick) +#define CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X "MyriadX" +#define CV_DNN_INFERENCE_ENGINE_CPU_TYPE_ARM_COMPUTE "ARM_COMPUTE" +#define CV_DNN_INFERENCE_ENGINE_CPU_TYPE_X86 "X86" + + +/** @brief Returns Inference Engine VPU type. + * + * See values of `CV_DNN_INFERENCE_ENGINE_VPU_TYPE_*` macros. + */ +CV_EXPORTS_W cv::String getInferenceEngineVPUType(); + +/** @brief Returns Inference Engine CPU type. + * + * Specify OpenVINO plugin: CPU or ARM. + */ +CV_EXPORTS_W cv::String getInferenceEngineCPUType(); + +CV__DNN_EXPERIMENTAL_NS_END +}} // namespace + +#endif // OPENCV_DNN_UTILS_INF_ENGINE_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/features2d.hpp b/third_party/opencv/uos/sw_64/include/opencv2/features2d.hpp new file mode 100644 index 00000000..bf193599 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/features2d.hpp @@ -0,0 +1,1512 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef OPENCV_FEATURES_2D_HPP +#define OPENCV_FEATURES_2D_HPP + +#include "opencv2/opencv_modules.hpp" +#include "opencv2/core.hpp" + +#ifdef HAVE_OPENCV_FLANN +#include "opencv2/flann/miniflann.hpp" +#endif + +/** + @defgroup features2d 2D Features Framework + @{ + @defgroup features2d_main Feature Detection and Description + @defgroup features2d_match Descriptor Matchers + +Matchers of keypoint descriptors in OpenCV have wrappers with a common interface that enables you to +easily switch between different algorithms solving the same problem. This section is devoted to +matching descriptors that are represented as vectors in a multidimensional space. All objects that +implement vector descriptor matchers inherit the DescriptorMatcher interface. + + @defgroup features2d_draw Drawing Function of Keypoints and Matches + @defgroup features2d_category Object Categorization + +This section describes approaches based on local 2D features and used to categorize objects. + + @defgroup feature2d_hal Hardware Acceleration Layer + @{ + @defgroup features2d_hal_interface Interface + @} + @} + */ + +namespace cv +{ + +//! @addtogroup features2d_main +//! @{ + +// //! writes vector of keypoints to the file storage +// CV_EXPORTS void write(FileStorage& fs, const String& name, const std::vector& keypoints); +// //! reads vector of keypoints from the specified file storage node +// CV_EXPORTS void read(const FileNode& node, CV_OUT std::vector& keypoints); + +/** @brief A class filters a vector of keypoints. + + Because now it is difficult to provide a convenient interface for all usage scenarios of the + keypoints filter class, it has only several needed by now static methods. + */ +class CV_EXPORTS KeyPointsFilter +{ +public: + KeyPointsFilter(){} + + /* + * Remove keypoints within borderPixels of an image edge. + */ + static void runByImageBorder( std::vector& keypoints, Size imageSize, int borderSize ); + /* + * Remove keypoints of sizes out of range. + */ + static void runByKeypointSize( std::vector& keypoints, float minSize, + float maxSize=FLT_MAX ); + /* + * Remove keypoints from some image by mask for pixels of this image. + */ + static void runByPixelsMask( std::vector& keypoints, const Mat& mask ); + /* + * Remove duplicated keypoints. + */ + static void removeDuplicated( std::vector& keypoints ); + /* + * Remove duplicated keypoints and sort the remaining keypoints + */ + static void removeDuplicatedSorted( std::vector& keypoints ); + + /* + * Retain the specified number of the best keypoints (according to the response) + */ + static void retainBest( std::vector& keypoints, int npoints ); +}; + + +/************************************ Base Classes ************************************/ + +/** @brief Abstract base class for 2D image feature detectors and descriptor extractors +*/ +#ifdef __EMSCRIPTEN__ +class CV_EXPORTS_W Feature2D : public Algorithm +#else +class CV_EXPORTS_W Feature2D : public virtual Algorithm +#endif +{ +public: + virtual ~Feature2D(); + + /** @brief Detects keypoints in an image (first variant) or image set (second variant). + + @param image Image. + @param keypoints The detected keypoints. In the second variant of the method keypoints[i] is a set + of keypoints detected in images[i] . + @param mask Mask specifying where to look for keypoints (optional). It must be a 8-bit integer + matrix with non-zero values in the region of interest. 
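+
+    A minimal usage sketch (assumptions: `detector` is any concrete Feature2D
+    such as ORB, and `img` is an 8-bit image; the names are placeholders):
+    @code
+        Ptr<Feature2D> detector = ORB::create();
+        std::vector<KeyPoint> keypoints;
+        detector->detect(img, keypoints);   // empty mask: search the whole image
+    @endcode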
+ */ + CV_WRAP virtual void detect( InputArray image, + CV_OUT std::vector& keypoints, + InputArray mask=noArray() ); + + /** @overload + @param images Image set. + @param keypoints The detected keypoints. In the second variant of the method keypoints[i] is a set + of keypoints detected in images[i] . + @param masks Masks for each input image specifying where to look for keypoints (optional). + masks[i] is a mask for images[i]. + */ + CV_WRAP virtual void detect( InputArrayOfArrays images, + CV_OUT std::vector >& keypoints, + InputArrayOfArrays masks=noArray() ); + + /** @brief Computes the descriptors for a set of keypoints detected in an image (first variant) or image set + (second variant). + + @param image Image. + @param keypoints Input collection of keypoints. Keypoints for which a descriptor cannot be + computed are removed. Sometimes new keypoints can be added, for example: SIFT duplicates keypoint + with several dominant orientations (for each orientation). + @param descriptors Computed descriptors. In the second variant of the method descriptors[i] are + descriptors computed for a keypoints[i]. Row j is the keypoints (or keypoints[i]) is the + descriptor for keypoint j-th keypoint. + */ + CV_WRAP virtual void compute( InputArray image, + CV_OUT CV_IN_OUT std::vector& keypoints, + OutputArray descriptors ); + + /** @overload + + @param images Image set. + @param keypoints Input collection of keypoints. Keypoints for which a descriptor cannot be + computed are removed. Sometimes new keypoints can be added, for example: SIFT duplicates keypoint + with several dominant orientations (for each orientation). + @param descriptors Computed descriptors. In the second variant of the method descriptors[i] are + descriptors computed for a keypoints[i]. Row j is the keypoints (or keypoints[i]) is the + descriptor for keypoint j-th keypoint. + */ + CV_WRAP virtual void compute( InputArrayOfArrays images, + CV_OUT CV_IN_OUT std::vector >& keypoints, + OutputArrayOfArrays descriptors ); + + /** Detects keypoints and computes the descriptors */ + CV_WRAP virtual void detectAndCompute( InputArray image, InputArray mask, + CV_OUT std::vector& keypoints, + OutputArray descriptors, + bool useProvidedKeypoints=false ); + + CV_WRAP virtual int descriptorSize() const; + CV_WRAP virtual int descriptorType() const; + CV_WRAP virtual int defaultNorm() const; + + CV_WRAP void write( const String& fileName ) const; + + CV_WRAP void read( const String& fileName ); + + virtual void write( FileStorage&) const CV_OVERRIDE; + + // see corresponding cv::Algorithm method + CV_WRAP virtual void read( const FileNode&) CV_OVERRIDE; + + //! Return true if detector object is empty + CV_WRAP virtual bool empty() const CV_OVERRIDE; + CV_WRAP virtual String getDefaultName() const CV_OVERRIDE; + + // see corresponding cv::Algorithm method + CV_WRAP inline void write(const Ptr& fs, const String& name = String()) const { Algorithm::write(fs, name); } +}; + +/** Feature detectors in OpenCV have wrappers with a common interface that enables you to easily switch +between different algorithms solving the same problem. All objects that implement keypoint detectors +inherit the FeatureDetector interface. */ +typedef Feature2D FeatureDetector; + +/** Extractors of keypoint descriptors in OpenCV have wrappers with a common interface that enables you +to easily switch between different algorithms solving the same problem. This section is devoted to +computing descriptors represented as vectors in a multidimensional space. 
All objects that implement +the vector descriptor extractors inherit the DescriptorExtractor interface. + */ +typedef Feature2D DescriptorExtractor; + + +/** @brief Class for implementing the wrapper which makes detectors and extractors to be affine invariant, +described as ASIFT in @cite YM11 . +*/ +class CV_EXPORTS_W AffineFeature : public Feature2D +{ +public: + /** + @param backend The detector/extractor you want to use as backend. + @param maxTilt The highest power index of tilt factor. 5 is used in the paper as tilt sampling range n. + @param minTilt The lowest power index of tilt factor. 0 is used in the paper. + @param tiltStep Tilt sampling step \f$\delta_t\f$ in Algorithm 1 in the paper. + @param rotateStepBase Rotation sampling step factor b in Algorithm 1 in the paper. + */ + CV_WRAP static Ptr create(const Ptr& backend, + int maxTilt = 5, int minTilt = 0, float tiltStep = 1.4142135623730951f, float rotateStepBase = 72); + + CV_WRAP virtual void setViewParams(const std::vector& tilts, const std::vector& rolls) = 0; + CV_WRAP virtual void getViewParams(std::vector& tilts, std::vector& rolls) const = 0; + CV_WRAP virtual String getDefaultName() const CV_OVERRIDE; +}; + +typedef AffineFeature AffineFeatureDetector; +typedef AffineFeature AffineDescriptorExtractor; + + +/** @brief Class for extracting keypoints and computing descriptors using the Scale Invariant Feature Transform +(SIFT) algorithm by D. Lowe @cite Lowe04 . +*/ +class CV_EXPORTS_W SIFT : public Feature2D +{ +public: + /** + @param nfeatures The number of best features to retain. The features are ranked by their scores + (measured in SIFT algorithm as the local contrast) + + @param nOctaveLayers The number of layers in each octave. 3 is the value used in D. Lowe paper. The + number of octaves is computed automatically from the image resolution. + + @param contrastThreshold The contrast threshold used to filter out weak features in semi-uniform + (low-contrast) regions. The larger the threshold, the less features are produced by the detector. + + @note The contrast threshold will be divided by nOctaveLayers when the filtering is applied. When + nOctaveLayers is set to default and if you want to use the value used in D. Lowe paper, 0.03, set + this argument to 0.09. + + @param edgeThreshold The threshold used to filter out edge-like features. Note that the its meaning + is different from the contrastThreshold, i.e. the larger the edgeThreshold, the less features are + filtered out (more features are retained). + + @param sigma The sigma of the Gaussian applied to the input image at the octave \#0. If your image + is captured with a weak camera with soft lenses, you might want to reduce the number. + */ + CV_WRAP static Ptr create(int nfeatures = 0, int nOctaveLayers = 3, + double contrastThreshold = 0.04, double edgeThreshold = 10, + double sigma = 1.6); + + /** @brief Create SIFT with specified descriptorType. + @param nfeatures The number of best features to retain. The features are ranked by their scores + (measured in SIFT algorithm as the local contrast) + + @param nOctaveLayers The number of layers in each octave. 3 is the value used in D. Lowe paper. The + number of octaves is computed automatically from the image resolution. + + @param contrastThreshold The contrast threshold used to filter out weak features in semi-uniform + (low-contrast) regions. The larger the threshold, the less features are produced by the detector. 
+ + @note The contrast threshold will be divided by nOctaveLayers when the filtering is applied. When + nOctaveLayers is set to default and if you want to use the value used in D. Lowe paper, 0.03, set + this argument to 0.09. + + @param edgeThreshold The threshold used to filter out edge-like features. Note that the its meaning + is different from the contrastThreshold, i.e. the larger the edgeThreshold, the less features are + filtered out (more features are retained). + + @param sigma The sigma of the Gaussian applied to the input image at the octave \#0. If your image + is captured with a weak camera with soft lenses, you might want to reduce the number. + + @param descriptorType The type of descriptors. Only CV_32F and CV_8U are supported. + */ + CV_WRAP static Ptr create(int nfeatures, int nOctaveLayers, + double contrastThreshold, double edgeThreshold, + double sigma, int descriptorType); + + CV_WRAP virtual String getDefaultName() const CV_OVERRIDE; +}; + +typedef SIFT SiftFeatureDetector; +typedef SIFT SiftDescriptorExtractor; + + +/** @brief Class implementing the BRISK keypoint detector and descriptor extractor, described in @cite LCS11 . + */ +class CV_EXPORTS_W BRISK : public Feature2D +{ +public: + /** @brief The BRISK constructor + + @param thresh AGAST detection threshold score. + @param octaves detection octaves. Use 0 to do single scale. + @param patternScale apply this scale to the pattern used for sampling the neighbourhood of a + keypoint. + */ + CV_WRAP static Ptr create(int thresh=30, int octaves=3, float patternScale=1.0f); + + /** @brief The BRISK constructor for a custom pattern + + @param radiusList defines the radii (in pixels) where the samples around a keypoint are taken (for + keypoint scale 1). + @param numberList defines the number of sampling points on the sampling circle. Must be the same + size as radiusList.. + @param dMax threshold for the short pairings used for descriptor formation (in pixels for keypoint + scale 1). + @param dMin threshold for the long pairings used for orientation determination (in pixels for + keypoint scale 1). + @param indexChange index remapping of the bits. */ + CV_WRAP static Ptr create(const std::vector &radiusList, const std::vector &numberList, + float dMax=5.85f, float dMin=8.2f, const std::vector& indexChange=std::vector()); + + /** @brief The BRISK constructor for a custom pattern, detection threshold and octaves + + @param thresh AGAST detection threshold score. + @param octaves detection octaves. Use 0 to do single scale. + @param radiusList defines the radii (in pixels) where the samples around a keypoint are taken (for + keypoint scale 1). + @param numberList defines the number of sampling points on the sampling circle. Must be the same + size as radiusList.. + @param dMax threshold for the short pairings used for descriptor formation (in pixels for keypoint + scale 1). + @param dMin threshold for the long pairings used for orientation determination (in pixels for + keypoint scale 1). + @param indexChange index remapping of the bits. */ + CV_WRAP static Ptr create(int thresh, int octaves, const std::vector &radiusList, + const std::vector &numberList, float dMax=5.85f, float dMin=8.2f, + const std::vector& indexChange=std::vector()); + CV_WRAP virtual String getDefaultName() const CV_OVERRIDE; +}; + +/** @brief Class implementing the ORB (*oriented BRIEF*) keypoint detector and descriptor extractor + +described in @cite RRKB11 . 
The algorithm uses FAST in pyramids to detect stable keypoints, selects +the strongest features using FAST or Harris response, finds their orientation using first-order +moments and computes the descriptors using BRIEF (where the coordinates of random point pairs (or +k-tuples) are rotated according to the measured orientation). + */ +class CV_EXPORTS_W ORB : public Feature2D +{ +public: + enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 }; + + /** @brief The ORB constructor + + @param nfeatures The maximum number of features to retain. + @param scaleFactor Pyramid decimation ratio, greater than 1. scaleFactor==2 means the classical + pyramid, where each next level has 4x less pixels than the previous, but such a big scale factor + will degrade feature matching scores dramatically. On the other hand, too close to 1 scale factor + will mean that to cover certain scale range you will need more pyramid levels and so the speed + will suffer. + @param nlevels The number of pyramid levels. The smallest level will have linear size equal to + input_image_linear_size/pow(scaleFactor, nlevels - firstLevel). + @param edgeThreshold This is size of the border where the features are not detected. It should + roughly match the patchSize parameter. + @param firstLevel The level of pyramid to put source image to. Previous layers are filled + with upscaled source image. + @param WTA_K The number of points that produce each element of the oriented BRIEF descriptor. The + default value 2 means the BRIEF where we take a random point pair and compare their brightnesses, + so we get 0/1 response. Other possible values are 3 and 4. For example, 3 means that we take 3 + random points (of course, those point coordinates are random, but they are generated from the + pre-defined seed, so each element of BRIEF descriptor is computed deterministically from the pixel + rectangle), find point of maximum brightness and output index of the winner (0, 1 or 2). Such + output will occupy 2 bits, and therefore it will need a special variant of Hamming distance, + denoted as NORM_HAMMING2 (2 bits per bin). When WTA_K=4, we take 4 random points to compute each + bin (that will also occupy 2 bits with possible values 0, 1, 2 or 3). + @param scoreType The default HARRIS_SCORE means that Harris algorithm is used to rank features + (the score is written to KeyPoint::score and is used to retain best nfeatures features); + FAST_SCORE is alternative value of the parameter that produces slightly less stable keypoints, + but it is a little faster to compute. + @param patchSize size of the patch used by the oriented BRIEF descriptor. Of course, on smaller + pyramid layers the perceived image area covered by a feature will be larger. 
+ @param fastThreshold the fast threshold + */ + CV_WRAP static Ptr create(int nfeatures=500, float scaleFactor=1.2f, int nlevels=8, int edgeThreshold=31, + int firstLevel=0, int WTA_K=2, int scoreType=ORB::HARRIS_SCORE, int patchSize=31, int fastThreshold=20); + + CV_WRAP virtual void setMaxFeatures(int maxFeatures) = 0; + CV_WRAP virtual int getMaxFeatures() const = 0; + + CV_WRAP virtual void setScaleFactor(double scaleFactor) = 0; + CV_WRAP virtual double getScaleFactor() const = 0; + + CV_WRAP virtual void setNLevels(int nlevels) = 0; + CV_WRAP virtual int getNLevels() const = 0; + + CV_WRAP virtual void setEdgeThreshold(int edgeThreshold) = 0; + CV_WRAP virtual int getEdgeThreshold() const = 0; + + CV_WRAP virtual void setFirstLevel(int firstLevel) = 0; + CV_WRAP virtual int getFirstLevel() const = 0; + + CV_WRAP virtual void setWTA_K(int wta_k) = 0; + CV_WRAP virtual int getWTA_K() const = 0; + + CV_WRAP virtual void setScoreType(int scoreType) = 0; + CV_WRAP virtual int getScoreType() const = 0; + + CV_WRAP virtual void setPatchSize(int patchSize) = 0; + CV_WRAP virtual int getPatchSize() const = 0; + + CV_WRAP virtual void setFastThreshold(int fastThreshold) = 0; + CV_WRAP virtual int getFastThreshold() const = 0; + CV_WRAP virtual String getDefaultName() const CV_OVERRIDE; +}; + +/** @brief Maximally stable extremal region extractor + +The class encapsulates all the parameters of the %MSER extraction algorithm (see [wiki +article](http://en.wikipedia.org/wiki/Maximally_stable_extremal_regions)). + +- there are two different implementation of %MSER: one for grey image, one for color image + +- the grey image algorithm is taken from: @cite nister2008linear ; the paper claims to be faster +than union-find method; it actually get 1.5~2m/s on my centrino L7200 1.2GHz laptop. 
+ +- the color image algorithm is taken from: @cite forssen2007maximally ; it should be much slower +than grey image method ( 3~4 times ) + +- (Python) A complete example showing the use of the %MSER detector can be found at samples/python/mser.py +*/ +class CV_EXPORTS_W MSER : public Feature2D +{ +public: + /** @brief Full constructor for %MSER detector + + @param delta it compares \f$(size_{i}-size_{i-delta})/size_{i-delta}\f$ + @param min_area prune the area which smaller than minArea + @param max_area prune the area which bigger than maxArea + @param max_variation prune the area have similar size to its children + @param min_diversity for color image, trace back to cut off mser with diversity less than min_diversity + @param max_evolution for color image, the evolution steps + @param area_threshold for color image, the area threshold to cause re-initialize + @param min_margin for color image, ignore too small margin + @param edge_blur_size for color image, the aperture size for edge blur + */ + CV_WRAP static Ptr create( int delta=5, int min_area=60, int max_area=14400, + double max_variation=0.25, double min_diversity=.2, + int max_evolution=200, double area_threshold=1.01, + double min_margin=0.003, int edge_blur_size=5 ); + + /** @brief Detect %MSER regions + + @param image input image (8UC1, 8UC3 or 8UC4, must be greater or equal than 3x3) + @param msers resulting list of point sets + @param bboxes resulting bounding boxes + */ + CV_WRAP virtual void detectRegions( InputArray image, + CV_OUT std::vector >& msers, + CV_OUT std::vector& bboxes ) = 0; + + CV_WRAP virtual void setDelta(int delta) = 0; + CV_WRAP virtual int getDelta() const = 0; + + CV_WRAP virtual void setMinArea(int minArea) = 0; + CV_WRAP virtual int getMinArea() const = 0; + + CV_WRAP virtual void setMaxArea(int maxArea) = 0; + CV_WRAP virtual int getMaxArea() const = 0; + + CV_WRAP virtual void setPass2Only(bool f) = 0; + CV_WRAP virtual bool getPass2Only() const = 0; + CV_WRAP virtual String getDefaultName() const CV_OVERRIDE; +}; + +/** @overload */ +CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector& keypoints, + int threshold, bool nonmaxSuppression=true ); + +/** @brief Detects corners using the FAST algorithm + +@param image grayscale image where keypoints (corners) are detected. +@param keypoints keypoints detected on the image. +@param threshold threshold on difference between intensity of the central pixel and pixels of a +circle around this pixel. +@param nonmaxSuppression if true, non-maximum suppression is applied to detected corners +(keypoints). +@param type one of the three neighborhoods as defined in the paper: +FastFeatureDetector::TYPE_9_16, FastFeatureDetector::TYPE_7_12, +FastFeatureDetector::TYPE_5_8 + +Detects corners using the FAST algorithm by @cite Rosten06 . + +@note In Python API, types are given as cv2.FAST_FEATURE_DETECTOR_TYPE_5_8, +cv2.FAST_FEATURE_DETECTOR_TYPE_7_12 and cv2.FAST_FEATURE_DETECTOR_TYPE_9_16. For corner +detection, use cv2.FAST.detect() method. + */ +CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector& keypoints, + int threshold, bool nonmaxSuppression, int type ); + +//! @} features2d_main + +//! @addtogroup features2d_main +//! @{ + +/** @brief Wrapping class for feature detection using the FAST method. 
: + */ +class CV_EXPORTS_W FastFeatureDetector : public Feature2D +{ +public: + enum + { + TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2, + THRESHOLD = 10000, NONMAX_SUPPRESSION=10001, FAST_N=10002, + }; + + CV_WRAP static Ptr create( int threshold=10, + bool nonmaxSuppression=true, + int type=FastFeatureDetector::TYPE_9_16 ); + + CV_WRAP virtual void setThreshold(int threshold) = 0; + CV_WRAP virtual int getThreshold() const = 0; + + CV_WRAP virtual void setNonmaxSuppression(bool f) = 0; + CV_WRAP virtual bool getNonmaxSuppression() const = 0; + + CV_WRAP virtual void setType(int type) = 0; + CV_WRAP virtual int getType() const = 0; + CV_WRAP virtual String getDefaultName() const CV_OVERRIDE; +}; + +/** @overload */ +CV_EXPORTS void AGAST( InputArray image, CV_OUT std::vector& keypoints, + int threshold, bool nonmaxSuppression=true ); + +/** @brief Detects corners using the AGAST algorithm + +@param image grayscale image where keypoints (corners) are detected. +@param keypoints keypoints detected on the image. +@param threshold threshold on difference between intensity of the central pixel and pixels of a +circle around this pixel. +@param nonmaxSuppression if true, non-maximum suppression is applied to detected corners +(keypoints). +@param type one of the four neighborhoods as defined in the paper: +AgastFeatureDetector::AGAST_5_8, AgastFeatureDetector::AGAST_7_12d, +AgastFeatureDetector::AGAST_7_12s, AgastFeatureDetector::OAST_9_16 + +For non-Intel platforms, there is a tree optimised variant of AGAST with same numerical results. +The 32-bit binary tree tables were generated automatically from original code using perl script. +The perl script and examples of tree generation are placed in features2d/doc folder. +Detects corners using the AGAST algorithm by @cite mair2010_agast . + + */ +CV_EXPORTS void AGAST( InputArray image, CV_OUT std::vector& keypoints, + int threshold, bool nonmaxSuppression, int type ); +//! @} features2d_main + +//! @addtogroup features2d_main +//! @{ + +/** @brief Wrapping class for feature detection using the AGAST method. : + */ +class CV_EXPORTS_W AgastFeatureDetector : public Feature2D +{ +public: + enum + { + AGAST_5_8 = 0, AGAST_7_12d = 1, AGAST_7_12s = 2, OAST_9_16 = 3, + THRESHOLD = 10000, NONMAX_SUPPRESSION = 10001, + }; + + CV_WRAP static Ptr create( int threshold=10, + bool nonmaxSuppression=true, + int type=AgastFeatureDetector::OAST_9_16 ); + + CV_WRAP virtual void setThreshold(int threshold) = 0; + CV_WRAP virtual int getThreshold() const = 0; + + CV_WRAP virtual void setNonmaxSuppression(bool f) = 0; + CV_WRAP virtual bool getNonmaxSuppression() const = 0; + + CV_WRAP virtual void setType(int type) = 0; + CV_WRAP virtual int getType() const = 0; + CV_WRAP virtual String getDefaultName() const CV_OVERRIDE; +}; + +/** @brief Wrapping class for feature detection using the goodFeaturesToTrack function. 
: + */ +class CV_EXPORTS_W GFTTDetector : public Feature2D +{ +public: + CV_WRAP static Ptr create( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1, + int blockSize=3, bool useHarrisDetector=false, double k=0.04 ); + CV_WRAP static Ptr create( int maxCorners, double qualityLevel, double minDistance, + int blockSize, int gradiantSize, bool useHarrisDetector=false, double k=0.04 ); + CV_WRAP virtual void setMaxFeatures(int maxFeatures) = 0; + CV_WRAP virtual int getMaxFeatures() const = 0; + + CV_WRAP virtual void setQualityLevel(double qlevel) = 0; + CV_WRAP virtual double getQualityLevel() const = 0; + + CV_WRAP virtual void setMinDistance(double minDistance) = 0; + CV_WRAP virtual double getMinDistance() const = 0; + + CV_WRAP virtual void setBlockSize(int blockSize) = 0; + CV_WRAP virtual int getBlockSize() const = 0; + + CV_WRAP virtual void setHarrisDetector(bool val) = 0; + CV_WRAP virtual bool getHarrisDetector() const = 0; + + CV_WRAP virtual void setK(double k) = 0; + CV_WRAP virtual double getK() const = 0; + CV_WRAP virtual String getDefaultName() const CV_OVERRIDE; +}; + +/** @brief Class for extracting blobs from an image. : + +The class implements a simple algorithm for extracting blobs from an image: + +1. Convert the source image to binary images by applying thresholding with several thresholds from + minThreshold (inclusive) to maxThreshold (exclusive) with distance thresholdStep between + neighboring thresholds. +2. Extract connected components from every binary image by findContours and calculate their + centers. +3. Group centers from several binary images by their coordinates. Close centers form one group that + corresponds to one blob, which is controlled by the minDistBetweenBlobs parameter. +4. From the groups, estimate final centers of blobs and their radiuses and return as locations and + sizes of keypoints. + +This class performs several filtrations of returned blobs. You should set filterBy\* to true/false +to turn on/off corresponding filtration. Available filtrations: + +- **By color**. This filter compares the intensity of a binary image at the center of a blob to +blobColor. If they differ, the blob is filtered out. Use blobColor = 0 to extract dark blobs +and blobColor = 255 to extract light blobs. +- **By area**. Extracted blobs have an area between minArea (inclusive) and maxArea (exclusive). +- **By circularity**. Extracted blobs have circularity +(\f$\frac{4*\pi*Area}{perimeter * perimeter}\f$) between minCircularity (inclusive) and +maxCircularity (exclusive). +- **By ratio of the minimum inertia to maximum inertia**. Extracted blobs have this ratio +between minInertiaRatio (inclusive) and maxInertiaRatio (exclusive). +- **By convexity**. Extracted blobs have convexity (area / area of blob convex hull) between +minConvexity (inclusive) and maxConvexity (exclusive). + +Default values of parameters are tuned to extract dark circular blobs. 
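+
+A minimal configuration sketch (the image `img` and the parameter values are
+placeholders, not recommendations):
+@code
+    SimpleBlobDetector::Params params;
+    params.filterByArea = true;
+    params.minArea = 50;                 // drop blobs smaller than 50 px
+    params.filterByCircularity = false;
+    Ptr<SimpleBlobDetector> detector = SimpleBlobDetector::create(params);
+    std::vector<KeyPoint> blobs;
+    detector->detect(img, blobs);
+@endcode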
+ */ +class CV_EXPORTS_W SimpleBlobDetector : public Feature2D +{ +public: + struct CV_EXPORTS_W_SIMPLE Params + { + CV_WRAP Params(); + CV_PROP_RW float thresholdStep; + CV_PROP_RW float minThreshold; + CV_PROP_RW float maxThreshold; + CV_PROP_RW size_t minRepeatability; + CV_PROP_RW float minDistBetweenBlobs; + + CV_PROP_RW bool filterByColor; + CV_PROP_RW uchar blobColor; + + CV_PROP_RW bool filterByArea; + CV_PROP_RW float minArea, maxArea; + + CV_PROP_RW bool filterByCircularity; + CV_PROP_RW float minCircularity, maxCircularity; + + CV_PROP_RW bool filterByInertia; + CV_PROP_RW float minInertiaRatio, maxInertiaRatio; + + CV_PROP_RW bool filterByConvexity; + CV_PROP_RW float minConvexity, maxConvexity; + + void read( const FileNode& fn ); + void write( FileStorage& fs ) const; + }; + + CV_WRAP static Ptr + create(const SimpleBlobDetector::Params ¶meters = SimpleBlobDetector::Params()); + CV_WRAP virtual String getDefaultName() const CV_OVERRIDE; +}; + +//! @} features2d_main + +//! @addtogroup features2d_main +//! @{ + +/** @brief Class implementing the KAZE keypoint detector and descriptor extractor, described in @cite ABD12 . + +@note AKAZE descriptor can only be used with KAZE or AKAZE keypoints .. [ABD12] KAZE Features. Pablo +F. Alcantarilla, Adrien Bartoli and Andrew J. Davison. In European Conference on Computer Vision +(ECCV), Fiorenze, Italy, October 2012. +*/ +class CV_EXPORTS_W KAZE : public Feature2D +{ +public: + enum + { + DIFF_PM_G1 = 0, + DIFF_PM_G2 = 1, + DIFF_WEICKERT = 2, + DIFF_CHARBONNIER = 3 + }; + + /** @brief The KAZE constructor + + @param extended Set to enable extraction of extended (128-byte) descriptor. + @param upright Set to enable use of upright descriptors (non rotation-invariant). + @param threshold Detector response threshold to accept point + @param nOctaves Maximum octave evolution of the image + @param nOctaveLayers Default number of sublevels per scale level + @param diffusivity Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or + DIFF_CHARBONNIER + */ + CV_WRAP static Ptr create(bool extended=false, bool upright=false, + float threshold = 0.001f, + int nOctaves = 4, int nOctaveLayers = 4, + int diffusivity = KAZE::DIFF_PM_G2); + + CV_WRAP virtual void setExtended(bool extended) = 0; + CV_WRAP virtual bool getExtended() const = 0; + + CV_WRAP virtual void setUpright(bool upright) = 0; + CV_WRAP virtual bool getUpright() const = 0; + + CV_WRAP virtual void setThreshold(double threshold) = 0; + CV_WRAP virtual double getThreshold() const = 0; + + CV_WRAP virtual void setNOctaves(int octaves) = 0; + CV_WRAP virtual int getNOctaves() const = 0; + + CV_WRAP virtual void setNOctaveLayers(int octaveLayers) = 0; + CV_WRAP virtual int getNOctaveLayers() const = 0; + + CV_WRAP virtual void setDiffusivity(int diff) = 0; + CV_WRAP virtual int getDiffusivity() const = 0; + CV_WRAP virtual String getDefaultName() const CV_OVERRIDE; +}; + +/** @brief Class implementing the AKAZE keypoint detector and descriptor extractor, described in @cite ANB13. + +@details AKAZE descriptors can only be used with KAZE or AKAZE keypoints. This class is thread-safe. + +@note When you need descriptors use Feature2D::detectAndCompute, which +provides better performance. When using Feature2D::detect followed by +Feature2D::compute scale space pyramid is computed twice. + +@note AKAZE implements T-API. When image is passed as UMat some parts of the algorithm +will use OpenCL. + +@note [ANB13] Fast Explicit Diffusion for Accelerated Features in Nonlinear +Scale Spaces. 
Pablo F. Alcantarilla, Jesús Nuevo and Adrien Bartoli. In +British Machine Vision Conference (BMVC), Bristol, UK, September 2013. + +*/ +class CV_EXPORTS_W AKAZE : public Feature2D +{ +public: + // AKAZE descriptor type + enum + { + DESCRIPTOR_KAZE_UPRIGHT = 2, ///< Upright descriptors, not invariant to rotation + DESCRIPTOR_KAZE = 3, + DESCRIPTOR_MLDB_UPRIGHT = 4, ///< Upright descriptors, not invariant to rotation + DESCRIPTOR_MLDB = 5 + }; + + /** @brief The AKAZE constructor + + @param descriptor_type Type of the extracted descriptor: DESCRIPTOR_KAZE, + DESCRIPTOR_KAZE_UPRIGHT, DESCRIPTOR_MLDB or DESCRIPTOR_MLDB_UPRIGHT. + @param descriptor_size Size of the descriptor in bits. 0 -\> Full size + @param descriptor_channels Number of channels in the descriptor (1, 2, 3) + @param threshold Detector response threshold to accept point + @param nOctaves Maximum octave evolution of the image + @param nOctaveLayers Default number of sublevels per scale level + @param diffusivity Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or + DIFF_CHARBONNIER + */ + CV_WRAP static Ptr create(int descriptor_type=AKAZE::DESCRIPTOR_MLDB, + int descriptor_size = 0, int descriptor_channels = 3, + float threshold = 0.001f, int nOctaves = 4, + int nOctaveLayers = 4, int diffusivity = KAZE::DIFF_PM_G2); + + CV_WRAP virtual void setDescriptorType(int dtype) = 0; + CV_WRAP virtual int getDescriptorType() const = 0; + + CV_WRAP virtual void setDescriptorSize(int dsize) = 0; + CV_WRAP virtual int getDescriptorSize() const = 0; + + CV_WRAP virtual void setDescriptorChannels(int dch) = 0; + CV_WRAP virtual int getDescriptorChannels() const = 0; + + CV_WRAP virtual void setThreshold(double threshold) = 0; + CV_WRAP virtual double getThreshold() const = 0; + + CV_WRAP virtual void setNOctaves(int octaves) = 0; + CV_WRAP virtual int getNOctaves() const = 0; + + CV_WRAP virtual void setNOctaveLayers(int octaveLayers) = 0; + CV_WRAP virtual int getNOctaveLayers() const = 0; + + CV_WRAP virtual void setDiffusivity(int diff) = 0; + CV_WRAP virtual int getDiffusivity() const = 0; + CV_WRAP virtual String getDefaultName() const CV_OVERRIDE; +}; + +//! 
@} features2d_main + +/****************************************************************************************\ +* Distance * +\****************************************************************************************/ + +template +struct CV_EXPORTS Accumulator +{ + typedef T Type; +}; + +template<> struct Accumulator { typedef float Type; }; +template<> struct Accumulator { typedef float Type; }; +template<> struct Accumulator { typedef float Type; }; +template<> struct Accumulator { typedef float Type; }; + +/* + * Squared Euclidean distance functor + */ +template +struct CV_EXPORTS SL2 +{ + enum { normType = NORM_L2SQR }; + typedef T ValueType; + typedef typename Accumulator::Type ResultType; + + ResultType operator()( const T* a, const T* b, int size ) const + { + return normL2Sqr(a, b, size); + } +}; + +/* + * Euclidean distance functor + */ +template +struct L2 +{ + enum { normType = NORM_L2 }; + typedef T ValueType; + typedef typename Accumulator::Type ResultType; + + ResultType operator()( const T* a, const T* b, int size ) const + { + return (ResultType)std::sqrt((double)normL2Sqr(a, b, size)); + } +}; + +/* + * Manhattan distance (city block distance) functor + */ +template +struct L1 +{ + enum { normType = NORM_L1 }; + typedef T ValueType; + typedef typename Accumulator::Type ResultType; + + ResultType operator()( const T* a, const T* b, int size ) const + { + return normL1(a, b, size); + } +}; + +/****************************************************************************************\ +* DescriptorMatcher * +\****************************************************************************************/ + +//! @addtogroup features2d_match +//! @{ + +/** @brief Abstract base class for matching keypoint descriptors. + +It has two groups of match methods: for matching descriptors of an image with another image or with +an image set. + */ +class CV_EXPORTS_W DescriptorMatcher : public Algorithm +{ +public: + enum + { + FLANNBASED = 1, + BRUTEFORCE = 2, + BRUTEFORCE_L1 = 3, + BRUTEFORCE_HAMMING = 4, + BRUTEFORCE_HAMMINGLUT = 5, + BRUTEFORCE_SL2 = 6 + }; + virtual ~DescriptorMatcher(); + + /** @brief Adds descriptors to train a CPU(trainDescCollectionis) or GPU(utrainDescCollectionis) descriptor + collection. + + If the collection is not empty, the new descriptors are added to existing train descriptors. + + @param descriptors Descriptors to add. Each descriptors[i] is a set of descriptors from the same + train image. + */ + CV_WRAP virtual void add( InputArrayOfArrays descriptors ); + + /** @brief Returns a constant link to the train descriptor collection trainDescCollection . + */ + CV_WRAP const std::vector& getTrainDescriptors() const; + + /** @brief Clears the train descriptor collections. + */ + CV_WRAP virtual void clear() CV_OVERRIDE; + + /** @brief Returns true if there are no train descriptors in the both collections. + */ + CV_WRAP virtual bool empty() const CV_OVERRIDE; + + /** @brief Returns true if the descriptor matcher supports masking permissible matches. + */ + CV_WRAP virtual bool isMaskSupported() const = 0; + + /** @brief Trains a descriptor matcher + + Trains a descriptor matcher (for example, the flann index). In all methods to match, the method + train() is run every time before matching. Some descriptor matchers (for example, BruteForceMatcher) + have an empty implementation of this method. Other matchers really train their inner structures (for + example, FlannBasedMatcher trains flann::Index ). 
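+
+    A minimal sketch of the add/train/match flow (assumes precomputed CV_32F
+    descriptor matrices `trainDesc` and `queryDesc`; placeholder names):
+    @code
+        Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("FlannBased");
+        matcher->add(std::vector<Mat>(1, trainDesc));
+        matcher->train();                    // builds the flann index
+        std::vector<DMatch> matches;
+        matcher->match(queryDesc, matches);  // matches against the trained collection
+    @endcode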
+ */ + CV_WRAP virtual void train(); + + /** @brief Finds the best match for each descriptor from a query set. + + @param queryDescriptors Query set of descriptors. + @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors + collection stored in the class object. + @param matches Matches. If a query descriptor is masked out in mask , no match is added for this + descriptor. So, matches size may be smaller than the query descriptors count. + @param mask Mask specifying permissible matches between an input query and train matrices of + descriptors. + + In the first variant of this method, the train descriptors are passed as an input argument. In the + second variant of the method, train descriptors collection that was set by DescriptorMatcher::add is + used. Optional mask (or masks) can be passed to specify which query and training descriptors can be + matched. Namely, queryDescriptors[i] can be matched with trainDescriptors[j] only if + mask.at\(i,j) is non-zero. + */ + CV_WRAP void match( InputArray queryDescriptors, InputArray trainDescriptors, + CV_OUT std::vector& matches, InputArray mask=noArray() ) const; + + /** @brief Finds the k best matches for each descriptor from a query set. + + @param queryDescriptors Query set of descriptors. + @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors + collection stored in the class object. + @param mask Mask specifying permissible matches between an input query and train matrices of + descriptors. + @param matches Matches. Each matches[i] is k or less matches for the same query descriptor. + @param k Count of best matches found per each query descriptor or less if a query descriptor has + less than k possible matches in total. + @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is + false, the matches vector has the same size as queryDescriptors rows. If compactResult is true, + the matches vector does not contain matches for fully masked-out query descriptors. + + These extended variants of DescriptorMatcher::match methods find several best matches for each query + descriptor. The matches are returned in the distance increasing order. See DescriptorMatcher::match + for the details about query and train descriptors. + */ + CV_WRAP void knnMatch( InputArray queryDescriptors, InputArray trainDescriptors, + CV_OUT std::vector >& matches, int k, + InputArray mask=noArray(), bool compactResult=false ) const; + + /** @brief For each query descriptor, finds the training descriptors not farther than the specified distance. + + @param queryDescriptors Query set of descriptors. + @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors + collection stored in the class object. + @param matches Found matches. + @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is + false, the matches vector has the same size as queryDescriptors rows. If compactResult is true, + the matches vector does not contain matches for fully masked-out query descriptors. + @param maxDistance Threshold for the distance between matched descriptors. Distance means here + metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured + in Pixels)! + @param mask Mask specifying permissible matches between an input query and train matrices of + descriptors. 
+ + For each query descriptor, the methods find such training descriptors that the distance between the + query descriptor and the training descriptor is equal or smaller than maxDistance. Found matches are + returned in the distance increasing order. + */ + CV_WRAP void radiusMatch( InputArray queryDescriptors, InputArray trainDescriptors, + CV_OUT std::vector >& matches, float maxDistance, + InputArray mask=noArray(), bool compactResult=false ) const; + + /** @overload + @param queryDescriptors Query set of descriptors. + @param matches Matches. If a query descriptor is masked out in mask , no match is added for this + descriptor. So, matches size may be smaller than the query descriptors count. + @param masks Set of masks. Each masks[i] specifies permissible matches between the input query + descriptors and stored train descriptors from the i-th image trainDescCollection[i]. + */ + CV_WRAP void match( InputArray queryDescriptors, CV_OUT std::vector& matches, + InputArrayOfArrays masks=noArray() ); + /** @overload + @param queryDescriptors Query set of descriptors. + @param matches Matches. Each matches[i] is k or less matches for the same query descriptor. + @param k Count of best matches found per each query descriptor or less if a query descriptor has + less than k possible matches in total. + @param masks Set of masks. Each masks[i] specifies permissible matches between the input query + descriptors and stored train descriptors from the i-th image trainDescCollection[i]. + @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is + false, the matches vector has the same size as queryDescriptors rows. If compactResult is true, + the matches vector does not contain matches for fully masked-out query descriptors. + */ + CV_WRAP void knnMatch( InputArray queryDescriptors, CV_OUT std::vector >& matches, int k, + InputArrayOfArrays masks=noArray(), bool compactResult=false ); + /** @overload + @param queryDescriptors Query set of descriptors. + @param matches Found matches. + @param maxDistance Threshold for the distance between matched descriptors. Distance means here + metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured + in Pixels)! + @param masks Set of masks. Each masks[i] specifies permissible matches between the input query + descriptors and stored train descriptors from the i-th image trainDescCollection[i]. + @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is + false, the matches vector has the same size as queryDescriptors rows. If compactResult is true, + the matches vector does not contain matches for fully masked-out query descriptors. + */ + CV_WRAP void radiusMatch( InputArray queryDescriptors, CV_OUT std::vector >& matches, float maxDistance, + InputArrayOfArrays masks=noArray(), bool compactResult=false ); + + + CV_WRAP void write( const String& fileName ) const + { + FileStorage fs(fileName, FileStorage::WRITE); + write(fs); + } + + CV_WRAP void read( const String& fileName ) + { + FileStorage fs(fileName, FileStorage::READ); + read(fs.root()); + } + // Reads matcher object from a file node + // see corresponding cv::Algorithm method + CV_WRAP virtual void read( const FileNode& ) CV_OVERRIDE; + // Writes matcher object to a file storage + virtual void write( FileStorage& ) const CV_OVERRIDE; + + /** @brief Clones the matcher. 
+ + @param emptyTrainData If emptyTrainData is false, the method creates a deep copy of the object, + that is, copies both parameters and train data. If emptyTrainData is true, the method creates an + object copy with the current parameters but with empty train data. + */ + CV_WRAP CV_NODISCARD_STD virtual Ptr clone( bool emptyTrainData=false ) const = 0; + + /** @brief Creates a descriptor matcher of a given type with the default parameters (using default + constructor). + + @param descriptorMatcherType Descriptor matcher type. Now the following matcher types are + supported: + - `BruteForce` (it uses L2 ) + - `BruteForce-L1` + - `BruteForce-Hamming` + - `BruteForce-Hamming(2)` + - `FlannBased` + */ + CV_WRAP static Ptr create( const String& descriptorMatcherType ); + + CV_WRAP static Ptr create( int matcherType ); + + + // see corresponding cv::Algorithm method + CV_WRAP inline void write(const Ptr& fs, const String& name = String()) const { Algorithm::write(fs, name); } + +protected: + /** + * Class to work with descriptors from several images as with one merged matrix. + * It is used e.g. in FlannBasedMatcher. + */ + class CV_EXPORTS DescriptorCollection + { + public: + DescriptorCollection(); + DescriptorCollection( const DescriptorCollection& collection ); + virtual ~DescriptorCollection(); + + // Vector of matrices "descriptors" will be merged to one matrix "mergedDescriptors" here. + void set( const std::vector& descriptors ); + virtual void clear(); + + const Mat& getDescriptors() const; + const Mat getDescriptor( int imgIdx, int localDescIdx ) const; + const Mat getDescriptor( int globalDescIdx ) const; + void getLocalIdx( int globalDescIdx, int& imgIdx, int& localDescIdx ) const; + + int size() const; + + protected: + Mat mergedDescriptors; + std::vector startIdxs; + }; + + //! In fact the matching is implemented only by the following two methods. These methods suppose + //! that the class object has been trained already. Public match methods call these methods + //! after calling train(). + virtual void knnMatchImpl( InputArray queryDescriptors, std::vector >& matches, int k, + InputArrayOfArrays masks=noArray(), bool compactResult=false ) = 0; + virtual void radiusMatchImpl( InputArray queryDescriptors, std::vector >& matches, float maxDistance, + InputArrayOfArrays masks=noArray(), bool compactResult=false ) = 0; + + static bool isPossibleMatch( InputArray mask, int queryIdx, int trainIdx ); + static bool isMaskedOut( InputArrayOfArrays masks, int queryIdx ); + + CV_NODISCARD_STD static Mat clone_op( Mat m ) { return m.clone(); } + void checkMasks( InputArrayOfArrays masks, int queryDescriptorsCount ) const; + + //! Collection of descriptors from train images. + std::vector trainDescCollection; + std::vector utrainDescCollection; +}; + +/** @brief Brute-force descriptor matcher. + +For each descriptor in the first set, this matcher finds the closest descriptor in the second set +by trying each one. This descriptor matcher supports masking permissible matches of descriptor +sets. + */ +class CV_EXPORTS_W BFMatcher : public DescriptorMatcher +{ +public: + /** @brief Brute-force matcher constructor (obsolete). Please use BFMatcher.create() + * + * + */ + CV_WRAP BFMatcher( int normType=NORM_L2, bool crossCheck=false ); + + virtual ~BFMatcher() {} + + virtual bool isMaskSupported() const CV_OVERRIDE { return true; } + + /** @brief Brute-force matcher create method. + @param normType One of NORM_L1, NORM_L2, NORM_HAMMING, NORM_HAMMING2. 
L1 and L2 norms are + preferable choices for SIFT and SURF descriptors, NORM_HAMMING should be used with ORB, BRISK and + BRIEF, NORM_HAMMING2 should be used with ORB when WTA_K==3 or 4 (see ORB::ORB constructor + description). + @param crossCheck If it is false, this is will be default BFMatcher behaviour when it finds the k + nearest neighbors for each query descriptor. If crossCheck==true, then the knnMatch() method with + k=1 will only return pairs (i,j) such that for i-th query descriptor the j-th descriptor in the + matcher's collection is the nearest and vice versa, i.e. the BFMatcher will only return consistent + pairs. Such technique usually produces best results with minimal number of outliers when there are + enough matches. This is alternative to the ratio test, used by D. Lowe in SIFT paper. + */ + CV_WRAP static Ptr create( int normType=NORM_L2, bool crossCheck=false ) ; + + CV_NODISCARD_STD virtual Ptr clone( bool emptyTrainData=false ) const CV_OVERRIDE; +protected: + virtual void knnMatchImpl( InputArray queryDescriptors, std::vector >& matches, int k, + InputArrayOfArrays masks=noArray(), bool compactResult=false ) CV_OVERRIDE; + virtual void radiusMatchImpl( InputArray queryDescriptors, std::vector >& matches, float maxDistance, + InputArrayOfArrays masks=noArray(), bool compactResult=false ) CV_OVERRIDE; + + int normType; + bool crossCheck; +}; + +#if defined(HAVE_OPENCV_FLANN) || defined(CV_DOXYGEN) + +/** @brief Flann-based descriptor matcher. + +This matcher trains cv::flann::Index on a train descriptor collection and calls its nearest search +methods to find the best matches. So, this matcher may be faster when matching a large train +collection than the brute force matcher. FlannBasedMatcher does not support masking permissible +matches of descriptor sets because flann::Index does not support this. : + */ +class CV_EXPORTS_W FlannBasedMatcher : public DescriptorMatcher +{ +public: + CV_WRAP FlannBasedMatcher( const Ptr& indexParams=makePtr(), + const Ptr& searchParams=makePtr() ); + + virtual void add( InputArrayOfArrays descriptors ) CV_OVERRIDE; + virtual void clear() CV_OVERRIDE; + + // Reads matcher object from a file node + virtual void read( const FileNode& ) CV_OVERRIDE; + // Writes matcher object to a file storage + virtual void write( FileStorage& ) const CV_OVERRIDE; + + virtual void train() CV_OVERRIDE; + virtual bool isMaskSupported() const CV_OVERRIDE; + + CV_WRAP static Ptr create(); + + CV_NODISCARD_STD virtual Ptr clone( bool emptyTrainData=false ) const CV_OVERRIDE; +protected: + static void convertToDMatches( const DescriptorCollection& descriptors, + const Mat& indices, const Mat& distances, + std::vector >& matches ); + + virtual void knnMatchImpl( InputArray queryDescriptors, std::vector >& matches, int k, + InputArrayOfArrays masks=noArray(), bool compactResult=false ) CV_OVERRIDE; + virtual void radiusMatchImpl( InputArray queryDescriptors, std::vector >& matches, float maxDistance, + InputArrayOfArrays masks=noArray(), bool compactResult=false ) CV_OVERRIDE; + + Ptr indexParams; + Ptr searchParams; + Ptr flannIndex; + + DescriptorCollection mergedDescriptors; + int addedDescCount; +}; + +#endif + +//! @} features2d_match + +/****************************************************************************************\ +* Drawing functions * +\****************************************************************************************/ + +//! @addtogroup features2d_draw +//! 
@{ + +struct CV_EXPORTS DrawMatchesFlags +{ + enum{ DEFAULT = 0, //!< Output image matrix will be created (Mat::create), + //!< i.e. existing memory of output image may be reused. + //!< Two source image, matches and single keypoints will be drawn. + //!< For each keypoint only the center point will be drawn (without + //!< the circle around keypoint with keypoint size and orientation). + DRAW_OVER_OUTIMG = 1, //!< Output image matrix will not be created (Mat::create). + //!< Matches will be drawn on existing content of output image. + NOT_DRAW_SINGLE_POINTS = 2, //!< Single keypoints will not be drawn. + DRAW_RICH_KEYPOINTS = 4 //!< For each keypoint the circle around keypoint with keypoint size and + //!< orientation will be drawn. + }; +}; + +/** @brief Draws keypoints. + +@param image Source image. +@param keypoints Keypoints from the source image. +@param outImage Output image. Its content depends on the flags value defining what is drawn in the +output image. See possible flags bit values below. +@param color Color of keypoints. +@param flags Flags setting drawing features. Possible flags bit values are defined by +DrawMatchesFlags. See details above in drawMatches . + +@note +For Python API, flags are modified as cv2.DRAW_MATCHES_FLAGS_DEFAULT, +cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS, cv2.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG, +cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS + */ +CV_EXPORTS_W void drawKeypoints( InputArray image, const std::vector& keypoints, InputOutputArray outImage, + const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT ); + +/** @brief Draws the found matches of keypoints from two images. + +@param img1 First source image. +@param keypoints1 Keypoints from the first source image. +@param img2 Second source image. +@param keypoints2 Keypoints from the second source image. +@param matches1to2 Matches from the first image to the second one, which means that keypoints1[i] +has a corresponding point in keypoints2[matches[i]] . +@param outImg Output image. Its content depends on the flags value defining what is drawn in the +output image. See possible flags bit values below. +@param matchColor Color of matches (lines and connected keypoints). If matchColor==Scalar::all(-1) +, the color is generated randomly. +@param singlePointColor Color of single keypoints (circles), which means that keypoints do not +have the matches. If singlePointColor==Scalar::all(-1) , the color is generated randomly. +@param matchesMask Mask determining which matches are drawn. If the mask is empty, all matches are +drawn. +@param flags Flags setting drawing features. Possible flags bit values are defined by +DrawMatchesFlags. + +This function draws matches of keypoints from two images in the output image. Match is a line +connecting two keypoints (circles). See cv::DrawMatchesFlags. 
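+
+A minimal sketch (assumes `img1`/`img2`, their keypoints and `matches` have
+already been computed; names are placeholders):
+@code
+    Mat vis;
+    drawMatches(img1, keypoints1, img2, keypoints2, matches, vis,
+                Scalar::all(-1), Scalar::all(-1), std::vector<char>(),
+                DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
+@endcode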
+ */ +CV_EXPORTS_W void drawMatches( InputArray img1, const std::vector& keypoints1, + InputArray img2, const std::vector& keypoints2, + const std::vector& matches1to2, InputOutputArray outImg, + const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), + const std::vector& matchesMask=std::vector(), int flags=DrawMatchesFlags::DEFAULT ); + +/** @overload */ +CV_EXPORTS_W void drawMatches( InputArray img1, const std::vector& keypoints1, + InputArray img2, const std::vector& keypoints2, + const std::vector& matches1to2, InputOutputArray outImg, + const int matchesThickness, const Scalar& matchColor=Scalar::all(-1), + const Scalar& singlePointColor=Scalar::all(-1), const std::vector& matchesMask=std::vector(), + int flags=DrawMatchesFlags::DEFAULT ); + +CV_EXPORTS_AS(drawMatchesKnn) void drawMatches( InputArray img1, const std::vector& keypoints1, + InputArray img2, const std::vector& keypoints2, + const std::vector >& matches1to2, InputOutputArray outImg, + const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), + const std::vector >& matchesMask=std::vector >(), int flags=DrawMatchesFlags::DEFAULT ); + +//! @} features2d_draw + +/****************************************************************************************\ +* Functions to evaluate the feature detectors and [generic] descriptor extractors * +\****************************************************************************************/ + +CV_EXPORTS void evaluateFeatureDetector( const Mat& img1, const Mat& img2, const Mat& H1to2, + std::vector* keypoints1, std::vector* keypoints2, + float& repeatability, int& correspCount, + const Ptr& fdetector=Ptr() ); + +CV_EXPORTS void computeRecallPrecisionCurve( const std::vector >& matches1to2, + const std::vector >& correctMatches1to2Mask, + std::vector& recallPrecisionCurve ); + +CV_EXPORTS float getRecall( const std::vector& recallPrecisionCurve, float l_precision ); +CV_EXPORTS int getNearestPoint( const std::vector& recallPrecisionCurve, float l_precision ); + +/****************************************************************************************\ +* Bag of visual words * +\****************************************************************************************/ + +//! @addtogroup features2d_category +//! @{ + +/** @brief Abstract base class for training the *bag of visual words* vocabulary from a set of descriptors. + +For details, see, for example, *Visual Categorization with Bags of Keypoints* by Gabriella Csurka, +Christopher R. Dance, Lixin Fan, Jutta Willamowski, Cedric Bray, 2004. : + */ +class CV_EXPORTS_W BOWTrainer +{ +public: + BOWTrainer(); + virtual ~BOWTrainer(); + + /** @brief Adds descriptors to a training set. + + @param descriptors Descriptors to add to a training set. Each row of the descriptors matrix is a + descriptor. + + The training set is clustered using clustermethod to construct the vocabulary. + */ + CV_WRAP void add( const Mat& descriptors ); + + /** @brief Returns a training set of descriptors. + */ + CV_WRAP const std::vector& getDescriptors() const; + + /** @brief Returns the count of all descriptors stored in the training set. + */ + CV_WRAP int descriptorsCount() const; + + CV_WRAP virtual void clear(); + + /** @overload */ + CV_WRAP virtual Mat cluster() const = 0; + + /** @brief Clusters train descriptors. + + @param descriptors Descriptors to cluster. Each row of the descriptors matrix is a descriptor. + Descriptors are not added to the inner train descriptor set. 
+ + The vocabulary consists of cluster centers. So, this method returns the vocabulary. In the first + variant of the method, train descriptors stored in the object are clustered. In the second variant, + input descriptors are clustered. + */ + CV_WRAP virtual Mat cluster( const Mat& descriptors ) const = 0; + +protected: + std::vector descriptors; + int size; +}; + +/** @brief kmeans -based class to train visual vocabulary using the *bag of visual words* approach. : + */ +class CV_EXPORTS_W BOWKMeansTrainer : public BOWTrainer +{ +public: + /** @brief The constructor. + + @see cv::kmeans + */ + CV_WRAP BOWKMeansTrainer( int clusterCount, const TermCriteria& termcrit=TermCriteria(), + int attempts=3, int flags=KMEANS_PP_CENTERS ); + virtual ~BOWKMeansTrainer(); + + // Returns trained vocabulary (i.e. cluster centers). + CV_WRAP virtual Mat cluster() const CV_OVERRIDE; + CV_WRAP virtual Mat cluster( const Mat& descriptors ) const CV_OVERRIDE; + +protected: + + int clusterCount; + TermCriteria termcrit; + int attempts; + int flags; +}; + +/** @brief Class to compute an image descriptor using the *bag of visual words*. + +Such a computation consists of the following steps: + +1. Compute descriptors for a given image and its keypoints set. +2. Find the nearest visual words from the vocabulary for each keypoint descriptor. +3. Compute the bag-of-words image descriptor as is a normalized histogram of vocabulary words +encountered in the image. The i-th bin of the histogram is a frequency of i-th word of the +vocabulary in the given image. + */ +class CV_EXPORTS_W BOWImgDescriptorExtractor +{ +public: + /** @brief The constructor. + + @param dextractor Descriptor extractor that is used to compute descriptors for an input image and + its keypoints. + @param dmatcher Descriptor matcher that is used to find the nearest word of the trained vocabulary + for each keypoint descriptor of the image. + */ + CV_WRAP BOWImgDescriptorExtractor( const Ptr& dextractor, + const Ptr& dmatcher ); + /** @overload */ + BOWImgDescriptorExtractor( const Ptr& dmatcher ); + virtual ~BOWImgDescriptorExtractor(); + + /** @brief Sets a visual vocabulary. + + @param vocabulary Vocabulary (can be trained using the inheritor of BOWTrainer ). Each row of the + vocabulary is a visual word (cluster center). + */ + CV_WRAP void setVocabulary( const Mat& vocabulary ); + + /** @brief Returns the set vocabulary. + */ + CV_WRAP const Mat& getVocabulary() const; + + /** @brief Computes an image descriptor using the set visual vocabulary. + + @param image Image, for which the descriptor is computed. + @param keypoints Keypoints detected in the input image. + @param imgDescriptor Computed output image descriptor. + @param pointIdxsOfClusters Indices of keypoints that belong to the cluster. This means that + pointIdxsOfClusters[i] are keypoint indices that belong to the i -th cluster (word of vocabulary) + returned if it is non-zero. + @param descriptors Descriptors of the image keypoints that are returned if they are non-zero. + */ + void compute( InputArray image, std::vector& keypoints, OutputArray imgDescriptor, + std::vector >* pointIdxsOfClusters=0, Mat* descriptors=0 ); + /** @overload + @param keypointDescriptors Computed descriptors to match with vocabulary. + @param imgDescriptor Computed output image descriptor. + @param pointIdxsOfClusters Indices of keypoints that belong to the cluster. 
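The bag-of-visual-words classes above are normally combined into one pipeline. The following hedged sketch (cluster count, detector choice, and the availability of cv::SIFT in the build are assumptions, and the function name is made up) trains a vocabulary with BOWKMeansTrainer and then computes a bag-of-words descriptor for one image with BOWImgDescriptorExtractor.

#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>
#include <vector>

// Illustrative sketch: 'trainDescriptors' holds one CV_32F descriptor matrix per
// training image, gathered beforehand with the same detector/extractor.
static cv::Mat computeBowDescriptor(const std::vector<cv::Mat>& trainDescriptors,
                                    const cv::Mat& image)
{
    const int vocabularySize = 100;                     // arbitrary cluster count
    cv::BOWKMeansTrainer trainer(vocabularySize);
    for (size_t i = 0; i < trainDescriptors.size(); ++i)
        trainer.add(trainDescriptors[i]);
    cv::Mat vocabulary = trainer.cluster();             // k-means cluster centers

    // SIFT gives float descriptors, which is what k-means and FLANN matching expect;
    // assumes a build where cv::SIFT lives in the features2d module.
    cv::Ptr<cv::Feature2D> extractor = cv::SIFT::create();
    cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("FlannBased");
    cv::BOWImgDescriptorExtractor bow(extractor, matcher);
    bow.setVocabulary(vocabulary);

    std::vector<cv::KeyPoint> keypoints;
    extractor->detect(image, keypoints);
    cv::Mat imgDescriptor;                              // normalized word histogram
    bow.compute(image, keypoints, imgDescriptor);
    return imgDescriptor;
}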
This means that + pointIdxsOfClusters[i] are keypoint indices that belong to the i -th cluster (word of vocabulary) + returned if it is non-zero. + */ + void compute( InputArray keypointDescriptors, OutputArray imgDescriptor, + std::vector >* pointIdxsOfClusters=0 ); + // compute() is not constant because DescriptorMatcher::match is not constant + + CV_WRAP_AS(compute) void compute2( const Mat& image, std::vector& keypoints, CV_OUT Mat& imgDescriptor ) + { compute(image,keypoints,imgDescriptor); } + + /** @brief Returns an image descriptor size if the vocabulary is set. Otherwise, it returns 0. + */ + CV_WRAP int descriptorSize() const; + + /** @brief Returns an image descriptor type. + */ + CV_WRAP int descriptorType() const; + +protected: + Mat vocabulary; + Ptr dextractor; + Ptr dmatcher; +}; + +//! @} features2d_category + +//! @} features2d + +} /* namespace cv */ + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/features2d/features2d.hpp b/third_party/opencv/uos/sw_64/include/opencv2/features2d/features2d.hpp new file mode 100644 index 00000000..e81df0ad --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/features2d/features2d.hpp @@ -0,0 +1,48 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifdef __OPENCV_BUILD +#error this is a compatibility header which should not be used inside the OpenCV library +#endif + +#include "opencv2/features2d.hpp" diff --git a/third_party/opencv/uos/sw_64/include/opencv2/features2d/hal/interface.h b/third_party/opencv/uos/sw_64/include/opencv2/features2d/hal/interface.h new file mode 100644 index 00000000..bc3b0842 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/features2d/hal/interface.h @@ -0,0 +1,33 @@ +#ifndef OPENCV_FEATURE2D_HAL_INTERFACE_H +#define OPENCV_FEATURE2D_HAL_INTERFACE_H + +#include "opencv2/core/cvdef.h" +//! @addtogroup features2d_hal_interface +//! @{ + +//! @name Fast feature detector types +//! @sa cv::FastFeatureDetector +//! @{ +#define CV_HAL_TYPE_5_8 0 +#define CV_HAL_TYPE_7_12 1 +#define CV_HAL_TYPE_9_16 2 +//! @} + +//! @name Key point +//! @sa cv::KeyPoint +//! @{ +struct CV_EXPORTS cvhalKeyPoint +{ + float x; + float y; + float size; + float angle; + float response; + int octave; + int class_id; +}; +//! @} + +//! @} + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann.hpp b/third_party/opencv/uos/sw_64/include/opencv2/flann.hpp new file mode 100644 index 00000000..5c52bfee --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann.hpp @@ -0,0 +1,635 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef OPENCV_FLANN_HPP +#define OPENCV_FLANN_HPP + +#include "opencv2/core.hpp" +#include "opencv2/flann/miniflann.hpp" +#include "opencv2/flann/flann_base.hpp" + +/** +@defgroup flann Clustering and Search in Multi-Dimensional Spaces + +This section documents OpenCV's interface to the FLANN library. FLANN (Fast Library for Approximate +Nearest Neighbors) is a library that contains a collection of algorithms optimized for fast nearest +neighbor search in large datasets and for high dimensional features. More information about FLANN +can be found in @cite Muja2009 . +*/ + +namespace cvflann +{ + CV_EXPORTS flann_distance_t flann_distance_type(); + CV_DEPRECATED CV_EXPORTS void set_distance_type(flann_distance_t distance_type, int order); +} + + +namespace cv +{ +namespace flann +{ + + +//! @addtogroup flann +//! @{ + +template struct CvType {}; +template <> struct CvType { static int type() { return CV_8U; } }; +template <> struct CvType { static int type() { return CV_8S; } }; +template <> struct CvType { static int type() { return CV_16U; } }; +template <> struct CvType { static int type() { return CV_16S; } }; +template <> struct CvType { static int type() { return CV_32S; } }; +template <> struct CvType { static int type() { return CV_32F; } }; +template <> struct CvType { static int type() { return CV_64F; } }; + + +// bring the flann parameters into this namespace +using ::cvflann::get_param; +using ::cvflann::print_params; + +// bring the flann distances into this namespace +using ::cvflann::L2_Simple; +using ::cvflann::L2; +using ::cvflann::L1; +using ::cvflann::MinkowskiDistance; +using ::cvflann::MaxDistance; +using ::cvflann::HammingLUT; +using ::cvflann::Hamming; +using ::cvflann::Hamming2; +using ::cvflann::DNAmmingLUT; +using ::cvflann::DNAmming2; +using ::cvflann::HistIntersectionDistance; +using ::cvflann::HellingerDistance; +using ::cvflann::ChiSquareDistance; +using ::cvflann::KL_Divergence; + + +/** @brief The FLANN nearest neighbor index class. This class is templated with the type of elements for which +the index is built. + +`Distance` functor specifies the metric to be used to calculate the distance between two points. +There are several `Distance` functors that are readily available: + +cv::cvflann::L2_Simple - Squared Euclidean distance functor. +This is the simpler, unrolled version. This is preferable for very low dimensionality data (eg 3D points) + +cv::flann::L2 - Squared Euclidean distance functor, optimized version. + +cv::flann::L1 - Manhattan distance functor, optimized version. + +cv::flann::MinkowskiDistance - The Minkowsky distance functor. +This is highly optimised with loop unrolling. +The computation of squared root at the end is omitted for efficiency. + +cv::flann::MaxDistance - The max distance functor. It computes the +maximum distance between two vectors. This distance is not a valid kdtree distance, it's not +dimensionwise additive. + +cv::flann::HammingLUT - %Hamming distance functor. It counts the bit +differences between two strings using a lookup table implementation. + +cv::flann::Hamming - %Hamming distance functor. Population count is +performed using library calls, if available. Lookup table implementation is used as a fallback. + +cv::flann::Hamming2 - %Hamming distance functor. Population count is +implemented in 12 arithmetic operations (one of which is multiplication). + +cv::flann::DNAmmingLUT - %Adaptation of the Hamming distance functor to DNA comparison. 
+As the four bases A, C, G, T of the DNA (or A, G, C, U for RNA) can be coded on 2 bits, +it counts the bits pairs differences between two sequences using a lookup table implementation. + +cv::flann::DNAmming2 - %Adaptation of the Hamming distance functor to DNA comparison. +Bases differences count are vectorised thanks to arithmetic operations using standard +registers (AVX2 and AVX-512 should come in a near future). + +cv::flann::HistIntersectionDistance - The histogram +intersection distance functor. + +cv::flann::HellingerDistance - The Hellinger distance functor. + +cv::flann::ChiSquareDistance - The chi-square distance functor. + +cv::flann::KL_Divergence - The Kullback-Leibler divergence functor. + +Although the provided implementations cover a vast range of cases, it is also possible to use +a custom implementation. The distance functor is a class whose `operator()` computes the distance +between two features. If the distance is also a kd-tree compatible distance, it should also provide an +`accum_dist()` method that computes the distance between individual feature dimensions. + +In addition to `operator()` and `accum_dist()`, a distance functor should also define the +`ElementType` and the `ResultType` as the types of the elements it operates on and the type of the +result it computes. If a distance functor can be used as a kd-tree distance (meaning that the full +distance between a pair of features can be accumulated from the partial distances between the +individual dimensions) a typedef `is_kdtree_distance` should be present inside the distance functor. +If the distance is not a kd-tree distance, but it's a distance in a vector space (the individual +dimensions of the elements it operates on can be accessed independently) a typedef +`is_vector_space_distance` should be defined inside the functor. If neither typedef is defined, the +distance is assumed to be a metric distance and will only be used with indexes operating on +generic metric distances. + */ +template +class GenericIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + /** @brief Constructs a nearest neighbor search index for a given dataset. + + @param features Matrix of containing the features(points) to index. The size of the matrix is + num_features x feature_dimensionality and the data type of the elements in the matrix must + coincide with the type of the index. + @param params Structure containing the index parameters. The type of index that will be + constructed depends on the type of this parameter. See the description. + @param distance + + The method constructs a fast search structure from a set of features using the specified algorithm + with specified parameters, as defined by params. params is a reference to one of the following class + IndexParams descendants: + + - **LinearIndexParams** When passing an object of this type, the index will perform a linear, + brute-force search. : + @code + struct LinearIndexParams : public IndexParams + { + }; + @endcode + - **KDTreeIndexParams** When passing an object of this type the index constructed will consist of + a set of randomized kd-trees which will be searched in parallel. 
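As an illustration of the custom distance functor requirements listed above (operator(), accum_dist(), the ElementType/ResultType typedefs and the is_kdtree_distance tag), a minimal kd-tree compatible squared-Euclidean functor might look like the following sketch. Nothing here is part of the library; cvflann::True is the tag type FLANN's own functors use for the typedef.

#include <opencv2/flann.hpp>
#include <cstddef>

// Illustrative sketch of a user-supplied, kd-tree compatible distance functor.
struct MySquaredL2
{
    typedef cvflann::True is_kdtree_distance;   // per-dimension parts sum to the full distance
    typedef float ElementType;                  // element type this functor operates on
    typedef float ResultType;                   // type of the computed distance

    template <typename Iterator1, typename Iterator2>
    ResultType operator()(Iterator1 a, Iterator2 b, size_t size,
                          ResultType /*worst_dist*/ = -1) const
    {
        ResultType result = 0;
        for (size_t i = 0; i < size; ++i) {
            ResultType diff = ResultType(*a++) - ResultType(*b++);
            result += diff * diff;
        }
        return result;
    }

    // Distance between two values of a single feature dimension,
    // used during kd-tree traversal.
    template <typename U, typename V>
    ResultType accum_dist(const U& a, const V& b, int /*dim*/) const
    {
        ResultType diff = ResultType(a) - ResultType(b);
        return diff * diff;
    }
};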
: + @code + struct KDTreeIndexParams : public IndexParams + { + KDTreeIndexParams( int trees = 4 ); + }; + @endcode + - **HierarchicalClusteringIndexParams** When passing an object of this type the index constructed + will be a hierarchical tree of clusters, dividing each set of points into n clusters whose centers + are picked among the points without further refinement of their position. + This algorithm fits both floating, integer and binary vectors. : + @code + struct HierarchicalClusteringIndexParams : public IndexParams + { + HierarchicalClusteringIndexParams( + int branching = 32, + flann_centers_init_t centers_init = CENTERS_RANDOM, + int trees = 4, + int leaf_size = 100); + + }; + @endcode + - **KMeansIndexParams** When passing an object of this type the index constructed will be a + hierarchical k-means tree (one tree by default), dividing each set of points into n clusters + whose barycenters are refined iteratively. + Note that this algorithm has been extended to the support of binary vectors as an alternative + to LSH when knn search speed is the criterium. It will also outperform LSH when processing + directly (i.e. without the use of MCA/PCA) datasets whose points share mostly the same values + for most of the dimensions. It is recommended to set more than one tree with binary data. : + @code + struct KMeansIndexParams : public IndexParams + { + KMeansIndexParams( + int branching = 32, + int iterations = 11, + flann_centers_init_t centers_init = CENTERS_RANDOM, + float cb_index = 0.2 ); + + KMeansIndexParams( + int branching, + int iterations, + flann_centers_init_t centers_init, + float cb_index, + int trees ); + }; + @endcode + - **CompositeIndexParams** When using a parameters object of this type the index created + combines the randomized kd-trees and the hierarchical k-means tree. : + @code + struct CompositeIndexParams : public IndexParams + { + CompositeIndexParams( + int trees = 4, + int branching = 32, + int iterations = 11, + flann_centers_init_t centers_init = CENTERS_RANDOM, + float cb_index = 0.2 ); + }; + @endcode + - **LshIndexParams** When using a parameters object of this type the index created uses + multi-probe LSH (by Multi-Probe LSH: Efficient Indexing for High-Dimensional Similarity Search + by Qin Lv, William Josephson, Zhe Wang, Moses Charikar, Kai Li., Proceedings of the 33rd + International Conference on Very Large Data Bases (VLDB). Vienna, Austria. September 2007). + This algorithm is designed for binary vectors. : + @code + struct LshIndexParams : public IndexParams + { + LshIndexParams( + unsigned int table_number, + unsigned int key_size, + unsigned int multi_probe_level ); + }; + @endcode + - **AutotunedIndexParams** When passing an object of this type the index created is + automatically tuned to offer the best performance, by choosing the optimal index type + (randomized kd-trees, hierarchical kmeans, linear) and parameters for the dataset provided. : + @code + struct AutotunedIndexParams : public IndexParams + { + AutotunedIndexParams( + float target_precision = 0.9, + float build_weight = 0.01, + float memory_weight = 0, + float sample_fraction = 0.1 ); + }; + @endcode + - **SavedIndexParams** This object type is used for loading a previously saved index from the + disk. 
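Tying the parameter structures above together, here is a hedged usage sketch (dataset shape, the number of trees, and the number of checks are arbitrary; knnSearch is declared further down in this header) that builds a randomized kd-tree index over CV_32F features and queries it.

#include <opencv2/core.hpp>
#include <opencv2/flann.hpp>

// Illustrative sketch: 'features' and 'queries' are continuous CV_32F matrices,
// one point per row, with the same number of columns.
static void kdTreeQuery(const cv::Mat& features, const cv::Mat& queries)
{
    // Four randomized kd-trees, matching the KDTreeIndexParams default shown above.
    cv::flann::GenericIndex<cvflann::L2<float> > index(features,
                                                       cvflann::KDTreeIndexParams(4));

    const int knn = 3;
    cv::Mat indices(queries.rows, knn, CV_32S);
    cv::Mat dists(queries.rows, knn, CV_32F);
    // 64 leaf checks is an arbitrary speed/accuracy trade-off.
    index.knnSearch(queries, indices, dists, knn, cvflann::SearchParams(64));
}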
: + @code + struct SavedIndexParams : public IndexParams + { + SavedIndexParams( String filename ); + }; + @endcode + */ + GenericIndex(const Mat& features, const ::cvflann::IndexParams& params, Distance distance = Distance()); + + ~GenericIndex(); + + /** @brief Performs a K-nearest neighbor search for a given query point using the index. + + @param query The query point + @param indices Vector that will contain the indices of the K-nearest neighbors found. It must have + at least knn size. + @param dists Vector that will contain the distances to the K-nearest neighbors found. It must have + at least knn size. + @param knn Number of nearest neighbors to search for. + @param params SearchParams + */ + void knnSearch(const std::vector& query, std::vector& indices, + std::vector& dists, int knn, const ::cvflann::SearchParams& params); + void knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& params); + + /** @brief Performs a radius nearest neighbor search for a given query point using the index. + + @param query The query point. + @param indices Vector that will contain the indices of the nearest neighbors found. + @param dists Vector that will contain the distances to the nearest neighbors found. It has the same + number of elements as indices. + @param radius The search radius. + @param params SearchParams + + This function returns the number of nearest neighbors found. + */ + int radiusSearch(const std::vector& query, std::vector& indices, + std::vector& dists, DistanceType radius, const ::cvflann::SearchParams& params); + int radiusSearch(const Mat& query, Mat& indices, Mat& dists, + DistanceType radius, const ::cvflann::SearchParams& params); + + void save(String filename) { nnIndex->save(filename); } + + int veclen() const { return nnIndex->veclen(); } + + int size() const { return (int)nnIndex->size(); } + + ::cvflann::IndexParams getParameters() { return nnIndex->getParameters(); } + + CV_DEPRECATED const ::cvflann::IndexParams* getIndexParameters() { return nnIndex->getIndexParameters(); } + +private: + ::cvflann::Index* nnIndex; + Mat _dataset; +}; + +//! @cond IGNORED + +#define FLANN_DISTANCE_CHECK \ + if ( ::cvflann::flann_distance_type() != cvflann::FLANN_DIST_L2) { \ + printf("[WARNING] You are using cv::flann::Index (or cv::flann::GenericIndex) and have also changed "\ + "the distance using cvflann::set_distance_type. This is no longer working as expected "\ + "(cv::flann::Index always uses L2). 
You should create the index templated on the distance, "\ + "for example for L1 distance use: GenericIndex< L1 > \n"); \ + } + + +template +GenericIndex::GenericIndex(const Mat& dataset, const ::cvflann::IndexParams& params, Distance distance) +: _dataset(dataset) +{ + CV_Assert(dataset.type() == CvType::type()); + CV_Assert(dataset.isContinuous()); + ::cvflann::Matrix m_dataset((ElementType*)_dataset.ptr(0), _dataset.rows, _dataset.cols); + + nnIndex = new ::cvflann::Index(m_dataset, params, distance); + + FLANN_DISTANCE_CHECK + + nnIndex->buildIndex(); +} + +template +GenericIndex::~GenericIndex() +{ + delete nnIndex; +} + +template +void GenericIndex::knnSearch(const std::vector& query, std::vector& indices, std::vector& dists, int knn, const ::cvflann::SearchParams& searchParams) +{ + ::cvflann::Matrix m_query((ElementType*)&query[0], 1, query.size()); + ::cvflann::Matrix m_indices(&indices[0], 1, indices.size()); + ::cvflann::Matrix m_dists(&dists[0], 1, dists.size()); + + FLANN_DISTANCE_CHECK + + nnIndex->knnSearch(m_query,m_indices,m_dists,knn,searchParams); +} + + +template +void GenericIndex::knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& searchParams) +{ + CV_Assert(queries.type() == CvType::type()); + CV_Assert(queries.isContinuous()); + ::cvflann::Matrix m_queries((ElementType*)queries.ptr(0), queries.rows, queries.cols); + + CV_Assert(indices.type() == CV_32S); + CV_Assert(indices.isContinuous()); + ::cvflann::Matrix m_indices((int*)indices.ptr(0), indices.rows, indices.cols); + + CV_Assert(dists.type() == CvType::type()); + CV_Assert(dists.isContinuous()); + ::cvflann::Matrix m_dists((DistanceType*)dists.ptr(0), dists.rows, dists.cols); + + FLANN_DISTANCE_CHECK + + nnIndex->knnSearch(m_queries,m_indices,m_dists,knn, searchParams); +} + +template +int GenericIndex::radiusSearch(const std::vector& query, std::vector& indices, std::vector& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams) +{ + ::cvflann::Matrix m_query((ElementType*)&query[0], 1, query.size()); + ::cvflann::Matrix m_indices(&indices[0], 1, indices.size()); + ::cvflann::Matrix m_dists(&dists[0], 1, dists.size()); + + FLANN_DISTANCE_CHECK + + return nnIndex->radiusSearch(m_query,m_indices,m_dists,radius,searchParams); +} + +template +int GenericIndex::radiusSearch(const Mat& query, Mat& indices, Mat& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams) +{ + CV_Assert(query.type() == CvType::type()); + CV_Assert(query.isContinuous()); + ::cvflann::Matrix m_query((ElementType*)query.ptr(0), query.rows, query.cols); + + CV_Assert(indices.type() == CV_32S); + CV_Assert(indices.isContinuous()); + ::cvflann::Matrix m_indices((int*)indices.ptr(0), indices.rows, indices.cols); + + CV_Assert(dists.type() == CvType::type()); + CV_Assert(dists.isContinuous()); + ::cvflann::Matrix m_dists((DistanceType*)dists.ptr(0), dists.rows, dists.cols); + + FLANN_DISTANCE_CHECK + + return nnIndex->radiusSearch(m_query,m_indices,m_dists,radius,searchParams); +} + +/** + * @deprecated Use GenericIndex class instead + */ +template +class Index_ +{ +public: + typedef typename L2::ElementType ElementType; + typedef typename L2::ResultType DistanceType; + + CV_DEPRECATED Index_(const Mat& dataset, const ::cvflann::IndexParams& params) + { + printf("[WARNING] The cv::flann::Index_ class is deperecated, use cv::flann::GenericIndex instead\n"); + + CV_Assert(dataset.type() == CvType::type()); + CV_Assert(dataset.isContinuous()); + ::cvflann::Matrix 
m_dataset((ElementType*)dataset.ptr(0), dataset.rows, dataset.cols); + + if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L2 ) { + nnIndex_L1 = NULL; + nnIndex_L2 = new ::cvflann::Index< L2 >(m_dataset, params); + } + else if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L1 ) { + nnIndex_L1 = new ::cvflann::Index< L1 >(m_dataset, params); + nnIndex_L2 = NULL; + } + else { + printf("[ERROR] cv::flann::Index_ only provides backwards compatibility for the L1 and L2 distances. " + "For other distance types you must use cv::flann::GenericIndex\n"); + CV_Assert(0); + } + if (nnIndex_L1) nnIndex_L1->buildIndex(); + if (nnIndex_L2) nnIndex_L2->buildIndex(); + } + CV_DEPRECATED ~Index_() + { + if (nnIndex_L1) delete nnIndex_L1; + if (nnIndex_L2) delete nnIndex_L2; + } + + CV_DEPRECATED void knnSearch(const std::vector& query, std::vector& indices, std::vector& dists, int knn, const ::cvflann::SearchParams& searchParams) + { + ::cvflann::Matrix m_query((ElementType*)&query[0], 1, query.size()); + ::cvflann::Matrix m_indices(&indices[0], 1, indices.size()); + ::cvflann::Matrix m_dists(&dists[0], 1, dists.size()); + + if (nnIndex_L1) nnIndex_L1->knnSearch(m_query,m_indices,m_dists,knn,searchParams); + if (nnIndex_L2) nnIndex_L2->knnSearch(m_query,m_indices,m_dists,knn,searchParams); + } + CV_DEPRECATED void knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& searchParams) + { + CV_Assert(queries.type() == CvType::type()); + CV_Assert(queries.isContinuous()); + ::cvflann::Matrix m_queries((ElementType*)queries.ptr(0), queries.rows, queries.cols); + + CV_Assert(indices.type() == CV_32S); + CV_Assert(indices.isContinuous()); + ::cvflann::Matrix m_indices((int*)indices.ptr(0), indices.rows, indices.cols); + + CV_Assert(dists.type() == CvType::type()); + CV_Assert(dists.isContinuous()); + ::cvflann::Matrix m_dists((DistanceType*)dists.ptr(0), dists.rows, dists.cols); + + if (nnIndex_L1) nnIndex_L1->knnSearch(m_queries,m_indices,m_dists,knn, searchParams); + if (nnIndex_L2) nnIndex_L2->knnSearch(m_queries,m_indices,m_dists,knn, searchParams); + } + + CV_DEPRECATED int radiusSearch(const std::vector& query, std::vector& indices, std::vector& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams) + { + ::cvflann::Matrix m_query((ElementType*)&query[0], 1, query.size()); + ::cvflann::Matrix m_indices(&indices[0], 1, indices.size()); + ::cvflann::Matrix m_dists(&dists[0], 1, dists.size()); + + if (nnIndex_L1) return nnIndex_L1->radiusSearch(m_query,m_indices,m_dists,radius,searchParams); + if (nnIndex_L2) return nnIndex_L2->radiusSearch(m_query,m_indices,m_dists,radius,searchParams); + } + + CV_DEPRECATED int radiusSearch(const Mat& query, Mat& indices, Mat& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams) + { + CV_Assert(query.type() == CvType::type()); + CV_Assert(query.isContinuous()); + ::cvflann::Matrix m_query((ElementType*)query.ptr(0), query.rows, query.cols); + + CV_Assert(indices.type() == CV_32S); + CV_Assert(indices.isContinuous()); + ::cvflann::Matrix m_indices((int*)indices.ptr(0), indices.rows, indices.cols); + + CV_Assert(dists.type() == CvType::type()); + CV_Assert(dists.isContinuous()); + ::cvflann::Matrix m_dists((DistanceType*)dists.ptr(0), dists.rows, dists.cols); + + if (nnIndex_L1) return nnIndex_L1->radiusSearch(m_query,m_indices,m_dists,radius,searchParams); + if (nnIndex_L2) return nnIndex_L2->radiusSearch(m_query,m_indices,m_dists,radius,searchParams); + } + + CV_DEPRECATED 
void save(String filename) + { + if (nnIndex_L1) nnIndex_L1->save(filename); + if (nnIndex_L2) nnIndex_L2->save(filename); + } + + CV_DEPRECATED int veclen() const + { + if (nnIndex_L1) return nnIndex_L1->veclen(); + if (nnIndex_L2) return nnIndex_L2->veclen(); + } + + CV_DEPRECATED int size() const + { + if (nnIndex_L1) return nnIndex_L1->size(); + if (nnIndex_L2) return nnIndex_L2->size(); + } + + CV_DEPRECATED ::cvflann::IndexParams getParameters() + { + if (nnIndex_L1) return nnIndex_L1->getParameters(); + if (nnIndex_L2) return nnIndex_L2->getParameters(); + + } + + CV_DEPRECATED const ::cvflann::IndexParams* getIndexParameters() + { + if (nnIndex_L1) return nnIndex_L1->getIndexParameters(); + if (nnIndex_L2) return nnIndex_L2->getIndexParameters(); + } + +private: + // providing backwards compatibility for L2 and L1 distances (most common) + ::cvflann::Index< L2 >* nnIndex_L2; + ::cvflann::Index< L1 >* nnIndex_L1; +}; + +//! @endcond + +/** @brief Clusters features using hierarchical k-means algorithm. + +@param features The points to be clustered. The matrix must have elements of type +Distance::ElementType. +@param centers The centers of the clusters obtained. The matrix must have type +Distance::CentersType. The number of rows in this matrix represents the number of clusters desired, +however, because of the way the cut in the hierarchical tree is chosen, the number of clusters +computed will be the highest number of the form (branching-1)\*k+1 that's lower than the number of +clusters desired, where branching is the tree's branching factor (see description of the +KMeansIndexParams). +@param params Parameters used in the construction of the hierarchical k-means tree. +@param d Distance to be used for clustering. + +The method clusters the given feature vectors by constructing a hierarchical k-means tree and +choosing a cut in the tree that minimizes the cluster's variance. It returns the number of clusters +found. + */ +template +int hierarchicalClustering(const Mat& features, Mat& centers, const ::cvflann::KMeansIndexParams& params, + Distance d = Distance()) +{ + typedef typename Distance::ElementType ElementType; + typedef typename Distance::CentersType CentersType; + + CV_Assert(features.type() == CvType::type()); + CV_Assert(features.isContinuous()); + ::cvflann::Matrix m_features((ElementType*)features.ptr(0), features.rows, features.cols); + + CV_Assert(centers.type() == CvType::type()); + CV_Assert(centers.isContinuous()); + ::cvflann::Matrix m_centers((CentersType*)centers.ptr(0), centers.rows, centers.cols); + + return ::cvflann::hierarchicalClustering(m_features, m_centers, params, d); +} + +//! @cond IGNORED + +template +CV_DEPRECATED int hierarchicalClustering(const Mat& features, Mat& centers, const ::cvflann::KMeansIndexParams& params) +{ + printf("[WARNING] cv::flann::hierarchicalClustering is deprecated, use " + "cv::flann::hierarchicalClustering instead\n"); + + if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L2 ) { + return hierarchicalClustering< L2 >(features, centers, params); + } + else if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L1 ) { + return hierarchicalClustering< L1 >(features, centers, params); + } + else { + printf("[ERROR] cv::flann::hierarchicalClustering only provides backwards " + "compatibility for the L1 and L2 distances. " + "For other distance types you must use cv::flann::hierarchicalClustering\n"); + CV_Assert(0); + } +} + +//! @endcond + +//! 
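A hedged sketch of the hierarchicalClustering entry point documented above (the branching factor and requested center count are arbitrary); the return value is the number of clusters actually computed, which is the largest value of the form (branching-1)*k+1 not exceeding the number of rows in centers.

#include <opencv2/core.hpp>
#include <opencv2/flann.hpp>

// Illustrative sketch: cluster N x D CV_32F feature vectors with cv::flann.
static int clusterFeatures(const cv::Mat& features)
{
    const int branching = 32;
    // Ask for up to 100 centers; fewer may be returned, per the rule above.
    cv::Mat centers(100, features.cols, CV_32F);
    cvflann::KMeansIndexParams params(branching, 11, cvflann::FLANN_CENTERS_RANDOM, 0.2f);
    return cv::flann::hierarchicalClustering<cvflann::L2<float> >(features, centers, params);
}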
@} flann + +} } // namespace cv::flann + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/all_indices.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/all_indices.h new file mode 100644 index 00000000..03877ab6 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/all_indices.h @@ -0,0 +1,162 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + + +#ifndef OPENCV_FLANN_ALL_INDICES_H_ +#define OPENCV_FLANN_ALL_INDICES_H_ + +//! 
@cond IGNORED + +#include "general.h" + +#include "nn_index.h" +#include "kdtree_index.h" +#include "kdtree_single_index.h" +#include "kmeans_index.h" +#include "composite_index.h" +#include "linear_index.h" +#include "hierarchical_clustering_index.h" +#include "lsh_index.h" +#include "autotuned_index.h" + + +namespace cvflann +{ + +template +struct index_creator +{ + static NNIndex* create(const Matrix& dataset, const IndexParams& params, const Distance& distance) + { + flann_algorithm_t index_type = get_param(params, "algorithm"); + + NNIndex* nnIndex; + switch (index_type) { + case FLANN_INDEX_LINEAR: + nnIndex = new LinearIndex(dataset, params, distance); + break; + case FLANN_INDEX_KDTREE_SINGLE: + nnIndex = new KDTreeSingleIndex(dataset, params, distance); + break; + case FLANN_INDEX_KDTREE: + nnIndex = new KDTreeIndex(dataset, params, distance); + break; + case FLANN_INDEX_KMEANS: + nnIndex = new KMeansIndex(dataset, params, distance); + break; + case FLANN_INDEX_COMPOSITE: + nnIndex = new CompositeIndex(dataset, params, distance); + break; + case FLANN_INDEX_AUTOTUNED: + nnIndex = new AutotunedIndex(dataset, params, distance); + break; + case FLANN_INDEX_HIERARCHICAL: + nnIndex = new HierarchicalClusteringIndex(dataset, params, distance); + break; + case FLANN_INDEX_LSH: + nnIndex = new LshIndex(dataset, params, distance); + break; + default: + FLANN_THROW(cv::Error::StsBadArg, "Unknown index type"); + } + + return nnIndex; + } +}; + +template +struct index_creator +{ + static NNIndex* create(const Matrix& dataset, const IndexParams& params, const Distance& distance) + { + flann_algorithm_t index_type = get_param(params, "algorithm"); + + NNIndex* nnIndex; + switch (index_type) { + case FLANN_INDEX_LINEAR: + nnIndex = new LinearIndex(dataset, params, distance); + break; + case FLANN_INDEX_KMEANS: + nnIndex = new KMeansIndex(dataset, params, distance); + break; + case FLANN_INDEX_HIERARCHICAL: + nnIndex = new HierarchicalClusteringIndex(dataset, params, distance); + break; + case FLANN_INDEX_LSH: + nnIndex = new LshIndex(dataset, params, distance); + break; + default: + FLANN_THROW(cv::Error::StsBadArg, "Unknown index type"); + } + + return nnIndex; + } +}; + +template +struct index_creator +{ + static NNIndex* create(const Matrix& dataset, const IndexParams& params, const Distance& distance) + { + flann_algorithm_t index_type = get_param(params, "algorithm"); + + NNIndex* nnIndex; + switch (index_type) { + case FLANN_INDEX_LINEAR: + nnIndex = new LinearIndex(dataset, params, distance); + break; + case FLANN_INDEX_KMEANS: + nnIndex = new KMeansIndex(dataset, params, distance); + break; + case FLANN_INDEX_HIERARCHICAL: + nnIndex = new HierarchicalClusteringIndex(dataset, params, distance); + break; + case FLANN_INDEX_LSH: + nnIndex = new LshIndex(dataset, params, distance); + break; + default: + FLANN_THROW(cv::Error::StsBadArg, "Unknown index type"); + } + + return nnIndex; + } +}; + +template +NNIndex* create_index_by_type(const Matrix& dataset, const IndexParams& params, const Distance& distance) +{ + return index_creator::create(dataset, params,distance); +} + +} + +//! 
@endcond + +#endif /* OPENCV_FLANN_ALL_INDICES_H_ */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/allocator.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/allocator.h new file mode 100644 index 00000000..d5870a01 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/allocator.h @@ -0,0 +1,196 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_ALLOCATOR_H_ +#define OPENCV_FLANN_ALLOCATOR_H_ + +//! @cond IGNORED + +#include +#include + + +namespace cvflann +{ + +/** + * Allocates (using C's malloc) a generic type T. + * + * Params: + * count = number of instances to allocate. + * Returns: pointer (of type T*) to memory buffer + */ +template +T* allocate(size_t count = 1) +{ + T* mem = (T*) ::malloc(sizeof(T)*count); + return mem; +} + + +/** + * Pooled storage allocator + * + * The following routines allow for the efficient allocation of storage in + * small chunks from a specified pool. Rather than allowing each structure + * to be freed individually, an entire pool of storage is freed at once. + * This method has two advantages over just using malloc() and free(). First, + * it is far more efficient for allocating small objects, as there is + * no overhead for remembering all the information needed to free each + * object or consolidating fragmented memory. Second, the decision about + * how long to keep an object is made at the time of allocation, and there + * is no need to track down all the objects to free them. + * + */ + +const size_t WORDSIZE=16; +const size_t BLOCKSIZE=8192; + +class PooledAllocator +{ + /* We maintain memory alignment to word boundaries by requiring that all + allocations be in multiples of the machine wordsize. */ + /* Size of machine word in bytes. Must be power of 2. */ + /* Minimum number of bytes requested at a time from the system. Must be multiple of WORDSIZE. 
*/ + + + int remaining; /* Number of bytes left in current block of storage. */ + void* base; /* Pointer to base of current block of storage. */ + void* loc; /* Current location in block to next allocate memory. */ + int blocksize; + + +public: + int usedMemory; + int wastedMemory; + + /** + Default constructor. Initializes a new pool. + */ + PooledAllocator(int blockSize = BLOCKSIZE) + { + blocksize = blockSize; + remaining = 0; + base = NULL; + loc = NULL; + + usedMemory = 0; + wastedMemory = 0; + } + + /** + * Destructor. Frees all the memory allocated in this pool. + */ + ~PooledAllocator() + { + void* prev; + + while (base != NULL) { + prev = *((void**) base); /* Get pointer to prev block. */ + ::free(base); + base = prev; + } + } + + /** + * Returns a pointer to a piece of new memory of the given size in bytes + * allocated from the pool. + */ + void* allocateMemory(int size) + { + int blockSize; + + /* Round size up to a multiple of wordsize. The following expression + only works for WORDSIZE that is a power of 2, by masking last bits of + incremented size to zero. + */ + size = (size + (WORDSIZE - 1)) & ~(WORDSIZE - 1); + + /* Check whether a new block must be allocated. Note that the first word + of a block is reserved for a pointer to the previous block. + */ + if (size > remaining) { + + wastedMemory += remaining; + + /* Allocate new storage. */ + blockSize = (size + sizeof(void*) + (WORDSIZE-1) > BLOCKSIZE) ? + size + sizeof(void*) + (WORDSIZE-1) : BLOCKSIZE; + + // use the standard C malloc to allocate memory + void* m = ::malloc(blockSize); + if (!m) { + fprintf(stderr,"Failed to allocate memory.\n"); + return NULL; + } + + /* Fill first word of new block with pointer to previous block. */ + ((void**) m)[0] = base; + base = m; + + int shift = 0; + //int shift = (WORDSIZE - ( (((size_t)m) + sizeof(void*)) & (WORDSIZE-1))) & (WORDSIZE-1); + + remaining = blockSize - sizeof(void*) - shift; + loc = ((char*)m + sizeof(void*) + shift); + } + void* rloc = loc; + loc = (char*)loc + size; + remaining -= size; + + usedMemory += size; + + return rloc; + } + + /** + * Allocates (using this pool) a generic type T. + * + * Params: + * count = number of instances to allocate. + * Returns: pointer (of type T*) to memory buffer + */ + template + T* allocate(size_t count = 1) + { + T* mem = (T*) this->allocateMemory((int)(sizeof(T)*count)); + return mem; + } + +private: + PooledAllocator(const PooledAllocator &); // copy disabled + PooledAllocator& operator=(const PooledAllocator &); // assign disabled +}; + +} + +//! @endcond + +#endif //OPENCV_FLANN_ALLOCATOR_H_ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/any.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/any.h new file mode 100644 index 00000000..4906fec0 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/any.h @@ -0,0 +1,332 @@ +#ifndef OPENCV_FLANN_ANY_H_ +#define OPENCV_FLANN_ANY_H_ +/* + * (C) Copyright Christopher Diggins 2005-2011 + * (C) Copyright Pablo Aguilar 2005 + * (C) Copyright Kevlin Henney 2001 + * + * Distributed under the Boost Software License, Version 1.0. (See + * accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt + * + * Adapted for FLANN by Marius Muja + */ + +//! 
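To illustrate the pooled allocation scheme described above (PooledAllocator is an internal FLANN helper; this sketch is illustrative only): many small objects are carved out of large blocks, and everything is released together when the pool is destroyed, with no per-object bookkeeping.

#include <opencv2/flann.hpp>

// Illustrative sketch: a plain-old-data node type allocated from the pool.
struct Node { int index; float value; };

static void buildNodes()
{
    cvflann::PooledAllocator pool;            // default 8 KB blocks (BLOCKSIZE)
    for (int i = 0; i < 1000; ++i) {
        Node* n = pool.allocate<Node>();      // raw storage; no constructor is run
        n->index = i;
        n->value = 0.f;
    }
}   // ~PooledAllocator frees all blocks here in one sweep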
@cond IGNORED + +#include "defines.h" +#include +#include +#include + +namespace cvflann +{ + +namespace anyimpl +{ + +struct bad_any_cast +{ +}; + +struct empty_any +{ +}; + +inline std::ostream& operator <<(std::ostream& out, const empty_any&) +{ + out << "[empty_any]"; + return out; +} + +struct base_any_policy +{ + virtual void static_delete(void** x) = 0; + virtual void copy_from_value(void const* src, void** dest) = 0; + virtual void clone(void* const* src, void** dest) = 0; + virtual void move(void* const* src, void** dest) = 0; + virtual void* get_value(void** src) = 0; + virtual const void* get_value(void* const * src) = 0; + virtual ::size_t get_size() = 0; + virtual const std::type_info& type() = 0; + virtual void print(std::ostream& out, void* const* src) = 0; + virtual ~base_any_policy() {} +}; + +template +struct typed_base_any_policy : base_any_policy +{ + virtual ::size_t get_size() CV_OVERRIDE { return sizeof(T); } + virtual const std::type_info& type() CV_OVERRIDE { return typeid(T); } + +}; + +template +struct small_any_policy CV_FINAL : typed_base_any_policy +{ + virtual void static_delete(void**) CV_OVERRIDE { } + virtual void copy_from_value(void const* src, void** dest) CV_OVERRIDE + { + new (dest) T(* reinterpret_cast(src)); + } + virtual void clone(void* const* src, void** dest) CV_OVERRIDE { *dest = *src; } + virtual void move(void* const* src, void** dest) CV_OVERRIDE { *dest = *src; } + virtual void* get_value(void** src) CV_OVERRIDE { return reinterpret_cast(src); } + virtual const void* get_value(void* const * src) CV_OVERRIDE { return reinterpret_cast(src); } + virtual void print(std::ostream& out, void* const* src) CV_OVERRIDE { out << *reinterpret_cast(src); } +}; + +template +struct big_any_policy CV_FINAL : typed_base_any_policy +{ + virtual void static_delete(void** x) CV_OVERRIDE + { + if (* x) delete (* reinterpret_cast(x)); + *x = NULL; + } + virtual void copy_from_value(void const* src, void** dest) CV_OVERRIDE + { + *dest = new T(*reinterpret_cast(src)); + } + virtual void clone(void* const* src, void** dest) CV_OVERRIDE + { + *dest = new T(**reinterpret_cast(src)); + } + virtual void move(void* const* src, void** dest) CV_OVERRIDE + { + (*reinterpret_cast(dest))->~T(); + **reinterpret_cast(dest) = **reinterpret_cast(src); + } + virtual void* get_value(void** src) CV_OVERRIDE { return *src; } + virtual const void* get_value(void* const * src) CV_OVERRIDE { return *src; } + virtual void print(std::ostream& out, void* const* src) CV_OVERRIDE { out << *reinterpret_cast(*src); } +}; + +template<> inline void big_any_policy::print(std::ostream& out, void* const* src) +{ + out << int(*reinterpret_cast(*src)); +} + +template<> inline void big_any_policy::print(std::ostream& out, void* const* src) +{ + out << int(*reinterpret_cast(*src)); +} + +template<> inline void big_any_policy::print(std::ostream& out, void* const* src) +{ + out << (*reinterpret_cast(*src)).c_str(); +} + +template +struct choose_policy +{ + typedef big_any_policy type; +}; + +template +struct choose_policy +{ + typedef small_any_policy type; +}; + +struct any; + +/// Choosing the policy for an any type is illegal, but should never happen. +/// This is designed to throw a compiler error. +template<> +struct choose_policy +{ + typedef void type; +}; + +/// Specializations for small types. 
+#define SMALL_POLICY(TYPE) \ + template<> \ + struct choose_policy { typedef small_any_policy type; \ + } + +SMALL_POLICY(signed char); +SMALL_POLICY(unsigned char); +SMALL_POLICY(signed short); +SMALL_POLICY(unsigned short); +SMALL_POLICY(signed int); +SMALL_POLICY(unsigned int); +SMALL_POLICY(signed long); +SMALL_POLICY(unsigned long); +SMALL_POLICY(float); +SMALL_POLICY(bool); + +#undef SMALL_POLICY + +template +class SinglePolicy +{ + SinglePolicy(); + SinglePolicy(const SinglePolicy& other); + SinglePolicy& operator=(const SinglePolicy& other); + +public: + static base_any_policy* get_policy(); +}; + +/// This function will return a different policy for each type. +template +inline base_any_policy* SinglePolicy::get_policy() +{ + static typename choose_policy::type policy; + return &policy; +} + +} // namespace anyimpl + +struct any +{ +private: + // fields + anyimpl::base_any_policy* policy; + void* object; + +public: + /// Initializing constructor. + template + any(const T& x) + : policy(anyimpl::SinglePolicy::get_policy()), object(NULL) + { + assign(x); + } + + /// Empty constructor. + any() + : policy(anyimpl::SinglePolicy::get_policy()), object(NULL) + { } + + /// Special initializing constructor for string literals. + any(const char* x) + : policy(anyimpl::SinglePolicy::get_policy()), object(NULL) + { + assign(x); + } + + /// Copy constructor. + any(const any& x) + : policy(anyimpl::SinglePolicy::get_policy()), object(NULL) + { + assign(x); + } + + /// Destructor. + ~any() + { + policy->static_delete(&object); + } + + /// Assignment function from another any. + any& assign(const any& x) + { + reset(); + policy = x.policy; + policy->clone(&x.object, &object); + return *this; + } + + /// Assignment function. + template + any& assign(const T& x) + { + reset(); + policy = anyimpl::SinglePolicy::get_policy(); + policy->copy_from_value(&x, &object); + return *this; + } + + /// Assignment operator. + template + any& operator=(const T& x) + { + return assign(x); + } + + /// Assignment operator. Template-based version above doesn't work as expected. We need regular assignment operator here. + any& operator=(const any& x) + { + return assign(x); + } + + /// Assignment operator, specialed for literal strings. + /// They have types like const char [6] which don't work as expected. + any& operator=(const char* x) + { + return assign(x); + } + + /// Utility functions + any& swap(any& x) + { + std::swap(policy, x.policy); + std::swap(object, x.object); + return *this; + } + + /// Cast operator. You can only cast to the original type. + template + T& cast() + { + if (policy->type() != typeid(T)) throw anyimpl::bad_any_cast(); + T* r = reinterpret_cast(policy->get_value(&object)); + return *r; + } + + /// Cast operator. You can only cast to the original type. + template + const T& cast() const + { + if (policy->type() != typeid(T)) throw anyimpl::bad_any_cast(); + const T* r = reinterpret_cast(policy->get_value(&object)); + return *r; + } + + /// Returns true if the any contains no value. + bool empty() const + { + return policy->type() == typeid(anyimpl::empty_any); + } + + /// Frees any allocated memory, and sets the value to NULL. + void reset() + { + policy->static_delete(&object); + policy = anyimpl::SinglePolicy::get_policy(); + } + + /// Returns true if the two types are the same. 
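The any container above is the value type behind cvflann::IndexParams. A hedged sketch of its basic operations follows (the stored values are made up; the function name is hypothetical).

#include <opencv2/flann.hpp>
#include <string>

// Illustrative sketch of cvflann::any, the value type used by IndexParams.
static void anyDemo()
{
    cvflann::any v = 4;                         // small type: stored in place
    int trees = v.cast<int>();                  // cast back to the original type
    (void)trees;

    v = std::string("kdtree");                  // reassignment switches the policy
    if (v.has_type<std::string>()) {
        std::string s = v.cast<std::string>();  // checked cast; throws bad_any_cast on mismatch
        (void)s;
    }

    v.reset();                                  // frees the value
    bool isEmpty = v.empty();                   // true again after reset()
    (void)isEmpty;
}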
+ bool compatible(const any& x) const + { + return policy->type() == x.policy->type(); + } + + /// Returns if the type is compatible with the policy + template + bool has_type() + { + return policy->type() == typeid(T); + } + + const std::type_info& type() const + { + return policy->type(); + } + + friend std::ostream& operator <<(std::ostream& out, const any& any_val); +}; + +inline std::ostream& operator <<(std::ostream& out, const any& any_val) +{ + any_val.policy->print(out,&any_val.object); + return out; +} + +} + +//! @endcond + +#endif // OPENCV_FLANN_ANY_H_ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/autotuned_index.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/autotuned_index.h new file mode 100644 index 00000000..d90f739a --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/autotuned_index.h @@ -0,0 +1,594 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ +#ifndef OPENCV_FLANN_AUTOTUNED_INDEX_H_ +#define OPENCV_FLANN_AUTOTUNED_INDEX_H_ + +//! 
@cond IGNORED + +#include + +#include "nn_index.h" +#include "ground_truth.h" +#include "index_testing.h" +#include "sampling.h" +#include "kdtree_index.h" +#include "kdtree_single_index.h" +#include "kmeans_index.h" +#include "composite_index.h" +#include "linear_index.h" +#include "logger.h" + +namespace cvflann +{ + +template +NNIndex* create_index_by_type(const Matrix& dataset, const IndexParams& params, const Distance& distance); + + +struct AutotunedIndexParams : public IndexParams +{ + AutotunedIndexParams(float target_precision = 0.8, float build_weight = 0.01, float memory_weight = 0, float sample_fraction = 0.1) + { + (*this)["algorithm"] = FLANN_INDEX_AUTOTUNED; + // precision desired (used for autotuning, -1 otherwise) + (*this)["target_precision"] = target_precision; + // build tree time weighting factor + (*this)["build_weight"] = build_weight; + // index memory weighting factor + (*this)["memory_weight"] = memory_weight; + // what fraction of the dataset to use for autotuning + (*this)["sample_fraction"] = sample_fraction; + } +}; + + +template +class AutotunedIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + AutotunedIndex(const Matrix& inputData, const IndexParams& params = AutotunedIndexParams(), Distance d = Distance()) : + dataset_(inputData), distance_(d) + { + target_precision_ = get_param(params, "target_precision",0.8f); + build_weight_ = get_param(params,"build_weight", 0.01f); + memory_weight_ = get_param(params, "memory_weight", 0.0f); + sample_fraction_ = get_param(params,"sample_fraction", 0.1f); + bestIndex_ = NULL; + speedup_ = 0; + } + + AutotunedIndex(const AutotunedIndex&); + AutotunedIndex& operator=(const AutotunedIndex&); + + virtual ~AutotunedIndex() + { + if (bestIndex_ != NULL) { + delete bestIndex_; + bestIndex_ = NULL; + } + } + + /** + * Method responsible with building the index. 
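For orientation, a hedged sketch of how this autotuned build is normally requested from user code through OpenCV's public `cv::flann` wrapper; the wrapper names are assumptions taken from the OpenCV API, not something this header defines:

```cpp
// Hedged sketch: requesting an autotuned index via the cv::flann wrapper.
// The four arguments mirror the AutotunedIndexParams knobs documented above:
// target precision, build-time weight, memory weight, sample fraction.
#include <opencv2/core.hpp>
#include <opencv2/flann.hpp>

void buildAutotuned(const cv::Mat& features)   // CV_32F, one row per point
{
    cv::flann::AutotunedIndexParams params(0.9f,   // aim for 90% precision
                                           0.01f,  // build time weight
                                           0.0f,   // ignore memory cost
                                           0.1f);  // autotune on 10% of the data
    cv::flann::Index index(features, params);      // runs the parameter estimation at build time

    cv::Mat query = features.row(0), indices, dists;
    index.knnSearch(query, indices, dists, 5, cv::flann::SearchParams());
}
```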
+ */ + virtual void buildIndex() CV_OVERRIDE + { + std::ostringstream stream; + bestParams_ = estimateBuildParams(); + print_params(bestParams_, stream); + Logger::info("----------------------------------------------------\n"); + Logger::info("Autotuned parameters:\n"); + Logger::info("%s", stream.str().c_str()); + Logger::info("----------------------------------------------------\n"); + + bestIndex_ = create_index_by_type(dataset_, bestParams_, distance_); + bestIndex_->buildIndex(); + speedup_ = estimateSearchParams(bestSearchParams_); + stream.str(std::string()); + print_params(bestSearchParams_, stream); + Logger::info("----------------------------------------------------\n"); + Logger::info("Search parameters:\n"); + Logger::info("%s", stream.str().c_str()); + Logger::info("----------------------------------------------------\n"); + } + + /** + * Saves the index to a stream + */ + virtual void saveIndex(FILE* stream) CV_OVERRIDE + { + save_value(stream, (int)bestIndex_->getType()); + bestIndex_->saveIndex(stream); + save_value(stream, get_param(bestSearchParams_, "checks")); + } + + /** + * Loads the index from a stream + */ + virtual void loadIndex(FILE* stream) CV_OVERRIDE + { + int index_type; + + load_value(stream, index_type); + IndexParams params; + params["algorithm"] = (flann_algorithm_t)index_type; + bestIndex_ = create_index_by_type(dataset_, params, distance_); + bestIndex_->loadIndex(stream); + int checks; + load_value(stream, checks); + bestSearchParams_["checks"] = checks; + } + + /** + * Method that searches for nearest-neighbors + */ + virtual void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& searchParams) CV_OVERRIDE + { + int checks = get_param(searchParams,"checks",FLANN_CHECKS_AUTOTUNED); + if (checks == FLANN_CHECKS_AUTOTUNED) { + bestIndex_->findNeighbors(result, vec, bestSearchParams_); + } + else { + bestIndex_->findNeighbors(result, vec, searchParams); + } + } + + + IndexParams getParameters() const CV_OVERRIDE + { + return bestIndex_->getParameters(); + } + + SearchParams getSearchParameters() const + { + return bestSearchParams_; + } + + float getSpeedup() const + { + return speedup_; + } + + + /** + * Number of features in this index. + */ + virtual size_t size() const CV_OVERRIDE + { + return bestIndex_->size(); + } + + /** + * The length of each vector in this index. + */ + virtual size_t veclen() const CV_OVERRIDE + { + return bestIndex_->veclen(); + } + + /** + * The amount of memory (in bytes) this index uses. 
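The `findNeighbors()` override above switches on the `checks` value: `FLANN_CHECKS_AUTOTUNED` reuses the search parameters estimated during autotuning, anything else overrides them. A short hedged sketch of the two call styles (the index and result-set setup are assumed, not shown):

```cpp
// Hedged sketch: choosing between the tuned and a manual "checks" budget.
#include "opencv2/flann/params.h"
#include "opencv2/flann/defines.h"

void pickSearchParams()
{
    cvflann::SearchParams tuned(cvflann::FLANN_CHECKS_AUTOTUNED); // use bestSearchParams_
    cvflann::SearchParams manual(64);                             // force 64 leaf checks
    // index.findNeighbors(resultSet, queryVector, tuned);
    // index.findNeighbors(resultSet, queryVector, manual);
    (void)tuned; (void)manual;
}
```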
+ */ + virtual int usedMemory() const CV_OVERRIDE + { + return bestIndex_->usedMemory(); + } + + /** + * Algorithm name + */ + virtual flann_algorithm_t getType() const CV_OVERRIDE + { + return FLANN_INDEX_AUTOTUNED; + } + +private: + + struct CostData + { + float searchTimeCost; + float buildTimeCost; + float memoryCost; + float totalCost; + IndexParams params; + }; + + void evaluate_kmeans(CostData& cost) + { + StartStopTimer t; + int checks; + const int nn = 1; + + Logger::info("KMeansTree using params: max_iterations=%d, branching=%d\n", + get_param(cost.params,"iterations"), + get_param(cost.params,"branching")); + KMeansIndex kmeans(sampledDataset_, cost.params, distance_); + // measure index build time + t.start(); + kmeans.buildIndex(); + t.stop(); + float buildTime = (float)t.value; + + // measure search time + float searchTime = test_index_precision(kmeans, sampledDataset_, testDataset_, gt_matches_, target_precision_, checks, distance_, nn); + + float datasetMemory = float(sampledDataset_.rows * sampledDataset_.cols * sizeof(float)); + cost.memoryCost = (kmeans.usedMemory() + datasetMemory) / datasetMemory; + cost.searchTimeCost = searchTime; + cost.buildTimeCost = buildTime; + Logger::info("KMeansTree buildTime=%g, searchTime=%g, build_weight=%g\n", buildTime, searchTime, build_weight_); + } + + + void evaluate_kdtree(CostData& cost) + { + StartStopTimer t; + int checks; + const int nn = 1; + + Logger::info("KDTree using params: trees=%d\n", get_param(cost.params,"trees")); + KDTreeIndex kdtree(sampledDataset_, cost.params, distance_); + + t.start(); + kdtree.buildIndex(); + t.stop(); + float buildTime = (float)t.value; + + //measure search time + float searchTime = test_index_precision(kdtree, sampledDataset_, testDataset_, gt_matches_, target_precision_, checks, distance_, nn); + + float datasetMemory = float(sampledDataset_.rows * sampledDataset_.cols * sizeof(float)); + cost.memoryCost = (kdtree.usedMemory() + datasetMemory) / datasetMemory; + cost.searchTimeCost = searchTime; + cost.buildTimeCost = buildTime; + Logger::info("KDTree buildTime=%g, searchTime=%g\n", buildTime, searchTime); + } + + + // struct KMeansSimpleDownhillFunctor { + // + // Autotune& autotuner; + // KMeansSimpleDownhillFunctor(Autotune& autotuner_) : autotuner(autotuner_) {} + // + // float operator()(int* params) { + // + // float maxFloat = numeric_limits::max(); + // + // if (params[0]<2) return maxFloat; + // if (params[1]<0) return maxFloat; + // + // CostData c; + // c.params["algorithm"] = KMEANS; + // c.params["centers-init"] = CENTERS_RANDOM; + // c.params["branching"] = params[0]; + // c.params["max-iterations"] = params[1]; + // + // autotuner.evaluate_kmeans(c); + // + // return c.timeCost; + // + // } + // }; + // + // struct KDTreeSimpleDownhillFunctor { + // + // Autotune& autotuner; + // KDTreeSimpleDownhillFunctor(Autotune& autotuner_) : autotuner(autotuner_) {} + // + // float operator()(int* params) { + // float maxFloat = numeric_limits::max(); + // + // if (params[0]<1) return maxFloat; + // + // CostData c; + // c.params["algorithm"] = KDTREE; + // c.params["trees"] = params[0]; + // + // autotuner.evaluate_kdtree(c); + // + // return c.timeCost; + // + // } + // }; + + + + void optimizeKMeans(std::vector& costs) + { + Logger::info("KMEANS, Step 1: Exploring parameter space\n"); + + // explore kmeans parameters space using combinations of the parameters below + int maxIterations[] = { 1, 5, 10, 15 }; + int branchingFactors[] = { 16, 32, 64, 128, 256 }; + + int 
kmeansParamSpaceSize = FLANN_ARRAY_LEN(maxIterations) * FLANN_ARRAY_LEN(branchingFactors); + costs.reserve(costs.size() + kmeansParamSpaceSize); + + // evaluate kmeans for all parameter combinations + for (size_t i = 0; i < FLANN_ARRAY_LEN(maxIterations); ++i) { + for (size_t j = 0; j < FLANN_ARRAY_LEN(branchingFactors); ++j) { + CostData cost; + cost.params["algorithm"] = FLANN_INDEX_KMEANS; + cost.params["centers_init"] = FLANN_CENTERS_RANDOM; + cost.params["iterations"] = maxIterations[i]; + cost.params["branching"] = branchingFactors[j]; + + evaluate_kmeans(cost); + costs.push_back(cost); + } + } + + // Logger::info("KMEANS, Step 2: simplex-downhill optimization\n"); + // + // const int n = 2; + // // choose initial simplex points as the best parameters so far + // int kmeansNMPoints[n*(n+1)]; + // float kmeansVals[n+1]; + // for (int i=0;i& costs) + { + Logger::info("KD-TREE, Step 1: Exploring parameter space\n"); + + // explore kd-tree parameters space using the parameters below + int testTrees[] = { 1, 4, 8, 16, 32 }; + + // evaluate kdtree for all parameter combinations + for (size_t i = 0; i < FLANN_ARRAY_LEN(testTrees); ++i) { + CostData cost; + cost.params["algorithm"] = FLANN_INDEX_KDTREE; + cost.params["trees"] = testTrees[i]; + + evaluate_kdtree(cost); + costs.push_back(cost); + } + + // Logger::info("KD-TREE, Step 2: simplex-downhill optimization\n"); + // + // const int n = 1; + // // choose initial simplex points as the best parameters so far + // int kdtreeNMPoints[n*(n+1)]; + // float kdtreeVals[n+1]; + // for (int i=0;i costs; + + int sampleSize = int(sample_fraction_ * dataset_.rows); + int testSampleSize = std::min(sampleSize / 10, 1000); + + Logger::info("Entering autotuning, dataset size: %d, sampleSize: %d, testSampleSize: %d, target precision: %g\n", dataset_.rows, sampleSize, testSampleSize, target_precision_); + + // For a very small dataset, it makes no sense to build any fancy index, just + // use linear search + if (testSampleSize < 10) { + Logger::info("Choosing linear, dataset too small\n"); + return LinearIndexParams(); + } + + // We use a fraction of the original dataset to speedup the autotune algorithm + sampledDataset_ = random_sample(dataset_, sampleSize); + // We use a cross-validation approach, first we sample a testset from the dataset + testDataset_ = random_sample(sampledDataset_, testSampleSize, true); + + // We compute the ground truth using linear search + Logger::info("Computing ground truth... 
\n"); + gt_matches_ = Matrix(new int[testDataset_.rows], testDataset_.rows, 1); + StartStopTimer t; + t.start(); + compute_ground_truth(sampledDataset_, testDataset_, gt_matches_, 0, distance_); + t.stop(); + + CostData linear_cost; + linear_cost.searchTimeCost = (float)t.value; + linear_cost.buildTimeCost = 0; + linear_cost.memoryCost = 0; + linear_cost.params["algorithm"] = FLANN_INDEX_LINEAR; + + costs.push_back(linear_cost); + + // Start parameter autotune process + Logger::info("Autotuning parameters...\n"); + + optimizeKMeans(costs); + optimizeKDTree(costs); + + float bestTimeCost = costs[0].searchTimeCost; + for (size_t i = 0; i < costs.size(); ++i) { + float timeCost = costs[i].buildTimeCost * build_weight_ + costs[i].searchTimeCost; + if (timeCost < bestTimeCost) { + bestTimeCost = timeCost; + } + } + + float bestCost = costs[0].searchTimeCost / bestTimeCost; + IndexParams bestParams = costs[0].params; + if (bestTimeCost > 0) { + for (size_t i = 0; i < costs.size(); ++i) { + float crtCost = (costs[i].buildTimeCost * build_weight_ + costs[i].searchTimeCost) / bestTimeCost + + memory_weight_ * costs[i].memoryCost; + if (crtCost < bestCost) { + bestCost = crtCost; + bestParams = costs[i].params; + } + } + } + + delete[] gt_matches_.data; + delete[] testDataset_.data; + delete[] sampledDataset_.data; + + return bestParams; + } + + + + /** + * Estimates the search time parameters needed to get the desired precision. + * Precondition: the index is built + * Postcondition: the searchParams will have the optimum params set, also the speedup obtained over linear search. + */ + float estimateSearchParams(SearchParams& searchParams) + { + const int nn = 1; + const size_t SAMPLE_COUNT = 1000; + + CV_Assert(bestIndex_ != NULL && "Requires a valid index"); // must have a valid index + + float speedup = 0; + + int samples = (int)std::min(dataset_.rows / 10, SAMPLE_COUNT); + if (samples > 0) { + Matrix testDataset = random_sample(dataset_, samples); + + Logger::info("Computing ground truth\n"); + + // we need to compute the ground truth first + Matrix gt_matches(new int[testDataset.rows], testDataset.rows, 1); + StartStopTimer t; + t.start(); + compute_ground_truth(dataset_, testDataset, gt_matches, 1, distance_); + t.stop(); + float linear = (float)t.value; + + int checks; + Logger::info("Estimating number of checks\n"); + + float searchTime; + float cb_index; + if (bestIndex_->getType() == FLANN_INDEX_KMEANS) { + Logger::info("KMeans algorithm, estimating cluster border factor\n"); + KMeansIndex* kmeans = (KMeansIndex*)bestIndex_; + float bestSearchTime = -1; + float best_cb_index = -1; + int best_checks = -1; + for (cb_index = 0; cb_index < 1.1f; cb_index += 0.2f) { + kmeans->set_cb_index(cb_index); + searchTime = test_index_precision(*kmeans, dataset_, testDataset, gt_matches, target_precision_, checks, distance_, nn, 1); + if ((searchTime < bestSearchTime) || (bestSearchTime == -1)) { + bestSearchTime = searchTime; + best_cb_index = cb_index; + best_checks = checks; + } + } + searchTime = bestSearchTime; + cb_index = best_cb_index; + checks = best_checks; + + kmeans->set_cb_index(best_cb_index); + Logger::info("Optimum cb_index: %g\n", cb_index); + bestParams_["cb_index"] = cb_index; + } + else { + searchTime = test_index_precision(*bestIndex_, dataset_, testDataset, gt_matches, target_precision_, checks, distance_, nn, 1); + } + + Logger::info("Required number of checks: %d \n", checks); + searchParams["checks"] = checks; + + speedup = linear / searchTime; + + delete[] gt_matches.data; + 
delete[] testDataset.data; + } + + return speedup; + } + +private: + NNIndex* bestIndex_; + + IndexParams bestParams_; + SearchParams bestSearchParams_; + + Matrix sampledDataset_; + Matrix testDataset_; + Matrix gt_matches_; + + float speedup_; + + /** + * The dataset used by this index + */ + const Matrix dataset_; + + /** + * Index parameters + */ + float target_precision_; + float build_weight_; + float memory_weight_; + float sample_fraction_; + + Distance distance_; + + +}; +} + +//! @endcond + +#endif /* OPENCV_FLANN_AUTOTUNED_INDEX_H_ */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/composite_index.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/composite_index.h new file mode 100644 index 00000000..f1af41ac --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/composite_index.h @@ -0,0 +1,197 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_COMPOSITE_INDEX_H_ +#define OPENCV_FLANN_COMPOSITE_INDEX_H_ + +//! @cond IGNORED + +#include "nn_index.h" +#include "kdtree_index.h" +#include "kmeans_index.h" + +namespace cvflann +{ + +/** + * Index parameters for the CompositeIndex. + */ +struct CompositeIndexParams : public IndexParams +{ + CompositeIndexParams(int trees = 4, int branching = 32, int iterations = 11, + flann_centers_init_t centers_init = FLANN_CENTERS_RANDOM, float cb_index = 0.2 ) + { + (*this)["algorithm"] = FLANN_INDEX_KMEANS; + // number of randomized trees to use (for kdtree) + (*this)["trees"] = trees; + // branching factor + (*this)["branching"] = branching; + // max iterations to perform in one kmeans clustering (kmeans tree) + (*this)["iterations"] = iterations; + // algorithm used for picking the initial cluster centers for kmeans tree + (*this)["centers_init"] = centers_init; + // cluster boundary index. 
Used when searching the kmeans tree + (*this)["cb_index"] = cb_index; + } +}; + + +/** + * This index builds a kd-tree index and a k-means index and performs nearest + * neighbour search both indexes. This gives a slight boost in search performance + * as some of the neighbours that are missed by one index are found by the other. + */ +template +class CompositeIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + /** + * Index constructor + * @param inputData dataset containing the points to index + * @param params Index parameters + * @param d Distance functor + * @return + */ + CompositeIndex(const Matrix& inputData, const IndexParams& params = CompositeIndexParams(), + Distance d = Distance()) : index_params_(params) + { + kdtree_index_ = new KDTreeIndex(inputData, params, d); + kmeans_index_ = new KMeansIndex(inputData, params, d); + + } + + CompositeIndex(const CompositeIndex&); + CompositeIndex& operator=(const CompositeIndex&); + + virtual ~CompositeIndex() + { + delete kdtree_index_; + delete kmeans_index_; + } + + /** + * @return The index type + */ + flann_algorithm_t getType() const CV_OVERRIDE + { + return FLANN_INDEX_COMPOSITE; + } + + /** + * @return Size of the index + */ + size_t size() const CV_OVERRIDE + { + return kdtree_index_->size(); + } + + /** + * \returns The dimensionality of the features in this index. + */ + size_t veclen() const CV_OVERRIDE + { + return kdtree_index_->veclen(); + } + + /** + * \returns The amount of memory (in bytes) used by the index. + */ + int usedMemory() const CV_OVERRIDE + { + return kmeans_index_->usedMemory() + kdtree_index_->usedMemory(); + } + + /** + * \brief Builds the index + */ + void buildIndex() CV_OVERRIDE + { + Logger::info("Building kmeans tree...\n"); + kmeans_index_->buildIndex(); + Logger::info("Building kdtree tree...\n"); + kdtree_index_->buildIndex(); + } + + /** + * \brief Saves the index to a stream + * \param stream The stream to save the index to + */ + void saveIndex(FILE* stream) CV_OVERRIDE + { + kmeans_index_->saveIndex(stream); + kdtree_index_->saveIndex(stream); + } + + /** + * \brief Loads the index from a stream + * \param stream The stream from which the index is loaded + */ + void loadIndex(FILE* stream) CV_OVERRIDE + { + kmeans_index_->loadIndex(stream); + kdtree_index_->loadIndex(stream); + } + + /** + * \returns The index parameters + */ + IndexParams getParameters() const CV_OVERRIDE + { + return index_params_; + } + + /** + * \brief Method that searches for nearest-neighbours + */ + void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& searchParams) CV_OVERRIDE + { + kmeans_index_->findNeighbors(result, vec, searchParams); + kdtree_index_->findNeighbors(result, vec, searchParams); + } + +private: + /** The k-means index */ + KMeansIndex* kmeans_index_; + + /** The kd-tree index */ + KDTreeIndex* kdtree_index_; + + /** The index parameters */ + const IndexParams index_params_; +}; + +} + +//! 
@endcond + +#endif //OPENCV_FLANN_COMPOSITE_INDEX_H_ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/config.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/config.h new file mode 100644 index 00000000..c9342c00 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/config.h @@ -0,0 +1,42 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2011 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2011 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + + +#ifndef OPENCV_FLANN_CONFIG_H_ +#define OPENCV_FLANN_CONFIG_H_ + +//! @cond IGNORED + +#ifdef FLANN_VERSION_ +#undef FLANN_VERSION_ +#endif +#define FLANN_VERSION_ "1.6.10" + +//! @endcond + +#endif /* OPENCV_FLANN_CONFIG_H_ */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/defines.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/defines.h new file mode 100644 index 00000000..8ab83293 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/defines.h @@ -0,0 +1,169 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2011 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2011 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
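Since the CompositeIndex above simply forwards to both sub-indexes, its parameters are the union of the kd-tree and k-means knobs. A hedged sketch using the public wrapper (the `cv::flann::CompositeIndexParams` name comes from OpenCV's miniflann API and is an assumption here):

```cpp
// Hedged sketch: composite index parameters = kd-tree knobs + k-means knobs.
#include <opencv2/flann.hpp>

cv::flann::CompositeIndexParams makeCompositeParams()
{
    // 4 randomized kd-trees, k-means branching 32, 11 k-means iterations,
    // random center initialisation, cluster-boundary index 0.2
    return cv::flann::CompositeIndexParams(4, 32, 11,
                                           cvflann::FLANN_CENTERS_RANDOM, 0.2f);
}
```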
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + + +#ifndef OPENCV_FLANN_DEFINES_H_ +#define OPENCV_FLANN_DEFINES_H_ + +//! @cond IGNORED + +#include "config.h" + +#ifdef FLANN_EXPORT +#undef FLANN_EXPORT +#endif +#ifdef _WIN32 +/* win32 dll export/import directives */ + #ifdef FLANN_EXPORTS + #define FLANN_EXPORT __declspec(dllexport) + #elif defined(FLANN_STATIC) + #define FLANN_EXPORT + #else + #define FLANN_EXPORT __declspec(dllimport) + #endif +#else +/* unix needs nothing */ + #define FLANN_EXPORT +#endif + + +#undef FLANN_PLATFORM_32_BIT +#undef FLANN_PLATFORM_64_BIT +#if defined __amd64__ || defined __x86_64__ || defined _WIN64 || defined _M_X64 +#define FLANN_PLATFORM_64_BIT +#else +#define FLANN_PLATFORM_32_BIT +#endif + + +#undef FLANN_ARRAY_LEN +#define FLANN_ARRAY_LEN(a) (sizeof(a)/sizeof(a[0])) + +namespace cvflann { + +/* Nearest neighbour index algorithms */ +enum flann_algorithm_t +{ + FLANN_INDEX_LINEAR = 0, + FLANN_INDEX_KDTREE = 1, + FLANN_INDEX_KMEANS = 2, + FLANN_INDEX_COMPOSITE = 3, + FLANN_INDEX_KDTREE_SINGLE = 4, + FLANN_INDEX_HIERARCHICAL = 5, + FLANN_INDEX_LSH = 6, + FLANN_INDEX_SAVED = 254, + FLANN_INDEX_AUTOTUNED = 255, + + // deprecated constants, should use the FLANN_INDEX_* ones instead + LINEAR = 0, + KDTREE = 1, + KMEANS = 2, + COMPOSITE = 3, + KDTREE_SINGLE = 4, + SAVED = 254, + AUTOTUNED = 255 +}; + + + +enum flann_centers_init_t +{ + FLANN_CENTERS_RANDOM = 0, + FLANN_CENTERS_GONZALES = 1, + FLANN_CENTERS_KMEANSPP = 2, + FLANN_CENTERS_GROUPWISE = 3, + + // deprecated constants, should use the FLANN_CENTERS_* ones instead + CENTERS_RANDOM = 0, + CENTERS_GONZALES = 1, + CENTERS_KMEANSPP = 2 +}; + +enum flann_log_level_t +{ + FLANN_LOG_NONE = 0, + FLANN_LOG_FATAL = 1, + FLANN_LOG_ERROR = 2, + FLANN_LOG_WARN = 3, + FLANN_LOG_INFO = 4 +}; + +enum flann_distance_t +{ + FLANN_DIST_EUCLIDEAN = 1, + FLANN_DIST_L2 = 1, + FLANN_DIST_MANHATTAN = 2, + FLANN_DIST_L1 = 2, + FLANN_DIST_MINKOWSKI = 3, + FLANN_DIST_MAX = 4, + FLANN_DIST_HIST_INTERSECT = 5, + FLANN_DIST_HELLINGER = 6, + FLANN_DIST_CHI_SQUARE = 7, + FLANN_DIST_CS = 7, + FLANN_DIST_KULLBACK_LEIBLER = 8, + FLANN_DIST_KL = 8, + FLANN_DIST_HAMMING = 9, + FLANN_DIST_DNAMMING = 10, + + // deprecated constants, should use the FLANN_DIST_* ones instead + EUCLIDEAN = 1, + MANHATTAN = 2, + MINKOWSKI = 3, + MAX_DIST = 4, + HIST_INTERSECT = 5, + HELLINGER = 6, + CS = 7, + KL = 8, + KULLBACK_LEIBLER = 8 +}; + +enum flann_datatype_t +{ + FLANN_INT8 = 0, + FLANN_INT16 = 1, + FLANN_INT32 = 2, + FLANN_INT64 = 3, + FLANN_UINT8 = 4, + FLANN_UINT16 = 5, + FLANN_UINT32 = 6, + FLANN_UINT64 = 7, + FLANN_FLOAT32 = 8, + FLANN_FLOAT64 = 9 +}; + +enum +{ + FLANN_CHECKS_UNLIMITED = -1, + FLANN_CHECKS_AUTOTUNED = -2 +}; + +} + +//! 
@endcond + +#endif /* OPENCV_FLANN_DEFINES_H_ */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/dist.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/dist.h new file mode 100644 index 00000000..608f8a50 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/dist.h @@ -0,0 +1,1292 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_DIST_H_ +#define OPENCV_FLANN_DIST_H_ + +//! @cond IGNORED + +#include +#include +#include +#ifdef _MSC_VER +typedef unsigned __int32 uint32_t; +typedef unsigned __int64 uint64_t; +#else +#include +#endif + +#include "defines.h" + +#if defined _WIN32 && (defined(_M_ARM) || defined(_M_ARM64)) +# include +#endif + +#if defined(__ARM_NEON__) && !defined(__CUDACC__) +# include "arm_neon.h" +#endif + +namespace cvflann +{ + +template +inline T abs(T x) { return (x<0) ? 
-x : x; } + +template<> +inline int abs(int x) { return ::abs(x); } + +template<> +inline float abs(float x) { return fabsf(x); } + +template<> +inline double abs(double x) { return fabs(x); } + + +template +inline TargetType round(float x) { return static_cast(x); } + +template<> +inline unsigned int round(float x) { return static_cast(x + 0.5f); } + +template<> +inline unsigned short round(float x) { return static_cast(x + 0.5f); } + +template<> +inline unsigned char round(float x) { return static_cast(x + 0.5f); } + +template<> +inline long long round(float x) { return static_cast(x + 0.5f); } + +template<> +inline long round(float x) { return static_cast(x + 0.5f); } + +template<> +inline int round(float x) { return static_cast(x + 0.5f) - (x<0); } + +template<> +inline short round(float x) { return static_cast(x + 0.5f) - (x<0); } + +template<> +inline char round(float x) { return static_cast(x + 0.5f) - (x<0); } + + +template +inline TargetType round(double x) { return static_cast(x); } + +template<> +inline unsigned int round(double x) { return static_cast(x + 0.5); } + +template<> +inline unsigned short round(double x) { return static_cast(x + 0.5); } + +template<> +inline unsigned char round(double x) { return static_cast(x + 0.5); } + +template<> +inline long long round(double x) { return static_cast(x + 0.5); } + +template<> +inline long round(double x) { return static_cast(x + 0.5); } + +template<> +inline int round(double x) { return static_cast(x + 0.5) - (x<0); } + +template<> +inline short round(double x) { return static_cast(x + 0.5) - (x<0); } + +template<> +inline char round(double x) { return static_cast(x + 0.5) - (x<0); } + + +template +struct Accumulator { typedef T Type; }; +template<> +struct Accumulator { typedef float Type; }; +template<> +struct Accumulator { typedef float Type; }; +template<> +struct Accumulator { typedef float Type; }; +template<> +struct Accumulator { typedef float Type; }; +template<> +struct Accumulator { typedef float Type; }; +template<> +struct Accumulator { typedef float Type; }; + +#undef True +#undef False + +class True +{ +public: + static const bool val = true; +}; + +class False +{ +public: + static const bool val = false; +}; + + +/* + * This is a "zero iterator". It basically behaves like a zero filled + * array to all algorithms that use arrays as iterators (STL style). + * It's useful when there's a need to compute the distance between feature + * and origin it and allows for better compiler optimisation than using a + * zero-filled array. + */ +template +struct ZeroIterator +{ + + T operator*() + { + return 0; + } + + T operator[](int) + { + return 0; + } + + const ZeroIterator& operator ++() + { + return *this; + } + + ZeroIterator operator ++(int) + { + return *this; + } + + ZeroIterator& operator+=(int) + { + return *this; + } + +}; + + + +/** + * Squared Euclidean distance functor. + * + * This is the simpler, unrolled version. 
This is preferable for + * very low dimensionality data (eg 3D points) + */ +template +struct L2_Simple +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + typedef ResultType CentersType; + + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const + { + ResultType result = ResultType(); + ResultType diff; + for(size_t i = 0; i < size; ++i ) { + diff = (ResultType)(*a++ - *b++); + result += diff*diff; + } + return result; + } + + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + return (a-b)*(a-b); + } +}; + + + +/** + * Squared Euclidean distance functor, optimized version + */ +template +struct L2 +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + typedef ResultType CentersType; + + /** + * Compute the squared Euclidean distance between two vectors. + * + * This is highly optimised, with loop unrolling, as it is one + * of the most expensive inner loops. + * + * The computation of squared root at the end is omitted for + * efficiency. + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + ResultType diff0, diff1, diff2, diff3; + Iterator1 last = a + size; + Iterator1 lastgroup = last - 3; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + diff0 = (ResultType)(a[0] - b[0]); + diff1 = (ResultType)(a[1] - b[1]); + diff2 = (ResultType)(a[2] - b[2]); + diff3 = (ResultType)(a[3] - b[3]); + result += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3; + a += 4; + b += 4; + + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + /* Process last 0-3 pixels. Not needed for standard vector lengths. */ + while (a < last) { + diff0 = (ResultType)(*a++ - *b++); + result += diff0 * diff0; + } + return result; + } + + /** + * Partial euclidean distance, using just one dimension. This is used by the + * kd-tree when computing partial distances while traversing the tree. + * + * Squared root is omitted for efficiency. + */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + return (a-b)*(a-b); + } +}; + + +/* + * Manhattan distance functor, optimized version + */ +template +struct L1 +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + typedef ResultType CentersType; + + /** + * Compute the Manhattan (L_1) distance between two vectors. + * + * This is highly optimised, with loop unrolling, as it is one + * of the most expensive inner loops. + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + ResultType diff0, diff1, diff2, diff3; + Iterator1 last = a + size; + Iterator1 lastgroup = last - 3; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + diff0 = (ResultType)abs(a[0] - b[0]); + diff1 = (ResultType)abs(a[1] - b[1]); + diff2 = (ResultType)abs(a[2] - b[2]); + diff3 = (ResultType)abs(a[3] - b[3]); + result += diff0 + diff1 + diff2 + diff3; + a += 4; + b += 4; + + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + /* Process last 0-3 pixels. 
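A hedged sketch of calling the L2 functors just defined directly on raw arrays; note the returned value is the squared distance (the square root is deliberately omitted), and `ZeroIterator` can stand in for the origin without allocating a zero vector:

```cpp
// Hedged sketch: squared Euclidean distance via cvflann::L2, as defined above.
#include <cstddef>
#include "opencv2/flann/dist.h"

float squaredL2(const float* a, const float* b, size_t dim)
{
    cvflann::L2<float> dist;
    return dist(a, b, dim);                              // sum of (a[i]-b[i])^2, no sqrt
}

float squaredNorm(const float* a, size_t dim)
{
    cvflann::L2<float> dist;
    return dist(a, cvflann::ZeroIterator<float>(), dim); // squared distance to the origin
}
```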
Not needed for standard vector lengths. */ + while (a < last) { + diff0 = (ResultType)abs(*a++ - *b++); + result += diff0; + } + return result; + } + + /** + * Partial distance, used by the kd-tree. + */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + return abs(a-b); + } +}; + + + +template +struct MinkowskiDistance +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + typedef ResultType CentersType; + + int order; + + MinkowskiDistance(int order_) : order(order_) {} + + /** + * Compute the Minkowsky (L_p) distance between two vectors. + * + * This is highly optimised, with loop unrolling, as it is one + * of the most expensive inner loops. + * + * The computation of squared root at the end is omitted for + * efficiency. + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + ResultType diff0, diff1, diff2, diff3; + Iterator1 last = a + size; + Iterator1 lastgroup = last - 3; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + diff0 = (ResultType)abs(a[0] - b[0]); + diff1 = (ResultType)abs(a[1] - b[1]); + diff2 = (ResultType)abs(a[2] - b[2]); + diff3 = (ResultType)abs(a[3] - b[3]); + result += pow(diff0,order) + pow(diff1,order) + pow(diff2,order) + pow(diff3,order); + a += 4; + b += 4; + + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + /* Process last 0-3 pixels. Not needed for standard vector lengths. */ + while (a < last) { + diff0 = (ResultType)abs(*a++ - *b++); + result += pow(diff0,order); + } + return result; + } + + /** + * Partial distance, used by the kd-tree. + */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + return pow(static_cast(abs(a-b)),order); + } +}; + + + +template +struct MaxDistance +{ + typedef False is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + typedef ResultType CentersType; + + /** + * Compute the max distance (L_infinity) between two vectors. + * + * This distance is not a valid kdtree distance, it's not dimensionwise additive. + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + ResultType diff0, diff1, diff2, diff3; + Iterator1 last = a + size; + Iterator1 lastgroup = last - 3; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + diff0 = abs(a[0] - b[0]); + diff1 = abs(a[1] - b[1]); + diff2 = abs(a[2] - b[2]); + diff3 = abs(a[3] - b[3]); + if (diff0>result) {result = diff0; } + if (diff1>result) {result = diff1; } + if (diff2>result) {result = diff2; } + if (diff3>result) {result = diff3; } + a += 4; + b += 4; + + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + /* Process last 0-3 pixels. Not needed for standard vector lengths. */ + while (a < last) { + diff0 = abs(*a++ - *b++); + result = (diff0>result) ? 
diff0 : result; + } + return result; + } + + /* This distance functor is not dimension-wise additive, which + * makes it an invalid kd-tree distance, not implementing the accum_dist method */ + +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** + * Hamming distance functor - counts the bit differences between two strings - useful for the Brief descriptor + * bit count of A exclusive XOR'ed with B + */ +struct HammingLUT +{ + typedef False is_kdtree_distance; + typedef False is_vector_space_distance; + + typedef unsigned char ElementType; + typedef int ResultType; + typedef ElementType CentersType; + + /** this will count the bits in a ^ b + */ + template + ResultType operator()(const unsigned char* a, const Iterator2 b, size_t size) const + { + static const uchar popCountTable[] = + { + 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 + }; + ResultType result = 0; + const unsigned char* b2 = reinterpret_cast (b); + for (size_t i = 0; i < size; i++) { + result += popCountTable[a[i] ^ b2[i]]; + } + return result; + } + + + ResultType operator()(const unsigned char* a, const ZeroIterator b, size_t size) const + { + (void)b; + static const uchar popCountTable[] = + { + 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 + }; + ResultType result = 0; + for (size_t i = 0; i < size; i++) { + result += popCountTable[a[i]]; + } + return result; + } +}; + +/** + * Hamming distance functor (pop count between two binary vectors, i.e. 
xor them and count the number of bits set) + * That code was taken from brief.cpp in OpenCV + */ +template +struct Hamming +{ + typedef False is_kdtree_distance; + typedef False is_vector_space_distance; + + + typedef T ElementType; + typedef int ResultType; + typedef ElementType CentersType; + + template + ResultType operator()(const Iterator1 a, const Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const + { + ResultType result = 0; +#if defined(__ARM_NEON__) && !defined(__CUDACC__) + { + const unsigned char* a2 = reinterpret_cast (a); + const unsigned char* b2 = reinterpret_cast (b); + uint32x4_t bits = vmovq_n_u32(0); + for (size_t i = 0; i < size; i += 16) { + uint8x16_t A_vec = vld1q_u8 (a2 + i); + uint8x16_t B_vec = vld1q_u8 (b2 + i); + uint8x16_t AxorB = veorq_u8 (A_vec, B_vec); + uint8x16_t bitsSet = vcntq_u8 (AxorB); + uint16x8_t bitSet8 = vpaddlq_u8 (bitsSet); + uint32x4_t bitSet4 = vpaddlq_u16 (bitSet8); + bits = vaddq_u32(bits, bitSet4); + } + uint64x2_t bitSet2 = vpaddlq_u32 (bits); + result = vgetq_lane_s32 (vreinterpretq_s32_u64(bitSet2),0); + result += vgetq_lane_s32 (vreinterpretq_s32_u64(bitSet2),2); + } +#elif defined(__GNUC__) + { + //for portability just use unsigned long -- and use the __builtin_popcountll (see docs for __builtin_popcountll) + typedef unsigned long long pop_t; + const size_t modulo = size % sizeof(pop_t); + const pop_t* a2 = reinterpret_cast (a); + const pop_t* b2 = reinterpret_cast (b); + const pop_t* a2_end = a2 + (size / sizeof(pop_t)); + + for (; a2 != a2_end; ++a2, ++b2) result += __builtin_popcountll((*a2) ^ (*b2)); + + if (modulo) { + //in the case where size is not dividable by sizeof(size_t) + //need to mask off the bits at the end + pop_t a_final = 0, b_final = 0; + memcpy(&a_final, a2, modulo); + memcpy(&b_final, b2, modulo); + result += __builtin_popcountll(a_final ^ b_final); + } + } +#else // NO NEON and NOT GNUC + HammingLUT lut; + result = lut(reinterpret_cast (a), + reinterpret_cast (b), size); +#endif + return result; + } + + + template + ResultType operator()(const Iterator1 a, ZeroIterator b, size_t size, ResultType /*worst_dist*/ = -1) const + { + (void)b; + ResultType result = 0; +#if defined(__ARM_NEON__) && !defined(__CUDACC__) + { + const unsigned char* a2 = reinterpret_cast (a); + uint32x4_t bits = vmovq_n_u32(0); + for (size_t i = 0; i < size; i += 16) { + uint8x16_t A_vec = vld1q_u8 (a2 + i); + uint8x16_t bitsSet = vcntq_u8 (A_vec); + uint16x8_t bitSet8 = vpaddlq_u8 (bitsSet); + uint32x4_t bitSet4 = vpaddlq_u16 (bitSet8); + bits = vaddq_u32(bits, bitSet4); + } + uint64x2_t bitSet2 = vpaddlq_u32 (bits); + result = vgetq_lane_s32 (vreinterpretq_s32_u64(bitSet2),0); + result += vgetq_lane_s32 (vreinterpretq_s32_u64(bitSet2),2); + } +#elif defined(__GNUC__) + { + //for portability just use unsigned long -- and use the __builtin_popcountll (see docs for __builtin_popcountll) + typedef unsigned long long pop_t; + const size_t modulo = size % sizeof(pop_t); + const pop_t* a2 = reinterpret_cast (a); + const pop_t* a2_end = a2 + (size / sizeof(pop_t)); + + for (; a2 != a2_end; ++a2) result += __builtin_popcountll(*a2); + + if (modulo) { + //in the case where size is not dividable by sizeof(size_t) + //need to mask off the bits at the end + pop_t a_final = 0; + memcpy(&a_final, a2, modulo); + result += __builtin_popcountll(a_final); + } + } +#else // NO NEON and NOT GNUC + HammingLUT lut; + result = lut(reinterpret_cast (a), b, size); +#endif + return result; + } +}; + +template +struct Hamming2 +{ + typedef False 
is_kdtree_distance; + typedef False is_vector_space_distance; + + typedef T ElementType; + typedef int ResultType; + typedef ElementType CentersType; + + /** This is popcount_3() from: + * http://en.wikipedia.org/wiki/Hamming_weight */ + unsigned int popcnt32(uint32_t n) const + { + n -= ((n >> 1) & 0x55555555); + n = (n & 0x33333333) + ((n >> 2) & 0x33333333); + return (((n + (n >> 4))& 0xF0F0F0F)* 0x1010101) >> 24; + } + +#ifdef FLANN_PLATFORM_64_BIT + unsigned int popcnt64(uint64_t n) const + { + n -= ((n >> 1) & 0x5555555555555555); + n = (n & 0x3333333333333333) + ((n >> 2) & 0x3333333333333333); + return (((n + (n >> 4))& 0x0f0f0f0f0f0f0f0f)* 0x0101010101010101) >> 56; + } +#endif + + template + ResultType operator()(const Iterator1 a, const Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const + { + CV_DbgAssert(!(size % long_word_size_) && "vectors size must be multiple of long words size (i.e. 8)"); + +#ifdef FLANN_PLATFORM_64_BIT + const uint64_t* pa = reinterpret_cast(a); + const uint64_t* pb = reinterpret_cast(b); + ResultType result = 0; + size /= long_word_size_; + for(size_t i = 0; i < size; ++i ) { + result += popcnt64(*pa ^ *pb); + ++pa; + ++pb; + } +#else + const uint32_t* pa = reinterpret_cast(a); + const uint32_t* pb = reinterpret_cast(b); + ResultType result = 0; + size /= long_word_size_; + for(size_t i = 0; i < size; ++i ) { + result += popcnt32(*pa ^ *pb); + ++pa; + ++pb; + } +#endif + return result; + } + + + template + ResultType operator()(const Iterator1 a, ZeroIterator b, size_t size, ResultType /*worst_dist*/ = -1) const + { + CV_DbgAssert(!(size % long_word_size_) && "vectors size must be multiple of long words size (i.e. 8)"); + + (void)b; +#ifdef FLANN_PLATFORM_64_BIT + const uint64_t* pa = reinterpret_cast(a); + ResultType result = 0; + size /= long_word_size_; + for(size_t i = 0; i < size; ++i ) { + result += popcnt64(*pa); + ++pa; + } +#else + const uint32_t* pa = reinterpret_cast(a); + ResultType result = 0; + size /= long_word_size_; + for(size_t i = 0; i < size; ++i ) { + result += popcnt32(*pa); + ++pa; + } +#endif + return result; + } + +private: +#ifdef FLANN_PLATFORM_64_BIT + static const size_t long_word_size_ = sizeof(uint64_t)/sizeof(unsigned char); +#else + static const size_t long_word_size_ = sizeof(uint32_t)/sizeof(unsigned char); +#endif +}; + + + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +struct DNAmmingLUT +{ + typedef False is_kdtree_distance; + typedef False is_vector_space_distance; + + typedef unsigned char ElementType; + typedef int ResultType; + typedef ElementType CentersType; + + /** this will count the bits in a ^ b + */ + template + ResultType operator()(const unsigned char* a, const Iterator2 b, size_t size) const + { + static const uchar popCountTable[] = + { + 0, 1, 1, 1, 1, 2, 2, 2, 1, 2, 2, 2, 1, 2, 2, 2, 1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, + 1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, + 1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, + 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, + 1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, + 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, + 1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, + 
2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4 + }; + ResultType result = 0; + const unsigned char* b2 = reinterpret_cast (b); + for (size_t i = 0; i < size; i++) { + result += popCountTable[a[i] ^ b2[i]]; + } + return result; + } + + + ResultType operator()(const unsigned char* a, const ZeroIterator b, size_t size) const + { + (void)b; + static const uchar popCountTable[] = + { + 0, 1, 1, 1, 1, 2, 2, 2, 1, 2, 2, 2, 1, 2, 2, 2, 1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, + 1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, + 1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, + 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, + 1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, + 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, + 1, 2, 2, 2, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, + 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4, 2, 3, 3, 3, 3, 4, 4, 4, 3, 4, 4, 4, 3, 4, 4, 4 + }; + ResultType result = 0; + for (size_t i = 0; i < size; i++) { + result += popCountTable[a[i]]; + } + return result; + } +}; + + +template +struct DNAmming2 +{ + typedef False is_kdtree_distance; + typedef False is_vector_space_distance; + + typedef T ElementType; + typedef int ResultType; + typedef ElementType CentersType; + + /** This is popcount_3() from: + * http://en.wikipedia.org/wiki/Hamming_weight */ + unsigned int popcnt32(uint32_t n) const + { + n = ((n >> 1) | n) & 0x55555555; + n = (n & 0x33333333) + ((n >> 2) & 0x33333333); + return (((n + (n >> 4))& 0x0F0F0F0F)* 0x01010101) >> 24; + } + +#ifdef FLANN_PLATFORM_64_BIT + unsigned int popcnt64(uint64_t n) const + { + n = ((n >> 1) | n) & 0x5555555555555555; + n = (n & 0x3333333333333333) + ((n >> 2) & 0x3333333333333333); + return (((n + (n >> 4))& 0x0f0f0f0f0f0f0f0f)* 0x0101010101010101) >> 56; + } +#endif + + template + ResultType operator()(const Iterator1 a, const Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const + { + CV_DbgAssert(!(size % long_word_size_) && "vectors size must be multiple of long words size (i.e. 8)"); + +#ifdef FLANN_PLATFORM_64_BIT + const uint64_t* pa = reinterpret_cast(a); + const uint64_t* pb = reinterpret_cast(b); + ResultType result = 0; + size /= long_word_size_; + for(size_t i = 0; i < size; ++i ) { + result += popcnt64(*pa ^ *pb); + ++pa; + ++pb; + } +#else + const uint32_t* pa = reinterpret_cast(a); + const uint32_t* pb = reinterpret_cast(b); + ResultType result = 0; + size /= long_word_size_; + for(size_t i = 0; i < size; ++i ) { + result += popcnt32(*pa ^ *pb); + ++pa; + ++pb; + } +#endif + return result; + } + + + template + ResultType operator()(const Iterator1 a, ZeroIterator b, size_t size, ResultType /*worst_dist*/ = -1) const + { + CV_DbgAssert(!(size % long_word_size_) && "vectors size must be multiple of long words size (i.e. 
8)"); + + (void)b; +#ifdef FLANN_PLATFORM_64_BIT + const uint64_t* pa = reinterpret_cast(a); + ResultType result = 0; + size /= long_word_size_; + for(size_t i = 0; i < size; ++i ) { + result += popcnt64(*pa); + ++pa; + } +#else + const uint32_t* pa = reinterpret_cast(a); + ResultType result = 0; + size /= long_word_size_; + for(size_t i = 0; i < size; ++i ) { + result += popcnt32(*pa); + ++pa; + } +#endif + return result; + } + +private: +#ifdef FLANN_PLATFORM_64_BIT + static const size_t long_word_size_= sizeof(uint64_t)/sizeof(unsigned char); +#else + static const size_t long_word_size_= sizeof(uint32_t)/sizeof(unsigned char); +#endif +}; + + + +template +struct HistIntersectionDistance +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + typedef ResultType CentersType; + + /** + * Compute the histogram intersection distance + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + ResultType min0, min1, min2, min3; + Iterator1 last = a + size; + Iterator1 lastgroup = last - 3; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + min0 = (ResultType)(a[0] < b[0] ? a[0] : b[0]); + min1 = (ResultType)(a[1] < b[1] ? a[1] : b[1]); + min2 = (ResultType)(a[2] < b[2] ? a[2] : b[2]); + min3 = (ResultType)(a[3] < b[3] ? a[3] : b[3]); + result += min0 + min1 + min2 + min3; + a += 4; + b += 4; + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + /* Process last 0-3 pixels. Not needed for standard vector lengths. */ + while (a < last) { + min0 = (ResultType)(*a < *b ? *a : *b); + result += min0; + ++a; + ++b; + } + return result; + } + + /** + * Partial distance, used by the kd-tree. + */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + return a +struct HellingerDistance +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + typedef ResultType CentersType; + + /** + * Compute the Hellinger distance + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const + { + ResultType result = ResultType(); + ResultType diff0, diff1, diff2, diff3; + Iterator1 last = a + size; + Iterator1 lastgroup = last - 3; + + /* Process 4 items with each loop for efficiency. */ + while (a < lastgroup) { + diff0 = sqrt(static_cast(a[0])) - sqrt(static_cast(b[0])); + diff1 = sqrt(static_cast(a[1])) - sqrt(static_cast(b[1])); + diff2 = sqrt(static_cast(a[2])) - sqrt(static_cast(b[2])); + diff3 = sqrt(static_cast(a[3])) - sqrt(static_cast(b[3])); + result += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3; + a += 4; + b += 4; + } + while (a < last) { + diff0 = sqrt(static_cast(*a++)) - sqrt(static_cast(*b++)); + result += diff0 * diff0; + } + return result; + } + + /** + * Partial distance, used by the kd-tree. 
+ */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + ResultType diff = sqrt(static_cast(a)) - sqrt(static_cast(b)); + return diff * diff; + } +}; + + +template +struct ChiSquareDistance +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + typedef ResultType CentersType; + + /** + * Compute the chi-square distance + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + ResultType sum, diff; + Iterator1 last = a + size; + + while (a < last) { + sum = (ResultType)(*a + *b); + if (sum>0) { + diff = (ResultType)(*a - *b); + result += diff*diff/sum; + } + ++a; + ++b; + + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + return result; + } + + /** + * Partial distance, used by the kd-tree. + */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + ResultType result = ResultType(); + ResultType sum, diff; + + sum = (ResultType)(a+b); + if (sum>0) { + diff = (ResultType)(a-b); + result = diff*diff/sum; + } + return result; + } +}; + + +template +struct KL_Divergence +{ + typedef True is_kdtree_distance; + typedef True is_vector_space_distance; + + typedef T ElementType; + typedef typename Accumulator::Type ResultType; + typedef ResultType CentersType; + + /** + * Compute the Kullback-Leibler divergence + */ + template + ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const + { + ResultType result = ResultType(); + Iterator1 last = a + size; + + while (a < last) { + if ( *a != 0 && *b != 0 ) { + ResultType ratio = (ResultType)(*a / *b); + if (ratio>0) { + result += *a * log(ratio); + } + } + ++a; + ++b; + + if ((worst_dist>0)&&(result>worst_dist)) { + return result; + } + } + return result; + } + + /** + * Partial distance, used by the kd-tree. + */ + template + inline ResultType accum_dist(const U& a, const V& b, int) const + { + ResultType result = ResultType(); + if( a != 0 && b != 0 ) { + ResultType ratio = (ResultType)(a / b); + if (ratio>0) { + result = a * log(ratio); + } + } + return result; + } +}; + + +/* + * Depending on processed distances, some of them are already squared (e.g. L2) + * and some are not (e.g.Hamming). In KMeans++ for instance we want to be sure + * we are working on ^2 distances, thus following templates to ensure that. 
+ */ +template +struct squareDistance +{ + typedef typename Distance::ResultType ResultType; + ResultType operator()( ResultType dist ) { return dist*dist; } +}; + + +template +struct squareDistance, ElementType> +{ + typedef typename L2_Simple::ResultType ResultType; + ResultType operator()( ResultType dist ) { return dist; } +}; + +template +struct squareDistance, ElementType> +{ + typedef typename L2::ResultType ResultType; + ResultType operator()( ResultType dist ) { return dist; } +}; + + +template +struct squareDistance, ElementType> +{ + typedef typename MinkowskiDistance::ResultType ResultType; + ResultType operator()( ResultType dist ) { return dist; } +}; + +template +struct squareDistance, ElementType> +{ + typedef typename HellingerDistance::ResultType ResultType; + ResultType operator()( ResultType dist ) { return dist; } +}; + +template +struct squareDistance, ElementType> +{ + typedef typename ChiSquareDistance::ResultType ResultType; + ResultType operator()( ResultType dist ) { return dist; } +}; + + +template +typename Distance::ResultType ensureSquareDistance( typename Distance::ResultType dist ) +{ + typedef typename Distance::ElementType ElementType; + + squareDistance dummy; + return dummy( dist ); +} + + +/* + * ...a template to tell the user if the distance he is working with is actually squared + */ + +template +struct isSquareDist +{ + bool operator()() { return false; } +}; + + +template +struct isSquareDist, ElementType> +{ + bool operator()() { return true; } +}; + +template +struct isSquareDist, ElementType> +{ + bool operator()() { return true; } +}; + + +template +struct isSquareDist, ElementType> +{ + bool operator()() { return true; } +}; + +template +struct isSquareDist, ElementType> +{ + bool operator()() { return true; } +}; + +template +struct isSquareDist, ElementType> +{ + bool operator()() { return true; } +}; + + +template +bool isSquareDistance() +{ + typedef typename Distance::ElementType ElementType; + + isSquareDist dummy; + return dummy(); +} + +/* + * ...and a template to ensure the user that he will process the normal distance, + * and not squared distance, without losing processing time calling sqrt(ensureSquareDistance) + * that will result in doing actually sqrt(dist*dist) for L1 distance for instance. 
+ */ +template +struct simpleDistance +{ + typedef typename Distance::ResultType ResultType; + ResultType operator()( ResultType dist ) { return dist; } +}; + + +template +struct simpleDistance, ElementType> +{ + typedef typename L2_Simple::ResultType ResultType; + ResultType operator()( ResultType dist ) { return sqrt(dist); } +}; + +template +struct simpleDistance, ElementType> +{ + typedef typename L2::ResultType ResultType; + ResultType operator()( ResultType dist ) { return sqrt(dist); } +}; + + +template +struct simpleDistance, ElementType> +{ + typedef typename MinkowskiDistance::ResultType ResultType; + ResultType operator()( ResultType dist ) { return sqrt(dist); } +}; + +template +struct simpleDistance, ElementType> +{ + typedef typename HellingerDistance::ResultType ResultType; + ResultType operator()( ResultType dist ) { return sqrt(dist); } +}; + +template +struct simpleDistance, ElementType> +{ + typedef typename ChiSquareDistance::ResultType ResultType; + ResultType operator()( ResultType dist ) { return sqrt(dist); } +}; + + +template +typename Distance::ResultType ensureSimpleDistance( typename Distance::ResultType dist ) +{ + typedef typename Distance::ElementType ElementType; + + simpleDistance dummy; + return dummy( dist ); +} + +} + +//! @endcond + +#endif //OPENCV_FLANN_DIST_H_ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/dummy.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/dummy.h new file mode 100644 index 00000000..c176f2e4 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/dummy.h @@ -0,0 +1,16 @@ + +#ifndef OPENCV_FLANN_DUMMY_H_ +#define OPENCV_FLANN_DUMMY_H_ + +//! @cond IGNORED + +namespace cvflann +{ + +CV_DEPRECATED inline void dummyfunc() {} + +} + +//! @endcond + +#endif /* OPENCV_FLANN_DUMMY_H_ */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/dynamic_bitset.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/dynamic_bitset.h new file mode 100644 index 00000000..a00ce1bb --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/dynamic_bitset.h @@ -0,0 +1,163 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +/*********************************************************************** + * Author: Vincent Rabaud + *************************************************************************/ + +#ifndef OPENCV_FLANN_DYNAMIC_BITSET_H_ +#define OPENCV_FLANN_DYNAMIC_BITSET_H_ + +//! @cond IGNORED + +#ifndef FLANN_USE_BOOST +# define FLANN_USE_BOOST 0 +#endif +//#define FLANN_USE_BOOST 1 +#if FLANN_USE_BOOST +#include +typedef boost::dynamic_bitset<> DynamicBitset; +#else + +#include + +#include "dist.h" + +namespace cvflann { + +/** Class re-implementing the boost version of it + * This helps not depending on boost, it also does not do the bound checks + * and has a way to reset a block for speed + */ +class DynamicBitset +{ +public: + /** default constructor + */ + DynamicBitset() : size_(0) + { + } + + /** only constructor we use in our code + * @param sz the size of the bitset (in bits) + */ + DynamicBitset(size_t sz) + { + resize(sz); + reset(); + } + + /** Sets all the bits to 0 + */ + void clear() + { + std::fill(bitset_.begin(), bitset_.end(), 0); + } + + /** @brief checks if the bitset is empty + * @return true if the bitset is empty + */ + bool empty() const + { + return bitset_.empty(); + } + + /** set all the bits to 0 + */ + void reset() + { + std::fill(bitset_.begin(), bitset_.end(), 0); + } + + /** @brief set one bit to 0 + * @param index + */ + void reset(size_t index) + { + bitset_[index / cell_bit_size_] &= ~(size_t(1) << (index % cell_bit_size_)); + } + + /** @brief sets a specific bit to 0, and more bits too + * This function is useful when resetting a given set of bits so that the + * whole bitset ends up being 0: if that's the case, we don't care about setting + * other bits to 0 + * @param index + */ + void reset_block(size_t index) + { + bitset_[index / cell_bit_size_] = 0; + } + + /** resize the bitset so that it contains at least sz bits + * @param sz + */ + void resize(size_t sz) + { + size_ = sz; + bitset_.resize(sz / cell_bit_size_ + 1); + } + + /** set a bit to true + * @param index the index of the bit to set to 1 + */ + void set(size_t index) + { + bitset_[index / cell_bit_size_] |= size_t(1) << (index % cell_bit_size_); + } + + /** gives the number of contained bits + */ + size_t size() const + { + return size_; + } + + /** check if a bit is set + * @param index the index of the bit to check + * @return true if the bit is set + */ + bool test(size_t index) const + { + return (bitset_[index / cell_bit_size_] & (size_t(1) << (index % cell_bit_size_))) != 0; + } + +private: + std::vector bitset_; + size_t size_; + static const unsigned int cell_bit_size_ = CHAR_BIT * sizeof(size_t); +}; + +} // namespace cvflann + +#endif + +//! 
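/* A minimal usage sketch for the DynamicBitset class above (illustrative only, not part
 * of the upstream OpenCV header; the bit indices are arbitrary placeholders). */
#include <cassert>

static inline void dynamic_bitset_example()
{
    cvflann::DynamicBitset visited(100);   // at least 100 bits, all cleared
    visited.set(3);
    visited.set(64);
    assert(visited.test(3) && visited.test(64));
    assert(!visited.test(10));
    visited.reset(3);                      // clear a single bit again
    assert(!visited.test(3));
}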
@endcond + +#endif // OPENCV_FLANN_DYNAMIC_BITSET_H_ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/flann.hpp b/third_party/opencv/uos/sw_64/include/opencv2/flann/flann.hpp new file mode 100644 index 00000000..227683f9 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/flann.hpp @@ -0,0 +1,48 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifdef __OPENCV_BUILD +#error this is a compatibility header which should not be used inside the OpenCV library +#endif + +#include "opencv2/flann.hpp" diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/flann_base.hpp b/third_party/opencv/uos/sw_64/include/opencv2/flann/flann_base.hpp new file mode 100644 index 00000000..258ec38d --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/flann_base.hpp @@ -0,0 +1,299 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_BASE_HPP_ +#define OPENCV_FLANN_BASE_HPP_ + +//! @cond IGNORED + +#include +#include + +#include "general.h" +#include "matrix.h" +#include "params.h" +#include "saving.h" + +#include "all_indices.h" + +namespace cvflann +{ + +/** + * Sets the log level used for all flann functions + * @param level Verbosity level + */ +inline void log_verbosity(int level) +{ + if (level >= 0) { + Logger::setLevel(level); + } +} + +/** + * (Deprecated) Index parameters for creating a saved index. + */ +struct SavedIndexParams : public IndexParams +{ + SavedIndexParams(cv::String filename) + { + (* this)["algorithm"] = FLANN_INDEX_SAVED; + (*this)["filename"] = filename; + } +}; + + +template +NNIndex* load_saved_index(const Matrix& dataset, const cv::String& filename, Distance distance) +{ + typedef typename Distance::ElementType ElementType; + + FILE* fin = fopen(filename.c_str(), "rb"); + if (fin == NULL) { + return NULL; + } + IndexHeader header = load_header(fin); + if (header.data_type != Datatype::type()) { + fclose(fin); + FLANN_THROW(cv::Error::StsError, "Datatype of saved index is different than of the one to be created."); + } + if ((size_t(header.rows) != dataset.rows)||(size_t(header.cols) != dataset.cols)) { + fclose(fin); + FLANN_THROW(cv::Error::StsError, "The index saved belongs to a different dataset"); + } + + IndexParams params; + params["algorithm"] = header.index_type; + NNIndex* nnIndex = create_index_by_type(dataset, params, distance); + nnIndex->loadIndex(fin); + fclose(fin); + + return nnIndex; +} + + +template +class Index : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + Index(const Matrix& features, const IndexParams& params, Distance distance = Distance() ) + : index_params_(params) + { + flann_algorithm_t index_type = get_param(params,"algorithm"); + loaded_ = false; + + if (index_type == FLANN_INDEX_SAVED) { + nnIndex_ = load_saved_index(features, get_param(params,"filename"), distance); + loaded_ = true; + } + else { + nnIndex_ = create_index_by_type(features, params, distance); + } + } + + ~Index() + { + delete nnIndex_; + } + + /** + * Builds the index. 
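* (This is a no-op when the index was loaded from a saved file; see the loaded_ flag
* set by the constructor.)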
+ */ + void buildIndex() CV_OVERRIDE + { + if (!loaded_) { + nnIndex_->buildIndex(); + } + } + + void save(cv::String filename) + { + FILE* fout = fopen(filename.c_str(), "wb"); + if (fout == NULL) { + FLANN_THROW(cv::Error::StsError, "Cannot open file"); + } + save_header(fout, *nnIndex_); + saveIndex(fout); + fclose(fout); + } + + /** + * \brief Saves the index to a stream + * \param stream The stream to save the index to + */ + virtual void saveIndex(FILE* stream) CV_OVERRIDE + { + nnIndex_->saveIndex(stream); + } + + /** + * \brief Loads the index from a stream + * \param stream The stream from which the index is loaded + */ + virtual void loadIndex(FILE* stream) CV_OVERRIDE + { + nnIndex_->loadIndex(stream); + } + + /** + * \returns number of features in this index. + */ + size_t veclen() const CV_OVERRIDE + { + return nnIndex_->veclen(); + } + + /** + * \returns The dimensionality of the features in this index. + */ + size_t size() const CV_OVERRIDE + { + return nnIndex_->size(); + } + + /** + * \returns The index type (kdtree, kmeans,...) + */ + flann_algorithm_t getType() const CV_OVERRIDE + { + return nnIndex_->getType(); + } + + /** + * \returns The amount of memory (in bytes) used by the index. + */ + virtual int usedMemory() const CV_OVERRIDE + { + return nnIndex_->usedMemory(); + } + + + /** + * \returns The index parameters + */ + IndexParams getParameters() const CV_OVERRIDE + { + return nnIndex_->getParameters(); + } + + /** + * \brief Perform k-nearest neighbor search + * \param[in] queries The query points for which to find the nearest neighbors + * \param[out] indices The indices of the nearest neighbors found + * \param[out] dists Distances to the nearest neighbors found + * \param[in] knn Number of nearest neighbors to return + * \param[in] params Search parameters + */ + void knnSearch(const Matrix& queries, Matrix& indices, Matrix& dists, int knn, const SearchParams& params) CV_OVERRIDE + { + nnIndex_->knnSearch(queries, indices, dists, knn, params); + } + + /** + * \brief Perform radius search + * \param[in] query The query point + * \param[out] indices The indinces of the neighbors found within the given radius + * \param[out] dists The distances to the nearest neighbors found + * \param[in] radius The radius used for search + * \param[in] params Search parameters + * \returns Number of neighbors found + */ + int radiusSearch(const Matrix& query, Matrix& indices, Matrix& dists, float radius, const SearchParams& params) CV_OVERRIDE + { + return nnIndex_->radiusSearch(query, indices, dists, radius, params); + } + + /** + * \brief Method that searches for nearest-neighbours + */ + void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& searchParams) CV_OVERRIDE + { + nnIndex_->findNeighbors(result, vec, searchParams); + } + + /** + * \brief Returns actual index + */ + CV_DEPRECATED NNIndex* getIndex() + { + return nnIndex_; + } + + /** + * \brief Returns index parameters. + * \deprecated use getParameters() instead. 
+ */ + CV_DEPRECATED const IndexParams* getIndexParameters() + { + return &index_params_; + } + +private: + /** Pointer to actual index class */ + NNIndex* nnIndex_; + /** Indices if the index was loaded from a file */ + bool loaded_; + /** Parameters passed to the index */ + IndexParams index_params_; + + Index(const Index &); // copy disabled + Index& operator=(const Index &); // assign disabled +}; + +/** + * Performs a hierarchical clustering of the points passed as argument and then takes a cut in the + * the clustering tree to return a flat clustering. + * @param[in] points Points to be clustered + * @param centers The computed cluster centres. Matrix should be preallocated and centers.rows is the + * number of clusters requested. + * @param params Clustering parameters (The same as for cvflann::KMeansIndex) + * @param d Distance to be used for clustering (eg: cvflann::L2) + * @return number of clusters computed (can be different than clusters.rows and is the highest number + * of the form (branching-1)*K+1 smaller than clusters.rows). + */ +template +int hierarchicalClustering(const Matrix& points, Matrix& centers, + const KMeansIndexParams& params, Distance d = Distance()) +{ + KMeansIndex kmeans(points, params, d); + kmeans.buildIndex(); + + int clusterNum = kmeans.getClusterCenters(centers); + return clusterNum; +} + +} + +//! @endcond + +#endif /* OPENCV_FLANN_BASE_HPP_ */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/general.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/general.h new file mode 100644 index 00000000..29fa8be1 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/general.h @@ -0,0 +1,63 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_GENERAL_H_ +#define OPENCV_FLANN_GENERAL_H_ + +#if CV_VERSION_MAJOR <= 4 + +//! 
@cond IGNORED + +#include "opencv2/core.hpp" + +namespace cvflann +{ + +class FLANNException : public cv::Exception +{ +public: + FLANNException(const char* message) : cv::Exception(0, message, "", __FILE__, __LINE__) { } + + FLANNException(const cv::String& message) : cv::Exception(0, message, "", __FILE__, __LINE__) { } +}; + +} + +#define FLANN_THROW(TYPE, STR) throw FLANNException(STR) + +#else + +#define FLANN_THROW(TYPE, STR) CV_Error(TYPE, STR) + +#endif + +//! @endcond + +#endif /* OPENCV_FLANN_GENERAL_H_ */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/ground_truth.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/ground_truth.h new file mode 100644 index 00000000..17f2a8e8 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/ground_truth.h @@ -0,0 +1,98 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_GROUND_TRUTH_H_ +#define OPENCV_FLANN_GROUND_TRUTH_H_ + +//! @cond IGNORED + +#include "dist.h" +#include "matrix.h" + + +namespace cvflann +{ + +template +void find_nearest(const Matrix& dataset, typename Distance::ElementType* query, int* matches, int nn, + int skip = 0, Distance distance = Distance()) +{ + typedef typename Distance::ResultType DistanceType; + int n = nn + skip; + + std::vector match(n); + std::vector dists(n); + + dists[0] = distance(dataset[0], query, dataset.cols); + match[0] = 0; + int dcnt = 1; + + for (size_t i=1; i=1 && dists[j] +void compute_ground_truth(const Matrix& dataset, const Matrix& testset, Matrix& matches, + int skip=0, Distance d = Distance()) +{ + for (size_t i=0; i(dataset, testset[i], matches[i], (int)matches.cols, skip, d); + } +} + + +} + +//! 
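/* A minimal usage sketch (illustrative, not part of the upstream header): exact
 * neighbours from compute_ground_truth() above compared against an approximate
 * cvflann::Index from flann_base.hpp. The sizes, kd-tree parameters and empty input
 * buffers are placeholders to be filled by the caller. */
#include <vector>

static inline void ground_truth_example()
{
    const size_t rows = 1000, test_rows = 10, dim = 32, knn = 5;
    std::vector<float> data(rows * dim), queries(test_rows * dim);   // filled by the caller
    cvflann::Matrix<float> dataset(&data[0], rows, dim);
    cvflann::Matrix<float> testset(&queries[0], test_rows, dim);

    // Brute-force reference answer.
    std::vector<int> gt(test_rows * knn);
    cvflann::Matrix<int> matches(&gt[0], test_rows, knn);
    cvflann::compute_ground_truth< cvflann::L2<float> >(dataset, testset, matches);

    // Approximate answer from a randomized kd-tree forest.
    cvflann::Index< cvflann::L2<float> > index(dataset, cvflann::KDTreeIndexParams(4));
    index.buildIndex();
    std::vector<int> idx(test_rows * knn);
    std::vector<float> d(test_rows * knn);
    cvflann::Matrix<int> indices(&idx[0], test_rows, knn);
    cvflann::Matrix<float> dists(&d[0], test_rows, knn);
    index.knnSearch(testset, indices, dists, (int)knn, cvflann::SearchParams(32));
}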
@endcond + +#endif //OPENCV_FLANN_GROUND_TRUTH_H_ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/hdf5.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/hdf5.h new file mode 100644 index 00000000..75543840 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/hdf5.h @@ -0,0 +1,235 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + + +#ifndef OPENCV_FLANN_HDF5_H_ +#define OPENCV_FLANN_HDF5_H_ + +//! 
@cond IGNORED + +#include + +#include "matrix.h" + + +namespace cvflann +{ + +namespace +{ + +template +hid_t get_hdf5_type() +{ + throw FLANNException("Unsupported type for IO operations"); +} + +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_CHAR; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_UCHAR; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_SHORT; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_USHORT; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_INT; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_UINT; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_LONG; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_ULONG; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_FLOAT; } +template<> +hid_t get_hdf5_type() { return H5T_NATIVE_DOUBLE; } +} + + +#define CHECK_ERROR(x,y) if ((x)<0) throw FLANNException((y)); + +template +void save_to_file(const cvflann::Matrix& dataset, const String& filename, const String& name) +{ + +#if H5Eset_auto_vers == 2 + H5Eset_auto( H5E_DEFAULT, NULL, NULL ); +#else + H5Eset_auto( NULL, NULL ); +#endif + + herr_t status; + hid_t file_id; + file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT); + if (file_id < 0) { + file_id = H5Fcreate(filename.c_str(), H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT); + } + CHECK_ERROR(file_id,"Error creating hdf5 file."); + + hsize_t dimsf[2]; // dataset dimensions + dimsf[0] = dataset.rows; + dimsf[1] = dataset.cols; + + hid_t space_id = H5Screate_simple(2, dimsf, NULL); + hid_t memspace_id = H5Screate_simple(2, dimsf, NULL); + + hid_t dataset_id; +#if H5Dcreate_vers == 2 + dataset_id = H5Dcreate2(file_id, name.c_str(), get_hdf5_type(), space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); +#else + dataset_id = H5Dcreate(file_id, name.c_str(), get_hdf5_type(), space_id, H5P_DEFAULT); +#endif + + if (dataset_id<0) { +#if H5Dopen_vers == 2 + dataset_id = H5Dopen2(file_id, name.c_str(), H5P_DEFAULT); +#else + dataset_id = H5Dopen(file_id, name.c_str()); +#endif + } + CHECK_ERROR(dataset_id,"Error creating or opening dataset in file."); + + status = H5Dwrite(dataset_id, get_hdf5_type(), memspace_id, space_id, H5P_DEFAULT, dataset.data ); + CHECK_ERROR(status, "Error writing to dataset"); + + H5Sclose(memspace_id); + H5Sclose(space_id); + H5Dclose(dataset_id); + H5Fclose(file_id); + +} + + +template +void load_from_file(cvflann::Matrix& dataset, const String& filename, const String& name) +{ + herr_t status; + hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT); + CHECK_ERROR(file_id,"Error opening hdf5 file."); + + hid_t dataset_id; +#if H5Dopen_vers == 2 + dataset_id = H5Dopen2(file_id, name.c_str(), H5P_DEFAULT); +#else + dataset_id = H5Dopen(file_id, name.c_str()); +#endif + CHECK_ERROR(dataset_id,"Error opening dataset in file."); + + hid_t space_id = H5Dget_space(dataset_id); + + hsize_t dims_out[2]; + H5Sget_simple_extent_dims(space_id, dims_out, NULL); + + dataset = cvflann::Matrix(new T[dims_out[0]*dims_out[1]], dims_out[0], dims_out[1]); + + status = H5Dread(dataset_id, get_hdf5_type(), H5S_ALL, H5S_ALL, H5P_DEFAULT, dataset[0]); + CHECK_ERROR(status, "Error reading dataset"); + + H5Sclose(space_id); + H5Dclose(dataset_id); + H5Fclose(file_id); +} + + +#ifdef HAVE_MPI + +namespace mpi +{ +/** + * Loads a the hyperslice corresponding to this processor from a hdf5 file. 
+ * @param flann_dataset Dataset where the data is loaded + * @param filename HDF5 file name + * @param name Name of dataset inside file + */ +template +void load_from_file(cvflann::Matrix& dataset, const String& filename, const String& name) +{ + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + + int mpi_size, mpi_rank; + MPI_Comm_size(comm, &mpi_size); + MPI_Comm_rank(comm, &mpi_rank); + + herr_t status; + + hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS); + H5Pset_fapl_mpio(plist_id, comm, info); + hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, plist_id); + CHECK_ERROR(file_id,"Error opening hdf5 file."); + H5Pclose(plist_id); + hid_t dataset_id; +#if H5Dopen_vers == 2 + dataset_id = H5Dopen2(file_id, name.c_str(), H5P_DEFAULT); +#else + dataset_id = H5Dopen(file_id, name.c_str()); +#endif + CHECK_ERROR(dataset_id,"Error opening dataset in file."); + + hid_t space_id = H5Dget_space(dataset_id); + hsize_t dims[2]; + H5Sget_simple_extent_dims(space_id, dims, NULL); + + hsize_t count[2]; + hsize_t offset[2]; + + hsize_t item_cnt = dims[0]/mpi_size+(dims[0]%mpi_size==0 ? 0 : 1); + hsize_t cnt = (mpi_rank(), memspace_id, space_id, plist_id, dataset.data); + CHECK_ERROR(status, "Error reading dataset"); + + H5Pclose(plist_id); + H5Sclose(space_id); + H5Sclose(memspace_id); + H5Dclose(dataset_id); + H5Fclose(file_id); +} +} +#endif // HAVE_MPI +} // namespace cvflann::mpi + +//! @endcond + +#endif /* OPENCV_FLANN_HDF5_H_ */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/heap.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/heap.h new file mode 100644 index 00000000..ee1c682c --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/heap.h @@ -0,0 +1,169 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_HEAP_H_ +#define OPENCV_FLANN_HEAP_H_ + +//! 
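/* Illustrative sketch for the HDF5 helpers declared in hdf5.h above (not part of the
 * upstream header): round-trips a feature matrix through a dataset in an .h5 file.
 * Assumes HDF5 support is available; the file and dataset names are placeholders. */
static inline void hdf5_roundtrip_example(const cvflann::Matrix<float>& features)
{
    cvflann::save_to_file(features, "features.h5", "features");

    cvflann::Matrix<float> loaded;
    cvflann::load_from_file(loaded, "features.h5", "features");

    // load_from_file() allocates the buffer with new[], so the caller owns it.
    delete[] loaded.data;
}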
@cond IGNORED + +#include +#include + +namespace cvflann +{ + +/** + * Priority Queue Implementation + * + * The priority queue is implemented with a heap. A heap is a complete + * (full) binary tree in which each parent is less than both of its + * children, but the order of the children is unspecified. + */ +template +class Heap +{ + + /** + * Storage array for the heap. + * Type T must be comparable. + */ + std::vector heap; + int length; + + /** + * Number of element in the heap + */ + int count; + + + +public: + /** + * Constructor. + * + * Params: + * sz = heap size + */ + + Heap(int sz) + { + length = sz; + heap.reserve(length); + count = 0; + } + + /** + * + * Returns: heap size + */ + int size() + { + return count; + } + + /** + * Tests if the heap is empty + * + * Returns: true is heap empty, false otherwise + */ + bool empty() + { + return size()==0; + } + + /** + * Clears the heap. + */ + void clear() + { + heap.clear(); + count = 0; + } + + struct CompareT + { + bool operator()(const T& t_1, const T& t_2) const + { + return t_2 < t_1; + } + }; + + /** + * Insert a new element in the heap. + * + * We select the next empty leaf node, and then keep moving any larger + * parents down until the right location is found to store this element. + * + * Params: + * value = the new element to be inserted in the heap + */ + void insert(T value) + { + /* If heap is full, then return without adding this element. */ + if (count == length) { + return; + } + + heap.push_back(value); + static CompareT compareT; + std::push_heap(heap.begin(), heap.end(), compareT); + ++count; + } + + + + /** + * Returns the node of minimum value from the heap (top of the heap). + * + * Params: + * value = out parameter used to return the min element + * Returns: false if heap empty + */ + bool popMin(T& value) + { + if (count == 0) { + return false; + } + + value = heap[0]; + static CompareT compareT; + std::pop_heap(heap.begin(), heap.end(), compareT); + heap.pop_back(); + --count; + + return true; /* Return old last node. */ + } +}; + +} + +//! @endcond + +#endif //OPENCV_FLANN_HEAP_H_ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/hierarchical_clustering_index.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/hierarchical_clustering_index.h new file mode 100644 index 00000000..7085d690 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/hierarchical_clustering_index.h @@ -0,0 +1,847 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2011 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2011 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_HIERARCHICAL_CLUSTERING_INDEX_H_ +#define OPENCV_FLANN_HIERARCHICAL_CLUSTERING_INDEX_H_ + +//! @cond IGNORED + +#include +#include +#include +#include + +#include "general.h" +#include "nn_index.h" +#include "dist.h" +#include "matrix.h" +#include "result_set.h" +#include "heap.h" +#include "allocator.h" +#include "random.h" +#include "saving.h" + + +namespace cvflann +{ + +struct HierarchicalClusteringIndexParams : public IndexParams +{ + HierarchicalClusteringIndexParams(int branching = 32, + flann_centers_init_t centers_init = FLANN_CENTERS_RANDOM, + int trees = 4, int leaf_size = 100) + { + (*this)["algorithm"] = FLANN_INDEX_HIERARCHICAL; + // The branching factor used in the hierarchical clustering + (*this)["branching"] = branching; + // Algorithm used for picking the initial cluster centers + (*this)["centers_init"] = centers_init; + // number of parallel trees to build + (*this)["trees"] = trees; + // maximum leaf size + (*this)["leaf_size"] = leaf_size; + } +}; + + +/** + * Hierarchical index + * + * Contains a tree constructed through a hierarchical clustering + * and other information for indexing a set of points for nearest-neighbour matching. + */ +template +class HierarchicalClusteringIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + +private: + + + typedef void (HierarchicalClusteringIndex::* centersAlgFunction)(int, int*, int, int*, int&); + + /** + * The function used for choosing the cluster centers. + */ + centersAlgFunction chooseCenters; + + + + /** + * Chooses the initial centers in the k-means clustering in a random manner. + * + * Params: + * k = number of centers + * vecs = the dataset of points + * indices = indices in the dataset + * indices_length = length of indices vector + * + */ + void chooseCentersRandom(int k, int* dsindices, int indices_length, int* centers, int& centers_length) + { + UniqueRandom r(indices_length); + + int index; + for (index=0; index=0 && rnd < n); + + centers[0] = dsindices[rnd]; + + int index; + for (index=1; indexbest_val) { + best_val = dist; + best_index = j; + } + } + if (best_index!=-1) { + centers[index] = dsindices[best_index]; + } + else { + break; + } + } + centers_length = index; + } + + + /** + * Chooses the initial centers in the k-means using the algorithm + * proposed in the KMeans++ paper: + * Arthur, David; Vassilvitskii, Sergei - k-means++: The Advantages of Careful Seeding + * + * Implementation of this function was converted from the one provided in Arthur's code. 
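* Concretely, each new center is drawn with probability proportional to closestDistSq[i],
* i.e. D(x)^2, the squared distance from point x to its nearest already-chosen center;
* rand_double(currentPot) implements this weighted draw over the running totals.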
+ * + * Params: + * k = number of centers + * vecs = the dataset of points + * indices = indices in the dataset + * Returns: + */ + void chooseCentersKMeanspp(int k, int* dsindices, int indices_length, int* centers, int& centers_length) + { + int n = indices_length; + + double currentPot = 0; + DistanceType* closestDistSq = new DistanceType[n]; + + // Choose one random center and set the closestDistSq values + int index = rand_int(n); + CV_DbgAssert(index >=0 && index < n); + centers[0] = dsindices[index]; + + // Computing distance^2 will have the advantage of even higher probability further to pick new centers + // far from previous centers (and this complies to "k-means++: the advantages of careful seeding" article) + for (int i = 0; i < n; i++) { + closestDistSq[i] = distance(dataset[dsindices[i]], dataset[dsindices[index]], dataset.cols); + closestDistSq[i] = ensureSquareDistance( closestDistSq[i] ); + currentPot += closestDistSq[i]; + } + + + const int numLocalTries = 1; + + // Choose each center + int centerCount; + for (centerCount = 1; centerCount < k; centerCount++) { + + // Repeat several trials + double bestNewPot = -1; + int bestNewIndex = 0; + for (int localTrial = 0; localTrial < numLocalTries; localTrial++) { + + // Choose our center - have to be slightly careful to return a valid answer even accounting + // for possible rounding errors + double randVal = rand_double(currentPot); + for (index = 0; index < n-1; index++) { + if (randVal <= closestDistSq[index]) break; + else randVal -= closestDistSq[index]; + } + + // Compute the new potential + double newPot = 0; + for (int i = 0; i < n; i++) { + DistanceType dist = distance(dataset[dsindices[i]], dataset[dsindices[index]], dataset.cols); + newPot += std::min( ensureSquareDistance(dist), closestDistSq[i] ); + } + + // Store the best result + if ((bestNewPot < 0)||(newPot < bestNewPot)) { + bestNewPot = newPot; + bestNewIndex = index; + } + } + + // Add the appropriate center + centers[centerCount] = dsindices[bestNewIndex]; + currentPot = bestNewPot; + for (int i = 0; i < n; i++) { + DistanceType dist = distance(dataset[dsindices[i]], dataset[dsindices[bestNewIndex]], dataset.cols); + closestDistSq[i] = std::min( ensureSquareDistance(dist), closestDistSq[i] ); + } + } + + centers_length = centerCount; + + delete[] closestDistSq; + } + + + /** + * Chooses the initial centers in a way inspired by Gonzales (by Pierre-Emmanuel Viel): + * select the first point of the list as a candidate, then parse the points list. If another + * point is further than current candidate from the other centers, test if it is a good center + * of a local aggregation. If it is, replace current candidate by this point. And so on... + * + * Used with KMeansIndex that computes centers coordinates by averaging positions of clusters points, + * this doesn't make a real difference with previous methods. But used with HierarchicalClusteringIndex + * class that pick centers among existing points instead of computing the barycenters, there is a real + * improvement. 
+ * + * Params: + * k = number of centers + * vecs = the dataset of points + * indices = indices in the dataset + * Returns: + */ + void GroupWiseCenterChooser(int k, int* dsindices, int indices_length, int* centers, int& centers_length) + { + const float kSpeedUpFactor = 1.3f; + + int n = indices_length; + + DistanceType* closestDistSq = new DistanceType[n]; + + // Choose one random center and set the closestDistSq values + int index = rand_int(n); + CV_DbgAssert(index >=0 && index < n); + centers[0] = dsindices[index]; + + for (int i = 0; i < n; i++) { + closestDistSq[i] = distance(dataset[dsindices[i]], dataset[dsindices[index]], dataset.cols); + } + + + // Choose each center + int centerCount; + for (centerCount = 1; centerCount < k; centerCount++) { + + // Repeat several trials + double bestNewPot = -1; + int bestNewIndex = 0; + DistanceType furthest = 0; + for (index = 0; index < n; index++) { + + // We will test only the potential of the points further than current candidate + if( closestDistSq[index] > kSpeedUpFactor * (float)furthest ) { + + // Compute the new potential + double newPot = 0; + for (int i = 0; i < n; i++) { + newPot += std::min( distance(dataset[dsindices[i]], dataset[dsindices[index]], dataset.cols) + , closestDistSq[i] ); + } + + // Store the best result + if ((bestNewPot < 0)||(newPot <= bestNewPot)) { + bestNewPot = newPot; + bestNewIndex = index; + furthest = closestDistSq[index]; + } + } + } + + // Add the appropriate center + centers[centerCount] = dsindices[bestNewIndex]; + for (int i = 0; i < n; i++) { + closestDistSq[i] = std::min( distance(dataset[dsindices[i]], dataset[dsindices[bestNewIndex]], dataset.cols) + , closestDistSq[i] ); + } + } + + centers_length = centerCount; + + delete[] closestDistSq; + } + + +public: + + + /** + * Index constructor + * + * Params: + * inputData = dataset with the input features + * params = parameters passed to the hierarchical k-means algorithm + */ + HierarchicalClusteringIndex(const Matrix& inputData, const IndexParams& index_params = HierarchicalClusteringIndexParams(), + Distance d = Distance()) + : dataset(inputData), params(index_params), root(NULL), indices(NULL), distance(d) + { + memoryCounter = 0; + + size_ = dataset.rows; + veclen_ = dataset.cols; + + branching_ = get_param(params,"branching",32); + centers_init_ = get_param(params,"centers_init", FLANN_CENTERS_RANDOM); + trees_ = get_param(params,"trees",4); + leaf_size_ = get_param(params,"leaf_size",100); + + if (centers_init_==FLANN_CENTERS_RANDOM) { + chooseCenters = &HierarchicalClusteringIndex::chooseCentersRandom; + } + else if (centers_init_==FLANN_CENTERS_GONZALES) { + chooseCenters = &HierarchicalClusteringIndex::chooseCentersGonzales; + } + else if (centers_init_==FLANN_CENTERS_KMEANSPP) { + chooseCenters = &HierarchicalClusteringIndex::chooseCentersKMeanspp; + } + else if (centers_init_==FLANN_CENTERS_GROUPWISE) { + chooseCenters = &HierarchicalClusteringIndex::GroupWiseCenterChooser; + } + else { + FLANN_THROW(cv::Error::StsError, "Unknown algorithm for choosing initial centers."); + } + + root = new NodePtr[trees_]; + indices = new int*[trees_]; + + for (int i=0; i(); + computeClustering(root[i], indices[i], (int)size_, branching_,0); + } + } + + + flann_algorithm_t getType() const CV_OVERRIDE + { + return FLANN_INDEX_HIERARCHICAL; + } + + + void saveIndex(FILE* stream) CV_OVERRIDE + { + save_value(stream, branching_); + save_value(stream, trees_); + save_value(stream, centers_init_); + save_value(stream, leaf_size_); + save_value(stream, 
memoryCounter); + for (int i=0; i& result, const ElementType* vec, const SearchParams& searchParams) CV_OVERRIDE + { + + const int maxChecks = get_param(searchParams,"checks",32); + + // Priority queue storing intermediate branches in the best-bin-first search + Heap* heap = new Heap((int)size_); + + std::vector checked(size_,false); + int checks = 0; + for (int i=0; i= maxChecks) && result.full()) + break; + } + + BranchSt branch; + while (heap->popMin(branch) && (checks BranchSt; + + + + void save_tree(FILE* stream, NodePtr node, int num) + { + save_value(stream, *node); + if (node->childs==NULL) { + int indices_offset = (int)(node->indices - indices[num]); + save_value(stream, indices_offset); + } + else { + for(int i=0; ichilds[i], num); + } + } + } + + + void load_tree(FILE* stream, NodePtr& node, int num) + { + node = pool.allocate(); + load_value(stream, *node); + if (node->childs==NULL) { + int indices_offset; + load_value(stream, indices_offset); + node->indices = indices[num] + indices_offset; + } + else { + node->childs = pool.allocate(branching_); + for(int i=0; ichilds[i], num); + } + } + } + + + /** + * Release the inner elements of indices[] + */ + void free_indices() + { + if (indices!=NULL) { + for(int i=0; inew_dist) { + labels[i] = j; + dist = new_dist; + } + } + cost += dist; + } + } + + /** + * The method responsible with actually doing the recursive hierarchical + * clustering + * + * Params: + * node = the node to cluster + * indices = indices of the points belonging to the current node + * branching = the branching factor to use in the clustering + * + * TODO: for 1-sized clusters don't store a cluster center (it's the same as the single cluster point) + */ + void computeClustering(NodePtr node, int* dsindices, int indices_length, int branching, int level) + { + node->size = indices_length; + node->level = level; + + if (indices_length < leaf_size_) { // leaf node + node->indices = dsindices; + std::sort(node->indices,node->indices+indices_length); + node->childs = NULL; + return; + } + + std::vector centers(branching); + std::vector labels(indices_length); + + int centers_length; + (this->*chooseCenters)(branching, dsindices, indices_length, ¢ers[0], centers_length); + + if (centers_lengthindices = dsindices; + std::sort(node->indices,node->indices+indices_length); + node->childs = NULL; + return; + } + + + // assign points to clusters + DistanceType cost; + computeLabels(dsindices, indices_length, ¢ers[0], centers_length, &labels[0], cost); + + node->childs = pool.allocate(branching); + int start = 0; + int end = start; + for (int i=0; ichilds[i] = pool.allocate(); + node->childs[i]->pivot = centers[i]; + node->childs[i]->indices = NULL; + computeClustering(node->childs[i],dsindices+start, end-start, branching, level+1); + start=end; + } + } + + + + /** + * Performs one descent in the hierarchical k-means tree. The branches not + * visited are stored in a priority queue. 
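* At each internal node the child whose pivot is closest to the query is descended
* first; the remaining children are pushed onto the shared heap keyed by their pivot
* distance, so findNeighbors() can later resume from the most promising unexplored
* branch (best-bin-first search).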
+ * + * Params: + * node = node to explore + * result = container for the k-nearest neighbors found + * vec = query points + * checks = how many points in the dataset have been checked so far + * maxChecks = maximum dataset points to checks + */ + + + void findNN(NodePtr node, ResultSet& result, const ElementType* vec, int& checks, int maxChecks, + Heap* heap, std::vector& checked) + { + if (node->childs==NULL) { + if ((checks>=maxChecks) && result.full()) { + return; + } + for (int i=0; isize; ++i) { + int index = node->indices[i]; + if (!checked[index]) { + DistanceType dist = distance(dataset[index], vec, veclen_); + result.addPoint(dist, index); + checked[index] = true; + ++checks; + } + } + } + else { + DistanceType* domain_distances = new DistanceType[branching_]; + int best_index = 0; + domain_distances[best_index] = distance(vec, dataset[node->childs[best_index]->pivot], veclen_); + for (int i=1; ichilds[i]->pivot], veclen_); + if (domain_distances[i]insert(BranchSt(node->childs[i],domain_distances[i])); + } + } + delete[] domain_distances; + findNN(node->childs[best_index],result,vec, checks, maxChecks, heap, checked); + } + } + +private: + + + /** + * The dataset used by this index + */ + const Matrix dataset; + + /** + * Parameters used by this index + */ + IndexParams params; + + + /** + * Number of features in the dataset. + */ + size_t size_; + + /** + * Length of each feature. + */ + size_t veclen_; + + /** + * The root node in the tree. + */ + NodePtr* root; + + /** + * Array of indices to vectors in the dataset. + */ + int** indices; + + + /** + * The distance + */ + Distance distance; + + /** + * Pooled memory allocator. + * + * Using a pooled memory allocator is more efficient + * than allocating memory directly when there is a large + * number small of memory allocations. + */ + PooledAllocator pool; + + /** + * Memory occupied by the index. + */ + int memoryCounter; + + /** index parameters */ + int branching_; + int trees_; + flann_centers_init_t centers_init_; + int leaf_size_; + + +}; + +} + +//! @endcond + +#endif /* OPENCV_FLANN_HIERARCHICAL_CLUSTERING_INDEX_H_ */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/index_testing.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/index_testing.h new file mode 100644 index 00000000..207adef4 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/index_testing.h @@ -0,0 +1,321 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_INDEX_TESTING_H_ +#define OPENCV_FLANN_INDEX_TESTING_H_ + +//! @cond IGNORED + +#include +#include + +#include "matrix.h" +#include "nn_index.h" +#include "result_set.h" +#include "logger.h" +#include "timer.h" + + +namespace cvflann +{ + +inline int countCorrectMatches(int* neighbors, int* groundTruth, int n) +{ + int count = 0; + for (int i=0; i +typename Distance::ResultType computeDistanceRaport(const Matrix& inputData, typename Distance::ElementType* target, + int* neighbors, int* groundTruth, int veclen, int n, const Distance& distance) +{ + typedef typename Distance::ResultType DistanceType; + + DistanceType ret = 0; + for (int i=0; i +float search_with_ground_truth(NNIndex& index, const Matrix& inputData, + const Matrix& testData, const Matrix& matches, int nn, int checks, + float& time, typename Distance::ResultType& dist, const Distance& distance, int skipMatches) +{ + typedef typename Distance::ResultType DistanceType; + + if (matches.cols resultSet(nn+skipMatches); + SearchParams searchParams(checks); + + std::vector indices(nn+skipMatches); + std::vector dists(nn+skipMatches); + int* neighbors = &indices[skipMatches]; + + int correct = 0; + DistanceType distR = 0; + StartStopTimer t; + int repeats = 0; + while (t.value<0.2) { + repeats++; + t.start(); + correct = 0; + distR = 0; + for (size_t i = 0; i < testData.rows; i++) { + resultSet.init(&indices[0], &dists[0]); + index.findNeighbors(resultSet, testData[i], searchParams); + + correct += countCorrectMatches(neighbors,matches[i], nn); + distR += computeDistanceRaport(inputData, testData[i], neighbors, matches[i], (int)testData.cols, nn, distance); + } + t.stop(); + } + time = float(t.value/repeats); + + float precicion = (float)correct/(nn*testData.rows); + + dist = distR/(testData.rows*nn); + + Logger::info("%8d %10.4g %10.5g %10.5g %10.5g\n", + checks, precicion, time, 1000.0 * time / testData.rows, dist); + + return precicion; +} + + +template +float test_index_checks(NNIndex& index, const Matrix& inputData, + const Matrix& testData, const Matrix& matches, + int checks, float& precision, const Distance& distance, int nn = 1, int skipMatches = 0) +{ + typedef typename Distance::ResultType DistanceType; + + Logger::info(" Nodes Precision(%) Time(s) Time/vec(ms) Mean dist\n"); + Logger::info("---------------------------------------------------------\n"); + + float time = 0; + DistanceType dist = 0; + precision = search_with_ground_truth(index, inputData, testData, matches, nn, checks, time, dist, distance, skipMatches); + + return time; +} + +template +float test_index_precision(NNIndex& index, const Matrix& inputData, + const Matrix& testData, const Matrix& matches, + float precision, int& checks, const Distance& distance, int nn = 1, int skipMatches = 0) +{ + typedef typename Distance::ResultType DistanceType; + const float SEARCH_EPS = 0.001f; + + Logger::info(" Nodes Precision(%) Time(s) Time/vec(ms) Mean dist\n"); 
+ Logger::info("---------------------------------------------------------\n"); + + int c2 = 1; + float p2; + int c1 = 1; + //float p1; + float time; + DistanceType dist; + + p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, distance, skipMatches); + + if (p2>precision) { + Logger::info("Got as close as I can\n"); + checks = c2; + return time; + } + + while (p2SEARCH_EPS) { + Logger::info("Start linear estimation\n"); + // after we got to values in the vecinity of the desired precision + // use linear approximation get a better estimation + + cx = (c1+c2)/2; + realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, distance, skipMatches); + while (fabs(realPrecision-precision)>SEARCH_EPS) { + + if (realPrecision +void test_index_precisions(NNIndex& index, const Matrix& inputData, + const Matrix& testData, const Matrix& matches, + float* precisions, int precisions_length, const Distance& distance, int nn = 1, int skipMatches = 0, float maxTime = 0) +{ + typedef typename Distance::ResultType DistanceType; + + const float SEARCH_EPS = 0.001; + + // make sure precisions array is sorted + std::sort(precisions, precisions+precisions_length); + + int pindex = 0; + float precision = precisions[pindex]; + + Logger::info(" Nodes Precision(%) Time(s) Time/vec(ms) Mean dist\n"); + Logger::info("---------------------------------------------------------\n"); + + int c2 = 1; + float p2; + + int c1 = 1; + float p1; + + float time; + DistanceType dist; + + p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, distance, skipMatches); + + // if precision for 1 run down the tree is already + // better then some of the requested precisions, then + // skip those + while (precisions[pindex] 0)&&(time > maxTime)&&(p2SEARCH_EPS) { + Logger::info("Start linear estimation\n"); + // after we got to values in the vecinity of the desired precision + // use linear approximation get a better estimation + + cx = (c1+c2)/2; + realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, distance, skipMatches); + while (fabs(realPrecision-precision)>SEARCH_EPS) { + + if (realPrecision +#include +#include + +#include "nn_index.h" +#include "dynamic_bitset.h" +#include "matrix.h" +#include "result_set.h" +#include "heap.h" +#include "allocator.h" +#include "random.h" +#include "saving.h" + + +namespace cvflann +{ + +struct KDTreeIndexParams : public IndexParams +{ + KDTreeIndexParams(int trees = 4) + { + (*this)["algorithm"] = FLANN_INDEX_KDTREE; + (*this)["trees"] = trees; + } +}; + + +/** + * Randomized kd-tree index + * + * Contains the k-d trees and other information for indexing a set of points + * for nearest-neighbor matching. + */ +template +class KDTreeIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + + /** + * KDTree constructor + * + * Params: + * inputData = dataset with the input features + * params = parameters passed to the kdtree algorithm + */ + KDTreeIndex(const Matrix& inputData, const IndexParams& params = KDTreeIndexParams(), + Distance d = Distance() ) : + dataset_(inputData), index_params_(params), distance_(d) + { + size_ = dataset_.rows; + veclen_ = dataset_.cols; + + trees_ = get_param(index_params_,"trees",4); + tree_roots_ = new NodePtr[trees_]; + + // Create a permutable array of indices to the input vectors. 
+ vind_.resize(size_); + for (size_t i = 0; i < size_; ++i) { + vind_[i] = int(i); + } + + mean_ = new DistanceType[veclen_]; + var_ = new DistanceType[veclen_]; + } + + + KDTreeIndex(const KDTreeIndex&); + KDTreeIndex& operator=(const KDTreeIndex&); + + /** + * Standard destructor + */ + ~KDTreeIndex() + { + if (tree_roots_!=NULL) { + delete[] tree_roots_; + } + delete[] mean_; + delete[] var_; + } + + /** + * Builds the index + */ + void buildIndex() CV_OVERRIDE + { + /* Construct the randomized trees. */ + for (int i = 0; i < trees_; i++) { + /* Randomize the order of vectors to allow for unbiased sampling. */ +#ifndef OPENCV_FLANN_USE_STD_RAND + cv::randShuffle(vind_); +#else + std::random_shuffle(vind_.begin(), vind_.end()); +#endif + + tree_roots_[i] = divideTree(&vind_[0], int(size_) ); + } + } + + + flann_algorithm_t getType() const CV_OVERRIDE + { + return FLANN_INDEX_KDTREE; + } + + + void saveIndex(FILE* stream) CV_OVERRIDE + { + save_value(stream, trees_); + for (int i=0; i& result, const ElementType* vec, const SearchParams& searchParams) CV_OVERRIDE + { + int maxChecks = get_param(searchParams,"checks", 32); + float epsError = 1+get_param(searchParams,"eps",0.0f); + + if (maxChecks==FLANN_CHECKS_UNLIMITED) { + getExactNeighbors(result, vec, epsError); + } + else { + getNeighbors(result, vec, maxChecks, epsError); + } + } + + IndexParams getParameters() const CV_OVERRIDE + { + return index_params_; + } + +private: + + + /*--------------------- Internal Data Structures --------------------------*/ + struct Node + { + /** + * Dimension used for subdivision. + */ + int divfeat; + /** + * The values used for subdivision. + */ + DistanceType divval; + /** + * The child nodes. + */ + Node* child1, * child2; + }; + typedef Node* NodePtr; + typedef BranchStruct BranchSt; + typedef BranchSt* Branch; + + + + void save_tree(FILE* stream, NodePtr tree) + { + save_value(stream, *tree); + if (tree->child1!=NULL) { + save_tree(stream, tree->child1); + } + if (tree->child2!=NULL) { + save_tree(stream, tree->child2); + } + } + + + void load_tree(FILE* stream, NodePtr& tree) + { + tree = pool_.allocate(); + load_value(stream, *tree); + if (tree->child1!=NULL) { + load_tree(stream, tree->child1); + } + if (tree->child2!=NULL) { + load_tree(stream, tree->child2); + } + } + + + /** + * Create a tree node that subdivides the list of vecs from vind[first] + * to vind[last]. The routine is called recursively on each sublist. + * Place a pointer to this new tree node in the location pTree. + * + * Params: pTree = the new node to create + * first = index of the first vector + * last = index of the last vector + */ + NodePtr divideTree(int* ind, int count) + { + NodePtr node = pool_.allocate(); // allocate memory + + /* If too few exemplars remain, then make this a leaf node. */ + if ( count == 1) { + node->child1 = node->child2 = NULL; /* Mark as leaf node. */ + node->divfeat = *ind; /* Store index of this vec. */ + } + else { + int idx; + int cutfeat; + DistanceType cutval; + meanSplit(ind, count, idx, cutfeat, cutval); + + node->divfeat = cutfeat; + node->divval = cutval; + node->child1 = divideTree(ind, idx); + node->child2 = divideTree(ind+idx, count-idx); + } + + return node; + } + + + /** + * Choose which feature to use in order to subdivide this set of vectors. + * Make a random choice among those with the highest variance, and use + * its variance as the threshold value. 
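A free-standing sketch of the split heuristic described above (and implemented by meanSplit()/selectDivision() just below): estimate per-dimension mean and variance from a small sample, then cut on a randomly chosen dimension among the few with the highest variance, at its mean value. SAMPLE_MEAN, RAND_DIM and the flat std::vector point layout are assumptions mirroring the surrounding code; std::rand() stands in for FLANN's RNG:

    #include <algorithm>
    #include <cstdlib>
    #include <vector>

    struct MeanSplit { int cutfeat; float cutval; };

    // Randomized mean-split: sample up to SAMPLE_MEAN points, compute mean and
    // variance per dimension, then cut on one of the RAND_DIM highest-variance
    // dimensions at its mean.
    inline MeanSplit chooseMeanSplit(const std::vector<std::vector<float>>& points,
                                     const std::vector<int>& ind)
    {
        const int SAMPLE_MEAN = 100;
        const int RAND_DIM = 5;
        const std::size_t dim = points[ind[0]].size();
        const int cnt = std::min<int>(SAMPLE_MEAN, (int)ind.size());

        std::vector<float> mean(dim, 0.f), var(dim, 0.f);
        for (int j = 0; j < cnt; ++j)
            for (std::size_t k = 0; k < dim; ++k) mean[k] += points[ind[j]][k];
        for (std::size_t k = 0; k < dim; ++k) mean[k] /= (float)cnt;
        for (int j = 0; j < cnt; ++j)
            for (std::size_t k = 0; k < dim; ++k) {
                const float d = points[ind[j]][k] - mean[k];
                var[k] += d * d;
            }

        // Keep the RAND_DIM dimensions with the largest variance, pick one at random.
        const std::size_t top_n = std::min<std::size_t>(RAND_DIM, dim);
        std::vector<int> top(dim);
        for (std::size_t k = 0; k < dim; ++k) top[k] = (int)k;
        std::partial_sort(top.begin(), top.begin() + top_n, top.end(),
                          [&](int a, int b) { return var[a] > var[b]; });
        const int cutfeat = top[(std::size_t)std::rand() % top_n];
        return MeanSplit{cutfeat, mean[cutfeat]};
    }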
+ */ + void meanSplit(int* ind, int count, int& index, int& cutfeat, DistanceType& cutval) + { + memset(mean_,0,veclen_*sizeof(DistanceType)); + memset(var_,0,veclen_*sizeof(DistanceType)); + + /* Compute mean values. Only the first SAMPLE_MEAN values need to be + sampled to get a good estimate. + */ + int cnt = std::min((int)SAMPLE_MEAN+1, count); + for (int j = 0; j < cnt; ++j) { + ElementType* v = dataset_[ind[j]]; + for (size_t k=0; kcount/2) index = lim1; + else if (lim2 v[topind[num-1]])) { + /* Put this element at end of topind. */ + if (num < RAND_DIM) { + topind[num++] = i; /* Add to list. */ + } + else { + topind[num-1] = i; /* Replace last element. */ + } + /* Bubble end value down to right location by repeated swapping. */ + int j = num - 1; + while (j > 0 && v[topind[j]] > v[topind[j-1]]) { + std::swap(topind[j], topind[j-1]); + --j; + } + } + } + /* Select a random integer in range [0,num-1], and return that index. */ + int rnd = rand_int(num); + return (int)topind[rnd]; + } + + + /** + * Subdivide the list of points by a plane perpendicular on axe corresponding + * to the 'cutfeat' dimension at 'cutval' position. + * + * On return: + * dataset[ind[0..lim1-1]][cutfeat]cutval + */ + void planeSplit(int* ind, int count, int cutfeat, DistanceType cutval, int& lim1, int& lim2) + { + /* Move vector indices for left subtree to front of list. */ + int left = 0; + int right = count-1; + for (;; ) { + while (left<=right && dataset_[ind[left]][cutfeat]=cutval) --right; + if (left>right) break; + std::swap(ind[left], ind[right]); ++left; --right; + } + lim1 = left; + right = count-1; + for (;; ) { + while (left<=right && dataset_[ind[left]][cutfeat]<=cutval) ++left; + while (left<=right && dataset_[ind[right]][cutfeat]>cutval) --right; + if (left>right) break; + std::swap(ind[left], ind[right]); ++left; --right; + } + lim2 = left; + } + + /** + * Performs an exact nearest neighbor search. The exact search performs a full + * traversal of the tree. + */ + void getExactNeighbors(ResultSet& result, const ElementType* vec, float epsError) + { + // checkID -= 1; /* Set a different unique ID for each search. */ + + if (trees_ > 1) { + fprintf(stderr,"It doesn't make any sense to use more than one tree for exact search"); + } + if (trees_>0) { + searchLevelExact(result, vec, tree_roots_[0], 0.0, epsError); + } + CV_Assert(result.full()); + } + + /** + * Performs the approximate nearest-neighbor search. The search is approximate + * because the tree traversal is abandoned after a given number of descends in + * the tree. + */ + void getNeighbors(ResultSet& result, const ElementType* vec, int maxCheck, float epsError) + { + int i; + BranchSt branch; + + int checkCount = 0; + Heap* heap = new Heap((int)size_); + DynamicBitset checked(size_); + + /* Search once through each tree down to root. */ + for (i = 0; i < trees_; ++i) { + searchLevel(result, vec, tree_roots_[i], 0, checkCount, maxCheck, epsError, heap, checked); + } + + /* Keep searching other branches from heap until finished. */ + while ( heap->popMin(branch) && (checkCount < maxCheck || !result.full() )) { + searchLevel(result, vec, branch.node, branch.mindist, checkCount, maxCheck, epsError, heap, checked); + } + + delete heap; + + CV_Assert(result.full()); + } + + + /** + * Search starting from a given node of the tree. Based on any mismatches at + * higher levels, all exemplars below this level must have a distance of + * at least "mindistsq". 
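Before the searchLevel() body that follows, it may help to see in isolation the best-bin-first control loop that getNeighbors() builds around it: descend each randomized tree once, remember every branch not taken together with a lower bound on its distance, then keep expanding the closest pending branch until the check budget runs out. A simplified, tree-agnostic sketch (std::priority_queue instead of FLANN's Heap; Node and the descend callable are hypothetical):

    #include <functional>
    #include <queue>
    #include <vector>

    struct Node;                                     // opaque tree node
    struct Branch {
        const Node* node;
        float       mindist;                         // lower bound for this subtree
        bool operator>(const Branch& o) const { return mindist > o.mindist; }
    };
    using PendingQueue =
        std::priority_queue<Branch, std::vector<Branch>, std::greater<Branch>>;

    // Best-bin-first search skeleton. `descend` walks from a node to a leaf,
    // scores candidate points, pushes every branch it did not take onto the
    // queue, and returns how many points it checked.
    template <class DescendFn>
    void bestBinFirst(const std::vector<const Node*>& roots, DescendFn descend, int maxChecks)
    {
        PendingQueue pending;
        int checks = 0;

        for (const Node* root : roots)               // one initial descent per tree
            checks += descend(root, 0.f, pending);

        while (!pending.empty() && checks < maxChecks) {
            Branch b = pending.top();                // most promising unvisited branch
            pending.pop();
            checks += descend(b.node, b.mindist, pending);
        }
    }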
+ */ + void searchLevel(ResultSet& result_set, const ElementType* vec, NodePtr node, DistanceType mindist, int& checkCount, int maxCheck, + float epsError, Heap* heap, DynamicBitset& checked) + { + if (result_set.worstDist()child1 == NULL)&&(node->child2 == NULL)) { + /* Do not check same node more than once when searching multiple trees. + Once a vector is checked, we set its location in vind to the + current checkID. + */ + int index = node->divfeat; + if ( checked.test(index) || ((checkCount>=maxCheck)&& result_set.full()) ) return; + checked.set(index); + checkCount++; + + DistanceType dist = distance_(dataset_[index], vec, veclen_); + result_set.addPoint(dist,index); + + return; + } + + /* Which child branch should be taken first? */ + ElementType val = vec[node->divfeat]; + DistanceType diff = val - node->divval; + NodePtr bestChild = (diff < 0) ? node->child1 : node->child2; + NodePtr otherChild = (diff < 0) ? node->child2 : node->child1; + + /* Create a branch record for the branch not taken. Add distance + of this feature boundary (we don't attempt to correct for any + use of this feature in a parent node, which is unlikely to + happen and would have only a small effect). Don't bother + adding more branches to heap after halfway point, as cost of + adding exceeds their value. + */ + + DistanceType new_distsq = mindist + distance_.accum_dist(val, node->divval, node->divfeat); + // if (2 * checkCount < maxCheck || !result.full()) { + if ((new_distsq*epsError < result_set.worstDist())|| !result_set.full()) { + heap->insert( BranchSt(otherChild, new_distsq) ); + } + + /* Call recursively to search next level down. */ + searchLevel(result_set, vec, bestChild, mindist, checkCount, maxCheck, epsError, heap, checked); + } + + /** + * Performs an exact search in the tree starting from a node. + */ + void searchLevelExact(ResultSet& result_set, const ElementType* vec, const NodePtr node, DistanceType mindist, const float epsError) + { + /* If this is a leaf node, then do check and return. */ + if ((node->child1 == NULL)&&(node->child2 == NULL)) { + int index = node->divfeat; + DistanceType dist = distance_(dataset_[index], vec, veclen_); + result_set.addPoint(dist,index); + return; + } + + /* Which child branch should be taken first? */ + ElementType val = vec[node->divfeat]; + DistanceType diff = val - node->divval; + NodePtr bestChild = (diff < 0) ? node->child1 : node->child2; + NodePtr otherChild = (diff < 0) ? node->child2 : node->child1; + + /* Create a branch record for the branch not taken. Add distance + of this feature boundary (we don't attempt to correct for any + use of this feature in a parent node, which is unlikely to + happen and would have only a small effect). Don't bother + adding more branches to heap after halfway point, as cost of + adding exceeds their value. + */ + + DistanceType new_distsq = mindist + distance_.accum_dist(val, node->divval, node->divfeat); + + /* Call recursively to search next level down. */ + searchLevelExact(result_set, vec, bestChild, mindist, epsError); + + if (new_distsq*epsError<=result_set.worstDist()) { + searchLevelExact(result_set, vec, otherChild, new_distsq, epsError); + } + } + + +private: + + enum + { + /** + * To improve efficiency, only SAMPLE_MEAN random values are used to + * compute the mean and variance at each level when building a tree. + * A value of 100 seems to perform as well as using all values. 
+ */ + SAMPLE_MEAN = 100, + /** + * Top random dimensions to consider + * + * When creating random trees, the dimension on which to subdivide is + * selected at random from among the top RAND_DIM dimensions with the + * highest variance. A value of 5 works well. + */ + RAND_DIM=5 + }; + + + /** + * Number of randomized trees that are used + */ + int trees_; + + /** + * Array of indices to vectors in the dataset. + */ + std::vector vind_; + + /** + * The dataset used by this index + */ + const Matrix dataset_; + + IndexParams index_params_; + + size_t size_; + size_t veclen_; + + + DistanceType* mean_; + DistanceType* var_; + + + /** + * Array of k-d trees used to find neighbours. + */ + NodePtr* tree_roots_; + + /** + * Pooled memory allocator. + * + * Using a pooled memory allocator is more efficient + * than allocating memory directly when there is a large + * number small of memory allocations. + */ + PooledAllocator pool_; + + Distance distance_; + + +}; // class KDTreeForest + +} + +//! @endcond + +#endif //OPENCV_FLANN_KDTREE_INDEX_H_ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/kdtree_single_index.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/kdtree_single_index.h new file mode 100644 index 00000000..ed95c3db --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/kdtree_single_index.h @@ -0,0 +1,645 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_KDTREE_SINGLE_INDEX_H_ +#define OPENCV_FLANN_KDTREE_SINGLE_INDEX_H_ + +//! 
@cond IGNORED + +#include +#include +#include + +#include "nn_index.h" +#include "matrix.h" +#include "result_set.h" +#include "heap.h" +#include "allocator.h" +#include "random.h" +#include "saving.h" + +namespace cvflann +{ + +struct KDTreeSingleIndexParams : public IndexParams +{ + KDTreeSingleIndexParams(int leaf_max_size = 10, bool reorder = true, int dim = -1) + { + (*this)["algorithm"] = FLANN_INDEX_KDTREE_SINGLE; + (*this)["leaf_max_size"] = leaf_max_size; + (*this)["reorder"] = reorder; + (*this)["dim"] = dim; + } +}; + + +/** + * Randomized kd-tree index + * + * Contains the k-d trees and other information for indexing a set of points + * for nearest-neighbor matching. + */ +template +class KDTreeSingleIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + + /** + * KDTree constructor + * + * Params: + * inputData = dataset with the input features + * params = parameters passed to the kdtree algorithm + */ + KDTreeSingleIndex(const Matrix& inputData, const IndexParams& params = KDTreeSingleIndexParams(), + Distance d = Distance() ) : + dataset_(inputData), index_params_(params), distance_(d) + { + size_ = dataset_.rows; + dim_ = dataset_.cols; + root_node_ = 0; + int dim_param = get_param(params,"dim",-1); + if (dim_param>0) dim_ = dim_param; + leaf_max_size_ = get_param(params,"leaf_max_size",10); + reorder_ = get_param(params,"reorder",true); + + // Create a permutable array of indices to the input vectors. + vind_.resize(size_); + for (size_t i = 0; i < size_; i++) { + vind_[i] = (int)i; + } + } + + KDTreeSingleIndex(const KDTreeSingleIndex&); + KDTreeSingleIndex& operator=(const KDTreeSingleIndex&); + + /** + * Standard destructor + */ + ~KDTreeSingleIndex() + { + if (reorder_) delete[] data_.data; + } + + /** + * Builds the index + */ + void buildIndex() CV_OVERRIDE + { + computeBoundingBox(root_bbox_); + root_node_ = divideTree(0, (int)size_, root_bbox_ ); // construct the tree + + if (reorder_) { + delete[] data_.data; + data_ = cvflann::Matrix(new ElementType[size_*dim_], size_, dim_); + for (size_t i=0; i& queries, Matrix& indices, Matrix& dists, int knn, const SearchParams& params) CV_OVERRIDE + { + CV_Assert(queries.cols == veclen()); + CV_Assert(indices.rows >= queries.rows); + CV_Assert(dists.rows >= queries.rows); + CV_Assert(int(indices.cols) >= knn); + CV_Assert(int(dists.cols) >= knn); + + KNNSimpleResultSet resultSet(knn); + for (size_t i = 0; i < queries.rows; i++) { + resultSet.init(indices[i], dists[i]); + findNeighbors(resultSet, queries[i], params); + } + } + + IndexParams getParameters() const CV_OVERRIDE + { + return index_params_; + } + + /** + * Find set of nearest neighbors to vec. Their indices are stored inside + * the result object. 
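(The parameter description continues below.) This findNeighbors() first calls computeInitialDistances(), defined further down, which accumulates per dimension how far the query falls outside the root bounding box; that running total is the lower bound carried through the descent. A small sketch of the same idea, with squared per-dimension penalties standing in for the Distance functor's accum_dist:

    #include <cstddef>
    #include <vector>

    struct Interval { double low, high; };

    // Lower-bound initialisation for a single kd-tree search: for every dimension
    // where the query lies outside the root bounding box, add a per-dimension
    // penalty to the running total and remember it in dists[].
    inline double initialBoxDistance(const std::vector<double>& query,
                                     const std::vector<Interval>& rootBox,
                                     std::vector<double>& dists)   // one slot per dimension
    {
        double total = 0.0;
        for (std::size_t i = 0; i < query.size(); ++i) {
            dists[i] = 0.0;
            if (query[i] < rootBox[i].low) {
                const double d = query[i] - rootBox[i].low;
                dists[i] = d * d;                                  // squared (L2-style) penalty
            } else if (query[i] > rootBox[i].high) {
                const double d = query[i] - rootBox[i].high;
                dists[i] = d * d;
            }
            total += dists[i];
        }
        return total;
    }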
+ * + * Params: + * result = the result object in which the indices of the nearest-neighbors are stored + * vec = the vector for which to search the nearest neighbors + * maxCheck = the maximum number of restarts (in a best-bin-first manner) + */ + void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& searchParams) CV_OVERRIDE + { + float epsError = 1+get_param(searchParams,"eps",0.0f); + + std::vector dists(dim_,0); + DistanceType distsq = computeInitialDistances(vec, dists); + searchLevel(result, vec, root_node_, distsq, dists, epsError); + } + +private: + + + /*--------------------- Internal Data Structures --------------------------*/ + struct Node + { + /** + * Indices of points in leaf node + */ + int left, right; + /** + * Dimension used for subdivision. + */ + int divfeat; + /** + * The values used for subdivision. + */ + DistanceType divlow, divhigh; + /** + * The child nodes. + */ + Node* child1, * child2; + }; + typedef Node* NodePtr; + + + struct Interval + { + DistanceType low, high; + }; + + typedef std::vector BoundingBox; + + typedef BranchStruct BranchSt; + typedef BranchSt* Branch; + + + + + void save_tree(FILE* stream, NodePtr tree) + { + save_value(stream, *tree); + if (tree->child1!=NULL) { + save_tree(stream, tree->child1); + } + if (tree->child2!=NULL) { + save_tree(stream, tree->child2); + } + } + + + void load_tree(FILE* stream, NodePtr& tree) + { + tree = pool_.allocate(); + load_value(stream, *tree); + if (tree->child1!=NULL) { + load_tree(stream, tree->child1); + } + if (tree->child2!=NULL) { + load_tree(stream, tree->child2); + } + } + + + void computeBoundingBox(BoundingBox& bbox) + { + bbox.resize(dim_); + for (size_t i=0; ibbox[i].high) bbox[i].high = (DistanceType)dataset_[k][i]; + } + } + } + + + /** + * Create a tree node that subdivides the list of vecs from vind[first] + * to vind[last]. The routine is called recursively on each sublist. + * Place a pointer to this new tree node in the location pTree. + * + * Params: pTree = the new node to create + * first = index of the first vector + * last = index of the last vector + */ + NodePtr divideTree(int left, int right, BoundingBox& bbox) + { + NodePtr node = pool_.allocate(); // allocate memory + + /* If too few exemplars remain, then make this a leaf node. */ + if ( (right-left) <= leaf_max_size_) { + node->child1 = node->child2 = NULL; /* Mark as leaf node. 
*/ + node->left = left; + node->right = right; + + // compute bounding-box of leaf points + for (size_t i=0; idataset_[vind_[k]][i]) bbox[i].low=(DistanceType)dataset_[vind_[k]][i]; + if (bbox[i].highdivfeat = cutfeat; + + BoundingBox left_bbox(bbox); + left_bbox[cutfeat].high = cutval; + node->child1 = divideTree(left, left+idx, left_bbox); + + BoundingBox right_bbox(bbox); + right_bbox[cutfeat].low = cutval; + node->child2 = divideTree(left+idx, right, right_bbox); + + node->divlow = left_bbox[cutfeat].high; + node->divhigh = right_bbox[cutfeat].low; + + for (size_t i=0; imax_elem) max_elem = val; + } + } + + void middleSplit(int* ind, int count, int& index, int& cutfeat, DistanceType& cutval, const BoundingBox& bbox) + { + // find the largest span from the approximate bounding box + ElementType max_span = bbox[0].high-bbox[0].low; + cutfeat = 0; + cutval = (bbox[0].high+bbox[0].low)/2; + for (size_t i=1; imax_span) { + max_span = span; + cutfeat = i; + cutval = (bbox[i].high+bbox[i].low)/2; + } + } + + // compute exact span on the found dimension + ElementType min_elem, max_elem; + computeMinMax(ind, count, cutfeat, min_elem, max_elem); + cutval = (min_elem+max_elem)/2; + max_span = max_elem - min_elem; + + // check if a dimension of a largest span exists + size_t k = cutfeat; + for (size_t i=0; imax_span) { + computeMinMax(ind, count, i, min_elem, max_elem); + span = max_elem - min_elem; + if (span>max_span) { + max_span = span; + cutfeat = i; + cutval = (min_elem+max_elem)/2; + } + } + } + int lim1, lim2; + planeSplit(ind, count, cutfeat, cutval, lim1, lim2); + + if (lim1>count/2) index = lim1; + else if (lim2max_span) { + max_span = span; + } + } + DistanceType max_spread = -1; + cutfeat = 0; + for (size_t i=0; i(DistanceType)((1-EPS)*max_span)) { + ElementType min_elem, max_elem; + computeMinMax(ind, count, (int)i, min_elem, max_elem); + DistanceType spread = (DistanceType)(max_elem-min_elem); + if (spread>max_spread) { + cutfeat = (int)i; + max_spread = spread; + } + } + } + // split in the middle + DistanceType split_val = (bbox[cutfeat].low+bbox[cutfeat].high)/2; + ElementType min_elem, max_elem; + computeMinMax(ind, count, cutfeat, min_elem, max_elem); + + if (split_valmax_elem) cutval = (DistanceType)max_elem; + else cutval = split_val; + + int lim1, lim2; + planeSplit(ind, count, cutfeat, cutval, lim1, lim2); + + if (lim1>count/2) index = lim1; + else if (lim2cutval + */ + void planeSplit(int* ind, int count, int cutfeat, DistanceType cutval, int& lim1, int& lim2) + { + /* Move vector indices for left subtree to front of list. */ + int left = 0; + int right = count-1; + for (;; ) { + while (left<=right && dataset_[ind[left]][cutfeat]=cutval) --right; + if (left>right) break; + std::swap(ind[left], ind[right]); ++left; --right; + } + /* If either list is empty, it means that all remaining features + * are identical. Split in the middle to maintain a balanced tree. 
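(The remainder of planeSplit() follows below.) Written out over a plain scalar array, the two-pass partition it performs looks like the sketch here: after the call, indices are grouped into values below the cut, values equal to the cut, and values above it, with lim1 and lim2 marking the boundaries:

    #include <utility>
    #include <vector>

    // Three-way partition in the style of planeSplit():
    //   values[ind[0    .. lim1-1]] <  cutval
    //   values[ind[lim1 .. lim2-1]] == cutval
    //   values[ind[lim2 .. n-1   ]] >  cutval
    inline void planeSplitSketch(std::vector<int>& ind, const std::vector<float>& values,
                                 float cutval, int& lim1, int& lim2)
    {
        int left = 0, right = (int)ind.size() - 1;

        // Pass 1: move everything strictly below cutval to the front.
        for (;;) {
            while (left <= right && values[ind[left]] < cutval) ++left;
            while (left <= right && values[ind[right]] >= cutval) --right;
            if (left > right) break;
            std::swap(ind[left], ind[right]); ++left; --right;
        }
        lim1 = left;

        // Pass 2: among the remaining entries, move the ones equal to cutval next.
        right = (int)ind.size() - 1;
        for (;;) {
            while (left <= right && values[ind[left]] <= cutval) ++left;
            while (left <= right && values[ind[right]] > cutval) --right;
            if (left > right) break;
            std::swap(ind[left], ind[right]); ++left; --right;
        }
        lim2 = left;
    }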
+ */ + lim1 = left; + right = count-1; + for (;; ) { + while (left<=right && dataset_[ind[left]][cutfeat]<=cutval) ++left; + while (left<=right && dataset_[ind[right]][cutfeat]>cutval) --right; + if (left>right) break; + std::swap(ind[left], ind[right]); ++left; --right; + } + lim2 = left; + } + + DistanceType computeInitialDistances(const ElementType* vec, std::vector& dists) + { + DistanceType distsq = 0.0; + + for (size_t i = 0; i < dim_; ++i) { + if (vec[i] < root_bbox_[i].low) { + dists[i] = distance_.accum_dist(vec[i], root_bbox_[i].low, (int)i); + distsq += dists[i]; + } + if (vec[i] > root_bbox_[i].high) { + dists[i] = distance_.accum_dist(vec[i], root_bbox_[i].high, (int)i); + distsq += dists[i]; + } + } + + return distsq; + } + + /** + * Performs an exact search in the tree starting from a node. + */ + void searchLevel(ResultSet& result_set, const ElementType* vec, const NodePtr node, DistanceType mindistsq, + std::vector& dists, const float epsError) + { + /* If this is a leaf node, then do check and return. */ + if ((node->child1 == NULL)&&(node->child2 == NULL)) { + DistanceType worst_dist = result_set.worstDist(); + if (reorder_) { + for (int i=node->left; iright; ++i) { + DistanceType dist = distance_(vec, data_[i], dim_, worst_dist); + if (distleft; iright; ++i) { + DistanceType dist = distance_(vec, data_[vind_[i]], dim_, worst_dist); + if (distdivfeat; + ElementType val = vec[idx]; + DistanceType diff1 = val - node->divlow; + DistanceType diff2 = val - node->divhigh; + + NodePtr bestChild; + NodePtr otherChild; + DistanceType cut_dist; + if ((diff1+diff2)<0) { + bestChild = node->child1; + otherChild = node->child2; + cut_dist = distance_.accum_dist(val, node->divhigh, idx); + } + else { + bestChild = node->child2; + otherChild = node->child1; + cut_dist = distance_.accum_dist( val, node->divlow, idx); + } + + /* Call recursively to search next level down. */ + searchLevel(result_set, vec, bestChild, mindistsq, dists, epsError); + + DistanceType dst = dists[idx]; + mindistsq = mindistsq + cut_dist - dst; + dists[idx] = cut_dist; + if (mindistsq*epsError<=result_set.worstDist()) { + searchLevel(result_set, vec, otherChild, mindistsq, dists, epsError); + } + dists[idx] = dst; + } + +private: + + /** + * The dataset used by this index + */ + const Matrix dataset_; + + IndexParams index_params_; + + int leaf_max_size_; + bool reorder_; + + + /** + * Array of indices to vectors in the dataset. + */ + std::vector vind_; + + Matrix data_; + + size_t size_; + size_t dim_; + + /** + * Array of k-d trees used to find neighbours. + */ + NodePtr root_node_; + + BoundingBox root_bbox_; + + /** + * Pooled memory allocator. + * + * Using a pooled memory allocator is more efficient + * than allocating memory directly when there is a large + * number small of memory allocations. + */ + PooledAllocator pool_; + + Distance distance_; +}; // class KDTree + +} + +//! @endcond + +#endif //OPENCV_FLANN_KDTREE_SINGLE_INDEX_H_ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/kmeans_index.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/kmeans_index.h new file mode 100644 index 00000000..10b272da --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/kmeans_index.h @@ -0,0 +1,1832 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. 
+ * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_KMEANS_INDEX_H_ +#define OPENCV_FLANN_KMEANS_INDEX_H_ + +//! @cond IGNORED + +#include +#include +#include +#include + +#include "general.h" +#include "nn_index.h" +#include "dist.h" +#include "matrix.h" +#include "result_set.h" +#include "heap.h" +#include "allocator.h" +#include "random.h" +#include "saving.h" +#include "logger.h" + +#define BITS_PER_CHAR 8 +#define BITS_PER_BASE 2 // for DNA/RNA sequences +#define BASE_PER_CHAR (BITS_PER_CHAR/BITS_PER_BASE) +#define HISTOS_PER_BASE (1< +class KMeansIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + typedef typename Distance::CentersType CentersType; + + typedef typename Distance::is_kdtree_distance is_kdtree_distance; + typedef typename Distance::is_vector_space_distance is_vector_space_distance; + + + + typedef void (KMeansIndex::* centersAlgFunction)(int, int*, int, int*, int&); + + /** + * The function used for choosing the cluster centers. + */ + centersAlgFunction chooseCenters; + + + + /** + * Chooses the initial centers in the k-means clustering in a random manner. + * + * Params: + * k = number of centers + * vecs = the dataset of points + * indices = indices in the dataset + * indices_length = length of indices vector + * + */ + void chooseCentersRandom(int k, int* indices, int indices_length, int* centers, int& centers_length) + { + UniqueRandom r(indices_length); + + int index; + for (index=0; index=0 && rnd < n); + + centers[0] = indices[rnd]; + + int index; + for (index=1; indexbest_val) { + best_val = dist; + best_index = j; + } + } + if (best_index!=-1) { + centers[index] = indices[best_index]; + } + else { + break; + } + } + centers_length = index; + } + + + /** + * Chooses the initial centers in the k-means using the algorithm + * proposed in the KMeans++ paper: + * Arthur, David; Vassilvitskii, Sergei - k-means++: The Advantages of Careful Seeding + * + * Implementation of this function was converted from the one provided in Arthur's code. 
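(The parameter list continues below.) chooseCentersKMeanspp() follows the standard k-means++ recipe: the first center is chosen uniformly at random, and each later center is drawn with probability proportional to the squared distance from a point to its nearest already-chosen center. A compact sketch with Euclidean points and std::mt19937 rather than FLANN's Distance functor and RNG; it omits the numLocalTries refinement used in the body that follows:

    #include <algorithm>
    #include <limits>
    #include <random>
    #include <vector>

    using Point = std::vector<double>;

    inline double sqDist(const Point& a, const Point& b)
    {
        double s = 0.0;
        for (std::size_t i = 0; i < a.size(); ++i) { const double d = a[i] - b[i]; s += d * d; }
        return s;
    }

    // k-means++ seeding sketch: returns the indices of k chosen centers.
    inline std::vector<int> kmeansppSeed(const std::vector<Point>& pts, int k, std::mt19937& rng)
    {
        std::vector<int> centers;
        std::vector<double> closest(pts.size(), std::numeric_limits<double>::max());

        std::uniform_int_distribution<int> uni(0, (int)pts.size() - 1);
        centers.push_back(uni(rng));                       // first center: uniform at random

        while ((int)centers.size() < k) {
            // Update each point's squared distance to its nearest chosen center.
            double total = 0.0;
            for (std::size_t i = 0; i < pts.size(); ++i) {
                closest[i] = std::min(closest[i], sqDist(pts[i], pts[centers.back()]));
                total += closest[i];
            }
            // Draw the next center with probability proportional to closest[i].
            std::uniform_real_distribution<double> real(0.0, total);
            double r = real(rng);
            std::size_t next = 0;
            for (; next + 1 < pts.size(); ++next) {
                if (r <= closest[next]) break;
                r -= closest[next];
            }
            centers.push_back((int)next);
        }
        return centers;
    }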
+ * + * Params: + * k = number of centers + * vecs = the dataset of points + * indices = indices in the dataset + * Returns: + */ + void chooseCentersKMeanspp(int k, int* indices, int indices_length, int* centers, int& centers_length) + { + int n = indices_length; + + double currentPot = 0; + DistanceType* closestDistSq = new DistanceType[n]; + + // Choose one random center and set the closestDistSq values + int index = rand_int(n); + CV_DbgAssert(index >=0 && index < n); + centers[0] = indices[index]; + + for (int i = 0; i < n; i++) { + closestDistSq[i] = distance_(dataset_[indices[i]], dataset_[indices[index]], dataset_.cols); + closestDistSq[i] = ensureSquareDistance( closestDistSq[i] ); + currentPot += closestDistSq[i]; + } + + + const int numLocalTries = 1; + + // Choose each center + int centerCount; + for (centerCount = 1; centerCount < k; centerCount++) { + + // Repeat several trials + double bestNewPot = -1; + int bestNewIndex = -1; + for (int localTrial = 0; localTrial < numLocalTries; localTrial++) { + + // Choose our center - have to be slightly careful to return a valid answer even accounting + // for possible rounding errors + double randVal = rand_double(currentPot); + for (index = 0; index < n-1; index++) { + if (randVal <= closestDistSq[index]) break; + else randVal -= closestDistSq[index]; + } + + // Compute the new potential + double newPot = 0; + for (int i = 0; i < n; i++) { + DistanceType dist = distance_(dataset_[indices[i]], dataset_[indices[index]], dataset_.cols); + newPot += std::min( ensureSquareDistance(dist), closestDistSq[i] ); + } + + // Store the best result + if ((bestNewPot < 0)||(newPot < bestNewPot)) { + bestNewPot = newPot; + bestNewIndex = index; + } + } + + // Add the appropriate center + centers[centerCount] = indices[bestNewIndex]; + currentPot = bestNewPot; + for (int i = 0; i < n; i++) { + DistanceType dist = distance_(dataset_[indices[i]], dataset_[indices[bestNewIndex]], dataset_.cols); + closestDistSq[i] = std::min( ensureSquareDistance(dist), closestDistSq[i] ); + } + } + + centers_length = centerCount; + + delete[] closestDistSq; + } + + + +public: + + flann_algorithm_t getType() const CV_OVERRIDE + { + return FLANN_INDEX_KMEANS; + } + + template + class KMeansDistanceComputer : public cv::ParallelLoopBody + { + public: + KMeansDistanceComputer(Distance _distance, const Matrix& _dataset, + const int _branching, const int* _indices, const CentersContainerType& _dcenters, + const size_t _veclen, std::vector &_new_centroids, + std::vector &_sq_dists) + : distance(_distance) + , dataset(_dataset) + , branching(_branching) + , indices(_indices) + , dcenters(_dcenters) + , veclen(_veclen) + , new_centroids(_new_centroids) + , sq_dists(_sq_dists) + { + } + + void operator()(const cv::Range& range) const CV_OVERRIDE + { + const int begin = range.start; + const int end = range.end; + + for( int i = begin; inew_sq_dist) { + new_centroid = j; + sq_dist = new_sq_dist; + } + } + sq_dists[i] = sq_dist; + new_centroids[i] = new_centroid; + } + } + + private: + Distance distance; + const Matrix& dataset; + const int branching; + const int* indices; + const CentersContainerType& dcenters; + const size_t veclen; + std::vector &new_centroids; + std::vector &sq_dists; + KMeansDistanceComputer& operator=( const KMeansDistanceComputer & ) { return *this; } + }; + + /** + * Index constructor + * + * Params: + * inputData = dataset with the input features + * params = parameters passed to the hierarchical k-means algorithm + */ + KMeansIndex(const Matrix& 
inputData, const IndexParams& params = KMeansIndexParams(), + Distance d = Distance()) + : dataset_(inputData), index_params_(params), root_(NULL), indices_(NULL), distance_(d) + { + memoryCounter_ = 0; + + size_ = dataset_.rows; + veclen_ = dataset_.cols; + + branching_ = get_param(params,"branching",32); + trees_ = get_param(params,"trees",1); + iterations_ = get_param(params,"iterations",11); + if (iterations_<0) { + iterations_ = (std::numeric_limits::max)(); + } + centers_init_ = get_param(params,"centers_init",FLANN_CENTERS_RANDOM); + + if (centers_init_==FLANN_CENTERS_RANDOM) { + chooseCenters = &KMeansIndex::chooseCentersRandom; + } + else if (centers_init_==FLANN_CENTERS_GONZALES) { + chooseCenters = &KMeansIndex::chooseCentersGonzales; + } + else if (centers_init_==FLANN_CENTERS_KMEANSPP) { + chooseCenters = &KMeansIndex::chooseCentersKMeanspp; + } + else { + FLANN_THROW(cv::Error::StsBadArg, "Unknown algorithm for choosing initial centers."); + } + cb_index_ = 0.4f; + + root_ = new KMeansNodePtr[trees_]; + indices_ = new int*[trees_]; + + for (int i=0; i(); + std::memset(root_[i], 0, sizeof(KMeansNode)); + + Distance* dummy = NULL; + computeNodeStatistics(root_[i], indices_[i], (unsigned int)size_, dummy); + + computeClustering(root_[i], indices_[i], (int)size_, branching_,0); + } + } + + + void saveIndex(FILE* stream) CV_OVERRIDE + { + save_value(stream, branching_); + save_value(stream, iterations_); + save_value(stream, memoryCounter_); + save_value(stream, cb_index_); + save_value(stream, trees_); + for (int i=0; i& result, const ElementType* vec, const SearchParams& searchParams) CV_OVERRIDE + { + + const int maxChecks = get_param(searchParams,"checks",32); + + if (maxChecks==FLANN_CHECKS_UNLIMITED) { + findExactNN(root_[0], result, vec); + } + else { + // Priority queue storing intermediate branches in the best-bin-first search + Heap* heap = new Heap((int)size_); + + int checks = 0; + for (int i=0; i= maxChecks) && result.full()) + break; + } + + BranchSt branch; + while (heap->popMin(branch) && (checks& centers) + { + int numClusters = centers.rows; + if (numClusters<1) { + FLANN_THROW(cv::Error::StsBadArg, "Number of clusters must be at least 1"); + } + + DistanceType variance; + KMeansNodePtr* clusters = new KMeansNodePtr[numClusters]; + + int clusterCount = getMinVarianceClusters(root_[0], clusters, numClusters, variance); + + Logger::info("Clusters requested: %d, returning %d\n",numClusters, clusterCount); + + for (int i=0; ipivot; + for (size_t j=0; j BranchSt; + + + + + void save_tree(FILE* stream, KMeansNodePtr node, int num) + { + save_value(stream, *node); + save_value(stream, *(node->pivot), (int)veclen_); + if (node->childs==NULL) { + int indices_offset = (int)(node->indices - indices_[num]); + save_value(stream, indices_offset); + } + else { + for(int i=0; ichilds[i], num); + } + } + } + + + void load_tree(FILE* stream, KMeansNodePtr& node, int num) + { + node = pool_.allocate(); + load_value(stream, *node); + node->pivot = new CentersType[veclen_]; + load_value(stream, *(node->pivot), (int)veclen_); + if (node->childs==NULL) { + int indices_offset; + load_value(stream, indices_offset); + node->indices = indices_[num] + indices_offset; + } + else { + node->childs = pool_.allocate(branching_); + for(int i=0; ichilds[i], num); + } + } + } + + + /** + * Helper function + */ + void free_centers(KMeansNodePtr node) + { + delete[] node->pivot; + if (node->childs!=NULL) { + for (int k=0; kchilds[k]); + } + } + } + + void free_centers() + { + if (root_ != NULL) { + 
for(int i=0; i(), veclen_); + } + float length = static_cast(indices_length); + for (size_t j=0; j( mean[j] / static_cast(indices_length) ); + } + variance /= static_cast( length ); + variance -= distance_(mean, ZeroIterator(), veclen_); + + DistanceType radius = 0; + for (unsigned int i=0; iradius) { + radius = tmp; + } + } + + node->variance = variance; + node->radius = radius; + node->pivot = mean; + } + + + void computeBitfieldNodeStatistics(KMeansNodePtr node, int* indices, + unsigned int indices_length) + { + const unsigned int accumulator_veclen = static_cast( + veclen_*sizeof(CentersType)*BITS_PER_CHAR); + + unsigned long long variance = 0ull; + CentersType* mean = new CentersType[veclen_]; + memoryCounter_ += int(veclen_*sizeof(CentersType)); + unsigned int* mean_accumulator = new unsigned int[accumulator_veclen]; + + memset(mean_accumulator, 0, sizeof(unsigned int)*accumulator_veclen); + + for (unsigned int i=0; i( ensureSquareDistance( + distance_(dataset_[indices[i]], ZeroIterator(), veclen_))); + unsigned char* vec = (unsigned char*)dataset_[indices[i]]; + for (size_t k=0, l=0; k>1) & 0x01; + mean_accumulator[k+2] += (vec[l]>>2) & 0x01; + mean_accumulator[k+3] += (vec[l]>>3) & 0x01; + mean_accumulator[k+4] += (vec[l]>>4) & 0x01; + mean_accumulator[k+5] += (vec[l]>>5) & 0x01; + mean_accumulator[k+6] += (vec[l]>>6) & 0x01; + mean_accumulator[k+7] += (vec[l]>>7) & 0x01; + } + } + double cnt = static_cast(indices_length); + unsigned char* char_mean = (unsigned char*)mean; + for (size_t k=0, l=0; k( + (((int)(0.5 + (double)(mean_accumulator[k]) / cnt))) + | (((int)(0.5 + (double)(mean_accumulator[k+1]) / cnt))<<1) + | (((int)(0.5 + (double)(mean_accumulator[k+2]) / cnt))<<2) + | (((int)(0.5 + (double)(mean_accumulator[k+3]) / cnt))<<3) + | (((int)(0.5 + (double)(mean_accumulator[k+4]) / cnt))<<4) + | (((int)(0.5 + (double)(mean_accumulator[k+5]) / cnt))<<5) + | (((int)(0.5 + (double)(mean_accumulator[k+6]) / cnt))<<6) + | (((int)(0.5 + (double)(mean_accumulator[k+7]) / cnt))<<7)); + } + variance = static_cast( + 0.5 + static_cast(variance) / static_cast(indices_length)); + variance -= static_cast( + ensureSquareDistance( + distance_(mean, ZeroIterator(), veclen_))); + + DistanceType radius = 0; + for (unsigned int i=0; iradius) { + radius = tmp; + } + } + + node->variance = static_cast(variance); + node->radius = radius; + node->pivot = mean; + + delete[] mean_accumulator; + } + + + void computeDnaNodeStatistics(KMeansNodePtr node, int* indices, + unsigned int indices_length) + { + const unsigned int histos_veclen = static_cast( + veclen_*sizeof(CentersType)*(HISTOS_PER_BASE*BASE_PER_CHAR)); + + unsigned long long variance = 0ull; + unsigned int* histograms = new unsigned int[histos_veclen]; + memset(histograms, 0, sizeof(unsigned int)*histos_veclen); + + for (unsigned int i=0; i( ensureSquareDistance( + distance_(dataset_[indices[i]], ZeroIterator(), veclen_))); + + unsigned char* vec = (unsigned char*)dataset_[indices[i]]; + for (size_t k=0, l=0; k>2) & 0x03)]++; + histograms[k + 8 + ((vec[l]>>4) & 0x03)]++; + histograms[k +12 + ((vec[l]>>6) & 0x03)]++; + } + } + + CentersType* mean = new CentersType[veclen_]; + memoryCounter_ += int(veclen_*sizeof(CentersType)); + unsigned char* char_mean = (unsigned char*)mean; + unsigned int* h = histograms; + for (size_t k=0, l=0; k h[k+1] ? h[k+2] > h[k+3] ? h[k] > h[k+2] ? 0x00 : 0x10 + : h[k] > h[k+3] ? 0x00 : 0x11 + : h[k+2] > h[k+3] ? h[k+1] > h[k+2] ? 0x01 : 0x10 + : h[k+1] > h[k+3] ? 0x01 : 0x11) + | (h[k+4]>h[k+5] ? h[k+6] > h[k+7] ? 
h[k+4] > h[k+6] ? 0x00 : 0x1000 + : h[k+4] > h[k+7] ? 0x00 : 0x1100 + : h[k+6] > h[k+7] ? h[k+5] > h[k+6] ? 0x0100 : 0x1000 + : h[k+5] > h[k+7] ? 0x0100 : 0x1100) + | (h[k+8]>h[k+9] ? h[k+10]>h[k+11] ? h[k+8] >h[k+10] ? 0x00 : 0x100000 + : h[k+8] >h[k+11] ? 0x00 : 0x110000 + : h[k+10]>h[k+11] ? h[k+9] >h[k+10] ? 0x010000 : 0x100000 + : h[k+9] >h[k+11] ? 0x010000 : 0x110000) + | (h[k+12]>h[k+13] ? h[k+14]>h[k+15] ? h[k+12] >h[k+14] ? 0x00 : 0x10000000 + : h[k+12] >h[k+15] ? 0x00 : 0x11000000 + : h[k+14]>h[k+15] ? h[k+13] >h[k+14] ? 0x01000000 : 0x10000000 + : h[k+13] >h[k+15] ? 0x01000000 : 0x11000000); + } + variance = static_cast( + 0.5 + static_cast(variance) / static_cast(indices_length)); + variance -= static_cast( + ensureSquareDistance( + distance_(mean, ZeroIterator(), veclen_))); + + DistanceType radius = 0; + for (unsigned int i=0; iradius) { + radius = tmp; + } + } + + node->variance = static_cast(variance); + node->radius = radius; + node->pivot = mean; + + delete[] histograms; + } + + + template + void computeNodeStatistics(KMeansNodePtr node, int* indices, + unsigned int indices_length, + const DistType* identifier) + { + (void)identifier; + computeNodeStatistics(node, indices, indices_length); + } + + void computeNodeStatistics(KMeansNodePtr node, int* indices, + unsigned int indices_length, + const cvflann::HammingLUT* identifier) + { + (void)identifier; + computeBitfieldNodeStatistics(node, indices, indices_length); + } + + void computeNodeStatistics(KMeansNodePtr node, int* indices, + unsigned int indices_length, + const cvflann::Hamming* identifier) + { + (void)identifier; + computeBitfieldNodeStatistics(node, indices, indices_length); + } + + void computeNodeStatistics(KMeansNodePtr node, int* indices, + unsigned int indices_length, + const cvflann::Hamming2* identifier) + { + (void)identifier; + computeBitfieldNodeStatistics(node, indices, indices_length); + } + + void computeNodeStatistics(KMeansNodePtr node, int* indices, + unsigned int indices_length, + const cvflann::DNAmmingLUT* identifier) + { + (void)identifier; + computeDnaNodeStatistics(node, indices, indices_length); + } + + void computeNodeStatistics(KMeansNodePtr node, int* indices, + unsigned int indices_length, + const cvflann::DNAmming2* identifier) + { + (void)identifier; + computeDnaNodeStatistics(node, indices, indices_length); + } + + + void refineClustering(int* indices, int indices_length, int branching, CentersType** centers, + std::vector& radiuses, int* belongs_to, int* count) + { + cv::AutoBuffer dcenters_buf(branching*veclen_); + Matrix dcenters(dcenters_buf.data(), branching, veclen_); + + bool converged = false; + int iteration = 0; + while (!converged && iteration new_centroids(indices_length); + std::vector sq_dists(indices_length); + + // reassign points to clusters + KMeansDistanceComputer > invoker( + distance_, dataset_, branching, indices, dcenters, veclen_, new_centroids, sq_dists); + parallel_for_(cv::Range(0, (int)indices_length), invoker); + + for (int i=0; i < (int)indices_length; ++i) { + DistanceType sq_dist(sq_dists[i]); + int new_centroid(new_centroids[i]); + if (sq_dist > radiuses[new_centroid]) { + radiuses[new_centroid] = sq_dist; + } + if (new_centroid != belongs_to[i]) { + count[belongs_to[i]]--; + count[new_centroid]++; + belongs_to[i] = new_centroid; + converged = false; + } + } + + for (int i=0; i& radiuses, int* belongs_to, int* count) + { + for (int i=0; i( + veclen_*sizeof(ElementType)*BITS_PER_CHAR); + cv::AutoBuffer dcenters_buf(branching*accumulator_veclen); + 
Matrix dcenters(dcenters_buf.data(), branching, accumulator_veclen); + + bool converged = false; + int iteration = 0; + while (!converged && iteration>1) & 0x01; + dcenter[k+2] += (vec[l]>>2) & 0x01; + dcenter[k+3] += (vec[l]>>3) & 0x01; + dcenter[k+4] += (vec[l]>>4) & 0x01; + dcenter[k+5] += (vec[l]>>5) & 0x01; + dcenter[k+6] += (vec[l]>>6) & 0x01; + dcenter[k+7] += (vec[l]>>7) & 0x01; + } + } + for (int i=0; i(count[i]); + unsigned int* dcenter = dcenters[i]; + unsigned char* charCenter = (unsigned char*)centers[i]; + for (size_t k=0, l=0; k( + (((int)(0.5 + (double)(dcenter[k]) / cnt))) + | (((int)(0.5 + (double)(dcenter[k+1]) / cnt))<<1) + | (((int)(0.5 + (double)(dcenter[k+2]) / cnt))<<2) + | (((int)(0.5 + (double)(dcenter[k+3]) / cnt))<<3) + | (((int)(0.5 + (double)(dcenter[k+4]) / cnt))<<4) + | (((int)(0.5 + (double)(dcenter[k+5]) / cnt))<<5) + | (((int)(0.5 + (double)(dcenter[k+6]) / cnt))<<6) + | (((int)(0.5 + (double)(dcenter[k+7]) / cnt))<<7)); + } + } + + std::vector new_centroids(indices_length); + std::vector dists(indices_length); + + // reassign points to clusters + KMeansDistanceComputer invoker( + distance_, dataset_, branching, indices, centers, veclen_, new_centroids, dists); + parallel_for_(cv::Range(0, (int)indices_length), invoker); + + for (int i=0; i < indices_length; ++i) { + DistanceType dist(dists[i]); + int new_centroid(new_centroids[i]); + if (dist > radiuses[new_centroid]) { + radiuses[new_centroid] = dist; + } + if (new_centroid != belongs_to[i]) { + count[belongs_to[i]]--; + count[new_centroid]++; + belongs_to[i] = new_centroid; + converged = false; + } + } + + for (int i=0; i& radiuses, int* belongs_to, int* count) + { + for (int i=0; i( + veclen_*sizeof(CentersType)*(HISTOS_PER_BASE*BASE_PER_CHAR)); + cv::AutoBuffer histos_buf(branching*histos_veclen); + Matrix histos(histos_buf.data(), branching, histos_veclen); + + bool converged = false; + int iteration = 0; + while (!converged && iteration>2) & 0x03)]++; + h[k + 8 + ((vec[l]>>4) & 0x03)]++; + h[k +12 + ((vec[l]>>6) & 0x03)]++; + } + } + for (int i=0; i h[k+1] ? h[k+2] > h[k+3] ? h[k] > h[k+2] ? 0x00 : 0x10 + : h[k] > h[k+3] ? 0x00 : 0x11 + : h[k+2] > h[k+3] ? h[k+1] > h[k+2] ? 0x01 : 0x10 + : h[k+1] > h[k+3] ? 0x01 : 0x11) + | (h[k+4]>h[k+5] ? h[k+6] > h[k+7] ? h[k+4] > h[k+6] ? 0x00 : 0x1000 + : h[k+4] > h[k+7] ? 0x00 : 0x1100 + : h[k+6] > h[k+7] ? h[k+5] > h[k+6] ? 0x0100 : 0x1000 + : h[k+5] > h[k+7] ? 0x0100 : 0x1100) + | (h[k+8]>h[k+9] ? h[k+10]>h[k+11] ? h[k+8] >h[k+10] ? 0x00 : 0x100000 + : h[k+8] >h[k+11] ? 0x00 : 0x110000 + : h[k+10]>h[k+11] ? h[k+9] >h[k+10] ? 0x010000 : 0x100000 + : h[k+9] >h[k+11] ? 0x010000 : 0x110000) + | (h[k+12]>h[k+13] ? h[k+14]>h[k+15] ? h[k+12] >h[k+14] ? 0x00 : 0x10000000 + : h[k+12] >h[k+15] ? 0x00 : 0x11000000 + : h[k+14]>h[k+15] ? h[k+13] >h[k+14] ? 0x01000000 : 0x10000000 + : h[k+13] >h[k+15] ? 
0x01000000 : 0x11000000); + } + } + + std::vector new_centroids(indices_length); + std::vector dists(indices_length); + + // reassign points to clusters + KMeansDistanceComputer invoker( + distance_, dataset_, branching, indices, centers, veclen_, new_centroids, dists); + parallel_for_(cv::Range(0, (int)indices_length), invoker); + + for (int i=0; i < indices_length; ++i) { + DistanceType dist(dists[i]); + int new_centroid(new_centroids[i]); + if (dist > radiuses[new_centroid]) { + radiuses[new_centroid] = dist; + } + if (new_centroid != belongs_to[i]) { + count[belongs_to[i]]--; + count[new_centroid]++; + belongs_to[i] = new_centroid; + converged = false; + } + } + + for (int i=0; i& radiuses, int* belongs_to, int* count) + { + // compute kmeans clustering for each of the resulting clusters + node->childs = pool_.allocate(branching); + int start = 0; + int end = start; + for (int c=0; c(), veclen_); + variance += d; + mean_radius += static_cast( sqrt(d) ); + std::swap(indices[i],indices[end]); + std::swap(belongs_to[i],belongs_to[end]); + end++; + } + } + variance /= s; + mean_radius /= s; + variance -= distance_(centers[c], ZeroIterator(), veclen_); + + node->childs[c] = pool_.allocate(); + std::memset(node->childs[c], 0, sizeof(KMeansNode)); + node->childs[c]->radius = radiuses[c]; + node->childs[c]->pivot = centers[c]; + node->childs[c]->variance = variance; + node->childs[c]->mean_radius = mean_radius; + computeClustering(node->childs[c],indices+start, end-start, branching, level+1); + start=end; + } + } + + + void computeAnyBitfieldSubClustering(KMeansNodePtr node, int* indices, int indices_length, + int branching, int level, CentersType** centers, + std::vector& radiuses, int* belongs_to, int* count) + { + // compute kmeans clustering for each of the resulting clusters + node->childs = pool_.allocate(branching); + int start = 0; + int end = start; + for (int c=0; c(), veclen_); + variance += static_cast( ensureSquareDistance(d) ); + mean_radius += ensureSimpleDistance(d); + std::swap(indices[i],indices[end]); + std::swap(belongs_to[i],belongs_to[end]); + end++; + } + } + mean_radius = static_cast( + 0.5f + static_cast(mean_radius) / static_cast(s)); + variance = static_cast( + 0.5 + static_cast(variance) / static_cast(s)); + variance -= static_cast( + ensureSquareDistance( + distance_(centers[c], ZeroIterator(), veclen_))); + + node->childs[c] = pool_.allocate(); + std::memset(node->childs[c], 0, sizeof(KMeansNode)); + node->childs[c]->radius = radiuses[c]; + node->childs[c]->pivot = centers[c]; + node->childs[c]->variance = static_cast(variance); + node->childs[c]->mean_radius = mean_radius; + computeClustering(node->childs[c],indices+start, end-start, branching, level+1); + start=end; + } + } + + + template + void refineAndSplitClustering( + KMeansNodePtr node, int* indices, int indices_length, int branching, + int level, CentersType** centers, std::vector& radiuses, + int* belongs_to, int* count, const DistType* identifier) + { + (void)identifier; + refineClustering(indices, indices_length, branching, centers, radiuses, belongs_to, count); + + computeSubClustering(node, indices, indices_length, branching, + level, centers, radiuses, belongs_to, count); + } + + + /** + * The methods responsible with doing the recursive hierarchical clustering on + * binary vectors. + * As some might have heard that KMeans on binary data doesn't make sense, + * it's worth a little explanation why it actually fairly works. 
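(The explanation resumes below.) The short version: a "center" of bit-packed descriptors is formed by a per-bit majority vote, which is what the bit-count accumulators in computeBitfieldNodeStatistics() and refineBitfieldClustering() compute. A stand-alone sketch of that vote over std::vector<unsigned char> descriptors, assuming they all have the same byte length:

    #include <cstddef>
    #include <vector>

    // Per-bit majority vote: set a bit in the "center" if at least half of the
    // descriptors have it set. All descriptors are packed bitfields of equal length.
    inline std::vector<unsigned char>
    bitMajorityCenter(const std::vector<std::vector<unsigned char>>& descriptors)
    {
        const std::size_t bytes = descriptors[0].size();
        std::vector<unsigned int> ones(bytes * 8, 0);

        for (const auto& d : descriptors)
            for (std::size_t b = 0; b < bytes; ++b)
                for (int bit = 0; bit < 8; ++bit)
                    ones[b * 8 + bit] += (d[b] >> bit) & 0x01;

        std::vector<unsigned char> center(bytes, 0);
        const std::size_t half = (descriptors.size() + 1) / 2;   // majority threshold
        for (std::size_t b = 0; b < bytes; ++b)
            for (int bit = 0; bit < 8; ++bit)
                if (ones[b * 8 + bit] >= half)
                    center[b] |= (unsigned char)(1u << bit);
        return center;
    }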
As + * with the Hierarchical Clustering algortihm, we seed several centers for the + * current node by picking some of its points. Then in a first pass each point + * of the node is then related to its closest center. Now let's have a look at + * the 5 central dimensions of the 9 following points: + * + * xxxxxx11100xxxxx (1) + * xxxxxx11010xxxxx (2) + * xxxxxx11001xxxxx (3) + * xxxxxx10110xxxxx (4) + * xxxxxx10101xxxxx (5) + * xxxxxx10011xxxxx (6) + * xxxxxx01110xxxxx (7) + * xxxxxx01101xxxxx (8) + * xxxxxx01011xxxxx (9) + * sum _____ + * of 1: 66555 + * + * Even if the barycenter notion doesn't apply, we can set a center + * xxxxxx11111xxxxx that will better fit the five dimensions we are focusing + * on for these points. + * + * Note that convergence isn't ensured anymore. In practice, using Gonzales + * as seeding algorithm should be fine for getting convergence ("iterations" + * value can be set to -1). But with KMeans++ seeding you should definitely + * set a maximum number of iterations (but make it higher than the "iterations" + * default value of 11). + * + * Params: + * node = the node to cluster + * indices = indices of the points belonging to the current node + * indices_length = number of points in the current node + * branching = the branching factor to use in the clustering + * level = 0 for the root node, it increases with the subdivision levels + * centers = clusters centers to compute + * radiuses = radiuses of clusters + * belongs_to = LookUp Table returning, for a given indice id, the center id it belongs to + * count = array storing the number of indices for a given center id + * identifier = dummy pointer on an instance of Distance (use to branch correctly among templates) + */ + void refineAndSplitClustering( + KMeansNodePtr node, int* indices, int indices_length, int branching, + int level, CentersType** centers, std::vector& radiuses, + int* belongs_to, int* count, const cvflann::HammingLUT* identifier) + { + (void)identifier; + refineBitfieldClustering( + indices, indices_length, branching, centers, radiuses, belongs_to, count); + + computeAnyBitfieldSubClustering(node, indices, indices_length, branching, + level, centers, radiuses, belongs_to, count); + } + + + void refineAndSplitClustering( + KMeansNodePtr node, int* indices, int indices_length, int branching, + int level, CentersType** centers, std::vector& radiuses, + int* belongs_to, int* count, const cvflann::Hamming* identifier) + { + (void)identifier; + refineBitfieldClustering( + indices, indices_length, branching, centers, radiuses, belongs_to, count); + + computeAnyBitfieldSubClustering(node, indices, indices_length, branching, + level, centers, radiuses, belongs_to, count); + } + + + void refineAndSplitClustering( + KMeansNodePtr node, int* indices, int indices_length, int branching, + int level, CentersType** centers, std::vector& radiuses, + int* belongs_to, int* count, const cvflann::Hamming2* identifier) + { + (void)identifier; + refineBitfieldClustering( + indices, indices_length, branching, centers, radiuses, belongs_to, count); + + computeAnyBitfieldSubClustering(node, indices, indices_length, branching, + level, centers, radiuses, belongs_to, count); + } + + + void refineAndSplitClustering( + KMeansNodePtr node, int* indices, int indices_length, int branching, + int level, CentersType** centers, std::vector& radiuses, + int* belongs_to, int* count, const cvflann::DNAmmingLUT* identifier) + { + (void)identifier; + refineDnaClustering( + indices, indices_length, branching, centers, radiuses, 
belongs_to, count); + + computeAnyBitfieldSubClustering(node, indices, indices_length, branching, + level, centers, radiuses, belongs_to, count); + } + + + void refineAndSplitClustering( + KMeansNodePtr node, int* indices, int indices_length, int branching, + int level, CentersType** centers, std::vector& radiuses, + int* belongs_to, int* count, const cvflann::DNAmming2* identifier) + { + (void)identifier; + refineDnaClustering( + indices, indices_length, branching, centers, radiuses, belongs_to, count); + + computeAnyBitfieldSubClustering(node, indices, indices_length, branching, + level, centers, radiuses, belongs_to, count); + } + + + /** + * The method responsible with actually doing the recursive hierarchical + * clustering + * + * Params: + * node = the node to cluster + * indices = indices of the points belonging to the current node + * branching = the branching factor to use in the clustering + * + * TODO: for 1-sized clusters don't store a cluster center (it's the same as the single cluster point) + */ + void computeClustering(KMeansNodePtr node, int* indices, int indices_length, int branching, int level) + { + node->size = indices_length; + node->level = level; + + if (indices_length < branching) { + node->indices = indices; + std::sort(node->indices,node->indices+indices_length); + node->childs = NULL; + return; + } + + cv::AutoBuffer centers_idx_buf(branching); + int* centers_idx = centers_idx_buf.data(); + int centers_length; + (this->*chooseCenters)(branching, indices, indices_length, centers_idx, centers_length); + + if (centers_lengthindices = indices; + std::sort(node->indices,node->indices+indices_length); + node->childs = NULL; + return; + } + + + std::vector radiuses(branching); + cv::AutoBuffer count_buf(branching); + int* count = count_buf.data(); + for (int i=0; i belongs_to_buf(indices_length); + int* belongs_to = belongs_to_buf.data(); + for (int i=0; inew_sq_dist) { + belongs_to[i] = j; + sq_dist = new_sq_dist; + } + } + if (sq_dist>radiuses[belongs_to[i]]) { + radiuses[belongs_to[i]] = sq_dist; + } + count[belongs_to[i]]++; + } + + CentersType** centers = new CentersType*[branching]; + + Distance* dummy = NULL; + refineAndSplitClustering(node, indices, indices_length, branching, level, + centers, radiuses, belongs_to, count, dummy); + + delete[] centers; + } + + + /** + * Performs one descent in the hierarchical k-means tree. The branches not + * visited are stored in a priority queue. 
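(The parameter list of findNN() continues below.) Each descent step picks the child whose pivot is nearest to the query and parks the remaining children on the shared priority queue, with their distances discounted by cb_index * variance so that large clusters are revisited sooner; that is what exploreNodeBranches() does. A rough stand-alone sketch with a simplified node type and a plain function-pointer distance:

    #include <cstddef>
    #include <functional>
    #include <queue>
    #include <vector>

    struct ClusterNode {
        std::vector<double>       pivot;
        double                    variance = 0.0;
        std::vector<ClusterNode*> childs;            // empty => leaf
    };

    struct QueuedBranch {
        ClusterNode* node;
        double       score;
        bool operator>(const QueuedBranch& o) const { return score > o.score; }
    };
    using BranchQueue =
        std::priority_queue<QueuedBranch, std::vector<QueuedBranch>, std::greater<QueuedBranch>>;

    // One descent step: return the closest child, enqueue the others with a score
    // that also credits large clusters (distance - cb_index * variance).
    inline ClusterNode* descendOneLevel(ClusterNode* node, const std::vector<double>& query,
                                        double cb_index, BranchQueue& heap,
                                        double (*dist)(const std::vector<double>&,
                                                       const std::vector<double>&))
    {
        std::size_t best = 0;
        std::vector<double> d(node->childs.size());
        for (std::size_t i = 0; i < node->childs.size(); ++i) {
            d[i] = dist(query, node->childs[i]->pivot);
            if (d[i] < d[best]) best = i;
        }
        for (std::size_t i = 0; i < node->childs.size(); ++i)
            if (i != best)
                heap.push(QueuedBranch{node->childs[i],
                                       d[i] - cb_index * node->childs[i]->variance});
        return node->childs[best];
    }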
+ * + * Params: + * node = node to explore + * result = container for the k-nearest neighbors found + * vec = query points + * checks = how many points in the dataset have been checked so far + * maxChecks = maximum dataset points to checks + */ + + + void findNN(KMeansNodePtr node, ResultSet& result, const ElementType* vec, int& checks, int maxChecks, + Heap* heap) + { + // Ignore those clusters that are too far away + { + DistanceType bsq = distance_(vec, node->pivot, veclen_); + DistanceType rsq = node->radius; + DistanceType wsq = result.worstDist(); + + if (isSquareDistance()) + { + DistanceType val = bsq-rsq-wsq; + if ((val>0) && (val*val > 4*rsq*wsq)) + return; + } + else + { + if (bsq-rsq > wsq) + return; + } + } + + if (node->childs==NULL) { + if ((checks>=maxChecks) && result.full()) { + return; + } + checks += node->size; + for (int i=0; isize; ++i) { + int index = node->indices[i]; + DistanceType dist = distance_(dataset_[index], vec, veclen_); + result.addPoint(dist, index); + } + } + else { + DistanceType* domain_distances = new DistanceType[branching_]; + int closest_center = exploreNodeBranches(node, vec, domain_distances, heap); + delete[] domain_distances; + findNN(node->childs[closest_center],result,vec, checks, maxChecks, heap); + } + } + + /** + * Helper function that computes the nearest childs of a node to a given query point. + * Params: + * node = the node + * q = the query point + * distances = array with the distances to each child node. + * Returns: + */ + int exploreNodeBranches(KMeansNodePtr node, const ElementType* q, DistanceType* domain_distances, Heap* heap) + { + + int best_index = 0; + domain_distances[best_index] = distance_(q, node->childs[best_index]->pivot, veclen_); + for (int i=1; ichilds[i]->pivot, veclen_); + if (domain_distances[i]childs[best_index]->pivot; + for (int i=0; i( + cb_index_*node->childs[i]->variance ); + + // float dist_to_border = getDistanceToBorder(node.childs[i].pivot,best_center,q); + // if (domain_distances[i]insert(BranchSt(node->childs[i],domain_distances[i])); + } + } + + return best_index; + } + + + /** + * Function the performs exact nearest neighbor search by traversing the entire tree. + */ + void findExactNN(KMeansNodePtr node, ResultSet& result, const ElementType* vec) + { + // Ignore those clusters that are too far away + { + DistanceType bsq = distance_(vec, node->pivot, veclen_); + DistanceType rsq = node->radius; + DistanceType wsq = result.worstDist(); + + if (isSquareDistance()) + { + DistanceType val = bsq-rsq-wsq; + if ((val>0) && (val*val > 4*rsq*wsq)) + return; + } + else + { + if (bsq-rsq > wsq) + return; + } + } + + + if (node->childs==NULL) { + for (int i=0; isize; ++i) { + int index = node->indices[i]; + DistanceType dist = distance_(dataset_[index], vec, veclen_); + result.addPoint(dist, index); + } + } + else { + int* sort_indices = new int[branching_]; + + getCenterOrdering(node, vec, sort_indices); + + for (int i=0; ichilds[sort_indices[i]],result,vec); + } + + delete[] sort_indices; + } + } + + + /** + * Helper function. + * + * I computes the order in which to traverse the child nodes of a particular node. 
+ */ + void getCenterOrdering(KMeansNodePtr node, const ElementType* q, int* sort_indices) + { + DistanceType* domain_distances = new DistanceType[branching_]; + for (int i=0; ichilds[i]->pivot, veclen_); + + int j=0; + while (domain_distances[j]j; --k) { + domain_distances[k] = domain_distances[k-1]; + sort_indices[k] = sort_indices[k-1]; + } + domain_distances[j] = dist; + sort_indices[j] = i; + } + delete[] domain_distances; + } + + /** + * Method that computes the squared distance from the query point q + * from inside region with center c to the border between this + * region and the region with center p + */ + DistanceType getDistanceToBorder(DistanceType* p, DistanceType* c, DistanceType* q) + { + DistanceType sum = 0; + DistanceType sum2 = 0; + + for (int i=0; ivariance*root->size; + + while (clusterCount::max)(); + int splitIndex = -1; + + for (int i=0; ichilds != NULL) { + + DistanceType variance = meanVariance - clusters[i]->variance*clusters[i]->size; + + for (int j=0; jchilds[j]->variance*clusters[i]->childs[j]->size; + } + if (variance clusters_length) break; + + meanVariance = minVariance; + + // split node + KMeansNodePtr toSplit = clusters[splitIndex]; + clusters[splitIndex] = toSplit->childs[0]; + for (int i=1; ichilds[i]; + } + } + + varianceValue = meanVariance/root->size; + return clusterCount; + } + +private: + /** The branching factor used in the hierarchical k-means clustering */ + int branching_; + + /** Number of kmeans trees (default is one) */ + int trees_; + + /** Maximum number of iterations to use when performing k-means clustering */ + int iterations_; + + /** Algorithm for choosing the cluster centers */ + flann_centers_init_t centers_init_; + + /** + * Cluster border index. This is used in the tree search phase when determining + * the closest cluster to explore next. A zero value takes into account only + * the cluster centres, a value greater then zero also take into account the size + * of the cluster. + */ + float cb_index_; + + /** + * The dataset used by this index + */ + const Matrix dataset_; + + /** Index parameters */ + IndexParams index_params_; + + /** + * Number of features in the dataset. + */ + size_t size_; + + /** + * Length of each feature. + */ + size_t veclen_; + + /** + * The root node in the tree. + */ + KMeansNodePtr* root_; + + /** + * Array of indices to vectors in the dataset. + */ + int** indices_; + + /** + * The distance + */ + Distance distance_; + + /** + * Pooled memory allocator. + */ + PooledAllocator pool_; + + /** + * Memory occupied by the index. + */ + int memoryCounter_; +}; + +} + +//! @endcond + +#endif //OPENCV_FLANN_KMEANS_INDEX_H_ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/linear_index.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/linear_index.h new file mode 100644 index 00000000..6428c0d7 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/linear_index.h @@ -0,0 +1,135 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_LINEAR_INDEX_H_ +#define OPENCV_FLANN_LINEAR_INDEX_H_ + +//! @cond IGNORED + +#include "nn_index.h" + +namespace cvflann +{ + +struct LinearIndexParams : public IndexParams +{ + LinearIndexParams() + { + (* this)["algorithm"] = FLANN_INDEX_LINEAR; + } +}; + +template +class LinearIndex : public NNIndex +{ +public: + + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + + LinearIndex(const Matrix& inputData, const IndexParams& params = LinearIndexParams(), + Distance d = Distance()) : + dataset_(inputData), index_params_(params), distance_(d) + { + } + + LinearIndex(const LinearIndex&); + LinearIndex& operator=(const LinearIndex&); + + flann_algorithm_t getType() const CV_OVERRIDE + { + return FLANN_INDEX_LINEAR; + } + + + size_t size() const CV_OVERRIDE + { + return dataset_.rows; + } + + size_t veclen() const CV_OVERRIDE + { + return dataset_.cols; + } + + + int usedMemory() const CV_OVERRIDE + { + return 0; + } + + void buildIndex() CV_OVERRIDE + { + /* nothing to do here for linear search */ + } + + void saveIndex(FILE*) CV_OVERRIDE + { + /* nothing to do here for linear search */ + } + + + void loadIndex(FILE*) CV_OVERRIDE + { + /* nothing to do here for linear search */ + + index_params_["algorithm"] = getType(); + } + + void findNeighbors(ResultSet& resultSet, const ElementType* vec, const SearchParams& /*searchParams*/) CV_OVERRIDE + { + ElementType* data = dataset_.data; + for (size_t i = 0; i < dataset_.rows; ++i, data += dataset_.cols) { + DistanceType dist = distance_(data, vec, dataset_.cols); + resultSet.addPoint(dist, (int)i); + } + } + + IndexParams getParameters() const CV_OVERRIDE + { + return index_params_; + } + +private: + /** The dataset */ + const Matrix dataset_; + /** Index parameters */ + IndexParams index_params_; + /** Index distance */ + Distance distance_; + +}; + +} + +//! @endcond + +#endif // OPENCV_FLANN_LINEAR_INDEX_H_ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/logger.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/logger.h new file mode 100644 index 00000000..8911812a --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/logger.h @@ -0,0 +1,139 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). 
All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_LOGGER_H +#define OPENCV_FLANN_LOGGER_H + +//! @cond IGNORED + +#include +#include + +#include "defines.h" + + +namespace cvflann +{ + +class Logger +{ + Logger() : stream(stdout), logLevel(FLANN_LOG_WARN) {} + + ~Logger() + { + if ((stream!=NULL)&&(stream!=stdout)) { + fclose(stream); + } + } + + static Logger& instance() + { + static Logger logger; + return logger; + } + + void _setDestination(const char* name) + { + if (name==NULL) { + stream = stdout; + } + else { +#ifdef _MSC_VER + if (fopen_s(&stream, name, "w") != 0) + stream = NULL; +#else + stream = fopen(name,"w"); +#endif + if (stream == NULL) { + stream = stdout; + } + } + } + + int _log(int level, const char* fmt, va_list arglist) + { + if (level > logLevel ) return -1; + int ret = vfprintf(stream, fmt, arglist); + return ret; + } + +public: + /** + * Sets the logging level. All messages with lower priority will be ignored. + * @param level Logging level + */ + static void setLevel(int level) { instance().logLevel = level; } + + /** + * Sets the logging destination + * @param name Filename or NULL for console + */ + static void setDestination(const char* name) { instance()._setDestination(name); } + + /** + * Print log message + * @param level Log level + * @param fmt Message format + * @return + */ + static int log(int level, const char* fmt, ...) + { + va_list arglist; + va_start(arglist, fmt); + int ret = instance()._log(level,fmt,arglist); + va_end(arglist); + return ret; + } + +#define LOG_METHOD(NAME,LEVEL) \ + static int NAME(const char* fmt, ...) \ + { \ + va_list ap; \ + va_start(ap, fmt); \ + int ret = instance()._log(LEVEL, fmt, ap); \ + va_end(ap); \ + return ret; \ + } + + LOG_METHOD(fatal, FLANN_LOG_FATAL) + LOG_METHOD(error, FLANN_LOG_ERROR) + LOG_METHOD(warn, FLANN_LOG_WARN) + LOG_METHOD(info, FLANN_LOG_INFO) + +private: + FILE* stream; + int logLevel; +}; + +} + +//! 
@endcond + +#endif //OPENCV_FLANN_LOGGER_H diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/lsh_index.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/lsh_index.h new file mode 100644 index 00000000..9ff86b8b --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/lsh_index.h @@ -0,0 +1,403 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +/*********************************************************************** + * Author: Vincent Rabaud + *************************************************************************/ + +#ifndef OPENCV_FLANN_LSH_INDEX_H_ +#define OPENCV_FLANN_LSH_INDEX_H_ + +//! @cond IGNORED + +#include +#include +#include +#include + +#include "nn_index.h" +#include "matrix.h" +#include "result_set.h" +#include "heap.h" +#include "lsh_table.h" +#include "allocator.h" +#include "random.h" +#include "saving.h" + +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable: 4702) //disable unreachable code +#endif + +namespace cvflann +{ + +struct LshIndexParams : public IndexParams +{ + LshIndexParams(unsigned int table_number = 12, unsigned int key_size = 20, unsigned int multi_probe_level = 2) + { + (*this)["algorithm"] = FLANN_INDEX_LSH; + // The number of hash tables to use + (*this)["table_number"] = static_cast(table_number); + // The length of the key in the hash tables + (*this)["key_size"] = static_cast(key_size); + // Number of levels to use in multi-probe (0 for standard LSH) + (*this)["multi_probe_level"] = static_cast(multi_probe_level); + } +}; + +/** + * Locality-sensitive hashing index + * + * Contains the tables and other information for indexing a set of points + * for nearest-neighbor matching. 
+ */ +template +class LshIndex : public NNIndex +{ +public: + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + + /** Constructor + * @param input_data dataset with the input features + * @param params parameters passed to the LSH algorithm + * @param d the distance used + */ + LshIndex(const Matrix& input_data, const IndexParams& params = LshIndexParams(), + Distance d = Distance()) : + dataset_(input_data), index_params_(params), distance_(d) + { + // cv::flann::IndexParams sets integer params as 'int', so it is used with get_param + // in place of 'unsigned int' + table_number_ = get_param(index_params_,"table_number",12); + key_size_ = get_param(index_params_,"key_size",20); + multi_probe_level_ = get_param(index_params_,"multi_probe_level",2); + + feature_size_ = (unsigned)dataset_.cols; + fill_xor_mask(0, key_size_, multi_probe_level_, xor_masks_); + } + + + LshIndex(const LshIndex&); + LshIndex& operator=(const LshIndex&); + + /** + * Builds the index + */ + void buildIndex() CV_OVERRIDE + { + tables_.resize(table_number_); + for (int i = 0; i < table_number_; ++i) { + lsh::LshTable& table = tables_[i]; + table = lsh::LshTable(feature_size_, key_size_); + + // Add the features to the table + table.add(dataset_); + } + } + + flann_algorithm_t getType() const CV_OVERRIDE + { + return FLANN_INDEX_LSH; + } + + + void saveIndex(FILE* stream) CV_OVERRIDE + { + save_value(stream,table_number_); + save_value(stream,key_size_); + save_value(stream,multi_probe_level_); + save_value(stream, dataset_); + } + + void loadIndex(FILE* stream) CV_OVERRIDE + { + load_value(stream, table_number_); + load_value(stream, key_size_); + load_value(stream, multi_probe_level_); + load_value(stream, dataset_); + // Building the index is so fast we can afford not storing it + buildIndex(); + + index_params_["algorithm"] = getType(); + index_params_["table_number"] = table_number_; + index_params_["key_size"] = key_size_; + index_params_["multi_probe_level"] = multi_probe_level_; + } + + /** + * Returns size of index. + */ + size_t size() const CV_OVERRIDE + { + return dataset_.rows; + } + + /** + * Returns the length of an index feature. 
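+ *
+ * (For this index veclen() equals dataset_.cols, i.e. the descriptor length in
+ * ElementType units; for the usual unsigned char case that is the length in
+ * bytes, e.g. 32 for a 256-bit binary descriptor.)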
+ */ + size_t veclen() const CV_OVERRIDE + { + return feature_size_; + } + + /** + * Computes the index memory usage + * Returns: memory used by the index + */ + int usedMemory() const CV_OVERRIDE + { + return (int)(dataset_.rows * sizeof(int)); + } + + + IndexParams getParameters() const CV_OVERRIDE + { + return index_params_; + } + + /** + * \brief Perform k-nearest neighbor search + * \param[in] queries The query points for which to find the nearest neighbors + * \param[out] indices The indices of the nearest neighbors found + * \param[out] dists Distances to the nearest neighbors found + * \param[in] knn Number of nearest neighbors to return + * \param[in] params Search parameters + */ + virtual void knnSearch(const Matrix& queries, Matrix& indices, Matrix& dists, int knn, const SearchParams& params) CV_OVERRIDE + { + CV_Assert(queries.cols == veclen()); + CV_Assert(indices.rows >= queries.rows); + CV_Assert(dists.rows >= queries.rows); + CV_Assert(int(indices.cols) >= knn); + CV_Assert(int(dists.cols) >= knn); + + + KNNUniqueResultSet resultSet(knn); + for (size_t i = 0; i < queries.rows; i++) { + resultSet.clear(); + std::fill_n(indices[i], knn, -1); + std::fill_n(dists[i], knn, std::numeric_limits::max()); + findNeighbors(resultSet, queries[i], params); + if (get_param(params,"sorted",true)) resultSet.sortAndCopy(indices[i], dists[i], knn); + else resultSet.copy(indices[i], dists[i], knn); + } + } + + + /** + * Find set of nearest neighbors to vec. Their indices are stored inside + * the result object. + * + * Params: + * result = the result object in which the indices of the nearest-neighbors are stored + * vec = the vector for which to search the nearest neighbors + * maxCheck = the maximum number of restarts (in a best-bin-first manner) + */ + void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& /*searchParams*/) CV_OVERRIDE + { + getNeighbors(vec, result); + } + +private: + /** Defines the comparator on score and index + */ + typedef std::pair ScoreIndexPair; + struct SortScoreIndexPairOnSecond + { + bool operator()(const ScoreIndexPair& left, const ScoreIndexPair& right) const + { + return left.second < right.second; + } + }; + + /** Fills the different xor masks to use when getting the neighbors in multi-probe LSH + * @param key the key we build neighbors from + * @param lowest_index the lowest index of the bit set + * @param level the multi-probe level we are at + * @param xor_masks all the xor mask + */ + void fill_xor_mask(lsh::BucketKey key, int lowest_index, unsigned int level, + std::vector& xor_masks) + { + xor_masks.push_back(key); + if (level == 0) return; + for (int index = lowest_index - 1; index >= 0; --index) { + // Create a new key + lsh::BucketKey new_key = key | (1 << index); + fill_xor_mask(new_key, index, level - 1, xor_masks); + } + } + + /** Performs the approximate nearest-neighbor search. 
+ * @param vec the feature to analyze + * @param do_radius flag indicating if we check the radius too + * @param radius the radius if it is a radius search + * @param do_k flag indicating if we limit the number of nn + * @param k_nn the number of nearest neighbors + * @param checked_average used for debugging + */ + void getNeighbors(const ElementType* vec, bool /*do_radius*/, float radius, bool do_k, unsigned int k_nn, + float& /*checked_average*/) + { + static std::vector score_index_heap; + + if (do_k) { + unsigned int worst_score = std::numeric_limits::max(); + typename std::vector >::const_iterator table = tables_.begin(); + typename std::vector >::const_iterator table_end = tables_.end(); + for (; table != table_end; ++table) { + size_t key = table->getKey(vec); + std::vector::const_iterator xor_mask = xor_masks_.begin(); + std::vector::const_iterator xor_mask_end = xor_masks_.end(); + for (; xor_mask != xor_mask_end; ++xor_mask) { + size_t sub_key = key ^ (*xor_mask); + const lsh::Bucket* bucket = table->getBucketFromKey(sub_key); + if (bucket == 0) continue; + + // Go over each descriptor index + std::vector::const_iterator training_index = bucket->begin(); + std::vector::const_iterator last_training_index = bucket->end(); + DistanceType hamming_distance; + + // Process the rest of the candidates + for (; training_index < last_training_index; ++training_index) { + hamming_distance = distance_(vec, dataset_[*training_index], dataset_.cols); + + if (hamming_distance < worst_score) { + // Insert the new element + score_index_heap.push_back(ScoreIndexPair(hamming_distance, training_index)); + std::push_heap(score_index_heap.begin(), score_index_heap.end()); + + if (score_index_heap.size() > (unsigned int)k_nn) { + // Remove the highest distance value as we have too many elements + std::pop_heap(score_index_heap.begin(), score_index_heap.end()); + score_index_heap.pop_back(); + // Keep track of the worst score + worst_score = score_index_heap.front().first; + } + } + } + } + } + } + else { + typename std::vector >::const_iterator table = tables_.begin(); + typename std::vector >::const_iterator table_end = tables_.end(); + for (; table != table_end; ++table) { + size_t key = table->getKey(vec); + std::vector::const_iterator xor_mask = xor_masks_.begin(); + std::vector::const_iterator xor_mask_end = xor_masks_.end(); + for (; xor_mask != xor_mask_end; ++xor_mask) { + size_t sub_key = key ^ (*xor_mask); + const lsh::Bucket* bucket = table->getBucketFromKey(sub_key); + if (bucket == 0) continue; + + // Go over each descriptor index + std::vector::const_iterator training_index = bucket->begin(); + std::vector::const_iterator last_training_index = bucket->end(); + DistanceType hamming_distance; + + // Process the rest of the candidates + for (; training_index < last_training_index; ++training_index) { + // Compute the Hamming distance + hamming_distance = distance_(vec, dataset_[*training_index], dataset_.cols); + if (hamming_distance < radius) score_index_heap.push_back(ScoreIndexPair(hamming_distance, training_index)); + } + } + } + } + } + + /** Performs the approximate nearest-neighbor search. 
+ * This is a slower version than the above as it uses the ResultSet + * @param vec the feature to analyze + */ + void getNeighbors(const ElementType* vec, ResultSet& result) + { + typename std::vector >::const_iterator table = tables_.begin(); + typename std::vector >::const_iterator table_end = tables_.end(); + for (; table != table_end; ++table) { + size_t key = table->getKey(vec); + std::vector::const_iterator xor_mask = xor_masks_.begin(); + std::vector::const_iterator xor_mask_end = xor_masks_.end(); + for (; xor_mask != xor_mask_end; ++xor_mask) { + size_t sub_key = key ^ (*xor_mask); + const lsh::Bucket* bucket = table->getBucketFromKey((lsh::BucketKey)sub_key); + if (bucket == 0) continue; + + // Go over each descriptor index + std::vector::const_iterator training_index = bucket->begin(); + std::vector::const_iterator last_training_index = bucket->end(); + DistanceType hamming_distance; + + // Process the rest of the candidates + for (; training_index < last_training_index; ++training_index) { + // Compute the Hamming distance + hamming_distance = distance_(vec, dataset_[*training_index], (int)dataset_.cols); + result.addPoint(hamming_distance, *training_index); + } + } + } + } + + /** The different hash tables */ + std::vector > tables_; + + /** The data the LSH tables where built from */ + Matrix dataset_; + + /** The size of the features (as ElementType[]) */ + unsigned int feature_size_; + + IndexParams index_params_; + + /** table number */ + int table_number_; + /** key size */ + int key_size_; + /** How far should we look for neighbors in multi-probe LSH */ + int multi_probe_level_; + + /** The XOR masks to apply to a key to get the neighboring buckets */ + std::vector xor_masks_; + + Distance distance_; +}; +} + +#ifdef _MSC_VER +#pragma warning(pop) +#endif + +//! @endcond + +#endif //OPENCV_FLANN_LSH_INDEX_H_ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/lsh_table.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/lsh_table.h new file mode 100644 index 00000000..a189562d --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/lsh_table.h @@ -0,0 +1,525 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +/*********************************************************************** + * Author: Vincent Rabaud + *************************************************************************/ + +#ifndef OPENCV_FLANN_LSH_TABLE_H_ +#define OPENCV_FLANN_LSH_TABLE_H_ + +//! @cond IGNORED + +#include +#include +#include +#include +// TODO as soon as we use C++0x, use the code in USE_UNORDERED_MAP +#ifdef __GXX_EXPERIMENTAL_CXX0X__ +# define USE_UNORDERED_MAP 1 +#else +# define USE_UNORDERED_MAP 0 +#endif +#if USE_UNORDERED_MAP +#include +#else +#include +#endif +#include +#include + +#include "dynamic_bitset.h" +#include "matrix.h" + +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable: 4702) //disable unreachable code +#endif + + +namespace cvflann +{ + +namespace lsh +{ + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** What is stored in an LSH bucket + */ +typedef uint32_t FeatureIndex; +/** The id from which we can get a bucket back in an LSH table + */ +typedef unsigned int BucketKey; + +/** A bucket in an LSH table + */ +typedef std::vector Bucket; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** POD for stats about an LSH table + */ +struct LshStats +{ + std::vector bucket_sizes_; + size_t n_buckets_; + size_t bucket_size_mean_; + size_t bucket_size_median_; + size_t bucket_size_min_; + size_t bucket_size_max_; + size_t bucket_size_std_dev; + /** Each contained vector contains three value: beginning/end for interval, number of elements in the bin + */ + std::vector > size_histogram_; +}; + +/** Overload the << operator for LshStats + * @param out the streams + * @param stats the stats to display + * @return the streams + */ +inline std::ostream& operator <<(std::ostream& out, const LshStats& stats) +{ + int w = 20; + out << "Lsh Table Stats:\n" << std::setw(w) << std::setiosflags(std::ios::right) << "N buckets : " + << stats.n_buckets_ << "\n" << std::setw(w) << std::setiosflags(std::ios::right) << "mean size : " + << std::setiosflags(std::ios::left) << stats.bucket_size_mean_ << "\n" << std::setw(w) + << std::setiosflags(std::ios::right) << "median size : " << stats.bucket_size_median_ << "\n" << std::setw(w) + << std::setiosflags(std::ios::right) << "min size : " << std::setiosflags(std::ios::left) + << stats.bucket_size_min_ << "\n" << std::setw(w) << std::setiosflags(std::ios::right) << "max size : " + << std::setiosflags(std::ios::left) << stats.bucket_size_max_; + + // Display the histogram + out << std::endl << std::setw(w) << std::setiosflags(std::ios::right) << "histogram : " + << std::setiosflags(std::ios::left); + for (std::vector >::const_iterator iterator = stats.size_histogram_.begin(), end = + stats.size_histogram_.end(); iterator != end; ++iterator) out << (*iterator)[0] << "-" << (*iterator)[1] << ": " << (*iterator)[2] << ", "; 
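+    // Illustrative reading of the histogram line (toy numbers): for bucket
+    // sizes {1, 1, 3, 7, 25, 40}, getStats() below builds 20-wide bins and the
+    // loop above would print "0-19: 4, 20-39: 1, 40-59: 1, " -- each entry is a
+    // bin of bucket sizes followed by the number of buckets falling in that bin.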
+ + return out; +} + + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** Lsh hash table. As its key is a sub-feature, and as usually + * the size of it is pretty small, we keep it as a continuous memory array. + * The value is an index in the corpus of features (we keep it as an unsigned + * int for pure memory reasons, it could be a size_t) + */ +template +class LshTable +{ +public: + /** A container of all the feature indices. Optimized for space + */ +#if USE_UNORDERED_MAP + typedef std::unordered_map BucketsSpace; +#else + typedef std::map BucketsSpace; +#endif + + /** A container of all the feature indices. Optimized for speed + */ + typedef std::vector BucketsSpeed; + + /** Default constructor + */ + LshTable() + { + key_size_ = 0; + feature_size_ = 0; + speed_level_ = kArray; + } + + /** Default constructor + * Create the mask and allocate the memory + * @param feature_size is the size of the feature (considered as a ElementType[]) + * @param key_size is the number of bits that are turned on in the feature + */ + LshTable(unsigned int feature_size, unsigned int key_size) + { + feature_size_ = feature_size; + CV_UNUSED(key_size); + CV_Error(cv::Error::StsUnsupportedFormat, "LSH is not implemented for that type" ); + } + + /** Add a feature to the table + * @param value the value to store for that feature + * @param feature the feature itself + */ + void add(unsigned int value, const ElementType* feature) + { + // Add the value to the corresponding bucket + BucketKey key = (lsh::BucketKey)getKey(feature); + + switch (speed_level_) { + case kArray: + // That means we get the buckets from an array + buckets_speed_[key].push_back(value); + break; + case kBitsetHash: + // That means we can check the bitset for the presence of a key + key_bitset_.set(key); + buckets_space_[key].push_back(value); + break; + case kHash: + { + // That means we have to check for the hash table for the presence of a key + buckets_space_[key].push_back(value); + break; + } + } + } + + /** Add a set of features to the table + * @param dataset the values to store + */ + void add(Matrix dataset) + { +#if USE_UNORDERED_MAP + buckets_space_.rehash((buckets_space_.size() + dataset.rows) * 1.2); +#endif + // Add the features to the table + for (unsigned int i = 0; i < dataset.rows; ++i) add(i, dataset[i]); + // Now that the table is full, optimize it for speed/space + optimize(); + } + + /** Get a bucket given the key + * @param key + * @return + */ + inline const Bucket* getBucketFromKey(BucketKey key) const + { + // Generate other buckets + switch (speed_level_) { + case kArray: + // That means we get the buckets from an array + return &buckets_speed_[key]; + break; + case kBitsetHash: + // That means we can check the bitset for the presence of a key + if (key_bitset_.test(key)) return &buckets_space_.find(key)->second; + else return 0; + break; + case kHash: + { + // That means we have to check for the hash table for the presence of a key + BucketsSpace::const_iterator bucket_it, bucket_end = buckets_space_.end(); + bucket_it = buckets_space_.find(key); + // Stop here if that bucket does not exist + if (bucket_it == bucket_end) return 0; + else return &bucket_it->second; + break; + } + } + return 0; + } + + /** Compute the sub-signature of a feature + */ + size_t getKey(const ElementType* /*feature*/) const + { + CV_Error(cv::Error::StsUnsupportedFormat, "LSH is not implemented for that type" ); + return 0; + } + + /** Get statistics 
about the table + * @return + */ + LshStats getStats() const; + +private: + /** defines the speed fo the implementation + * kArray uses a vector for storing data + * kBitsetHash uses a hash map but checks for the validity of a key with a bitset + * kHash uses a hash map only + */ + enum SpeedLevel + { + kArray, kBitsetHash, kHash + }; + + /** Initialize some variables + */ + void initialize(size_t key_size) + { + const size_t key_size_lower_bound = 1; + //a value (size_t(1) << key_size) must fit the size_t type so key_size has to be strictly less than size of size_t + const size_t key_size_upper_bound = (std::min)(sizeof(BucketKey) * CHAR_BIT + 1, sizeof(size_t) * CHAR_BIT); + if (key_size < key_size_lower_bound || key_size >= key_size_upper_bound) + { + CV_Error(cv::Error::StsBadArg, cv::format("Invalid key_size (=%d). Valid values for your system are %d <= key_size < %d.", (int)key_size, (int)key_size_lower_bound, (int)key_size_upper_bound)); + } + + speed_level_ = kHash; + key_size_ = (unsigned)key_size; + } + + /** Optimize the table for speed/space + */ + void optimize() + { + // If we are already using the fast storage, no need to do anything + if (speed_level_ == kArray) return; + + // Use an array if it will be more than half full + if (buckets_space_.size() > ((size_t(1) << key_size_) / 2)) { + speed_level_ = kArray; + // Fill the array version of it + buckets_speed_.resize(size_t(1) << key_size_); + for (BucketsSpace::const_iterator key_bucket = buckets_space_.begin(); key_bucket != buckets_space_.end(); ++key_bucket) buckets_speed_[key_bucket->first] = key_bucket->second; + + // Empty the hash table + buckets_space_.clear(); + return; + } + + // If the bitset is going to use less than 10% of the RAM of the hash map (at least 1 size_t for the key and two + // for the vector) or less than 512MB (key_size_ <= 30) + if (((std::max(buckets_space_.size(), buckets_speed_.size()) * CHAR_BIT * 3 * sizeof(BucketKey)) / 10 + >= (size_t(1) << key_size_)) || (key_size_ <= 32)) { + speed_level_ = kBitsetHash; + key_bitset_.resize(size_t(1) << key_size_); + key_bitset_.reset(); + // Try with the BucketsSpace + for (BucketsSpace::const_iterator key_bucket = buckets_space_.begin(); key_bucket != buckets_space_.end(); ++key_bucket) key_bitset_.set(key_bucket->first); + } + else { + speed_level_ = kHash; + key_bitset_.clear(); + } + } + + /** The vector of all the buckets if they are held for speed + */ + BucketsSpeed buckets_speed_; + + /** The hash table of all the buckets in case we cannot use the speed version + */ + BucketsSpace buckets_space_; + + /** What is used to store the data */ + SpeedLevel speed_level_; + + /** If the subkey is small enough, it will keep track of which subkeys are set through that bitset + * That is just a speedup so that we don't look in the hash table (which can be mush slower that checking a bitset) + */ + DynamicBitset key_bitset_; + + /** The size of the sub-signature in bits + */ + unsigned int key_size_; + + unsigned int feature_size_; + + // Members only used for the unsigned char specialization + /** The mask to apply to a feature to get the hash key + * Only used in the unsigned char case + */ + std::vector mask_; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +// Specialization for unsigned char + +template<> +inline LshTable::LshTable(unsigned int feature_size, unsigned int subsignature_size) +{ + feature_size_ = feature_size; + initialize(subsignature_size); + // Allocate 
the mask + mask_ = std::vector((feature_size * sizeof(char) + sizeof(size_t) - 1) / sizeof(size_t), 0); + + // A bit brutal but fast to code + std::vector indices(feature_size * CHAR_BIT); + for (size_t i = 0; i < feature_size * CHAR_BIT; ++i) indices[i] = (int)i; +#ifndef OPENCV_FLANN_USE_STD_RAND + cv::randShuffle(indices); +#else + std::random_shuffle(indices.begin(), indices.end()); +#endif + + // Generate a random set of order of subsignature_size_ bits + for (unsigned int i = 0; i < key_size_; ++i) { + size_t index = indices[i]; + + // Set that bit in the mask + size_t divisor = CHAR_BIT * sizeof(size_t); + size_t idx = index / divisor; //pick the right size_t index + mask_[idx] |= size_t(1) << (index % divisor); //use modulo to find the bit offset + } + + // Set to 1 if you want to display the mask for debug +#if 0 + { + size_t bcount = 0; + BOOST_FOREACH(size_t mask_block, mask_){ + out << std::setw(sizeof(size_t) * CHAR_BIT / 4) << std::setfill('0') << std::hex << mask_block + << std::endl; + bcount += __builtin_popcountll(mask_block); + } + out << "bit count : " << std::dec << bcount << std::endl; + out << "mask size : " << mask_.size() << std::endl; + return out; + } +#endif +} + +/** Return the Subsignature of a feature + * @param feature the feature to analyze + */ +template<> +inline size_t LshTable::getKey(const unsigned char* feature) const +{ + // no need to check if T is dividable by sizeof(size_t) like in the Hamming + // distance computation as we have a mask + // FIXIT: This is bad assumption, because we reading tail bytes after of the allocated features buffer + const size_t* feature_block_ptr = reinterpret_cast ((const void*)feature); + + // Figure out the subsignature of the feature + // Given the feature ABCDEF, and the mask 001011, the output will be + // 000CEF + size_t subsignature = 0; + size_t bit_index = 1; + + for (unsigned i = 0; i < feature_size_; i += sizeof(size_t)) { + // get the mask and signature blocks + size_t feature_block; + if (i <= feature_size_ - sizeof(size_t)) + { + feature_block = *feature_block_ptr; + } + else + { + size_t tmp = 0; + memcpy(&tmp, feature_block_ptr, feature_size_ - i); // preserve bytes order + feature_block = tmp; + } + size_t mask_block = mask_[i / sizeof(size_t)]; + while (mask_block) { + // Get the lowest set bit in the mask block + size_t lowest_bit = mask_block & (-(ptrdiff_t)mask_block); + // Add it to the current subsignature if necessary + subsignature += (feature_block & lowest_bit) ? 
bit_index : 0; + // Reset the bit in the mask block + mask_block ^= lowest_bit; + // increment the bit index for the subsignature + bit_index <<= 1; + } + // Check the next feature block + ++feature_block_ptr; + } + return subsignature; +} + +template<> +inline LshStats LshTable::getStats() const +{ + LshStats stats; + stats.bucket_size_mean_ = 0; + if ((buckets_speed_.empty()) && (buckets_space_.empty())) { + stats.n_buckets_ = 0; + stats.bucket_size_median_ = 0; + stats.bucket_size_min_ = 0; + stats.bucket_size_max_ = 0; + return stats; + } + + if (!buckets_speed_.empty()) { + for (BucketsSpeed::const_iterator pbucket = buckets_speed_.begin(); pbucket != buckets_speed_.end(); ++pbucket) { + stats.bucket_sizes_.push_back((lsh::FeatureIndex)pbucket->size()); + stats.bucket_size_mean_ += pbucket->size(); + } + stats.bucket_size_mean_ /= buckets_speed_.size(); + stats.n_buckets_ = buckets_speed_.size(); + } + else { + for (BucketsSpace::const_iterator x = buckets_space_.begin(); x != buckets_space_.end(); ++x) { + stats.bucket_sizes_.push_back((lsh::FeatureIndex)x->second.size()); + stats.bucket_size_mean_ += x->second.size(); + } + stats.bucket_size_mean_ /= buckets_space_.size(); + stats.n_buckets_ = buckets_space_.size(); + } + + std::sort(stats.bucket_sizes_.begin(), stats.bucket_sizes_.end()); + + // BOOST_FOREACH(int size, stats.bucket_sizes_) + // std::cout << size << " "; + // std::cout << std::endl; + stats.bucket_size_median_ = stats.bucket_sizes_[stats.bucket_sizes_.size() / 2]; + stats.bucket_size_min_ = stats.bucket_sizes_.front(); + stats.bucket_size_max_ = stats.bucket_sizes_.back(); + + // TODO compute mean and std + /*float mean, stddev; + stats.bucket_size_mean_ = mean; + stats.bucket_size_std_dev = stddev;*/ + + // Include a histogram of the buckets + unsigned int bin_start = 0; + unsigned int bin_end = 20; + bool is_new_bin = true; + for (std::vector::iterator iterator = stats.bucket_sizes_.begin(), end = stats.bucket_sizes_.end(); iterator + != end; ) + if (*iterator < bin_end) { + if (is_new_bin) { + stats.size_histogram_.push_back(std::vector(3, 0)); + stats.size_histogram_.back()[0] = bin_start; + stats.size_histogram_.back()[1] = bin_end - 1; + is_new_bin = false; + } + ++stats.size_histogram_.back()[2]; + ++iterator; + } + else { + bin_start += 20; + bin_end += 20; + is_new_bin = true; + } + + return stats; +} + +// End the two namespaces +} +} + +#ifdef _MSC_VER +#pragma warning(pop) +#endif + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +//! @endcond + +#endif /* OPENCV_FLANN_LSH_TABLE_H_ */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/matrix.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/matrix.h new file mode 100644 index 00000000..fb871bd7 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/matrix.h @@ -0,0 +1,118 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_DATASET_H_ +#define OPENCV_FLANN_DATASET_H_ + +//! @cond IGNORED + +#include + +namespace cvflann +{ + +/** + * Class that implements a simple rectangular matrix stored in a memory buffer and + * provides convenient matrix-like access using the [] operators. + */ +template +class Matrix +{ +public: + typedef T type; + + size_t rows; + size_t cols; + size_t stride; + T* data; + + Matrix() : rows(0), cols(0), stride(0), data(NULL) + { + } + + Matrix(T* data_, size_t rows_, size_t cols_, size_t stride_ = 0) : + rows(rows_), cols(cols_), stride(stride_), data(data_) + { + if (stride==0) stride = cols; + } + + /** + * Convenience function for deallocating the storage data. + */ + CV_DEPRECATED void free() + { + fprintf(stderr, "The cvflann::Matrix::free() method is deprecated " + "and it does not do any memory deallocation any more. You are" + "responsible for deallocating the matrix memory (by doing" + "'delete[] matrix.data' for example)"); + } + + /** + * Operator that return a (pointer to a) row of the data. + */ + T* operator[](size_t index) const + { + return data+index*stride; + } +}; + + +class UntypedMatrix +{ +public: + size_t rows; + size_t cols; + void* data; + flann_datatype_t type; + + UntypedMatrix(void* data_, long rows_, long cols_) : + rows(rows_), cols(cols_), data(data_) + { + } + + ~UntypedMatrix() + { + } + + + template + Matrix as() + { + return Matrix((T*)data, rows, cols); + } +}; + + + +} + +//! @endcond + +#endif //OPENCV_FLANN_DATASET_H_ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/miniflann.hpp b/third_party/opencv/uos/sw_64/include/opencv2/flann/miniflann.hpp new file mode 100644 index 00000000..1deeeb93 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/miniflann.hpp @@ -0,0 +1,170 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_MINIFLANN_HPP +#define OPENCV_MINIFLANN_HPP + +//! @cond IGNORED + +#include "opencv2/core.hpp" +#include "opencv2/flann/defines.h" + +namespace cv +{ + +namespace flann +{ + +struct CV_EXPORTS IndexParams +{ + IndexParams(); + ~IndexParams(); + + String getString(const String& key, const String& defaultVal=String()) const; + int getInt(const String& key, int defaultVal=-1) const; + double getDouble(const String& key, double defaultVal=-1) const; + + void setString(const String& key, const String& value); + void setInt(const String& key, int value); + void setDouble(const String& key, double value); + void setFloat(const String& key, float value); + void setBool(const String& key, bool value); + void setAlgorithm(int value); + + void getAll(std::vector& names, + std::vector& types, + std::vector& strValues, + std::vector& numValues) const; + + void* params; + +private: + IndexParams(const IndexParams &); // copy disabled + IndexParams& operator=(const IndexParams &); // assign disabled +}; + +struct CV_EXPORTS KDTreeIndexParams : public IndexParams +{ + KDTreeIndexParams(int trees=4); +}; + +struct CV_EXPORTS LinearIndexParams : public IndexParams +{ + LinearIndexParams(); +}; + +struct CV_EXPORTS CompositeIndexParams : public IndexParams +{ + CompositeIndexParams(int trees = 4, int branching = 32, int iterations = 11, + cvflann::flann_centers_init_t centers_init = cvflann::FLANN_CENTERS_RANDOM, float cb_index = 0.2f ); +}; + +struct CV_EXPORTS AutotunedIndexParams : public IndexParams +{ + AutotunedIndexParams(float target_precision = 0.8f, float build_weight = 0.01f, + float memory_weight = 0, float sample_fraction = 0.1f); +}; + +struct CV_EXPORTS HierarchicalClusteringIndexParams : public IndexParams +{ + HierarchicalClusteringIndexParams(int branching = 32, + cvflann::flann_centers_init_t centers_init = cvflann::FLANN_CENTERS_RANDOM, int trees = 4, int leaf_size = 100 ); +}; + +struct CV_EXPORTS KMeansIndexParams : public IndexParams +{ + KMeansIndexParams(int branching = 32, int iterations = 11, + 
cvflann::flann_centers_init_t centers_init = cvflann::FLANN_CENTERS_RANDOM, float cb_index = 0.2f ); +}; + +struct CV_EXPORTS LshIndexParams : public IndexParams +{ + LshIndexParams(int table_number, int key_size, int multi_probe_level); +}; + +struct CV_EXPORTS SavedIndexParams : public IndexParams +{ + SavedIndexParams(const String& filename); +}; + +struct CV_EXPORTS SearchParams : public IndexParams +{ + SearchParams( int checks = 32, float eps = 0, bool sorted = true ); +}; + +class CV_EXPORTS Index +{ +public: + Index(); + + /// @note 'features' must have extended lifetime (as cv::Mat) than this #Index instance (to avoid dangling pointers) + Index(InputArray features, const IndexParams& params, cvflann::flann_distance_t distType=cvflann::FLANN_DIST_L2); + virtual ~Index(); + + /// @note 'features' must have extended lifetime (as cv::Mat) than this #Index instance (to avoid dangling pointers) + virtual void build(InputArray features, const IndexParams& params, cvflann::flann_distance_t distType=cvflann::FLANN_DIST_L2); + + virtual void knnSearch(InputArray query, OutputArray indices, + OutputArray dists, int knn, const SearchParams& params=SearchParams()); + + virtual int radiusSearch(InputArray query, OutputArray indices, + OutputArray dists, double radius, int maxResults, + const SearchParams& params=SearchParams()); + + virtual void save(const String& filename) const; + virtual bool load(InputArray features, const String& filename); + virtual void release(); + cvflann::flann_distance_t getDistance() const; + cvflann::flann_algorithm_t getAlgorithm() const; + +protected: + cvflann::flann_distance_t distType; + cvflann::flann_algorithm_t algo; + int featureType; + void* index; +}; + +} } // namespace cv::flann + +//! @endcond + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/nn_index.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/nn_index.h new file mode 100644 index 00000000..f6e17d19 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/nn_index.h @@ -0,0 +1,180 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_NNINDEX_H +#define OPENCV_FLANN_NNINDEX_H + +#include "matrix.h" +#include "result_set.h" +#include "params.h" + +//! @cond IGNORED + +namespace cvflann +{ + +/** + * Nearest-neighbour index base class + */ +template +class NNIndex +{ + typedef typename Distance::ElementType ElementType; + typedef typename Distance::ResultType DistanceType; + +public: + + virtual ~NNIndex() {} + + /** + * \brief Builds the index + */ + virtual void buildIndex() = 0; + + /** + * \brief Perform k-nearest neighbor search + * \param[in] queries The query points for which to find the nearest neighbors + * \param[out] indices The indices of the nearest neighbors found + * \param[out] dists Distances to the nearest neighbors found + * \param[in] knn Number of nearest neighbors to return + * \param[in] params Search parameters + */ + virtual void knnSearch(const Matrix& queries, Matrix& indices, Matrix& dists, int knn, const SearchParams& params) + { + CV_Assert(queries.cols == veclen()); + CV_Assert(indices.rows >= queries.rows); + CV_Assert(dists.rows >= queries.rows); + CV_Assert(int(indices.cols) >= knn); + CV_Assert(int(dists.cols) >= knn); + +#if 0 + KNNResultSet resultSet(knn); + for (size_t i = 0; i < queries.rows; i++) { + resultSet.init(indices[i], dists[i]); + findNeighbors(resultSet, queries[i], params); + } +#else + KNNUniqueResultSet resultSet(knn); + for (size_t i = 0; i < queries.rows; i++) { + resultSet.clear(); + findNeighbors(resultSet, queries[i], params); + if (get_param(params,"sorted",true)) resultSet.sortAndCopy(indices[i], dists[i], knn); + else resultSet.copy(indices[i], dists[i], knn); + } +#endif + } + + /** + * \brief Perform radius search + * \param[in] query The query point + * \param[out] indices The indinces of the neighbors found within the given radius + * \param[out] dists The distances to the nearest neighbors found + * \param[in] radius The radius used for search + * \param[in] params Search parameters + * \returns Number of neighbors found + */ + virtual int radiusSearch(const Matrix& query, Matrix& indices, Matrix& dists, float radius, const SearchParams& params) + { + if (query.rows != 1) { + fprintf(stderr, "I can only search one feature at a time for range search\n"); + return -1; + } + assert(query.cols == veclen()); + assert(indices.cols == dists.cols); + + int n = 0; + int* indices_ptr = NULL; + DistanceType* dists_ptr = NULL; + if (indices.cols > 0) { + n = (int)indices.cols; + indices_ptr = indices[0]; + dists_ptr = dists[0]; + } + + RadiusUniqueResultSet resultSet((DistanceType)radius); + resultSet.clear(); + findNeighbors(resultSet, query[0], params); + if (n>0) { + if (get_param(params,"sorted",true)) resultSet.sortAndCopy(indices_ptr, dists_ptr, n); + else resultSet.copy(indices_ptr, dists_ptr, n); + } + + return (int)resultSet.size(); + } + + /** + * \brief Saves the index to a stream + * \param stream The stream to save the index to + */ + virtual void 
saveIndex(FILE* stream) = 0; + + /** + * \brief Loads the index from a stream + * \param stream The stream from which the index is loaded + */ + virtual void loadIndex(FILE* stream) = 0; + + /** + * \returns number of features in this index. + */ + virtual size_t size() const = 0; + + /** + * \returns The dimensionality of the features in this index. + */ + virtual size_t veclen() const = 0; + + /** + * \returns The amount of memory (in bytes) used by the index. + */ + virtual int usedMemory() const = 0; + + /** + * \returns The index type (kdtree, kmeans,...) + */ + virtual flann_algorithm_t getType() const = 0; + + /** + * \returns The index parameters + */ + virtual IndexParams getParameters() const = 0; + + + /** + * \brief Method that searches for nearest-neighbours + */ + virtual void findNeighbors(ResultSet& result, const ElementType* vec, const SearchParams& searchParams) = 0; +}; + +} + +//! @endcond + +#endif //OPENCV_FLANN_NNINDEX_H diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/object_factory.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/object_factory.h new file mode 100644 index 00000000..5cc45ad1 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/object_factory.h @@ -0,0 +1,95 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_OBJECT_FACTORY_H_ +#define OPENCV_FLANN_OBJECT_FACTORY_H_ + +//! 
@cond IGNORED + +#include + +namespace cvflann +{ + +class CreatorNotFound +{ +}; + +template +class ObjectFactory +{ + typedef ObjectFactory ThisClass; + typedef std::map ObjectRegistry; + + // singleton class, private constructor + ObjectFactory() {} + +public: + + bool subscribe(UniqueIdType id, ObjectCreator creator) + { + if (object_registry.find(id) != object_registry.end()) return false; + + object_registry[id] = creator; + return true; + } + + bool unregister(UniqueIdType id) + { + return object_registry.erase(id) == 1; + } + + ObjectCreator create(UniqueIdType id) + { + typename ObjectRegistry::const_iterator iter = object_registry.find(id); + + if (iter == object_registry.end()) { + throw CreatorNotFound(); + } + + return iter->second; + } + + static ThisClass& instance() + { + static ThisClass the_factory; + return the_factory; + } +private: + ObjectRegistry object_registry; +}; + +} + +//! @endcond + +#endif /* OPENCV_FLANN_OBJECT_FACTORY_H_ */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/params.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/params.h new file mode 100644 index 00000000..a7920d0b --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/params.h @@ -0,0 +1,102 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2011 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2011 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + + +#ifndef OPENCV_FLANN_PARAMS_H_ +#define OPENCV_FLANN_PARAMS_H_ + +//! 
@cond IGNORED + +#include "any.h" +#include "general.h" +#include +#include + + +namespace cvflann +{ + +typedef std::map IndexParams; + +struct SearchParams : public IndexParams +{ + SearchParams(int checks = 32, float eps = 0, bool sorted = true ) + { + // how many leafs to visit when searching for neighbours (-1 for unlimited) + (*this)["checks"] = checks; + // search for eps-approximate neighbours (default: 0) + (*this)["eps"] = eps; + // only for radius search, require neighbours sorted by distance (default: true) + (*this)["sorted"] = sorted; + } +}; + + +template +T get_param(const IndexParams& params, cv::String name, const T& default_value) +{ + IndexParams::const_iterator it = params.find(name); + if (it != params.end()) { + return it->second.cast(); + } + else { + return default_value; + } +} + +template +T get_param(const IndexParams& params, cv::String name) +{ + IndexParams::const_iterator it = params.find(name); + if (it != params.end()) { + return it->second.cast(); + } + else { + FLANN_THROW(cv::Error::StsBadArg, cv::String("Missing parameter '")+name+cv::String("' in the parameters given")); + } +} + +inline void print_params(const IndexParams& params, std::ostream& stream) +{ + IndexParams::const_iterator it; + + for(it=params.begin(); it!=params.end(); ++it) { + stream << it->first << " : " << it->second << std::endl; + } +} + +inline void print_params(const IndexParams& params) +{ + print_params(params, std::cout); +} + +} + +//! @endcond + +#endif /* OPENCV_FLANN_PARAMS_H_ */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/random.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/random.h new file mode 100644 index 00000000..2c1809c3 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/random.h @@ -0,0 +1,157 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_RANDOM_H +#define OPENCV_FLANN_RANDOM_H + +//! 
@cond IGNORED + +#include +#include +#include + +namespace cvflann +{ + +inline int rand() +{ +#ifndef OPENCV_FLANN_USE_STD_RAND +# if INT_MAX == RAND_MAX + int v = cv::theRNG().next() & INT_MAX; +# else + int v = cv::theRNG().uniform(0, RAND_MAX + 1); +# endif +#else + int v = std::rand(); +#endif // OPENCV_FLANN_USE_STD_RAND + return v; +} + +/** + * Seeds the random number generator + * @param seed Random seed + */ +inline void seed_random(unsigned int seed) +{ +#ifndef OPENCV_FLANN_USE_STD_RAND + cv::theRNG() = cv::RNG(seed); +#else + std::srand(seed); +#endif +} + +/* + * Generates a random double value. + */ +/** + * Generates a random double value. + * @param high Upper limit + * @param low Lower limit + * @return Random double value + */ +inline double rand_double(double high = 1.0, double low = 0) +{ + return low + ((high-low) * (rand() / (RAND_MAX + 1.0))); +} + +/** + * Generates a random integer value. + * @param high Upper limit + * @param low Lower limit + * @return Random integer value + */ +inline int rand_int(int high = RAND_MAX, int low = 0) +{ + return low + (int) ( double(high-low) * (rand() / (RAND_MAX + 1.0))); +} + +/** + * Random number generator that returns a distinct number from + * the [0,n) interval each time. + */ +class UniqueRandom +{ + std::vector vals_; + int size_; + int counter_; + +public: + /** + * Constructor. + * @param n Size of the interval from which to generate + * @return + */ + UniqueRandom(int n) + { + init(n); + } + + /** + * Initializes the number generator. + * @param n the size of the interval from which to generate random numbers. + */ + void init(int n) + { + // create and initialize an array of size n + vals_.resize(n); + size_ = n; + for (int i = 0; i < size_; ++i) vals_[i] = i; + + // shuffle the elements in the array +#ifndef OPENCV_FLANN_USE_STD_RAND + cv::randShuffle(vals_); +#else + std::random_shuffle(vals_.begin(), vals_.end()); +#endif + + counter_ = 0; + } + + /** + * Return a distinct random integer in greater or equal to 0 and less + * than 'n' on each call. It should be called maximum 'n' times. + * Returns: a random integer + */ + int next() + { + if (counter_ == size_) { + return -1; + } + else { + return vals_[counter_++]; + } + } +}; + +} + +//! @endcond + +#endif //OPENCV_FLANN_RANDOM_H diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/result_set.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/result_set.h new file mode 100644 index 00000000..47ad2311 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/result_set.h @@ -0,0 +1,548 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_RESULTSET_H +#define OPENCV_FLANN_RESULTSET_H + +//! @cond IGNORED + +#include +#include +#include +#include +#include +#include + +namespace cvflann +{ + +/* This record represents a branch point when finding neighbors in + the tree. It contains a record of the minimum distance to the query + point, as well as the node at which the search resumes. + */ + +template +struct BranchStruct +{ + T node; /* Tree node at which search resumes */ + DistanceType mindist; /* Minimum distance to query for all nodes below. */ + + BranchStruct() {} + BranchStruct(const T& aNode, DistanceType dist) : node(aNode), mindist(dist) {} + + bool operator<(const BranchStruct& rhs) const + { + return mindist +class ResultSet +{ +public: + virtual ~ResultSet() {} + + virtual bool full() const = 0; + + virtual void addPoint(DistanceType dist, int index) = 0; + + virtual DistanceType worstDist() const = 0; + +}; + +/** + * KNNSimpleResultSet does not ensure that the element it holds are unique. + * Is used in those cases where the nearest neighbour algorithm used does not + * attempt to insert the same element multiple times. 
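+ *
+ * A minimal usage sketch (illustrative only; the capacity and the candidate
+ * distance/index pairs are placeholders). The caller owns the output arrays,
+ * which are kept sorted by distance as points are added:
+ * @code
+ *     const int knn = 4;
+ *     int   indices[4];
+ *     float dists[4];
+ *     KNNSimpleResultSet<float> resultSet(knn);
+ *     resultSet.init(indices, dists);
+ *     resultSet.addPoint(0.5f, 10);
+ *     resultSet.addPoint(0.1f, 3);    // becomes the current best neighbour
+ * @endcode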
+ */ +template +class KNNSimpleResultSet : public ResultSet +{ + int* indices; + DistanceType* dists; + int capacity; + int count; + DistanceType worst_distance_; + +public: + KNNSimpleResultSet(int capacity_) : capacity(capacity_), count(0) + { + } + + void init(int* indices_, DistanceType* dists_) + { + indices = indices_; + dists = dists_; + count = 0; + worst_distance_ = (std::numeric_limits::max)(); + dists[capacity-1] = worst_distance_; + } + + size_t size() const + { + return count; + } + + bool full() const CV_OVERRIDE + { + return count == capacity; + } + + + void addPoint(DistanceType dist, int index) CV_OVERRIDE + { + if (dist >= worst_distance_) return; + int i; + for (i=count; i>0; --i) { +#ifdef FLANN_FIRST_MATCH + if ( (dists[i-1]>dist) || ((dist==dists[i-1])&&(indices[i-1]>index)) ) +#else + if (dists[i-1]>dist) +#endif + { + if (i +class KNNResultSet : public ResultSet +{ + int* indices; + DistanceType* dists; + int capacity; + int count; + DistanceType worst_distance_; + +public: + KNNResultSet(int capacity_) + : indices(NULL), dists(NULL), capacity(capacity_), count(0), worst_distance_(0) + { + } + + void init(int* indices_, DistanceType* dists_) + { + indices = indices_; + dists = dists_; + count = 0; + worst_distance_ = (std::numeric_limits::max)(); + dists[capacity-1] = worst_distance_; + } + + size_t size() const + { + return count; + } + + bool full() const CV_OVERRIDE + { + return count == capacity; + } + + + void addPoint(DistanceType dist, int index) CV_OVERRIDE + { + CV_DbgAssert(indices); + CV_DbgAssert(dists); + if (dist >= worst_distance_) return; + int i; + for (i = count; i > 0; --i) { +#ifdef FLANN_FIRST_MATCH + if ( (dists[i-1]<=dist) && ((dist!=dists[i-1])||(indices[i-1]<=index)) ) +#else + if (dists[i-1]<=dist) +#endif + { + // Check for duplicate indices + for (int j = i; dists[j] == dist && j--;) { + if (indices[j] == index) { + return; + } + } + break; + } + } + + if (count < capacity) ++count; + for (int j = count-1; j > i; --j) { + dists[j] = dists[j-1]; + indices[j] = indices[j-1]; + } + dists[i] = dist; + indices[i] = index; + worst_distance_ = dists[capacity-1]; + } + + DistanceType worstDist() const CV_OVERRIDE + { + return worst_distance_; + } +}; + + +/** + * A result-set class used when performing a radius based search. 
+ */ +template +class RadiusResultSet : public ResultSet +{ + DistanceType radius; + int* indices; + DistanceType* dists; + size_t capacity; + size_t count; + +public: + RadiusResultSet(DistanceType radius_, int* indices_, DistanceType* dists_, int capacity_) : + radius(radius_), indices(indices_), dists(dists_), capacity(capacity_) + { + init(); + } + + ~RadiusResultSet() + { + } + + void init() + { + count = 0; + } + + size_t size() const + { + return count; + } + + bool full() const + { + return true; + } + + void addPoint(DistanceType dist, int index) + { + if (dist0)&&(count < capacity)) { + dists[count] = dist; + indices[count] = index; + } + count++; + } + } + + DistanceType worstDist() const + { + return radius; + } + +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** Class that holds the k NN neighbors + * Faster than KNNResultSet as it uses a binary heap and does not maintain two arrays + */ +template +class UniqueResultSet : public ResultSet +{ +public: + struct DistIndex + { + DistIndex(DistanceType dist, unsigned int index) : + dist_(dist), index_(index) + { + } + bool operator<(const DistIndex dist_index) const + { + return (dist_ < dist_index.dist_) || ((dist_ == dist_index.dist_) && index_ < dist_index.index_); + } + DistanceType dist_; + unsigned int index_; + }; + + /** Default constructor */ + UniqueResultSet() : + is_full_(false), worst_distance_(std::numeric_limits::max()) + { + } + + /** Check the status of the set + * @return true if we have k NN + */ + inline bool full() const CV_OVERRIDE + { + return is_full_; + } + + /** Remove all elements in the set + */ + virtual void clear() = 0; + + /** Copy the set to two C arrays + * @param indices pointer to a C array of indices + * @param dist pointer to a C array of distances + * @param n_neighbors the number of neighbors to copy + */ + virtual void copy(int* indices, DistanceType* dist, int n_neighbors = -1) const + { + if (n_neighbors < 0) { + for (typename std::set::const_iterator dist_index = dist_indices_.begin(), dist_index_end = + dist_indices_.end(); dist_index != dist_index_end; ++dist_index, ++indices, ++dist) { + *indices = dist_index->index_; + *dist = dist_index->dist_; + } + } + else { + int i = 0; + for (typename std::set::const_iterator dist_index = dist_indices_.begin(), dist_index_end = + dist_indices_.end(); (dist_index != dist_index_end) && (i < n_neighbors); ++dist_index, ++indices, ++dist, ++i) { + *indices = dist_index->index_; + *dist = dist_index->dist_; + } + } + } + + /** Copy the set to two C arrays but sort it according to the distance first + * @param indices pointer to a C array of indices + * @param dist pointer to a C array of distances + * @param n_neighbors the number of neighbors to copy + */ + virtual void sortAndCopy(int* indices, DistanceType* dist, int n_neighbors = -1) const + { + copy(indices, dist, n_neighbors); + } + + /** The number of neighbors in the set + * @return + */ + size_t size() const + { + return dist_indices_.size(); + } + + /** The distance of the furthest neighbor + * If we don't have enough neighbors, it returns the max possible value + * @return + */ + inline DistanceType worstDist() const CV_OVERRIDE + { + return worst_distance_; + } +protected: + /** Flag to say if the set is full */ + bool is_full_; + + /** The worst distance found so far */ + DistanceType worst_distance_; + + /** The best candidates so far */ + std::set dist_indices_; +}; + 
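+
+/* Illustrative sketch of how the unique result sets below are typically driven
+ * (the capacity and the candidate distance/index pairs are hypothetical; the
+ * caller owns the output arrays):
+ * @code
+ *     KNNUniqueResultSet<float> resultSet(3);    // keep the 3 best neighbours
+ *     resultSet.addPoint(0.8f, 42);
+ *     resultSet.addPoint(0.2f, 7);
+ *     resultSet.addPoint(0.5f, 13);
+ *     resultSet.addPoint(0.9f, 99);              // rejected: not better than the current worst
+ *     int   indices[3];
+ *     float dists[3];
+ *     resultSet.sortAndCopy(indices, dists, 3);  // indices == {7, 13, 42}
+ * @endcode
+ */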
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** Class that holds the k NN neighbors + * Faster than KNNResultSet as it uses a binary heap and does not maintain two arrays + */ +template +class KNNUniqueResultSet : public UniqueResultSet +{ +public: + /** Constructor + * @param capacity the number of neighbors to store at max + */ + KNNUniqueResultSet(unsigned int capacity) : capacity_(capacity) + { + this->is_full_ = false; + this->clear(); + } + + /** Add a possible candidate to the best neighbors + * @param dist distance for that neighbor + * @param index index of that neighbor + */ + inline void addPoint(DistanceType dist, int index) CV_OVERRIDE + { + // Don't do anything if we are worse than the worst + if (dist >= worst_distance_) return; + dist_indices_.insert(DistIndex(dist, index)); + + if (is_full_) { + if (dist_indices_.size() > capacity_) { + dist_indices_.erase(*dist_indices_.rbegin()); + worst_distance_ = dist_indices_.rbegin()->dist_; + } + } + else if (dist_indices_.size() == capacity_) { + is_full_ = true; + worst_distance_ = dist_indices_.rbegin()->dist_; + } + } + + /** Remove all elements in the set + */ + void clear() CV_OVERRIDE + { + dist_indices_.clear(); + worst_distance_ = std::numeric_limits::max(); + is_full_ = false; + } + +protected: + typedef typename UniqueResultSet::DistIndex DistIndex; + using UniqueResultSet::is_full_; + using UniqueResultSet::worst_distance_; + using UniqueResultSet::dist_indices_; + + /** The number of neighbors to keep */ + unsigned int capacity_; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** Class that holds the radius nearest neighbors + * It is more accurate than RadiusResult as it is not limited in the number of neighbors + */ +template +class RadiusUniqueResultSet : public UniqueResultSet +{ +public: + /** Constructor + * @param radius the maximum distance of a neighbor + */ + RadiusUniqueResultSet(DistanceType radius) : + radius_(radius) + { + is_full_ = true; + } + + /** Add a possible candidate to the best neighbors + * @param dist distance for that neighbor + * @param index index of that neighbor + */ + void addPoint(DistanceType dist, int index) CV_OVERRIDE + { + if (dist <= radius_) dist_indices_.insert(DistIndex(dist, index)); + } + + /** Remove all elements in the set + */ + inline void clear() CV_OVERRIDE + { + dist_indices_.clear(); + } + + + /** Check the status of the set + * @return alwys false + */ + inline bool full() const CV_OVERRIDE + { + return true; + } + + /** The distance of the furthest neighbor + * If we don't have enough neighbors, it returns the max possible value + * @return + */ + inline DistanceType worstDist() const CV_OVERRIDE + { + return radius_; + } +private: + typedef typename UniqueResultSet::DistIndex DistIndex; + using UniqueResultSet::dist_indices_; + using UniqueResultSet::is_full_; + + /** The furthest distance a neighbor can be */ + DistanceType radius_; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +/** Class that holds the k NN neighbors within a radius distance + */ +template +class KNNRadiusUniqueResultSet : public KNNUniqueResultSet +{ +public: + /** Constructor + * @param capacity the number of neighbors to store at max + * @param radius the maximum distance of a neighbor + */ + KNNRadiusUniqueResultSet(unsigned int capacity, 
DistanceType radius) + { + this->capacity_ = capacity; + this->radius_ = radius; + this->dist_indices_.reserve(capacity_); + this->clear(); + } + + /** Remove all elements in the set + */ + void clear() + { + dist_indices_.clear(); + worst_distance_ = radius_; + is_full_ = false; + } +private: + using KNNUniqueResultSet::dist_indices_; + using KNNUniqueResultSet::is_full_; + using KNNUniqueResultSet::worst_distance_; + + /** The maximum number of neighbors to consider */ + unsigned int capacity_; + + /** The maximum distance of a neighbor */ + DistanceType radius_; +}; +} + +//! @endcond + +#endif //OPENCV_FLANN_RESULTSET_H diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/sampling.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/sampling.h new file mode 100644 index 00000000..4e452b9b --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/sampling.h @@ -0,0 +1,84 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + + +#ifndef OPENCV_FLANN_SAMPLING_H_ +#define OPENCV_FLANN_SAMPLING_H_ + +//! 
@cond IGNORED + +#include "matrix.h" +#include "random.h" + +namespace cvflann +{ + +template +Matrix random_sample(Matrix& srcMatrix, long size, bool remove = false) +{ + Matrix newSet(new T[size * srcMatrix.cols], size,srcMatrix.cols); + + T* src,* dest; + for (long i=0; i +Matrix random_sample(const Matrix& srcMatrix, size_t size) +{ + UniqueRandom rand((int)srcMatrix.rows); + Matrix newSet(new T[size * srcMatrix.cols], size,srcMatrix.cols); + + T* src,* dest; + for (size_t i=0; i +#include + +#include "general.h" +#include "nn_index.h" + +#ifdef FLANN_SIGNATURE_ +#undef FLANN_SIGNATURE_ +#endif +#define FLANN_SIGNATURE_ "FLANN_INDEX" + +namespace cvflann +{ + +template +struct Datatype {}; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_INT8; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_INT16; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_INT32; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_UINT8; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_UINT16; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_UINT32; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_FLOAT32; } }; +template<> +struct Datatype { static flann_datatype_t type() { return FLANN_FLOAT64; } }; + + +/** + * Structure representing the index header. + */ +struct IndexHeader +{ + char signature[16]; + char version[16]; + flann_datatype_t data_type; + flann_algorithm_t index_type; + size_t rows; + size_t cols; +}; + +/** + * Saves index header to stream + * + * @param stream - Stream to save to + * @param index - The index to save + */ +template +void save_header(FILE* stream, const NNIndex& index) +{ + IndexHeader header; + memset(header.signature, 0, sizeof(header.signature)); + strcpy(header.signature, FLANN_SIGNATURE_); + memset(header.version, 0, sizeof(header.version)); + strcpy(header.version, FLANN_VERSION_); + header.data_type = Datatype::type(); + header.index_type = index.getType(); + header.rows = index.size(); + header.cols = index.veclen(); + + std::fwrite(&header, sizeof(header),1,stream); +} + + +/** + * + * @param stream - Stream to load from + * @return Index header + */ +inline IndexHeader load_header(FILE* stream) +{ + IndexHeader header; + size_t read_size = fread(&header,sizeof(header),1,stream); + + if (read_size!=(size_t)1) { + FLANN_THROW(cv::Error::StsError, "Invalid index file, cannot read"); + } + + if (strcmp(header.signature,FLANN_SIGNATURE_)!=0) { + FLANN_THROW(cv::Error::StsError, "Invalid index file, wrong signature"); + } + + return header; + +} + + +template +void save_value(FILE* stream, const T& value, size_t count = 1) +{ + fwrite(&value, sizeof(value),count, stream); +} + +template +void save_value(FILE* stream, const cvflann::Matrix& value) +{ + fwrite(&value, sizeof(value),1, stream); + fwrite(value.data, sizeof(T),value.rows*value.cols, stream); +} + +template +void save_value(FILE* stream, const std::vector& value) +{ + size_t size = value.size(); + fwrite(&size, sizeof(size_t), 1, stream); + fwrite(&value[0], sizeof(T), size, stream); +} + +template +void load_value(FILE* stream, T& value, size_t count = 1) +{ + size_t read_cnt = fread(&value, sizeof(value), count, stream); + if (read_cnt != count) { + FLANN_THROW(cv::Error::StsParseError, "Cannot read from file"); + } +} + +template +void load_value(FILE* stream, cvflann::Matrix& value) +{ + 
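+    // Read the Matrix header (its rows/cols bookkeeping) first, then allocate a
+    // buffer and read the raw element data that follows it in the stream.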
size_t read_cnt = fread(&value, sizeof(value), 1, stream); + if (read_cnt != 1) { + FLANN_THROW(cv::Error::StsParseError, "Cannot read from file"); + } + value.data = new T[value.rows*value.cols]; + read_cnt = fread(value.data, sizeof(T), value.rows*value.cols, stream); + if (read_cnt != (size_t)(value.rows*value.cols)) { + FLANN_THROW(cv::Error::StsParseError, "Cannot read from file"); + } +} + + +template +void load_value(FILE* stream, std::vector& value) +{ + size_t size; + size_t read_cnt = fread(&size, sizeof(size_t), 1, stream); + if (read_cnt!=1) { + FLANN_THROW(cv::Error::StsError, "Cannot read from file"); + } + value.resize(size); + read_cnt = fread(&value[0], sizeof(T), size, stream); + if (read_cnt != size) { + FLANN_THROW(cv::Error::StsError, "Cannot read from file"); + } +} + +} + +//! @endcond + +#endif /* OPENCV_FLANN_SAVING_H_ */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/flann/simplex_downhill.h b/third_party/opencv/uos/sw_64/include/opencv2/flann/simplex_downhill.h new file mode 100644 index 00000000..02970148 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/flann/simplex_downhill.h @@ -0,0 +1,190 @@ +/*********************************************************************** + * Software License Agreement (BSD License) + * + * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. + * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. + * + * THE BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *************************************************************************/ + +#ifndef OPENCV_FLANN_SIMPLEX_DOWNHILL_H_ +#define OPENCV_FLANN_SIMPLEX_DOWNHILL_H_ + +//! @cond IGNORED + +namespace cvflann +{ + +/** + Adds val to array vals (and point to array points) and keeping the arrays sorted by vals. 
+ */ +template +void addValue(int pos, float val, float* vals, T* point, T* points, int n) +{ + vals[pos] = val; + for (int i=0; i0 && vals[j] +float optimizeSimplexDownhill(T* points, int n, F func, float* vals = NULL ) +{ + const int MAX_ITERATIONS = 10; + + CV_DbgAssert(n>0); + + T* p_o = new T[n]; + T* p_r = new T[n]; + T* p_e = new T[n]; + + int alpha = 1; + + int iterations = 0; + + bool ownVals = false; + if (vals == NULL) { + ownVals = true; + vals = new float[n+1]; + for (int i=0; i MAX_ITERATIONS) break; + + // compute average of simplex points (except the highest point) + for (int j=0; j=vals[0])&&(val_r=vals[n]) { + for (int i=0; i +#include "opencv2/core.hpp" +#include "opencv2/core/utility.hpp" + +namespace cvflann +{ + +/** + * A start-stop timer class. + * + * Can be used to time portions of code. + */ +class StartStopTimer +{ + int64 startTime; + +public: + /** + * Value of the timer. + */ + double value; + + + /** + * Constructor. + */ + StartStopTimer() + : startTime(0) + { + reset(); + } + + /** + * Starts the timer. + */ + void start() + { + startTime = cv::getTickCount(); + } + + /** + * Stops the timer and updates timer value. + */ + void stop() + { + int64 stopTime = cv::getTickCount(); + value += ( (double)stopTime - startTime) / cv::getTickFrequency(); + } + + /** + * Resets the timer value to 0. + */ + void reset() + { + value = 0; + } + +}; + +} + +//! @endcond + +#endif // FLANN_TIMER_H diff --git a/third_party/opencv/uos/sw_64/include/opencv2/highgui.hpp b/third_party/opencv/uos/sw_64/include/opencv2/highgui.hpp new file mode 100644 index 00000000..6a8c5987 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/highgui.hpp @@ -0,0 +1,857 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_HIGHGUI_HPP +#define OPENCV_HIGHGUI_HPP + +#include "opencv2/core.hpp" +#ifdef HAVE_OPENCV_IMGCODECS +#include "opencv2/imgcodecs.hpp" +#endif +#ifdef HAVE_OPENCV_VIDEOIO +#include "opencv2/videoio.hpp" +#endif + +/** +@defgroup highgui High-level GUI + +While OpenCV was designed for use in full-scale applications and can be used within functionally +rich UI frameworks (such as Qt\*, WinForms\*, or Cocoa\*) or without any UI at all, sometimes there +it is required to try functionality quickly and visualize the results. This is what the HighGUI +module has been designed for. + +It provides easy interface to: + +- Create and manipulate windows that can display images and "remember" their content (no need to + handle repaint events from OS). +- Add trackbars to the windows, handle simple mouse events as well as keyboard commands. + +@{ + @defgroup highgui_window_flags Flags related creating and manipulating HighGUI windows and mouse events + @defgroup highgui_opengl OpenGL support + @defgroup highgui_qt Qt New Functions + + ![image](pics/qtgui.png) + + This figure explains new functionality implemented with Qt\* GUI. The new GUI provides a statusbar, + a toolbar, and a control panel. The control panel can have trackbars and buttonbars attached to it. + If you cannot see the control panel, press Ctrl+P or right-click any Qt window and select **Display + properties window**. + + - To attach a trackbar, the window name parameter must be NULL. + + - To attach a buttonbar, a button must be created. If the last bar attached to the control panel + is a buttonbar, the new button is added to the right of the last button. If the last bar + attached to the control panel is a trackbar, or the control panel is empty, a new buttonbar is + created. Then, a new button is attached to it. + + See below the example used to generate the figure: + @code + int main(int argc, char *argv[]) + { + + int value = 50; + int value2 = 0; + + + namedWindow("main1",WINDOW_NORMAL); + namedWindow("main2",WINDOW_AUTOSIZE | WINDOW_GUI_NORMAL); + createTrackbar( "track1", "main1", &value, 255, NULL); + + String nameb1 = "button1"; + String nameb2 = "button2"; + + createButton(nameb1,callbackButton,&nameb1,QT_CHECKBOX,1); + createButton(nameb2,callbackButton,NULL,QT_CHECKBOX,0); + createTrackbar( "track2", NULL, &value2, 255, NULL); + createButton("button5",callbackButton1,NULL,QT_RADIOBOX,0); + createButton("button6",callbackButton2,NULL,QT_RADIOBOX,1); + + setMouseCallback( "main2",on_mouse,NULL ); + + Mat img1 = imread("files/flower.jpg"); + VideoCapture video; + video.open("files/hockey.avi"); + + Mat img2,img3; + + while( waitKey(33) != 27 ) + { + img1.convertTo(img2,-1,1,value); + video >> img3; + + imshow("main1",img2); + imshow("main2",img3); + } + + destroyAllWindows(); + + return 0; + } + @endcode + + + @defgroup highgui_winrt WinRT support + + This figure explains new functionality implemented with WinRT GUI. 
The new GUI provides an Image control, + and a slider panel. Slider panel holds trackbars attached to it. + + Sliders are attached below the image control. Every new slider is added below the previous one. + + See below the example used to generate the figure: + @code + void sample_app::MainPage::ShowWindow() + { + static cv::String windowName("sample"); + cv::winrt_initContainer(this->cvContainer); + cv::namedWindow(windowName); // not required + + cv::Mat image = cv::imread("Assets/sample.jpg"); + cv::Mat converted = cv::Mat(image.rows, image.cols, CV_8UC4); + cv::cvtColor(image, converted, COLOR_BGR2BGRA); + cv::imshow(windowName, converted); // this will create window if it hasn't been created before + + int state = 42; + cv::TrackbarCallback callback = [](int pos, void* userdata) + { + if (pos == 0) { + cv::destroyWindow(windowName); + } + }; + cv::TrackbarCallback callbackTwin = [](int pos, void* userdata) + { + if (pos >= 70) { + cv::destroyAllWindows(); + } + }; + cv::createTrackbar("Sample trackbar", windowName, &state, 100, callback); + cv::createTrackbar("Twin brother", windowName, &state, 100, callbackTwin); + } + @endcode + + @defgroup highgui_c C API +@} +*/ + +///////////////////////// graphical user interface ////////////////////////// +namespace cv +{ + +//! @addtogroup highgui +//! @{ + +//! @addtogroup highgui_window_flags +//! @{ + +//! Flags for cv::namedWindow +enum WindowFlags { + WINDOW_NORMAL = 0x00000000, //!< the user can resize the window (no constraint) / also use to switch a fullscreen window to a normal size. + WINDOW_AUTOSIZE = 0x00000001, //!< the user cannot resize the window, the size is constrainted by the image displayed. + WINDOW_OPENGL = 0x00001000, //!< window with opengl support. + + WINDOW_FULLSCREEN = 1, //!< change the window to fullscreen. + WINDOW_FREERATIO = 0x00000100, //!< the image expends as much as it can (no ratio constraint). + WINDOW_KEEPRATIO = 0x00000000, //!< the ratio of the image is respected. + WINDOW_GUI_EXPANDED=0x00000000, //!< status bar and tool bar + WINDOW_GUI_NORMAL = 0x00000010, //!< old fashious way + }; + +//! Flags for cv::setWindowProperty / cv::getWindowProperty +enum WindowPropertyFlags { + WND_PROP_FULLSCREEN = 0, //!< fullscreen property (can be WINDOW_NORMAL or WINDOW_FULLSCREEN). + WND_PROP_AUTOSIZE = 1, //!< autosize property (can be WINDOW_NORMAL or WINDOW_AUTOSIZE). + WND_PROP_ASPECT_RATIO = 2, //!< window's aspect ration (can be set to WINDOW_FREERATIO or WINDOW_KEEPRATIO). + WND_PROP_OPENGL = 3, //!< opengl support. + WND_PROP_VISIBLE = 4, //!< checks whether the window exists and is visible + WND_PROP_TOPMOST = 5 //!< property to toggle normal window being topmost or not + }; + +//! Mouse Events see cv::MouseCallback +enum MouseEventTypes { + EVENT_MOUSEMOVE = 0, //!< indicates that the mouse pointer has moved over the window. + EVENT_LBUTTONDOWN = 1, //!< indicates that the left mouse button is pressed. + EVENT_RBUTTONDOWN = 2, //!< indicates that the right mouse button is pressed. + EVENT_MBUTTONDOWN = 3, //!< indicates that the middle mouse button is pressed. + EVENT_LBUTTONUP = 4, //!< indicates that left mouse button is released. + EVENT_RBUTTONUP = 5, //!< indicates that right mouse button is released. + EVENT_MBUTTONUP = 6, //!< indicates that middle mouse button is released. + EVENT_LBUTTONDBLCLK = 7, //!< indicates that left mouse button is double clicked. + EVENT_RBUTTONDBLCLK = 8, //!< indicates that right mouse button is double clicked. 
+ EVENT_MBUTTONDBLCLK = 9, //!< indicates that middle mouse button is double clicked. + EVENT_MOUSEWHEEL = 10,//!< positive and negative values mean forward and backward scrolling, respectively. + EVENT_MOUSEHWHEEL = 11 //!< positive and negative values mean right and left scrolling, respectively. + }; + +//! Mouse Event Flags see cv::MouseCallback +enum MouseEventFlags { + EVENT_FLAG_LBUTTON = 1, //!< indicates that the left mouse button is down. + EVENT_FLAG_RBUTTON = 2, //!< indicates that the right mouse button is down. + EVENT_FLAG_MBUTTON = 4, //!< indicates that the middle mouse button is down. + EVENT_FLAG_CTRLKEY = 8, //!< indicates that CTRL Key is pressed. + EVENT_FLAG_SHIFTKEY = 16,//!< indicates that SHIFT Key is pressed. + EVENT_FLAG_ALTKEY = 32 //!< indicates that ALT Key is pressed. + }; + +//! @} highgui_window_flags + +//! @addtogroup highgui_qt +//! @{ + +//! Qt font weight +enum QtFontWeights { + QT_FONT_LIGHT = 25, //!< Weight of 25 + QT_FONT_NORMAL = 50, //!< Weight of 50 + QT_FONT_DEMIBOLD = 63, //!< Weight of 63 + QT_FONT_BOLD = 75, //!< Weight of 75 + QT_FONT_BLACK = 87 //!< Weight of 87 + }; + +//! Qt font style +enum QtFontStyles { + QT_STYLE_NORMAL = 0, //!< Normal font. + QT_STYLE_ITALIC = 1, //!< Italic font. + QT_STYLE_OBLIQUE = 2 //!< Oblique font. + }; + +//! Qt "button" type +enum QtButtonTypes { + QT_PUSH_BUTTON = 0, //!< Push button. + QT_CHECKBOX = 1, //!< Checkbox button. + QT_RADIOBOX = 2, //!< Radiobox button. + QT_NEW_BUTTONBAR = 1024 //!< Button should create a new buttonbar + }; + +//! @} highgui_qt + +/** @brief Callback function for mouse events. see cv::setMouseCallback +@param event one of the cv::MouseEventTypes constants. +@param x The x-coordinate of the mouse event. +@param y The y-coordinate of the mouse event. +@param flags one of the cv::MouseEventFlags constants. +@param userdata The optional parameter. + */ +typedef void (*MouseCallback)(int event, int x, int y, int flags, void* userdata); + +/** @brief Callback function for Trackbar see cv::createTrackbar +@param pos current position of the specified trackbar. +@param userdata The optional parameter. + */ +typedef void (*TrackbarCallback)(int pos, void* userdata); + +/** @brief Callback function defined to be called every frame. See cv::setOpenGlDrawCallback +@param userdata The optional parameter. + */ +typedef void (*OpenGlDrawCallback)(void* userdata); + +/** @brief Callback function for a button created by cv::createButton +@param state current state of the button. It could be -1 for a push button, 0 or 1 for a check/radio box button. +@param userdata The optional parameter. + */ +typedef void (*ButtonCallback)(int state, void* userdata); + +/** @brief Creates a window. + +The function namedWindow creates a window that can be used as a placeholder for images and +trackbars. Created windows are referred to by their names. + +If a window with the same name already exists, the function does nothing. + +You can call cv::destroyWindow or cv::destroyAllWindows to close the window and de-allocate any associated +memory usage. For a simple program, you do not really have to call these functions because all the +resources and windows of the application are closed automatically by the operating system upon exit. 
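+
+A typical use together with imshow and waitKey (illustrative sketch; the window
+name and the file name are placeholders):
+@code
+    cv::namedWindow("preview", cv::WINDOW_NORMAL);   // resizable window
+    cv::Mat img = cv::imread("input.jpg");
+    if (!img.empty())
+    {
+        cv::imshow("preview", img);
+        cv::waitKey(0);                              // block until a key is pressed
+    }
+    cv::destroyWindow("preview");
+@endcode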
+ +@note + +Qt backend supports additional flags: + - **WINDOW_NORMAL or WINDOW_AUTOSIZE:** WINDOW_NORMAL enables you to resize the + window, whereas WINDOW_AUTOSIZE adjusts automatically the window size to fit the + displayed image (see imshow ), and you cannot change the window size manually. + - **WINDOW_FREERATIO or WINDOW_KEEPRATIO:** WINDOW_FREERATIO adjusts the image + with no respect to its ratio, whereas WINDOW_KEEPRATIO keeps the image ratio. + - **WINDOW_GUI_NORMAL or WINDOW_GUI_EXPANDED:** WINDOW_GUI_NORMAL is the old way to draw the window + without statusbar and toolbar, whereas WINDOW_GUI_EXPANDED is a new enhanced GUI. +By default, flags == WINDOW_AUTOSIZE | WINDOW_KEEPRATIO | WINDOW_GUI_EXPANDED + +@param winname Name of the window in the window caption that may be used as a window identifier. +@param flags Flags of the window. The supported flags are: (cv::WindowFlags) + */ +CV_EXPORTS_W void namedWindow(const String& winname, int flags = WINDOW_AUTOSIZE); + +/** @brief Destroys the specified window. + +The function destroyWindow destroys the window with the given name. + +@param winname Name of the window to be destroyed. + */ +CV_EXPORTS_W void destroyWindow(const String& winname); + +/** @brief Destroys all of the HighGUI windows. + +The function destroyAllWindows destroys all of the opened HighGUI windows. + */ +CV_EXPORTS_W void destroyAllWindows(); + +CV_EXPORTS_W int startWindowThread(); + +/** @brief Similar to #waitKey, but returns full key code. + +@note + +Key code is implementation specific and depends on used backend: QT/GTK/Win32/etc + +*/ +CV_EXPORTS_W int waitKeyEx(int delay = 0); + +/** @brief Waits for a pressed key. + +The function waitKey waits for a key event infinitely (when \f$\texttt{delay}\leq 0\f$ ) or for delay +milliseconds, when it is positive. Since the OS has a minimum time between switching threads, the +function will not wait exactly delay ms, it will wait at least delay ms, depending on what else is +running on your computer at that time. It returns the code of the pressed key or -1 if no key was +pressed before the specified time had elapsed. + +@note + +This function is the only method in HighGUI that can fetch and handle events, so it needs to be +called periodically for normal event processing unless HighGUI is used within an environment that +takes care of event processing. + +@note + +The function only works if there is at least one HighGUI window created and the window is active. +If there are several HighGUI windows, any of them can be active. + +@param delay Delay in milliseconds. 0 is the special value that means "forever". + */ +CV_EXPORTS_W int waitKey(int delay = 0); + +/** @brief Displays an image in the specified window. + +The function imshow displays an image in the specified window. If the window was created with the +cv::WINDOW_AUTOSIZE flag, the image is shown with its original size, however it is still limited by the screen resolution. +Otherwise, the image is scaled to fit the window. The function may scale the image, depending on its depth: + +- If the image is 8-bit unsigned, it is displayed as is. +- If the image is 16-bit unsigned or 32-bit integer, the pixels are divided by 256. That is, the + value range [0,255\*256] is mapped to [0,255]. +- If the image is 32-bit or 64-bit floating-point, the pixel values are multiplied by 255. That is, the + value range [0,1] is mapped to [0,255]. + +If window was created with OpenGL support, cv::imshow also support ogl::Buffer , ogl::Texture2D and +cuda::GpuMat as input. 
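+
+If the automatic scaling described above is not what you want, convert the image
+explicitly before calling imshow (illustrative sketch; the input values are placeholders):
+@code
+    cv::Mat depth16(480, 640, CV_16U, cv::Scalar(12000));   // hypothetical 16-bit data
+    cv::Mat vis;
+    depth16.convertTo(vis, CV_8U, 255.0 / 65535.0);         // map [0,65535] to [0,255] yourself
+    cv::imshow("depth", vis);
+@endcode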
+ +If the window was not created before this function, it is assumed creating a window with cv::WINDOW_AUTOSIZE. + +If you need to show an image that is bigger than the screen resolution, you will need to call namedWindow("", WINDOW_NORMAL) before the imshow. + +@note This function should be followed by cv::waitKey function which displays the image for specified +milliseconds. Otherwise, it won't display the image. For example, **waitKey(0)** will display the window +infinitely until any keypress (it is suitable for image display). **waitKey(25)** will display a frame +for 25 ms, after which display will be automatically closed. (If you put it in a loop to read +videos, it will display the video frame-by-frame) + +@note + +[__Windows Backend Only__] Pressing Ctrl+C will copy the image to the clipboard. + +[__Windows Backend Only__] Pressing Ctrl+S will show a dialog to save the image. + +@param winname Name of the window. +@param mat Image to be shown. + */ +CV_EXPORTS_W void imshow(const String& winname, InputArray mat); + +/** @brief Resizes the window to the specified size + +@note + +- The specified window size is for the image area. Toolbars are not counted. +- Only windows created without cv::WINDOW_AUTOSIZE flag can be resized. + +@param winname Window name. +@param width The new window width. +@param height The new window height. + */ +CV_EXPORTS_W void resizeWindow(const String& winname, int width, int height); + +/** @overload +@param winname Window name. +@param size The new window size. +*/ +CV_EXPORTS_W void resizeWindow(const String& winname, const cv::Size& size); + +/** @brief Moves the window to the specified position + +@param winname Name of the window. +@param x The new x-coordinate of the window. +@param y The new y-coordinate of the window. + */ +CV_EXPORTS_W void moveWindow(const String& winname, int x, int y); + +/** @brief Changes parameters of a window dynamically. + +The function setWindowProperty enables changing properties of a window. + +@param winname Name of the window. +@param prop_id Window property to edit. The supported operation flags are: (cv::WindowPropertyFlags) +@param prop_value New value of the window property. The supported flags are: (cv::WindowFlags) + */ +CV_EXPORTS_W void setWindowProperty(const String& winname, int prop_id, double prop_value); + +/** @brief Updates window title +@param winname Name of the window. +@param title New title. +*/ +CV_EXPORTS_W void setWindowTitle(const String& winname, const String& title); + +/** @brief Provides parameters of a window. + +The function getWindowProperty returns properties of a window. + +@param winname Name of the window. +@param prop_id Window property to retrieve. The following operation flags are available: (cv::WindowPropertyFlags) + +@sa setWindowProperty + */ +CV_EXPORTS_W double getWindowProperty(const String& winname, int prop_id); + +/** @brief Provides rectangle of image in the window. + +The function getWindowImageRect returns the client screen coordinates, width and height of the image rendering area. + +@param winname Name of the window. + +@sa resizeWindow moveWindow + */ +CV_EXPORTS_W Rect getWindowImageRect(const String& winname); + +/** @example samples/cpp/create_mask.cpp +This program demonstrates using mouse events and how to make and use a mask image (black and white) . +*/ +/** @brief Sets mouse handler for the specified window + +@param winname Name of the window. +@param onMouse Callback function for mouse events. See OpenCV samples on how to specify and use the callback. 
+@param userdata The optional parameter passed to the callback. + */ +CV_EXPORTS void setMouseCallback(const String& winname, MouseCallback onMouse, void* userdata = 0); + +/** @brief Gets the mouse-wheel motion delta, when handling mouse-wheel events cv::EVENT_MOUSEWHEEL and +cv::EVENT_MOUSEHWHEEL. + +For regular mice with a scroll-wheel, delta will be a multiple of 120. The value 120 corresponds to +a one notch rotation of the wheel or the threshold for action to be taken and one such action should +occur for each delta. Some high-precision mice with higher-resolution freely-rotating wheels may +generate smaller values. + +For cv::EVENT_MOUSEWHEEL positive and negative values mean forward and backward scrolling, +respectively. For cv::EVENT_MOUSEHWHEEL, where available, positive and negative values mean right and +left scrolling, respectively. + +@note + +Mouse-wheel events are currently supported only on Windows. + +@param flags The mouse callback flags parameter. + */ +CV_EXPORTS int getMouseWheelDelta(int flags); + +/** @brief Allows users to select a ROI on the given image. + +The function creates a window and allows users to select a ROI using the mouse. +Controls: use `space` or `enter` to finish selection, use key `c` to cancel selection (function will return the zero cv::Rect). + +@param windowName name of the window where selection process will be shown. +@param img image to select a ROI. +@param showCrosshair if true crosshair of selection rectangle will be shown. +@param fromCenter if true center of selection will match initial mouse position. In opposite case a corner of +selection rectangle will correspont to the initial mouse position. +@return selected ROI or empty rect if selection canceled. + +@note The function sets it's own mouse callback for specified window using cv::setMouseCallback(windowName, ...). +After finish of work an empty callback will be set for the used window. + */ +CV_EXPORTS_W Rect selectROI(const String& windowName, InputArray img, bool showCrosshair = true, bool fromCenter = false); + +/** @overload + */ +CV_EXPORTS_W Rect selectROI(InputArray img, bool showCrosshair = true, bool fromCenter = false); + +/** @brief Allows users to select multiple ROIs on the given image. + +The function creates a window and allows users to select multiple ROIs using the mouse. +Controls: use `space` or `enter` to finish current selection and start a new one, +use `esc` to terminate multiple ROI selection process. + +@param windowName name of the window where selection process will be shown. +@param img image to select a ROI. +@param boundingBoxes selected ROIs. +@param showCrosshair if true crosshair of selection rectangle will be shown. +@param fromCenter if true center of selection will match initial mouse position. In opposite case a corner of +selection rectangle will correspont to the initial mouse position. + +@note The function sets it's own mouse callback for specified window using cv::setMouseCallback(windowName, ...). +After finish of work an empty callback will be set for the used window. + */ +CV_EXPORTS_W void selectROIs(const String& windowName, InputArray img, + CV_OUT std::vector& boundingBoxes, bool showCrosshair = true, bool fromCenter = false); + +/** @brief Creates a trackbar and attaches it to the specified window. 
+ +The function createTrackbar creates a trackbar (a slider or range control) with the specified name +and range, assigns a variable value to be a position synchronized with the trackbar and specifies +the callback function onChange to be called on the trackbar position change. The created trackbar is +displayed in the specified window winname. + +@note + +[__Qt Backend Only__] winname can be empty (or NULL) if the trackbar should be attached to the +control panel. + +Clicking the label of each trackbar enables editing the trackbar values manually. + +@param trackbarname Name of the created trackbar. +@param winname Name of the window that will be used as a parent of the created trackbar. +@param value Optional pointer to an integer variable whose value reflects the position of the +slider. Upon creation, the slider position is defined by this variable. +@param count Maximal position of the slider. The minimal position is always 0. +@param onChange Pointer to the function to be called every time the slider changes position. This +function should be prototyped as void Foo(int,void\*); , where the first parameter is the trackbar +position and the second parameter is the user data (see the next parameter). If the callback is +the NULL pointer, no callbacks are called, but only value is updated. +@param userdata User data that is passed as is to the callback. It can be used to handle trackbar +events without using global variables. + */ +CV_EXPORTS int createTrackbar(const String& trackbarname, const String& winname, + int* value, int count, + TrackbarCallback onChange = 0, + void* userdata = 0); + +/** @brief Returns the trackbar position. + +The function returns the current position of the specified trackbar. + +@note + +[__Qt Backend Only__] winname can be empty (or NULL) if the trackbar is attached to the control +panel. + +@param trackbarname Name of the trackbar. +@param winname Name of the window that is the parent of the trackbar. + */ +CV_EXPORTS_W int getTrackbarPos(const String& trackbarname, const String& winname); + +/** @brief Sets the trackbar position. + +The function sets the position of the specified trackbar in the specified window. + +@note + +[__Qt Backend Only__] winname can be empty (or NULL) if the trackbar is attached to the control +panel. + +@param trackbarname Name of the trackbar. +@param winname Name of the window that is the parent of trackbar. +@param pos New position. + */ +CV_EXPORTS_W void setTrackbarPos(const String& trackbarname, const String& winname, int pos); + +/** @brief Sets the trackbar maximum position. + +The function sets the maximum position of the specified trackbar in the specified window. + +@note + +[__Qt Backend Only__] winname can be empty (or NULL) if the trackbar is attached to the control +panel. + +@param trackbarname Name of the trackbar. +@param winname Name of the window that is the parent of trackbar. +@param maxval New maximum position. + */ +CV_EXPORTS_W void setTrackbarMax(const String& trackbarname, const String& winname, int maxval); + +/** @brief Sets the trackbar minimum position. + +The function sets the minimum position of the specified trackbar in the specified window. + +@note + +[__Qt Backend Only__] winname can be empty (or NULL) if the trackbar is attached to the control +panel. + +@param trackbarname Name of the trackbar. +@param winname Name of the window that is the parent of trackbar. +@param minval New minimum position. 
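+
+For illustration, the bounds and position of an existing trackbar can be adjusted roughly as
+follows (the trackbar and window names are hypothetical):
+@code
+    // assuming a trackbar "level" was created on window "demo"
+    setTrackbarMin("level", "demo", 10);
+    setTrackbarMax("level", "demo", 90);
+    setTrackbarPos("level", "demo", 50);
+@endcode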
+ */ +CV_EXPORTS_W void setTrackbarMin(const String& trackbarname, const String& winname, int minval); + +//! @addtogroup highgui_opengl OpenGL support +//! @{ + +/** @brief Displays OpenGL 2D texture in the specified window. + +@param winname Name of the window. +@param tex OpenGL 2D texture data. + */ +CV_EXPORTS void imshow(const String& winname, const ogl::Texture2D& tex); + +/** @brief Sets a callback function to be called to draw on top of displayed image. + +The function setOpenGlDrawCallback can be used to draw 3D data on the window. See the example of +callback function below: +@code + void on_opengl(void* param) + { + glLoadIdentity(); + + glTranslated(0.0, 0.0, -1.0); + + glRotatef( 55, 1, 0, 0 ); + glRotatef( 45, 0, 1, 0 ); + glRotatef( 0, 0, 0, 1 ); + + static const int coords[6][4][3] = { + { { +1, -1, -1 }, { -1, -1, -1 }, { -1, +1, -1 }, { +1, +1, -1 } }, + { { +1, +1, -1 }, { -1, +1, -1 }, { -1, +1, +1 }, { +1, +1, +1 } }, + { { +1, -1, +1 }, { +1, -1, -1 }, { +1, +1, -1 }, { +1, +1, +1 } }, + { { -1, -1, -1 }, { -1, -1, +1 }, { -1, +1, +1 }, { -1, +1, -1 } }, + { { +1, -1, +1 }, { -1, -1, +1 }, { -1, -1, -1 }, { +1, -1, -1 } }, + { { -1, -1, +1 }, { +1, -1, +1 }, { +1, +1, +1 }, { -1, +1, +1 } } + }; + + for (int i = 0; i < 6; ++i) { + glColor3ub( i*20, 100+i*10, i*42 ); + glBegin(GL_QUADS); + for (int j = 0; j < 4; ++j) { + glVertex3d(0.2 * coords[i][j][0], 0.2 * coords[i][j][1], 0.2 * coords[i][j][2]); + } + glEnd(); + } + } +@endcode + +@param winname Name of the window. +@param onOpenGlDraw Pointer to the function to be called every frame. This function should be +prototyped as void Foo(void\*) . +@param userdata Pointer passed to the callback function.(__Optional__) + */ +CV_EXPORTS void setOpenGlDrawCallback(const String& winname, OpenGlDrawCallback onOpenGlDraw, void* userdata = 0); + +/** @brief Sets the specified window as current OpenGL context. + +@param winname Name of the window. + */ +CV_EXPORTS void setOpenGlContext(const String& winname); + +/** @brief Force window to redraw its context and call draw callback ( See cv::setOpenGlDrawCallback ). + +@param winname Name of the window. + */ +CV_EXPORTS void updateWindow(const String& winname); + +//! @} highgui_opengl + +//! @addtogroup highgui_qt +//! @{ + +/** @brief QtFont available only for Qt. See cv::fontQt + */ +struct QtFont +{ + const char* nameFont; //!< Name of the font + Scalar color; //!< Color of the font. Scalar(blue_component, green_component, red_component[, alpha_component]) + int font_face; //!< See cv::QtFontStyles + const int* ascii; //!< font data and metrics + const int* greek; + const int* cyrillic; + float hscale, vscale; + float shear; //!< slope coefficient: 0 - normal, >0 - italic + int thickness; //!< See cv::QtFontWeights + float dx; //!< horizontal interval between letters + int line_type; //!< PointSize +}; + +/** @brief Creates the font to draw a text on an image. + +The function fontQt creates a cv::QtFont object. This cv::QtFont is not compatible with putText . + +A basic usage of this function is the following: : +@code + QtFont font = fontQt("Times"); + addText( img1, "Hello World !", Point(50,50), font); +@endcode + +@param nameFont Name of the font. The name should match the name of a system font (such as +*Times*). If the font is not found, a default one is used. +@param pointSize Size of the font. If not specified, equal zero or negative, the point size of the +font is set to a system-dependent default value. Generally, this is 12 points. 
+@param color Color of the font in BGRA where A = 255 is fully transparent. Use the macro CV_RGB +for simplicity. +@param weight Font weight. Available operation flags are : cv::QtFontWeights You can also specify a positive integer for better control. +@param style Font style. Available operation flags are : cv::QtFontStyles +@param spacing Spacing between characters. It can be negative or positive. + */ +CV_EXPORTS QtFont fontQt(const String& nameFont, int pointSize = -1, + Scalar color = Scalar::all(0), int weight = QT_FONT_NORMAL, + int style = QT_STYLE_NORMAL, int spacing = 0); + +/** @brief Draws a text on the image. + +The function addText draws *text* on the image *img* using a specific font *font* (see example cv::fontQt +) + +@param img 8-bit 3-channel image where the text should be drawn. +@param text Text to write on an image. +@param org Point(x,y) where the text should start on an image. +@param font Font to use to draw a text. + */ +CV_EXPORTS void addText( const Mat& img, const String& text, Point org, const QtFont& font); + +/** @brief Draws a text on the image. + +@param img 8-bit 3-channel image where the text should be drawn. +@param text Text to write on an image. +@param org Point(x,y) where the text should start on an image. +@param nameFont Name of the font. The name should match the name of a system font (such as +*Times*). If the font is not found, a default one is used. +@param pointSize Size of the font. If not specified, equal zero or negative, the point size of the +font is set to a system-dependent default value. Generally, this is 12 points. +@param color Color of the font in BGRA where A = 255 is fully transparent. +@param weight Font weight. Available operation flags are : cv::QtFontWeights You can also specify a positive integer for better control. +@param style Font style. Available operation flags are : cv::QtFontStyles +@param spacing Spacing between characters. It can be negative or positive. + */ +CV_EXPORTS_W void addText(const Mat& img, const String& text, Point org, const String& nameFont, int pointSize = -1, Scalar color = Scalar::all(0), + int weight = QT_FONT_NORMAL, int style = QT_STYLE_NORMAL, int spacing = 0); + +/** @brief Displays a text on a window image as an overlay for a specified duration. + +The function displayOverlay displays useful information/tips on top of the window for a certain +amount of time *delayms*. The function does not modify the image, displayed in the window, that is, +after the specified delay the original content of the window is restored. + +@param winname Name of the window. +@param text Overlay text to write on a window image. +@param delayms The period (in milliseconds), during which the overlay text is displayed. If this +function is called before the previous overlay text timed out, the timer is restarted and the text +is updated. If this value is zero, the text never disappears. + */ +CV_EXPORTS_W void displayOverlay(const String& winname, const String& text, int delayms = 0); + +/** @brief Displays a text on the window statusbar during the specified period of time. + +The function displayStatusBar displays useful information/tips on top of the window for a certain +amount of time *delayms* . This information is displayed on the window statusbar (the window must be +created with the CV_GUI_EXPANDED flags). + +@param winname Name of the window. +@param text Text to write on the window statusbar. +@param delayms Duration (in milliseconds) to display the text. 
If this function is called before
+the previous text timed out, the timer is restarted and the text is updated. If this value is
+zero, the text never disappears.
+ */
+CV_EXPORTS_W void displayStatusBar(const String& winname, const String& text, int delayms = 0);
+
+/** @brief Saves parameters of the specified window.
+
+The function saveWindowParameters saves size, location, flags, trackbars value, zoom and panning
+location of the window windowName.
+
+@param windowName Name of the window.
+ */
+CV_EXPORTS void saveWindowParameters(const String& windowName);
+
+/** @brief Loads parameters of the specified window.
+
+The function loadWindowParameters loads size, location, flags, trackbars value, zoom and panning
+location of the window windowName.
+
+@param windowName Name of the window.
+ */
+CV_EXPORTS void loadWindowParameters(const String& windowName);
+
+CV_EXPORTS int startLoop(int (*pt2Func)(int argc, char *argv[]), int argc, char* argv[]);
+
+CV_EXPORTS void stopLoop();
+
+/** @brief Attaches a button to the control panel.
+
+The function createButton attaches a button to the control panel. Each button is added to a
+buttonbar to the right of the last button. A new buttonbar is created if nothing was attached to the
+control panel before, if the last element attached to the control panel was a trackbar, or if the
+QT_NEW_BUTTONBAR flag is added to the type.
+
+See below various examples of the cv::createButton function call:
+@code
+    createButton(NULL,callbackButton);//create a push button "button 0", that will call callbackButton.
+    createButton("button2",callbackButton,NULL,QT_CHECKBOX,0);
+    createButton("button3",callbackButton,&value);
+    createButton("button5",callbackButton1,NULL,QT_RADIOBOX);
+    createButton("button6",callbackButton2,NULL,QT_PUSH_BUTTON,1);
+    createButton("button6",callbackButton2,NULL,QT_PUSH_BUTTON|QT_NEW_BUTTONBAR);// create a push button in a new row
+@endcode
+
+@param bar_name Name of the button.
+@param on_change Pointer to the function to be called every time the button changes its state.
+This function should be prototyped as void Foo(int state, void\*); . *state* is the current state
+of the button. It could be -1 for a push button, 0 or 1 for a check/radio box button.
+@param userdata Pointer passed to the callback function.
+@param type Optional type of the button. Available types are: (cv::QtButtonTypes)
+@param initial_button_state Default state of the button. Use for checkbox and radiobox. Its
+value could be 0 or 1. (__Optional__)
+*/
+CV_EXPORTS int createButton( const String& bar_name, ButtonCallback on_change,
+                             void* userdata = 0, int type = QT_PUSH_BUTTON,
+                             bool initial_button_state = false);
+
+//! @} highgui_qt
+
+//! @} highgui
+
+} // cv
+
+#ifndef DISABLE_OPENCV_24_COMPATIBILITY
+#include "opencv2/highgui/highgui_c.h"
+#endif
+
+#endif
diff --git a/third_party/opencv/uos/sw_64/include/opencv2/highgui/highgui.hpp b/third_party/opencv/uos/sw_64/include/opencv2/highgui/highgui.hpp
new file mode 100644
index 00000000..160c9cf4
--- /dev/null
+++ b/third_party/opencv/uos/sw_64/include/opencv2/highgui/highgui.hpp
@@ -0,0 +1,48 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+//  By downloading, copying, installing or using the software you agree to this license.
+//  If you do not agree to this license, do not download, install,
+//  copy or use the software.
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifdef __OPENCV_BUILD +#error this is a compatibility header which should not be used inside the OpenCV library +#endif + +#include "opencv2/highgui.hpp" diff --git a/third_party/opencv/uos/sw_64/include/opencv2/highgui/highgui_c.h b/third_party/opencv/uos/sw_64/include/opencv2/highgui/highgui_c.h new file mode 100644 index 00000000..35413139 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/highgui/highgui_c.h @@ -0,0 +1,262 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_HIGHGUI_H +#define OPENCV_HIGHGUI_H + +#include "opencv2/core/core_c.h" +#include "opencv2/imgproc/imgproc_c.h" +#ifdef HAVE_OPENCV_IMGCODECS +#include "opencv2/imgcodecs/imgcodecs_c.h" +#endif +#ifdef HAVE_OPENCV_VIDEOIO +#include "opencv2/videoio/videoio_c.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** @addtogroup highgui_c + @{ + */ + +/****************************************************************************************\ +* Basic GUI functions * +\****************************************************************************************/ +//YV +//-----------New for Qt +/* For font */ +enum { CV_FONT_LIGHT = 25,//QFont::Light, + CV_FONT_NORMAL = 50,//QFont::Normal, + CV_FONT_DEMIBOLD = 63,//QFont::DemiBold, + CV_FONT_BOLD = 75,//QFont::Bold, + CV_FONT_BLACK = 87 //QFont::Black +}; + +enum { CV_STYLE_NORMAL = 0,//QFont::StyleNormal, + CV_STYLE_ITALIC = 1,//QFont::StyleItalic, + CV_STYLE_OBLIQUE = 2 //QFont::StyleOblique +}; +/* ---------*/ + +//for color cvScalar(blue_component, green_component, red_component[, alpha_component]) +//and alpha= 0 <-> 0xFF (not transparent <-> transparent) +CVAPI(CvFont) cvFontQt(const char* nameFont, int pointSize CV_DEFAULT(-1), CvScalar color CV_DEFAULT(cvScalarAll(0)), int weight CV_DEFAULT(CV_FONT_NORMAL), int style CV_DEFAULT(CV_STYLE_NORMAL), int spacing CV_DEFAULT(0)); + +CVAPI(void) cvAddText(const CvArr* img, const char* text, CvPoint org, CvFont *arg2); + +CVAPI(void) cvDisplayOverlay(const char* name, const char* text, int delayms CV_DEFAULT(0)); +CVAPI(void) cvDisplayStatusBar(const char* name, const char* text, int delayms CV_DEFAULT(0)); + +CVAPI(void) cvSaveWindowParameters(const char* name); +CVAPI(void) cvLoadWindowParameters(const char* name); +CVAPI(int) cvStartLoop(int (*pt2Func)(int argc, char *argv[]), int argc, char* argv[]); +CVAPI(void) cvStopLoop( void ); + +typedef void (CV_CDECL *CvButtonCallback)(int state, void* userdata); +enum {CV_PUSH_BUTTON = 0, CV_CHECKBOX = 1, CV_RADIOBOX = 2}; +CVAPI(int) cvCreateButton( const char* button_name CV_DEFAULT(NULL),CvButtonCallback on_change CV_DEFAULT(NULL), void* userdata CV_DEFAULT(NULL) , int button_type CV_DEFAULT(CV_PUSH_BUTTON), int initial_button_state CV_DEFAULT(0)); +//---------------------- + + +/* this function is used to set some external parameters in case of X Window */ +CVAPI(int) cvInitSystem( int argc, char** argv ); + +CVAPI(int) cvStartWindowThread( void ); + +// --------- YV --------- +enum +{ + //These 3 flags are used by cvSet/GetWindowProperty + CV_WND_PROP_FULLSCREEN = 0, //to change/get window's fullscreen property + CV_WND_PROP_AUTOSIZE = 1, //to change/get window's autosize property + CV_WND_PROP_ASPECTRATIO= 2, 
//to change/get window's aspectratio property
+    CV_WND_PROP_OPENGL     = 3, //to change/get window's opengl support
+    CV_WND_PROP_VISIBLE    = 4,
+
+    //These 2 flags are used by cvNamedWindow and cvSet/GetWindowProperty
+    CV_WINDOW_NORMAL       = 0x00000000, //the user can resize the window (no constraint)  / also use to switch a fullscreen window to a normal size
+    CV_WINDOW_AUTOSIZE     = 0x00000001, //the user cannot resize the window, the size is constrained by the image displayed
+    CV_WINDOW_OPENGL       = 0x00001000, //window with opengl support
+
+    //Those flags are only for Qt
+    CV_GUI_EXPANDED         = 0x00000000, //status bar and tool bar
+    CV_GUI_NORMAL           = 0x00000010, //old-fashioned way
+
+    //These 3 flags are used by cvNamedWindow and cvSet/GetWindowProperty
+    CV_WINDOW_FULLSCREEN   = 1,//change the window to fullscreen
+    CV_WINDOW_FREERATIO    = 0x00000100,//the image expands as much as it can (no ratio constraint)
+    CV_WINDOW_KEEPRATIO    = 0x00000000//the ratio of the image is respected.
+};
+
+/* create window */
+CVAPI(int) cvNamedWindow( const char* name, int flags CV_DEFAULT(CV_WINDOW_AUTOSIZE) );
+
+/* Set and Get Property of the window */
+CVAPI(void) cvSetWindowProperty(const char* name, int prop_id, double prop_value);
+CVAPI(double) cvGetWindowProperty(const char* name, int prop_id);
+
+#ifdef __cplusplus // FIXIT remove in OpenCV 4.0
+/* Get window image rectangle coordinates, width and height */
+CVAPI(cv::Rect)cvGetWindowImageRect(const char* name);
+#endif
+
+/* display image within window (highgui windows remember their content) */
+CVAPI(void) cvShowImage( const char* name, const CvArr* image );
+
+/* resize/move window */
+CVAPI(void) cvResizeWindow( const char* name, int width, int height );
+CVAPI(void) cvMoveWindow( const char* name, int x, int y );
+
+
+/* destroy window and all the trackers associated with it */
+CVAPI(void) cvDestroyWindow( const char* name );
+
+CVAPI(void) cvDestroyAllWindows(void);
+
+/* get native window handle (HWND in case of Win32 and Widget in case of X Window) */
+CVAPI(void*) cvGetWindowHandle( const char* name );
+
+/* get name of highgui window given its native handle */
+CVAPI(const char*) cvGetWindowName( void* window_handle );
+
+
+typedef void (CV_CDECL *CvTrackbarCallback)(int pos);
+
+/* create trackbar and display it on top of given window, set callback */
+CVAPI(int) cvCreateTrackbar( const char* trackbar_name, const char* window_name,
+                             int* value, int count, CvTrackbarCallback on_change CV_DEFAULT(NULL));
+
+typedef void (CV_CDECL *CvTrackbarCallback2)(int pos, void* userdata);
+
+CVAPI(int) cvCreateTrackbar2( const char* trackbar_name, const char* window_name,
+                              int* value, int count, CvTrackbarCallback2 on_change,
+                              void* userdata CV_DEFAULT(0));
+
+/* retrieve or set trackbar position */
+CVAPI(int) cvGetTrackbarPos( const char* trackbar_name, const char* window_name );
+CVAPI(void) cvSetTrackbarPos( const char* trackbar_name, const char* window_name, int pos );
+CVAPI(void) cvSetTrackbarMax(const char* trackbar_name, const char* window_name, int maxval);
+CVAPI(void) cvSetTrackbarMin(const char* trackbar_name, const char* window_name, int minval);
+
+enum
+{
+    CV_EVENT_MOUSEMOVE      =0,
+    CV_EVENT_LBUTTONDOWN    =1,
+    CV_EVENT_RBUTTONDOWN    =2,
+    CV_EVENT_MBUTTONDOWN    =3,
+    CV_EVENT_LBUTTONUP      =4,
+    CV_EVENT_RBUTTONUP      =5,
+    CV_EVENT_MBUTTONUP      =6,
+    CV_EVENT_LBUTTONDBLCLK  =7,
+    CV_EVENT_RBUTTONDBLCLK  =8,
+    CV_EVENT_MBUTTONDBLCLK  =9,
+    CV_EVENT_MOUSEWHEEL     =10,
+    CV_EVENT_MOUSEHWHEEL    =11
+};
+
+enum
+{
+    CV_EVENT_FLAG_LBUTTON   =1,
+    CV_EVENT_FLAG_RBUTTON   =2,
+    
CV_EVENT_FLAG_MBUTTON =4, + CV_EVENT_FLAG_CTRLKEY =8, + CV_EVENT_FLAG_SHIFTKEY =16, + CV_EVENT_FLAG_ALTKEY =32 +}; + + +#define CV_GET_WHEEL_DELTA(flags) ((short)((flags >> 16) & 0xffff)) // upper 16 bits + +typedef void (CV_CDECL *CvMouseCallback )(int event, int x, int y, int flags, void* param); + +/* assign callback for mouse events */ +CVAPI(void) cvSetMouseCallback( const char* window_name, CvMouseCallback on_mouse, + void* param CV_DEFAULT(NULL)); + +/* wait for key event infinitely (delay<=0) or for "delay" milliseconds */ +CVAPI(int) cvWaitKey(int delay CV_DEFAULT(0)); + +// OpenGL support + +typedef void (CV_CDECL *CvOpenGlDrawCallback)(void* userdata); +CVAPI(void) cvSetOpenGlDrawCallback(const char* window_name, CvOpenGlDrawCallback callback, void* userdata CV_DEFAULT(NULL)); + +CVAPI(void) cvSetOpenGlContext(const char* window_name); +CVAPI(void) cvUpdateWindow(const char* window_name); + + +/****************************************************************************************\ + +* Obsolete functions/synonyms * +\****************************************************************************************/ + +#define cvAddSearchPath(path) +#define cvvInitSystem cvInitSystem +#define cvvNamedWindow cvNamedWindow +#define cvvShowImage cvShowImage +#define cvvResizeWindow cvResizeWindow +#define cvvDestroyWindow cvDestroyWindow +#define cvvCreateTrackbar cvCreateTrackbar +#define cvvAddSearchPath cvAddSearchPath +#define cvvWaitKey(name) cvWaitKey(0) +#define cvvWaitKeyEx(name,delay) cvWaitKey(delay) +#define HG_AUTOSIZE CV_WINDOW_AUTOSIZE +#define set_preprocess_func cvSetPreprocessFuncWin32 +#define set_postprocess_func cvSetPostprocessFuncWin32 + +#if defined _WIN32 + +CVAPI(void) cvSetPreprocessFuncWin32_(const void* callback); +CVAPI(void) cvSetPostprocessFuncWin32_(const void* callback); +#define cvSetPreprocessFuncWin32(callback) cvSetPreprocessFuncWin32_((const void*)(callback)) +#define cvSetPostprocessFuncWin32(callback) cvSetPostprocessFuncWin32_((const void*)(callback)) + +#endif + +/** @} highgui_c */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/imgcodecs.hpp b/third_party/opencv/uos/sw_64/include/opencv2/imgcodecs.hpp new file mode 100644 index 00000000..41fcc796 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/imgcodecs.hpp @@ -0,0 +1,278 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_IMGCODECS_HPP +#define OPENCV_IMGCODECS_HPP + +#include "opencv2/core.hpp" + +/** + @defgroup imgcodecs Image file reading and writing + @{ + @defgroup imgcodecs_c C API + @defgroup imgcodecs_flags Flags used for image file reading and writing + @defgroup imgcodecs_ios iOS glue + @} +*/ + +//////////////////////////////// image codec //////////////////////////////// +namespace cv +{ + +//! @addtogroup imgcodecs +//! @{ + +//! @addtogroup imgcodecs_flags +//! @{ + +//! Imread flags +enum ImreadModes { + IMREAD_UNCHANGED = -1, //!< If set, return the loaded image as is (with alpha channel, otherwise it gets cropped). Ignore EXIF orientation. + IMREAD_GRAYSCALE = 0, //!< If set, always convert image to the single channel grayscale image (codec internal conversion). + IMREAD_COLOR = 1, //!< If set, always convert image to the 3 channel BGR color image. + IMREAD_ANYDEPTH = 2, //!< If set, return 16-bit/32-bit image when the input has the corresponding depth, otherwise convert it to 8-bit. + IMREAD_ANYCOLOR = 4, //!< If set, the image is read in any possible color format. + IMREAD_LOAD_GDAL = 8, //!< If set, use the gdal driver for loading the image. + IMREAD_REDUCED_GRAYSCALE_2 = 16, //!< If set, always convert image to the single channel grayscale image and the image size reduced 1/2. + IMREAD_REDUCED_COLOR_2 = 17, //!< If set, always convert image to the 3 channel BGR color image and the image size reduced 1/2. + IMREAD_REDUCED_GRAYSCALE_4 = 32, //!< If set, always convert image to the single channel grayscale image and the image size reduced 1/4. + IMREAD_REDUCED_COLOR_4 = 33, //!< If set, always convert image to the 3 channel BGR color image and the image size reduced 1/4. + IMREAD_REDUCED_GRAYSCALE_8 = 64, //!< If set, always convert image to the single channel grayscale image and the image size reduced 1/8. + IMREAD_REDUCED_COLOR_8 = 65, //!< If set, always convert image to the 3 channel BGR color image and the image size reduced 1/8. + IMREAD_IGNORE_ORIENTATION = 128 //!< If set, do not rotate the image according to EXIF's orientation flag. + }; + +//! Imwrite flags +enum ImwriteFlags { + IMWRITE_JPEG_QUALITY = 1, //!< For JPEG, it can be a quality from 0 to 100 (the higher is the better). Default value is 95. + IMWRITE_JPEG_PROGRESSIVE = 2, //!< Enable JPEG features, 0 or 1, default is False. + IMWRITE_JPEG_OPTIMIZE = 3, //!< Enable JPEG features, 0 or 1, default is False. + IMWRITE_JPEG_RST_INTERVAL = 4, //!< JPEG restart interval, 0 - 65535, default is 0 - no restart. 
+    IMWRITE_JPEG_LUMA_QUALITY   = 5,  //!< Separate luma quality level, 0 - 100, default is 0 - don't use.
+    IMWRITE_JPEG_CHROMA_QUALITY = 6,  //!< Separate chroma quality level, 0 - 100, default is 0 - don't use.
+    IMWRITE_PNG_COMPRESSION     = 16, //!< For PNG, it can be the compression level from 0 to 9. A higher value means a smaller size and longer compression time. If specified, strategy is changed to IMWRITE_PNG_STRATEGY_DEFAULT (Z_DEFAULT_STRATEGY). Default value is 1 (best speed setting).
+    IMWRITE_PNG_STRATEGY        = 17, //!< One of cv::ImwritePNGFlags, default is IMWRITE_PNG_STRATEGY_RLE.
+    IMWRITE_PNG_BILEVEL         = 18, //!< Binary level PNG, 0 or 1, default is 0.
+    IMWRITE_PXM_BINARY          = 32, //!< For PPM, PGM, or PBM, it can be a binary format flag, 0 or 1. Default value is 1.
+    IMWRITE_EXR_TYPE = (3 << 4) + 0, /* 48 */ //!< override EXR storage type (FLOAT (FP32) is default)
+    IMWRITE_WEBP_QUALITY        = 64, //!< For WEBP, it can be a quality from 1 to 100 (the higher is the better). By default (without any parameter) and for quality above 100 the lossless compression is used.
+    IMWRITE_PAM_TUPLETYPE       = 128,//!< For PAM, sets the TUPLETYPE field to the corresponding string value that is defined for the format
+    IMWRITE_TIFF_RESUNIT = 256,//!< For TIFF, use to specify which DPI resolution unit to set; see libtiff documentation for valid values.
+    IMWRITE_TIFF_XDPI = 257,//!< For TIFF, use to specify the X direction DPI.
+    IMWRITE_TIFF_YDPI = 258, //!< For TIFF, use to specify the Y direction DPI.
+    IMWRITE_TIFF_COMPRESSION = 259 //!< For TIFF, use to specify the image compression scheme. See libtiff for integer constants corresponding to compression formats. Note, for images whose depth is CV_32F, only libtiff's SGILOG compression scheme is used. For other supported depths, the compression scheme can be specified by this flag; LZW compression is the default.
+  };
+
+enum ImwriteEXRTypeFlags {
+       /*IMWRITE_EXR_TYPE_UNIT = 0, //!< not supported */
+       IMWRITE_EXR_TYPE_HALF   = 1, //!< store as HALF (FP16)
+       IMWRITE_EXR_TYPE_FLOAT  = 2  //!< store as FP32 (default)
+     };
+
+//! Imwrite PNG specific flags used to tune the compression algorithm.
+/** These flags will modify the way PNG images are compressed and will be passed to the underlying zlib processing stage.
+
+- The effect of IMWRITE_PNG_STRATEGY_FILTERED is to force more Huffman coding and less string matching; it is somewhat intermediate between IMWRITE_PNG_STRATEGY_DEFAULT and IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY.
+- IMWRITE_PNG_STRATEGY_RLE is designed to be almost as fast as IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY, but gives better compression for PNG image data.
+- The strategy parameter only affects the compression ratio but not the correctness of the compressed output even if it is not set appropriately.
+- IMWRITE_PNG_STRATEGY_FIXED prevents the use of dynamic Huffman codes, allowing for a simpler decoder for special applications.
+*/
+enum ImwritePNGFlags {
+       IMWRITE_PNG_STRATEGY_DEFAULT      = 0, //!< Use this value for normal data.
+       IMWRITE_PNG_STRATEGY_FILTERED     = 1, //!< Use this value for data produced by a filter (or predictor). Filtered data consists mostly of small values with a somewhat random distribution. In this case, the compression algorithm is tuned to compress them better.
+       IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY = 2, //!< Use this value to force Huffman encoding only (no string match).
+       IMWRITE_PNG_STRATEGY_RLE          = 3, //!< Use this value to limit match distances to one (run-length encoding).
+       IMWRITE_PNG_STRATEGY_FIXED        = 4  //!< Using this value prevents the use of dynamic Huffman codes, allowing for a simpler decoder for special applications.
+     };
+
+//! Imwrite PAM specific tupletype flags used to define the 'TUPLETYPE' field of a PAM file.
+enum ImwritePAMFlags {
+       IMWRITE_PAM_FORMAT_NULL = 0,
+       IMWRITE_PAM_FORMAT_BLACKANDWHITE = 1,
+       IMWRITE_PAM_FORMAT_GRAYSCALE = 2,
+       IMWRITE_PAM_FORMAT_GRAYSCALE_ALPHA = 3,
+       IMWRITE_PAM_FORMAT_RGB = 4,
+       IMWRITE_PAM_FORMAT_RGB_ALPHA = 5,
+     };
+
+//! @} imgcodecs_flags
+
+/** @brief Loads an image from a file.
+
+@anchor imread
+
+The function imread loads an image from the specified file and returns it. If the image cannot be
+read (because of a missing file, improper permissions, or an unsupported or invalid format), the function
+returns an empty matrix ( Mat::data==NULL ).
+
+Currently, the following file formats are supported:
+
+- Windows bitmaps - \*.bmp, \*.dib (always supported)
+- JPEG files - \*.jpeg, \*.jpg, \*.jpe (see the *Note* section)
+- JPEG 2000 files - \*.jp2 (see the *Note* section)
+- Portable Network Graphics - \*.png (see the *Note* section)
+- WebP - \*.webp (see the *Note* section)
+- Portable image format - \*.pbm, \*.pgm, \*.ppm, \*.pxm, \*.pnm (always supported)
+- Sun rasters - \*.sr, \*.ras (always supported)
+- TIFF files - \*.tiff, \*.tif (see the *Note* section)
+- OpenEXR Image files - \*.exr (see the *Note* section)
+- Radiance HDR - \*.hdr, \*.pic (always supported)
+- Raster and Vector geospatial data supported by GDAL (see the *Note* section)
+
+@note
+- The function determines the type of an image by the content, not by the file extension.
+- In the case of color images, the decoded images will have the channels stored in **B G R** order.
+- When using IMREAD_GRAYSCALE, the codec's internal grayscale conversion will be used, if available.
+  Results may differ from the output of cvtColor().
+- On Microsoft Windows\* OS and MacOSX\*, the codecs shipped with an OpenCV image (libjpeg,
+  libpng, libtiff, and libjasper) are used by default. So, OpenCV can always read JPEGs, PNGs,
+  and TIFFs. On MacOSX, there is also an option to use native MacOSX image readers. But beware
+  that currently these native image loaders give images with different pixel values because of
+  the color management embedded into MacOSX.
+- On Linux\*, BSD flavors and other Unix-like open-source operating systems, OpenCV looks for
+  codecs supplied with an OS image. Install the relevant packages (do not forget the development
+  files, for example, "libjpeg-dev", in Debian\* and Ubuntu\*) to get the codec support or turn
+  on the OPENCV_BUILD_3RDPARTY_LIBS flag in CMake.
+- In case you set the *WITH_GDAL* flag to true in CMake and use @ref IMREAD_LOAD_GDAL to load the image,
+  then the [GDAL](http://www.gdal.org) driver will be used in order to decode the image, supporting
+  the following formats: [Raster](http://www.gdal.org/formats_list.html),
+  [Vector](http://www.gdal.org/ogr_formats.html).
+- If EXIF information is embedded in the image file, the EXIF orientation will be taken into account
+  and thus the image will be rotated accordingly, except if the flags @ref IMREAD_IGNORE_ORIENTATION
+  or @ref IMREAD_UNCHANGED are passed.
+- By default, the number of pixels must be less than 2^30. The limit can be set using the system
+  variable OPENCV_IO_MAX_IMAGE_PIXELS.
+
+@param filename Name of file to be loaded.
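+For example (an illustrative call; the file name is hypothetical):
+@code
+    Mat img = imread("input.jpg", IMREAD_COLOR);
+    if (img.empty())
+    {
+        // the image could not be read
+    }
+@endcode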
+@param flags Flag that can take values of cv::ImreadModes
+*/
+CV_EXPORTS_W Mat imread( const String& filename, int flags = IMREAD_COLOR );
+
+/** @brief Loads a multi-page image from a file.
+
+The function imreadmulti loads a multi-page image from the specified file into a vector of Mat objects.
+@param filename Name of file to be loaded.
+@param flags Flag that can take values of cv::ImreadModes, default with cv::IMREAD_ANYCOLOR.
+@param mats A vector of Mat objects holding each page, if more than one.
+@sa cv::imread
+*/
+CV_EXPORTS_W bool imreadmulti(const String& filename, CV_OUT std::vector<Mat>& mats, int flags = IMREAD_ANYCOLOR);
+
+/** @brief Saves an image to a specified file.
+
+The function imwrite saves the image to the specified file. The image format is chosen based on the
+filename extension (see cv::imread for the list of extensions). In general, only 8-bit
+single-channel or 3-channel (with 'BGR' channel order) images
+can be saved using this function, with these exceptions:
+
+- 16-bit unsigned (CV_16U) images can be saved in the case of PNG, JPEG 2000, and TIFF formats
+- 32-bit float (CV_32F) images can be saved in TIFF, OpenEXR, and Radiance HDR formats; 3-channel
+(CV_32FC3) TIFF images will be saved using the LogLuv high dynamic range encoding (4 bytes per pixel)
+- PNG images with an alpha channel can be saved using this function. To do this, create
+8-bit (or 16-bit) 4-channel image BGRA, where the alpha channel goes last. Fully transparent pixels
+should have alpha set to 0, fully opaque pixels should have alpha set to 255/65535 (see the code sample below).
+- Multiple images (vector of Mat) can be saved in TIFF format (see the code sample below).
+
+If the image format is not supported, the image will be converted to 8-bit unsigned (CV_8U) and saved that way.
+
+If the format, depth or channel order is different, use
+Mat::convertTo and cv::cvtColor to convert it before saving. Or, use the universal FileStorage I/O
+functions to save the image to XML or YAML format.
+
+The sample below shows how to create a BGRA image, how to set custom compression parameters and save it to a PNG file.
+It also demonstrates how to save multiple images in a TIFF file:
+@include snippets/imgcodecs_imwrite.cpp
+@param filename Name of the file.
+@param img (Mat or vector of Mat) Image or Images to be saved.
+@param params Format-specific parameters encoded as pairs (paramId_1, paramValue_1, paramId_2, paramValue_2, ...), see cv::ImwriteFlags
+*/
+CV_EXPORTS_W bool imwrite( const String& filename, InputArray img,
+              const std::vector<int>& params = std::vector<int>());
+
+/// @overload multi-image overload for bindings
+CV_WRAP static inline
+bool imwritemulti(const String& filename, InputArrayOfArrays img,
+                  const std::vector<int>& params = std::vector<int>())
+{
+    return imwrite(filename, img, params);
+}
+
+/** @brief Reads an image from a buffer in memory.
+
+The function imdecode reads an image from the specified buffer in the memory. If the buffer is too short or
+contains invalid data, the function returns an empty matrix ( Mat::data==NULL ).
+
+See cv::imread for the list of supported formats and flags description.
+
+@note In the case of color images, the decoded images will have the channels stored in **B G R** order.
+@param buf Input array or vector of bytes.
+@param flags The same flags as in cv::imread, see cv::ImreadModes.
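+
+A short illustrative sketch (the helper that fills the buffer is hypothetical, not part of this API):
+@code
+    std::vector<uchar> bytes = loadBytesSomehow("image.png");  // hypothetical helper returning the raw file contents
+    Mat img = imdecode(bytes, IMREAD_COLOR);
+    if (img.empty())
+    {
+        // the buffer did not contain a decodable image
+    }
+@endcode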
+*/
+CV_EXPORTS_W Mat imdecode( InputArray buf, int flags );
+
+/** @overload
+@param buf
+@param flags
+@param dst The optional output placeholder for the decoded matrix. It can save the image
+reallocations when the function is called repeatedly for images of the same size.
+*/
+CV_EXPORTS Mat imdecode( InputArray buf, int flags, Mat* dst);
+
+/** @brief Encodes an image into a memory buffer.
+
+The function imencode compresses the image and stores it in the memory buffer that is resized to fit the
+result. See cv::imwrite for the list of supported formats and flags description.
+
+@param ext File extension that defines the output format.
+@param img Image to be written.
+@param buf Output buffer resized to fit the compressed image.
+@param params Format-specific parameters. See cv::imwrite and cv::ImwriteFlags.
+*/
+CV_EXPORTS_W bool imencode( const String& ext, InputArray img,
+                            CV_OUT std::vector<uchar>& buf,
+                            const std::vector<int>& params = std::vector<int>());
+
+//! @} imgcodecs
+
+} // cv
+
+#endif //OPENCV_IMGCODECS_HPP
diff --git a/third_party/opencv/uos/sw_64/include/opencv2/imgcodecs/imgcodecs.hpp b/third_party/opencv/uos/sw_64/include/opencv2/imgcodecs/imgcodecs.hpp
new file mode 100644
index 00000000..a3cd2326
--- /dev/null
+++ b/third_party/opencv/uos/sw_64/include/opencv2/imgcodecs/imgcodecs.hpp
@@ -0,0 +1,48 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+//  By downloading, copying, installing or using the software you agree to this license.
+//  If you do not agree to this license, do not download, install,
+//  copy or use the software.
+//
+//
+//                          License Agreement
+//                For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+//   * Redistribution's of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//
+//   * Redistribution's in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//
+//   * The name of the copyright holders may not be used to endorse or promote products
+//     derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+// +//M*/ + +#ifdef __OPENCV_BUILD +#error this is a compatibility header which should not be used inside the OpenCV library +#endif + +#include "opencv2/imgcodecs.hpp" diff --git a/third_party/opencv/uos/sw_64/include/opencv2/imgcodecs/imgcodecs_c.h b/third_party/opencv/uos/sw_64/include/opencv2/imgcodecs/imgcodecs_c.h new file mode 100644 index 00000000..c36dac33 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/imgcodecs/imgcodecs_c.h @@ -0,0 +1,149 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_IMGCODECS_H +#define OPENCV_IMGCODECS_H + +#include "opencv2/core/core_c.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** @addtogroup imgcodecs_c + @{ + */ + +enum +{ +/* 8bit, color or not */ + CV_LOAD_IMAGE_UNCHANGED =-1, +/* 8bit, gray */ + CV_LOAD_IMAGE_GRAYSCALE =0, +/* ?, color */ + CV_LOAD_IMAGE_COLOR =1, +/* any depth, ? 
*/ + CV_LOAD_IMAGE_ANYDEPTH =2, +/* ?, any color */ + CV_LOAD_IMAGE_ANYCOLOR =4, +/* ?, no rotate */ + CV_LOAD_IMAGE_IGNORE_ORIENTATION =128 +}; + +/* load image from file + iscolor can be a combination of above flags where CV_LOAD_IMAGE_UNCHANGED + overrides the other flags + using CV_LOAD_IMAGE_ANYCOLOR alone is equivalent to CV_LOAD_IMAGE_UNCHANGED + unless CV_LOAD_IMAGE_ANYDEPTH is specified images are converted to 8bit +*/ +CVAPI(IplImage*) cvLoadImage( const char* filename, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR)); +CVAPI(CvMat*) cvLoadImageM( const char* filename, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR)); + +enum +{ + CV_IMWRITE_JPEG_QUALITY =1, + CV_IMWRITE_JPEG_PROGRESSIVE =2, + CV_IMWRITE_JPEG_OPTIMIZE =3, + CV_IMWRITE_JPEG_RST_INTERVAL =4, + CV_IMWRITE_JPEG_LUMA_QUALITY =5, + CV_IMWRITE_JPEG_CHROMA_QUALITY =6, + CV_IMWRITE_PNG_COMPRESSION =16, + CV_IMWRITE_PNG_STRATEGY =17, + CV_IMWRITE_PNG_BILEVEL =18, + CV_IMWRITE_PNG_STRATEGY_DEFAULT =0, + CV_IMWRITE_PNG_STRATEGY_FILTERED =1, + CV_IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY =2, + CV_IMWRITE_PNG_STRATEGY_RLE =3, + CV_IMWRITE_PNG_STRATEGY_FIXED =4, + CV_IMWRITE_PXM_BINARY =32, + CV_IMWRITE_EXR_TYPE = 48, + CV_IMWRITE_WEBP_QUALITY =64, + CV_IMWRITE_PAM_TUPLETYPE = 128, + CV_IMWRITE_PAM_FORMAT_NULL = 0, + CV_IMWRITE_PAM_FORMAT_BLACKANDWHITE = 1, + CV_IMWRITE_PAM_FORMAT_GRAYSCALE = 2, + CV_IMWRITE_PAM_FORMAT_GRAYSCALE_ALPHA = 3, + CV_IMWRITE_PAM_FORMAT_RGB = 4, + CV_IMWRITE_PAM_FORMAT_RGB_ALPHA = 5, +}; + + + +/* save image to file */ +CVAPI(int) cvSaveImage( const char* filename, const CvArr* image, + const int* params CV_DEFAULT(0) ); + +/* decode image stored in the buffer */ +CVAPI(IplImage*) cvDecodeImage( const CvMat* buf, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR)); +CVAPI(CvMat*) cvDecodeImageM( const CvMat* buf, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR)); + +/* encode image and store the result as a byte vector (single-row 8uC1 matrix) */ +CVAPI(CvMat*) cvEncodeImage( const char* ext, const CvArr* image, + const int* params CV_DEFAULT(0) ); + +enum +{ + CV_CVTIMG_FLIP =1, + CV_CVTIMG_SWAP_RB =2 +}; + +/* utility function: convert one image to another with optional vertical flip */ +CVAPI(void) cvConvertImage( const CvArr* src, CvArr* dst, int flags CV_DEFAULT(0)); + +CVAPI(int) cvHaveImageReader(const char* filename); +CVAPI(int) cvHaveImageWriter(const char* filename); + + +/****************************************************************************************\ +* Obsolete functions/synonyms * +\****************************************************************************************/ + +#define cvvLoadImage(name) cvLoadImage((name),1) +#define cvvSaveImage cvSaveImage +#define cvvConvertImage cvConvertImage + +/** @} imgcodecs_c */ + +#ifdef __cplusplus +} +#endif + +#endif // OPENCV_IMGCODECS_H diff --git a/third_party/opencv/uos/sw_64/include/opencv2/imgcodecs/ios.h b/third_party/opencv/uos/sw_64/include/opencv2/imgcodecs/ios.h new file mode 100644 index 00000000..a90c6d37 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/imgcodecs/ios.h @@ -0,0 +1,57 @@ + +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+//
+//
+//                          License Agreement
+//                For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+//   * Redistribution's of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//
+//   * Redistribution's in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//
+//   * The name of the copyright holders may not be used to endorse or promote products
+//     derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#import <UIKit/UIKit.h>
+#import <Accelerate/Accelerate.h>
+#import <AVFoundation/AVFoundation.h>
+#import <ImageIO/ImageIO.h>
+#include "opencv2/core/core.hpp"
+
+//! @addtogroup imgcodecs_ios
+//! @{
+
+CV_EXPORTS UIImage* MatToUIImage(const cv::Mat& image);
+CV_EXPORTS void UIImageToMat(const UIImage* image,
+                             cv::Mat& m, bool alphaExist = false);
+
+//! @}
diff --git a/third_party/opencv/uos/sw_64/include/opencv2/imgproc.hpp b/third_party/opencv/uos/sw_64/include/opencv2/imgproc.hpp
new file mode 100644
index 00000000..576bebc2
--- /dev/null
+++ b/third_party/opencv/uos/sw_64/include/opencv2/imgproc.hpp
@@ -0,0 +1,4969 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+//  By downloading, copying, installing or using the software you agree to this license.
+//  If you do not agree to this license, do not download, install,
+//  copy or use the software.
+//
+//
+//                          License Agreement
+//                For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+//   * Redistribution's of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//
+//   * Redistribution's in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_IMGPROC_HPP +#define OPENCV_IMGPROC_HPP + +#include "opencv2/core.hpp" + +/** + @defgroup imgproc Image Processing + +This module includes image-processing functions. + + @{ + @defgroup imgproc_filter Image Filtering + +Functions and classes described in this section are used to perform various linear or non-linear +filtering operations on 2D images (represented as Mat's). It means that for each pixel location +\f$(x,y)\f$ in the source image (normally, rectangular), its neighborhood is considered and used to +compute the response. In case of a linear filter, it is a weighted sum of pixel values. In case of +morphological operations, it is the minimum or maximum values, and so on. The computed response is +stored in the destination image at the same location \f$(x,y)\f$. It means that the output image +will be of the same size as the input image. Normally, the functions support multi-channel arrays, +in which case every channel is processed independently. Therefore, the output image will also have +the same number of channels as the input one. + +Another common feature of the functions and classes described in this section is that, unlike +simple arithmetic functions, they need to extrapolate values of some non-existing pixels. For +example, if you want to smooth an image using a Gaussian \f$3 \times 3\f$ filter, then, when +processing the left-most pixels in each row, you need pixels to the left of them, that is, outside +of the image. You can let these pixels be the same as the left-most image pixels ("replicated +border" extrapolation method), or assume that all the non-existing pixels are zeros ("constant +border" extrapolation method), and so on. OpenCV enables you to specify the extrapolation method. +For details, see #BorderTypes + +@anchor filter_depths +### Depth combinations +Input depth (src.depth()) | Output depth (ddepth) +--------------------------|---------------------- +CV_8U | -1/CV_16S/CV_32F/CV_64F +CV_16U/CV_16S | -1/CV_32F/CV_64F +CV_32F | -1/CV_32F/CV_64F +CV_64F | -1/CV_64F + +@note when ddepth=-1, the output image will have the same depth as the source. + + @defgroup imgproc_transform Geometric Image Transformations + +The functions in this section perform various geometrical transformations of 2D images. They do not +change the image content but deform the pixel grid and map this deformed grid to the destination +image. In fact, to avoid sampling artifacts, the mapping is done in the reverse order, from +destination to the source. 
That is, for each pixel \f$(x, y)\f$ of the destination image, the +functions compute coordinates of the corresponding "donor" pixel in the source image and copy the +pixel value: + +\f[\texttt{dst} (x,y)= \texttt{src} (f_x(x,y), f_y(x,y))\f] + +In case when you specify the forward mapping \f$\left<g_x, g_y\right>: \texttt{src} \rightarrow +\texttt{dst}\f$, the OpenCV functions first compute the corresponding inverse mapping +\f$\left<f_x, f_y\right>: \texttt{dst} \rightarrow \texttt{src}\f$ and then use the above formula. + +The actual implementations of the geometrical transformations, from the most generic remap and to +the simplest and the fastest resize, need to solve two main problems with the above formula: + +- Extrapolation of non-existing pixels. Similarly to the filtering functions described in the +previous section, for some \f$(x,y)\f$, either one of \f$f_x(x,y)\f$, or \f$f_y(x,y)\f$, or both +of them may fall outside of the image. In this case, an extrapolation method needs to be used. +OpenCV provides the same selection of extrapolation methods as in the filtering functions. In +addition, it provides the method #BORDER_TRANSPARENT. This means that the corresponding pixels in +the destination image will not be modified at all. + +- Interpolation of pixel values. Usually \f$f_x(x,y)\f$ and \f$f_y(x,y)\f$ are floating-point +numbers. This means that \f$\left<f_x(x,y), f_y(x,y)\right>\f$ can be either an affine or perspective +transformation, or radial lens distortion correction, and so on. So, a pixel value at fractional +coordinates needs to be retrieved. In the simplest case, the coordinates can be just rounded to the +nearest integer coordinates and the corresponding pixel can be used. This is called a +nearest-neighbor interpolation. However, a better result can be achieved by using more +sophisticated [interpolation methods](http://en.wikipedia.org/wiki/Multivariate_interpolation) , +where a polynomial function is fit into some neighborhood of the computed pixel \f$(f_x(x,y), +f_y(x,y))\f$, and then the value of the polynomial at \f$(f_x(x,y), f_y(x,y))\f$ is taken as the +interpolated pixel value. In OpenCV, you can choose between several interpolation methods. See +resize for details. + +@note The geometrical transformations do not work with `CV_8S` or `CV_32S` images. + + @defgroup imgproc_misc Miscellaneous Image Transformations + @defgroup imgproc_draw Drawing Functions + +Drawing functions work with matrices/images of arbitrary depth. The boundaries of the shapes can be +rendered with antialiasing (implemented only for 8-bit images for now). All the functions include +the parameter color that uses an RGB value (that may be constructed with the Scalar constructor ) +for color images and brightness for grayscale images. For color images, the channel ordering is +normally *Blue, Green, Red*. This is what imshow, imread, and imwrite expect. So, if you form a +color using the Scalar constructor, it should look like: + +\f[\texttt{Scalar} (blue \_ component, green \_ component, red \_ component[, alpha \_ component])\f] + +If you are using your own image rendering and I/O functions, you can use any channel ordering. The +drawing functions process each channel independently and do not depend on the channel order or even +on the used color space. The whole image can be converted from BGR to RGB or to a different color +space using cvtColor . + +If a drawn figure is partially or completely outside the image, the drawing functions clip it. Also, +many drawing functions can handle pixel coordinates specified with sub-pixel accuracy.
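An illustrative sketch (not part of the upstream header) of the reverse-mapping convention above, built explicitly and applied with cv::remap; the helper name is hypothetical.

#include <opencv2/imgproc.hpp>

// Hypothetical helper: horizontal flip expressed through the generic mapping
// dst(x,y) = src(f_x(x,y), f_y(x,y)).
static cv::Mat flipWithRemap(const cv::Mat& src)
{
    cv::Mat mapX(src.size(), CV_32FC1), mapY(src.size(), CV_32FC1);
    for (int y = 0; y < src.rows; ++y)
        for (int x = 0; x < src.cols; ++x)
        {
            mapX.at<float>(y, x) = static_cast<float>(src.cols - 1 - x); // f_x(x,y)
            mapY.at<float>(y, x) = static_cast<float>(y);                // f_y(x,y)
        }
    cv::Mat dst;
    // Fractional coordinates are resolved by the interpolation flag,
    // out-of-range coordinates by the border mode.
    cv::remap(src, dst, mapX, mapY, cv::INTER_LINEAR, cv::BORDER_CONSTANT);
    return dst;
}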
This means +that the coordinates can be passed as fixed-point numbers encoded as integers. The number of +fractional bits is specified by the shift parameter and the real point coordinates are calculated as +\f$\texttt{Point}(x,y)\rightarrow\texttt{Point2f}(x*2^{-shift},y*2^{-shift})\f$ . This feature is +especially effective when rendering antialiased shapes. + +@note The functions do not support alpha-transparency when the target image is 4-channel. In this +case, the color[3] is simply copied to the repainted pixels. Thus, if you want to paint +semi-transparent shapes, you can paint them in a separate buffer and then blend it with the main +image. + + @defgroup imgproc_color_conversions Color Space Conversions + @defgroup imgproc_colormap ColorMaps in OpenCV + +The human perception isn't built for observing fine changes in grayscale images. Human eyes are more +sensitive to observing changes between colors, so you often need to recolor your grayscale images to +get a clue about them. OpenCV now comes with various colormaps to enhance the visualization in your +computer vision application. + +In OpenCV you only need applyColorMap to apply a colormap on a given image. The following sample +code reads the path to an image from command line, applies a Jet colormap on it and shows the +result: + +@include snippets/imgproc_applyColorMap.cpp + +@see #ColormapTypes + + @defgroup imgproc_subdiv2d Planar Subdivision + +The Subdiv2D class described in this section is used to perform various planar subdivision on +a set of 2D points (represented as vector of Point2f). OpenCV subdivides a plane into triangles +using the Delaunay's algorithm, which corresponds to the dual graph of the Voronoi diagram. +In the figure below, the Delaunay's triangulation is marked with black lines and the Voronoi +diagram with red lines. + +![Delaunay triangulation (black) and Voronoi (red)](pics/delaunay_voronoi.png) + +The subdivisions can be used for the 3D piece-wise transformation of a plane, morphing, fast +location of points on the plane, building special graphs (such as NNG,RNG), and so forth. + + @defgroup imgproc_hist Histograms + @defgroup imgproc_shape Structural Analysis and Shape Descriptors + @defgroup imgproc_motion Motion Analysis and Object Tracking + @defgroup imgproc_feature Feature Detection + @defgroup imgproc_object Object Detection + @defgroup imgproc_c C API + @defgroup imgproc_hal Hardware Acceleration Layer + @{ + @defgroup imgproc_hal_functions Functions + @defgroup imgproc_hal_interface Interface + @} + @} +*/ + +namespace cv +{ + +/** @addtogroup imgproc +@{ +*/ + +//! @addtogroup imgproc_filter +//! @{ + +//! 
type of morphological operation +enum MorphTypes{ + MORPH_ERODE = 0, //!< see #erode + MORPH_DILATE = 1, //!< see #dilate + MORPH_OPEN = 2, //!< an opening operation + //!< \f[\texttt{dst} = \mathrm{open} ( \texttt{src} , \texttt{element} )= \mathrm{dilate} ( \mathrm{erode} ( \texttt{src} , \texttt{element} ))\f] + MORPH_CLOSE = 3, //!< a closing operation + //!< \f[\texttt{dst} = \mathrm{close} ( \texttt{src} , \texttt{element} )= \mathrm{erode} ( \mathrm{dilate} ( \texttt{src} , \texttt{element} ))\f] + MORPH_GRADIENT = 4, //!< a morphological gradient + //!< \f[\texttt{dst} = \mathrm{morph\_grad} ( \texttt{src} , \texttt{element} )= \mathrm{dilate} ( \texttt{src} , \texttt{element} )- \mathrm{erode} ( \texttt{src} , \texttt{element} )\f] + MORPH_TOPHAT = 5, //!< "top hat" + //!< \f[\texttt{dst} = \mathrm{tophat} ( \texttt{src} , \texttt{element} )= \texttt{src} - \mathrm{open} ( \texttt{src} , \texttt{element} )\f] + MORPH_BLACKHAT = 6, //!< "black hat" + //!< \f[\texttt{dst} = \mathrm{blackhat} ( \texttt{src} , \texttt{element} )= \mathrm{close} ( \texttt{src} , \texttt{element} )- \texttt{src}\f] + MORPH_HITMISS = 7 //!< "hit or miss" + //!< .- Only supported for CV_8UC1 binary images. A tutorial can be found in the documentation +}; + +//! shape of the structuring element +enum MorphShapes { + MORPH_RECT = 0, //!< a rectangular structuring element: \f[E_{ij}=1\f] + MORPH_CROSS = 1, //!< a cross-shaped structuring element: + //!< \f[E_{ij} = \begin{cases} 1 & \texttt{if } {i=\texttt{anchor.y } {or } {j=\texttt{anchor.x}}} \\0 & \texttt{otherwise} \end{cases}\f] + MORPH_ELLIPSE = 2 //!< an elliptic structuring element, that is, a filled ellipse inscribed + //!< into the rectangle Rect(0, 0, esize.width, 0.esize.height) +}; + +//! @} imgproc_filter + +//! @addtogroup imgproc_transform +//! @{ + +//! interpolation algorithm +enum InterpolationFlags{ + /** nearest neighbor interpolation */ + INTER_NEAREST = 0, + /** bilinear interpolation */ + INTER_LINEAR = 1, + /** bicubic interpolation */ + INTER_CUBIC = 2, + /** resampling using pixel area relation. It may be a preferred method for image decimation, as + it gives moire'-free results. But when the image is zoomed, it is similar to the INTER_NEAREST + method. */ + INTER_AREA = 3, + /** Lanczos interpolation over 8x8 neighborhood */ + INTER_LANCZOS4 = 4, + /** Bit exact bilinear interpolation */ + INTER_LINEAR_EXACT = 5, + /** Bit exact nearest neighbor interpolation. This will produce same results as + the nearest neighbor method in PIL, scikit-image or Matlab. */ + INTER_NEAREST_EXACT = 6, + /** mask for interpolation codes */ + INTER_MAX = 7, + /** flag, fills all of the destination image pixels. If some of them correspond to outliers in the + source image, they are set to zero */ + WARP_FILL_OUTLIERS = 8, + /** flag, inverse transformation + + For example, #linearPolar or #logPolar transforms: + - flag is __not__ set: \f$dst( \rho , \phi ) = src(x,y)\f$ + - flag is set: \f$dst(x,y) = src( \rho , \phi )\f$ + */ + WARP_INVERSE_MAP = 16 +}; + +/** \brief Specify the polar mapping mode +@sa warpPolar +*/ +enum WarpPolarMode +{ + WARP_POLAR_LINEAR = 0, ///< Remaps an image to/from polar space. + WARP_POLAR_LOG = 256 ///< Remaps an image to/from semilog-polar space. +}; + +enum InterpolationMasks { + INTER_BITS = 5, + INTER_BITS2 = INTER_BITS * 2, + INTER_TAB_SIZE = 1 << INTER_BITS, + INTER_TAB_SIZE2 = INTER_TAB_SIZE * INTER_TAB_SIZE + }; + +//! @} imgproc_transform + +//! @addtogroup imgproc_misc +//! @{ + +//! 
Distance types for Distance Transform and M-estimators +//! @see distanceTransform, fitLine +enum DistanceTypes { + DIST_USER = -1, //!< User defined distance + DIST_L1 = 1, //!< distance = |x1-x2| + |y1-y2| + DIST_L2 = 2, //!< the simple euclidean distance + DIST_C = 3, //!< distance = max(|x1-x2|,|y1-y2|) + DIST_L12 = 4, //!< L1-L2 metric: distance = 2(sqrt(1+x*x/2) - 1)) + DIST_FAIR = 5, //!< distance = c^2(|x|/c-log(1+|x|/c)), c = 1.3998 + DIST_WELSCH = 6, //!< distance = c^2/2(1-exp(-(x/c)^2)), c = 2.9846 + DIST_HUBER = 7 //!< distance = |x|<c ? x^2/2 : c(|x|-c/2), c=1.345 +}; + +/** Mask size for distance transform */ +enum DistanceTransformMasks { + DIST_MASK_3 = 3, //!< mask=3 + DIST_MASK_5 = 5, //!< mask=5 + DIST_MASK_PRECISE = 0 //!< +}; + +//! type of the threshold operation +//! ![threshold types](pics/threshold.png) +enum ThresholdTypes { + THRESH_BINARY = 0, //!< \f[\texttt{dst} (x,y) = \fork{\texttt{maxval}}{if \(\texttt{src}(x,y) > \texttt{thresh}\)}{0}{otherwise}\f] + THRESH_BINARY_INV = 1, //!< \f[\texttt{dst} (x,y) = \fork{0}{if \(\texttt{src}(x,y) > \texttt{thresh}\)}{\texttt{maxval}}{otherwise}\f] + THRESH_TRUNC = 2, //!< \f[\texttt{dst} (x,y) = \fork{\texttt{threshold}}{if \(\texttt{src}(x,y) > \texttt{thresh}\)}{\texttt{src}(x,y)}{otherwise}\f] + THRESH_TOZERO = 3, //!< \f[\texttt{dst} (x,y) = \fork{\texttt{src}(x,y)}{if \(\texttt{src}(x,y) > \texttt{thresh}\)}{0}{otherwise}\f] + THRESH_TOZERO_INV = 4, //!< \f[\texttt{dst} (x,y) = \fork{0}{if \(\texttt{src}(x,y) > \texttt{thresh}\)}{\texttt{src}(x,y)}{otherwise}\f] + THRESH_MASK = 7, + THRESH_OTSU = 8, //!< flag, use Otsu algorithm to choose the optimal threshold value + THRESH_TRIANGLE = 16 //!< flag, use Triangle algorithm to choose the optimal threshold value +}; + +//! adaptive threshold algorithm +//! @see adaptiveThreshold +enum AdaptiveThresholdTypes { + /** the threshold value \f$T(x,y)\f$ is a mean of the \f$\texttt{blockSize} \times + \texttt{blockSize}\f$ neighborhood of \f$(x, y)\f$ minus C */ + ADAPTIVE_THRESH_MEAN_C = 0, + /** the threshold value \f$T(x, y)\f$ is a weighted sum (cross-correlation with a Gaussian + window) of the \f$\texttt{blockSize} \times \texttt{blockSize}\f$ neighborhood of \f$(x, y)\f$ + minus C . The default sigma (standard deviation) is used for the specified blockSize . See + #getGaussianKernel*/ + ADAPTIVE_THRESH_GAUSSIAN_C = 1 +}; + +//! cv::undistort mode +enum UndistortTypes { + PROJ_SPHERICAL_ORTHO = 0, + PROJ_SPHERICAL_EQRECT = 1 + }; + +//! class of the pixel in GrabCut algorithm +enum GrabCutClasses { + GC_BGD = 0, //!< an obvious background pixels + GC_FGD = 1, //!< an obvious foreground (object) pixel + GC_PR_BGD = 2, //!< a possible background pixel + GC_PR_FGD = 3 //!< a possible foreground pixel +}; + +//! GrabCut algorithm flags +enum GrabCutModes { + /** The function initializes the state and the mask using the provided rectangle. After that it + runs iterCount iterations of the algorithm. */ + GC_INIT_WITH_RECT = 0, + /** The function initializes the state using the provided mask. Note that GC_INIT_WITH_RECT + and GC_INIT_WITH_MASK can be combined. Then, all the pixels outside of the ROI are + automatically initialized with GC_BGD .*/ + GC_INIT_WITH_MASK = 1, + /** The value means that the algorithm should just resume. */ + GC_EVAL = 2, + /** The value means that the algorithm should just run the grabCut algorithm (a single iteration) with the fixed model */ + GC_EVAL_FREEZE_MODEL = 3 +}; + +//! distanceTransform algorithm flags +enum DistanceTransformLabelTypes { + /** each connected component of zeros in src (as well as all the non-zero pixels closest to the + connected component) will be assigned the same label */ + DIST_LABEL_CCOMP = 0, + /** each zero pixel (and all the non-zero pixels closest to it) gets its own label. */ + DIST_LABEL_PIXEL = 1 +}; + +//!
floodfill algorithm flags +enum FloodFillFlags { + /** If set, the difference between the current pixel and seed pixel is considered. Otherwise, + the difference between neighbor pixels is considered (that is, the range is floating). */ + FLOODFILL_FIXED_RANGE = 1 << 16, + /** If set, the function does not change the image ( newVal is ignored), and only fills the + mask with the value specified in bits 8-16 of flags as described above. This option only make + sense in function variants that have the mask parameter. */ + FLOODFILL_MASK_ONLY = 1 << 17 +}; + +//! @} imgproc_misc + +//! @addtogroup imgproc_shape +//! @{ + +//! connected components statistics +enum ConnectedComponentsTypes { + CC_STAT_LEFT = 0, //!< The leftmost (x) coordinate which is the inclusive start of the bounding + //!< box in the horizontal direction. + CC_STAT_TOP = 1, //!< The topmost (y) coordinate which is the inclusive start of the bounding + //!< box in the vertical direction. + CC_STAT_WIDTH = 2, //!< The horizontal size of the bounding box + CC_STAT_HEIGHT = 3, //!< The vertical size of the bounding box + CC_STAT_AREA = 4, //!< The total area (in pixels) of the connected component +#ifndef CV_DOXYGEN + CC_STAT_MAX = 5 //!< Max enumeration value. Used internally only for memory allocation +#endif +}; + +//! connected components algorithm +enum ConnectedComponentsAlgorithmsTypes { + CCL_DEFAULT = -1, //!< BBDT @cite Grana2010 algorithm for 8-way connectivity, SAUF algorithm for 4-way connectivity. The parallel implementation described in @cite Bolelli2017 is available for both BBDT and SAUF. + CCL_WU = 0, //!< SAUF @cite Wu2009 algorithm for 8-way connectivity, SAUF algorithm for 4-way connectivity. The parallel implementation described in @cite Bolelli2017 is available for SAUF. + CCL_GRANA = 1, //!< BBDT @cite Grana2010 algorithm for 8-way connectivity, SAUF algorithm for 4-way connectivity. The parallel implementation described in @cite Bolelli2017 is available for both BBDT and SAUF. + CCL_BOLELLI = 2, //!< Spaghetti @cite Bolelli2019 algorithm for 8-way connectivity, SAUF algorithm for 4-way connectivity. + CCL_SAUF = 3, //!< Same as CCL_WU. It is preferable to use the flag with the name of the algorithm (CCL_SAUF) rather than the one with the name of the first author (CCL_WU). + CCL_BBDT = 4, //!< Same as CCL_GRANA. It is preferable to use the flag with the name of the algorithm (CCL_BBDT) rather than the one with the name of the first author (CCL_GRANA). + CCL_SPAGHETTI = 5, //!< Same as CCL_BOLELLI. It is preferable to use the flag with the name of the algorithm (CCL_SPAGHETTI) rather than the one with the name of the first author (CCL_BOLELLI). +}; + +//! mode of the contour retrieval algorithm +enum RetrievalModes { + /** retrieves only the extreme outer contours. It sets `hierarchy[i][2]=hierarchy[i][3]=-1` for + all the contours. */ + RETR_EXTERNAL = 0, + /** retrieves all of the contours without establishing any hierarchical relationships. */ + RETR_LIST = 1, + /** retrieves all of the contours and organizes them into a two-level hierarchy. At the top + level, there are external boundaries of the components. At the second level, there are + boundaries of the holes. If there is another contour inside a hole of a connected component, it + is still put at the top level. */ + RETR_CCOMP = 2, + /** retrieves all of the contours and reconstructs a full hierarchy of nested contours.*/ + RETR_TREE = 3, + RETR_FLOODFILL = 4 //!< +}; + +//! 
the contour approximation algorithm +enum ContourApproximationModes { + /** stores absolutely all the contour points. That is, any 2 subsequent points (x1,y1) and + (x2,y2) of the contour will be either horizontal, vertical or diagonal neighbors, that is, + max(abs(x1-x2),abs(y2-y1))==1. */ + CHAIN_APPROX_NONE = 1, + /** compresses horizontal, vertical, and diagonal segments and leaves only their end points. + For example, an up-right rectangular contour is encoded with 4 points. */ + CHAIN_APPROX_SIMPLE = 2, + /** applies one of the flavors of the Teh-Chin chain approximation algorithm @cite TehChin89 */ + CHAIN_APPROX_TC89_L1 = 3, + /** applies one of the flavors of the Teh-Chin chain approximation algorithm @cite TehChin89 */ + CHAIN_APPROX_TC89_KCOS = 4 +}; + +/** @brief Shape matching methods + +\f$A\f$ denotes object1,\f$B\f$ denotes object2 + +\f$\begin{array}{l} m^A_i = \mathrm{sign} (h^A_i) \cdot \log{h^A_i} \\ m^B_i = \mathrm{sign} (h^B_i) \cdot \log{h^B_i} \end{array}\f$ + +and \f$h^A_i, h^B_i\f$ are the Hu moments of \f$A\f$ and \f$B\f$ , respectively. +*/ +enum ShapeMatchModes { + CONTOURS_MATCH_I1 =1, //!< \f[I_1(A,B) = \sum _{i=1...7} \left | \frac{1}{m^A_i} - \frac{1}{m^B_i} \right |\f] + CONTOURS_MATCH_I2 =2, //!< \f[I_2(A,B) = \sum _{i=1...7} \left | m^A_i - m^B_i \right |\f] + CONTOURS_MATCH_I3 =3 //!< \f[I_3(A,B) = \max _{i=1...7} \frac{ \left| m^A_i - m^B_i \right| }{ \left| m^A_i \right| }\f] +}; + +//! @} imgproc_shape + +//! @addtogroup imgproc_feature +//! @{ + +//! Variants of a Hough transform +enum HoughModes { + + /** classical or standard Hough transform. Every line is represented by two floating-point + numbers \f$(\rho, \theta)\f$ , where \f$\rho\f$ is a distance between (0,0) point and the line, + and \f$\theta\f$ is the angle between x-axis and the normal to the line. Thus, the matrix must + be (the created sequence will be) of CV_32FC2 type */ + HOUGH_STANDARD = 0, + /** probabilistic Hough transform (more efficient in case if the picture contains a few long + linear segments). It returns line segments rather than the whole line. Each segment is + represented by starting and ending points, and the matrix must be (the created sequence will + be) of the CV_32SC4 type. */ + HOUGH_PROBABILISTIC = 1, + /** multi-scale variant of the classical Hough transform. The lines are encoded the same way as + HOUGH_STANDARD. */ + HOUGH_MULTI_SCALE = 2, + HOUGH_GRADIENT = 3 //!< basically *21HT*, described in @cite Yuen90 +}; + +//! Variants of Line Segment %Detector +enum LineSegmentDetectorModes { + LSD_REFINE_NONE = 0, //!< No refinement applied + LSD_REFINE_STD = 1, //!< Standard refinement is applied. E.g. breaking arches into smaller straighter line approximations. + LSD_REFINE_ADV = 2 //!< Advanced refinement. Number of false alarms is calculated, lines are + //!< refined through increase of precision, decrement in size, etc. +}; + +//! @} imgproc_feature + +/** Histogram comparison methods + @ingroup imgproc_hist +*/ +enum HistCompMethods { + /** Correlation + \f[d(H_1,H_2) = \frac{\sum_I (H_1(I) - \bar{H_1}) (H_2(I) - \bar{H_2})}{\sqrt{\sum_I(H_1(I) - \bar{H_1})^2 \sum_I(H_2(I) - \bar{H_2})^2}}\f] + where + \f[\bar{H_k} = \frac{1}{N} \sum _J H_k(J)\f] + and \f$N\f$ is a total number of histogram bins. 
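An illustrative sketch (not part of the upstream header) of the histogram comparison methods documented here; the helper name is hypothetical and the inputs are assumed to be single-channel 8-bit images.

#include <opencv2/imgproc.hpp>

static double histogramSimilarity(const cv::Mat& a, const cv::Mat& b) // hypothetical helper
{
    const int histSize = 256;
    const int channels[] = { 0 };
    const float range[] = { 0.f, 256.f };
    const float* ranges[] = { range };
    cv::Mat ha, hb;
    cv::calcHist(&a, 1, channels, cv::Mat(), ha, 1, &histSize, ranges);
    cv::calcHist(&b, 1, channels, cv::Mat(), hb, 1, &histSize, ranges);
    cv::normalize(ha, ha, 1.0, 0.0, cv::NORM_L1);
    cv::normalize(hb, hb, 1.0, 0.0, cv::NORM_L1);
    return cv::compareHist(ha, hb, cv::HISTCMP_CORREL); // 1.0 for identical histograms
}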
*/ + HISTCMP_CORREL = 0, + /** Chi-Square + \f[d(H_1,H_2) = \sum _I \frac{\left(H_1(I)-H_2(I)\right)^2}{H_1(I)}\f] */ + HISTCMP_CHISQR = 1, + /** Intersection + \f[d(H_1,H_2) = \sum _I \min (H_1(I), H_2(I))\f] */ + HISTCMP_INTERSECT = 2, + /** Bhattacharyya distance + (In fact, OpenCV computes Hellinger distance, which is related to Bhattacharyya coefficient.) + \f[d(H_1,H_2) = \sqrt{1 - \frac{1}{\sqrt{\bar{H_1} \bar{H_2} N^2}} \sum_I \sqrt{H_1(I) \cdot H_2(I)}}\f] */ + HISTCMP_BHATTACHARYYA = 3, + HISTCMP_HELLINGER = HISTCMP_BHATTACHARYYA, //!< Synonym for HISTCMP_BHATTACHARYYA + /** Alternative Chi-Square + \f[d(H_1,H_2) = 2 * \sum _I \frac{\left(H_1(I)-H_2(I)\right)^2}{H_1(I)+H_2(I)}\f] + This alternative formula is regularly used for texture comparison. See e.g. @cite Puzicha1997 */ + HISTCMP_CHISQR_ALT = 4, + /** Kullback-Leibler divergence + \f[d(H_1,H_2) = \sum _I H_1(I) \log \left(\frac{H_1(I)}{H_2(I)}\right)\f] */ + HISTCMP_KL_DIV = 5 +}; + +/** the color conversion codes +@see @ref imgproc_color_conversions +@ingroup imgproc_color_conversions + */ +enum ColorConversionCodes { + COLOR_BGR2BGRA = 0, //!< add alpha channel to RGB or BGR image + COLOR_RGB2RGBA = COLOR_BGR2BGRA, + + COLOR_BGRA2BGR = 1, //!< remove alpha channel from RGB or BGR image + COLOR_RGBA2RGB = COLOR_BGRA2BGR, + + COLOR_BGR2RGBA = 2, //!< convert between RGB and BGR color spaces (with or without alpha channel) + COLOR_RGB2BGRA = COLOR_BGR2RGBA, + + COLOR_RGBA2BGR = 3, + COLOR_BGRA2RGB = COLOR_RGBA2BGR, + + COLOR_BGR2RGB = 4, + COLOR_RGB2BGR = COLOR_BGR2RGB, + + COLOR_BGRA2RGBA = 5, + COLOR_RGBA2BGRA = COLOR_BGRA2RGBA, + + COLOR_BGR2GRAY = 6, //!< convert between RGB/BGR and grayscale, @ref color_convert_rgb_gray "color conversions" + COLOR_RGB2GRAY = 7, + COLOR_GRAY2BGR = 8, + COLOR_GRAY2RGB = COLOR_GRAY2BGR, + COLOR_GRAY2BGRA = 9, + COLOR_GRAY2RGBA = COLOR_GRAY2BGRA, + COLOR_BGRA2GRAY = 10, + COLOR_RGBA2GRAY = 11, + + COLOR_BGR2BGR565 = 12, //!< convert between RGB/BGR and BGR565 (16-bit images) + COLOR_RGB2BGR565 = 13, + COLOR_BGR5652BGR = 14, + COLOR_BGR5652RGB = 15, + COLOR_BGRA2BGR565 = 16, + COLOR_RGBA2BGR565 = 17, + COLOR_BGR5652BGRA = 18, + COLOR_BGR5652RGBA = 19, + + COLOR_GRAY2BGR565 = 20, //!< convert between grayscale to BGR565 (16-bit images) + COLOR_BGR5652GRAY = 21, + + COLOR_BGR2BGR555 = 22, //!< convert between RGB/BGR and BGR555 (16-bit images) + COLOR_RGB2BGR555 = 23, + COLOR_BGR5552BGR = 24, + COLOR_BGR5552RGB = 25, + COLOR_BGRA2BGR555 = 26, + COLOR_RGBA2BGR555 = 27, + COLOR_BGR5552BGRA = 28, + COLOR_BGR5552RGBA = 29, + + COLOR_GRAY2BGR555 = 30, //!< convert between grayscale and BGR555 (16-bit images) + COLOR_BGR5552GRAY = 31, + + COLOR_BGR2XYZ = 32, //!< convert RGB/BGR to CIE XYZ, @ref color_convert_rgb_xyz "color conversions" + COLOR_RGB2XYZ = 33, + COLOR_XYZ2BGR = 34, + COLOR_XYZ2RGB = 35, + + COLOR_BGR2YCrCb = 36, //!< convert RGB/BGR to luma-chroma (aka YCC), @ref color_convert_rgb_ycrcb "color conversions" + COLOR_RGB2YCrCb = 37, + COLOR_YCrCb2BGR = 38, + COLOR_YCrCb2RGB = 39, + + COLOR_BGR2HSV = 40, //!< convert RGB/BGR to HSV (hue saturation value) with H range 0..180 if 8 bit image, @ref color_convert_rgb_hsv "color conversions" + COLOR_RGB2HSV = 41, + + COLOR_BGR2Lab = 44, //!< convert RGB/BGR to CIE Lab, @ref color_convert_rgb_lab "color conversions" + COLOR_RGB2Lab = 45, + + COLOR_BGR2Luv = 50, //!< convert RGB/BGR to CIE Luv, @ref color_convert_rgb_luv "color conversions" + COLOR_RGB2Luv = 51, + COLOR_BGR2HLS = 52, //!< convert RGB/BGR to HLS (hue lightness saturation) with H 
range 0..180 if 8 bit image, @ref color_convert_rgb_hls "color conversions" + COLOR_RGB2HLS = 53, + + COLOR_HSV2BGR = 54, //!< backward conversions HSV to RGB/BGR with H range 0..180 if 8 bit image + COLOR_HSV2RGB = 55, + + COLOR_Lab2BGR = 56, + COLOR_Lab2RGB = 57, + COLOR_Luv2BGR = 58, + COLOR_Luv2RGB = 59, + COLOR_HLS2BGR = 60, //!< backward conversions HLS to RGB/BGR with H range 0..180 if 8 bit image + COLOR_HLS2RGB = 61, + + COLOR_BGR2HSV_FULL = 66, //!< convert RGB/BGR to HSV (hue saturation value) with H range 0..255 if 8 bit image, @ref color_convert_rgb_hsv "color conversions" + COLOR_RGB2HSV_FULL = 67, + COLOR_BGR2HLS_FULL = 68, //!< convert RGB/BGR to HLS (hue lightness saturation) with H range 0..255 if 8 bit image, @ref color_convert_rgb_hls "color conversions" + COLOR_RGB2HLS_FULL = 69, + + COLOR_HSV2BGR_FULL = 70, //!< backward conversions HSV to RGB/BGR with H range 0..255 if 8 bit image + COLOR_HSV2RGB_FULL = 71, + COLOR_HLS2BGR_FULL = 72, //!< backward conversions HLS to RGB/BGR with H range 0..255 if 8 bit image + COLOR_HLS2RGB_FULL = 73, + + COLOR_LBGR2Lab = 74, + COLOR_LRGB2Lab = 75, + COLOR_LBGR2Luv = 76, + COLOR_LRGB2Luv = 77, + + COLOR_Lab2LBGR = 78, + COLOR_Lab2LRGB = 79, + COLOR_Luv2LBGR = 80, + COLOR_Luv2LRGB = 81, + + COLOR_BGR2YUV = 82, //!< convert between RGB/BGR and YUV + COLOR_RGB2YUV = 83, + COLOR_YUV2BGR = 84, + COLOR_YUV2RGB = 85, + + //! YUV 4:2:0 family to RGB + COLOR_YUV2RGB_NV12 = 90, + COLOR_YUV2BGR_NV12 = 91, + COLOR_YUV2RGB_NV21 = 92, + COLOR_YUV2BGR_NV21 = 93, + COLOR_YUV420sp2RGB = COLOR_YUV2RGB_NV21, + COLOR_YUV420sp2BGR = COLOR_YUV2BGR_NV21, + + COLOR_YUV2RGBA_NV12 = 94, + COLOR_YUV2BGRA_NV12 = 95, + COLOR_YUV2RGBA_NV21 = 96, + COLOR_YUV2BGRA_NV21 = 97, + COLOR_YUV420sp2RGBA = COLOR_YUV2RGBA_NV21, + COLOR_YUV420sp2BGRA = COLOR_YUV2BGRA_NV21, + + COLOR_YUV2RGB_YV12 = 98, + COLOR_YUV2BGR_YV12 = 99, + COLOR_YUV2RGB_IYUV = 100, + COLOR_YUV2BGR_IYUV = 101, + COLOR_YUV2RGB_I420 = COLOR_YUV2RGB_IYUV, + COLOR_YUV2BGR_I420 = COLOR_YUV2BGR_IYUV, + COLOR_YUV420p2RGB = COLOR_YUV2RGB_YV12, + COLOR_YUV420p2BGR = COLOR_YUV2BGR_YV12, + + COLOR_YUV2RGBA_YV12 = 102, + COLOR_YUV2BGRA_YV12 = 103, + COLOR_YUV2RGBA_IYUV = 104, + COLOR_YUV2BGRA_IYUV = 105, + COLOR_YUV2RGBA_I420 = COLOR_YUV2RGBA_IYUV, + COLOR_YUV2BGRA_I420 = COLOR_YUV2BGRA_IYUV, + COLOR_YUV420p2RGBA = COLOR_YUV2RGBA_YV12, + COLOR_YUV420p2BGRA = COLOR_YUV2BGRA_YV12, + + COLOR_YUV2GRAY_420 = 106, + COLOR_YUV2GRAY_NV21 = COLOR_YUV2GRAY_420, + COLOR_YUV2GRAY_NV12 = COLOR_YUV2GRAY_420, + COLOR_YUV2GRAY_YV12 = COLOR_YUV2GRAY_420, + COLOR_YUV2GRAY_IYUV = COLOR_YUV2GRAY_420, + COLOR_YUV2GRAY_I420 = COLOR_YUV2GRAY_420, + COLOR_YUV420sp2GRAY = COLOR_YUV2GRAY_420, + COLOR_YUV420p2GRAY = COLOR_YUV2GRAY_420, + + //! 
YUV 4:2:2 family to RGB + COLOR_YUV2RGB_UYVY = 107, + COLOR_YUV2BGR_UYVY = 108, + //COLOR_YUV2RGB_VYUY = 109, + //COLOR_YUV2BGR_VYUY = 110, + COLOR_YUV2RGB_Y422 = COLOR_YUV2RGB_UYVY, + COLOR_YUV2BGR_Y422 = COLOR_YUV2BGR_UYVY, + COLOR_YUV2RGB_UYNV = COLOR_YUV2RGB_UYVY, + COLOR_YUV2BGR_UYNV = COLOR_YUV2BGR_UYVY, + + COLOR_YUV2RGBA_UYVY = 111, + COLOR_YUV2BGRA_UYVY = 112, + //COLOR_YUV2RGBA_VYUY = 113, + //COLOR_YUV2BGRA_VYUY = 114, + COLOR_YUV2RGBA_Y422 = COLOR_YUV2RGBA_UYVY, + COLOR_YUV2BGRA_Y422 = COLOR_YUV2BGRA_UYVY, + COLOR_YUV2RGBA_UYNV = COLOR_YUV2RGBA_UYVY, + COLOR_YUV2BGRA_UYNV = COLOR_YUV2BGRA_UYVY, + + COLOR_YUV2RGB_YUY2 = 115, + COLOR_YUV2BGR_YUY2 = 116, + COLOR_YUV2RGB_YVYU = 117, + COLOR_YUV2BGR_YVYU = 118, + COLOR_YUV2RGB_YUYV = COLOR_YUV2RGB_YUY2, + COLOR_YUV2BGR_YUYV = COLOR_YUV2BGR_YUY2, + COLOR_YUV2RGB_YUNV = COLOR_YUV2RGB_YUY2, + COLOR_YUV2BGR_YUNV = COLOR_YUV2BGR_YUY2, + + COLOR_YUV2RGBA_YUY2 = 119, + COLOR_YUV2BGRA_YUY2 = 120, + COLOR_YUV2RGBA_YVYU = 121, + COLOR_YUV2BGRA_YVYU = 122, + COLOR_YUV2RGBA_YUYV = COLOR_YUV2RGBA_YUY2, + COLOR_YUV2BGRA_YUYV = COLOR_YUV2BGRA_YUY2, + COLOR_YUV2RGBA_YUNV = COLOR_YUV2RGBA_YUY2, + COLOR_YUV2BGRA_YUNV = COLOR_YUV2BGRA_YUY2, + + COLOR_YUV2GRAY_UYVY = 123, + COLOR_YUV2GRAY_YUY2 = 124, + //CV_YUV2GRAY_VYUY = CV_YUV2GRAY_UYVY, + COLOR_YUV2GRAY_Y422 = COLOR_YUV2GRAY_UYVY, + COLOR_YUV2GRAY_UYNV = COLOR_YUV2GRAY_UYVY, + COLOR_YUV2GRAY_YVYU = COLOR_YUV2GRAY_YUY2, + COLOR_YUV2GRAY_YUYV = COLOR_YUV2GRAY_YUY2, + COLOR_YUV2GRAY_YUNV = COLOR_YUV2GRAY_YUY2, + + //! alpha premultiplication + COLOR_RGBA2mRGBA = 125, + COLOR_mRGBA2RGBA = 126, + + //! RGB to YUV 4:2:0 family + COLOR_RGB2YUV_I420 = 127, + COLOR_BGR2YUV_I420 = 128, + COLOR_RGB2YUV_IYUV = COLOR_RGB2YUV_I420, + COLOR_BGR2YUV_IYUV = COLOR_BGR2YUV_I420, + + COLOR_RGBA2YUV_I420 = 129, + COLOR_BGRA2YUV_I420 = 130, + COLOR_RGBA2YUV_IYUV = COLOR_RGBA2YUV_I420, + COLOR_BGRA2YUV_IYUV = COLOR_BGRA2YUV_I420, + COLOR_RGB2YUV_YV12 = 131, + COLOR_BGR2YUV_YV12 = 132, + COLOR_RGBA2YUV_YV12 = 133, + COLOR_BGRA2YUV_YV12 = 134, + + //! Demosaicing + COLOR_BayerBG2BGR = 46, + COLOR_BayerGB2BGR = 47, + COLOR_BayerRG2BGR = 48, + COLOR_BayerGR2BGR = 49, + + COLOR_BayerBG2RGB = COLOR_BayerRG2BGR, + COLOR_BayerGB2RGB = COLOR_BayerGR2BGR, + COLOR_BayerRG2RGB = COLOR_BayerBG2BGR, + COLOR_BayerGR2RGB = COLOR_BayerGB2BGR, + + COLOR_BayerBG2GRAY = 86, + COLOR_BayerGB2GRAY = 87, + COLOR_BayerRG2GRAY = 88, + COLOR_BayerGR2GRAY = 89, + + //! Demosaicing using Variable Number of Gradients + COLOR_BayerBG2BGR_VNG = 62, + COLOR_BayerGB2BGR_VNG = 63, + COLOR_BayerRG2BGR_VNG = 64, + COLOR_BayerGR2BGR_VNG = 65, + + COLOR_BayerBG2RGB_VNG = COLOR_BayerRG2BGR_VNG, + COLOR_BayerGB2RGB_VNG = COLOR_BayerGR2BGR_VNG, + COLOR_BayerRG2RGB_VNG = COLOR_BayerBG2BGR_VNG, + COLOR_BayerGR2RGB_VNG = COLOR_BayerGB2BGR_VNG, + + //! Edge-Aware Demosaicing + COLOR_BayerBG2BGR_EA = 135, + COLOR_BayerGB2BGR_EA = 136, + COLOR_BayerRG2BGR_EA = 137, + COLOR_BayerGR2BGR_EA = 138, + + COLOR_BayerBG2RGB_EA = COLOR_BayerRG2BGR_EA, + COLOR_BayerGB2RGB_EA = COLOR_BayerGR2BGR_EA, + COLOR_BayerRG2RGB_EA = COLOR_BayerBG2BGR_EA, + COLOR_BayerGR2RGB_EA = COLOR_BayerGB2BGR_EA, + + //! Demosaicing with alpha channel + COLOR_BayerBG2BGRA = 139, + COLOR_BayerGB2BGRA = 140, + COLOR_BayerRG2BGRA = 141, + COLOR_BayerGR2BGRA = 142, + + COLOR_BayerBG2RGBA = COLOR_BayerRG2BGRA, + COLOR_BayerGB2RGBA = COLOR_BayerGR2BGRA, + COLOR_BayerRG2RGBA = COLOR_BayerBG2BGRA, + COLOR_BayerGR2RGBA = COLOR_BayerGB2BGRA, + + COLOR_COLORCVT_MAX = 143 +}; + +//! 
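An illustrative sketch (not part of the upstream header) of typical conversions with the codes above; the function name is hypothetical.

#include <opencv2/imgproc.hpp>

void exampleColorConversions(const cv::Mat& bgr) // hypothetical; bgr is a 3-channel BGR image
{
    cv::Mat gray, hsv, backToBgr;
    cv::cvtColor(bgr, gray, cv::COLOR_BGR2GRAY);     // BGR -> single-channel grayscale
    cv::cvtColor(bgr, hsv,  cv::COLOR_BGR2HSV);      // H in 0..180 for 8-bit images
    cv::cvtColor(hsv, backToBgr, cv::COLOR_HSV2BGR); // backward conversion
}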
@addtogroup imgproc_shape +//! @{ + +//! types of intersection between rectangles +enum RectanglesIntersectTypes { + INTERSECT_NONE = 0, //!< No intersection + INTERSECT_PARTIAL = 1, //!< There is a partial intersection + INTERSECT_FULL = 2 //!< One of the rectangle is fully enclosed in the other +}; + +/** @brief finds arbitrary template in the grayscale image using Generalized Hough Transform +*/ +class CV_EXPORTS_W GeneralizedHough : public Algorithm +{ +public: + //! set template to search + CV_WRAP virtual void setTemplate(InputArray templ, Point templCenter = Point(-1, -1)) = 0; + CV_WRAP virtual void setTemplate(InputArray edges, InputArray dx, InputArray dy, Point templCenter = Point(-1, -1)) = 0; + + //! find template on image + CV_WRAP virtual void detect(InputArray image, OutputArray positions, OutputArray votes = noArray()) = 0; + CV_WRAP virtual void detect(InputArray edges, InputArray dx, InputArray dy, OutputArray positions, OutputArray votes = noArray()) = 0; + + //! Canny low threshold. + CV_WRAP virtual void setCannyLowThresh(int cannyLowThresh) = 0; + CV_WRAP virtual int getCannyLowThresh() const = 0; + + //! Canny high threshold. + CV_WRAP virtual void setCannyHighThresh(int cannyHighThresh) = 0; + CV_WRAP virtual int getCannyHighThresh() const = 0; + + //! Minimum distance between the centers of the detected objects. + CV_WRAP virtual void setMinDist(double minDist) = 0; + CV_WRAP virtual double getMinDist() const = 0; + + //! Inverse ratio of the accumulator resolution to the image resolution. + CV_WRAP virtual void setDp(double dp) = 0; + CV_WRAP virtual double getDp() const = 0; + + //! Maximal size of inner buffers. + CV_WRAP virtual void setMaxBufferSize(int maxBufferSize) = 0; + CV_WRAP virtual int getMaxBufferSize() const = 0; +}; + +/** @brief finds arbitrary template in the grayscale image using Generalized Hough Transform + +Detects position only without translation and rotation @cite Ballard1981 . +*/ +class CV_EXPORTS_W GeneralizedHoughBallard : public GeneralizedHough +{ +public: + //! R-Table levels. + CV_WRAP virtual void setLevels(int levels) = 0; + CV_WRAP virtual int getLevels() const = 0; + + //! The accumulator threshold for the template centers at the detection stage. The smaller it is, the more false positions may be detected. + CV_WRAP virtual void setVotesThreshold(int votesThreshold) = 0; + CV_WRAP virtual int getVotesThreshold() const = 0; +}; + +/** @brief finds arbitrary template in the grayscale image using Generalized Hough Transform + +Detects position, translation and rotation @cite Guil1999 . +*/ +class CV_EXPORTS_W GeneralizedHoughGuil : public GeneralizedHough +{ +public: + //! Angle difference in degrees between two points in feature. + CV_WRAP virtual void setXi(double xi) = 0; + CV_WRAP virtual double getXi() const = 0; + + //! Feature table levels. + CV_WRAP virtual void setLevels(int levels) = 0; + CV_WRAP virtual int getLevels() const = 0; + + //! Maximal difference between angles that treated as equal. + CV_WRAP virtual void setAngleEpsilon(double angleEpsilon) = 0; + CV_WRAP virtual double getAngleEpsilon() const = 0; + + //! Minimal rotation angle to detect in degrees. + CV_WRAP virtual void setMinAngle(double minAngle) = 0; + CV_WRAP virtual double getMinAngle() const = 0; + + //! Maximal rotation angle to detect in degrees. + CV_WRAP virtual void setMaxAngle(double maxAngle) = 0; + CV_WRAP virtual double getMaxAngle() const = 0; + + //! Angle step in degrees. 
+ CV_WRAP virtual void setAngleStep(double angleStep) = 0; + CV_WRAP virtual double getAngleStep() const = 0; + + //! Angle votes threshold. + CV_WRAP virtual void setAngleThresh(int angleThresh) = 0; + CV_WRAP virtual int getAngleThresh() const = 0; + + //! Minimal scale to detect. + CV_WRAP virtual void setMinScale(double minScale) = 0; + CV_WRAP virtual double getMinScale() const = 0; + + //! Maximal scale to detect. + CV_WRAP virtual void setMaxScale(double maxScale) = 0; + CV_WRAP virtual double getMaxScale() const = 0; + + //! Scale step. + CV_WRAP virtual void setScaleStep(double scaleStep) = 0; + CV_WRAP virtual double getScaleStep() const = 0; + + //! Scale votes threshold. + CV_WRAP virtual void setScaleThresh(int scaleThresh) = 0; + CV_WRAP virtual int getScaleThresh() const = 0; + + //! Position votes threshold. + CV_WRAP virtual void setPosThresh(int posThresh) = 0; + CV_WRAP virtual int getPosThresh() const = 0; +}; + +//! @} imgproc_shape + +//! @addtogroup imgproc_hist +//! @{ + +/** @brief Base class for Contrast Limited Adaptive Histogram Equalization. +*/ +class CV_EXPORTS_W CLAHE : public Algorithm +{ +public: + /** @brief Equalizes the histogram of a grayscale image using Contrast Limited Adaptive Histogram Equalization. + + @param src Source image of type CV_8UC1 or CV_16UC1. + @param dst Destination image. + */ + CV_WRAP virtual void apply(InputArray src, OutputArray dst) = 0; + + /** @brief Sets threshold for contrast limiting. + + @param clipLimit threshold value. + */ + CV_WRAP virtual void setClipLimit(double clipLimit) = 0; + + //! Returns threshold value for contrast limiting. + CV_WRAP virtual double getClipLimit() const = 0; + + /** @brief Sets size of grid for histogram equalization. Input image will be divided into + equally sized rectangular tiles. + + @param tileGridSize defines the number of tiles in row and column. + */ + CV_WRAP virtual void setTilesGridSize(Size tileGridSize) = 0; + + //!@brief Returns Size defines the number of tiles in row and column. + CV_WRAP virtual Size getTilesGridSize() const = 0; + + CV_WRAP virtual void collectGarbage() = 0; +}; + +//! @} imgproc_hist + +//! @addtogroup imgproc_subdiv2d +//! @{ + +class CV_EXPORTS_W Subdiv2D +{ +public: + /** Subdiv2D point location cases */ + enum { PTLOC_ERROR = -2, //!< Point location error + PTLOC_OUTSIDE_RECT = -1, //!< Point outside the subdivision bounding rect + PTLOC_INSIDE = 0, //!< Point inside some facet + PTLOC_VERTEX = 1, //!< Point coincides with one of the subdivision vertices + PTLOC_ON_EDGE = 2 //!< Point on some edge + }; + + /** Subdiv2D edge type navigation (see: getEdge()) */ + enum { NEXT_AROUND_ORG = 0x00, + NEXT_AROUND_DST = 0x22, + PREV_AROUND_ORG = 0x11, + PREV_AROUND_DST = 0x33, + NEXT_AROUND_LEFT = 0x13, + NEXT_AROUND_RIGHT = 0x31, + PREV_AROUND_LEFT = 0x20, + PREV_AROUND_RIGHT = 0x02 + }; + + /** creates an empty Subdiv2D object. + To create a new empty Delaunay subdivision you need to use the #initDelaunay function. + */ + CV_WRAP Subdiv2D(); + + /** @overload + + @param rect Rectangle that includes all of the 2D points that are to be added to the subdivision. + + The function creates an empty Delaunay subdivision where 2D points can be added using the function + insert() . All of the points to be added must be within the specified rectangle, otherwise a runtime + error is raised. 
+ */ + CV_WRAP Subdiv2D(Rect rect); + + /** @brief Creates a new empty Delaunay subdivision + + @param rect Rectangle that includes all of the 2D points that are to be added to the subdivision. + + */ + CV_WRAP void initDelaunay(Rect rect); + + /** @brief Insert a single point into a Delaunay triangulation. + + @param pt Point to insert. + + The function inserts a single point into a subdivision and modifies the subdivision topology + appropriately. If a point with the same coordinates exists already, no new point is added. + @returns the ID of the point. + + @note If the point is outside of the triangulation specified rect a runtime error is raised. + */ + CV_WRAP int insert(Point2f pt); + + /** @brief Insert multiple points into a Delaunay triangulation. + + @param ptvec Points to insert. + + The function inserts a vector of points into a subdivision and modifies the subdivision topology + appropriately. + */ + CV_WRAP void insert(const std::vector& ptvec); + + /** @brief Returns the location of a point within a Delaunay triangulation. + + @param pt Point to locate. + @param edge Output edge that the point belongs to or is located to the right of it. + @param vertex Optional output vertex the input point coincides with. + + The function locates the input point within the subdivision and gives one of the triangle edges + or vertices. + + @returns an integer which specify one of the following five cases for point location: + - The point falls into some facet. The function returns #PTLOC_INSIDE and edge will contain one of + edges of the facet. + - The point falls onto the edge. The function returns #PTLOC_ON_EDGE and edge will contain this edge. + - The point coincides with one of the subdivision vertices. The function returns #PTLOC_VERTEX and + vertex will contain a pointer to the vertex. + - The point is outside the subdivision reference rectangle. The function returns #PTLOC_OUTSIDE_RECT + and no pointers are filled. + - One of input arguments is invalid. A runtime error is raised or, if silent or "parent" error + processing mode is selected, #PTLOC_ERROR is returned. + */ + CV_WRAP int locate(Point2f pt, CV_OUT int& edge, CV_OUT int& vertex); + + /** @brief Finds the subdivision vertex closest to the given point. + + @param pt Input point. + @param nearestPt Output subdivision vertex point. + + The function is another function that locates the input point within the subdivision. It finds the + subdivision vertex that is the closest to the input point. It is not necessarily one of vertices + of the facet containing the input point, though the facet (located using locate() ) is used as a + starting point. + + @returns vertex ID. + */ + CV_WRAP int findNearest(Point2f pt, CV_OUT Point2f* nearestPt = 0); + + /** @brief Returns a list of all edges. + + @param edgeList Output vector. + + The function gives each edge as a 4 numbers vector, where each two are one of the edge + vertices. i.e. org_x = v[0], org_y = v[1], dst_x = v[2], dst_y = v[3]. + */ + CV_WRAP void getEdgeList(CV_OUT std::vector& edgeList) const; + + /** @brief Returns a list of the leading edge ID connected to each triangle. + + @param leadingEdgeList Output vector. + + The function gives one edge ID for each triangle. + */ + CV_WRAP void getLeadingEdgeList(CV_OUT std::vector& leadingEdgeList) const; + + /** @brief Returns a list of all triangles. + + @param triangleList Output vector. + + The function gives each triangle as a 6 numbers vector, where each two are one of the triangle + vertices. i.e. 
p1_x = v[0], p1_y = v[1], p2_x = v[2], p2_y = v[3], p3_x = v[4], p3_y = v[5]. + */ + CV_WRAP void getTriangleList(CV_OUT std::vector& triangleList) const; + + /** @brief Returns a list of all Voronoi facets. + + @param idx Vector of vertices IDs to consider. For all vertices you can pass empty vector. + @param facetList Output vector of the Voronoi facets. + @param facetCenters Output vector of the Voronoi facets center points. + + */ + CV_WRAP void getVoronoiFacetList(const std::vector& idx, CV_OUT std::vector >& facetList, + CV_OUT std::vector& facetCenters); + + /** @brief Returns vertex location from vertex ID. + + @param vertex vertex ID. + @param firstEdge Optional. The first edge ID which is connected to the vertex. + @returns vertex (x,y) + + */ + CV_WRAP Point2f getVertex(int vertex, CV_OUT int* firstEdge = 0) const; + + /** @brief Returns one of the edges related to the given edge. + + @param edge Subdivision edge ID. + @param nextEdgeType Parameter specifying which of the related edges to return. + The following values are possible: + - NEXT_AROUND_ORG next around the edge origin ( eOnext on the picture below if e is the input edge) + - NEXT_AROUND_DST next around the edge vertex ( eDnext ) + - PREV_AROUND_ORG previous around the edge origin (reversed eRnext ) + - PREV_AROUND_DST previous around the edge destination (reversed eLnext ) + - NEXT_AROUND_LEFT next around the left facet ( eLnext ) + - NEXT_AROUND_RIGHT next around the right facet ( eRnext ) + - PREV_AROUND_LEFT previous around the left facet (reversed eOnext ) + - PREV_AROUND_RIGHT previous around the right facet (reversed eDnext ) + + ![sample output](pics/quadedge.png) + + @returns edge ID related to the input edge. + */ + CV_WRAP int getEdge( int edge, int nextEdgeType ) const; + + /** @brief Returns next edge around the edge origin. + + @param edge Subdivision edge ID. + + @returns an integer which is next edge ID around the edge origin: eOnext on the + picture above if e is the input edge). + */ + CV_WRAP int nextEdge(int edge) const; + + /** @brief Returns another edge of the same quad-edge. + + @param edge Subdivision edge ID. + @param rotate Parameter specifying which of the edges of the same quad-edge as the input + one to return. The following values are possible: + - 0 - the input edge ( e on the picture below if e is the input edge) + - 1 - the rotated edge ( eRot ) + - 2 - the reversed edge (reversed e (in green)) + - 3 - the reversed rotated edge (reversed eRot (in green)) + + @returns one of the edges ID of the same quad-edge as the input edge. + */ + CV_WRAP int rotateEdge(int edge, int rotate) const; + CV_WRAP int symEdge(int edge) const; + + /** @brief Returns the edge origin. + + @param edge Subdivision edge ID. + @param orgpt Output vertex location. + + @returns vertex ID. + */ + CV_WRAP int edgeOrg(int edge, CV_OUT Point2f* orgpt = 0) const; + + /** @brief Returns the edge destination. + + @param edge Subdivision edge ID. + @param dstpt Output vertex location. + + @returns vertex ID. 
+ */ + CV_WRAP int edgeDst(int edge, CV_OUT Point2f* dstpt = 0) const; + +protected: + int newEdge(); + void deleteEdge(int edge); + int newPoint(Point2f pt, bool isvirtual, int firstEdge = 0); + void deletePoint(int vtx); + void setEdgePoints( int edge, int orgPt, int dstPt ); + void splice( int edgeA, int edgeB ); + int connectEdges( int edgeA, int edgeB ); + void swapEdges( int edge ); + int isRightOf(Point2f pt, int edge) const; + void calcVoronoi(); + void clearVoronoi(); + void checkSubdiv() const; + + struct CV_EXPORTS Vertex + { + Vertex(); + Vertex(Point2f pt, bool isvirtual, int firstEdge=0); + bool isvirtual() const; + bool isfree() const; + + int firstEdge; + int type; + Point2f pt; + }; + + struct CV_EXPORTS QuadEdge + { + QuadEdge(); + QuadEdge(int edgeidx); + bool isfree() const; + + int next[4]; + int pt[4]; + }; + + //! All of the vertices + std::vector vtx; + //! All of the edges + std::vector qedges; + int freeQEdge; + int freePoint; + bool validGeometry; + + int recentEdge; + //! Top left corner of the bounding rect + Point2f topLeft; + //! Bottom right corner of the bounding rect + Point2f bottomRight; +}; + +//! @} imgproc_subdiv2d + +//! @addtogroup imgproc_feature +//! @{ + +/** @example samples/cpp/lsd_lines.cpp +An example using the LineSegmentDetector +\image html building_lsd.png "Sample output image" width=434 height=300 +*/ + +/** @brief Line segment detector class + +following the algorithm described at @cite Rafael12 . + +@note Implementation has been removed from OpenCV version 3.4.6 to 3.4.15 and version 4.1.0 to 4.5.3 due original code license conflict. +restored again after [Computation of a NFA](https://github.com/rafael-grompone-von-gioi/binomial_nfa) code published under the MIT license. +*/ +class CV_EXPORTS_W LineSegmentDetector : public Algorithm +{ +public: + + /** @brief Finds lines in the input image. + + This is the output of the default parameters of the algorithm on the above shown image. + + ![image](pics/building_lsd.png) + + @param image A grayscale (CV_8UC1) input image. If only a roi needs to be selected, use: + `lsd_ptr-\>detect(image(roi), lines, ...); lines += Scalar(roi.x, roi.y, roi.x, roi.y);` + @param lines A vector of Vec4f elements specifying the beginning and ending point of a line. Where + Vec4f is (x1, y1, x2, y2), point 1 is the start, point 2 - end. Returned lines are strictly + oriented depending on the gradient. + @param width Vector of widths of the regions, where the lines are found. E.g. Width of line. + @param prec Vector of precisions with which the lines are found. + @param nfa Vector containing number of false alarms in the line region, with precision of 10%. The + bigger the value, logarithmically better the detection. + - -1 corresponds to 10 mean false alarms + - 0 corresponds to 1 mean false alarm + - 1 corresponds to 0.1 mean false alarms + This vector will be calculated only when the objects type is #LSD_REFINE_ADV. + */ + CV_WRAP virtual void detect(InputArray image, OutputArray lines, + OutputArray width = noArray(), OutputArray prec = noArray(), + OutputArray nfa = noArray()) = 0; + + /** @brief Draws the line segments on a given image. + @param image The image, where the lines will be drawn. Should be bigger or equal to the image, + where the lines were found. + @param lines A vector of the lines that needed to be drawn. 
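An illustrative sketch (not part of the upstream header) of the LineSegmentDetector interface documented here, applicable to builds in which the implementation is present; the helper name is hypothetical.

#include <vector>
#include <opencv2/imgproc.hpp>

void exampleLsd(const cv::Mat& gray, cv::Mat& vis) // hypothetical; gray must be CV_8UC1
{
    cv::Ptr<cv::LineSegmentDetector> lsd = cv::createLineSegmentDetector(cv::LSD_REFINE_STD);
    std::vector<cv::Vec4f> lines;
    lsd->detect(gray, lines);                     // each line is (x1, y1, x2, y2)
    cv::cvtColor(gray, vis, cv::COLOR_GRAY2BGR);  // 3-channel image for drawing
    lsd->drawSegments(vis, lines);
}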
+ */ + CV_WRAP virtual void drawSegments(InputOutputArray image, InputArray lines) = 0; + + /** @brief Draws two groups of lines in blue and red, counting the non overlapping (mismatching) pixels. + + @param size The size of the image, where lines1 and lines2 were found. + @param lines1 The first group of lines that needs to be drawn. It is visualized in blue color. + @param lines2 The second group of lines. They visualized in red color. + @param image Optional image, where the lines will be drawn. The image should be color(3-channel) + in order for lines1 and lines2 to be drawn in the above mentioned colors. + */ + CV_WRAP virtual int compareSegments(const Size& size, InputArray lines1, InputArray lines2, InputOutputArray image = noArray()) = 0; + + virtual ~LineSegmentDetector() { } +}; + +/** @brief Creates a smart pointer to a LineSegmentDetector object and initializes it. + +The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want +to edit those, as to tailor it for their own application. + +@param refine The way found lines will be refined, see #LineSegmentDetectorModes +@param scale The scale of the image that will be used to find the lines. Range (0..1]. +@param sigma_scale Sigma for Gaussian filter. It is computed as sigma = sigma_scale/scale. +@param quant Bound to the quantization error on the gradient norm. +@param ang_th Gradient angle tolerance in degrees. +@param log_eps Detection threshold: -log10(NFA) \> log_eps. Used only when advance refinement is chosen. +@param density_th Minimal density of aligned region points in the enclosing rectangle. +@param n_bins Number of bins in pseudo-ordering of gradient modulus. + */ +CV_EXPORTS_W Ptr createLineSegmentDetector( + int refine = LSD_REFINE_STD, double scale = 0.8, + double sigma_scale = 0.6, double quant = 2.0, double ang_th = 22.5, + double log_eps = 0, double density_th = 0.7, int n_bins = 1024); + +//! @} imgproc_feature + +//! @addtogroup imgproc_filter +//! @{ + +/** @brief Returns Gaussian filter coefficients. + +The function computes and returns the \f$\texttt{ksize} \times 1\f$ matrix of Gaussian filter +coefficients: + +\f[G_i= \alpha *e^{-(i-( \texttt{ksize} -1)/2)^2/(2* \texttt{sigma}^2)},\f] + +where \f$i=0..\texttt{ksize}-1\f$ and \f$\alpha\f$ is the scale factor chosen so that \f$\sum_i G_i=1\f$. + +Two of such generated kernels can be passed to sepFilter2D. Those functions automatically recognize +smoothing kernels (a symmetrical kernel with sum of weights equal to 1) and handle them accordingly. +You may also use the higher-level GaussianBlur. +@param ksize Aperture size. It should be odd ( \f$\texttt{ksize} \mod 2 = 1\f$ ) and positive. +@param sigma Gaussian standard deviation. If it is non-positive, it is computed from ksize as +`sigma = 0.3*((ksize-1)*0.5 - 1) + 0.8`. +@param ktype Type of filter coefficients. It can be CV_32F or CV_64F . +@sa sepFilter2D, getDerivKernels, getStructuringElement, GaussianBlur + */ +CV_EXPORTS_W Mat getGaussianKernel( int ksize, double sigma, int ktype = CV_64F ); + +/** @brief Returns filter coefficients for computing spatial image derivatives. + +The function computes and returns the filter coefficients for spatial image derivatives. When +`ksize=CV_SCHARR`, the Scharr \f$3 \times 3\f$ kernels are generated (see #Scharr). Otherwise, Sobel +kernels are generated (see #Sobel). The filters are normally passed to #sepFilter2D or to + +@param kx Output matrix of row filter coefficients. It has the type ktype . 
+@param ky Output matrix of column filter coefficients. It has the type ktype . +@param dx Derivative order in respect of x. +@param dy Derivative order in respect of y. +@param ksize Aperture size. It can be CV_SCHARR, 1, 3, 5, or 7. +@param normalize Flag indicating whether to normalize (scale down) the filter coefficients or not. +Theoretically, the coefficients should have the denominator \f$=2^{ksize*2-dx-dy-2}\f$. If you are +going to filter floating-point images, you are likely to use the normalized kernels. But if you +compute derivatives of an 8-bit image, store the results in a 16-bit image, and wish to preserve +all the fractional bits, you may want to set normalize=false . +@param ktype Type of filter coefficients. It can be CV_32f or CV_64F . + */ +CV_EXPORTS_W void getDerivKernels( OutputArray kx, OutputArray ky, + int dx, int dy, int ksize, + bool normalize = false, int ktype = CV_32F ); + +/** @brief Returns Gabor filter coefficients. + +For more details about gabor filter equations and parameters, see: [Gabor +Filter](http://en.wikipedia.org/wiki/Gabor_filter). + +@param ksize Size of the filter returned. +@param sigma Standard deviation of the gaussian envelope. +@param theta Orientation of the normal to the parallel stripes of a Gabor function. +@param lambd Wavelength of the sinusoidal factor. +@param gamma Spatial aspect ratio. +@param psi Phase offset. +@param ktype Type of filter coefficients. It can be CV_32F or CV_64F . + */ +CV_EXPORTS_W Mat getGaborKernel( Size ksize, double sigma, double theta, double lambd, + double gamma, double psi = CV_PI*0.5, int ktype = CV_64F ); + +//! returns "magic" border value for erosion and dilation. It is automatically transformed to Scalar::all(-DBL_MAX) for dilation. +static inline Scalar morphologyDefaultBorderValue() { return Scalar::all(DBL_MAX); } + +/** @brief Returns a structuring element of the specified size and shape for morphological operations. + +The function constructs and returns the structuring element that can be further passed to #erode, +#dilate or #morphologyEx. But you can also construct an arbitrary binary mask yourself and use it as +the structuring element. + +@param shape Element shape that could be one of #MorphShapes +@param ksize Size of the structuring element. +@param anchor Anchor position within the element. The default value \f$(-1, -1)\f$ means that the +anchor is at the center. Note that only the shape of a cross-shaped element depends on the anchor +position. In other cases the anchor just regulates how much the result of the morphological +operation is shifted. + */ +CV_EXPORTS_W Mat getStructuringElement(int shape, Size ksize, Point anchor = Point(-1,-1)); + +/** @example samples/cpp/tutorial_code/ImgProc/Smoothing/Smoothing.cpp +Sample code for simple filters +![Sample screenshot](Smoothing_Tutorial_Result_Median_Filter.jpg) +Check @ref tutorial_gausian_median_blur_bilateral_filter "the corresponding tutorial" for more details + */ + +/** @brief Blurs an image using the median filter. + +The function smoothes an image using the median filter with the \f$\texttt{ksize} \times +\texttt{ksize}\f$ aperture. Each channel of a multi-channel image is processed independently. +In-place operation is supported. + +@note The median filter uses #BORDER_REPLICATE internally to cope with border pixels, see #BorderTypes + +@param src input 1-, 3-, or 4-channel image; when ksize is 3 or 5, the image depth should be +CV_8U, CV_16U, or CV_32F, for larger aperture sizes, it can only be CV_8U. 
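An illustrative sketch (not part of the upstream header) combining getStructuringElement with the MorphTypes declared earlier; the function name is hypothetical, and cv::morphologyEx from this module is assumed.

#include <opencv2/imgproc.hpp>

void exampleMorphology(const cv::Mat& binary, cv::Mat& cleaned) // hypothetical; binary is CV_8UC1
{
    // 5x5 elliptic structuring element, anchored at its center by default.
    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5));
    // Closing (dilate then erode) fills small holes in foreground blobs.
    cv::morphologyEx(binary, cleaned, cv::MORPH_CLOSE, kernel);
}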
+@param dst destination array of the same size and type as src. +@param ksize aperture linear size; it must be odd and greater than 1, for example: 3, 5, 7 ... +@sa bilateralFilter, blur, boxFilter, GaussianBlur + */ +CV_EXPORTS_W void medianBlur( InputArray src, OutputArray dst, int ksize ); + +/** @brief Blurs an image using a Gaussian filter. + +The function convolves the source image with the specified Gaussian kernel. In-place filtering is +supported. + +@param src input image; the image can have any number of channels, which are processed +independently, but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. +@param dst output image of the same size and type as src. +@param ksize Gaussian kernel size. ksize.width and ksize.height can differ but they both must be +positive and odd. Or, they can be zero's and then they are computed from sigma. +@param sigmaX Gaussian kernel standard deviation in X direction. +@param sigmaY Gaussian kernel standard deviation in Y direction; if sigmaY is zero, it is set to be +equal to sigmaX, if both sigmas are zeros, they are computed from ksize.width and ksize.height, +respectively (see #getGaussianKernel for details); to fully control the result regardless of +possible future modifications of all this semantics, it is recommended to specify all of ksize, +sigmaX, and sigmaY. +@param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported. + +@sa sepFilter2D, filter2D, blur, boxFilter, bilateralFilter, medianBlur + */ +CV_EXPORTS_W void GaussianBlur( InputArray src, OutputArray dst, Size ksize, + double sigmaX, double sigmaY = 0, + int borderType = BORDER_DEFAULT ); + +/** @brief Applies the bilateral filter to an image. + +The function applies bilateral filtering to the input image, as described in +http://www.dai.ed.ac.uk/CVonline/LOCAL_COPIES/MANDUCHI1/Bilateral_Filtering.html +bilateralFilter can reduce unwanted noise very well while keeping edges fairly sharp. However, it is +very slow compared to most filters. + +_Sigma values_: For simplicity, you can set the 2 sigma values to be the same. If they are small (\< +10), the filter will not have much effect, whereas if they are large (\> 150), they will have a very +strong effect, making the image look "cartoonish". + +_Filter size_: Large filters (d \> 5) are very slow, so it is recommended to use d=5 for real-time +applications, and perhaps d=9 for offline applications that need heavy noise filtering. + +This filter does not work inplace. +@param src Source 8-bit or floating-point, 1-channel or 3-channel image. +@param dst Destination image of the same size and type as src . +@param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive, +it is computed from sigmaSpace. +@param sigmaColor Filter sigma in the color space. A larger value of the parameter means that +farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting +in larger areas of semi-equal color. +@param sigmaSpace Filter sigma in the coordinate space. A larger value of the parameter means that +farther pixels will influence each other as long as their colors are close enough (see sigmaColor +). When d\>0, it specifies the neighborhood size regardless of sigmaSpace. Otherwise, d is +proportional to sigmaSpace. 
+@param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes + */ +CV_EXPORTS_W void bilateralFilter( InputArray src, OutputArray dst, int d, + double sigmaColor, double sigmaSpace, + int borderType = BORDER_DEFAULT ); + +/** @brief Blurs an image using the box filter. + +The function smooths an image using the kernel: + +\f[\texttt{K} = \alpha \begin{bmatrix} 1 & 1 & 1 & \cdots & 1 & 1 \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \hdotsfor{6} \\ 1 & 1 & 1 & \cdots & 1 & 1 \end{bmatrix}\f] + +where + +\f[\alpha = \begin{cases} \frac{1}{\texttt{ksize.width*ksize.height}} & \texttt{when } \texttt{normalize=true} \\1 & \texttt{otherwise}\end{cases}\f] + +Unnormalized box filter is useful for computing various integral characteristics over each pixel +neighborhood, such as covariance matrices of image derivatives (used in dense optical flow +algorithms, and so on). If you need to compute pixel sums over variable-size windows, use #integral. + +@param src input image. +@param dst output image of the same size and type as src. +@param ddepth the output image depth (-1 to use src.depth()). +@param ksize blurring kernel size. +@param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel +center. +@param normalize flag, specifying whether the kernel is normalized by its area or not. +@param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported. +@sa blur, bilateralFilter, GaussianBlur, medianBlur, integral + */ +CV_EXPORTS_W void boxFilter( InputArray src, OutputArray dst, int ddepth, + Size ksize, Point anchor = Point(-1,-1), + bool normalize = true, + int borderType = BORDER_DEFAULT ); + +/** @brief Calculates the normalized sum of squares of the pixel values overlapping the filter. + +For every pixel \f$ (x, y) \f$ in the source image, the function calculates the sum of squares of those neighboring +pixel values which overlap the filter placed over the pixel \f$ (x, y) \f$. + +The unnormalized square box filter can be useful in computing local image statistics such as the the local +variance and standard deviation around the neighborhood of a pixel. + +@param src input image +@param dst output image of the same size and type as src +@param ddepth the output image depth (-1 to use src.depth()) +@param ksize kernel size +@param anchor kernel anchor point. The default value of Point(-1, -1) denotes that the anchor is at the kernel +center. +@param normalize flag, specifying whether the kernel is to be normalized by it's area or not. +@param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported. +@sa boxFilter +*/ +CV_EXPORTS_W void sqrBoxFilter( InputArray src, OutputArray dst, int ddepth, + Size ksize, Point anchor = Point(-1, -1), + bool normalize = true, + int borderType = BORDER_DEFAULT ); + +/** @brief Blurs an image using the normalized box filter. + +The function smooths an image using the kernel: + +\f[\texttt{K} = \frac{1}{\texttt{ksize.width*ksize.height}} \begin{bmatrix} 1 & 1 & 1 & \cdots & 1 & 1 \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \hdotsfor{6} \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \end{bmatrix}\f] + +The call `blur(src, dst, ksize, anchor, borderType)` is equivalent to `boxFilter(src, dst, src.type(), ksize, +anchor, true, borderType)`. + +@param src input image; it can have any number of channels, which are processed independently, but +the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. 
+@param dst output image of the same size and type as src. +@param ksize blurring kernel size. +@param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel +center. +@param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported. +@sa boxFilter, bilateralFilter, GaussianBlur, medianBlur + */ +CV_EXPORTS_W void blur( InputArray src, OutputArray dst, + Size ksize, Point anchor = Point(-1,-1), + int borderType = BORDER_DEFAULT ); + +/** @brief Convolves an image with the kernel. + +The function applies an arbitrary linear filter to an image. In-place operation is supported. When +the aperture is partially outside the image, the function interpolates outlier pixel values +according to the specified border mode. + +The function does actually compute correlation, not the convolution: + +\f[\texttt{dst} (x,y) = \sum _{ \substack{0\leq x' < \texttt{kernel.cols}\\{0\leq y' < \texttt{kernel.rows}}}} \texttt{kernel} (x',y')* \texttt{src} (x+x'- \texttt{anchor.x} ,y+y'- \texttt{anchor.y} )\f] + +That is, the kernel is not mirrored around the anchor point. If you need a real convolution, flip +the kernel using #flip and set the new anchor to `(kernel.cols - anchor.x - 1, kernel.rows - +anchor.y - 1)`. + +The function uses the DFT-based algorithm in case of sufficiently large kernels (~`11 x 11` or +larger) and the direct algorithm for small kernels. + +@param src input image. +@param dst output image of the same size and the same number of channels as src. +@param ddepth desired depth of the destination image, see @ref filter_depths "combinations" +@param kernel convolution kernel (or rather a correlation kernel), a single-channel floating point +matrix; if you want to apply different kernels to different channels, split the image into +separate color planes using split and process them individually. +@param anchor anchor of the kernel that indicates the relative position of a filtered point within +the kernel; the anchor should lie within the kernel; default value (-1,-1) means that the anchor +is at the kernel center. +@param delta optional value added to the filtered pixels before storing them in dst. +@param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported. +@sa sepFilter2D, dft, matchTemplate + */ +CV_EXPORTS_W void filter2D( InputArray src, OutputArray dst, int ddepth, + InputArray kernel, Point anchor = Point(-1,-1), + double delta = 0, int borderType = BORDER_DEFAULT ); + +/** @brief Applies a separable linear filter to an image. + +The function applies a separable linear filter to the image. That is, first, every row of src is +filtered with the 1D kernel kernelX. Then, every column of the result is filtered with the 1D +kernel kernelY. The final result shifted by delta is stored in dst . + +@param src Source image. +@param dst Destination image of the same size and the same number of channels as src . +@param ddepth Destination image depth, see @ref filter_depths "combinations" +@param kernelX Coefficients for filtering each row. +@param kernelY Coefficients for filtering each column. +@param anchor Anchor position within the kernel. The default value \f$(-1,-1)\f$ means that the anchor +is at the kernel center. +@param delta Value added to the filtered results before storing them. +@param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported. 
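+
+A minimal usage sketch (not part of the upstream documentation; `src` is an assumed input image and
+the separable 5x5 Gaussian decomposition is only an illustration of how kernelX and kernelY are applied):
+@code
+    // Roughly equivalent to a 5x5 Gaussian blur, performed as two 1D passes.
+    Mat kx = getGaussianKernel(5, 1.0, CV_32F);
+    Mat ky = getGaussianKernel(5, 1.0, CV_32F);
+    Mat dst;
+    sepFilter2D(src, dst, -1, kx, ky);
+@endcode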
+@sa filter2D, Sobel, GaussianBlur, boxFilter, blur + */ +CV_EXPORTS_W void sepFilter2D( InputArray src, OutputArray dst, int ddepth, + InputArray kernelX, InputArray kernelY, + Point anchor = Point(-1,-1), + double delta = 0, int borderType = BORDER_DEFAULT ); + +/** @example samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp +Sample code using Sobel and/or Scharr OpenCV functions to make a simple Edge Detector +![Sample screenshot](Sobel_Derivatives_Tutorial_Result.jpg) +Check @ref tutorial_sobel_derivatives "the corresponding tutorial" for more details +*/ + +/** @brief Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator. + +In all cases except one, the \f$\texttt{ksize} \times \texttt{ksize}\f$ separable kernel is used to +calculate the derivative. When \f$\texttt{ksize = 1}\f$, the \f$3 \times 1\f$ or \f$1 \times 3\f$ +kernel is used (that is, no Gaussian smoothing is done). `ksize = 1` can only be used for the first +or the second x- or y- derivatives. + +There is also the special value `ksize = #CV_SCHARR (-1)` that corresponds to the \f$3\times3\f$ Scharr +filter that may give more accurate results than the \f$3\times3\f$ Sobel. The Scharr aperture is + +\f[\vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}\f] + +for the x-derivative, or transposed for the y-derivative. + +The function calculates an image derivative by convolving the image with the appropriate kernel: + +\f[\texttt{dst} = \frac{\partial^{xorder+yorder} \texttt{src}}{\partial x^{xorder} \partial y^{yorder}}\f] + +The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less +resistant to the noise. Most often, the function is called with ( xorder = 1, yorder = 0, ksize = 3) +or ( xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first +case corresponds to a kernel of: + +\f[\vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}\f] + +The second case corresponds to a kernel of: + +\f[\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\f] + +@param src input image. +@param dst output image of the same size and the same number of channels as src . +@param ddepth output image depth, see @ref filter_depths "combinations"; in the case of + 8-bit input images it will result in truncated derivatives. +@param dx order of the derivative x. +@param dy order of the derivative y. +@param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7. +@param scale optional scale factor for the computed derivative values; by default, no scaling is +applied (see #getDerivKernels for details). +@param delta optional delta value that is added to the results prior to storing them in dst. +@param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported. +@sa Scharr, Laplacian, sepFilter2D, filter2D, GaussianBlur, cartToPolar + */ +CV_EXPORTS_W void Sobel( InputArray src, OutputArray dst, int ddepth, + int dx, int dy, int ksize = 3, + double scale = 1, double delta = 0, + int borderType = BORDER_DEFAULT ); + +/** @brief Calculates the first order image derivative in both x and y using a Sobel operator + +Equivalent to calling: + +@code +Sobel( src, dx, CV_16SC1, 1, 0, 3 ); +Sobel( src, dy, CV_16SC1, 0, 1, 3 ); +@endcode + +@param src input image. +@param dx output image with first-order derivative in x. +@param dy output image with first-order derivative in y. +@param ksize size of Sobel kernel. It must be 3. +@param borderType pixel extrapolation method, see #BorderTypes. 
+ Only #BORDER_DEFAULT=#BORDER_REFLECT_101 and #BORDER_REPLICATE are supported. + +@sa Sobel + */ + +CV_EXPORTS_W void spatialGradient( InputArray src, OutputArray dx, + OutputArray dy, int ksize = 3, + int borderType = BORDER_DEFAULT ); + +/** @brief Calculates the first x- or y- image derivative using Scharr operator. + +The function computes the first x- or y- spatial image derivative using the Scharr operator. The +call + +\f[\texttt{Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)}\f] + +is equivalent to + +\f[\texttt{Sobel(src, dst, ddepth, dx, dy, CV_SCHARR, scale, delta, borderType)} .\f] + +@param src input image. +@param dst output image of the same size and the same number of channels as src. +@param ddepth output image depth, see @ref filter_depths "combinations" +@param dx order of the derivative x. +@param dy order of the derivative y. +@param scale optional scale factor for the computed derivative values; by default, no scaling is +applied (see #getDerivKernels for details). +@param delta optional delta value that is added to the results prior to storing them in dst. +@param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported. +@sa cartToPolar + */ +CV_EXPORTS_W void Scharr( InputArray src, OutputArray dst, int ddepth, + int dx, int dy, double scale = 1, double delta = 0, + int borderType = BORDER_DEFAULT ); + +/** @example samples/cpp/laplace.cpp +An example using Laplace transformations for edge detection +*/ + +/** @brief Calculates the Laplacian of an image. + +The function calculates the Laplacian of the source image by adding up the second x and y +derivatives calculated using the Sobel operator: + +\f[\texttt{dst} = \Delta \texttt{src} = \frac{\partial^2 \texttt{src}}{\partial x^2} + \frac{\partial^2 \texttt{src}}{\partial y^2}\f] + +This is done when `ksize > 1`. When `ksize == 1`, the Laplacian is computed by filtering the image +with the following \f$3 \times 3\f$ aperture: + +\f[\vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}\f] + +@param src Source image. +@param dst Destination image of the same size and the same number of channels as src . +@param ddepth Desired depth of the destination image. +@param ksize Aperture size used to compute the second-derivative filters. See #getDerivKernels for +details. The size must be positive and odd. +@param scale Optional scale factor for the computed Laplacian values. By default, no scaling is +applied. See #getDerivKernels for details. +@param delta Optional delta value that is added to the results prior to storing them in dst . +@param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported. +@sa Sobel, Scharr + */ +CV_EXPORTS_W void Laplacian( InputArray src, OutputArray dst, int ddepth, + int ksize = 1, double scale = 1, double delta = 0, + int borderType = BORDER_DEFAULT ); + +//! @} imgproc_filter + +//! @addtogroup imgproc_feature +//! @{ + +/** @example samples/cpp/edge.cpp +This program demonstrates usage of the Canny edge detector + +Check @ref tutorial_canny_detector "the corresponding tutorial" for more details +*/ + +/** @brief Finds edges in an image using the Canny algorithm @cite Canny86 . + +The function finds edges in the input image and marks them in the output map edges using the +Canny algorithm. The smallest value between threshold1 and threshold2 is used for edge linking. The +largest value is used to find initial segments of strong edges. See + + +@param image 8-bit input image. 
+@param edges output edge map; single channels 8-bit image, which has the same size as image . +@param threshold1 first threshold for the hysteresis procedure. +@param threshold2 second threshold for the hysteresis procedure. +@param apertureSize aperture size for the Sobel operator. +@param L2gradient a flag, indicating whether a more accurate \f$L_2\f$ norm +\f$=\sqrt{(dI/dx)^2 + (dI/dy)^2}\f$ should be used to calculate the image gradient magnitude ( +L2gradient=true ), or whether the default \f$L_1\f$ norm \f$=|dI/dx|+|dI/dy|\f$ is enough ( +L2gradient=false ). + */ +CV_EXPORTS_W void Canny( InputArray image, OutputArray edges, + double threshold1, double threshold2, + int apertureSize = 3, bool L2gradient = false ); + +/** \overload + +Finds edges in an image using the Canny algorithm with custom image gradient. + +@param dx 16-bit x derivative of input image (CV_16SC1 or CV_16SC3). +@param dy 16-bit y derivative of input image (same type as dx). +@param edges output edge map; single channels 8-bit image, which has the same size as image . +@param threshold1 first threshold for the hysteresis procedure. +@param threshold2 second threshold for the hysteresis procedure. +@param L2gradient a flag, indicating whether a more accurate \f$L_2\f$ norm +\f$=\sqrt{(dI/dx)^2 + (dI/dy)^2}\f$ should be used to calculate the image gradient magnitude ( +L2gradient=true ), or whether the default \f$L_1\f$ norm \f$=|dI/dx|+|dI/dy|\f$ is enough ( +L2gradient=false ). + */ +CV_EXPORTS_W void Canny( InputArray dx, InputArray dy, + OutputArray edges, + double threshold1, double threshold2, + bool L2gradient = false ); + +/** @brief Calculates the minimal eigenvalue of gradient matrices for corner detection. + +The function is similar to cornerEigenValsAndVecs but it calculates and stores only the minimal +eigenvalue of the covariance matrix of derivatives, that is, \f$\min(\lambda_1, \lambda_2)\f$ in terms +of the formulae in the cornerEigenValsAndVecs description. + +@param src Input single-channel 8-bit or floating-point image. +@param dst Image to store the minimal eigenvalues. It has the type CV_32FC1 and the same size as +src . +@param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ). +@param ksize Aperture parameter for the Sobel operator. +@param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported. + */ +CV_EXPORTS_W void cornerMinEigenVal( InputArray src, OutputArray dst, + int blockSize, int ksize = 3, + int borderType = BORDER_DEFAULT ); + +/** @brief Harris corner detector. + +The function runs the Harris corner detector on the image. Similarly to cornerMinEigenVal and +cornerEigenValsAndVecs , for each pixel \f$(x, y)\f$ it calculates a \f$2\times2\f$ gradient covariance +matrix \f$M^{(x,y)}\f$ over a \f$\texttt{blockSize} \times \texttt{blockSize}\f$ neighborhood. Then, it +computes the following characteristic: + +\f[\texttt{dst} (x,y) = \mathrm{det} M^{(x,y)} - k \cdot \left ( \mathrm{tr} M^{(x,y)} \right )^2\f] + +Corners in the image can be found as the local maxima of this response map. + +@param src Input single-channel 8-bit or floating-point image. +@param dst Image to store the Harris detector responses. It has the type CV_32FC1 and the same +size as src . +@param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ). +@param ksize Aperture parameter for the Sobel operator. +@param k Harris detector free parameter. See the formula above. +@param borderType Pixel extrapolation method. See #BorderTypes. 
#BORDER_WRAP is not supported. + */ +CV_EXPORTS_W void cornerHarris( InputArray src, OutputArray dst, int blockSize, + int ksize, double k, + int borderType = BORDER_DEFAULT ); + +/** @brief Calculates eigenvalues and eigenvectors of image blocks for corner detection. + +For every pixel \f$p\f$ , the function cornerEigenValsAndVecs considers a blockSize \f$\times\f$ blockSize +neighborhood \f$S(p)\f$ . It calculates the covariation matrix of derivatives over the neighborhood as: + +\f[M = \begin{bmatrix} \sum _{S(p)}(dI/dx)^2 & \sum _{S(p)}dI/dx dI/dy \\ \sum _{S(p)}dI/dx dI/dy & \sum _{S(p)}(dI/dy)^2 \end{bmatrix}\f] + +where the derivatives are computed using the Sobel operator. + +After that, it finds eigenvectors and eigenvalues of \f$M\f$ and stores them in the destination image as +\f$(\lambda_1, \lambda_2, x_1, y_1, x_2, y_2)\f$ where + +- \f$\lambda_1, \lambda_2\f$ are the non-sorted eigenvalues of \f$M\f$ +- \f$x_1, y_1\f$ are the eigenvectors corresponding to \f$\lambda_1\f$ +- \f$x_2, y_2\f$ are the eigenvectors corresponding to \f$\lambda_2\f$ + +The output of the function can be used for robust edge or corner detection. + +@param src Input single-channel 8-bit or floating-point image. +@param dst Image to store the results. It has the same size as src and the type CV_32FC(6) . +@param blockSize Neighborhood size (see details below). +@param ksize Aperture parameter for the Sobel operator. +@param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported. + +@sa cornerMinEigenVal, cornerHarris, preCornerDetect + */ +CV_EXPORTS_W void cornerEigenValsAndVecs( InputArray src, OutputArray dst, + int blockSize, int ksize, + int borderType = BORDER_DEFAULT ); + +/** @brief Calculates a feature map for corner detection. + +The function calculates the complex spatial derivative-based function of the source image + +\f[\texttt{dst} = (D_x \texttt{src} )^2 \cdot D_{yy} \texttt{src} + (D_y \texttt{src} )^2 \cdot D_{xx} \texttt{src} - 2 D_x \texttt{src} \cdot D_y \texttt{src} \cdot D_{xy} \texttt{src}\f] + +where \f$D_x\f$,\f$D_y\f$ are the first image derivatives, \f$D_{xx}\f$,\f$D_{yy}\f$ are the second image +derivatives, and \f$D_{xy}\f$ is the mixed derivative. + +The corners can be found as local maximums of the functions, as shown below: +@code + Mat corners, dilated_corners; + preCornerDetect(image, corners, 3); + // dilation with 3x3 rectangular structuring element + dilate(corners, dilated_corners, Mat(), 1); + Mat corner_mask = corners == dilated_corners; +@endcode + +@param src Source single-channel 8-bit of floating-point image. +@param dst Output image that has the type CV_32F and the same size as src . +@param ksize %Aperture size of the Sobel . +@param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported. + */ +CV_EXPORTS_W void preCornerDetect( InputArray src, OutputArray dst, int ksize, + int borderType = BORDER_DEFAULT ); + +/** @brief Refines the corner locations. + +The function iterates to find the sub-pixel accurate location of corners or radial saddle +points as described in @cite forstner1987fast, and as shown on the figure below. + +![image](pics/cornersubpix.png) + +Sub-pixel accurate corner locator is based on the observation that every vector from the center \f$q\f$ +to a point \f$p\f$ located within a neighborhood of \f$q\f$ is orthogonal to the image gradient at \f$p\f$ +subject to image and measurement noise. 
Consider the expression: + +\f[\epsilon _i = {DI_{p_i}}^T \cdot (q - p_i)\f] + +where \f${DI_{p_i}}\f$ is an image gradient at one of the points \f$p_i\f$ in a neighborhood of \f$q\f$ . The +value of \f$q\f$ is to be found so that \f$\epsilon_i\f$ is minimized. A system of equations may be set up +with \f$\epsilon_i\f$ set to zero: + +\f[\sum _i(DI_{p_i} \cdot {DI_{p_i}}^T) \cdot q - \sum _i(DI_{p_i} \cdot {DI_{p_i}}^T \cdot p_i)\f] + +where the gradients are summed within a neighborhood ("search window") of \f$q\f$ . Calling the first +gradient term \f$G\f$ and the second gradient term \f$b\f$ gives: + +\f[q = G^{-1} \cdot b\f] + +The algorithm sets the center of the neighborhood window at this new center \f$q\f$ and then iterates +until the center stays within a set threshold. + +@param image Input single-channel, 8-bit or float image. +@param corners Initial coordinates of the input corners and refined coordinates provided for +output. +@param winSize Half of the side length of the search window. For example, if winSize=Size(5,5) , +then a \f$(5*2+1) \times (5*2+1) = 11 \times 11\f$ search window is used. +@param zeroZone Half of the size of the dead region in the middle of the search zone over which +the summation in the formula below is not done. It is used sometimes to avoid possible +singularities of the autocorrelation matrix. The value of (-1,-1) indicates that there is no such +a size. +@param criteria Criteria for termination of the iterative process of corner refinement. That is, +the process of corner position refinement stops either after criteria.maxCount iterations or when +the corner position moves by less than criteria.epsilon on some iteration. + */ +CV_EXPORTS_W void cornerSubPix( InputArray image, InputOutputArray corners, + Size winSize, Size zeroZone, + TermCriteria criteria ); + +/** @brief Determines strong corners on an image. + +The function finds the most prominent corners in the image or in the specified image region, as +described in @cite Shi94 + +- Function calculates the corner quality measure at every source image pixel using the + #cornerMinEigenVal or #cornerHarris . +- Function performs a non-maximum suppression (the local maximums in *3 x 3* neighborhood are + retained). +- The corners with the minimal eigenvalue less than + \f$\texttt{qualityLevel} \cdot \max_{x,y} qualityMeasureMap(x,y)\f$ are rejected. +- The remaining corners are sorted by the quality measure in the descending order. +- Function throws away each corner for which there is a stronger corner at a distance less than + maxDistance. + +The function can be used to initialize a point-based tracker of an object. + +@note If the function is called with different values A and B of the parameter qualityLevel , and +A \> B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector +with qualityLevel=B . + +@param image Input 8-bit or floating-point 32-bit, single-channel image. +@param corners Output vector of detected corners. +@param maxCorners Maximum number of corners to return. If there are more corners than are found, +the strongest of them is returned. `maxCorners <= 0` implies that no limit on the maximum is set +and all detected corners are returned. +@param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The +parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue +(see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). 
The corners with the +quality measure less than the product are rejected. For example, if the best corner has the +quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure +less than 15 are rejected. +@param minDistance Minimum possible Euclidean distance between the returned corners. +@param mask Optional region of interest. If the image is not empty (it needs to have the type +CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected. +@param blockSize Size of an average block for computing a derivative covariation matrix over each +pixel neighborhood. See cornerEigenValsAndVecs . +@param useHarrisDetector Parameter indicating whether to use a Harris detector (see #cornerHarris) +or #cornerMinEigenVal. +@param k Free parameter of the Harris detector. + +@sa cornerMinEigenVal, cornerHarris, calcOpticalFlowPyrLK, estimateRigidTransform, + */ + +CV_EXPORTS_W void goodFeaturesToTrack( InputArray image, OutputArray corners, + int maxCorners, double qualityLevel, double minDistance, + InputArray mask = noArray(), int blockSize = 3, + bool useHarrisDetector = false, double k = 0.04 ); + +CV_EXPORTS_W void goodFeaturesToTrack( InputArray image, OutputArray corners, + int maxCorners, double qualityLevel, double minDistance, + InputArray mask, int blockSize, + int gradientSize, bool useHarrisDetector = false, + double k = 0.04 ); +/** @example samples/cpp/tutorial_code/ImgTrans/houghlines.cpp +An example using the Hough line detector +![Sample input image](Hough_Lines_Tutorial_Original_Image.jpg) ![Output image](Hough_Lines_Tutorial_Result.jpg) +*/ + +/** @brief Finds lines in a binary image using the standard Hough transform. + +The function implements the standard or standard multi-scale Hough transform algorithm for line +detection. See for a good explanation of Hough +transform. + +@param image 8-bit, single-channel binary source image. The image may be modified by the function. +@param lines Output vector of lines. Each line is represented by a 2 or 3 element vector +\f$(\rho, \theta)\f$ or \f$(\rho, \theta, \textrm{votes})\f$ . \f$\rho\f$ is the distance from the coordinate origin \f$(0,0)\f$ (top-left corner of +the image). \f$\theta\f$ is the line rotation angle in radians ( +\f$0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}\f$ ). +\f$\textrm{votes}\f$ is the value of accumulator. +@param rho Distance resolution of the accumulator in pixels. +@param theta Angle resolution of the accumulator in radians. +@param threshold Accumulator threshold parameter. Only those lines are returned that get enough +votes ( \f$>\texttt{threshold}\f$ ). +@param srn For the multi-scale Hough transform, it is a divisor for the distance resolution rho . +The coarse accumulator distance resolution is rho and the accurate accumulator resolution is +rho/srn . If both srn=0 and stn=0 , the classical Hough transform is used. Otherwise, both these +parameters should be positive. +@param stn For the multi-scale Hough transform, it is a divisor for the distance resolution theta. +@param min_theta For standard and multi-scale Hough transform, minimum angle to check for lines. +Must fall between 0 and max_theta. +@param max_theta For standard and multi-scale Hough transform, maximum angle to check for lines. +Must fall between min_theta and CV_PI. 
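+
+A minimal usage sketch (illustrative only; `edges` is an assumed 8-bit binary image, e.g. from #Canny,
+and the resolution and threshold values are arbitrary):
+@code
+    std::vector<Vec2f> lines;
+    // 1 pixel rho resolution, 1 degree theta resolution, 150 accumulator votes.
+    HoughLines(edges, lines, 1, CV_PI / 180, 150);
+@endcode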
+ */ +CV_EXPORTS_W void HoughLines( InputArray image, OutputArray lines, + double rho, double theta, int threshold, + double srn = 0, double stn = 0, + double min_theta = 0, double max_theta = CV_PI ); + +/** @brief Finds line segments in a binary image using the probabilistic Hough transform. + +The function implements the probabilistic Hough transform algorithm for line detection, described +in @cite Matas00 + +See the line detection example below: +@include snippets/imgproc_HoughLinesP.cpp +This is a sample picture the function parameters have been tuned for: + +![image](pics/building.jpg) + +And this is the output of the above program in case of the probabilistic Hough transform: + +![image](pics/houghp.png) + +@param image 8-bit, single-channel binary source image. The image may be modified by the function. +@param lines Output vector of lines. Each line is represented by a 4-element vector +\f$(x_1, y_1, x_2, y_2)\f$ , where \f$(x_1,y_1)\f$ and \f$(x_2, y_2)\f$ are the ending points of each detected +line segment. +@param rho Distance resolution of the accumulator in pixels. +@param theta Angle resolution of the accumulator in radians. +@param threshold Accumulator threshold parameter. Only those lines are returned that get enough +votes ( \f$>\texttt{threshold}\f$ ). +@param minLineLength Minimum line length. Line segments shorter than that are rejected. +@param maxLineGap Maximum allowed gap between points on the same line to link them. + +@sa LineSegmentDetector + */ +CV_EXPORTS_W void HoughLinesP( InputArray image, OutputArray lines, + double rho, double theta, int threshold, + double minLineLength = 0, double maxLineGap = 0 ); + +/** @brief Finds lines in a set of points using the standard Hough transform. + +The function finds lines in a set of points using a modification of the Hough transform. +@include snippets/imgproc_HoughLinesPointSet.cpp +@param point Input vector of points. Each vector must be encoded as a Point vector \f$(x,y)\f$. Type must be CV_32FC2 or CV_32SC2. +@param lines Output vector of found lines. Each vector is encoded as a vector \f$(votes, rho, theta)\f$. +The larger the value of 'votes', the higher the reliability of the Hough line. +@param lines_max Max count of hough lines. +@param threshold Accumulator threshold parameter. Only those lines are returned that get enough +votes ( \f$>\texttt{threshold}\f$ ) +@param min_rho Minimum Distance value of the accumulator in pixels. +@param max_rho Maximum Distance value of the accumulator in pixels. +@param rho_step Distance resolution of the accumulator in pixels. +@param min_theta Minimum angle value of the accumulator in radians. +@param max_theta Maximum angle value of the accumulator in radians. +@param theta_step Angle resolution of the accumulator in radians. + */ +CV_EXPORTS_W void HoughLinesPointSet( InputArray point, OutputArray lines, int lines_max, int threshold, + double min_rho, double max_rho, double rho_step, + double min_theta, double max_theta, double theta_step ); + +/** @example samples/cpp/tutorial_code/ImgTrans/houghcircles.cpp +An example using the Hough circle detector +*/ + +/** @brief Finds circles in a grayscale image using the Hough transform. + +The function finds circles in a grayscale image using a modification of the Hough transform. + +Example: : +@include snippets/imgproc_HoughLinesCircles.cpp + +@note Usually the function detects the centers of circles well. However, it may fail to find correct +radii. 
You can assist to the function by specifying the radius range ( minRadius and maxRadius ) if +you know it. Or, you may set maxRadius to a negative number to return centers only without radius +search, and find the correct radius using an additional procedure. + +@param image 8-bit, single-channel, grayscale input image. +@param circles Output vector of found circles. Each vector is encoded as 3 or 4 element +floating-point vector \f$(x, y, radius)\f$ or \f$(x, y, radius, votes)\f$ . +@param method Detection method, see #HoughModes. Currently, the only implemented method is #HOUGH_GRADIENT +@param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if +dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has +half as big width and height. +@param minDist Minimum distance between the centers of the detected circles. If the parameter is +too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is +too large, some circles may be missed. +@param param1 First method-specific parameter. In case of #HOUGH_GRADIENT , it is the higher +threshold of the two passed to the Canny edge detector (the lower one is twice smaller). +@param param2 Second method-specific parameter. In case of #HOUGH_GRADIENT , it is the +accumulator threshold for the circle centers at the detection stage. The smaller it is, the more +false circles may be detected. Circles, corresponding to the larger accumulator values, will be +returned first. +@param minRadius Minimum circle radius. +@param maxRadius Maximum circle radius. If <= 0, uses the maximum image dimension. If < 0, returns +centers without finding the radius. + +@sa fitEllipse, minEnclosingCircle + */ +CV_EXPORTS_W void HoughCircles( InputArray image, OutputArray circles, + int method, double dp, double minDist, + double param1 = 100, double param2 = 100, + int minRadius = 0, int maxRadius = 0 ); + +//! @} imgproc_feature + +//! @addtogroup imgproc_filter +//! @{ + +/** @example samples/cpp/tutorial_code/ImgProc/Morphology_2.cpp +Advanced morphology Transformations sample code +![Sample screenshot](Morphology_2_Tutorial_Result.jpg) +Check @ref tutorial_opening_closing_hats "the corresponding tutorial" for more details +*/ + +/** @brief Erodes an image by using a specific structuring element. + +The function erodes the source image using the specified structuring element that determines the +shape of a pixel neighborhood over which the minimum is taken: + +\f[\texttt{dst} (x,y) = \min _{(x',y'): \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\f] + +The function supports the in-place mode. Erosion can be applied several ( iterations ) times. In +case of multi-channel images, each channel is processed independently. + +@param src input image; the number of channels can be arbitrary, but the depth should be one of +CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. +@param dst output image of the same size and type as src. +@param kernel structuring element used for erosion; if `element=Mat()`, a `3 x 3` rectangular +structuring element is used. Kernel can be created using #getStructuringElement. +@param anchor position of the anchor within the element; default value (-1, -1) means that the +anchor is at the element center. +@param iterations number of times erosion is applied. +@param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported. 
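+
+A minimal usage sketch (illustrative only; `src` is an assumed input image and the explicit 3x3
+rectangular element simply mirrors the default behaviour described above):
+@code
+    Mat element = getStructuringElement(MORPH_RECT, Size(3, 3));
+    Mat eroded;
+    erode(src, eroded, element);
+@endcode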
+@param borderValue border value in case of a constant border +@sa dilate, morphologyEx, getStructuringElement + */ +CV_EXPORTS_W void erode( InputArray src, OutputArray dst, InputArray kernel, + Point anchor = Point(-1,-1), int iterations = 1, + int borderType = BORDER_CONSTANT, + const Scalar& borderValue = morphologyDefaultBorderValue() ); + +/** @example samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp +Erosion and Dilation sample code +![Sample Screenshot-Erosion](Morphology_1_Tutorial_Erosion_Result.jpg)![Sample Screenshot-Dilation](Morphology_1_Tutorial_Dilation_Result.jpg) +Check @ref tutorial_erosion_dilatation "the corresponding tutorial" for more details +*/ + +/** @brief Dilates an image by using a specific structuring element. + +The function dilates the source image using the specified structuring element that determines the +shape of a pixel neighborhood over which the maximum is taken: +\f[\texttt{dst} (x,y) = \max _{(x',y'): \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\f] + +The function supports the in-place mode. Dilation can be applied several ( iterations ) times. In +case of multi-channel images, each channel is processed independently. + +@param src input image; the number of channels can be arbitrary, but the depth should be one of +CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. +@param dst output image of the same size and type as src. +@param kernel structuring element used for dilation; if elemenat=Mat(), a 3 x 3 rectangular +structuring element is used. Kernel can be created using #getStructuringElement +@param anchor position of the anchor within the element; default value (-1, -1) means that the +anchor is at the element center. +@param iterations number of times dilation is applied. +@param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not suported. +@param borderValue border value in case of a constant border +@sa erode, morphologyEx, getStructuringElement + */ +CV_EXPORTS_W void dilate( InputArray src, OutputArray dst, InputArray kernel, + Point anchor = Point(-1,-1), int iterations = 1, + int borderType = BORDER_CONSTANT, + const Scalar& borderValue = morphologyDefaultBorderValue() ); + +/** @brief Performs advanced morphological transformations. + +The function cv::morphologyEx can perform advanced morphological transformations using an erosion and dilation as +basic operations. + +Any of the operations can be done in-place. In case of multi-channel images, each channel is +processed independently. + +@param src Source image. The number of channels can be arbitrary. The depth should be one of +CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. +@param dst Destination image of the same size and type as source image. +@param op Type of a morphological operation, see #MorphTypes +@param kernel Structuring element. It can be created using #getStructuringElement. +@param anchor Anchor position with the kernel. Negative values mean that the anchor is at the +kernel center. +@param iterations Number of times erosion and dilation are applied. +@param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported. +@param borderValue Border value in case of a constant border. The default value has a special +meaning. +@sa dilate, erode, getStructuringElement +@note The number of iterations is the number of times erosion or dilatation operation will be applied. 
+For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to apply +successively: erode -> erode -> dilate -> dilate (and not erode -> dilate -> erode -> dilate). + */ +CV_EXPORTS_W void morphologyEx( InputArray src, OutputArray dst, + int op, InputArray kernel, + Point anchor = Point(-1,-1), int iterations = 1, + int borderType = BORDER_CONSTANT, + const Scalar& borderValue = morphologyDefaultBorderValue() ); + +//! @} imgproc_filter + +//! @addtogroup imgproc_transform +//! @{ + +/** @brief Resizes an image. + +The function resize resizes the image src down to or up to the specified size. Note that the +initial dst type or size are not taken into account. Instead, the size and type are derived from +the `src`,`dsize`,`fx`, and `fy`. If you want to resize src so that it fits the pre-created dst, +you may call the function as follows: +@code + // explicitly specify dsize=dst.size(); fx and fy will be computed from that. + resize(src, dst, dst.size(), 0, 0, interpolation); +@endcode +If you want to decimate the image by factor of 2 in each direction, you can call the function this +way: +@code + // specify fx and fy and let the function compute the destination image size. + resize(src, dst, Size(), 0.5, 0.5, interpolation); +@endcode +To shrink an image, it will generally look best with #INTER_AREA interpolation, whereas to +enlarge an image, it will generally look best with c#INTER_CUBIC (slow) or #INTER_LINEAR +(faster but still looks OK). + +@param src input image. +@param dst output image; it has the size dsize (when it is non-zero) or the size computed from +src.size(), fx, and fy; the type of dst is the same as of src. +@param dsize output image size; if it equals zero (`None` in Python), it is computed as: + \f[\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}\f] + Either dsize or both fx and fy must be non-zero. +@param fx scale factor along the horizontal axis; when it equals 0, it is computed as +\f[\texttt{(double)dsize.width/src.cols}\f] +@param fy scale factor along the vertical axis; when it equals 0, it is computed as +\f[\texttt{(double)dsize.height/src.rows}\f] +@param interpolation interpolation method, see #InterpolationFlags + +@sa warpAffine, warpPerspective, remap + */ +CV_EXPORTS_W void resize( InputArray src, OutputArray dst, + Size dsize, double fx = 0, double fy = 0, + int interpolation = INTER_LINEAR ); + +/** @brief Applies an affine transformation to an image. + +The function warpAffine transforms the source image using the specified matrix: + +\f[\texttt{dst} (x,y) = \texttt{src} ( \texttt{M} _{11} x + \texttt{M} _{12} y + \texttt{M} _{13}, \texttt{M} _{21} x + \texttt{M} _{22} y + \texttt{M} _{23})\f] + +when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted +with #invertAffineTransform and then put in the formula above instead of M. The function cannot +operate in-place. + +@param src input image. +@param dst output image that has the size dsize and the same type as src . +@param M \f$2\times 3\f$ transformation matrix. +@param dsize size of the output image. +@param flags combination of interpolation methods (see #InterpolationFlags) and the optional +flag #WARP_INVERSE_MAP that means that M is the inverse transformation ( +\f$\texttt{dst}\rightarrow\texttt{src}\f$ ). 
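+
+A minimal usage sketch (illustrative only; `src` is an assumed input image, rotated about its center
+by an arbitrary 30 degrees):
+@code
+    Point2f center(src.cols / 2.f, src.rows / 2.f);
+    Mat R = getRotationMatrix2D(center, 30, 1.0);
+    Mat rotated;
+    warpAffine(src, rotated, R, src.size());
+@endcode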
+@param borderMode pixel extrapolation method (see #BorderTypes); when +borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image corresponding to +the "outliers" in the source image are not modified by the function. +@param borderValue value used in case of a constant border; by default, it is 0. + +@sa warpPerspective, resize, remap, getRectSubPix, transform + */ +CV_EXPORTS_W void warpAffine( InputArray src, OutputArray dst, + InputArray M, Size dsize, + int flags = INTER_LINEAR, + int borderMode = BORDER_CONSTANT, + const Scalar& borderValue = Scalar()); + +/** @example samples/cpp/warpPerspective_demo.cpp +An example program shows using cv::getPerspectiveTransform and cv::warpPerspective for image warping +*/ + +/** @brief Applies a perspective transformation to an image. + +The function warpPerspective transforms the source image using the specified matrix: + +\f[\texttt{dst} (x,y) = \texttt{src} \left ( \frac{M_{11} x + M_{12} y + M_{13}}{M_{31} x + M_{32} y + M_{33}} , + \frac{M_{21} x + M_{22} y + M_{23}}{M_{31} x + M_{32} y + M_{33}} \right )\f] + +when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted with invert +and then put in the formula above instead of M. The function cannot operate in-place. + +@param src input image. +@param dst output image that has the size dsize and the same type as src . +@param M \f$3\times 3\f$ transformation matrix. +@param dsize size of the output image. +@param flags combination of interpolation methods (#INTER_LINEAR or #INTER_NEAREST) and the +optional flag #WARP_INVERSE_MAP, that sets M as the inverse transformation ( +\f$\texttt{dst}\rightarrow\texttt{src}\f$ ). +@param borderMode pixel extrapolation method (#BORDER_CONSTANT or #BORDER_REPLICATE). +@param borderValue value used in case of a constant border; by default, it equals 0. + +@sa warpAffine, resize, remap, getRectSubPix, perspectiveTransform + */ +CV_EXPORTS_W void warpPerspective( InputArray src, OutputArray dst, + InputArray M, Size dsize, + int flags = INTER_LINEAR, + int borderMode = BORDER_CONSTANT, + const Scalar& borderValue = Scalar()); + +/** @brief Applies a generic geometrical transformation to an image. + +The function remap transforms the source image using the specified map: + +\f[\texttt{dst} (x,y) = \texttt{src} (map_x(x,y),map_y(x,y))\f] + +where values of pixels with non-integer coordinates are computed using one of available +interpolation methods. \f$map_x\f$ and \f$map_y\f$ can be encoded as separate floating-point maps +in \f$map_1\f$ and \f$map_2\f$ respectively, or interleaved floating-point maps of \f$(x,y)\f$ in +\f$map_1\f$, or fixed-point maps created by using convertMaps. The reason you might want to +convert from floating to fixed-point representations of a map is that they can yield much faster +(\~2x) remapping operations. In the converted case, \f$map_1\f$ contains pairs (cvFloor(x), +cvFloor(y)) and \f$map_2\f$ contains indices in a table of interpolation coefficients. + +This function cannot operate in-place. + +@param src Source image. +@param dst Destination image. It has the same size as map1 and the same type as src . +@param map1 The first map of either (x,y) points or just x values having the type CV_16SC2 , +CV_32FC1, or CV_32FC2. See convertMaps for details on converting a floating point +representation to fixed-point for speed. +@param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map +if map1 is (x,y) points), respectively. 
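+
+A minimal usage sketch (illustrative only; `src` is an assumed input image and the CV_32FC1 maps
+built here simply mirror it horizontally):
+@code
+    Mat map_x(src.size(), CV_32FC1), map_y(src.size(), CV_32FC1);
+    for (int y = 0; y < src.rows; y++)
+        for (int x = 0; x < src.cols; x++)
+        {
+            map_x.at<float>(y, x) = (float)(src.cols - 1 - x);
+            map_y.at<float>(y, x) = (float)y;
+        }
+    Mat flipped;
+    remap(src, flipped, map_x, map_y, INTER_LINEAR);
+@endcode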
+@param interpolation Interpolation method (see #InterpolationFlags). The methods #INTER_AREA +and #INTER_LINEAR_EXACT are not supported by this function. +@param borderMode Pixel extrapolation method (see #BorderTypes). When +borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image that +corresponds to the "outliers" in the source image are not modified by the function. +@param borderValue Value used in case of a constant border. By default, it is 0. +@note +Due to current implementation limitations the size of an input and output images should be less than 32767x32767. + */ +CV_EXPORTS_W void remap( InputArray src, OutputArray dst, + InputArray map1, InputArray map2, + int interpolation, int borderMode = BORDER_CONSTANT, + const Scalar& borderValue = Scalar()); + +/** @brief Converts image transformation maps from one representation to another. + +The function converts a pair of maps for remap from one representation to another. The following +options ( (map1.type(), map2.type()) \f$\rightarrow\f$ (dstmap1.type(), dstmap2.type()) ) are +supported: + +- \f$\texttt{(CV_32FC1, CV_32FC1)} \rightarrow \texttt{(CV_16SC2, CV_16UC1)}\f$. This is the +most frequently used conversion operation, in which the original floating-point maps (see remap ) +are converted to a more compact and much faster fixed-point representation. The first output array +contains the rounded coordinates and the second array (created only when nninterpolation=false ) +contains indices in the interpolation tables. + +- \f$\texttt{(CV_32FC2)} \rightarrow \texttt{(CV_16SC2, CV_16UC1)}\f$. The same as above but +the original maps are stored in one 2-channel matrix. + +- Reverse conversion. Obviously, the reconstructed floating-point maps will not be exactly the same +as the originals. + +@param map1 The first input map of type CV_16SC2, CV_32FC1, or CV_32FC2 . +@param map2 The second input map of type CV_16UC1, CV_32FC1, or none (empty matrix), +respectively. +@param dstmap1 The first output map that has the type dstmap1type and the same size as src . +@param dstmap2 The second output map. +@param dstmap1type Type of the first output map that should be CV_16SC2, CV_32FC1, or +CV_32FC2 . +@param nninterpolation Flag indicating whether the fixed-point maps are used for the +nearest-neighbor or for a more complex interpolation. + +@sa remap, undistort, initUndistortRectifyMap + */ +CV_EXPORTS_W void convertMaps( InputArray map1, InputArray map2, + OutputArray dstmap1, OutputArray dstmap2, + int dstmap1type, bool nninterpolation = false ); + +/** @brief Calculates an affine matrix of 2D rotation. + +The function calculates the following matrix: + +\f[\begin{bmatrix} \alpha & \beta & (1- \alpha ) \cdot \texttt{center.x} - \beta \cdot \texttt{center.y} \\ - \beta & \alpha & \beta \cdot \texttt{center.x} + (1- \alpha ) \cdot \texttt{center.y} \end{bmatrix}\f] + +where + +\f[\begin{array}{l} \alpha = \texttt{scale} \cdot \cos \texttt{angle} , \\ \beta = \texttt{scale} \cdot \sin \texttt{angle} \end{array}\f] + +The transformation maps the rotation center to itself. If this is not the target, adjust the shift. + +@param center Center of the rotation in the source image. +@param angle Rotation angle in degrees. Positive values mean counter-clockwise rotation (the +coordinate origin is assumed to be the top-left corner). +@param scale Isotropic scale factor. + +@sa getAffineTransform, warpAffine, transform + */ +CV_EXPORTS_W Mat getRotationMatrix2D( Point2f center, double angle, double scale ); + +//! 
returns 3x3 perspective transformation for the corresponding 4 point pairs. +CV_EXPORTS Mat getPerspectiveTransform( const Point2f src[], const Point2f dst[] ); + +/** @brief Calculates an affine transform from three pairs of the corresponding points. + +The function calculates the \f$2 \times 3\f$ matrix of an affine transform so that: + +\f[\begin{bmatrix} x'_i \\ y'_i \end{bmatrix} = \texttt{map_matrix} \cdot \begin{bmatrix} x_i \\ y_i \\ 1 \end{bmatrix}\f] + +where + +\f[dst(i)=(x'_i,y'_i), src(i)=(x_i, y_i), i=0,1,2\f] + +@param src Coordinates of triangle vertices in the source image. +@param dst Coordinates of the corresponding triangle vertices in the destination image. + +@sa warpAffine, transform + */ +CV_EXPORTS Mat getAffineTransform( const Point2f src[], const Point2f dst[] ); + +/** @brief Inverts an affine transformation. + +The function computes an inverse affine transformation represented by \f$2 \times 3\f$ matrix M: + +\f[\begin{bmatrix} a_{11} & a_{12} & b_1 \\ a_{21} & a_{22} & b_2 \end{bmatrix}\f] + +The result is also a \f$2 \times 3\f$ matrix of the same type as M. + +@param M Original affine transformation. +@param iM Output reverse affine transformation. + */ +CV_EXPORTS_W void invertAffineTransform( InputArray M, OutputArray iM ); + +/** @brief Calculates a perspective transform from four pairs of the corresponding points. + +The function calculates the \f$3 \times 3\f$ matrix of a perspective transform so that: + +\f[\begin{bmatrix} t_i x'_i \\ t_i y'_i \\ t_i \end{bmatrix} = \texttt{map_matrix} \cdot \begin{bmatrix} x_i \\ y_i \\ 1 \end{bmatrix}\f] + +where + +\f[dst(i)=(x'_i,y'_i), src(i)=(x_i, y_i), i=0,1,2,3\f] + +@param src Coordinates of quadrangle vertices in the source image. +@param dst Coordinates of the corresponding quadrangle vertices in the destination image. + +@sa findHomography, warpPerspective, perspectiveTransform + */ +CV_EXPORTS_W Mat getPerspectiveTransform( InputArray src, InputArray dst ); + +CV_EXPORTS_W Mat getAffineTransform( InputArray src, InputArray dst ); + +/** @brief Retrieves a pixel rectangle from an image with sub-pixel accuracy. + +The function getRectSubPix extracts pixels from src: + +\f[patch(x, y) = src(x + \texttt{center.x} - ( \texttt{dst.cols} -1)*0.5, y + \texttt{center.y} - ( \texttt{dst.rows} -1)*0.5)\f] + +where the values of the pixels at non-integer coordinates are retrieved using bilinear +interpolation. Every channel of multi-channel images is processed independently. Also +the image should be a single channel or three channel image. While the center of the +rectangle must be inside the image, parts of the rectangle may be outside. + +@param image Source image. +@param patchSize Size of the extracted patch. +@param center Floating point coordinates of the center of the extracted rectangle within the +source image. The center must be inside the image. +@param patch Extracted patch that has the size patchSize and the same number of channels as src . +@param patchType Depth of the extracted pixels. By default, they have the same depth as src . + +@sa warpAffine, warpPerspective + */ +CV_EXPORTS_W void getRectSubPix( InputArray image, Size patchSize, + Point2f center, OutputArray patch, int patchType = -1 ); + +/** @example samples/cpp/polar_transforms.cpp +An example using the cv::linearPolar and cv::logPolar operations +*/ + +/** @brief Remaps an image to semilog-polar coordinates space. 
+ +@deprecated This function produces same result as cv::warpPolar(src, dst, src.size(), center, maxRadius, flags+WARP_POLAR_LOG); + +@internal +Transform the source image using the following transformation (See @ref polar_remaps_reference_image "Polar remaps reference image d)"): +\f[\begin{array}{l} + dst( \rho , \phi ) = src(x,y) \\ + dst.size() \leftarrow src.size() +\end{array}\f] + +where +\f[\begin{array}{l} + I = (dx,dy) = (x - center.x,y - center.y) \\ + \rho = M \cdot log_e(\texttt{magnitude} (I)) ,\\ + \phi = Kangle \cdot \texttt{angle} (I) \\ +\end{array}\f] + +and +\f[\begin{array}{l} + M = src.cols / log_e(maxRadius) \\ + Kangle = src.rows / 2\Pi \\ +\end{array}\f] + +The function emulates the human "foveal" vision and can be used for fast scale and +rotation-invariant template matching, for object tracking and so forth. +@param src Source image +@param dst Destination image. It will have same size and type as src. +@param center The transformation center; where the output precision is maximal +@param M Magnitude scale parameter. It determines the radius of the bounding circle to transform too. +@param flags A combination of interpolation methods, see #InterpolationFlags + +@note +- The function can not operate in-place. +- To calculate magnitude and angle in degrees #cartToPolar is used internally thus angles are measured from 0 to 360 with accuracy about 0.3 degrees. + +@sa cv::linearPolar +@endinternal +*/ +CV_EXPORTS_W void logPolar( InputArray src, OutputArray dst, + Point2f center, double M, int flags ); + +/** @brief Remaps an image to polar coordinates space. + +@deprecated This function produces same result as cv::warpPolar(src, dst, src.size(), center, maxRadius, flags) + +@internal +Transform the source image using the following transformation (See @ref polar_remaps_reference_image "Polar remaps reference image c)"): +\f[\begin{array}{l} + dst( \rho , \phi ) = src(x,y) \\ + dst.size() \leftarrow src.size() +\end{array}\f] + +where +\f[\begin{array}{l} + I = (dx,dy) = (x - center.x,y - center.y) \\ + \rho = Kmag \cdot \texttt{magnitude} (I) ,\\ + \phi = angle \cdot \texttt{angle} (I) +\end{array}\f] + +and +\f[\begin{array}{l} + Kx = src.cols / maxRadius \\ + Ky = src.rows / 2\Pi +\end{array}\f] + + +@param src Source image +@param dst Destination image. It will have same size and type as src. +@param center The transformation center; +@param maxRadius The radius of the bounding circle to transform. It determines the inverse magnitude scale parameter too. +@param flags A combination of interpolation methods, see #InterpolationFlags + +@note +- The function can not operate in-place. +- To calculate magnitude and angle in degrees #cartToPolar is used internally thus angles are measured from 0 to 360 with accuracy about 0.3 degrees. + +@sa cv::logPolar +@endinternal +*/ +CV_EXPORTS_W void linearPolar( InputArray src, OutputArray dst, + Point2f center, double maxRadius, int flags ); + + +/** \brief Remaps an image to polar or semilog-polar coordinates space + +@anchor polar_remaps_reference_image +![Polar remaps reference](pics/polar_remap_doc.png) + +Transform the source image using the following transformation: +\f[ +dst(\rho , \phi ) = src(x,y) +\f] + +where +\f[ +\begin{array}{l} +\vec{I} = (x - center.x, \;y - center.y) \\ +\phi = Kangle \cdot \texttt{angle} (\vec{I}) \\ +\rho = \left\{\begin{matrix} +Klin \cdot \texttt{magnitude} (\vec{I}) & default \\ +Klog \cdot log_e(\texttt{magnitude} (\vec{I})) & if \; semilog \\ +\end{matrix}\right. 
+\end{array} +\f] + +and +\f[ +\begin{array}{l} +Kangle = dsize.height / 2\Pi \\ +Klin = dsize.width / maxRadius \\ +Klog = dsize.width / log_e(maxRadius) \\ +\end{array} +\f] + + +\par Linear vs semilog mapping + +Polar mapping can be linear or semi-log. Add one of #WarpPolarMode to `flags` to specify the polar mapping mode. + +Linear is the default mode. + +The semilog mapping emulates the human "foveal" vision that permit very high acuity on the line of sight (central vision) +in contrast to peripheral vision where acuity is minor. + +\par Option on `dsize`: + +- if both values in `dsize <=0 ` (default), +the destination image will have (almost) same area of source bounding circle: +\f[\begin{array}{l} +dsize.area \leftarrow (maxRadius^2 \cdot \Pi) \\ +dsize.width = \texttt{cvRound}(maxRadius) \\ +dsize.height = \texttt{cvRound}(maxRadius \cdot \Pi) \\ +\end{array}\f] + + +- if only `dsize.height <= 0`, +the destination image area will be proportional to the bounding circle area but scaled by `Kx * Kx`: +\f[\begin{array}{l} +dsize.height = \texttt{cvRound}(dsize.width \cdot \Pi) \\ +\end{array} +\f] + +- if both values in `dsize > 0 `, +the destination image will have the given size therefore the area of the bounding circle will be scaled to `dsize`. + + +\par Reverse mapping + +You can get reverse mapping adding #WARP_INVERSE_MAP to `flags` +\snippet polar_transforms.cpp InverseMap + +In addiction, to calculate the original coordinate from a polar mapped coordinate \f$(rho, phi)->(x, y)\f$: +\snippet polar_transforms.cpp InverseCoordinate + +@param src Source image. +@param dst Destination image. It will have same type as src. +@param dsize The destination image size (see description for valid options). +@param center The transformation center. +@param maxRadius The radius of the bounding circle to transform. It determines the inverse magnitude scale parameter too. +@param flags A combination of interpolation methods, #InterpolationFlags + #WarpPolarMode. + - Add #WARP_POLAR_LINEAR to select linear polar mapping (default) + - Add #WARP_POLAR_LOG to select semilog polar mapping + - Add #WARP_INVERSE_MAP for reverse mapping. +@note +- The function can not operate in-place. +- To calculate magnitude and angle in degrees #cartToPolar is used internally thus angles are measured from 0 to 360 with accuracy about 0.3 degrees. +- This function uses #remap. Due to current implementation limitations the size of an input and output images should be less than 32767x32767. + +@sa cv::remap +*/ +CV_EXPORTS_W void warpPolar(InputArray src, OutputArray dst, Size dsize, + Point2f center, double maxRadius, int flags); + + +//! @} imgproc_transform + +//! @addtogroup imgproc_misc +//! @{ + +/** @overload */ +CV_EXPORTS_W void integral( InputArray src, OutputArray sum, int sdepth = -1 ); + +/** @overload */ +CV_EXPORTS_AS(integral2) void integral( InputArray src, OutputArray sum, + OutputArray sqsum, int sdepth = -1, int sqdepth = -1 ); + +/** @brief Calculates the integral of an image. + +The function calculates one or more integral images for the source image as follows: + +\f[\texttt{sum} (X,Y) = \sum _{x + +Calculates the cross-power spectrum of two supplied source arrays. The arrays are padded if needed +with getOptimalDFTSize. + +The function performs the following equations: +- First it applies a Hanning window (see ) to each +image to remove possible edge effects. This window is cached until the array size changes to speed +up processing time. 
+- Next it computes the forward DFTs of each source array: +\f[\mathbf{G}_a = \mathcal{F}\{src_1\}, \; \mathbf{G}_b = \mathcal{F}\{src_2\}\f] +where \f$\mathcal{F}\f$ is the forward DFT. +- It then computes the cross-power spectrum of each frequency domain array: +\f[R = \frac{ \mathbf{G}_a \mathbf{G}_b^*}{|\mathbf{G}_a \mathbf{G}_b^*|}\f] +- Next the cross-correlation is converted back into the time domain via the inverse DFT: +\f[r = \mathcal{F}^{-1}\{R\}\f] +- Finally, it computes the peak location and computes a 5x5 weighted centroid around the peak to +achieve sub-pixel accuracy. +\f[(\Delta x, \Delta y) = \texttt{weightedCentroid} \{\arg \max_{(x, y)}\{r\}\}\f] +- If non-zero, the response parameter is computed as the sum of the elements of r within the 5x5 +centroid around the peak location. It is normalized to a maximum of 1 (meaning there is a single +peak) and will be smaller when there are multiple peaks. + +@param src1 Source floating point array (CV_32FC1 or CV_64FC1) +@param src2 Source floating point array (CV_32FC1 or CV_64FC1) +@param window Floating point array with windowing coefficients to reduce edge effects (optional). +@param response Signal power within the 5x5 centroid around the peak, between 0 and 1 (optional). +@returns detected phase shift (sub-pixel) between the two arrays. + +@sa dft, getOptimalDFTSize, idft, mulSpectrums createHanningWindow + */ +CV_EXPORTS_W Point2d phaseCorrelate(InputArray src1, InputArray src2, + InputArray window = noArray(), CV_OUT double* response = 0); + +/** @brief This function computes a Hanning window coefficients in two dimensions. + +See (http://en.wikipedia.org/wiki/Hann_function) and (http://en.wikipedia.org/wiki/Window_function) +for more information. + +An example is shown below: +@code + // create hanning window of size 100x100 and type CV_32F + Mat hann; + createHanningWindow(hann, Size(100, 100), CV_32F); +@endcode +@param dst Destination array to place Hann coefficients in +@param winSize The window size specifications (both width and height must be > 1) +@param type Created array type + */ +CV_EXPORTS_W void createHanningWindow(OutputArray dst, Size winSize, int type); + +/** @brief Performs the per-element division of the first Fourier spectrum by the second Fourier spectrum. + +The function cv::divSpectrums performs the per-element division of the first array by the second array. +The arrays are CCS-packed or complex matrices that are results of a real or complex Fourier transform. + +@param a first input array. +@param b second input array of the same size and type as src1 . +@param c output array of the same size and type as src1 . +@param flags operation flags; currently, the only supported flag is cv::DFT_ROWS, which indicates that +each row of src1 and src2 is an independent 1D Fourier spectrum. If you do not want to use this flag, then simply add a `0` as value. +@param conjB optional flag that conjugates the second input array before the multiplication (true) +or not (false). +*/ +CV_EXPORTS_W void divSpectrums(InputArray a, InputArray b, OutputArray c, + int flags, bool conjB = false); + +//! @} imgproc_motion + +//! @addtogroup imgproc_misc +//! @{ + +/** @brief Applies a fixed-level threshold to each array element. + +The function applies fixed-level thresholding to a multiple-channel array. 
The function is typically +used to get a bi-level (binary) image out of a grayscale image ( #compare could be also used for +this purpose) or for removing a noise, that is, filtering out pixels with too small or too large +values. There are several types of thresholding supported by the function. They are determined by +type parameter. + +Also, the special values #THRESH_OTSU or #THRESH_TRIANGLE may be combined with one of the +above values. In these cases, the function determines the optimal threshold value using the Otsu's +or Triangle algorithm and uses it instead of the specified thresh. + +@note Currently, the Otsu's and Triangle methods are implemented only for 8-bit single-channel images. + +@param src input array (multiple-channel, 8-bit or 32-bit floating point). +@param dst output array of the same size and type and the same number of channels as src. +@param thresh threshold value. +@param maxval maximum value to use with the #THRESH_BINARY and #THRESH_BINARY_INV thresholding +types. +@param type thresholding type (see #ThresholdTypes). +@return the computed threshold value if Otsu's or Triangle methods used. + +@sa adaptiveThreshold, findContours, compare, min, max + */ +CV_EXPORTS_W double threshold( InputArray src, OutputArray dst, + double thresh, double maxval, int type ); + + +/** @brief Applies an adaptive threshold to an array. + +The function transforms a grayscale image to a binary image according to the formulae: +- **THRESH_BINARY** + \f[dst(x,y) = \fork{\texttt{maxValue}}{if \(src(x,y) > T(x,y)\)}{0}{otherwise}\f] +- **THRESH_BINARY_INV** + \f[dst(x,y) = \fork{0}{if \(src(x,y) > T(x,y)\)}{\texttt{maxValue}}{otherwise}\f] +where \f$T(x,y)\f$ is a threshold calculated individually for each pixel (see adaptiveMethod parameter). + +The function can process the image in-place. + +@param src Source 8-bit single-channel image. +@param dst Destination image of the same size and the same type as src. +@param maxValue Non-zero value assigned to the pixels for which the condition is satisfied +@param adaptiveMethod Adaptive thresholding algorithm to use, see #AdaptiveThresholdTypes. +The #BORDER_REPLICATE | #BORDER_ISOLATED is used to process boundaries. +@param thresholdType Thresholding type that must be either #THRESH_BINARY or #THRESH_BINARY_INV, +see #ThresholdTypes. +@param blockSize Size of a pixel neighborhood that is used to calculate a threshold value for the +pixel: 3, 5, 7, and so on. +@param C Constant subtracted from the mean or weighted mean (see the details below). Normally, it +is positive but may be zero or negative as well. + +@sa threshold, blur, GaussianBlur + */ +CV_EXPORTS_W void adaptiveThreshold( InputArray src, OutputArray dst, + double maxValue, int adaptiveMethod, + int thresholdType, int blockSize, double C ); + +//! @} imgproc_misc + +//! @addtogroup imgproc_filter +//! @{ + +/** @example samples/cpp/tutorial_code/ImgProc/Pyramids/Pyramids.cpp +An example using pyrDown and pyrUp functions +*/ + +/** @brief Blurs an image and downsamples it. + +By default, size of the output image is computed as `Size((src.cols+1)/2, (src.rows+1)/2)`, but in +any case, the following conditions should be satisfied: + +\f[\begin{array}{l} | \texttt{dstsize.width} *2-src.cols| \leq 2 \\ | \texttt{dstsize.height} *2-src.rows| \leq 2 \end{array}\f] + +The function performs the downsampling step of the Gaussian pyramid construction. 
First, it +convolves the source image with the kernel: + +\f[\frac{1}{256} \begin{bmatrix} 1 & 4 & 6 & 4 & 1 \\ 4 & 16 & 24 & 16 & 4 \\ 6 & 24 & 36 & 24 & 6 \\ 4 & 16 & 24 & 16 & 4 \\ 1 & 4 & 6 & 4 & 1 \end{bmatrix}\f] + +Then, it downsamples the image by rejecting even rows and columns. + +@param src input image. +@param dst output image; it has the specified size and the same type as src. +@param dstsize size of the output image. +@param borderType Pixel extrapolation method, see #BorderTypes (#BORDER_CONSTANT isn't supported) + */ +CV_EXPORTS_W void pyrDown( InputArray src, OutputArray dst, + const Size& dstsize = Size(), int borderType = BORDER_DEFAULT ); + +/** @brief Upsamples an image and then blurs it. + +By default, size of the output image is computed as `Size(src.cols\*2, (src.rows\*2)`, but in any +case, the following conditions should be satisfied: + +\f[\begin{array}{l} | \texttt{dstsize.width} -src.cols*2| \leq ( \texttt{dstsize.width} \mod 2) \\ | \texttt{dstsize.height} -src.rows*2| \leq ( \texttt{dstsize.height} \mod 2) \end{array}\f] + +The function performs the upsampling step of the Gaussian pyramid construction, though it can +actually be used to construct the Laplacian pyramid. First, it upsamples the source image by +injecting even zero rows and columns and then convolves the result with the same kernel as in +pyrDown multiplied by 4. + +@param src input image. +@param dst output image. It has the specified size and the same type as src . +@param dstsize size of the output image. +@param borderType Pixel extrapolation method, see #BorderTypes (only #BORDER_DEFAULT is supported) + */ +CV_EXPORTS_W void pyrUp( InputArray src, OutputArray dst, + const Size& dstsize = Size(), int borderType = BORDER_DEFAULT ); + +/** @brief Constructs the Gaussian pyramid for an image. + +The function constructs a vector of images and builds the Gaussian pyramid by recursively applying +pyrDown to the previously built pyramid layers, starting from `dst[0]==src`. + +@param src Source image. Check pyrDown for the list of supported types. +@param dst Destination vector of maxlevel+1 images of the same type as src. dst[0] will be the +same as src. dst[1] is the next pyramid layer, a smoothed and down-sized src, and so on. +@param maxlevel 0-based index of the last (the smallest) pyramid layer. It must be non-negative. +@param borderType Pixel extrapolation method, see #BorderTypes (#BORDER_CONSTANT isn't supported) + */ +CV_EXPORTS void buildPyramid( InputArray src, OutputArrayOfArrays dst, + int maxlevel, int borderType = BORDER_DEFAULT ); + +//! @} imgproc_filter + +//! @addtogroup imgproc_transform +//! @{ + +/** @brief Transforms an image to compensate for lens distortion. + +The function transforms an image to compensate radial and tangential lens distortion. + +The function is simply a combination of #initUndistortRectifyMap (with unity R ) and #remap +(with bilinear interpolation). See the former function for details of the transformation being +performed. + +Those pixels in the destination image, for which there is no correspondent pixels in the source +image, are filled with zeros (black color). + +A particular subset of the source image that will be visible in the corrected image can be regulated +by newCameraMatrix. You can use #getOptimalNewCameraMatrix to compute the appropriate +newCameraMatrix depending on your requirements. + +The camera matrix and the distortion parameters can be determined using #calibrateCamera. 
If +the resolution of images is different from the resolution used at the calibration stage, \f$f_x, +f_y, c_x\f$ and \f$c_y\f$ need to be scaled accordingly, while the distortion coefficients remain +the same. + +@param src Input (distorted) image. +@param dst Output (corrected) image that has the same size and type as src . +@param cameraMatrix Input camera matrix \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . +@param distCoeffs Input vector of distortion coefficients +\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ +of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. +@param newCameraMatrix Camera matrix of the distorted image. By default, it is the same as +cameraMatrix but you may additionally scale and shift the result by using a different matrix. + */ +CV_EXPORTS_W void undistort( InputArray src, OutputArray dst, + InputArray cameraMatrix, + InputArray distCoeffs, + InputArray newCameraMatrix = noArray() ); + +/** @brief Computes the undistortion and rectification transformation map. + +The function computes the joint undistortion and rectification transformation and represents the +result in the form of maps for remap. The undistorted image looks like original, as if it is +captured with a camera using the camera matrix =newCameraMatrix and zero distortion. In case of a +monocular camera, newCameraMatrix is usually equal to cameraMatrix, or it can be computed by +#getOptimalNewCameraMatrix for a better control over scaling. In case of a stereo camera, +newCameraMatrix is normally set to P1 or P2 computed by #stereoRectify . + +Also, this new camera is oriented differently in the coordinate space, according to R. That, for +example, helps to align two heads of a stereo camera so that the epipolar lines on both images +become horizontal and have the same y- coordinate (in case of a horizontally aligned stereo camera). + +The function actually builds the maps for the inverse mapping algorithm that is used by remap. That +is, for each pixel \f$(u, v)\f$ in the destination (corrected and rectified) image, the function +computes the corresponding coordinates in the source image (that is, in the original image from +camera). The following process is applied: +\f[ +\begin{array}{l} +x \leftarrow (u - {c'}_x)/{f'}_x \\ +y \leftarrow (v - {c'}_y)/{f'}_y \\ +{[X\,Y\,W]} ^T \leftarrow R^{-1}*[x \, y \, 1]^T \\ +x' \leftarrow X/W \\ +y' \leftarrow Y/W \\ +r^2 \leftarrow x'^2 + y'^2 \\ +x'' \leftarrow x' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} ++ 2p_1 x' y' + p_2(r^2 + 2 x'^2) + s_1 r^2 + s_2 r^4\\ +y'' \leftarrow y' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} ++ p_1 (r^2 + 2 y'^2) + 2 p_2 x' y' + s_3 r^2 + s_4 r^4 \\ +s\vecthree{x'''}{y'''}{1} = +\vecthreethree{R_{33}(\tau_x, \tau_y)}{0}{-R_{13}((\tau_x, \tau_y)} +{0}{R_{33}(\tau_x, \tau_y)}{-R_{23}(\tau_x, \tau_y)} +{0}{0}{1} R(\tau_x, \tau_y) \vecthree{x''}{y''}{1}\\ +map_x(u,v) \leftarrow x''' f_x + c_x \\ +map_y(u,v) \leftarrow y''' f_y + c_y +\end{array} +\f] +where \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ +are the distortion coefficients. + +In case of a stereo camera, this function is called twice: once for each camera head, after +stereoRectify, which in its turn is called after #stereoCalibrate. 
But if the stereo camera +was not calibrated, it is still possible to compute the rectification transformations directly from +the fundamental matrix using #stereoRectifyUncalibrated. For each camera, the function computes +homography H as the rectification transformation in a pixel domain, not a rotation matrix R in 3D +space. R can be computed from H as +\f[\texttt{R} = \texttt{cameraMatrix} ^{-1} \cdot \texttt{H} \cdot \texttt{cameraMatrix}\f] +where cameraMatrix can be chosen arbitrarily. + +@param cameraMatrix Input camera matrix \f$A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . +@param distCoeffs Input vector of distortion coefficients +\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ +of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. +@param R Optional rectification transformation in the object space (3x3 matrix). R1 or R2 , +computed by #stereoRectify can be passed here. If the matrix is empty, the identity transformation +is assumed. In cvInitUndistortMap R assumed to be an identity matrix. +@param newCameraMatrix New camera matrix \f$A'=\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{c_y'}{0}{0}{1}\f$. +@param size Undistorted image size. +@param m1type Type of the first output map that can be CV_32FC1, CV_32FC2 or CV_16SC2, see #convertMaps +@param map1 The first output map. +@param map2 The second output map. + */ +CV_EXPORTS_W void initUndistortRectifyMap( InputArray cameraMatrix, InputArray distCoeffs, + InputArray R, InputArray newCameraMatrix, + Size size, int m1type, OutputArray map1, OutputArray map2 ); + +//! initializes maps for #remap for wide-angle +CV_EXPORTS_W float initWideAngleProjMap( InputArray cameraMatrix, InputArray distCoeffs, + Size imageSize, int destImageWidth, + int m1type, OutputArray map1, OutputArray map2, + int projType = PROJ_SPHERICAL_EQRECT, double alpha = 0); + +/** @brief Returns the default new camera matrix. + +The function returns the camera matrix that is either an exact copy of the input cameraMatrix (when +centerPrinicipalPoint=false ), or the modified one (when centerPrincipalPoint=true). + +In the latter case, the new camera matrix will be: + +\f[\begin{bmatrix} f_x && 0 && ( \texttt{imgSize.width} -1)*0.5 \\ 0 && f_y && ( \texttt{imgSize.height} -1)*0.5 \\ 0 && 0 && 1 \end{bmatrix} ,\f] + +where \f$f_x\f$ and \f$f_y\f$ are \f$(0,0)\f$ and \f$(1,1)\f$ elements of cameraMatrix, respectively. + +By default, the undistortion functions in OpenCV (see #initUndistortRectifyMap, #undistort) do not +move the principal point. However, when you work with stereo, it is important to move the principal +points in both views to the same y-coordinate (which is required by most of stereo correspondence +algorithms), and may be to the same x-coordinate too. So, you can form the new camera matrix for +each view where the principal points are located at the center. + +@param cameraMatrix Input camera matrix. +@param imgsize Camera view image size in pixels. +@param centerPrincipalPoint Location of the principal point in the new camera matrix. The +parameter indicates whether this location should be at the image center or not. + */ +CV_EXPORTS_W Mat getDefaultNewCameraMatrix( InputArray cameraMatrix, Size imgsize = Size(), + bool centerPrincipalPoint = false ); + +/** @brief Computes the ideal point coordinates from the observed point coordinates. 
+
+The function is similar to #undistort and #initUndistortRectifyMap but it operates on a
+sparse set of points instead of a raster image. Also the function performs a reverse transformation
+to projectPoints. In case of a 3D object, it does not reconstruct its 3D coordinates, but for a
+planar object, it does, up to a translation vector, if the proper R is specified.
+
+For each observed point coordinate \f$(u, v)\f$ the function computes:
+\f[
+\begin{array}{l}
+x^{"} \leftarrow (u - c_x)/f_x \\
+y^{"} \leftarrow (v - c_y)/f_y \\
+(x',y') = undistort(x^{"},y^{"}, \texttt{distCoeffs}) \\
+{[X\,Y\,W]} ^T \leftarrow R*[x' \, y' \, 1]^T \\
+x \leftarrow X/W \\
+y \leftarrow Y/W \\
+\text{only performed if P is specified:} \\
+u' \leftarrow x {f'}_x + {c'}_x \\
+v' \leftarrow y {f'}_y + {c'}_y
+\end{array}
+\f]
+
+where *undistort* is an approximate iterative algorithm that estimates the normalized original
+point coordinates out of the normalized distorted point coordinates ("normalized" means that the
+coordinates do not depend on the camera matrix).
+
+The function can be used for either a stereo camera head or a monocular camera (when R is empty).
+@param src Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or
+vector\<Point2f\> ).
+@param dst Output ideal point coordinates (1xN/Nx1 2-channel or vector\<Point2f\> ) after undistortion and reverse perspective
+transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates.
+@param cameraMatrix Camera matrix \f$\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
+@param distCoeffs Input vector of distortion coefficients
+\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$
+of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
+@param R Rectification transformation in the object space (3x3 matrix). R1 or R2 computed by
+#stereoRectify can be passed here. If the matrix is empty, the identity transformation is used.
+@param P New camera matrix (3x3) or new projection matrix (3x4) \f$\begin{bmatrix} {f'}_x & 0 & {c'}_x & t_x \\ 0 & {f'}_y & {c'}_y & t_y \\ 0 & 0 & 1 & t_z \end{bmatrix}\f$. P1 or P2 computed by
+#stereoRectify can be passed here. If the matrix is empty, the identity new camera matrix is used.
+ */
+CV_EXPORTS_W void undistortPoints( InputArray src, OutputArray dst,
+                                   InputArray cameraMatrix, InputArray distCoeffs,
+                                   InputArray R = noArray(), InputArray P = noArray() );
+/** @overload
+    @note Default version of #undistortPoints does 5 iterations to compute undistorted points.
+
+ */
+CV_EXPORTS_AS(undistortPointsIter) void undistortPoints( InputArray src, OutputArray dst,
+                                   InputArray cameraMatrix, InputArray distCoeffs,
+                                   InputArray R, InputArray P, TermCriteria criteria );
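+
+// Illustrative usage sketch (not part of this header; the names pts, K and dist are assumptions):
+// undistorting a few detected pixel coordinates into normalized coordinates.
+//
+//     std::vector<cv::Point2f> pts = { {320.f, 240.f}, {100.f, 50.f} };
+//     std::vector<cv::Point2f> ideal;
+//     cv::Mat K    = (cv::Mat_<double>(3, 3) << 800, 0, 320, 0, 800, 240, 0, 0, 1);
+//     cv::Mat dist = (cv::Mat_<double>(1, 5) << -0.2, 0.05, 0, 0, 0);
+//     cv::undistortPoints(pts, ideal, K, dist);   // R and P omitted: result is normalized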
+
+//! @} imgproc_transform
+
+//! @addtogroup imgproc_hist
+//! @{
+
+/** @example samples/cpp/demhist.cpp
+An example for creating histograms of an image
+*/
+
+/** @brief Calculates a histogram of a set of arrays.
+
+The function cv::calcHist calculates the histogram of one or more arrays. The elements of a tuple used
+to increment a histogram bin are taken from the corresponding input arrays at the same location. The
+sample below shows how to compute a 2D Hue-Saturation histogram for a color image:
+@include snippets/imgproc_calcHist.cpp
+
+@param images Source arrays. They all should have the same depth, CV_8U, CV_16U or CV_32F , and the same
+size. Each of them can have an arbitrary number of channels.
+@param nimages Number of source images.
+@param channels List of the dims channels used to compute the histogram. The first array channels
+are numbered from 0 to images[0].channels()-1 , the second array channels are counted from
+images[0].channels() to images[0].channels() + images[1].channels()-1, and so on.
+@param mask Optional mask. If the matrix is not empty, it must be an 8-bit array of the same size
+as images[i] . The non-zero mask elements mark the array elements counted in the histogram.
+@param hist Output histogram, which is a dense or sparse dims -dimensional array.
+@param dims Histogram dimensionality that must be positive and not greater than CV_MAX_DIMS
+(equal to 32 in the current OpenCV version).
+@param histSize Array of histogram sizes in each dimension.
+@param ranges Array of the dims arrays of the histogram bin boundaries in each dimension. When the
+histogram is uniform ( uniform =true), then for each dimension i it is enough to specify the lower
+(inclusive) boundary \f$L_0\f$ of the 0-th histogram bin and the upper (exclusive) boundary
+\f$U_{\texttt{histSize}[i]-1}\f$ for the last histogram bin histSize[i]-1 . That is, in case of a
+uniform histogram each of ranges[i] is an array of 2 elements. When the histogram is not uniform (
+uniform=false ), then each of ranges[i] contains histSize[i]+1 elements:
+\f$L_0, U_0=L_1, U_1=L_2, ..., U_{\texttt{histSize[i]}-2}=L_{\texttt{histSize[i]}-1}, U_{\texttt{histSize[i]}-1}\f$
+. The array elements, that are not between \f$L_0\f$ and \f$U_{\texttt{histSize[i]}-1}\f$ , are not
+counted in the histogram.
+@param uniform Flag indicating whether the histogram is uniform or not (see above).
+@param accumulate Accumulation flag. If it is set, the histogram is not cleared in the beginning
+when it is allocated. This feature enables you to compute a single histogram from several sets of
+arrays, or to update the histogram in time.
+*/
+CV_EXPORTS void calcHist( const Mat* images, int nimages,
+                          const int* channels, InputArray mask,
+                          OutputArray hist, int dims, const int* histSize,
+                          const float** ranges, bool uniform = true, bool accumulate = false );
+
+/** @overload
+
+this variant uses %SparseMat for output
+*/
+CV_EXPORTS void calcHist( const Mat* images, int nimages,
+                          const int* channels, InputArray mask,
+                          SparseMat& hist, int dims,
+                          const int* histSize, const float** ranges,
+                          bool uniform = true, bool accumulate = false );
+
+/** @overload */
+CV_EXPORTS_W void calcHist( InputArrayOfArrays images,
+                            const std::vector<int>& channels,
+                            InputArray mask, OutputArray hist,
+                            const std::vector<int>& histSize,
+                            const std::vector<float>& ranges,
+                            bool accumulate = false );
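+
+// Illustrative usage sketch (not part of this header; `gray` is an assumed CV_8UC1 cv::Mat):
+// a 256-bin grayscale histogram computed with the std::vector-based overload above.
+//
+//     std::vector<cv::Mat>  images   = { gray };
+//     std::vector<int>      channels = { 0 }, histSize = { 256 };
+//     std::vector<float>    ranges   = { 0.f, 256.f };
+//     cv::Mat hist;
+//     cv::calcHist(images, channels, cv::noArray(), hist, histSize, ranges);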
+
+/** @brief Calculates the back projection of a histogram.
+
+The function cv::calcBackProject calculates the back projection of the histogram. That is, similarly to
+#calcHist , at each location (x, y) the function collects the values from the selected channels
+in the input images and finds the corresponding histogram bin. But instead of incrementing it, the
+function reads the bin value, scales it by scale , and stores in backProject(x,y) . In terms of
+statistics, the function computes the probability of each element value with respect to the empirical
+probability distribution represented by the histogram. See how, for example, you can find and track
+a bright-colored object in a scene:
+
+- Before tracking, show the object to the camera so that it covers almost the whole frame.
+Calculate a hue histogram. The histogram may have strong maximums, corresponding to the dominant
+colors in the object.
+
+- When tracking, calculate a back projection of a hue plane of each input video frame using that
+pre-computed histogram. Threshold the back projection to suppress weak colors. It may also make
+sense to suppress pixels with non-sufficient color saturation and too dark or too bright pixels.
+
+- Find connected components in the resulting picture and choose, for example, the largest
+component.
+
+This is an approximate algorithm of the CamShift color object tracker.
+
+@param images Source arrays. They all should have the same depth, CV_8U, CV_16U or CV_32F , and the same
+size. Each of them can have an arbitrary number of channels.
+@param nimages Number of source images.
+@param channels The list of channels used to compute the back projection. The number of channels
+must match the histogram dimensionality. The first array channels are numbered from 0 to
+images[0].channels()-1 , the second array channels are counted from images[0].channels() to
+images[0].channels() + images[1].channels()-1, and so on.
+@param hist Input histogram that can be dense or sparse.
+@param backProject Destination back projection array that is a single-channel array of the same
+size and depth as images[0] .
+@param ranges Array of arrays of the histogram bin boundaries in each dimension. See #calcHist .
+@param scale Optional scale factor for the output back projection.
+@param uniform Flag indicating whether the histogram is uniform or not (see above).
+
+@sa calcHist, compareHist
+ */
+CV_EXPORTS void calcBackProject( const Mat* images, int nimages,
+                                 const int* channels, InputArray hist,
+                                 OutputArray backProject, const float** ranges,
+                                 double scale = 1, bool uniform = true );
+
+/** @overload */
+CV_EXPORTS void calcBackProject( const Mat* images, int nimages,
+                                 const int* channels, const SparseMat& hist,
+                                 OutputArray backProject, const float** ranges,
+                                 double scale = 1, bool uniform = true );
+
+/** @overload */
+CV_EXPORTS_W void calcBackProject( InputArrayOfArrays images, const std::vector<int>& channels,
+                                   InputArray hist, OutputArray dst,
+                                   const std::vector<float>& ranges,
+                                   double scale );
+
+/** @brief Compares two histograms.
+
+The function cv::compareHist compares two dense or two sparse histograms using the specified method.
+
+The function returns \f$d(H_1, H_2)\f$ .
+
+While the function works well with 1-, 2-, 3-dimensional dense histograms, it may not be suitable
+for high-dimensional sparse histograms. In such histograms, because of aliasing and sampling
+problems, the coordinates of non-zero histogram bins can slightly shift. To compare such histograms
+or more general sparse configurations of weighted points, consider using the #EMD function.
+
+@param H1 First compared histogram.
+@param H2 Second compared histogram of the same size as H1 .
+@param method Comparison method, see #HistCompMethods
+ */
+CV_EXPORTS_W double compareHist( InputArray H1, InputArray H2, int method );
+
+/** @overload */
+CV_EXPORTS double compareHist( const SparseMat& H1, const SparseMat& H2, int method );
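+
+// Illustrative usage sketch (not part of this header; `hist1` and `hist2` are assumed outputs
+// of calcHist): comparing two dense histograms with the correlation metric.
+//
+//     double similarity = cv::compareHist(hist1, hist2, cv::HISTCMP_CORREL);
+//     // for HISTCMP_CORREL, values close to 1 indicate very similar distributions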
+
+/** @brief Equalizes the histogram of a grayscale image.
+
+The function equalizes the histogram of the input image using the following algorithm:
+
+- Calculate the histogram \f$H\f$ for src .
+- Normalize the histogram so that the sum of histogram bins is 255.
+- Compute the integral of the histogram:
+\f[H'_i = \sum _{0 \le j < i} H(j)\f]
+- Transform the image using \f$H'\f$ as a look-up table: \f$\texttt{dst}(x,y) = H'(\texttt{src}(x,y))\f$
+
+The algorithm normalizes the brightness and increases the contrast of the image.
+
+@param src Source 8-bit single channel image.
+@param dst Destination image of the same size and type as src .
+ */
+CV_EXPORTS_W void equalizeHist( InputArray src, OutputArray dst );
+
+/** @brief Creates a smart pointer to a cv::CLAHE class and initializes it.
+
+@param clipLimit Threshold for contrast limiting.
+@param tileGridSize Size of grid for histogram equalization. Input image will be divided into
+equally sized rectangular tiles. tileGridSize defines the number of tiles in row and column.
+ */
+CV_EXPORTS_W Ptr<CLAHE> createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8));
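+
+// Illustrative usage sketch (not part of this header; `gray` is an assumed CV_8UC1 cv::Mat):
+// contrast-limited adaptive histogram equalization.
+//
+//     cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(2.0, cv::Size(8, 8));
+//     cv::Mat enhanced;
+//     clahe->apply(gray, enhanced);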
+
+/** @brief Computes the "minimal work" distance between two weighted point configurations.
+
+The function computes the earth mover distance and/or a lower boundary of the distance between the
+two weighted point configurations. One of the applications described in @cite RubnerSept98,
+@cite Rubner2000 is multi-dimensional histogram comparison for image retrieval. EMD is a transportation
+problem that is solved using some modification of a simplex algorithm, thus the complexity is
+exponential in the worst case, though, on average it is much faster. In the case of a real metric
+the lower boundary can be calculated even faster (using linear-time algorithm) and it can be used
+to determine roughly whether the two signatures are far enough so that they cannot relate to the
+same object.
+
+@param signature1 First signature, a \f$\texttt{size1}\times \texttt{dims}+1\f$ floating-point matrix.
+Each row stores the point weight followed by the point coordinates. The matrix is allowed to have
+a single column (weights only) if the user-defined cost matrix is used. The weights must be
+non-negative and have at least one non-zero value.
+@param signature2 Second signature of the same format as signature1 , though the number of rows
+may be different. The total weights may be different. In this case an extra "dummy" point is added
+to either signature1 or signature2. The weights must be non-negative and have at least one non-zero
+value.
+@param distType Used metric. See #DistanceTypes.
+@param cost User-defined \f$\texttt{size1}\times \texttt{size2}\f$ cost matrix. Also, if a cost matrix
+is used, lower boundary lowerBound cannot be calculated because it needs a metric function.
+@param lowerBound Optional input/output parameter: lower boundary of a distance between the two
+signatures that is a distance between mass centers. The lower boundary may not be calculated if
+the user-defined cost matrix is used, the total weights of point configurations are not equal, or
+if the signatures consist of weights only (the signature matrices have a single column). You
+**must** initialize \*lowerBound . If the calculated distance between mass centers is greater or
+equal to \*lowerBound (it means that the signatures are far enough), the function does not
+calculate EMD. In any case \*lowerBound is set to the calculated distance between mass centers on
+return. Thus, if you want to calculate both distance between mass centers and EMD, \*lowerBound
+should be set to 0.
+@param flow Resultant \f$\texttt{size1} \times \texttt{size2}\f$ flow matrix: \f$\texttt{flow}_{i,j}\f$ is
+a flow from \f$i\f$ -th point of signature1 to \f$j\f$ -th point of signature2 .
+ */
+CV_EXPORTS float EMD( InputArray signature1, InputArray signature2,
+                      int distType, InputArray cost=noArray(),
+                      float* lowerBound = 0, OutputArray flow = noArray() );
+
+CV_EXPORTS_AS(EMD) float wrapperEMD( InputArray signature1, InputArray signature2,
+                      int distType, InputArray cost=noArray(),
+                      CV_IN_OUT Ptr<float> lowerBound = Ptr<float>(), OutputArray flow = noArray() );
+
+//! @} imgproc_hist
+
+/** @example samples/cpp/watershed.cpp
+An example using the watershed algorithm
+*/
+
+/** @brief Performs a marker-based image segmentation using the watershed algorithm.
+
+The function implements one of the variants of watershed, non-parametric marker-based segmentation
+algorithm, described in @cite Meyer92 .
+
+Before passing the image to the function, you have to roughly outline the desired regions in the
+image markers with positive (\>0) indices. So, every region is represented as one or more connected
+components with the pixel values 1, 2, 3, and so on. Such markers can be retrieved from a binary
+mask using #findContours and #drawContours (see the watershed.cpp demo). The markers are "seeds" of
+the future image regions. All the other pixels in markers , whose relation to the outlined regions
+is not known and should be defined by the algorithm, should be set to 0's. In the function output,
+each pixel in markers is set to a value of the "seed" components or to -1 at boundaries between the
+regions.
+
+@note Any two neighbor connected components are not necessarily separated by a watershed boundary
+(-1's pixels); for example, they can touch each other in the initial marker image passed to the
+function.
+
+@param image Input 8-bit 3-channel image.
+@param markers Input/output 32-bit single-channel image (map) of markers. It should have the same
+size as image .
+
+@sa findContours
+
+@ingroup imgproc_misc
+ */
+CV_EXPORTS_W void watershed( InputArray image, InputOutputArray markers );
+
+//! @addtogroup imgproc_filter
+//! @{
+
+/** @brief Performs initial step of meanshift segmentation of an image.
+
+The function implements the filtering stage of meanshift segmentation, that is, the output of the
+function is the filtered "posterized" image with color gradients and fine-grain texture flattened.
+At every pixel (X,Y) of the input image (or down-sized input image, see below) the function executes
+meanshift iterations, that is, the pixel (X,Y) neighborhood in the joint space-color hyperspace is
+considered:
+
+\f[(x,y): X- \texttt{sp} \le x \le X+ \texttt{sp} , Y- \texttt{sp} \le y \le Y+ \texttt{sp} , ||(R,G,B)-(r,g,b)|| \le \texttt{sr}\f]
+
+where (R,G,B) and (r,g,b) are the vectors of color components at (X,Y) and (x,y), respectively
+(though, the algorithm does not depend on the color space used, so any 3-component color space can
+be used instead). 
Over the neighborhood the average spatial value (X',Y') and average color vector +(R',G',B') are found and they act as the neighborhood center on the next iteration: + +\f[(X,Y)~(X',Y'), (R,G,B)~(R',G',B').\f] + +After the iterations over, the color components of the initial pixel (that is, the pixel from where +the iterations started) are set to the final value (average color at the last iteration): + +\f[I(X,Y) <- (R*,G*,B*)\f] + +When maxLevel \> 0, the gaussian pyramid of maxLevel+1 levels is built, and the above procedure is +run on the smallest layer first. After that, the results are propagated to the larger layer and the +iterations are run again only on those pixels where the layer colors differ by more than sr from the +lower-resolution layer of the pyramid. That makes boundaries of color regions sharper. Note that the +results will be actually different from the ones obtained by running the meanshift procedure on the +whole original image (i.e. when maxLevel==0). + +@param src The source 8-bit, 3-channel image. +@param dst The destination image of the same format and the same size as the source. +@param sp The spatial window radius. +@param sr The color window radius. +@param maxLevel Maximum level of the pyramid for the segmentation. +@param termcrit Termination criteria: when to stop meanshift iterations. + */ +CV_EXPORTS_W void pyrMeanShiftFiltering( InputArray src, OutputArray dst, + double sp, double sr, int maxLevel = 1, + TermCriteria termcrit=TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5,1) ); + +//! @} + +//! @addtogroup imgproc_misc +//! @{ + +/** @example samples/cpp/grabcut.cpp +An example using the GrabCut algorithm +![Sample Screenshot](grabcut_output1.jpg) +*/ + +/** @brief Runs the GrabCut algorithm. + +The function implements the [GrabCut image segmentation algorithm](http://en.wikipedia.org/wiki/GrabCut). + +@param img Input 8-bit 3-channel image. +@param mask Input/output 8-bit single-channel mask. The mask is initialized by the function when +mode is set to #GC_INIT_WITH_RECT. Its elements may have one of the #GrabCutClasses. +@param rect ROI containing a segmented object. The pixels outside of the ROI are marked as +"obvious background". The parameter is only used when mode==#GC_INIT_WITH_RECT . +@param bgdModel Temporary array for the background model. Do not modify it while you are +processing the same image. +@param fgdModel Temporary arrays for the foreground model. Do not modify it while you are +processing the same image. +@param iterCount Number of iterations the algorithm should make before returning the result. Note +that the result can be refined with further calls with mode==#GC_INIT_WITH_MASK or +mode==GC_EVAL . +@param mode Operation mode that could be one of the #GrabCutModes + */ +CV_EXPORTS_W void grabCut( InputArray img, InputOutputArray mask, Rect rect, + InputOutputArray bgdModel, InputOutputArray fgdModel, + int iterCount, int mode = GC_EVAL ); + +/** @example samples/cpp/distrans.cpp +An example on using the distance transform +*/ + +/** @brief Calculates the distance to the closest zero pixel for each pixel of the source image. + +The function cv::distanceTransform calculates the approximate or precise distance from every binary +image pixel to the nearest zero pixel. For zero image pixels, the distance will obviously be zero. + +When maskSize == #DIST_MASK_PRECISE and distanceType == #DIST_L2 , the function runs the +algorithm described in @cite Felzenszwalb04 . This algorithm is parallelized with the TBB library. 
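+
+As a rough usage sketch (the array names `src` and `dist` below are illustrative only), the
+precise variant can be requested like this:
+@code
+    Mat dist;
+    distanceTransform(src, dist, DIST_L2, DIST_MASK_PRECISE);  // src: 8-bit binary image
+@endcode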
+ +In other cases, the algorithm @cite Borgefors86 is used. This means that for a pixel the function +finds the shortest path to the nearest zero pixel consisting of basic shifts: horizontal, vertical, +diagonal, or knight's move (the latest is available for a \f$5\times 5\f$ mask). The overall +distance is calculated as a sum of these basic distances. Since the distance function should be +symmetric, all of the horizontal and vertical shifts must have the same cost (denoted as a ), all +the diagonal shifts must have the same cost (denoted as `b`), and all knight's moves must have the +same cost (denoted as `c`). For the #DIST_C and #DIST_L1 types, the distance is calculated +precisely, whereas for #DIST_L2 (Euclidean distance) the distance can be calculated only with a +relative error (a \f$5\times 5\f$ mask gives more accurate results). For `a`,`b`, and `c`, OpenCV +uses the values suggested in the original paper: +- DIST_L1: `a = 1, b = 2` +- DIST_L2: + - `3 x 3`: `a=0.955, b=1.3693` + - `5 x 5`: `a=1, b=1.4, c=2.1969` +- DIST_C: `a = 1, b = 1` + +Typically, for a fast, coarse distance estimation #DIST_L2, a \f$3\times 3\f$ mask is used. For a +more accurate distance estimation #DIST_L2, a \f$5\times 5\f$ mask or the precise algorithm is used. +Note that both the precise and the approximate algorithms are linear on the number of pixels. + +This variant of the function does not only compute the minimum distance for each pixel \f$(x, y)\f$ +but also identifies the nearest connected component consisting of zero pixels +(labelType==#DIST_LABEL_CCOMP) or the nearest zero pixel (labelType==#DIST_LABEL_PIXEL). Index of the +component/pixel is stored in `labels(x, y)`. When labelType==#DIST_LABEL_CCOMP, the function +automatically finds connected components of zero pixels in the input image and marks them with +distinct labels. When labelType==#DIST_LABEL_PIXEL, the function scans through the input image and +marks all the zero pixels with distinct labels. + +In this mode, the complexity is still linear. That is, the function provides a very fast way to +compute the Voronoi diagram for a binary image. Currently, the second variant can use only the +approximate distance transform algorithm, i.e. maskSize=#DIST_MASK_PRECISE is not supported +yet. + +@param src 8-bit, single-channel (binary) source image. +@param dst Output image with calculated distances. It is a 8-bit or 32-bit floating-point, +single-channel image of the same size as src. +@param labels Output 2D array of labels (the discrete Voronoi diagram). It has the type +CV_32SC1 and the same size as src. +@param distanceType Type of distance, see #DistanceTypes +@param maskSize Size of the distance transform mask, see #DistanceTransformMasks. +#DIST_MASK_PRECISE is not supported by this variant. In case of the #DIST_L1 or #DIST_C distance type, +the parameter is forced to 3 because a \f$3\times 3\f$ mask gives the same result as \f$5\times +5\f$ or any larger aperture. +@param labelType Type of the label array to build, see #DistanceTransformLabelTypes. + */ +CV_EXPORTS_AS(distanceTransformWithLabels) void distanceTransform( InputArray src, OutputArray dst, + OutputArray labels, int distanceType, int maskSize, + int labelType = DIST_LABEL_CCOMP ); + +/** @overload +@param src 8-bit, single-channel (binary) source image. +@param dst Output image with calculated distances. It is a 8-bit or 32-bit floating-point, +single-channel image of the same size as src . 
+@param distanceType Type of distance, see #DistanceTypes +@param maskSize Size of the distance transform mask, see #DistanceTransformMasks. In case of the +#DIST_L1 or #DIST_C distance type, the parameter is forced to 3 because a \f$3\times 3\f$ mask gives +the same result as \f$5\times 5\f$ or any larger aperture. +@param dstType Type of output image. It can be CV_8U or CV_32F. Type CV_8U can be used only for +the first variant of the function and distanceType == #DIST_L1. +*/ +CV_EXPORTS_W void distanceTransform( InputArray src, OutputArray dst, + int distanceType, int maskSize, int dstType=CV_32F); + +/** @example samples/cpp/ffilldemo.cpp +An example using the FloodFill technique +*/ + +/** @overload + +variant without `mask` parameter +*/ +CV_EXPORTS int floodFill( InputOutputArray image, + Point seedPoint, Scalar newVal, CV_OUT Rect* rect = 0, + Scalar loDiff = Scalar(), Scalar upDiff = Scalar(), + int flags = 4 ); + +/** @brief Fills a connected component with the given color. + +The function cv::floodFill fills a connected component starting from the seed point with the specified +color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The +pixel at \f$(x,y)\f$ is considered to belong to the repainted domain if: + +- in case of a grayscale image and floating range +\f[\texttt{src} (x',y')- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} (x',y')+ \texttt{upDiff}\f] + + +- in case of a grayscale image and fixed range +\f[\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)+ \texttt{upDiff}\f] + + +- in case of a color image and floating range +\f[\texttt{src} (x',y')_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} (x',y')_r+ \texttt{upDiff} _r,\f] +\f[\texttt{src} (x',y')_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} (x',y')_g+ \texttt{upDiff} _g\f] +and +\f[\texttt{src} (x',y')_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} (x',y')_b+ \texttt{upDiff} _b\f] + + +- in case of a color image and fixed range +\f[\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r+ \texttt{upDiff} _r,\f] +\f[\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g+ \texttt{upDiff} _g\f] +and +\f[\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b+ \texttt{upDiff} _b\f] + + +where \f$src(x',y')\f$ is the value of one of pixel neighbors that is already known to belong to the +component. That is, to be added to the connected component, a color/brightness of the pixel should +be close enough to: +- Color/brightness of one of its neighbors that already belong to the connected component in case +of a floating range. +- Color/brightness of the seed point in case of a fixed range. + +Use these functions to either mark a connected component with the specified color in-place, or build +a mask and then extract the contour, or copy the region to another image, and so on. + +@param image Input/output 1- or 3-channel, 8-bit, or floating-point image. 
It is modified by the +function unless the #FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See +the details below. +@param mask Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels +taller than image. Since this is both an input and output parameter, you must take responsibility +of initializing it. Flood-filling cannot go across non-zero pixels in the input mask. For example, +an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the +mask corresponding to filled pixels in the image are set to 1 or to the a value specified in flags +as described below. Additionally, the function fills the border of the mask with ones to simplify +internal processing. It is therefore possible to use the same mask in multiple calls to the function +to make sure the filled areas do not overlap. +@param seedPoint Starting point. +@param newVal New value of the repainted domain pixels. +@param loDiff Maximal lower brightness/color difference between the currently observed pixel and +one of its neighbors belonging to the component, or a seed pixel being added to the component. +@param upDiff Maximal upper brightness/color difference between the currently observed pixel and +one of its neighbors belonging to the component, or a seed pixel being added to the component. +@param rect Optional output parameter set by the function to the minimum bounding rectangle of the +repainted domain. +@param flags Operation flags. The first 8 bits contain a connectivity value. The default value of +4 means that only the four nearest neighbor pixels (those that share an edge) are considered. A +connectivity value of 8 means that the eight nearest neighbor pixels (those that share a corner) +will be considered. The next 8 bits (8-16) contain a value between 1 and 255 with which to fill +the mask (the default value is 1). For example, 4 | ( 255 \<\< 8 ) will consider 4 nearest +neighbours and fill the mask with a value of 255. The following additional options occupy higher +bits and therefore may be further combined with the connectivity and mask fill values using +bit-wise or (|), see #FloodFillFlags. + +@note Since the mask is larger than the filled image, a pixel \f$(x, y)\f$ in image corresponds to the +pixel \f$(x+1, y+1)\f$ in the mask . + +@sa findContours + */ +CV_EXPORTS_W int floodFill( InputOutputArray image, InputOutputArray mask, + Point seedPoint, Scalar newVal, CV_OUT Rect* rect=0, + Scalar loDiff = Scalar(), Scalar upDiff = Scalar(), + int flags = 4 ); + +//! Performs linear blending of two images: +//! \f[ \texttt{dst}(i,j) = \texttt{weights1}(i,j)*\texttt{src1}(i,j) + \texttt{weights2}(i,j)*\texttt{src2}(i,j) \f] +//! @param src1 It has a type of CV_8UC(n) or CV_32FC(n), where n is a positive integer. +//! @param src2 It has the same type and size as src1. +//! @param weights1 It has a type of CV_32FC1 and the same size with src1. +//! @param weights2 It has a type of CV_32FC1 and the same size with src1. +//! @param dst It is created if it does not have the same size and type with src1. +CV_EXPORTS_W void blendLinear(InputArray src1, InputArray src2, InputArray weights1, InputArray weights2, OutputArray dst); + +//! @} imgproc_misc + +//! @addtogroup imgproc_color_conversions +//! @{ + +/** @brief Converts an image from one color space to another. + +The function converts an input image from one color space to another. 
In case of a transformation +to-from RGB color space, the order of the channels should be specified explicitly (RGB or BGR). Note +that the default color format in OpenCV is often referred to as RGB but it is actually BGR (the +bytes are reversed). So the first byte in a standard (24-bit) color image will be an 8-bit Blue +component, the second byte will be Green, and the third byte will be Red. The fourth, fifth, and +sixth bytes would then be the second pixel (Blue, then Green, then Red), and so on. + +The conventional ranges for R, G, and B channel values are: +- 0 to 255 for CV_8U images +- 0 to 65535 for CV_16U images +- 0 to 1 for CV_32F images + +In case of linear transformations, the range does not matter. But in case of a non-linear +transformation, an input RGB image should be normalized to the proper value range to get the correct +results, for example, for RGB \f$\rightarrow\f$ L\*u\*v\* transformation. For example, if you have a +32-bit floating-point image directly converted from an 8-bit image without any scaling, then it will +have the 0..255 value range instead of 0..1 assumed by the function. So, before calling #cvtColor , +you need first to scale the image down: +@code + img *= 1./255; + cvtColor(img, img, COLOR_BGR2Luv); +@endcode +If you use #cvtColor with 8-bit images, the conversion will have some information lost. For many +applications, this will not be noticeable but it is recommended to use 32-bit images in applications +that need the full range of colors or that convert an image before an operation and then convert +back. + +If conversion adds the alpha channel, its value will set to the maximum of corresponding channel +range: 255 for CV_8U, 65535 for CV_16U, 1 for CV_32F. + +@param src input image: 8-bit unsigned, 16-bit unsigned ( CV_16UC... ), or single-precision +floating-point. +@param dst output image of the same size and depth as src. +@param code color space conversion code (see #ColorConversionCodes). +@param dstCn number of channels in the destination image; if the parameter is 0, the number of the +channels is derived automatically from src and code. + +@see @ref imgproc_color_conversions + */ +CV_EXPORTS_W void cvtColor( InputArray src, OutputArray dst, int code, int dstCn = 0 ); + +/** @brief Converts an image from one color space to another where the source image is +stored in two planes. + +This function only supports YUV420 to RGB conversion as of now. + +@param src1: 8-bit image (#CV_8U) of the Y plane. +@param src2: image containing interleaved U/V plane. +@param dst: output image. +@param code: Specifies the type of conversion. It can take any of the following values: +- #COLOR_YUV2BGR_NV12 +- #COLOR_YUV2RGB_NV12 +- #COLOR_YUV2BGRA_NV12 +- #COLOR_YUV2RGBA_NV12 +- #COLOR_YUV2BGR_NV21 +- #COLOR_YUV2RGB_NV21 +- #COLOR_YUV2BGRA_NV21 +- #COLOR_YUV2RGBA_NV21 +*/ +CV_EXPORTS_W void cvtColorTwoPlane( InputArray src1, InputArray src2, OutputArray dst, int code ); + +/** @brief main function for all demosaicing processes + +@param src input image: 8-bit unsigned or 16-bit unsigned. +@param dst output image of the same size and depth as src. +@param code Color space conversion code (see the description below). +@param dstCn number of channels in the destination image; if the parameter is 0, the number of the +channels is derived automatically from src and code. 
+ +The function can do the following transformations: + +- Demosaicing using bilinear interpolation + + #COLOR_BayerBG2BGR , #COLOR_BayerGB2BGR , #COLOR_BayerRG2BGR , #COLOR_BayerGR2BGR + + #COLOR_BayerBG2GRAY , #COLOR_BayerGB2GRAY , #COLOR_BayerRG2GRAY , #COLOR_BayerGR2GRAY + +- Demosaicing using Variable Number of Gradients. + + #COLOR_BayerBG2BGR_VNG , #COLOR_BayerGB2BGR_VNG , #COLOR_BayerRG2BGR_VNG , #COLOR_BayerGR2BGR_VNG + +- Edge-Aware Demosaicing. + + #COLOR_BayerBG2BGR_EA , #COLOR_BayerGB2BGR_EA , #COLOR_BayerRG2BGR_EA , #COLOR_BayerGR2BGR_EA + +- Demosaicing with alpha channel + + #COLOR_BayerBG2BGRA , #COLOR_BayerGB2BGRA , #COLOR_BayerRG2BGRA , #COLOR_BayerGR2BGRA + +@sa cvtColor +*/ +CV_EXPORTS_W void demosaicing(InputArray src, OutputArray dst, int code, int dstCn = 0); + +//! @} imgproc_color_conversions + +//! @addtogroup imgproc_shape +//! @{ + +/** @brief Calculates all of the moments up to the third order of a polygon or rasterized shape. + +The function computes moments, up to the 3rd order, of a vector shape or a rasterized shape. The +results are returned in the structure cv::Moments. + +@param array Raster image (single-channel, 8-bit or floating-point 2D array) or an array ( +\f$1 \times N\f$ or \f$N \times 1\f$ ) of 2D points (Point or Point2f ). +@param binaryImage If it is true, all non-zero image pixels are treated as 1's. The parameter is +used for images only. +@returns moments. + +@note Only applicable to contour moments calculations from Python bindings: Note that the numpy +type for the input array should be either np.int32 or np.float32. + +@sa contourArea, arcLength + */ +CV_EXPORTS_W Moments moments( InputArray array, bool binaryImage = false ); + +/** @brief Calculates seven Hu invariants. + +The function calculates seven Hu invariants (introduced in @cite Hu62; see also +) defined as: + +\f[\begin{array}{l} hu[0]= \eta _{20}+ \eta _{02} \\ hu[1]=( \eta _{20}- \eta _{02})^{2}+4 \eta _{11}^{2} \\ hu[2]=( \eta _{30}-3 \eta _{12})^{2}+ (3 \eta _{21}- \eta _{03})^{2} \\ hu[3]=( \eta _{30}+ \eta _{12})^{2}+ ( \eta _{21}+ \eta _{03})^{2} \\ hu[4]=( \eta _{30}-3 \eta _{12})( \eta _{30}+ \eta _{12})[( \eta _{30}+ \eta _{12})^{2}-3( \eta _{21}+ \eta _{03})^{2}]+(3 \eta _{21}- \eta _{03})( \eta _{21}+ \eta _{03})[3( \eta _{30}+ \eta _{12})^{2}-( \eta _{21}+ \eta _{03})^{2}] \\ hu[5]=( \eta _{20}- \eta _{02})[( \eta _{30}+ \eta _{12})^{2}- ( \eta _{21}+ \eta _{03})^{2}]+4 \eta _{11}( \eta _{30}+ \eta _{12})( \eta _{21}+ \eta _{03}) \\ hu[6]=(3 \eta _{21}- \eta _{03})( \eta _{21}+ \eta _{03})[3( \eta _{30}+ \eta _{12})^{2}-( \eta _{21}+ \eta _{03})^{2}]-( \eta _{30}-3 \eta _{12})( \eta _{21}+ \eta _{03})[3( \eta _{30}+ \eta _{12})^{2}-( \eta _{21}+ \eta _{03})^{2}] \\ \end{array}\f] + +where \f$\eta_{ji}\f$ stands for \f$\texttt{Moments::nu}_{ji}\f$ . + +These values are proved to be invariants to the image scale, rotation, and reflection except the +seventh one, whose sign is changed by reflection. This invariance is proved with the assumption of +infinite image resolution. In case of raster images, the computed Hu invariants for the original and +transformed images are a bit different. + +@param moments Input moments computed with moments . +@param hu Output Hu invariants. + +@sa matchShapes + */ +CV_EXPORTS void HuMoments( const Moments& moments, double hu[7] ); + +/** @overload */ +CV_EXPORTS_W void HuMoments( const Moments& m, OutputArray hu ); + +//! @} imgproc_shape + +//! @addtogroup imgproc_object +//! @{ + +//! 
type of the template matching operation +enum TemplateMatchModes { + TM_SQDIFF = 0, //!< \f[R(x,y)= \sum _{x',y'} (T(x',y')-I(x+x',y+y'))^2\f] + TM_SQDIFF_NORMED = 1, //!< \f[R(x,y)= \frac{\sum_{x',y'} (T(x',y')-I(x+x',y+y'))^2}{\sqrt{\sum_{x',y'}T(x',y')^2 \cdot \sum_{x',y'} I(x+x',y+y')^2}}\f] + TM_CCORR = 2, //!< \f[R(x,y)= \sum _{x',y'} (T(x',y') \cdot I(x+x',y+y'))\f] + TM_CCORR_NORMED = 3, //!< \f[R(x,y)= \frac{\sum_{x',y'} (T(x',y') \cdot I(x+x',y+y'))}{\sqrt{\sum_{x',y'}T(x',y')^2 \cdot \sum_{x',y'} I(x+x',y+y')^2}}\f] + TM_CCOEFF = 4, //!< \f[R(x,y)= \sum _{x',y'} (T'(x',y') \cdot I'(x+x',y+y'))\f] + //!< where + //!< \f[\begin{array}{l} T'(x',y')=T(x',y') - 1/(w \cdot h) \cdot \sum _{x'',y''} T(x'',y'') \\ I'(x+x',y+y')=I(x+x',y+y') - 1/(w \cdot h) \cdot \sum _{x'',y''} I(x+x'',y+y'') \end{array}\f] + TM_CCOEFF_NORMED = 5 //!< \f[R(x,y)= \frac{ \sum_{x',y'} (T'(x',y') \cdot I'(x+x',y+y')) }{ \sqrt{\sum_{x',y'}T'(x',y')^2 \cdot \sum_{x',y'} I'(x+x',y+y')^2} }\f] +}; + +/** @example samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp +An example using Template Matching algorithm +*/ + +/** @brief Compares a template against overlapped image regions. + +The function slides through image , compares the overlapped patches of size \f$w \times h\f$ against +templ using the specified method and stores the comparison results in result . Here are the formulae +for the available comparison methods ( \f$I\f$ denotes image, \f$T\f$ template, \f$R\f$ result ). The summation +is done over template and/or the image patch: \f$x' = 0...w-1, y' = 0...h-1\f$ + +After the function finishes the comparison, the best matches can be found as global minimums (when +#TM_SQDIFF was used) or maximums (when #TM_CCORR or #TM_CCOEFF was used) using the +#minMaxLoc function. In case of a color image, template summation in the numerator and each sum in +the denominator is done over all of the channels and separate mean values are used for each channel. +That is, the function can take a color template and a color image. The result will still be a +single-channel image, which is easier to analyze. + +@param image Image where the search is running. It must be 8-bit or 32-bit floating-point. +@param templ Searched template. It must be not greater than the source image and have the same +data type. +@param result Map of comparison results. It must be single-channel 32-bit floating-point. If image +is \f$W \times H\f$ and templ is \f$w \times h\f$ , then result is \f$(W-w+1) \times (H-h+1)\f$ . +@param method Parameter specifying the comparison method, see #TemplateMatchModes +@param mask Mask of searched template. It must have the same datatype and size with templ. It is +not set by default. Currently, only the #TM_SQDIFF and #TM_CCORR_NORMED methods are supported. + */ +CV_EXPORTS_W void matchTemplate( InputArray image, InputArray templ, + OutputArray result, int method, InputArray mask = noArray() ); + +//! @} + +//! @addtogroup imgproc_shape +//! @{ + +/** @example samples/cpp/connected_components.cpp +This program demonstrates connected components and use of the trackbar +*/ + +/** @brief computes the connected components labeled image of boolean image + +image with 4 or 8 way connectivity - returns N, the total number of labels [0, N-1] where 0 +represents the background label. ltype specifies the output label image type, an important +consideration based on the total number of labels or alternatively the total number of pixels in +the source image. 
ccltype specifies the connected components labeling algorithm to use, currently +Grana (BBDT) and Wu's (SAUF) @cite Wu2009 algorithms are supported, see the #ConnectedComponentsAlgorithmsTypes +for details. Note that SAUF algorithm forces a row major ordering of labels while BBDT does not. +This function uses parallel version of both Grana and Wu's algorithms if at least one allowed +parallel framework is enabled and if the rows of the image are at least twice the number returned by #getNumberOfCPUs. + +@param image the 8-bit single-channel image to be labeled +@param labels destination labeled image +@param connectivity 8 or 4 for 8-way or 4-way connectivity respectively +@param ltype output image label type. Currently CV_32S and CV_16U are supported. +@param ccltype connected components algorithm type (see the #ConnectedComponentsAlgorithmsTypes). +*/ +CV_EXPORTS_AS(connectedComponentsWithAlgorithm) int connectedComponents(InputArray image, OutputArray labels, + int connectivity, int ltype, int ccltype); + + +/** @overload + +@param image the 8-bit single-channel image to be labeled +@param labels destination labeled image +@param connectivity 8 or 4 for 8-way or 4-way connectivity respectively +@param ltype output image label type. Currently CV_32S and CV_16U are supported. +*/ +CV_EXPORTS_W int connectedComponents(InputArray image, OutputArray labels, + int connectivity = 8, int ltype = CV_32S); + + +/** @brief computes the connected components labeled image of boolean image and also produces a statistics output for each label + +image with 4 or 8 way connectivity - returns N, the total number of labels [0, N-1] where 0 +represents the background label. ltype specifies the output label image type, an important +consideration based on the total number of labels or alternatively the total number of pixels in +the source image. ccltype specifies the connected components labeling algorithm to use, currently +Grana's (BBDT) and Wu's (SAUF) @cite Wu2009 algorithms are supported, see the #ConnectedComponentsAlgorithmsTypes +for details. Note that SAUF algorithm forces a row major ordering of labels while BBDT does not. +This function uses parallel version of both Grana and Wu's algorithms (statistics included) if at least one allowed +parallel framework is enabled and if the rows of the image are at least twice the number returned by #getNumberOfCPUs. + +@param image the 8-bit single-channel image to be labeled +@param labels destination labeled image +@param stats statistics output for each label, including the background label. +Statistics are accessed via stats(label, COLUMN) where COLUMN is one of +#ConnectedComponentsTypes, selecting the statistic. The data type is CV_32S. +@param centroids centroid output for each label, including the background label. Centroids are +accessed via centroids(label, 0) for x and centroids(label, 1) for y. The data type CV_64F. +@param connectivity 8 or 4 for 8-way or 4-way connectivity respectively +@param ltype output image label type. Currently CV_32S and CV_16U are supported. +@param ccltype connected components algorithm type (see #ConnectedComponentsAlgorithmsTypes). 
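+
+A minimal usage sketch (the file name "blobs.png" and the threshold below are illustrative
+assumptions, not part of this interface): label a binarized image, then read the per-label
+area and centroid back from the stats and centroids arrays.
+@code
+    Mat bw = imread("blobs.png", IMREAD_GRAYSCALE) > 128;   // hypothetical binary input
+    Mat labels, stats, centroids;
+    int n = connectedComponentsWithStats(bw, labels, stats, centroids, 8, CV_32S, CCL_DEFAULT);
+    for (int i = 1; i < n; i++)   // label 0 is the background
+    {
+        int area = stats.at<int>(i, CC_STAT_AREA);
+        Point2d c(centroids.at<double>(i, 0), centroids.at<double>(i, 1));
+    }
+@endcode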
+*/
+CV_EXPORTS_AS(connectedComponentsWithStatsWithAlgorithm) int connectedComponentsWithStats(InputArray image, OutputArray labels,
+                                              OutputArray stats, OutputArray centroids,
+                                              int connectivity, int ltype, int ccltype);
+
+/** @overload
+@param image the 8-bit single-channel image to be labeled
+@param labels destination labeled image
+@param stats statistics output for each label, including the background label.
+Statistics are accessed via stats(label, COLUMN) where COLUMN is one of
+#ConnectedComponentsTypes, selecting the statistic. The data type is CV_32S.
+@param centroids centroid output for each label, including the background label. Centroids are
+accessed via centroids(label, 0) for x and centroids(label, 1) for y. The data type CV_64F.
+@param connectivity 8 or 4 for 8-way or 4-way connectivity respectively
+@param ltype output image label type. Currently CV_32S and CV_16U are supported.
+*/
+CV_EXPORTS_W int connectedComponentsWithStats(InputArray image, OutputArray labels,
+                                              OutputArray stats, OutputArray centroids,
+                                              int connectivity = 8, int ltype = CV_32S);
+
+
+/** @brief Finds contours in a binary image.
+
+The function retrieves contours from the binary image using the algorithm @cite Suzuki85 . The contours
+are a useful tool for shape analysis and object detection and recognition. See squares.cpp in the
+OpenCV sample directory.
+@note Since opencv 3.2 source image is not modified by this function.
+
+@param image Source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero
+pixels remain 0's, so the image is treated as binary. You can use #compare, #inRange, #threshold ,
+#adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one.
+If mode equals to #RETR_CCOMP or #RETR_FLOODFILL, the input can also be a 32-bit integer image of labels (CV_32SC1).
+@param contours Detected contours. Each contour is stored as a vector of points (e.g.
+std::vector<std::vector<cv::Point> >).
+@param hierarchy Optional output vector (e.g. std::vector<cv::Vec4i>), containing information about the image topology. It has
+as many elements as the number of contours. For each i-th contour contours[i], the elements
+hierarchy[i][0] , hierarchy[i][1] , hierarchy[i][2] , and hierarchy[i][3] are set to 0-based indices
+in contours of the next and previous contours at the same hierarchical level, the first child
+contour and the parent contour, respectively. If for the contour i there are no next, previous,
+parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.
+@note In Python, hierarchy is nested inside a top level array. Use hierarchy[0][i] to access hierarchical elements of i-th contour.
+@param mode Contour retrieval mode, see #RetrievalModes
+@param method Contour approximation method, see #ContourApproximationModes
+@param offset Optional offset by which every contour point is shifted. This is useful if the
+contours are extracted from the image ROI and then they should be analyzed in the whole image
+context.
+ */
+CV_EXPORTS_W void findContours( InputOutputArray image, OutputArrayOfArrays contours,
+                                OutputArray hierarchy, int mode,
+                                int method, Point offset = Point());
+
+/** @overload */
+CV_EXPORTS void findContours( InputOutputArray image, OutputArrayOfArrays contours,
+                              int mode, int method, Point offset = Point());
+
+/** @example samples/cpp/squares.cpp
+A program using pyramid scaling, Canny, contours and contour simplification to find
+squares in a list of images (pic1-6.png).
Returns sequence of squares detected on the image.
+*/
+
+/** @example samples/tapi/squares.cpp
+A program using pyramid scaling, Canny, contours and contour simplification to find
+squares in the input image.
+*/
+
+/** @brief Approximates a polygonal curve(s) with the specified precision.
+
+The function cv::approxPolyDP approximates a curve or a polygon with another curve/polygon with less
+vertices so that the distance between them is less or equal to the specified precision. It uses the
+Douglas-Peucker algorithm <http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm>
+
+@param curve Input vector of 2D points stored in std::vector or Mat
+@param approxCurve Result of the approximation. The type should match the type of the input curve.
+@param epsilon Parameter specifying the approximation accuracy. This is the maximum distance
+between the original curve and its approximation.
+@param closed If true, the approximated curve is closed (its first and last vertices are
+connected). Otherwise, it is not closed.
+ */
+CV_EXPORTS_W void approxPolyDP( InputArray curve,
+                                OutputArray approxCurve,
+                                double epsilon, bool closed );
+
+/** @brief Calculates a contour perimeter or a curve length.
+
+The function computes a curve length or a closed contour perimeter.
+
+@param curve Input vector of 2D points, stored in std::vector or Mat.
+@param closed Flag indicating whether the curve is closed or not.
+ */
+CV_EXPORTS_W double arcLength( InputArray curve, bool closed );
+
+/** @brief Calculates the up-right bounding rectangle of a point set or non-zero pixels of gray-scale image.
+
+The function calculates and returns the minimal up-right bounding rectangle for the specified point set or
+non-zero pixels of gray-scale image.
+
+@param array Input gray-scale image or 2D point set, stored in std::vector or Mat.
+ */
+CV_EXPORTS_W Rect boundingRect( InputArray array );
+
+/** @brief Calculates a contour area.
+
+The function computes a contour area. Similarly to moments , the area is computed using the Green
+formula. Thus, the returned area and the number of non-zero pixels, if you draw the contour using
+#drawContours or #fillPoly , can be different. Also, the function will most certainly give wrong
+results for contours with self-intersections.
+
+Example:
+@code
+    vector<Point2f> contour;
+    contour.push_back(Point2f(0, 0));
+    contour.push_back(Point2f(10, 0));
+    contour.push_back(Point2f(10, 10));
+    contour.push_back(Point2f(5, 4));
+
+    double area0 = contourArea(contour);
+    vector<Point2f> approx;
+    approxPolyDP(contour, approx, 5, true);
+    double area1 = contourArea(approx);
+
+    cout << "area0 =" << area0 << endl <<
+            "area1 =" << area1 << endl <<
+            "approx poly vertices" << approx.size() << endl;
+@endcode
+@param contour Input vector of 2D points (contour vertices), stored in std::vector or Mat.
+@param oriented Oriented area flag. If it is true, the function returns a signed area value,
+depending on the contour orientation (clockwise or counter-clockwise). Using this feature you can
+determine orientation of a contour by taking the sign of an area. By default, the parameter is
+false, which means that the absolute value is returned.
+ */
+CV_EXPORTS_W double contourArea( InputArray contour, bool oriented = false );
+
+/** @brief Finds a rotated rectangle of the minimum area enclosing the input 2D point set.
+
+The function calculates and returns the minimum-area bounding rectangle (possibly rotated) for a
+specified point set.
Developer should keep in mind that the returned RotatedRect can contain negative +indices when data is close to the containing Mat element boundary. + +@param points Input vector of 2D points, stored in std::vector\<\> or Mat + */ +CV_EXPORTS_W RotatedRect minAreaRect( InputArray points ); + +/** @brief Finds the four vertices of a rotated rect. Useful to draw the rotated rectangle. + +The function finds the four vertices of a rotated rectangle. This function is useful to draw the +rectangle. In C++, instead of using this function, you can directly use RotatedRect::points method. Please +visit the @ref tutorial_bounding_rotated_ellipses "tutorial on Creating Bounding rotated boxes and ellipses for contours" for more information. + +@param box The input rotated rectangle. It may be the output of +@param points The output array of four vertices of rectangles. + */ +CV_EXPORTS_W void boxPoints(RotatedRect box, OutputArray points); + +/** @brief Finds a circle of the minimum area enclosing a 2D point set. + +The function finds the minimal enclosing circle of a 2D point set using an iterative algorithm. + +@param points Input vector of 2D points, stored in std::vector\<\> or Mat +@param center Output center of the circle. +@param radius Output radius of the circle. + */ +CV_EXPORTS_W void minEnclosingCircle( InputArray points, + CV_OUT Point2f& center, CV_OUT float& radius ); + +/** @example samples/cpp/minarea.cpp +*/ + +/** @brief Finds a triangle of minimum area enclosing a 2D point set and returns its area. + +The function finds a triangle of minimum area enclosing the given set of 2D points and returns its +area. The output for a given 2D point set is shown in the image below. 2D points are depicted in +*red* and the enclosing triangle in *yellow*. + +![Sample output of the minimum enclosing triangle function](pics/minenclosingtriangle.png) + +The implementation of the algorithm is based on O'Rourke's @cite ORourke86 and Klee and Laskowski's +@cite KleeLaskowski85 papers. O'Rourke provides a \f$\theta(n)\f$ algorithm for finding the minimal +enclosing triangle of a 2D convex polygon with n vertices. Since the #minEnclosingTriangle function +takes a 2D point set as input an additional preprocessing step of computing the convex hull of the +2D point set is required. The complexity of the #convexHull function is \f$O(n log(n))\f$ which is higher +than \f$\theta(n)\f$. Thus the overall complexity of the function is \f$O(n log(n))\f$. + +@param points Input vector of 2D points with depth CV_32S or CV_32F, stored in std::vector\<\> or Mat +@param triangle Output vector of three 2D points defining the vertices of the triangle. The depth +of the OutputArray must be CV_32F. + */ +CV_EXPORTS_W double minEnclosingTriangle( InputArray points, CV_OUT OutputArray triangle ); + +/** @brief Compares two shapes. + +The function compares two shapes. All three implemented methods use the Hu invariants (see #HuMoments) + +@param contour1 First contour or grayscale image. +@param contour2 Second contour or grayscale image. +@param method Comparison method, see #ShapeMatchModes +@param parameter Method-specific parameter (not supported now). + */ +CV_EXPORTS_W double matchShapes( InputArray contour1, InputArray contour2, + int method, double parameter ); + +/** @example samples/cpp/convexhull.cpp +An example using the convexHull functionality +*/ + +/** @brief Finds the convex hull of a point set. 
+
+The function cv::convexHull finds the convex hull of a 2D point set using Sklansky's algorithm @cite Sklansky82
+that has *O(N log N)* complexity in the current implementation.
+
+@param points Input 2D point set, stored in std::vector or Mat.
+@param hull Output convex hull. It is either an integer vector of indices or vector of points. In
+the first case, the hull elements are 0-based indices of the convex hull points in the original
+array (since the set of convex hull points is a subset of the original point set). In the second
+case, hull elements are the convex hull points themselves.
+@param clockwise Orientation flag. If it is true, the output convex hull is oriented clockwise.
+Otherwise, it is oriented counter-clockwise. The assumed coordinate system has its X axis pointing
+to the right, and its Y axis pointing upwards.
+@param returnPoints Operation flag. In case of a matrix, when the flag is true, the function
+returns convex hull points. Otherwise, it returns indices of the convex hull points. When the
+output array is std::vector, the flag is ignored, and the output depends on the type of the
+vector: std::vector\<int\> implies returnPoints=false, std::vector\<Point\> implies
+returnPoints=true.
+
+@note `points` and `hull` should be different arrays, in-place processing isn't supported.
+
+Check @ref tutorial_hull "the corresponding tutorial" for more details.
+
+useful links:
+
+https://www.learnopencv.com/convex-hull-using-opencv-in-python-and-c/
+ */
+CV_EXPORTS_W void convexHull( InputArray points, OutputArray hull,
+                              bool clockwise = false, bool returnPoints = true );
+
+/** @brief Finds the convexity defects of a contour.
+
+The figure below displays convexity defects of a hand contour:
+
+![image](pics/defects.png)
+
+@param contour Input contour.
+@param convexhull Convex hull obtained using convexHull that should contain indices of the contour
+points that make the hull.
+@param convexityDefects The output vector of convexity defects. In C++ and the new Python/Java
+interface each convexity defect is represented as 4-element integer vector (a.k.a. #Vec4i):
+(start_index, end_index, farthest_pt_index, fixpt_depth), where indices are 0-based indices
+in the original contour of the convexity defect beginning, end and the farthest point, and
+fixpt_depth is fixed-point approximation (with 8 fractional bits) of the distance between the
+farthest contour point and the hull. That is, the floating-point value of the depth is
+fixpt_depth/256.0.
+ */
+CV_EXPORTS_W void convexityDefects( InputArray contour, InputArray convexhull, OutputArray convexityDefects );
+
+/** @brief Tests a contour convexity.
+
+The function tests whether the input contour is convex or not. The contour must be simple, that is,
+without self-intersections. Otherwise, the function output is undefined.
+
+@param contour Input vector of 2D points, stored in std::vector\<\> or Mat
+ */
+CV_EXPORTS_W bool isContourConvex( InputArray contour );
+
+/** @example samples/cpp/intersectExample.cpp
+Examples of how intersectConvexConvex works
+*/
+
+/** @brief Finds intersection of two convex polygons
+
+@param p1 First polygon
+@param p2 Second polygon
+@param p12 Output polygon describing the intersecting area
+@param handleNested When true, an intersection is found if one of the polygons is fully enclosed in the other.
+When false, no intersection is found.
If the polygons share a side or the vertex of one polygon lies on an edge +of the other, they are not considered nested and an intersection will be found regardless of the value of handleNested. + +@returns Absolute value of area of intersecting polygon + +@note intersectConvexConvex doesn't confirm that both polygons are convex and will return invalid results if they aren't. + */ +CV_EXPORTS_W float intersectConvexConvex( InputArray p1, InputArray p2, + OutputArray p12, bool handleNested = true ); + +/** @example samples/cpp/fitellipse.cpp +An example using the fitEllipse technique +*/ + +/** @brief Fits an ellipse around a set of 2D points. + +The function calculates the ellipse that fits (in a least-squares sense) a set of 2D points best of +all. It returns the rotated rectangle in which the ellipse is inscribed. The first algorithm described by @cite Fitzgibbon95 +is used. Developer should keep in mind that it is possible that the returned +ellipse/rotatedRect data contains negative indices, due to the data points being close to the +border of the containing Mat element. + +@param points Input 2D point set, stored in std::vector\<\> or Mat + */ +CV_EXPORTS_W RotatedRect fitEllipse( InputArray points ); + +/** @brief Fits an ellipse around a set of 2D points. + + The function calculates the ellipse that fits a set of 2D points. + It returns the rotated rectangle in which the ellipse is inscribed. + The Approximate Mean Square (AMS) proposed by @cite Taubin1991 is used. + + For an ellipse, this basis set is \f$ \chi= \left(x^2, x y, y^2, x, y, 1\right) \f$, + which is a set of six free coefficients \f$ A^T=\left\{A_{\text{xx}},A_{\text{xy}},A_{\text{yy}},A_x,A_y,A_0\right\} \f$. + However, to specify an ellipse, all that is needed is five numbers; the major and minor axes lengths \f$ (a,b) \f$, + the position \f$ (x_0,y_0) \f$, and the orientation \f$ \theta \f$. This is because the basis set includes lines, + quadratics, parabolic and hyperbolic functions as well as elliptical functions as possible fits. + If the fit is found to be a parabolic or hyperbolic function then the standard #fitEllipse method is used. + The AMS method restricts the fit to parabolic, hyperbolic and elliptical curves + by imposing the condition that \f$ A^T ( D_x^T D_x + D_y^T D_y) A = 1 \f$ where + the matrices \f$ Dx \f$ and \f$ Dy \f$ are the partial derivatives of the design matrix \f$ D \f$ with + respect to x and y. The matrices are formed row by row applying the following to + each of the points in the set: + \f{align*}{ + D(i,:)&=\left\{x_i^2, x_i y_i, y_i^2, x_i, y_i, 1\right\} & + D_x(i,:)&=\left\{2 x_i,y_i,0,1,0,0\right\} & + D_y(i,:)&=\left\{0,x_i,2 y_i,0,1,0\right\} + \f} + The AMS method minimizes the cost function + \f{equation*}{ + \epsilon ^2=\frac{ A^T D^T D A }{ A^T (D_x^T D_x + D_y^T D_y) A^T } + \f} + + The minimum cost is found by solving the generalized eigenvalue problem. + + \f{equation*}{ + D^T D A = \lambda \left( D_x^T D_x + D_y^T D_y\right) A + \f} + + @param points Input 2D point set, stored in std::vector\<\> or Mat + */ +CV_EXPORTS_W RotatedRect fitEllipseAMS( InputArray points ); + + +/** @brief Fits an ellipse around a set of 2D points. + + The function calculates the ellipse that fits a set of 2D points. + It returns the rotated rectangle in which the ellipse is inscribed. + The Direct least square (Direct) method by @cite Fitzgibbon1999 is used. 
+ + For an ellipse, this basis set is \f$ \chi= \left(x^2, x y, y^2, x, y, 1\right) \f$, + which is a set of six free coefficients \f$ A^T=\left\{A_{\text{xx}},A_{\text{xy}},A_{\text{yy}},A_x,A_y,A_0\right\} \f$. + However, to specify an ellipse, all that is needed is five numbers; the major and minor axes lengths \f$ (a,b) \f$, + the position \f$ (x_0,y_0) \f$, and the orientation \f$ \theta \f$. This is because the basis set includes lines, + quadratics, parabolic and hyperbolic functions as well as elliptical functions as possible fits. + The Direct method confines the fit to ellipses by ensuring that \f$ 4 A_{xx} A_{yy}- A_{xy}^2 > 0 \f$. + The condition imposed is that \f$ 4 A_{xx} A_{yy}- A_{xy}^2=1 \f$ which satisfies the inequality + and as the coefficients can be arbitrarily scaled is not overly restrictive. + + \f{equation*}{ + \epsilon ^2= A^T D^T D A \quad \text{with} \quad A^T C A =1 \quad \text{and} \quad C=\left(\begin{matrix} + 0 & 0 & 2 & 0 & 0 & 0 \\ + 0 & -1 & 0 & 0 & 0 & 0 \\ + 2 & 0 & 0 & 0 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 0 + \end{matrix} \right) + \f} + + The minimum cost is found by solving the generalized eigenvalue problem. + + \f{equation*}{ + D^T D A = \lambda \left( C\right) A + \f} + + The system produces only one positive eigenvalue \f$ \lambda\f$ which is chosen as the solution + with its eigenvector \f$\mathbf{u}\f$. These are used to find the coefficients + + \f{equation*}{ + A = \sqrt{\frac{1}{\mathbf{u}^T C \mathbf{u}}} \mathbf{u} + \f} + The scaling factor guarantees that \f$A^T C A =1\f$. + + @param points Input 2D point set, stored in std::vector\<\> or Mat + */ +CV_EXPORTS_W RotatedRect fitEllipseDirect( InputArray points ); + +/** @brief Fits a line to a 2D or 3D point set. + +The function fitLine fits a line to a 2D or 3D point set by minimizing \f$\sum_i \rho(r_i)\f$ where +\f$r_i\f$ is a distance between the \f$i^{th}\f$ point, the line and \f$\rho(r)\f$ is a distance function, one +of the following: +- DIST_L2 +\f[\rho (r) = r^2/2 \quad \text{(the simplest and the fastest least-squares method)}\f] +- DIST_L1 +\f[\rho (r) = r\f] +- DIST_L12 +\f[\rho (r) = 2 \cdot ( \sqrt{1 + \frac{r^2}{2}} - 1)\f] +- DIST_FAIR +\f[\rho \left (r \right ) = C^2 \cdot \left ( \frac{r}{C} - \log{\left(1 + \frac{r}{C}\right)} \right ) \quad \text{where} \quad C=1.3998\f] +- DIST_WELSCH +\f[\rho \left (r \right ) = \frac{C^2}{2} \cdot \left ( 1 - \exp{\left(-\left(\frac{r}{C}\right)^2\right)} \right ) \quad \text{where} \quad C=2.9846\f] +- DIST_HUBER +\f[\rho (r) = \fork{r^2/2}{if \(r < C\)}{C \cdot (r-C/2)}{otherwise} \quad \text{where} \quad C=1.345\f] + +The algorithm is based on the M-estimator ( ) technique +that iteratively fits the line using the weighted least-squares algorithm. After each iteration the +weights \f$w_i\f$ are adjusted to be inversely proportional to \f$\rho(r_i)\f$ . + +@param points Input vector of 2D or 3D points, stored in std::vector\<\> or Mat. +@param line Output line parameters. In case of 2D fitting, it should be a vector of 4 elements +(like Vec4f) - (vx, vy, x0, y0), where (vx, vy) is a normalized vector collinear to the line and +(x0, y0) is a point on the line. In case of 3D fitting, it should be a vector of 6 elements (like +Vec6f) - (vx, vy, vz, x0, y0, z0), where (vx, vy, vz) is a normalized vector collinear to the line +and (x0, y0, z0) is a point on the line. 
+@param distType Distance used by the M-estimator, see #DistanceTypes
+@param param Numerical parameter ( C ) for some types of distances. If it is 0, an optimal value
+is chosen.
+@param reps Sufficient accuracy for the radius (distance between the coordinate origin and the line).
+@param aeps Sufficient accuracy for the angle. 0.01 would be a good default value for reps and aeps.
+ */
+CV_EXPORTS_W void fitLine( InputArray points, OutputArray line, int distType,
+                           double param, double reps, double aeps );
+
+/** @brief Performs a point-in-contour test.
+
+The function determines whether the point is inside a contour, outside, or lies on an edge (or
+coincides with a vertex). It returns positive (inside), negative (outside), or zero (on an edge)
+value, correspondingly. When measureDist=false , the return value is +1, -1, and 0, respectively.
+Otherwise, the return value is a signed distance between the point and the nearest contour edge.
+
+See below a sample output of the function where each image pixel is tested against the contour:
+
+![sample output](pics/pointpolygon.png)
+
+@param contour Input contour.
+@param pt Point tested against the contour.
+@param measureDist If true, the function estimates the signed distance from the point to the
+nearest contour edge. Otherwise, the function only checks if the point is inside a contour or not.
+ */
+CV_EXPORTS_W double pointPolygonTest( InputArray contour, Point2f pt, bool measureDist );
+
+/** @brief Finds out if there is any intersection between two rotated rectangles.
+
+If there is then the vertices of the intersecting region are returned as well.
+
+Below are some examples of intersection configurations. The hatched pattern indicates the
+intersecting region and the red vertices are returned by the function.
+
+![intersection examples](pics/intersection.png)
+
+@param rect1 First rectangle
+@param rect2 Second rectangle
+@param intersectingRegion The output array of the vertices of the intersecting region. It returns
+at most 8 vertices. Stored as std::vector\<cv::Point2f\> or cv::Mat as Mx1 of type CV_32FC2.
+@returns One of #RectanglesIntersectTypes
+ */
+CV_EXPORTS_W int rotatedRectangleIntersection( const RotatedRect& rect1, const RotatedRect& rect2, OutputArray intersectingRegion );
+
+/** @brief Creates a smart pointer to a cv::GeneralizedHoughBallard class and initializes it.
+*/
+CV_EXPORTS_W Ptr<GeneralizedHoughBallard> createGeneralizedHoughBallard();
+
+/** @brief Creates a smart pointer to a cv::GeneralizedHoughGuil class and initializes it.
+*/
+CV_EXPORTS_W Ptr<GeneralizedHoughGuil> createGeneralizedHoughGuil();
+
+//! @} imgproc_shape
+
+//! @addtogroup imgproc_colormap
+//! @{
+
+//! 
GNU Octave/MATLAB equivalent colormaps +enum ColormapTypes +{ + COLORMAP_AUTUMN = 0, //!< ![autumn](pics/colormaps/colorscale_autumn.jpg) + COLORMAP_BONE = 1, //!< ![bone](pics/colormaps/colorscale_bone.jpg) + COLORMAP_JET = 2, //!< ![jet](pics/colormaps/colorscale_jet.jpg) + COLORMAP_WINTER = 3, //!< ![winter](pics/colormaps/colorscale_winter.jpg) + COLORMAP_RAINBOW = 4, //!< ![rainbow](pics/colormaps/colorscale_rainbow.jpg) + COLORMAP_OCEAN = 5, //!< ![ocean](pics/colormaps/colorscale_ocean.jpg) + COLORMAP_SUMMER = 6, //!< ![summer](pics/colormaps/colorscale_summer.jpg) + COLORMAP_SPRING = 7, //!< ![spring](pics/colormaps/colorscale_spring.jpg) + COLORMAP_COOL = 8, //!< ![cool](pics/colormaps/colorscale_cool.jpg) + COLORMAP_HSV = 9, //!< ![HSV](pics/colormaps/colorscale_hsv.jpg) + COLORMAP_PINK = 10, //!< ![pink](pics/colormaps/colorscale_pink.jpg) + COLORMAP_HOT = 11, //!< ![hot](pics/colormaps/colorscale_hot.jpg) + COLORMAP_PARULA = 12, //!< ![parula](pics/colormaps/colorscale_parula.jpg) + COLORMAP_MAGMA = 13, //!< ![magma](pics/colormaps/colorscale_magma.jpg) + COLORMAP_INFERNO = 14, //!< ![inferno](pics/colormaps/colorscale_inferno.jpg) + COLORMAP_PLASMA = 15, //!< ![plasma](pics/colormaps/colorscale_plasma.jpg) + COLORMAP_VIRIDIS = 16, //!< ![viridis](pics/colormaps/colorscale_viridis.jpg) + COLORMAP_CIVIDIS = 17, //!< ![cividis](pics/colormaps/colorscale_cividis.jpg) + COLORMAP_TWILIGHT = 18, //!< ![twilight](pics/colormaps/colorscale_twilight.jpg) + COLORMAP_TWILIGHT_SHIFTED = 19, //!< ![twilight shifted](pics/colormaps/colorscale_twilight_shifted.jpg) + COLORMAP_TURBO = 20, //!< ![turbo](pics/colormaps/colorscale_turbo.jpg) + COLORMAP_DEEPGREEN = 21 //!< ![deepgreen](pics/colormaps/colorscale_deepgreen.jpg) +}; + +/** @example samples/cpp/falsecolor.cpp +An example using applyColorMap function +*/ + +/** @brief Applies a GNU Octave/MATLAB equivalent colormap on a given image. + +@param src The source image, grayscale or colored of type CV_8UC1 or CV_8UC3. +@param dst The result is the colormapped source image. Note: Mat::create is called on dst. +@param colormap The colormap to apply, see #ColormapTypes +*/ +CV_EXPORTS_W void applyColorMap(InputArray src, OutputArray dst, int colormap); + +/** @brief Applies a user colormap on a given image. + +@param src The source image, grayscale or colored of type CV_8UC1 or CV_8UC3. +@param dst The result is the colormapped source image. Note: Mat::create is called on dst. +@param userColor The colormap to apply of type CV_8UC1 or CV_8UC3 and size 256 +*/ +CV_EXPORTS_W void applyColorMap(InputArray src, OutputArray dst, InputArray userColor); + +//! @} imgproc_colormap + +//! @addtogroup imgproc_draw +//! @{ + + +/** OpenCV color channel order is BGR[A] */ +#define CV_RGB(r, g, b) cv::Scalar((b), (g), (r), 0) + +/** @brief Draws a line segment connecting two points. + +The function line draws the line segment between pt1 and pt2 points in the image. The line is +clipped by the image boundaries. For non-antialiased lines with integer coordinates, the 8-connected +or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounding endings. Antialiased +lines are drawn using Gaussian filtering. + +@param img Image. +@param pt1 First point of the line segment. +@param pt2 Second point of the line segment. +@param color Line color. +@param thickness Line thickness. +@param lineType Type of the line. See #LineTypes. +@param shift Number of fractional bits in the point coordinates. 
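+
+A small sketch for illustration (the canvas below is a made-up image, not part of this
+interface): draw an anti-aliased segment and use shift to pass fixed-point sub-pixel endpoints.
+@code
+    Mat canvas(200, 200, CV_8UC3, Scalar::all(0));
+    // with shift = 4 the coordinates carry 4 fractional bits, so (100 << 4) + 8 means x = 100.5
+    line(canvas, Point(10 << 4, 10 << 4),
+         Point((100 << 4) + 8, 180 << 4),
+         Scalar(0, 0, 255), 2, LINE_AA, 4);
+@endcode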
+ */ +CV_EXPORTS_W void line(InputOutputArray img, Point pt1, Point pt2, const Scalar& color, + int thickness = 1, int lineType = LINE_8, int shift = 0); + +/** @brief Draws a arrow segment pointing from the first point to the second one. + +The function cv::arrowedLine draws an arrow between pt1 and pt2 points in the image. See also #line. + +@param img Image. +@param pt1 The point the arrow starts from. +@param pt2 The point the arrow points to. +@param color Line color. +@param thickness Line thickness. +@param line_type Type of the line. See #LineTypes +@param shift Number of fractional bits in the point coordinates. +@param tipLength The length of the arrow tip in relation to the arrow length + */ +CV_EXPORTS_W void arrowedLine(InputOutputArray img, Point pt1, Point pt2, const Scalar& color, + int thickness=1, int line_type=8, int shift=0, double tipLength=0.1); + +/** @brief Draws a simple, thick, or filled up-right rectangle. + +The function cv::rectangle draws a rectangle outline or a filled rectangle whose two opposite corners +are pt1 and pt2. + +@param img Image. +@param pt1 Vertex of the rectangle. +@param pt2 Vertex of the rectangle opposite to pt1 . +@param color Rectangle color or brightness (grayscale image). +@param thickness Thickness of lines that make up the rectangle. Negative values, like #FILLED, +mean that the function has to draw a filled rectangle. +@param lineType Type of the line. See #LineTypes +@param shift Number of fractional bits in the point coordinates. + */ +CV_EXPORTS_W void rectangle(InputOutputArray img, Point pt1, Point pt2, + const Scalar& color, int thickness = 1, + int lineType = LINE_8, int shift = 0); + +/** @overload + +use `rec` parameter as alternative specification of the drawn rectangle: `r.tl() and +r.br()-Point(1,1)` are opposite corners +*/ +CV_EXPORTS void rectangle(CV_IN_OUT Mat& img, Rect rec, + const Scalar& color, int thickness = 1, + int lineType = LINE_8, int shift = 0); + +/** @example samples/cpp/tutorial_code/ImgProc/basic_drawing/Drawing_2.cpp +An example using drawing functions +*/ + +/** @brief Draws a circle. + +The function cv::circle draws a simple or filled circle with a given center and radius. +@param img Image where the circle is drawn. +@param center Center of the circle. +@param radius Radius of the circle. +@param color Circle color. +@param thickness Thickness of the circle outline, if positive. Negative values, like #FILLED, +mean that a filled circle is to be drawn. +@param lineType Type of the circle boundary. See #LineTypes +@param shift Number of fractional bits in the coordinates of the center and in the radius value. + */ +CV_EXPORTS_W void circle(InputOutputArray img, Point center, int radius, + const Scalar& color, int thickness = 1, + int lineType = LINE_8, int shift = 0); + +/** @brief Draws a simple or thick elliptic arc or fills an ellipse sector. + +The function cv::ellipse with more parameters draws an ellipse outline, a filled ellipse, an elliptic +arc, or a filled ellipse sector. The drawing code uses general parametric form. +A piecewise-linear curve is used to approximate the elliptic arc +boundary. If you need more control of the ellipse rendering, you can retrieve the curve using +#ellipse2Poly and then render it with #polylines or fill it with #fillPoly. If you use the first +variant of the function and want to draw the whole ellipse, not an arc, pass `startAngle=0` and +`endAngle=360`. If `startAngle` is greater than `endAngle`, they are swapped. 
The figure below explains +the meaning of the parameters to draw the blue arc. + +![Parameters of Elliptic Arc](pics/ellipse.svg) + +@param img Image. +@param center Center of the ellipse. +@param axes Half of the size of the ellipse main axes. +@param angle Ellipse rotation angle in degrees. +@param startAngle Starting angle of the elliptic arc in degrees. +@param endAngle Ending angle of the elliptic arc in degrees. +@param color Ellipse color. +@param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that +a filled ellipse sector is to be drawn. +@param lineType Type of the ellipse boundary. See #LineTypes +@param shift Number of fractional bits in the coordinates of the center and values of axes. + */ +CV_EXPORTS_W void ellipse(InputOutputArray img, Point center, Size axes, + double angle, double startAngle, double endAngle, + const Scalar& color, int thickness = 1, + int lineType = LINE_8, int shift = 0); + +/** @overload +@param img Image. +@param box Alternative ellipse representation via RotatedRect. This means that the function draws +an ellipse inscribed in the rotated rectangle. +@param color Ellipse color. +@param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that +a filled ellipse sector is to be drawn. +@param lineType Type of the ellipse boundary. See #LineTypes +*/ +CV_EXPORTS_W void ellipse(InputOutputArray img, const RotatedRect& box, const Scalar& color, + int thickness = 1, int lineType = LINE_8); + +/* ----------------------------------------------------------------------------------------- */ +/* ADDING A SET OF PREDEFINED MARKERS WHICH COULD BE USED TO HIGHLIGHT POSITIONS IN AN IMAGE */ +/* ----------------------------------------------------------------------------------------- */ + +//! Possible set of marker types used for the cv::drawMarker function +enum MarkerTypes +{ + MARKER_CROSS = 0, //!< A crosshair marker shape + MARKER_TILTED_CROSS = 1, //!< A 45 degree tilted crosshair marker shape + MARKER_STAR = 2, //!< A star marker shape, combination of cross and tilted cross + MARKER_DIAMOND = 3, //!< A diamond marker shape + MARKER_SQUARE = 4, //!< A square marker shape + MARKER_TRIANGLE_UP = 5, //!< An upwards pointing triangle marker shape + MARKER_TRIANGLE_DOWN = 6 //!< A downwards pointing triangle marker shape +}; + +/** @brief Draws a marker on a predefined position in an image. + +The function cv::drawMarker draws a marker on a given position in the image. For the moment several +marker types are supported, see #MarkerTypes for more information. + +@param img Image. +@param position The point where the crosshair is positioned. +@param color Line color. +@param markerType The specific type of marker you want to use, see #MarkerTypes +@param thickness Line thickness. +@param line_type Type of the line, See #LineTypes +@param markerSize The length of the marker axis [default = 20 pixels] + */ +CV_EXPORTS_W void drawMarker(CV_IN_OUT Mat& img, Point position, const Scalar& color, + int markerType = MARKER_CROSS, int markerSize=20, int thickness=1, + int line_type=8); + +/* ----------------------------------------------------------------------------------------- */ +/* END OF MARKER SECTION */ +/* ----------------------------------------------------------------------------------------- */ + +/** @overload */ +CV_EXPORTS void fillConvexPoly(Mat& img, const Point* pts, int npts, + const Scalar& color, int lineType = LINE_8, + int shift = 0); + +/** @brief Fills a convex polygon. 
+ +The function cv::fillConvexPoly draws a filled convex polygon. This function is much faster than the +function #fillPoly . It can fill not only convex polygons but any monotonic polygon without +self-intersections, that is, a polygon whose contour intersects every horizontal line (scan line) +twice at the most (though, its top-most and/or the bottom edge could be horizontal). + +@param img Image. +@param points Polygon vertices. +@param color Polygon color. +@param lineType Type of the polygon boundaries. See #LineTypes +@param shift Number of fractional bits in the vertex coordinates. + */ +CV_EXPORTS_W void fillConvexPoly(InputOutputArray img, InputArray points, + const Scalar& color, int lineType = LINE_8, + int shift = 0); + +/** @overload */ +CV_EXPORTS void fillPoly(Mat& img, const Point** pts, + const int* npts, int ncontours, + const Scalar& color, int lineType = LINE_8, int shift = 0, + Point offset = Point() ); + +/** @example samples/cpp/tutorial_code/ImgProc/basic_drawing/Drawing_1.cpp +An example using drawing functions +Check @ref tutorial_random_generator_and_text "the corresponding tutorial" for more details +*/ + +/** @brief Fills the area bounded by one or more polygons. + +The function cv::fillPoly fills an area bounded by several polygonal contours. The function can fill +complex areas, for example, areas with holes, contours with self-intersections (some of their +parts), and so forth. + +@param img Image. +@param pts Array of polygons where each polygon is represented as an array of points. +@param color Polygon color. +@param lineType Type of the polygon boundaries. See #LineTypes +@param shift Number of fractional bits in the vertex coordinates. +@param offset Optional offset of all points of the contours. + */ +CV_EXPORTS_W void fillPoly(InputOutputArray img, InputArrayOfArrays pts, + const Scalar& color, int lineType = LINE_8, int shift = 0, + Point offset = Point() ); + +/** @overload */ +CV_EXPORTS void polylines(Mat& img, const Point* const* pts, const int* npts, + int ncontours, bool isClosed, const Scalar& color, + int thickness = 1, int lineType = LINE_8, int shift = 0 ); + +/** @brief Draws several polygonal curves. + +@param img Image. +@param pts Array of polygonal curves. +@param isClosed Flag indicating whether the drawn polylines are closed or not. If they are closed, +the function draws a line from the last vertex of each curve to its first vertex. +@param color Polyline color. +@param thickness Thickness of the polyline edges. +@param lineType Type of the line segments. See #LineTypes +@param shift Number of fractional bits in the vertex coordinates. + +The function cv::polylines draws one or more polygonal curves. + */ +CV_EXPORTS_W void polylines(InputOutputArray img, InputArrayOfArrays pts, + bool isClosed, const Scalar& color, + int thickness = 1, int lineType = LINE_8, int shift = 0 ); + +/** @example samples/cpp/contours2.cpp +An example program illustrates the use of cv::findContours and cv::drawContours +\image html WindowsQtContoursOutput.png "Screenshot of the program" +*/ + +/** @example samples/cpp/segment_objects.cpp +An example using drawContours to clean up a background segmentation result +*/ + +/** @brief Draws contours outlines or filled contours. + +The function draws contour outlines in the image if \f$\texttt{thickness} \ge 0\f$ or fills the area +bounded by the contours if \f$\texttt{thickness}<0\f$ . 
The example below shows how to retrieve +connected components from the binary image and label them: : +@include snippets/imgproc_drawContours.cpp + +@param image Destination image. +@param contours All the input contours. Each contour is stored as a point vector. +@param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn. +@param color Color of the contours. +@param thickness Thickness of lines the contours are drawn with. If it is negative (for example, +thickness=#FILLED ), the contour interiors are drawn. +@param lineType Line connectivity. See #LineTypes +@param hierarchy Optional information about hierarchy. It is only needed if you want to draw only +some of the contours (see maxLevel ). +@param maxLevel Maximal level for drawn contours. If it is 0, only the specified contour is drawn. +If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function +draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This +parameter is only taken into account when there is hierarchy available. +@param offset Optional contour shift parameter. Shift all the drawn contours by the specified +\f$\texttt{offset}=(dx,dy)\f$ . +@note When thickness=#FILLED, the function is designed to handle connected components with holes correctly +even when no hierarchy date is provided. This is done by analyzing all the outlines together +using even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved +contours. In order to solve this problem, you need to call #drawContours separately for each sub-group +of contours, or iterate over the collection using contourIdx parameter. + */ +CV_EXPORTS_W void drawContours( InputOutputArray image, InputArrayOfArrays contours, + int contourIdx, const Scalar& color, + int thickness = 1, int lineType = LINE_8, + InputArray hierarchy = noArray(), + int maxLevel = INT_MAX, Point offset = Point() ); + +/** @brief Clips the line against the image rectangle. + +The function cv::clipLine calculates a part of the line segment that is entirely within the specified +rectangle. it returns false if the line segment is completely outside the rectangle. Otherwise, +it returns true . +@param imgSize Image size. The image rectangle is Rect(0, 0, imgSize.width, imgSize.height) . +@param pt1 First line point. +@param pt2 Second line point. + */ +CV_EXPORTS bool clipLine(Size imgSize, CV_IN_OUT Point& pt1, CV_IN_OUT Point& pt2); + +/** @overload +@param imgSize Image size. The image rectangle is Rect(0, 0, imgSize.width, imgSize.height) . +@param pt1 First line point. +@param pt2 Second line point. +*/ +CV_EXPORTS bool clipLine(Size2l imgSize, CV_IN_OUT Point2l& pt1, CV_IN_OUT Point2l& pt2); + +/** @overload +@param imgRect Image rectangle. +@param pt1 First line point. +@param pt2 Second line point. +*/ +CV_EXPORTS_W bool clipLine(Rect imgRect, CV_OUT CV_IN_OUT Point& pt1, CV_OUT CV_IN_OUT Point& pt2); + +/** @brief Approximates an elliptic arc with a polyline. + +The function ellipse2Poly computes the vertices of a polyline that approximates the specified +elliptic arc. It is used by #ellipse. If `arcStart` is greater than `arcEnd`, they are swapped. + +@param center Center of the arc. +@param axes Half of the size of the ellipse main axes. See #ellipse for details. +@param angle Rotation angle of the ellipse in degrees. See #ellipse for details. +@param arcStart Starting angle of the elliptic arc in degrees. 
+@param arcEnd Ending angle of the elliptic arc in degrees. +@param delta Angle between the subsequent polyline vertices. It defines the approximation +accuracy. +@param pts Output vector of polyline vertices. + */ +CV_EXPORTS_W void ellipse2Poly( Point center, Size axes, int angle, + int arcStart, int arcEnd, int delta, + CV_OUT std::vector& pts ); + +/** @overload +@param center Center of the arc. +@param axes Half of the size of the ellipse main axes. See #ellipse for details. +@param angle Rotation angle of the ellipse in degrees. See #ellipse for details. +@param arcStart Starting angle of the elliptic arc in degrees. +@param arcEnd Ending angle of the elliptic arc in degrees. +@param delta Angle between the subsequent polyline vertices. It defines the approximation accuracy. +@param pts Output vector of polyline vertices. +*/ +CV_EXPORTS void ellipse2Poly(Point2d center, Size2d axes, int angle, + int arcStart, int arcEnd, int delta, + CV_OUT std::vector& pts); + +/** @brief Draws a text string. + +The function cv::putText renders the specified text string in the image. Symbols that cannot be rendered +using the specified font are replaced by question marks. See #getTextSize for a text rendering code +example. + +@param img Image. +@param text Text string to be drawn. +@param org Bottom-left corner of the text string in the image. +@param fontFace Font type, see #HersheyFonts. +@param fontScale Font scale factor that is multiplied by the font-specific base size. +@param color Text color. +@param thickness Thickness of the lines used to draw a text. +@param lineType Line type. See #LineTypes +@param bottomLeftOrigin When true, the image data origin is at the bottom-left corner. Otherwise, +it is at the top-left corner. + */ +CV_EXPORTS_W void putText( InputOutputArray img, const String& text, Point org, + int fontFace, double fontScale, Scalar color, + int thickness = 1, int lineType = LINE_8, + bool bottomLeftOrigin = false ); + +/** @brief Calculates the width and height of a text string. + +The function cv::getTextSize calculates and returns the size of a box that contains the specified text. +That is, the following code renders some text, the tight box surrounding it, and the baseline: : +@code + String text = "Funny text inside the box"; + int fontFace = FONT_HERSHEY_SCRIPT_SIMPLEX; + double fontScale = 2; + int thickness = 3; + + Mat img(600, 800, CV_8UC3, Scalar::all(0)); + + int baseline=0; + Size textSize = getTextSize(text, fontFace, + fontScale, thickness, &baseline); + baseline += thickness; + + // center the text + Point textOrg((img.cols - textSize.width)/2, + (img.rows + textSize.height)/2); + + // draw the box + rectangle(img, textOrg + Point(0, baseline), + textOrg + Point(textSize.width, -textSize.height), + Scalar(0,0,255)); + // ... and the baseline first + line(img, textOrg + Point(0, thickness), + textOrg + Point(textSize.width, thickness), + Scalar(0, 0, 255)); + + // then put the text itself + putText(img, text, textOrg, fontFace, fontScale, + Scalar::all(255), thickness, 8); +@endcode + +@param text Input text string. +@param fontFace Font to use, see #HersheyFonts. +@param fontScale Font scale factor that is multiplied by the font-specific base size. +@param thickness Thickness of lines used to render the text. See #putText for details. +@param[out] baseLine y-coordinate of the baseline relative to the bottom-most text +point. +@return The size of a box that contains the specified text. 
+
+@see putText
+ */
+CV_EXPORTS_W Size getTextSize(const String& text, int fontFace,
+                              double fontScale, int thickness,
+                              CV_OUT int* baseLine);
+
+
+/** @brief Calculates the font-specific size to use to achieve a given height in pixels.
+
+@param fontFace Font to use, see cv::HersheyFonts.
+@param pixelHeight Pixel height to compute the fontScale for
+@param thickness Thickness of lines used to render the text. See putText for details.
+@return The fontSize to use for cv::putText
+
+@see cv::putText
+*/
+CV_EXPORTS_W double getFontScaleFromHeight(const int fontFace,
+                                           const int pixelHeight,
+                                           const int thickness = 1);
+
+/** @brief Line iterator
+
+The class is used to iterate over all the pixels on the raster line
+segment connecting two specified points.
+
+The class LineIterator is used to get each pixel of a raster line. It
+can be treated as a versatile implementation of the Bresenham algorithm
+where you can stop at each pixel and do some extra processing, for
+example, grab pixel values along the line or draw a line with an effect
+(for example, with XOR operation).
+
+The number of pixels along the line is stored in LineIterator::count.
+The method LineIterator::pos returns the current position in the image:
+
+@code{.cpp}
+// grabs pixels along the line (pt1, pt2)
+// from 8-bit 3-channel image to the buffer
+LineIterator it(img, pt1, pt2, 8);
+LineIterator it2 = it;
+vector<Vec3b> buf(it.count);
+
+for(int i = 0; i < it.count; i++, ++it)
+    buf[i] = *(const Vec3b*)*it;
+
+// alternative way of iterating through the line
+for(int i = 0; i < it2.count; i++, ++it2)
+{
+    Vec3b val = img.at<Vec3b>(it2.pos());
+    CV_Assert(buf[i] == val);
+}
+@endcode
+*/
+class CV_EXPORTS LineIterator
+{
+public:
+    /** @brief initializes the iterator
+
+    creates iterators for the line connecting pt1 and pt2
+    the line will be clipped on the image boundaries
+    the line is 8-connected or 4-connected
+    If leftToRight=true, then the iteration is always done
+    from the left-most point to the right most,
+    not to depend on the ordering of pt1 and pt2 parameters
+    */
+    LineIterator( const Mat& img, Point pt1, Point pt2,
+                  int connectivity = 8, bool leftToRight = false );
+    /** @brief returns pointer to the current pixel
+    */
+    uchar* operator *();
+    /** @brief prefix increment operator (++it). shifts iterator to the next pixel
+    */
+    LineIterator& operator ++();
+    /** @brief postfix increment operator (it++). shifts iterator to the next pixel
+    */
+    LineIterator operator ++(int);
+    /** @brief returns coordinates of the current pixel
+    */
+    Point pos() const;
+
+    uchar* ptr;
+    const uchar* ptr0;
+    int step, elemSize;
+    int err, count;
+    int minusDelta, plusDelta;
+    int minusStep, plusStep;
+};
+
+//! @cond IGNORED
+
+// === LineIterator implementation ===
+
+inline
+uchar* LineIterator::operator *()
+{
+    return ptr;
+}
+
+inline
+LineIterator& LineIterator::operator ++()
+{
+    int mask = err < 0 ? -1 : 0;
+    err += minusDelta + (plusDelta & mask);
+    ptr += minusStep + (plusStep & mask);
+    return *this;
+}
+
+inline
+LineIterator LineIterator::operator ++(int)
+{
+    LineIterator it = *this;
+    ++(*this);
+    return it;
+}
+
+inline
+Point LineIterator::pos() const
+{
+    Point p;
+    p.y = (int)((ptr - ptr0)/step);
+    p.x = (int)(((ptr - ptr0) - p.y*step)/elemSize);
+    return p;
+}
+
+//! @endcond
+
+//! @} imgproc_draw
+
+//! 
@} imgproc + +} // cv + +#ifndef DISABLE_OPENCV_24_COMPATIBILITY +#include "opencv2/imgproc/imgproc_c.h" +#endif + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/imgproc/detail/distortion_model.hpp b/third_party/opencv/uos/sw_64/include/opencv2/imgproc/detail/distortion_model.hpp new file mode 100644 index 00000000..a9c3ddec --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/imgproc/detail/distortion_model.hpp @@ -0,0 +1,123 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_IMGPROC_DETAIL_DISTORTION_MODEL_HPP +#define OPENCV_IMGPROC_DETAIL_DISTORTION_MODEL_HPP + +//! @cond IGNORED + +namespace cv { namespace detail { +/** +Computes the matrix for the projection onto a tilted image sensor +\param tauX angular parameter rotation around x-axis +\param tauY angular parameter rotation around y-axis +\param matTilt if not NULL returns the matrix +\f[ +\vecthreethree{R_{33}(\tau_x, \tau_y)}{0}{-R_{13}((\tau_x, \tau_y)} +{0}{R_{33}(\tau_x, \tau_y)}{-R_{23}(\tau_x, \tau_y)} +{0}{0}{1} R(\tau_x, \tau_y) +\f] +where +\f[ +R(\tau_x, \tau_y) = +\vecthreethree{\cos(\tau_y)}{0}{-\sin(\tau_y)}{0}{1}{0}{\sin(\tau_y)}{0}{\cos(\tau_y)} +\vecthreethree{1}{0}{0}{0}{\cos(\tau_x)}{\sin(\tau_x)}{0}{-\sin(\tau_x)}{\cos(\tau_x)} = +\vecthreethree{\cos(\tau_y)}{\sin(\tau_y)\sin(\tau_x)}{-\sin(\tau_y)\cos(\tau_x)} +{0}{\cos(\tau_x)}{\sin(\tau_x)} +{\sin(\tau_y)}{-\cos(\tau_y)\sin(\tau_x)}{\cos(\tau_y)\cos(\tau_x)}. 
+\f]
+\param dMatTiltdTauX if not NULL it returns the derivative of matTilt with
+respect to \f$\tau_x\f$.
+\param dMatTiltdTauY if not NULL it returns the derivative of matTilt with
+respect to \f$\tau_y\f$.
+\param invMatTilt if not NULL it returns the inverse of matTilt
+**/
+template <typename FLOAT>
+void computeTiltProjectionMatrix(FLOAT tauX,
+    FLOAT tauY,
+    Matx<FLOAT, 3, 3>* matTilt = 0,
+    Matx<FLOAT, 3, 3>* dMatTiltdTauX = 0,
+    Matx<FLOAT, 3, 3>* dMatTiltdTauY = 0,
+    Matx<FLOAT, 3, 3>* invMatTilt = 0)
+{
+    FLOAT cTauX = cos(tauX);
+    FLOAT sTauX = sin(tauX);
+    FLOAT cTauY = cos(tauY);
+    FLOAT sTauY = sin(tauY);
+    Matx<FLOAT, 3, 3> matRotX = Matx<FLOAT, 3, 3>(1,0,0,0,cTauX,sTauX,0,-sTauX,cTauX);
+    Matx<FLOAT, 3, 3> matRotY = Matx<FLOAT, 3, 3>(cTauY,0,-sTauY,0,1,0,sTauY,0,cTauY);
+    Matx<FLOAT, 3, 3> matRotXY = matRotY * matRotX;
+    Matx<FLOAT, 3, 3> matProjZ = Matx<FLOAT, 3, 3>(matRotXY(2,2),0,-matRotXY(0,2),0,matRotXY(2,2),-matRotXY(1,2),0,0,1);
+    if (matTilt)
+    {
+        // Matrix for trapezoidal distortion of tilted image sensor
+        *matTilt = matProjZ * matRotXY;
+    }
+    if (dMatTiltdTauX)
+    {
+        // Derivative with respect to tauX
+        Matx<FLOAT, 3, 3> dMatRotXYdTauX = matRotY * Matx<FLOAT, 3, 3>(0,0,0,0,-sTauX,cTauX,0,-cTauX,-sTauX);
+        Matx<FLOAT, 3, 3> dMatProjZdTauX = Matx<FLOAT, 3, 3>(dMatRotXYdTauX(2,2),0,-dMatRotXYdTauX(0,2),
+            0,dMatRotXYdTauX(2,2),-dMatRotXYdTauX(1,2),0,0,0);
+        *dMatTiltdTauX = (matProjZ * dMatRotXYdTauX) + (dMatProjZdTauX * matRotXY);
+    }
+    if (dMatTiltdTauY)
+    {
+        // Derivative with respect to tauY
+        Matx<FLOAT, 3, 3> dMatRotXYdTauY = Matx<FLOAT, 3, 3>(-sTauY,0,-cTauY,0,0,0,cTauY,0,-sTauY) * matRotX;
+        Matx<FLOAT, 3, 3> dMatProjZdTauY = Matx<FLOAT, 3, 3>(dMatRotXYdTauY(2,2),0,-dMatRotXYdTauY(0,2),
+            0,dMatRotXYdTauY(2,2),-dMatRotXYdTauY(1,2),0,0,0);
+        *dMatTiltdTauY = (matProjZ * dMatRotXYdTauY) + (dMatProjZdTauY * matRotXY);
+    }
+    if (invMatTilt)
+    {
+        FLOAT inv = 1./matRotXY(2,2);
+        Matx<FLOAT, 3, 3> invMatProjZ = Matx<FLOAT, 3, 3>(inv,0,inv*matRotXY(0,2),0,inv,inv*matRotXY(1,2),0,0,1);
+        *invMatTilt = matRotXY.t()*invMatProjZ;
+    }
+}
+}} // namespace detail, cv
+
+
+//! @endcond
+
+#endif // OPENCV_IMGPROC_DETAIL_DISTORTION_MODEL_HPP
diff --git a/third_party/opencv/uos/sw_64/include/opencv2/imgproc/hal/hal.hpp b/third_party/opencv/uos/sw_64/include/opencv2/imgproc/hal/hal.hpp
new file mode 100644
index 00000000..9e5728d0
--- /dev/null
+++ b/third_party/opencv/uos/sw_64/include/opencv2/imgproc/hal/hal.hpp
@@ -0,0 +1,246 @@
+#ifndef CV_IMGPROC_HAL_HPP
+#define CV_IMGPROC_HAL_HPP
+
+#include "opencv2/core/cvdef.h"
+#include "opencv2/core/cvstd.hpp"
+#include "opencv2/core/hal/interface.h"
+
+namespace cv { namespace hal {
+
+//! @addtogroup imgproc_hal_functions
+//! @{
+
+//---------------------------
+//! @cond IGNORED
+
+struct CV_EXPORTS Filter2D
+{
+    CV_DEPRECATED static Ptr<hal::Filter2D> create(uchar * , size_t , int ,
+                                    int , int ,
+                                    int , int ,
+                                    int , int ,
+                                    int , double ,
+                                    int , int ,
+                                    bool , bool );
+    virtual void apply(uchar * , size_t ,
+                       uchar * , size_t ,
+                       int , int ,
+                       int , int ,
+                       int , int ) = 0;
+    virtual ~Filter2D() {}
+};
+
+struct CV_EXPORTS SepFilter2D
+{
+    CV_DEPRECATED static Ptr<hal::SepFilter2D> create(int , int , int ,
+                                    uchar * , int ,
+                                    uchar * , int ,
+                                    int , int ,
+                                    double , int );
+    virtual void apply(uchar * , size_t ,
+                       uchar * , size_t ,
+                       int , int ,
+                       int , int ,
+                       int , int ) = 0;
+    virtual ~SepFilter2D() {}
+};
+
+
+struct CV_EXPORTS Morph
+{
+    CV_DEPRECATED static Ptr<hal::Morph> create(int , int , int , int , int ,
+                                    int , uchar * , size_t ,
+                                    int , int ,
+                                    int , int ,
+                                    int , const double *,
+                                    int , bool , bool );
+    virtual void apply(uchar * , size_t , uchar * , size_t , int , int ,
+                       int , int , int , int ,
+                       int , int , int , int ) = 0;
+    virtual ~Morph() {}
+};
+
+//! 
@endcond +//--------------------------- + +CV_EXPORTS void filter2D(int stype, int dtype, int kernel_type, + uchar * src_data, size_t src_step, + uchar * dst_data, size_t dst_step, + int width, int height, + int full_width, int full_height, + int offset_x, int offset_y, + uchar * kernel_data, size_t kernel_step, + int kernel_width, int kernel_height, + int anchor_x, int anchor_y, + double delta, int borderType, + bool isSubmatrix); + +CV_EXPORTS void sepFilter2D(int stype, int dtype, int ktype, + uchar * src_data, size_t src_step, + uchar * dst_data, size_t dst_step, + int width, int height, + int full_width, int full_height, + int offset_x, int offset_y, + uchar * kernelx_data, int kernelx_len, + uchar * kernely_data, int kernely_len, + int anchor_x, int anchor_y, + double delta, int borderType); + +CV_EXPORTS void morph(int op, int src_type, int dst_type, + uchar * src_data, size_t src_step, + uchar * dst_data, size_t dst_step, + int width, int height, + int roi_width, int roi_height, int roi_x, int roi_y, + int roi_width2, int roi_height2, int roi_x2, int roi_y2, + int kernel_type, uchar * kernel_data, size_t kernel_step, + int kernel_width, int kernel_height, int anchor_x, int anchor_y, + int borderType, const double borderValue[4], + int iterations, bool isSubmatrix); + + +CV_EXPORTS void resize(int src_type, + const uchar * src_data, size_t src_step, int src_width, int src_height, + uchar * dst_data, size_t dst_step, int dst_width, int dst_height, + double inv_scale_x, double inv_scale_y, int interpolation); + +CV_EXPORTS void warpAffine(int src_type, + const uchar * src_data, size_t src_step, int src_width, int src_height, + uchar * dst_data, size_t dst_step, int dst_width, int dst_height, + const double M[6], int interpolation, int borderType, const double borderValue[4]); + +CV_EXPORTS void warpPerspectve(int src_type, + const uchar * src_data, size_t src_step, int src_width, int src_height, + uchar * dst_data, size_t dst_step, int dst_width, int dst_height, + const double M[9], int interpolation, int borderType, const double borderValue[4]); + +CV_EXPORTS void cvtBGRtoBGR(const uchar * src_data, size_t src_step, + uchar * dst_data, size_t dst_step, + int width, int height, + int depth, int scn, int dcn, bool swapBlue); + +CV_EXPORTS void cvtBGRtoBGR5x5(const uchar * src_data, size_t src_step, + uchar * dst_data, size_t dst_step, + int width, int height, + int scn, bool swapBlue, int greenBits); + +CV_EXPORTS void cvtBGR5x5toBGR(const uchar * src_data, size_t src_step, + uchar * dst_data, size_t dst_step, + int width, int height, + int dcn, bool swapBlue, int greenBits); + +CV_EXPORTS void cvtBGRtoGray(const uchar * src_data, size_t src_step, + uchar * dst_data, size_t dst_step, + int width, int height, + int depth, int scn, bool swapBlue); + +CV_EXPORTS void cvtGraytoBGR(const uchar * src_data, size_t src_step, + uchar * dst_data, size_t dst_step, + int width, int height, + int depth, int dcn); + +CV_EXPORTS void cvtBGR5x5toGray(const uchar * src_data, size_t src_step, + uchar * dst_data, size_t dst_step, + int width, int height, + int greenBits); + +CV_EXPORTS void cvtGraytoBGR5x5(const uchar * src_data, size_t src_step, + uchar * dst_data, size_t dst_step, + int width, int height, + int greenBits); +CV_EXPORTS void cvtBGRtoYUV(const uchar * src_data, size_t src_step, + uchar * dst_data, size_t dst_step, + int width, int height, + int depth, int scn, bool swapBlue, bool isCbCr); + +CV_EXPORTS void cvtYUVtoBGR(const uchar * src_data, size_t src_step, + uchar * dst_data, size_t 
dst_step, + int width, int height, + int depth, int dcn, bool swapBlue, bool isCbCr); + +CV_EXPORTS void cvtBGRtoXYZ(const uchar * src_data, size_t src_step, + uchar * dst_data, size_t dst_step, + int width, int height, + int depth, int scn, bool swapBlue); + +CV_EXPORTS void cvtXYZtoBGR(const uchar * src_data, size_t src_step, + uchar * dst_data, size_t dst_step, + int width, int height, + int depth, int dcn, bool swapBlue); + +CV_EXPORTS void cvtBGRtoHSV(const uchar * src_data, size_t src_step, + uchar * dst_data, size_t dst_step, + int width, int height, + int depth, int scn, bool swapBlue, bool isFullRange, bool isHSV); + +CV_EXPORTS void cvtHSVtoBGR(const uchar * src_data, size_t src_step, + uchar * dst_data, size_t dst_step, + int width, int height, + int depth, int dcn, bool swapBlue, bool isFullRange, bool isHSV); + +CV_EXPORTS void cvtBGRtoLab(const uchar * src_data, size_t src_step, + uchar * dst_data, size_t dst_step, + int width, int height, + int depth, int scn, bool swapBlue, bool isLab, bool srgb); + +CV_EXPORTS void cvtLabtoBGR(const uchar * src_data, size_t src_step, + uchar * dst_data, size_t dst_step, + int width, int height, + int depth, int dcn, bool swapBlue, bool isLab, bool srgb); + +CV_EXPORTS void cvtTwoPlaneYUVtoBGR(const uchar * src_data, size_t src_step, + uchar * dst_data, size_t dst_step, + int dst_width, int dst_height, + int dcn, bool swapBlue, int uIdx); + +//! Separate Y and UV planes +CV_EXPORTS void cvtTwoPlaneYUVtoBGR(const uchar * y_data, const uchar * uv_data, size_t src_step, + uchar * dst_data, size_t dst_step, + int dst_width, int dst_height, + int dcn, bool swapBlue, int uIdx); + +CV_EXPORTS void cvtTwoPlaneYUVtoBGR(const uchar * y_data, size_t y_step, const uchar * uv_data, size_t uv_step, + uchar * dst_data, size_t dst_step, + int dst_width, int dst_height, + int dcn, bool swapBlue, int uIdx); + +CV_EXPORTS void cvtThreePlaneYUVtoBGR(const uchar * src_data, size_t src_step, + uchar * dst_data, size_t dst_step, + int dst_width, int dst_height, + int dcn, bool swapBlue, int uIdx); + +CV_EXPORTS void cvtBGRtoThreePlaneYUV(const uchar * src_data, size_t src_step, + uchar * dst_data, size_t dst_step, + int width, int height, + int scn, bool swapBlue, int uIdx); + +//! Separate Y and UV planes +CV_EXPORTS void cvtBGRtoTwoPlaneYUV(const uchar * src_data, size_t src_step, + uchar * y_data, uchar * uv_data, size_t dst_step, + int width, int height, + int scn, bool swapBlue, int uIdx); + +CV_EXPORTS void cvtOnePlaneYUVtoBGR(const uchar * src_data, size_t src_step, + uchar * dst_data, size_t dst_step, + int width, int height, + int dcn, bool swapBlue, int uIdx, int ycn); + +CV_EXPORTS void cvtRGBAtoMultipliedRGBA(const uchar * src_data, size_t src_step, + uchar * dst_data, size_t dst_step, + int width, int height); + +CV_EXPORTS void cvtMultipliedRGBAtoRGBA(const uchar * src_data, size_t src_step, + uchar * dst_data, size_t dst_step, + int width, int height); + +CV_EXPORTS void integral(int depth, int sdepth, int sqdepth, + const uchar* src, size_t srcstep, + uchar* sum, size_t sumstep, + uchar* sqsum, size_t sqsumstep, + uchar* tilted, size_t tstep, + int width, int height, int cn); + +//! 
@} + +}} + +#endif // CV_IMGPROC_HAL_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/imgproc/hal/interface.h b/third_party/opencv/uos/sw_64/include/opencv2/imgproc/hal/interface.h new file mode 100644 index 00000000..f8dbcfe7 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/imgproc/hal/interface.h @@ -0,0 +1,46 @@ +#ifndef OPENCV_IMGPROC_HAL_INTERFACE_H +#define OPENCV_IMGPROC_HAL_INTERFACE_H + +//! @addtogroup imgproc_hal_interface +//! @{ + +//! @name Interpolation modes +//! @sa cv::InterpolationFlags +//! @{ +#define CV_HAL_INTER_NEAREST 0 +#define CV_HAL_INTER_LINEAR 1 +#define CV_HAL_INTER_CUBIC 2 +#define CV_HAL_INTER_AREA 3 +#define CV_HAL_INTER_LANCZOS4 4 +//! @} + +//! @name Morphology operations +//! @sa cv::MorphTypes +//! @{ +#define CV_HAL_MORPH_ERODE 0 +#define CV_HAL_MORPH_DILATE 1 +//! @} + +//! @name Threshold types +//! @sa cv::ThresholdTypes +//! @{ +#define CV_HAL_THRESH_BINARY 0 +#define CV_HAL_THRESH_BINARY_INV 1 +#define CV_HAL_THRESH_TRUNC 2 +#define CV_HAL_THRESH_TOZERO 3 +#define CV_HAL_THRESH_TOZERO_INV 4 +#define CV_HAL_THRESH_MASK 7 +#define CV_HAL_THRESH_OTSU 8 +#define CV_HAL_THRESH_TRIANGLE 16 +//! @} + +//! @name Adaptive threshold algorithm +//! @sa cv::AdaptiveThresholdTypes +//! @{ +#define CV_HAL_ADAPTIVE_THRESH_MEAN_C 0 +#define CV_HAL_ADAPTIVE_THRESH_GAUSSIAN_C 1 +//! @} + +//! @} + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/imgproc/imgproc.hpp b/third_party/opencv/uos/sw_64/include/opencv2/imgproc/imgproc.hpp new file mode 100644 index 00000000..4175bd0b --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/imgproc/imgproc.hpp @@ -0,0 +1,48 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifdef __OPENCV_BUILD +#error this is a compatibility header which should not be used inside the OpenCV library +#endif + +#include "opencv2/imgproc.hpp" diff --git a/third_party/opencv/uos/sw_64/include/opencv2/imgproc/imgproc_c.h b/third_party/opencv/uos/sw_64/include/opencv2/imgproc/imgproc_c.h new file mode 100644 index 00000000..2fe85b6c --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/imgproc/imgproc_c.h @@ -0,0 +1,1210 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef OPENCV_IMGPROC_IMGPROC_C_H +#define OPENCV_IMGPROC_IMGPROC_C_H + +#include "opencv2/imgproc/types_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @addtogroup imgproc_c +@{ +*/ + +/*********************** Background statistics accumulation *****************************/ + +/** @brief Adds image to accumulator +@see cv::accumulate +*/ +CVAPI(void) cvAcc( const CvArr* image, CvArr* sum, + const CvArr* mask CV_DEFAULT(NULL) ); + +/** @brief Adds squared image to accumulator +@see cv::accumulateSquare +*/ +CVAPI(void) cvSquareAcc( const CvArr* image, CvArr* sqsum, + const CvArr* mask CV_DEFAULT(NULL) ); + +/** @brief Adds a product of two images to accumulator +@see cv::accumulateProduct +*/ +CVAPI(void) cvMultiplyAcc( const CvArr* image1, const CvArr* image2, CvArr* acc, + const CvArr* mask CV_DEFAULT(NULL) ); + +/** @brief Adds image to accumulator with weights: acc = acc*(1-alpha) + image*alpha +@see cv::accumulateWeighted +*/ +CVAPI(void) cvRunningAvg( const CvArr* image, CvArr* acc, double alpha, + const CvArr* mask CV_DEFAULT(NULL) ); + +/****************************************************************************************\ +* Image Processing * +\****************************************************************************************/ + +/** Copies source 2D array inside of the larger destination array and + makes a border of the specified type (IPL_BORDER_*) around the copied area. */ +CVAPI(void) cvCopyMakeBorder( const CvArr* src, CvArr* dst, CvPoint offset, + int bordertype, CvScalar value CV_DEFAULT(cvScalarAll(0))); + +/** @brief Smooths the image in one of several ways. + +@param src The source image +@param dst The destination image +@param smoothtype Type of the smoothing, see SmoothMethod_c +@param size1 The first parameter of the smoothing operation, the aperture width. Must be a +positive odd number (1, 3, 5, ...) +@param size2 The second parameter of the smoothing operation, the aperture height. Ignored by +CV_MEDIAN and CV_BILATERAL methods. In the case of simple scaled/non-scaled and Gaussian blur if +size2 is zero, it is set to size1. Otherwise it must be a positive odd number. +@param sigma1 In the case of a Gaussian parameter this parameter may specify Gaussian \f$\sigma\f$ +(standard deviation). If it is zero, it is calculated from the kernel size: +\f[\sigma = 0.3 (n/2 - 1) + 0.8 \quad \text{where} \quad n= \begin{array}{l l} \mbox{\texttt{size1} for horizontal kernel} \\ \mbox{\texttt{size2} for vertical kernel} \end{array}\f] +Using standard sigma for small kernels ( \f$3\times 3\f$ to \f$7\times 7\f$ ) gives better speed. If +sigma1 is not zero, while size1 and size2 are zeros, the kernel size is calculated from the +sigma (to provide accurate enough operation). +@param sigma2 additional parameter for bilateral filtering + +@see cv::GaussianBlur, cv::blur, cv::medianBlur, cv::bilateralFilter. + */ +CVAPI(void) cvSmooth( const CvArr* src, CvArr* dst, + int smoothtype CV_DEFAULT(CV_GAUSSIAN), + int size1 CV_DEFAULT(3), + int size2 CV_DEFAULT(0), + double sigma1 CV_DEFAULT(0), + double sigma2 CV_DEFAULT(0)); + +/** @brief Convolves an image with the kernel. + +@param src input image. +@param dst output image of the same size and the same number of channels as src. +@param kernel convolution kernel (or rather a correlation kernel), a single-channel floating point +matrix; if you want to apply different kernels to different channels, split the image into +separate color planes using split and process them individually. 
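A minimal cvSmooth sketch (assuming an OpenCV 3.x build with this C header on the include path; the image size and fill value are arbitrary) illustrating the size1/size2/sigma conventions described above:

#include <opencv2/core/core_c.h>
#include <opencv2/imgproc/imgproc_c.h>

int main()
{
    // Synthetic 8-bit BGR input, filled with a flat colour just to have valid data.
    IplImage* src = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3);
    cvSet(src, cvScalar(32, 64, 128, 0), NULL);

    IplImage* gauss  = cvCreateImage(cvGetSize(src), src->depth, src->nChannels);
    IplImage* median = cvCreateImage(cvGetSize(src), src->depth, src->nChannels);

    // 5x5 Gaussian: size2 == 0 falls back to size1, sigma is derived from the aperture.
    cvSmooth(src, gauss, CV_GAUSSIAN, 5, 0, 0, 0);
    // Median filter: only size1 is used; size2 and both sigmas are ignored.
    cvSmooth(src, median, CV_MEDIAN, 5, 0, 0, 0);

    cvReleaseImage(&median);
    cvReleaseImage(&gauss);
    cvReleaseImage(&src);
    return 0;
}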
+@param anchor anchor of the kernel that indicates the relative position of a filtered point within +the kernel; the anchor should lie within the kernel; default value (-1,-1) means that the anchor +is at the kernel center. + +@see cv::filter2D + */ +CVAPI(void) cvFilter2D( const CvArr* src, CvArr* dst, const CvMat* kernel, + CvPoint anchor CV_DEFAULT(cvPoint(-1,-1))); + +/** @brief Finds integral image: SUM(X,Y) = sum(x \texttt{hist1}(I)\)}{\frac{\texttt{hist2}(I) \cdot \texttt{scale}}{\texttt{hist1}(I)}}{if \(\texttt{hist1}(I) \ne 0\) and \(\texttt{hist2}(I) \le \texttt{hist1}(I)\)}\f] + +@param hist1 First histogram (the divisor). +@param hist2 Second histogram. +@param dst_hist Destination histogram. +@param scale Scale factor for the destination histogram. + */ +CVAPI(void) cvCalcProbDensity( const CvHistogram* hist1, const CvHistogram* hist2, + CvHistogram* dst_hist, double scale CV_DEFAULT(255) ); + +/** @brief equalizes histogram of 8-bit single-channel image +@see cv::equalizeHist +*/ +CVAPI(void) cvEqualizeHist( const CvArr* src, CvArr* dst ); + + +/** @brief Applies distance transform to binary image +@see cv::distanceTransform +*/ +CVAPI(void) cvDistTransform( const CvArr* src, CvArr* dst, + int distance_type CV_DEFAULT(CV_DIST_L2), + int mask_size CV_DEFAULT(3), + const float* mask CV_DEFAULT(NULL), + CvArr* labels CV_DEFAULT(NULL), + int labelType CV_DEFAULT(CV_DIST_LABEL_CCOMP)); + + +/** @brief Applies fixed-level threshold to grayscale image. + + This is a basic operation applied before retrieving contours +@see cv::threshold +*/ +CVAPI(double) cvThreshold( const CvArr* src, CvArr* dst, + double threshold, double max_value, + int threshold_type ); + +/** @brief Applies adaptive threshold to grayscale image. + + The two parameters for methods CV_ADAPTIVE_THRESH_MEAN_C and + CV_ADAPTIVE_THRESH_GAUSSIAN_C are: + neighborhood size (3, 5, 7 etc.), + and a constant subtracted from mean (...,-3,-2,-1,0,1,2,3,...) +@see cv::adaptiveThreshold +*/ +CVAPI(void) cvAdaptiveThreshold( const CvArr* src, CvArr* dst, double max_value, + int adaptive_method CV_DEFAULT(CV_ADAPTIVE_THRESH_MEAN_C), + int threshold_type CV_DEFAULT(CV_THRESH_BINARY), + int block_size CV_DEFAULT(3), + double param1 CV_DEFAULT(5)); + +/** @brief Fills the connected component until the color difference gets large enough +@see cv::floodFill +*/ +CVAPI(void) cvFloodFill( CvArr* image, CvPoint seed_point, + CvScalar new_val, CvScalar lo_diff CV_DEFAULT(cvScalarAll(0)), + CvScalar up_diff CV_DEFAULT(cvScalarAll(0)), + CvConnectedComp* comp CV_DEFAULT(NULL), + int flags CV_DEFAULT(4), + CvArr* mask CV_DEFAULT(NULL)); + +/****************************************************************************************\ +* Feature detection * +\****************************************************************************************/ + +/** @brief Runs canny edge detector +@see cv::Canny +*/ +CVAPI(void) cvCanny( const CvArr* image, CvArr* edges, double threshold1, + double threshold2, int aperture_size CV_DEFAULT(3) ); + +/** @brief Calculates constraint image for corner detection + + Dx^2 * Dyy + Dxx * Dy^2 - 2 * Dx * Dy * Dxy. 
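The two thresholding entry points documented above can be sketched as follows (same assumed OpenCV 3.x C API; the input, threshold and neighbourhood constants are arbitrary):

#include <opencv2/core/core_c.h>
#include <opencv2/imgproc/imgproc_c.h>

int main()
{
    // Single-channel 8-bit input; contents are synthetic here.
    IplImage* gray = cvCreateImage(cvSize(320, 240), IPL_DEPTH_8U, 1);
    cvSet(gray, cvScalarAll(100), NULL);

    IplImage* binOtsu  = cvCreateImage(cvGetSize(gray), IPL_DEPTH_8U, 1);
    IplImage* binLocal = cvCreateImage(cvGetSize(gray), IPL_DEPTH_8U, 1);

    // Global threshold chosen by Otsu; the 128 passed in is ignored when CV_THRESH_OTSU is set.
    double chosen = cvThreshold(gray, binOtsu, 128, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);

    // Local mean over an 11x11 neighbourhood, with a constant 5 subtracted from the mean.
    cvAdaptiveThreshold(gray, binLocal, 255, CV_ADAPTIVE_THRESH_MEAN_C,
                        CV_THRESH_BINARY, 11, 5);

    (void)chosen;
    cvReleaseImage(&binLocal);
    cvReleaseImage(&binOtsu);
    cvReleaseImage(&gray);
    return 0;
}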
+ Applying threshold to the result gives coordinates of corners +@see cv::preCornerDetect +*/ +CVAPI(void) cvPreCornerDetect( const CvArr* image, CvArr* corners, + int aperture_size CV_DEFAULT(3) ); + +/** @brief Calculates eigen values and vectors of 2x2 + gradient covariation matrix at every image pixel +@see cv::cornerEigenValsAndVecs +*/ +CVAPI(void) cvCornerEigenValsAndVecs( const CvArr* image, CvArr* eigenvv, + int block_size, int aperture_size CV_DEFAULT(3) ); + +/** @brief Calculates minimal eigenvalue for 2x2 gradient covariation matrix at + every image pixel +@see cv::cornerMinEigenVal +*/ +CVAPI(void) cvCornerMinEigenVal( const CvArr* image, CvArr* eigenval, + int block_size, int aperture_size CV_DEFAULT(3) ); + +/** @brief Harris corner detector: + + Calculates det(M) - k*(trace(M)^2), where M is 2x2 gradient covariation matrix for each pixel +@see cv::cornerHarris +*/ +CVAPI(void) cvCornerHarris( const CvArr* image, CvArr* harris_response, + int block_size, int aperture_size CV_DEFAULT(3), + double k CV_DEFAULT(0.04) ); + +/** @brief Adjust corner position using some sort of gradient search +@see cv::cornerSubPix +*/ +CVAPI(void) cvFindCornerSubPix( const CvArr* image, CvPoint2D32f* corners, + int count, CvSize win, CvSize zero_zone, + CvTermCriteria criteria ); + +/** @brief Finds a sparse set of points within the selected region + that seem to be easy to track +@see cv::goodFeaturesToTrack +*/ +CVAPI(void) cvGoodFeaturesToTrack( const CvArr* image, CvArr* eig_image, + CvArr* temp_image, CvPoint2D32f* corners, + int* corner_count, double quality_level, + double min_distance, + const CvArr* mask CV_DEFAULT(NULL), + int block_size CV_DEFAULT(3), + int use_harris CV_DEFAULT(0), + double k CV_DEFAULT(0.04) ); + +/** @brief Finds lines on binary image using one of several methods. + + line_storage is either memory storage or 1 x _max number of lines_ CvMat, its + number of columns is changed by the function. + method is one of CV_HOUGH_*; + rho, theta and threshold are used for each of those methods; + param1 ~ line length, param2 ~ line gap - for probabilistic, + param1 ~ srn, param2 ~ stn - for multi-scale +@see cv::HoughLines +*/ +CVAPI(CvSeq*) cvHoughLines2( CvArr* image, void* line_storage, int method, + double rho, double theta, int threshold, + double param1 CV_DEFAULT(0), double param2 CV_DEFAULT(0), + double min_theta CV_DEFAULT(0), double max_theta CV_DEFAULT(CV_PI)); + +/** @brief Finds circles in the image +@see cv::HoughCircles +*/ +CVAPI(CvSeq*) cvHoughCircles( CvArr* image, void* circle_storage, + int method, double dp, double min_dist, + double param1 CV_DEFAULT(100), + double param2 CV_DEFAULT(100), + int min_radius CV_DEFAULT(0), + int max_radius CV_DEFAULT(0)); + +/** @brief Fits a line into set of 2d or 3d points in a robust way (M-estimator technique) +@see cv::fitLine +*/ +CVAPI(void) cvFitLine( const CvArr* points, int dist_type, double param, + double reps, double aeps, float* line ); + +/****************************************************************************************\ +* Drawing * +\****************************************************************************************/ + +/****************************************************************************************\ +* Drawing functions work with images/matrices of arbitrary type. * +* For color images the channel order is BGR[A] * +* Antialiasing is supported only for 8-bit image now. 
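A rough sketch of the cvGoodFeaturesToTrack workflow declared above, including the two 32-bit float work buffers the C API expects (the image content, capacity and tuning constants are arbitrary):

#include <opencv2/core/core_c.h>
#include <opencv2/imgproc/imgproc_c.h>

int main()
{
    // Synthetic single-channel image with a bright rectangle, so it has four strong corners.
    IplImage* gray = cvCreateImage(cvSize(320, 240), IPL_DEPTH_8U, 1);
    cvSetZero(gray);
    cvRectangle(gray, cvPoint(80, 60), cvPoint(240, 180), cvScalarAll(255), CV_FILLED, 8, 0);

    // Work buffers required by the C interface: 32F, same size as the input.
    IplImage* eig  = cvCreateImage(cvGetSize(gray), IPL_DEPTH_32F, 1);
    IplImage* temp = cvCreateImage(cvGetSize(gray), IPL_DEPTH_32F, 1);

    CvPoint2D32f corners[32];
    int cornerCount = 32;   // in: array capacity, out: number of corners actually found
    cvGoodFeaturesToTrack(gray, eig, temp, corners, &cornerCount,
                          0.01 /*quality_level*/, 10.0 /*min_distance*/,
                          NULL, 3, 0, 0.04);

    // cornerCount now tells how many of the rectangle's corners were detected.
    cvReleaseImage(&temp);
    cvReleaseImage(&eig);
    cvReleaseImage(&gray);
    return 0;
}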
* +* All the functions include parameter color that means rgb value (that may be * +* constructed with CV_RGB macro) for color images and brightness * +* for grayscale images. * +* If a drawn figure is partially or completely outside of the image, it is clipped.* +\****************************************************************************************/ + +#define CV_FILLED -1 + +#define CV_AA 16 + +/** @brief Draws 4-connected, 8-connected or antialiased line segment connecting two points +@see cv::line +*/ +CVAPI(void) cvLine( CvArr* img, CvPoint pt1, CvPoint pt2, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ); + +/** @brief Draws a rectangle given two opposite corners of the rectangle (pt1 & pt2) + + if thickness<0 (e.g. thickness == CV_FILLED), the filled box is drawn +@see cv::rectangle +*/ +CVAPI(void) cvRectangle( CvArr* img, CvPoint pt1, CvPoint pt2, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), + int shift CV_DEFAULT(0)); + +/** @brief Draws a rectangle specified by a CvRect structure +@see cv::rectangle +*/ +CVAPI(void) cvRectangleR( CvArr* img, CvRect r, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), + int shift CV_DEFAULT(0)); + + +/** @brief Draws a circle with specified center and radius. + + Thickness works in the same way as with cvRectangle +@see cv::circle +*/ +CVAPI(void) cvCircle( CvArr* img, CvPoint center, int radius, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0)); + +/** @brief Draws ellipse outline, filled ellipse, elliptic arc or filled elliptic sector + + depending on _thickness_, _start_angle_ and _end_angle_ parameters. The resultant figure + is rotated by _angle_. All the angles are in degrees +@see cv::ellipse +*/ +CVAPI(void) cvEllipse( CvArr* img, CvPoint center, CvSize axes, + double angle, double start_angle, double end_angle, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0)); + +CV_INLINE void cvEllipseBox( CvArr* img, CvBox2D box, CvScalar color, + int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ) +{ + CvSize axes = cvSize( + cvRound(box.size.width*0.5), + cvRound(box.size.height*0.5) + ); + + cvEllipse( img, cvPointFrom32f( box.center ), axes, box.angle, + 0, 360, color, thickness, line_type, shift ); +} + +/** @brief Fills convex or monotonous polygon. +@see cv::fillConvexPoly +*/ +CVAPI(void) cvFillConvexPoly( CvArr* img, const CvPoint* pts, int npts, CvScalar color, + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0)); + +/** @brief Fills an area bounded by one or more arbitrary polygons +@see cv::fillPoly +*/ +CVAPI(void) cvFillPoly( CvArr* img, CvPoint** pts, const int* npts, + int contours, CvScalar color, + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ); + +/** @brief Draws one or more polygonal curves +@see cv::polylines +*/ +CVAPI(void) cvPolyLine( CvArr* img, CvPoint** pts, const int* npts, int contours, + int is_closed, CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ); + +#define cvDrawRect cvRectangle +#define cvDrawLine cvLine +#define cvDrawCircle cvCircle +#define cvDrawEllipse cvEllipse +#define cvDrawPolyLine cvPolyLine + +/** @brief Clips the line segment connecting *pt1 and *pt2 + by the rectangular window + + (0<=xptr will point to pt1 (or pt2, see left_to_right description) location in +the image. 
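A short sketch exercising a few of the drawing calls declared above; colours are built with CV_RGB as the banner comment suggests, and all of the geometry is arbitrary:

#include <opencv2/core/core_c.h>
#include <opencv2/imgproc/imgproc_c.h>

int main()
{
    IplImage* canvas = cvCreateImage(cvSize(400, 300), IPL_DEPTH_8U, 3);
    cvSetZero(canvas);

    // Antialiased red line, thin green rectangle, filled blue circle, rotated yellow ellipse.
    cvLine(canvas, cvPoint(10, 10), cvPoint(390, 290), CV_RGB(255, 0, 0), 2, CV_AA, 0);
    cvRectangle(canvas, cvPoint(50, 50), cvPoint(200, 150), CV_RGB(0, 255, 0), 1, 8, 0);
    cvCircle(canvas, cvPoint(300, 150), 40, CV_RGB(0, 0, 255), CV_FILLED, 8, 0);
    cvEllipse(canvas, cvPoint(200, 220), cvSize(80, 40), 30.0, 0.0, 360.0,
              CV_RGB(255, 255, 0), 1, CV_AA, 0);

    cvReleaseImage(&canvas);
    return 0;
}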
Returns the number of pixels on the line between the ending points. +@see cv::LineIterator +*/ +CVAPI(int) cvInitLineIterator( const CvArr* image, CvPoint pt1, CvPoint pt2, + CvLineIterator* line_iterator, + int connectivity CV_DEFAULT(8), + int left_to_right CV_DEFAULT(0)); + +#define CV_NEXT_LINE_POINT( line_iterator ) \ +{ \ + int _line_iterator_mask = (line_iterator).err < 0 ? -1 : 0; \ + (line_iterator).err += (line_iterator).minus_delta + \ + ((line_iterator).plus_delta & _line_iterator_mask); \ + (line_iterator).ptr += (line_iterator).minus_step + \ + ((line_iterator).plus_step & _line_iterator_mask); \ +} + + +#define CV_FONT_HERSHEY_SIMPLEX 0 +#define CV_FONT_HERSHEY_PLAIN 1 +#define CV_FONT_HERSHEY_DUPLEX 2 +#define CV_FONT_HERSHEY_COMPLEX 3 +#define CV_FONT_HERSHEY_TRIPLEX 4 +#define CV_FONT_HERSHEY_COMPLEX_SMALL 5 +#define CV_FONT_HERSHEY_SCRIPT_SIMPLEX 6 +#define CV_FONT_HERSHEY_SCRIPT_COMPLEX 7 + +#define CV_FONT_ITALIC 16 + +#define CV_FONT_VECTOR0 CV_FONT_HERSHEY_SIMPLEX + + +/** Font structure */ +typedef struct CvFont +{ + const char* nameFont; //Qt:nameFont + CvScalar color; //Qt:ColorFont -> cvScalar(blue_component, green_component, red_component[, alpha_component]) + int font_face; //Qt: bool italic /** =CV_FONT_* */ + const int* ascii; //!< font data and metrics + const int* greek; + const int* cyrillic; + float hscale, vscale; + float shear; //!< slope coefficient: 0 - normal, >0 - italic + int thickness; //!< Qt: weight /** letters thickness */ + float dx; //!< horizontal interval between letters + int line_type; //!< Qt: PointSize +} +CvFont; + +/** @brief Initializes font structure (OpenCV 1.x API). + +The function initializes the font structure that can be passed to text rendering functions. + +@param font Pointer to the font structure initialized by the function +@param font_face Font name identifier. See cv::HersheyFonts and corresponding old CV_* identifiers. +@param hscale Horizontal scale. If equal to 1.0f , the characters have the original width +depending on the font type. If equal to 0.5f , the characters are of half the original width. +@param vscale Vertical scale. If equal to 1.0f , the characters have the original height depending +on the font type. If equal to 0.5f , the characters are of half the original height. +@param shear Approximate tangent of the character slope relative to the vertical line. A zero +value means a non-italic font, 1.0f means about a 45 degree slope, etc. +@param thickness Thickness of the text strokes +@param line_type Type of the strokes, see line description + +@sa cvPutText + */ +CVAPI(void) cvInitFont( CvFont* font, int font_face, + double hscale, double vscale, + double shear CV_DEFAULT(0), + int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8)); + +CV_INLINE CvFont cvFont( double scale, int thickness CV_DEFAULT(1) ) +{ + CvFont font; + cvInitFont( &font, CV_FONT_HERSHEY_PLAIN, scale, scale, 0, thickness, CV_AA ); + return font; +} + +/** @brief Renders text stroke with specified font and color at specified location. 
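A small sketch of the cvInitFont / cvGetTextSize / cvPutText sequence described around here (the string, scale and placement are arbitrary):

#include <opencv2/core/core_c.h>
#include <opencv2/imgproc/imgproc_c.h>

int main()
{
    IplImage* canvas = cvCreateImage(cvSize(400, 100), IPL_DEPTH_8U, 3);
    cvSetZero(canvas);

    CvFont font;
    cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0, 2, CV_AA);

    // Measure the string first so the origin can be placed one text-height below the top edge.
    CvSize textSize;
    int baseline = 0;
    cvGetTextSize("sw_64 build", &font, &textSize, &baseline);

    CvPoint org = cvPoint(10, 10 + textSize.height);
    cvPutText(canvas, "sw_64 build", org, &font, CV_RGB(255, 255, 255));

    cvReleaseImage(&canvas);
    return 0;
}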
+ CvFont should be initialized with cvInitFont +@see cvInitFont, cvGetTextSize, cvFont, cv::putText +*/ +CVAPI(void) cvPutText( CvArr* img, const char* text, CvPoint org, + const CvFont* font, CvScalar color ); + +/** @brief Calculates bounding box of text stroke (useful for alignment) +@see cv::getTextSize +*/ +CVAPI(void) cvGetTextSize( const char* text_string, const CvFont* font, + CvSize* text_size, int* baseline ); + +/** @brief Unpacks color value + +if arrtype is CV_8UC?, _color_ is treated as packed color value, otherwise the first channels +(depending on arrtype) of destination scalar are set to the same value = _color_ +*/ +CVAPI(CvScalar) cvColorToScalar( double packed_color, int arrtype ); + +/** @brief Returns the polygon points which make up the given ellipse. + +The ellipse is define by the box of size 'axes' rotated 'angle' around the 'center'. A partial +sweep of the ellipse arc can be done by specifying arc_start and arc_end to be something other than +0 and 360, respectively. The input array 'pts' must be large enough to hold the result. The total +number of points stored into 'pts' is returned by this function. +@see cv::ellipse2Poly +*/ +CVAPI(int) cvEllipse2Poly( CvPoint center, CvSize axes, + int angle, int arc_start, int arc_end, CvPoint * pts, int delta ); + +/** @brief Draws contour outlines or filled interiors on the image +@see cv::drawContours +*/ +CVAPI(void) cvDrawContours( CvArr *img, CvSeq* contour, + CvScalar external_color, CvScalar hole_color, + int max_level, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), + CvPoint offset CV_DEFAULT(cvPoint(0,0))); + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/imgproc/types_c.h b/third_party/opencv/uos/sw_64/include/opencv2/imgproc/types_c.h new file mode 100644 index 00000000..d3e55f57 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/imgproc/types_c.h @@ -0,0 +1,659 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_IMGPROC_TYPES_C_H +#define OPENCV_IMGPROC_TYPES_C_H + +#include "opencv2/core/core_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @addtogroup imgproc_c + @{ +*/ + +/** Connected component structure */ +typedef struct CvConnectedComp +{ + double area; /** DBL_EPSILON ? 1./std::sqrt(am00) : 0; + } + operator cv::Moments() const + { + return cv::Moments(m00, m10, m01, m20, m11, m02, m30, m21, m12, m03); + } +#endif +} +CvMoments; + +#ifdef __cplusplus +} // extern "C" + +CV_INLINE CvMoments cvMoments() +{ +#if !defined(CV__ENABLE_C_API_CTORS) + CvMoments self = CV_STRUCT_INITIALIZER; return self; +#else + return CvMoments(); +#endif +} + +CV_INLINE CvMoments cvMoments(const cv::Moments& m) +{ +#if !defined(CV__ENABLE_C_API_CTORS) + double am00 = std::abs(m.m00); + CvMoments self = { + m.m00, m.m10, m.m01, m.m20, m.m11, m.m02, m.m30, m.m21, m.m12, m.m03, + m.mu20, m.mu11, m.mu02, m.mu30, m.mu21, m.mu12, m.mu03, + am00 > DBL_EPSILON ? 1./std::sqrt(am00) : 0 + }; + return self; +#else + return CvMoments(m); +#endif +} + +extern "C" { +#endif // __cplusplus + +/** Hu invariants */ +typedef struct CvHuMoments +{ + double hu1, hu2, hu3, hu4, hu5, hu6, hu7; /**< Hu invariants */ +} +CvHuMoments; + +/** Template matching methods */ +enum +{ + CV_TM_SQDIFF =0, + CV_TM_SQDIFF_NORMED =1, + CV_TM_CCORR =2, + CV_TM_CCORR_NORMED =3, + CV_TM_CCOEFF =4, + CV_TM_CCOEFF_NORMED =5 +}; + +typedef float (CV_CDECL * CvDistanceFunction)( const float* a, const float* b, void* user_param ); + +/** Contour retrieval modes */ +enum +{ + CV_RETR_EXTERNAL=0, + CV_RETR_LIST=1, + CV_RETR_CCOMP=2, + CV_RETR_TREE=3, + CV_RETR_FLOODFILL=4 +}; + +/** Contour approximation methods */ +enum +{ + CV_CHAIN_CODE=0, + CV_CHAIN_APPROX_NONE=1, + CV_CHAIN_APPROX_SIMPLE=2, + CV_CHAIN_APPROX_TC89_L1=3, + CV_CHAIN_APPROX_TC89_KCOS=4, + CV_LINK_RUNS=5 +}; + +/* +Internal structure that is used for sequential retrieving contours from the image. +It supports both hierarchical and plane variants of Suzuki algorithm. 
+*/ +typedef struct _CvContourScanner* CvContourScanner; + +/** Freeman chain reader state */ +typedef struct CvChainPtReader +{ + CV_SEQ_READER_FIELDS() + char code; + CvPoint pt; + schar deltas[8][2]; +} +CvChainPtReader; + +/** initializes 8-element array for fast access to 3x3 neighborhood of a pixel */ +#define CV_INIT_3X3_DELTAS( deltas, step, nch ) \ + ((deltas)[0] = (nch), (deltas)[1] = -(step) + (nch), \ + (deltas)[2] = -(step), (deltas)[3] = -(step) - (nch), \ + (deltas)[4] = -(nch), (deltas)[5] = (step) - (nch), \ + (deltas)[6] = (step), (deltas)[7] = (step) + (nch)) + + +/** Contour approximation algorithms */ +enum +{ + CV_POLY_APPROX_DP = 0 +}; + +/** Shape matching methods */ +enum +{ + CV_CONTOURS_MATCH_I1 =1, //!< \f[I_1(A,B) = \sum _{i=1...7} \left | \frac{1}{m^A_i} - \frac{1}{m^B_i} \right |\f] + CV_CONTOURS_MATCH_I2 =2, //!< \f[I_2(A,B) = \sum _{i=1...7} \left | m^A_i - m^B_i \right |\f] + CV_CONTOURS_MATCH_I3 =3 //!< \f[I_3(A,B) = \max _{i=1...7} \frac{ \left| m^A_i - m^B_i \right| }{ \left| m^A_i \right| }\f] +}; + +/** Shape orientation */ +enum +{ + CV_CLOCKWISE =1, + CV_COUNTER_CLOCKWISE =2 +}; + + +/** Convexity defect */ +typedef struct CvConvexityDefect +{ + CvPoint* start; /**< point of the contour where the defect begins */ + CvPoint* end; /**< point of the contour where the defect ends */ + CvPoint* depth_point; /**< the farthest from the convex hull point within the defect */ + float depth; /**< distance between the farthest point and the convex hull */ +} CvConvexityDefect; + + +/** Histogram comparison methods */ +enum +{ + CV_COMP_CORREL =0, + CV_COMP_CHISQR =1, + CV_COMP_INTERSECT =2, + CV_COMP_BHATTACHARYYA =3, + CV_COMP_HELLINGER =CV_COMP_BHATTACHARYYA, + CV_COMP_CHISQR_ALT =4, + CV_COMP_KL_DIV =5 +}; + +/** Mask size for distance transform */ +enum +{ + CV_DIST_MASK_3 =3, + CV_DIST_MASK_5 =5, + CV_DIST_MASK_PRECISE =0 +}; + +/** Content of output label array: connected components or pixels */ +enum +{ + CV_DIST_LABEL_CCOMP = 0, + CV_DIST_LABEL_PIXEL = 1 +}; + +/** Distance types for Distance Transform and M-estimators */ +enum +{ + CV_DIST_USER =-1, /**< User defined distance */ + CV_DIST_L1 =1, /**< distance = |x1-x2| + |y1-y2| */ + CV_DIST_L2 =2, /**< the simple euclidean distance */ + CV_DIST_C =3, /**< distance = max(|x1-x2|,|y1-y2|) */ + CV_DIST_L12 =4, /**< L1-L2 metric: distance = 2(sqrt(1+x*x/2) - 1)) */ + CV_DIST_FAIR =5, /**< distance = c^2(|x|/c-log(1+|x|/c)), c = 1.3998 */ + CV_DIST_WELSCH =6, /**< distance = c^2/2(1-exp(-(x/c)^2)), c = 2.9846 */ + CV_DIST_HUBER =7 /**< distance = |x| threshold ? max_value : 0 */ + CV_THRESH_BINARY_INV =1, /**< value = value > threshold ? 0 : max_value */ + CV_THRESH_TRUNC =2, /**< value = value > threshold ? threshold : value */ + CV_THRESH_TOZERO =3, /**< value = value > threshold ? value : 0 */ + CV_THRESH_TOZERO_INV =4, /**< value = value > threshold ? 
0 : value */ + CV_THRESH_MASK =7, + CV_THRESH_OTSU =8, /**< use Otsu algorithm to choose the optimal threshold value; + combine the flag with one of the above CV_THRESH_* values */ + CV_THRESH_TRIANGLE =16 /**< use Triangle algorithm to choose the optimal threshold value; + combine the flag with one of the above CV_THRESH_* values, but not + with CV_THRESH_OTSU */ +}; + +/** Adaptive threshold methods */ +enum +{ + CV_ADAPTIVE_THRESH_MEAN_C =0, + CV_ADAPTIVE_THRESH_GAUSSIAN_C =1 +}; + +/** FloodFill flags */ +enum +{ + CV_FLOODFILL_FIXED_RANGE =(1 << 16), + CV_FLOODFILL_MASK_ONLY =(1 << 17) +}; + + +/** Canny edge detector flags */ +enum +{ + CV_CANNY_L2_GRADIENT =(1 << 31) +}; + +/** Variants of a Hough transform */ +enum +{ + CV_HOUGH_STANDARD =0, + CV_HOUGH_PROBABILISTIC =1, + CV_HOUGH_MULTI_SCALE =2, + CV_HOUGH_GRADIENT =3 +}; + + +/* Fast search data structures */ +struct CvFeatureTree; +struct CvLSH; +struct CvLSHOperations; + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/ml.hpp b/third_party/opencv/uos/sw_64/include/opencv2/ml.hpp new file mode 100644 index 00000000..396a7921 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/ml.hpp @@ -0,0 +1,1989 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Copyright (C) 2014, Itseez Inc, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef OPENCV_ML_HPP +#define OPENCV_ML_HPP + +#ifdef __cplusplus +# include "opencv2/core.hpp" +#endif + +#ifdef __cplusplus + +#include +#include +#include + +/** + @defgroup ml Machine Learning + + The Machine Learning Library (MLL) is a set of classes and functions for statistical + classification, regression, and clustering of data. + + Most of the classification and regression algorithms are implemented as C++ classes. As the + algorithms have different sets of features (like an ability to handle missing measurements or + categorical input variables), there is a little common ground between the classes. This common + ground is defined by the class cv::ml::StatModel that all the other ML classes are derived from. + + See detailed overview here: @ref ml_intro. + */ + +namespace cv +{ + +namespace ml +{ + +//! @addtogroup ml +//! @{ + +/** @brief Variable types */ +enum VariableTypes +{ + VAR_NUMERICAL =0, //!< same as VAR_ORDERED + VAR_ORDERED =0, //!< ordered variables + VAR_CATEGORICAL =1 //!< categorical variables +}; + +/** @brief %Error types */ +enum ErrorTypes +{ + TEST_ERROR = 0, + TRAIN_ERROR = 1 +}; + +/** @brief Sample types */ +enum SampleTypes +{ + ROW_SAMPLE = 0, //!< each training sample is a row of samples + COL_SAMPLE = 1 //!< each training sample occupies a column of samples +}; + +/** @brief The structure represents the logarithmic grid range of statmodel parameters. + +It is used for optimizing statmodel accuracy by varying model parameters, the accuracy estimate +being computed by cross-validation. + */ +class CV_EXPORTS_W ParamGrid +{ +public: + /** @brief Default constructor */ + ParamGrid(); + /** @brief Constructor with parameters */ + ParamGrid(double _minVal, double _maxVal, double _logStep); + + CV_PROP_RW double minVal; //!< Minimum value of the statmodel parameter. Default value is 0. + CV_PROP_RW double maxVal; //!< Maximum value of the statmodel parameter. Default value is 0. + /** @brief Logarithmic step for iterating the statmodel parameter. + + The grid determines the following iteration sequence of the statmodel parameter values: + \f[(minVal, minVal*step, minVal*{step}^2, \dots, minVal*{logStep}^n),\f] + where \f$n\f$ is the maximal index satisfying + \f[\texttt{minVal} * \texttt{logStep} ^n < \texttt{maxVal}\f] + The grid is logarithmic, so logStep must always be greater than 1. Default value is 1. + */ + CV_PROP_RW double logStep; + + /** @brief Creates a ParamGrid Ptr that can be given to the %SVM::trainAuto method + + @param minVal minimum value of the parameter grid + @param maxVal maximum value of the parameter grid + @param logstep Logarithmic step for iterating the statmodel parameter + */ + CV_WRAP static Ptr create(double minVal=0., double maxVal=0., double logstep=1.); +}; + +/** @brief Class encapsulating training data. + +Please note that the class only specifies the interface of training data, but not implementation. +All the statistical model classes in _ml_ module accepts Ptr\ as parameter. In other +words, you can create your own class derived from TrainData and pass smart pointer to the instance +of this class into StatModel::train. 
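As a sketch of the logarithmic iteration rule quoted above (hypothetical bounds 0.1 to 1000 with logStep 10, assuming the C++ ml module is available), the grid enumerates 0.1, 1, 10, 100:

#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>
#include <cstdio>

int main()
{
    // minVal = 0.1, maxVal = 1000, logStep = 10: values are minVal * logStep^n while they stay below maxVal.
    cv::Ptr<cv::ml::ParamGrid> grid = cv::ml::ParamGrid::create(0.1, 1000.0, 10.0);

    for (double v = grid->minVal; v < grid->maxVal; v *= grid->logStep)
        std::printf("%g\n", v);   // prints 0.1, 1, 10, 100
    return 0;
}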
+ +@sa @ref ml_intro_data + */ +class CV_EXPORTS_W TrainData +{ +public: + static inline float missingValue() { return FLT_MAX; } + virtual ~TrainData(); + + CV_WRAP virtual int getLayout() const = 0; + CV_WRAP virtual int getNTrainSamples() const = 0; + CV_WRAP virtual int getNTestSamples() const = 0; + CV_WRAP virtual int getNSamples() const = 0; + CV_WRAP virtual int getNVars() const = 0; + CV_WRAP virtual int getNAllVars() const = 0; + + CV_WRAP virtual void getSample(InputArray varIdx, int sidx, float* buf) const = 0; + CV_WRAP virtual Mat getSamples() const = 0; + CV_WRAP virtual Mat getMissing() const = 0; + + /** @brief Returns matrix of train samples + + @param layout The requested layout. If it's different from the initial one, the matrix is + transposed. See ml::SampleTypes. + @param compressSamples if true, the function returns only the training samples (specified by + sampleIdx) + @param compressVars if true, the function returns the shorter training samples, containing only + the active variables. + + In current implementation the function tries to avoid physical data copying and returns the + matrix stored inside TrainData (unless the transposition or compression is needed). + */ + CV_WRAP virtual Mat getTrainSamples(int layout=ROW_SAMPLE, + bool compressSamples=true, + bool compressVars=true) const = 0; + + /** @brief Returns the vector of responses + + The function returns ordered or the original categorical responses. Usually it's used in + regression algorithms. + */ + CV_WRAP virtual Mat getTrainResponses() const = 0; + + /** @brief Returns the vector of normalized categorical responses + + The function returns vector of responses. Each response is integer from `0` to `-1`. The actual label value can be retrieved then from the class label vector, see + TrainData::getClassLabels. + */ + CV_WRAP virtual Mat getTrainNormCatResponses() const = 0; + CV_WRAP virtual Mat getTestResponses() const = 0; + CV_WRAP virtual Mat getTestNormCatResponses() const = 0; + CV_WRAP virtual Mat getResponses() const = 0; + CV_WRAP virtual Mat getNormCatResponses() const = 0; + CV_WRAP virtual Mat getSampleWeights() const = 0; + CV_WRAP virtual Mat getTrainSampleWeights() const = 0; + CV_WRAP virtual Mat getTestSampleWeights() const = 0; + CV_WRAP virtual Mat getVarIdx() const = 0; + CV_WRAP virtual Mat getVarType() const = 0; + CV_WRAP Mat getVarSymbolFlags() const; + CV_WRAP virtual int getResponseType() const = 0; + CV_WRAP virtual Mat getTrainSampleIdx() const = 0; + CV_WRAP virtual Mat getTestSampleIdx() const = 0; + CV_WRAP virtual void getValues(int vi, InputArray sidx, float* values) const = 0; + virtual void getNormCatValues(int vi, InputArray sidx, int* values) const = 0; + CV_WRAP virtual Mat getDefaultSubstValues() const = 0; + + CV_WRAP virtual int getCatCount(int vi) const = 0; + + /** @brief Returns the vector of class labels + + The function returns vector of unique labels occurred in the responses. + */ + CV_WRAP virtual Mat getClassLabels() const = 0; + + CV_WRAP virtual Mat getCatOfs() const = 0; + CV_WRAP virtual Mat getCatMap() const = 0; + + /** @brief Splits the training data into the training and test parts + @sa TrainData::setTrainTestSplitRatio + */ + CV_WRAP virtual void setTrainTestSplit(int count, bool shuffle=true) = 0; + + /** @brief Splits the training data into the training and test parts + + The function selects a subset of specified relative size and then returns it as the training + set. If the function is not called, all the data is used for training. 
Please, note that for + each of TrainData::getTrain\* there is corresponding TrainData::getTest\*, so that the test + subset can be retrieved and processed as well. + @sa TrainData::setTrainTestSplit + */ + CV_WRAP virtual void setTrainTestSplitRatio(double ratio, bool shuffle=true) = 0; + CV_WRAP virtual void shuffleTrainTest() = 0; + + /** @brief Returns matrix of test samples */ + CV_WRAP Mat getTestSamples() const; + + /** @brief Returns vector of symbolic names captured in loadFromCSV() */ + CV_WRAP void getNames(std::vector& names) const; + + /** @brief Extract from 1D vector elements specified by passed indexes. + @param vec input vector (supported types: CV_32S, CV_32F, CV_64F) + @param idx 1D index vector + */ + static CV_WRAP Mat getSubVector(const Mat& vec, const Mat& idx); + + /** @brief Extract from matrix rows/cols specified by passed indexes. + @param matrix input matrix (supported types: CV_32S, CV_32F, CV_64F) + @param idx 1D index vector + @param layout specifies to extract rows (cv::ml::ROW_SAMPLES) or to extract columns (cv::ml::COL_SAMPLES) + */ + static CV_WRAP Mat getSubMatrix(const Mat& matrix, const Mat& idx, int layout); + + /** @brief Reads the dataset from a .csv file and returns the ready-to-use training data. + + @param filename The input file name + @param headerLineCount The number of lines in the beginning to skip; besides the header, the + function also skips empty lines and lines staring with `#` + @param responseStartIdx Index of the first output variable. If -1, the function considers the + last variable as the response + @param responseEndIdx Index of the last output variable + 1. If -1, then there is single + response variable at responseStartIdx. + @param varTypeSpec The optional text string that specifies the variables' types. It has the + format `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, variables from `n1 to n2` + (inclusive range), `n3`, `n4 to n5` ... are considered ordered and `n6`, `n7 to n8` ... are + considered as categorical. The range `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]` + should cover all the variables. If varTypeSpec is not specified, then algorithm uses the + following rules: + - all input variables are considered ordered by default. If some column contains has non- + numerical values, e.g. 'apple', 'pear', 'apple', 'apple', 'mango', the corresponding + variable is considered categorical. + - if there are several output variables, they are all considered as ordered. Error is + reported when non-numerical values are used. + - if there is a single output variable, then if its values are non-numerical or are all + integers, then it's considered categorical. Otherwise, it's considered ordered. + @param delimiter The character used to separate values in each line. + @param missch The character used to specify missing measurements. It should not be a digit. + Although it's a non-numerical value, it surely does not affect the decision of whether the + variable ordered or categorical. + @note If the dataset only contains input variables and no responses, use responseStartIdx = -2 + and responseEndIdx = 0. The output variables vector will just contain zeros. + */ + static Ptr loadFromCSV(const String& filename, + int headerLineCount, + int responseStartIdx=-1, + int responseEndIdx=-1, + const String& varTypeSpec=String(), + char delimiter=',', + char missch='?'); + + /** @brief Creates training data from in-memory arrays. + + @param samples matrix of samples. It should have CV_32F type. 
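A minimal TrainData sketch (toy in-memory samples with arbitrary values) combining TrainData::create with the train/test split described above:

#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>

int main()
{
    // Tiny two-class toy set: 4 samples, 2 features each (CV_32F), integer labels (CV_32S).
    float samplesData[4][2] = { {1.f, 1.f}, {1.2f, 0.8f}, {5.f, 5.f}, {5.5f, 4.5f} };
    int   labelsData[4]     = { 0, 0, 1, 1 };
    cv::Mat samples(4, 2, CV_32F, samplesData);
    cv::Mat labels(4, 1, CV_32S, labelsData);

    cv::Ptr<cv::ml::TrainData> data =
        cv::ml::TrainData::create(samples, cv::ml::ROW_SAMPLE, labels);

    // Keep half of the samples for testing; shuffle before splitting.
    data->setTrainTestSplitRatio(0.5, true);

    int nTrain = data->getNTrainSamples();
    int nTest  = data->getNTestSamples();
    (void)nTrain; (void)nTest;
    return 0;
}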
+ @param layout see ml::SampleTypes. + @param responses matrix of responses. If the responses are scalar, they should be stored as a + single row or as a single column. The matrix should have type CV_32F or CV_32S (in the + former case the responses are considered as ordered by default; in the latter case - as + categorical) + @param varIdx vector specifying which variables to use for training. It can be an integer vector + (CV_32S) containing 0-based variable indices or byte vector (CV_8U) containing a mask of + active variables. + @param sampleIdx vector specifying which samples to use for training. It can be an integer + vector (CV_32S) containing 0-based sample indices or byte vector (CV_8U) containing a mask + of training samples. + @param sampleWeights optional vector with weights for each sample. It should have CV_32F type. + @param varType optional vector of type CV_8U and size ` + + `, containing types of each input and output variable. See + ml::VariableTypes. + */ + CV_WRAP static Ptr create(InputArray samples, int layout, InputArray responses, + InputArray varIdx=noArray(), InputArray sampleIdx=noArray(), + InputArray sampleWeights=noArray(), InputArray varType=noArray()); +}; + +/** @brief Base class for statistical models in OpenCV ML. + */ +class CV_EXPORTS_W StatModel : public Algorithm +{ +public: + /** Predict options */ + enum Flags { + UPDATE_MODEL = 1, + RAW_OUTPUT=1, //!< makes the method return the raw results (the sum), not the class label + COMPRESSED_INPUT=2, + PREPROCESSED_INPUT=4 + }; + + /** @brief Returns the number of variables in training samples */ + CV_WRAP virtual int getVarCount() const = 0; + + CV_WRAP virtual bool empty() const CV_OVERRIDE; + + /** @brief Returns true if the model is trained */ + CV_WRAP virtual bool isTrained() const = 0; + /** @brief Returns true if the model is classifier */ + CV_WRAP virtual bool isClassifier() const = 0; + + /** @brief Trains the statistical model + + @param trainData training data that can be loaded from file using TrainData::loadFromCSV or + created with TrainData::create. + @param flags optional flags, depending on the model. Some of the models can be updated with the + new training samples, not completely overwritten (such as NormalBayesClassifier or ANN_MLP). + */ + CV_WRAP virtual bool train( const Ptr& trainData, int flags=0 ); + + /** @brief Trains the statistical model + + @param samples training samples + @param layout See ml::SampleTypes. + @param responses vector of responses associated with the training samples. + */ + CV_WRAP virtual bool train( InputArray samples, int layout, InputArray responses ); + + /** @brief Computes error on the training or test dataset + + @param data the training data + @param test if true, the error is computed over the test subset of the data, otherwise it's + computed over the training subset of the data. Please note that if you loaded a completely + different dataset to evaluate already trained classifier, you will probably want not to set + the test subset at all with TrainData::setTrainTestSplitRatio and specify test=false, so + that the error is computed for the whole new set. Yes, this sounds a bit confusing. + @param resp the optional output responses. + + The method uses StatModel::predict to compute the error. For regression models the error is + computed as RMS, for classifiers - as a percent of missclassified samples (0%-100%). 
+ */ + CV_WRAP virtual float calcError( const Ptr& data, bool test, OutputArray resp ) const; + + /** @brief Predicts response(s) for the provided sample(s) + + @param samples The input samples, floating-point matrix + @param results The optional output matrix of results. + @param flags The optional flags, model-dependent. See cv::ml::StatModel::Flags. + */ + CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0; + + /** @brief Create and train model with default parameters + + The class must implement static `create()` method with no parameters or with all default parameter values + */ + template static Ptr<_Tp> train(const Ptr& data, int flags=0) + { + Ptr<_Tp> model = _Tp::create(); + return !model.empty() && model->train(data, flags) ? model : Ptr<_Tp>(); + } +}; + +/****************************************************************************************\ +* Normal Bayes Classifier * +\****************************************************************************************/ + +/** @brief Bayes classifier for normally distributed data. + +@sa @ref ml_intro_bayes + */ +class CV_EXPORTS_W NormalBayesClassifier : public StatModel +{ +public: + /** @brief Predicts the response for sample(s). + + The method estimates the most probable classes for input vectors. Input vectors (one or more) + are stored as rows of the matrix inputs. In case of multiple input vectors, there should be one + output vector outputs. The predicted class for a single input vector is returned by the method. + The vector outputProbs contains the output probabilities corresponding to each element of + result. + */ + CV_WRAP virtual float predictProb( InputArray inputs, OutputArray outputs, + OutputArray outputProbs, int flags=0 ) const = 0; + + /** Creates empty model + Use StatModel::train to train the model after creation. */ + CV_WRAP static Ptr create(); + + /** @brief Loads and creates a serialized NormalBayesClassifier from a file + * + * Use NormalBayesClassifier::save to serialize and store an NormalBayesClassifier to disk. + * Load the NormalBayesClassifier from this file again, by calling this function with the path to the file. + * Optionally specify the node for the file containing the classifier + * + * @param filepath path to serialized NormalBayesClassifier + * @param nodeName name of node containing the classifier + */ + CV_WRAP static Ptr load(const String& filepath , const String& nodeName = String()); +}; + +/****************************************************************************************\ +* K-Nearest Neighbour Classifier * +\****************************************************************************************/ + +/** @brief The class implements K-Nearest Neighbors model + +@sa @ref ml_intro_knn + */ +class CV_EXPORTS_W KNearest : public StatModel +{ +public: + + /** Default number of neighbors to use in predict method. */ + /** @see setDefaultK */ + CV_WRAP virtual int getDefaultK() const = 0; + /** @copybrief getDefaultK @see getDefaultK */ + CV_WRAP virtual void setDefaultK(int val) = 0; + + /** Whether classification or regression model should be trained. */ + /** @see setIsClassifier */ + CV_WRAP virtual bool getIsClassifier() const = 0; + /** @copybrief getIsClassifier @see getIsClassifier */ + CV_WRAP virtual void setIsClassifier(bool val) = 0; + + /** Parameter for KDTree implementation. 
*/ + /** @see setEmax */ + CV_WRAP virtual int getEmax() const = 0; + /** @copybrief getEmax @see getEmax */ + CV_WRAP virtual void setEmax(int val) = 0; + + /** %Algorithm type, one of KNearest::Types. */ + /** @see setAlgorithmType */ + CV_WRAP virtual int getAlgorithmType() const = 0; + /** @copybrief getAlgorithmType @see getAlgorithmType */ + CV_WRAP virtual void setAlgorithmType(int val) = 0; + + /** @brief Finds the neighbors and predicts responses for input vectors. + + @param samples Input samples stored by rows. It is a single-precision floating-point matrix of + ` * k` size. + @param k Number of used nearest neighbors. Should be greater than 1. + @param results Vector with results of prediction (regression or classification) for each input + sample. It is a single-precision floating-point vector with `` elements. + @param neighborResponses Optional output values for corresponding neighbors. It is a single- + precision floating-point matrix of ` * k` size. + @param dist Optional output distances from the input vectors to the corresponding neighbors. It + is a single-precision floating-point matrix of ` * k` size. + + For each input vector (a row of the matrix samples), the method finds the k nearest neighbors. + In case of regression, the predicted result is a mean value of the particular vector's neighbor + responses. In case of classification, the class is determined by voting. + + For each input vector, the neighbors are sorted by their distances to the vector. + + In case of C++ interface you can use output pointers to empty matrices and the function will + allocate memory itself. + + If only a single input vector is passed, all output matrices are optional and the predicted + value is returned by the method. + + The function is parallelized with the TBB library. + */ + CV_WRAP virtual float findNearest( InputArray samples, int k, + OutputArray results, + OutputArray neighborResponses=noArray(), + OutputArray dist=noArray() ) const = 0; + + /** @brief Implementations of KNearest algorithm + */ + enum Types + { + BRUTE_FORCE=1, + KDTREE=2 + }; + + /** @brief Creates the empty model + + The static method creates empty %KNearest classifier. It should be then trained using StatModel::train method. + */ + CV_WRAP static Ptr create(); + /** @brief Loads and creates a serialized knearest from a file + * + * Use KNearest::save to serialize and store an KNearest to disk. + * Load the KNearest from this file again, by calling this function with the path to the file. + * + * @param filepath path to serialized KNearest + */ + CV_WRAP static Ptr load(const String& filepath); +}; + +/****************************************************************************************\ +* Support Vector Machines * +\****************************************************************************************/ + +/** @brief Support Vector Machines. + +@sa @ref ml_intro_svm + */ +class CV_EXPORTS_W SVM : public StatModel +{ +public: + + class CV_EXPORTS Kernel : public Algorithm + { + public: + virtual int getType() const = 0; + virtual void calc( int vcount, int n, const float* vecs, const float* another, float* results ) = 0; + }; + + /** Type of a %SVM formulation. + See SVM::Types. Default value is SVM::C_SVC. */ + /** @see setType */ + CV_WRAP virtual int getType() const = 0; + /** @copybrief getType @see getType */ + CV_WRAP virtual void setType(int val) = 0; + + /** Parameter \f$\gamma\f$ of a kernel function. + For SVM::POLY, SVM::RBF, SVM::SIGMOID or SVM::CHI2. Default value is 1. 
*/ + /** @see setGamma */ + CV_WRAP virtual double getGamma() const = 0; + /** @copybrief getGamma @see getGamma */ + CV_WRAP virtual void setGamma(double val) = 0; + + /** Parameter _coef0_ of a kernel function. + For SVM::POLY or SVM::SIGMOID. Default value is 0.*/ + /** @see setCoef0 */ + CV_WRAP virtual double getCoef0() const = 0; + /** @copybrief getCoef0 @see getCoef0 */ + CV_WRAP virtual void setCoef0(double val) = 0; + + /** Parameter _degree_ of a kernel function. + For SVM::POLY. Default value is 0. */ + /** @see setDegree */ + CV_WRAP virtual double getDegree() const = 0; + /** @copybrief getDegree @see getDegree */ + CV_WRAP virtual void setDegree(double val) = 0; + + /** Parameter _C_ of a %SVM optimization problem. + For SVM::C_SVC, SVM::EPS_SVR or SVM::NU_SVR. Default value is 0. */ + /** @see setC */ + CV_WRAP virtual double getC() const = 0; + /** @copybrief getC @see getC */ + CV_WRAP virtual void setC(double val) = 0; + + /** Parameter \f$\nu\f$ of a %SVM optimization problem. + For SVM::NU_SVC, SVM::ONE_CLASS or SVM::NU_SVR. Default value is 0. */ + /** @see setNu */ + CV_WRAP virtual double getNu() const = 0; + /** @copybrief getNu @see getNu */ + CV_WRAP virtual void setNu(double val) = 0; + + /** Parameter \f$\epsilon\f$ of a %SVM optimization problem. + For SVM::EPS_SVR. Default value is 0. */ + /** @see setP */ + CV_WRAP virtual double getP() const = 0; + /** @copybrief getP @see getP */ + CV_WRAP virtual void setP(double val) = 0; + + /** Optional weights in the SVM::C_SVC problem, assigned to particular classes. + They are multiplied by _C_ so the parameter _C_ of class _i_ becomes `classWeights(i) * C`. Thus + these weights affect the misclassification penalty for different classes. The larger weight, + the larger penalty on misclassification of data from the corresponding class. Default value is + empty Mat. */ + /** @see setClassWeights */ + CV_WRAP virtual cv::Mat getClassWeights() const = 0; + /** @copybrief getClassWeights @see getClassWeights */ + CV_WRAP virtual void setClassWeights(const cv::Mat &val) = 0; + + /** Termination criteria of the iterative %SVM training procedure which solves a partial + case of constrained quadratic optimization problem. + You can specify tolerance and/or the maximum number of iterations. Default value is + `TermCriteria( TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, FLT_EPSILON )`; */ + /** @see setTermCriteria */ + CV_WRAP virtual cv::TermCriteria getTermCriteria() const = 0; + /** @copybrief getTermCriteria @see getTermCriteria */ + CV_WRAP virtual void setTermCriteria(const cv::TermCriteria &val) = 0; + + /** Type of a %SVM kernel. + See SVM::KernelTypes. Default value is SVM::RBF. */ + CV_WRAP virtual int getKernelType() const = 0; + + /** Initialize with one of predefined kernels. + See SVM::KernelTypes. */ + CV_WRAP virtual void setKernel(int kernelType) = 0; + + /** Initialize with custom kernel. + See SVM::Kernel class for implementation details */ + virtual void setCustomKernel(const Ptr &_kernel) = 0; + + //! %SVM type + enum Types { + /** C-Support Vector Classification. n-class classification (n \f$\geq\f$ 2), allows + imperfect separation of classes with penalty multiplier C for outliers. */ + C_SVC=100, + /** \f$\nu\f$-Support Vector Classification. n-class classification with possible + imperfect separation. Parameter \f$\nu\f$ (in the range 0..1, the larger the value, the smoother + the decision boundary) is used instead of C. */ + NU_SVC=101, + /** Distribution Estimation (One-class %SVM). 
All the training data are from + the same class, %SVM builds a boundary that separates the class from the rest of the feature + space. */ + ONE_CLASS=102, + /** \f$\epsilon\f$-Support Vector Regression. The distance between feature vectors + from the training set and the fitting hyper-plane must be less than p. For outliers the + penalty multiplier C is used. */ + EPS_SVR=103, + /** \f$\nu\f$-Support Vector Regression. \f$\nu\f$ is used instead of p. + See @cite LibSVM for details. */ + NU_SVR=104 + }; + + /** @brief %SVM kernel type + + A comparison of different kernels on the following 2D test case with four classes. Four + SVM::C_SVC SVMs have been trained (one against rest) with auto_train. Evaluation on three + different kernels (SVM::CHI2, SVM::INTER, SVM::RBF). The color depicts the class with max score. + Bright means max-score \> 0, dark means max-score \< 0. + ![image](pics/SVM_Comparison.png) + */ + enum KernelTypes { + /** Returned by SVM::getKernelType in case when custom kernel has been set */ + CUSTOM=-1, + /** Linear kernel. No mapping is done, linear discrimination (or regression) is + done in the original feature space. It is the fastest option. \f$K(x_i, x_j) = x_i^T x_j\f$. */ + LINEAR=0, + /** Polynomial kernel: + \f$K(x_i, x_j) = (\gamma x_i^T x_j + coef0)^{degree}, \gamma > 0\f$. */ + POLY=1, + /** Radial basis function (RBF), a good choice in most cases. + \f$K(x_i, x_j) = e^{-\gamma ||x_i - x_j||^2}, \gamma > 0\f$. */ + RBF=2, + /** Sigmoid kernel: \f$K(x_i, x_j) = \tanh(\gamma x_i^T x_j + coef0)\f$. */ + SIGMOID=3, + /** Exponential Chi2 kernel, similar to the RBF kernel: + \f$K(x_i, x_j) = e^{-\gamma \chi^2(x_i,x_j)}, \chi^2(x_i,x_j) = (x_i-x_j)^2/(x_i+x_j), \gamma > 0\f$. */ + CHI2=4, + /** Histogram intersection kernel. A fast kernel. \f$K(x_i, x_j) = min(x_i,x_j)\f$. */ + INTER=5 + }; + + //! %SVM params type + enum ParamTypes { + C=0, + GAMMA=1, + P=2, + NU=3, + COEF=4, + DEGREE=5 + }; + + /** @brief Trains an %SVM with optimal parameters. + + @param data the training data that can be constructed using TrainData::create or + TrainData::loadFromCSV. + @param kFold Cross-validation parameter. The training set is divided into kFold subsets. One + subset is used to test the model, the others form the train set. So, the %SVM algorithm is + executed kFold times. + @param Cgrid grid for C + @param gammaGrid grid for gamma + @param pGrid grid for p + @param nuGrid grid for nu + @param coeffGrid grid for coeff + @param degreeGrid grid for degree + @param balanced If true and the problem is 2-class classification then the method creates more + balanced cross-validation subsets that is proportions between classes in subsets are close + to such proportion in the whole train dataset. + + The method trains the %SVM model automatically by choosing the optimal parameters C, gamma, p, + nu, coef0, degree. Parameters are considered optimal when the cross-validation + estimate of the test set error is minimal. + + If there is no need to optimize a parameter, the corresponding grid step should be set to any + value less than or equal to 1. For example, to avoid optimization in gamma, set `gammaGrid.step + = 0`, `gammaGrid.minVal`, `gamma_grid.maxVal` as arbitrary numbers. In this case, the value + `Gamma` is taken for gamma. + + And, finally, if the optimization in a parameter is required but the corresponding grid is + unknown, you may call the function SVM::getDefaultGrid. To generate a grid, for example, for + gamma, call `SVM::getDefaultGrid(SVM::GAMMA)`. 
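+
+    A minimal illustrative sketch (assuming `data` is a prepared Ptr<TrainData> for a two-class
+    problem):
+    @code
+    Ptr<SVM> svm = SVM::create();
+    svm->setKernel(SVM::RBF);
+    // search C and gamma over their default grids; the remaining parameters keep their defaults
+    svm->trainAuto(data, 10,
+                   SVM::getDefaultGrid(SVM::C),
+                   SVM::getDefaultGrid(SVM::GAMMA));
+    @endcode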
+ + This function works for the classification (SVM::C_SVC or SVM::NU_SVC) as well as for the + regression (SVM::EPS_SVR or SVM::NU_SVR). If it is SVM::ONE_CLASS, no optimization is made and + the usual %SVM with parameters specified in params is executed. + */ + virtual bool trainAuto( const Ptr& data, int kFold = 10, + ParamGrid Cgrid = getDefaultGrid(C), + ParamGrid gammaGrid = getDefaultGrid(GAMMA), + ParamGrid pGrid = getDefaultGrid(P), + ParamGrid nuGrid = getDefaultGrid(NU), + ParamGrid coeffGrid = getDefaultGrid(COEF), + ParamGrid degreeGrid = getDefaultGrid(DEGREE), + bool balanced=false) = 0; + + /** @brief Trains an %SVM with optimal parameters + + @param samples training samples + @param layout See ml::SampleTypes. + @param responses vector of responses associated with the training samples. + @param kFold Cross-validation parameter. The training set is divided into kFold subsets. One + subset is used to test the model, the others form the train set. So, the %SVM algorithm is + @param Cgrid grid for C + @param gammaGrid grid for gamma + @param pGrid grid for p + @param nuGrid grid for nu + @param coeffGrid grid for coeff + @param degreeGrid grid for degree + @param balanced If true and the problem is 2-class classification then the method creates more + balanced cross-validation subsets that is proportions between classes in subsets are close + to such proportion in the whole train dataset. + + The method trains the %SVM model automatically by choosing the optimal parameters C, gamma, p, + nu, coef0, degree. Parameters are considered optimal when the cross-validation + estimate of the test set error is minimal. + + This function only makes use of SVM::getDefaultGrid for parameter optimization and thus only + offers rudimentary parameter options. + + This function works for the classification (SVM::C_SVC or SVM::NU_SVC) as well as for the + regression (SVM::EPS_SVR or SVM::NU_SVR). If it is SVM::ONE_CLASS, no optimization is made and + the usual %SVM with parameters specified in params is executed. + */ + CV_WRAP bool trainAuto(InputArray samples, + int layout, + InputArray responses, + int kFold = 10, + Ptr Cgrid = SVM::getDefaultGridPtr(SVM::C), + Ptr gammaGrid = SVM::getDefaultGridPtr(SVM::GAMMA), + Ptr pGrid = SVM::getDefaultGridPtr(SVM::P), + Ptr nuGrid = SVM::getDefaultGridPtr(SVM::NU), + Ptr coeffGrid = SVM::getDefaultGridPtr(SVM::COEF), + Ptr degreeGrid = SVM::getDefaultGridPtr(SVM::DEGREE), + bool balanced=false); + + /** @brief Retrieves all the support vectors + + The method returns all the support vectors as a floating-point matrix, where support vectors are + stored as matrix rows. + */ + CV_WRAP virtual Mat getSupportVectors() const = 0; + + /** @brief Retrieves all the uncompressed support vectors of a linear %SVM + + The method returns all the uncompressed support vectors of a linear %SVM that the compressed + support vector, used for prediction, was derived from. They are returned in a floating-point + matrix, where the support vectors are stored as matrix rows. + */ + CV_WRAP Mat getUncompressedSupportVectors() const; + + /** @brief Retrieves the decision function + + @param i the index of the decision function. If the problem solved is regression, 1-class or + 2-class classification, then there will be just one decision function and the index should + always be 0. Otherwise, in the case of N-class classification, there will be \f$N(N-1)/2\f$ + decision functions. + @param alpha the optional output vector for weights, corresponding to different support vectors. 
+ In the case of linear %SVM all the alpha's will be 1's. + @param svidx the optional output vector of indices of support vectors within the matrix of + support vectors (which can be retrieved by SVM::getSupportVectors). In the case of linear + %SVM each decision function consists of a single "compressed" support vector. + + The method returns rho parameter of the decision function, a scalar subtracted from the weighted + sum of kernel responses. + */ + CV_WRAP virtual double getDecisionFunction(int i, OutputArray alpha, OutputArray svidx) const = 0; + + /** @brief Generates a grid for %SVM parameters. + + @param param_id %SVM parameters IDs that must be one of the SVM::ParamTypes. The grid is + generated for the parameter with this ID. + + The function generates a grid for the specified parameter of the %SVM algorithm. The grid may be + passed to the function SVM::trainAuto. + */ + static ParamGrid getDefaultGrid( int param_id ); + + /** @brief Generates a grid for %SVM parameters. + + @param param_id %SVM parameters IDs that must be one of the SVM::ParamTypes. The grid is + generated for the parameter with this ID. + + The function generates a grid pointer for the specified parameter of the %SVM algorithm. + The grid may be passed to the function SVM::trainAuto. + */ + CV_WRAP static Ptr getDefaultGridPtr( int param_id ); + + /** Creates empty model. + Use StatModel::train to train the model. Since %SVM has several parameters, you may want to + find the best parameters for your problem, it can be done with SVM::trainAuto. */ + CV_WRAP static Ptr create(); + + /** @brief Loads and creates a serialized svm from a file + * + * Use SVM::save to serialize and store an SVM to disk. + * Load the SVM from this file again, by calling this function with the path to the file. + * + * @param filepath path to serialized svm + */ + CV_WRAP static Ptr load(const String& filepath); +}; + +/****************************************************************************************\ +* Expectation - Maximization * +\****************************************************************************************/ + +/** @brief The class implements the Expectation Maximization algorithm. + +@sa @ref ml_intro_em + */ +class CV_EXPORTS_W EM : public StatModel +{ +public: + //! Type of covariation matrices + enum Types { + /** A scaled identity matrix \f$\mu_k * I\f$. There is the only + parameter \f$\mu_k\f$ to be estimated for each matrix. The option may be used in special cases, + when the constraint is relevant, or as a first step in the optimization (for example in case + when the data is preprocessed with PCA). The results of such preliminary estimation may be + passed again to the optimization procedure, this time with + covMatType=EM::COV_MAT_DIAGONAL. */ + COV_MAT_SPHERICAL=0, + /** A diagonal matrix with positive diagonal elements. The number of + free parameters is d for each matrix. This is most commonly used option yielding good + estimation results. */ + COV_MAT_DIAGONAL=1, + /** A symmetric positively defined matrix. The number of free + parameters in each matrix is about \f$d^2/2\f$. It is not recommended to use this option, unless + there is pretty accurate initial estimation of the parameters and/or a huge number of + training samples. */ + COV_MAT_GENERIC=2, + COV_MAT_DEFAULT=COV_MAT_DIAGONAL + }; + + //! Default parameters + enum {DEFAULT_NCLUSTERS=5, DEFAULT_MAX_ITERS=100}; + + //! 
The initial step + enum {START_E_STEP=1, START_M_STEP=2, START_AUTO_STEP=0}; + + /** The number of mixture components in the Gaussian mixture model. + Default value of the parameter is EM::DEFAULT_NCLUSTERS=5. Some of %EM implementation could + determine the optimal number of mixtures within a specified value range, but that is not the + case in ML yet. */ + /** @see setClustersNumber */ + CV_WRAP virtual int getClustersNumber() const = 0; + /** @copybrief getClustersNumber @see getClustersNumber */ + CV_WRAP virtual void setClustersNumber(int val) = 0; + + /** Constraint on covariance matrices which defines type of matrices. + See EM::Types. */ + /** @see setCovarianceMatrixType */ + CV_WRAP virtual int getCovarianceMatrixType() const = 0; + /** @copybrief getCovarianceMatrixType @see getCovarianceMatrixType */ + CV_WRAP virtual void setCovarianceMatrixType(int val) = 0; + + /** The termination criteria of the %EM algorithm. + The %EM algorithm can be terminated by the number of iterations termCrit.maxCount (number of + M-steps) or when relative change of likelihood logarithm is less than termCrit.epsilon. Default + maximum number of iterations is EM::DEFAULT_MAX_ITERS=100. */ + /** @see setTermCriteria */ + CV_WRAP virtual TermCriteria getTermCriteria() const = 0; + /** @copybrief getTermCriteria @see getTermCriteria */ + CV_WRAP virtual void setTermCriteria(const TermCriteria &val) = 0; + + /** @brief Returns weights of the mixtures + + Returns vector with the number of elements equal to the number of mixtures. + */ + CV_WRAP virtual Mat getWeights() const = 0; + /** @brief Returns the cluster centers (means of the Gaussian mixture) + + Returns matrix with the number of rows equal to the number of mixtures and number of columns + equal to the space dimensionality. + */ + CV_WRAP virtual Mat getMeans() const = 0; + /** @brief Returns covariation matrices + + Returns vector of covariation matrices. Number of matrices is the number of gaussian mixtures, + each matrix is a square floating-point matrix NxN, where N is the space dimensionality. + */ + CV_WRAP virtual void getCovs(CV_OUT std::vector& covs) const = 0; + + /** @brief Returns posterior probabilities for the provided samples + + @param samples The input samples, floating-point matrix + @param results The optional output \f$ nSamples \times nClusters\f$ matrix of results. It contains + posterior probabilities for each sample from the input + @param flags This parameter will be ignored + */ + CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const CV_OVERRIDE = 0; + + /** @brief Returns a likelihood logarithm value and an index of the most probable mixture component + for the given sample. + + @param sample A sample for classification. It should be a one-channel matrix of + \f$1 \times dims\f$ or \f$dims \times 1\f$ size. + @param probs Optional output matrix that contains posterior probabilities of each component + given the sample. It has \f$1 \times nclusters\f$ size and CV_64FC1 type. + + The method returns a two-element double vector. Zero element is a likelihood logarithm value for + the sample. First element is an index of the most probable mixture component for the given + sample. + */ + CV_WRAP virtual Vec2d predict2(InputArray sample, OutputArray probs) const = 0; + + /** @brief Estimate the Gaussian mixture parameters from a samples set. + + This variation starts with Expectation step. Initial values of the model parameters will be + estimated by the k-means algorithm. 
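+
+    A minimal illustrative sketch (assuming `points` is a CV_64F matrix with one sample per row):
+    @code
+    Ptr<EM> em = EM::create();
+    em->setClustersNumber(3);
+    Mat logLikelihoods, labels;
+    em->trainEM(points, logLikelihoods, labels, noArray());
+    Mat means = em->getMeans(); // one row per mixture component
+    @endcode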
+ + Unlike many of the ML models, %EM is an unsupervised learning algorithm and it does not take + responses (class labels or function values) as input. Instead, it computes the *Maximum + Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the + parameters inside the structure: \f$p_{i,k}\f$ in probs, \f$a_k\f$ in means , \f$S_k\f$ in + covs[k], \f$\pi_k\f$ in weights , and optionally computes the output "class label" for each + sample: \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most + probable mixture component for each sample). + + The trained model can be used further for prediction, just like any other classifier. The + trained model is similar to the NormalBayesClassifier. + + @param samples Samples from which the Gaussian mixture model will be estimated. It should be a + one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type + it will be converted to the inner matrix of such type for the further computing. + @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for + each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type. + @param labels The optional output "class label" for each sample: + \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable + mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type. + @param probs The optional output matrix that contains posterior probabilities of each Gaussian + mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and + CV_64FC1 type. + */ + CV_WRAP virtual bool trainEM(InputArray samples, + OutputArray logLikelihoods=noArray(), + OutputArray labels=noArray(), + OutputArray probs=noArray()) = 0; + + /** @brief Estimate the Gaussian mixture parameters from a samples set. + + This variation starts with Expectation step. You need to provide initial means \f$a_k\f$ of + mixture components. Optionally you can pass initial weights \f$\pi_k\f$ and covariance matrices + \f$S_k\f$ of mixture components. + + @param samples Samples from which the Gaussian mixture model will be estimated. It should be a + one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type + it will be converted to the inner matrix of such type for the further computing. + @param means0 Initial means \f$a_k\f$ of mixture components. It is a one-channel matrix of + \f$nclusters \times dims\f$ size. If the matrix does not have CV_64F type it will be + converted to the inner matrix of such type for the further computing. + @param covs0 The vector of initial covariance matrices \f$S_k\f$ of mixture components. Each of + covariance matrices is a one-channel matrix of \f$dims \times dims\f$ size. If the matrices + do not have CV_64F type they will be converted to the inner matrices of such type for the + further computing. + @param weights0 Initial weights \f$\pi_k\f$ of mixture components. It should be a one-channel + floating-point matrix with \f$1 \times nclusters\f$ or \f$nclusters \times 1\f$ size. + @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for + each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type. + @param labels The optional output "class label" for each sample: + \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable + mixture component for each sample). 
It has \f$nsamples \times 1\f$ size and CV_32SC1 type. + @param probs The optional output matrix that contains posterior probabilities of each Gaussian + mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and + CV_64FC1 type. + */ + CV_WRAP virtual bool trainE(InputArray samples, InputArray means0, + InputArray covs0=noArray(), + InputArray weights0=noArray(), + OutputArray logLikelihoods=noArray(), + OutputArray labels=noArray(), + OutputArray probs=noArray()) = 0; + + /** @brief Estimate the Gaussian mixture parameters from a samples set. + + This variation starts with Maximization step. You need to provide initial probabilities + \f$p_{i,k}\f$ to use this option. + + @param samples Samples from which the Gaussian mixture model will be estimated. It should be a + one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type + it will be converted to the inner matrix of such type for the further computing. + @param probs0 the probabilities + @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for + each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type. + @param labels The optional output "class label" for each sample: + \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable + mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type. + @param probs The optional output matrix that contains posterior probabilities of each Gaussian + mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and + CV_64FC1 type. + */ + CV_WRAP virtual bool trainM(InputArray samples, InputArray probs0, + OutputArray logLikelihoods=noArray(), + OutputArray labels=noArray(), + OutputArray probs=noArray()) = 0; + + /** Creates empty %EM model. + The model should be trained then using StatModel::train(traindata, flags) method. Alternatively, you + can use one of the EM::train\* methods or load it from file using Algorithm::load\(filename). + */ + CV_WRAP static Ptr create(); + + /** @brief Loads and creates a serialized EM from a file + * + * Use EM::save to serialize and store an EM to disk. + * Load the EM from this file again, by calling this function with the path to the file. + * Optionally specify the node for the file containing the classifier + * + * @param filepath path to serialized EM + * @param nodeName name of node containing the classifier + */ + CV_WRAP static Ptr load(const String& filepath , const String& nodeName = String()); +}; + +/****************************************************************************************\ +* Decision Tree * +\****************************************************************************************/ + +/** @brief The class represents a single decision tree or a collection of decision trees. + +The current public interface of the class allows user to train only a single decision tree, however +the class is capable of storing multiple decision trees and using them for prediction (by summing +responses or using a voting schemes), and the derived from DTrees classes (such as RTrees and Boost) +use this capability to implement decision tree ensembles. 
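+
+A minimal illustrative sketch (assuming `samples` is a CV_32F matrix of feature rows, `labels` a
+CV_32S column of class ids, and `testSample` a single CV_32F row):
+@code
+Ptr<DTrees> dtree = DTrees::create();
+dtree->setMaxDepth(8);
+dtree->setMinSampleCount(2);
+dtree->setCVFolds(0); // skip built-in cross-validation pruning
+dtree->train(samples, ROW_SAMPLE, labels);
+float prediction = dtree->predict(testSample);
+@endcode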
+ +@sa @ref ml_intro_trees +*/ +class CV_EXPORTS_W DTrees : public StatModel +{ +public: + /** Predict options */ + enum Flags { PREDICT_AUTO=0, PREDICT_SUM=(1<<8), PREDICT_MAX_VOTE=(2<<8), PREDICT_MASK=(3<<8) }; + + /** Cluster possible values of a categorical variable into K\<=maxCategories clusters to + find a suboptimal split. + If a discrete variable, on which the training procedure tries to make a split, takes more than + maxCategories values, the precise best subset estimation may take a very long time because the + algorithm is exponential. Instead, many decision trees engines (including our implementation) + try to find sub-optimal split in this case by clustering all the samples into maxCategories + clusters that is some categories are merged together. The clustering is applied only in n \> + 2-class classification problems for categorical variables with N \> max_categories possible + values. In case of regression and 2-class classification the optimal split can be found + efficiently without employing clustering, thus the parameter is not used in these cases. + Default value is 10.*/ + /** @see setMaxCategories */ + CV_WRAP virtual int getMaxCategories() const = 0; + /** @copybrief getMaxCategories @see getMaxCategories */ + CV_WRAP virtual void setMaxCategories(int val) = 0; + + /** The maximum possible depth of the tree. + That is the training algorithms attempts to split a node while its depth is less than maxDepth. + The root node has zero depth. The actual depth may be smaller if the other termination criteria + are met (see the outline of the training procedure @ref ml_intro_trees "here"), and/or if the + tree is pruned. Default value is INT_MAX.*/ + /** @see setMaxDepth */ + CV_WRAP virtual int getMaxDepth() const = 0; + /** @copybrief getMaxDepth @see getMaxDepth */ + CV_WRAP virtual void setMaxDepth(int val) = 0; + + /** If the number of samples in a node is less than this parameter then the node will not be split. + + Default value is 10.*/ + /** @see setMinSampleCount */ + CV_WRAP virtual int getMinSampleCount() const = 0; + /** @copybrief getMinSampleCount @see getMinSampleCount */ + CV_WRAP virtual void setMinSampleCount(int val) = 0; + + /** If CVFolds \> 1 then algorithms prunes the built decision tree using K-fold + cross-validation procedure where K is equal to CVFolds. + Default value is 10.*/ + /** @see setCVFolds */ + CV_WRAP virtual int getCVFolds() const = 0; + /** @copybrief getCVFolds @see getCVFolds */ + CV_WRAP virtual void setCVFolds(int val) = 0; + + /** If true then surrogate splits will be built. + These splits allow to work with missing data and compute variable importance correctly. + Default value is false. + @note currently it's not implemented.*/ + /** @see setUseSurrogates */ + CV_WRAP virtual bool getUseSurrogates() const = 0; + /** @copybrief getUseSurrogates @see getUseSurrogates */ + CV_WRAP virtual void setUseSurrogates(bool val) = 0; + + /** If true then a pruning will be harsher. + This will make a tree more compact and more resistant to the training data noise but a bit less + accurate. Default value is true.*/ + /** @see setUse1SERule */ + CV_WRAP virtual bool getUse1SERule() const = 0; + /** @copybrief getUse1SERule @see getUse1SERule */ + CV_WRAP virtual void setUse1SERule(bool val) = 0; + + /** If true then pruned branches are physically removed from the tree. + Otherwise they are retained and it is possible to get results from the original unpruned (or + pruned less aggressively) tree. 
Default value is true.*/ + /** @see setTruncatePrunedTree */ + CV_WRAP virtual bool getTruncatePrunedTree() const = 0; + /** @copybrief getTruncatePrunedTree @see getTruncatePrunedTree */ + CV_WRAP virtual void setTruncatePrunedTree(bool val) = 0; + + /** Termination criteria for regression trees. + If all absolute differences between an estimated value in a node and values of train samples + in this node are less than this parameter then the node will not be split further. Default + value is 0.01f*/ + /** @see setRegressionAccuracy */ + CV_WRAP virtual float getRegressionAccuracy() const = 0; + /** @copybrief getRegressionAccuracy @see getRegressionAccuracy */ + CV_WRAP virtual void setRegressionAccuracy(float val) = 0; + + /** @brief The array of a priori class probabilities, sorted by the class label value. + + The parameter can be used to tune the decision tree preferences toward a certain class. For + example, if you want to detect some rare anomaly occurrence, the training base will likely + contain much more normal cases than anomalies, so a very good classification performance + will be achieved just by considering every case as normal. To avoid this, the priors can be + specified, where the anomaly probability is artificially increased (up to 0.5 or even + greater), so the weight of the misclassified anomalies becomes much bigger, and the tree is + adjusted properly. + + You can also think about this parameter as weights of prediction categories which determine + relative weights that you give to misclassification. That is, if the weight of the first + category is 1 and the weight of the second category is 10, then each mistake in predicting + the second category is equivalent to making 10 mistakes in predicting the first category. + Default value is empty Mat.*/ + /** @see setPriors */ + CV_WRAP virtual cv::Mat getPriors() const = 0; + /** @copybrief getPriors @see getPriors */ + CV_WRAP virtual void setPriors(const cv::Mat &val) = 0; + + /** @brief The class represents a decision tree node. + */ + class CV_EXPORTS Node + { + public: + Node(); + double value; //!< Value at the node: a class label in case of classification or estimated + //!< function value in case of regression. + int classIdx; //!< Class index normalized to 0..class_count-1 range and assigned to the + //!< node. It is used internally in classification trees and tree ensembles. + int parent; //!< Index of the parent node + int left; //!< Index of the left child node + int right; //!< Index of right child node + int defaultDir; //!< Default direction where to go (-1: left or +1: right). It helps in the + //!< case of missing values. + int split; //!< Index of the first split + }; + + /** @brief The class represents split in a decision tree. + */ + class CV_EXPORTS Split + { + public: + Split(); + int varIdx; //!< Index of variable on which the split is created. + bool inversed; //!< If true, then the inverse split rule is used (i.e. left and right + //!< branches are exchanged in the rule expressions below). + float quality; //!< The split quality, a positive number. It is used to choose the best split. + int next; //!< Index of the next split in the list of splits for the node + float c; /**< The threshold value in case of split on an ordered variable. + The rule is: + @code{.none} + if var_value < c + then next_node <- left + else next_node <- right + @endcode */ + int subsetOfs; /**< Offset of the bitset used by the split on a categorical variable. 
+ The rule is: + @code{.none} + if bitset[var_value] == 1 + then next_node <- left + else next_node <- right + @endcode */ + }; + + /** @brief Returns indices of root nodes + */ + virtual const std::vector& getRoots() const = 0; + /** @brief Returns all the nodes + + all the node indices are indices in the returned vector + */ + virtual const std::vector& getNodes() const = 0; + /** @brief Returns all the splits + + all the split indices are indices in the returned vector + */ + virtual const std::vector& getSplits() const = 0; + /** @brief Returns all the bitsets for categorical splits + + Split::subsetOfs is an offset in the returned vector + */ + virtual const std::vector& getSubsets() const = 0; + + /** @brief Creates the empty model + + The static method creates empty decision tree with the specified parameters. It should be then + trained using train method (see StatModel::train). Alternatively, you can load the model from + file using Algorithm::load\(filename). + */ + CV_WRAP static Ptr create(); + + /** @brief Loads and creates a serialized DTrees from a file + * + * Use DTree::save to serialize and store an DTree to disk. + * Load the DTree from this file again, by calling this function with the path to the file. + * Optionally specify the node for the file containing the classifier + * + * @param filepath path to serialized DTree + * @param nodeName name of node containing the classifier + */ + CV_WRAP static Ptr load(const String& filepath , const String& nodeName = String()); +}; + +/****************************************************************************************\ +* Random Trees Classifier * +\****************************************************************************************/ + +/** @brief The class implements the random forest predictor. + +@sa @ref ml_intro_rtrees + */ +class CV_EXPORTS_W RTrees : public DTrees +{ +public: + + /** If true then variable importance will be calculated and then it can be retrieved by RTrees::getVarImportance. + Default value is false.*/ + /** @see setCalculateVarImportance */ + CV_WRAP virtual bool getCalculateVarImportance() const = 0; + /** @copybrief getCalculateVarImportance @see getCalculateVarImportance */ + CV_WRAP virtual void setCalculateVarImportance(bool val) = 0; + + /** The size of the randomly selected subset of features at each tree node and that are used + to find the best split(s). + If you set it to 0 then the size will be set to the square root of the total number of + features. Default value is 0.*/ + /** @see setActiveVarCount */ + CV_WRAP virtual int getActiveVarCount() const = 0; + /** @copybrief getActiveVarCount @see getActiveVarCount */ + CV_WRAP virtual void setActiveVarCount(int val) = 0; + + /** The termination criteria that specifies when the training algorithm stops. + Either when the specified number of trees is trained and added to the ensemble or when + sufficient accuracy (measured as OOB error) is achieved. Typically the more trees you have the + better the accuracy. However, the improvement in accuracy generally diminishes and asymptotes + pass a certain number of trees. Also to keep in mind, the number of tree increases the + prediction time linearly. 
Default value is TermCriteria(TermCriteria::MAX_ITERS + + TermCriteria::EPS, 50, 0.1)*/ + /** @see setTermCriteria */ + CV_WRAP virtual TermCriteria getTermCriteria() const = 0; + /** @copybrief getTermCriteria @see getTermCriteria */ + CV_WRAP virtual void setTermCriteria(const TermCriteria &val) = 0; + + /** Returns the variable importance array. + The method returns the variable importance vector, computed at the training stage when + CalculateVarImportance is set to true. If this flag was set to false, the empty matrix is + returned. + */ + CV_WRAP virtual Mat getVarImportance() const = 0; + + /** Returns the result of each individual tree in the forest. + In case the model is a regression problem, the method will return each of the trees' + results for each of the sample cases. If the model is a classifier, it will return + a Mat with samples + 1 rows, where the first row gives the class number and the + following rows return the votes each class had for each sample. + @param samples Array containing the samples for which votes will be calculated. + @param results Array where the result of the calculation will be written. + @param flags Flags for defining the type of RTrees. + */ + CV_WRAP void getVotes(InputArray samples, OutputArray results, int flags) const; + + /** Returns the OOB error value, computed at the training stage when calcOOBError is set to true. + * If this flag was set to false, 0 is returned. The OOB error is also scaled by sample weighting. + */ +#if CV_VERSION_MAJOR == 3 + CV_WRAP double getOOBError() const; +#else + /*CV_WRAP*/ virtual double getOOBError() const = 0; +#endif + + /** Creates the empty model. + Use StatModel::train to train the model, StatModel::train to create and train the model, + Algorithm::load to load the pre-trained model. + */ + CV_WRAP static Ptr create(); + + /** @brief Loads and creates a serialized RTree from a file + * + * Use RTree::save to serialize and store an RTree to disk. + * Load the RTree from this file again, by calling this function with the path to the file. + * Optionally specify the node for the file containing the classifier + * + * @param filepath path to serialized RTree + * @param nodeName name of node containing the classifier + */ + CV_WRAP static Ptr load(const String& filepath , const String& nodeName = String()); +}; + +/****************************************************************************************\ +* Boosted tree classifier * +\****************************************************************************************/ + +/** @brief Boosted tree classifier derived from DTrees + +@sa @ref ml_intro_boost + */ +class CV_EXPORTS_W Boost : public DTrees +{ +public: + /** Type of the boosting algorithm. + See Boost::Types. Default value is Boost::REAL. */ + /** @see setBoostType */ + CV_WRAP virtual int getBoostType() const = 0; + /** @copybrief getBoostType @see getBoostType */ + CV_WRAP virtual void setBoostType(int val) = 0; + + /** The number of weak classifiers. + Default value is 100. */ + /** @see setWeakCount */ + CV_WRAP virtual int getWeakCount() const = 0; + /** @copybrief getWeakCount @see getWeakCount */ + CV_WRAP virtual void setWeakCount(int val) = 0; + + /** A threshold between 0 and 1 used to save computational time. + Samples with summary weight \f$\leq 1 - weight_trim_rate\f$ do not participate in the *next* + iteration of training. Set this parameter to 0 to turn off this functionality. 
Default value is 0.95.*/
+    /** @see setWeightTrimRate */
+    CV_WRAP virtual double getWeightTrimRate() const = 0;
+    /** @copybrief getWeightTrimRate @see getWeightTrimRate */
+    CV_WRAP virtual void setWeightTrimRate(double val) = 0;
+
+    /** Boosting type.
+    Gentle AdaBoost and Real AdaBoost are often the preferable choices. */
+    enum Types {
+        DISCRETE=0, //!< Discrete AdaBoost.
+        REAL=1, //!< Real AdaBoost. It is a technique that utilizes confidence-rated predictions
+                //!< and works well with categorical data.
+        LOGIT=2, //!< LogitBoost. It can produce good regression fits.
+        GENTLE=3 //!< Gentle AdaBoost. It puts less weight on outlier data points and for that
+                 //!< reason is often good with regression data.
+    };
+
+    /** Creates the empty model.
+    Use StatModel::train to train the model, Algorithm::load<Boost>(filename) to load the pre-trained model. */
+    CV_WRAP static Ptr<Boost> create();
+
+    /** @brief Loads and creates a serialized Boost from a file
+     *
+     * Use Boost::save to serialize and store a Boost to disk.
+     * Load the Boost from this file again, by calling this function with the path to the file.
+     * Optionally specify the node for the file containing the classifier
+     *
+     * @param filepath path to serialized Boost
+     * @param nodeName name of node containing the classifier
+     */
+    CV_WRAP static Ptr<Boost> load(const String& filepath , const String& nodeName = String());
+};
+
+/****************************************************************************************\
+*                                   Gradient Boosted Trees                              *
+\****************************************************************************************/
+
+/*class CV_EXPORTS_W GBTrees : public DTrees
+{
+public:
+    struct CV_EXPORTS_W_MAP Params : public DTrees::Params
+    {
+        CV_PROP_RW int weakCount;
+        CV_PROP_RW int lossFunctionType;
+        CV_PROP_RW float subsamplePortion;
+        CV_PROP_RW float shrinkage;
+
+        Params();
+        Params( int lossFunctionType, int weakCount, float shrinkage,
+            float subsamplePortion, int maxDepth, bool useSurrogates );
+    };
+
+    enum {SQUARED_LOSS=0, ABSOLUTE_LOSS, HUBER_LOSS=3, DEVIANCE_LOSS};
+
+    virtual void setK(int k) = 0;
+
+    virtual float predictSerial( InputArray samples,
+                 OutputArray weakResponses, int flags) const = 0;
+
+    static Ptr<GBTrees> create(const Params& p);
+};*/
+
+/****************************************************************************************\
+*                              Artificial Neural Networks (ANN)                         *
+\****************************************************************************************/
+
+/////////////////////////////////// Multi-Layer Perceptrons //////////////////////////////
+
+/** @brief Artificial Neural Networks - Multi-Layer Perceptrons.
+
+Unlike many other models in ML that are constructed and trained at once, in the MLP model these
+steps are separated. First, a network with the specified topology is created using the non-default
+constructor or the method ANN_MLP::create. All the weights are set to zeros. Then, the network is
+trained using a set of input and output vectors. The training procedure can be repeated more than
+once, that is, the weights can be adjusted based on the new training data.
+
+Additional flags for StatModel::train are available: ANN_MLP::TrainFlags.
+
+@sa @ref ml_intro_ann
+ */
+class CV_EXPORTS_W ANN_MLP : public StatModel
+{
+public:
+    /** Available training methods */
+    enum TrainingMethods {
+        BACKPROP=0, //!< The back-propagation algorithm.
+        RPROP = 1, //!< The RPROP algorithm. See @cite RPROP93 for details.
+        ANNEAL = 2 //!< The simulated annealing algorithm. See @cite Kirkpatrick83 for details.
+    };
+
+    /** Sets training method and common parameters.
+    @param method Default value is ANN_MLP::RPROP. See ANN_MLP::TrainingMethods.
+ @param param1 passed to setRpropDW0 for ANN_MLP::RPROP and to setBackpropWeightScale for ANN_MLP::BACKPROP and to initialT for ANN_MLP::ANNEAL. + @param param2 passed to setRpropDWMin for ANN_MLP::RPROP and to setBackpropMomentumScale for ANN_MLP::BACKPROP and to finalT for ANN_MLP::ANNEAL. + */ + CV_WRAP virtual void setTrainMethod(int method, double param1 = 0, double param2 = 0) = 0; + + /** Returns current training method */ + CV_WRAP virtual int getTrainMethod() const = 0; + + /** Initialize the activation function for each neuron. + Currently the default and the only fully supported activation function is ANN_MLP::SIGMOID_SYM. + @param type The type of activation function. See ANN_MLP::ActivationFunctions. + @param param1 The first parameter of the activation function, \f$\alpha\f$. Default value is 0. + @param param2 The second parameter of the activation function, \f$\beta\f$. Default value is 0. + */ + CV_WRAP virtual void setActivationFunction(int type, double param1 = 0, double param2 = 0) = 0; + + /** Integer vector specifying the number of neurons in each layer including the input and output layers. + The very first element specifies the number of elements in the input layer. + The last element - number of elements in the output layer. Default value is empty Mat. + @sa getLayerSizes */ + CV_WRAP virtual void setLayerSizes(InputArray _layer_sizes) = 0; + + /** Integer vector specifying the number of neurons in each layer including the input and output layers. + The very first element specifies the number of elements in the input layer. + The last element - number of elements in the output layer. + @sa setLayerSizes */ + CV_WRAP virtual cv::Mat getLayerSizes() const = 0; + + /** Termination criteria of the training algorithm. + You can specify the maximum number of iterations (maxCount) and/or how much the error could + change between the iterations to make the algorithm continue (epsilon). Default value is + TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.01).*/ + /** @see setTermCriteria */ + CV_WRAP virtual TermCriteria getTermCriteria() const = 0; + /** @copybrief getTermCriteria @see getTermCriteria */ + CV_WRAP virtual void setTermCriteria(TermCriteria val) = 0; + + /** BPROP: Strength of the weight gradient term. + The recommended value is about 0.1. Default value is 0.1.*/ + /** @see setBackpropWeightScale */ + CV_WRAP virtual double getBackpropWeightScale() const = 0; + /** @copybrief getBackpropWeightScale @see getBackpropWeightScale */ + CV_WRAP virtual void setBackpropWeightScale(double val) = 0; + + /** BPROP: Strength of the momentum term (the difference between weights on the 2 previous iterations). + This parameter provides some inertia to smooth the random fluctuations of the weights. It can + vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good enough. + Default value is 0.1.*/ + /** @see setBackpropMomentumScale */ + CV_WRAP virtual double getBackpropMomentumScale() const = 0; + /** @copybrief getBackpropMomentumScale @see getBackpropMomentumScale */ + CV_WRAP virtual void setBackpropMomentumScale(double val) = 0; + + /** RPROP: Initial value \f$\Delta_0\f$ of update-values \f$\Delta_{ij}\f$. + Default value is 0.1.*/ + /** @see setRpropDW0 */ + CV_WRAP virtual double getRpropDW0() const = 0; + /** @copybrief getRpropDW0 @see getRpropDW0 */ + CV_WRAP virtual void setRpropDW0(double val) = 0; + + /** RPROP: Increase factor \f$\eta^+\f$. + It must be \>1. 
Default value is 1.2.*/ + /** @see setRpropDWPlus */ + CV_WRAP virtual double getRpropDWPlus() const = 0; + /** @copybrief getRpropDWPlus @see getRpropDWPlus */ + CV_WRAP virtual void setRpropDWPlus(double val) = 0; + + /** RPROP: Decrease factor \f$\eta^-\f$. + It must be \<1. Default value is 0.5.*/ + /** @see setRpropDWMinus */ + CV_WRAP virtual double getRpropDWMinus() const = 0; + /** @copybrief getRpropDWMinus @see getRpropDWMinus */ + CV_WRAP virtual void setRpropDWMinus(double val) = 0; + + /** RPROP: Update-values lower limit \f$\Delta_{min}\f$. + It must be positive. Default value is FLT_EPSILON.*/ + /** @see setRpropDWMin */ + CV_WRAP virtual double getRpropDWMin() const = 0; + /** @copybrief getRpropDWMin @see getRpropDWMin */ + CV_WRAP virtual void setRpropDWMin(double val) = 0; + + /** RPROP: Update-values upper limit \f$\Delta_{max}\f$. + It must be \>1. Default value is 50.*/ + /** @see setRpropDWMax */ + CV_WRAP virtual double getRpropDWMax() const = 0; + /** @copybrief getRpropDWMax @see getRpropDWMax */ + CV_WRAP virtual void setRpropDWMax(double val) = 0; + + /** ANNEAL: Update initial temperature. + It must be \>=0. Default value is 10.*/ + /** @see setAnnealInitialT */ + CV_WRAP double getAnnealInitialT() const; + /** @copybrief getAnnealInitialT @see getAnnealInitialT */ + CV_WRAP void setAnnealInitialT(double val); + + /** ANNEAL: Update final temperature. + It must be \>=0 and less than initialT. Default value is 0.1.*/ + /** @see setAnnealFinalT */ + CV_WRAP double getAnnealFinalT() const; + /** @copybrief getAnnealFinalT @see getAnnealFinalT */ + CV_WRAP void setAnnealFinalT(double val); + + /** ANNEAL: Update cooling ratio. + It must be \>0 and less than 1. Default value is 0.95.*/ + /** @see setAnnealCoolingRatio */ + CV_WRAP double getAnnealCoolingRatio() const; + /** @copybrief getAnnealCoolingRatio @see getAnnealCoolingRatio */ + CV_WRAP void setAnnealCoolingRatio(double val); + + /** ANNEAL: Update iteration per step. + It must be \>0 . Default value is 10.*/ + /** @see setAnnealItePerStep */ + CV_WRAP int getAnnealItePerStep() const; + /** @copybrief getAnnealItePerStep @see getAnnealItePerStep */ + CV_WRAP void setAnnealItePerStep(int val); + + /** @brief Set/initialize anneal RNG */ + void setAnnealEnergyRNG(const RNG& rng); + + /** possible activation functions */ + enum ActivationFunctions { + /** Identity function: \f$f(x)=x\f$ */ + IDENTITY = 0, + /** Symmetrical sigmoid: \f$f(x)=\beta*(1-e^{-\alpha x})/(1+e^{-\alpha x})\f$ + @note + If you are using the default sigmoid activation function with the default parameter values + fparam1=0 and fparam2=0 then the function used is y = 1.7159\*tanh(2/3 \* x), so the output + will range from [-1.7159, 1.7159], instead of [0,1].*/ + SIGMOID_SYM = 1, + /** Gaussian function: \f$f(x)=\beta e^{-\alpha x*x}\f$ */ + GAUSSIAN = 2, + /** ReLU function: \f$f(x)=max(0,x)\f$ */ + RELU = 3, + /** Leaky ReLU function: for x>0 \f$f(x)=x \f$ and x<=0 \f$f(x)=\alpha x \f$*/ + LEAKYRELU= 4 + }; + + /** Train options */ + enum TrainFlags { + /** Update the network weights, rather than compute them from scratch. In the latter case + the weights are initialized using the Nguyen-Widrow algorithm. */ + UPDATE_WEIGHTS = 1, + /** Do not normalize the input vectors. If this flag is not set, the training algorithm + normalizes each input feature independently, shifting its mean value to 0 and making the + standard deviation equal to 1. 
If the network is assumed to be updated frequently, the new + training data could be much different from original one. In this case, you should take care + of proper normalization. */ + NO_INPUT_SCALE = 2, + /** Do not normalize the output vectors. If the flag is not set, the training algorithm + normalizes each output feature independently, by transforming it to the certain range + depending on the used activation function. */ + NO_OUTPUT_SCALE = 4 + }; + + CV_WRAP virtual Mat getWeights(int layerIdx) const = 0; + + /** @brief Creates empty model + + Use StatModel::train to train the model, Algorithm::load\(filename) to load the pre-trained model. + Note that the train method has optional flags: ANN_MLP::TrainFlags. + */ + CV_WRAP static Ptr create(); + + /** @brief Loads and creates a serialized ANN from a file + * + * Use ANN::save to serialize and store an ANN to disk. + * Load the ANN from this file again, by calling this function with the path to the file. + * + * @param filepath path to serialized ANN + */ + CV_WRAP static Ptr load(const String& filepath); + +}; + +/****************************************************************************************\ +* Logistic Regression * +\****************************************************************************************/ + +/** @brief Implements Logistic Regression classifier. + +@sa @ref ml_intro_lr + */ +class CV_EXPORTS_W LogisticRegression : public StatModel +{ +public: + + /** Learning rate. */ + /** @see setLearningRate */ + CV_WRAP virtual double getLearningRate() const = 0; + /** @copybrief getLearningRate @see getLearningRate */ + CV_WRAP virtual void setLearningRate(double val) = 0; + + /** Number of iterations. */ + /** @see setIterations */ + CV_WRAP virtual int getIterations() const = 0; + /** @copybrief getIterations @see getIterations */ + CV_WRAP virtual void setIterations(int val) = 0; + + /** Kind of regularization to be applied. See LogisticRegression::RegKinds. */ + /** @see setRegularization */ + CV_WRAP virtual int getRegularization() const = 0; + /** @copybrief getRegularization @see getRegularization */ + CV_WRAP virtual void setRegularization(int val) = 0; + + /** Kind of training method used. See LogisticRegression::Methods. */ + /** @see setTrainMethod */ + CV_WRAP virtual int getTrainMethod() const = 0; + /** @copybrief getTrainMethod @see getTrainMethod */ + CV_WRAP virtual void setTrainMethod(int val) = 0; + + /** Specifies the number of training samples taken in each step of Mini-Batch Gradient + Descent. Will only be used if using LogisticRegression::MINI_BATCH training algorithm. It + has to take values less than the total number of training samples. */ + /** @see setMiniBatchSize */ + CV_WRAP virtual int getMiniBatchSize() const = 0; + /** @copybrief getMiniBatchSize @see getMiniBatchSize */ + CV_WRAP virtual void setMiniBatchSize(int val) = 0; + + /** Termination criteria of the algorithm. */ + /** @see setTermCriteria */ + CV_WRAP virtual TermCriteria getTermCriteria() const = 0; + /** @copybrief getTermCriteria @see getTermCriteria */ + CV_WRAP virtual void setTermCriteria(TermCriteria val) = 0; + + //! Regularization kinds + enum RegKinds { + REG_DISABLE = -1, //!< Regularization disabled + REG_L1 = 0, //!< %L1 norm + REG_L2 = 1 //!< %L2 norm + }; + + //! Training methods + enum Methods { + BATCH = 0, + MINI_BATCH = 1 //!< Set MiniBatchSize to a positive integer when using this method. + }; + + /** @brief Predicts responses for input samples and returns a float type. 
+ + @param samples The input data for the prediction algorithm. Matrix [m x n], where each row + contains variables (features) of one object being classified. Should have data type CV_32F. + @param results Predicted labels as a column matrix of type CV_32S. + @param flags Not used. + */ + CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const CV_OVERRIDE = 0; + + /** @brief This function returns the trained parameters arranged across rows. + + For a two class classification problem, it returns a row matrix. It returns learnt parameters of + the Logistic Regression as a matrix of type CV_32F. + */ + CV_WRAP virtual Mat get_learnt_thetas() const = 0; + + /** @brief Creates empty model. + + Creates Logistic Regression model with parameters given. + */ + CV_WRAP static Ptr create(); + + /** @brief Loads and creates a serialized LogisticRegression from a file + * + * Use LogisticRegression::save to serialize and store an LogisticRegression to disk. + * Load the LogisticRegression from this file again, by calling this function with the path to the file. + * Optionally specify the node for the file containing the classifier + * + * @param filepath path to serialized LogisticRegression + * @param nodeName name of node containing the classifier + */ + CV_WRAP static Ptr load(const String& filepath , const String& nodeName = String()); +}; + + +/****************************************************************************************\ +* Stochastic Gradient Descent SVM Classifier * +\****************************************************************************************/ + +/*! +@brief Stochastic Gradient Descent SVM classifier + +SVMSGD provides a fast and easy-to-use implementation of the SVM classifier using the Stochastic Gradient Descent approach, +as presented in @cite bottou2010large. + +The classifier has following parameters: +- model type, +- margin type, +- margin regularization (\f$\lambda\f$), +- initial step size (\f$\gamma_0\f$), +- step decreasing power (\f$c\f$), +- and termination criteria. + +The model type may have one of the following values: \ref SGD and \ref ASGD. + +- \ref SGD is the classic version of SVMSGD classifier: every next step is calculated by the formula + \f[w_{t+1} = w_t - \gamma(t) \frac{dQ_i}{dw} |_{w = w_t}\f] + where + - \f$w_t\f$ is the weights vector for decision function at step \f$t\f$, + - \f$\gamma(t)\f$ is the step size of model parameters at the iteration \f$t\f$, it is decreased on each step by the formula + \f$\gamma(t) = \gamma_0 (1 + \lambda \gamma_0 t) ^ {-c}\f$ + - \f$Q_i\f$ is the target functional from SVM task for sample with number \f$i\f$, this sample is chosen stochastically on each step of the algorithm. + +- \ref ASGD is Average Stochastic Gradient Descent SVM Classifier. ASGD classifier averages weights vector on each step of algorithm by the formula +\f$\widehat{w}_{t+1} = \frac{t}{1+t}\widehat{w}_{t} + \frac{1}{1+t}w_{t+1}\f$ + +The recommended model type is ASGD (following @cite bottou2010large). + +The margin type may have one of the following values: \ref SOFT_MARGIN or \ref HARD_MARGIN. + +- You should use \ref HARD_MARGIN type, if you have linearly separable sets. +- You should use \ref SOFT_MARGIN type, if you have non-linearly separable sets or sets with outliers. +- In the general case (if you know nothing about linear separability of your sets), use SOFT_MARGIN. 
+ +The other parameters may be described as follows: +- Margin regularization parameter is responsible for weights decreasing at each step and for the strength of restrictions on outliers + (the less the parameter, the less probability that an outlier will be ignored). + Recommended value for SGD model is 0.0001, for ASGD model is 0.00001. + +- Initial step size parameter is the initial value for the step size \f$\gamma(t)\f$. + You will have to find the best initial step for your problem. + +- Step decreasing power is the power parameter for \f$\gamma(t)\f$ decreasing by the formula, mentioned above. + Recommended value for SGD model is 1, for ASGD model is 0.75. + +- Termination criteria can be TermCriteria::COUNT, TermCriteria::EPS or TermCriteria::COUNT + TermCriteria::EPS. + You will have to find the best termination criteria for your problem. + +Note that the parameters margin regularization, initial step size, and step decreasing power should be positive. + +To use SVMSGD algorithm do as follows: + +- first, create the SVMSGD object. The algorithm will set optimal parameters by default, but you can set your own parameters via functions setSvmsgdType(), + setMarginType(), setMarginRegularization(), setInitialStepSize(), and setStepDecreasingPower(). + +- then the SVM model can be trained using the train features and the correspondent labels by the method train(). + +- after that, the label of a new feature vector can be predicted using the method predict(). + +@code +// Create empty object +cv::Ptr svmsgd = SVMSGD::create(); + +// Train the Stochastic Gradient Descent SVM +svmsgd->train(trainData); + +// Predict labels for the new samples +svmsgd->predict(samples, responses); +@endcode + +*/ + +class CV_EXPORTS_W SVMSGD : public cv::ml::StatModel +{ +public: + + /** SVMSGD type. + ASGD is often the preferable choice. */ + enum SvmsgdType + { + SGD, //!< Stochastic Gradient Descent + ASGD //!< Average Stochastic Gradient Descent + }; + + /** Margin type.*/ + enum MarginType + { + SOFT_MARGIN, //!< General case, suits to the case of non-linearly separable sets, allows outliers. + HARD_MARGIN //!< More accurate for the case of linearly separable sets. + }; + + /** + * @return the weights of the trained model (decision function f(x) = weights * x + shift). + */ + CV_WRAP virtual Mat getWeights() = 0; + + /** + * @return the shift of the trained model (decision function f(x) = weights * x + shift). + */ + CV_WRAP virtual float getShift() = 0; + + /** @brief Creates empty model. + * Use StatModel::train to train the model. Since %SVMSGD has several parameters, you may want to + * find the best parameters for your problem or use setOptimalParameters() to set some default parameters. + */ + CV_WRAP static Ptr create(); + + /** @brief Loads and creates a serialized SVMSGD from a file + * + * Use SVMSGD::save to serialize and store an SVMSGD to disk. + * Load the SVMSGD from this file again, by calling this function with the path to the file. + * Optionally specify the node for the file containing the classifier + * + * @param filepath path to serialized SVMSGD + * @param nodeName name of node containing the classifier + */ + CV_WRAP static Ptr load(const String& filepath , const String& nodeName = String()); + + /** @brief Function sets optimal parameters values for chosen SVM SGD model. + * @param svmsgdType is the type of SVMSGD classifier. + * @param marginType is the type of margin constraint. 
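 *
 * A typical call (a sketch; `svmsgd` is assumed to be an instance obtained from SVMSGD::create()):
 * @code
 * svmsgd->setOptimalParameters(SVMSGD::ASGD, SVMSGD::SOFT_MARGIN);
 * @endcode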
+ */ + CV_WRAP virtual void setOptimalParameters(int svmsgdType = SVMSGD::ASGD, int marginType = SVMSGD::SOFT_MARGIN) = 0; + + /** @brief %Algorithm type, one of SVMSGD::SvmsgdType. */ + /** @see setSvmsgdType */ + CV_WRAP virtual int getSvmsgdType() const = 0; + /** @copybrief getSvmsgdType @see getSvmsgdType */ + CV_WRAP virtual void setSvmsgdType(int svmsgdType) = 0; + + /** @brief %Margin type, one of SVMSGD::MarginType. */ + /** @see setMarginType */ + CV_WRAP virtual int getMarginType() const = 0; + /** @copybrief getMarginType @see getMarginType */ + CV_WRAP virtual void setMarginType(int marginType) = 0; + + /** @brief Parameter marginRegularization of a %SVMSGD optimization problem. */ + /** @see setMarginRegularization */ + CV_WRAP virtual float getMarginRegularization() const = 0; + /** @copybrief getMarginRegularization @see getMarginRegularization */ + CV_WRAP virtual void setMarginRegularization(float marginRegularization) = 0; + + /** @brief Parameter initialStepSize of a %SVMSGD optimization problem. */ + /** @see setInitialStepSize */ + CV_WRAP virtual float getInitialStepSize() const = 0; + /** @copybrief getInitialStepSize @see getInitialStepSize */ + CV_WRAP virtual void setInitialStepSize(float InitialStepSize) = 0; + + /** @brief Parameter stepDecreasingPower of a %SVMSGD optimization problem. */ + /** @see setStepDecreasingPower */ + CV_WRAP virtual float getStepDecreasingPower() const = 0; + /** @copybrief getStepDecreasingPower @see getStepDecreasingPower */ + CV_WRAP virtual void setStepDecreasingPower(float stepDecreasingPower) = 0; + + /** @brief Termination criteria of the training algorithm. + You can specify the maximum number of iterations (maxCount) and/or how much the error could + change between the iterations to make the algorithm continue (epsilon).*/ + /** @see setTermCriteria */ + CV_WRAP virtual TermCriteria getTermCriteria() const = 0; + /** @copybrief getTermCriteria @see getTermCriteria */ + CV_WRAP virtual void setTermCriteria(const cv::TermCriteria &val) = 0; +}; + + +/****************************************************************************************\ +* Auxiliary functions declarations * +\****************************************************************************************/ + +/** @brief Generates _sample_ from multivariate normal distribution + +@param mean an average row vector +@param cov symmetric covariation matrix +@param nsamples returned samples count +@param samples returned samples array +*/ +CV_EXPORTS void randMVNormal( InputArray mean, InputArray cov, int nsamples, OutputArray samples); + +/** @brief Creates test set */ +CV_EXPORTS void createConcentricSpheresTestSet( int nsamples, int nfeatures, int nclasses, + OutputArray samples, OutputArray responses); + +/** @brief Artificial Neural Networks - Multi-Layer Perceptrons. + +@sa @ref ml_intro_ann +*/ +class CV_EXPORTS_W ANN_MLP_ANNEAL : public ANN_MLP +{ +public: + /** @see setAnnealInitialT */ + CV_WRAP virtual double getAnnealInitialT() const = 0; + /** @copybrief getAnnealInitialT @see getAnnealInitialT */ + CV_WRAP virtual void setAnnealInitialT(double val) = 0; + + /** ANNEAL: Update final temperature. + It must be \>=0 and less than initialT. Default value is 0.1.*/ + /** @see setAnnealFinalT */ + CV_WRAP virtual double getAnnealFinalT() const = 0; + /** @copybrief getAnnealFinalT @see getAnnealFinalT */ + CV_WRAP virtual void setAnnealFinalT(double val) = 0; + + /** ANNEAL: Update cooling ratio. + It must be \>0 and less than 1. 
Default value is 0.95.*/ + /** @see setAnnealCoolingRatio */ + CV_WRAP virtual double getAnnealCoolingRatio() const = 0; + /** @copybrief getAnnealCoolingRatio @see getAnnealCoolingRatio */ + CV_WRAP virtual void setAnnealCoolingRatio(double val) = 0; + + /** ANNEAL: Update iteration per step. + It must be \>0 . Default value is 10.*/ + /** @see setAnnealItePerStep */ + CV_WRAP virtual int getAnnealItePerStep() const = 0; + /** @copybrief getAnnealItePerStep @see getAnnealItePerStep */ + CV_WRAP virtual void setAnnealItePerStep(int val) = 0; + + /** @brief Set/initialize anneal RNG */ + virtual void setAnnealEnergyRNG(const RNG& rng) = 0; +}; + + +/****************************************************************************************\ +* Simulated annealing solver * +\****************************************************************************************/ + +#ifdef CV_DOXYGEN +/** @brief This class declares example interface for system state used in simulated annealing optimization algorithm. + +@note This class is not defined in C++ code and can't be use directly - you need your own implementation with the same methods. +*/ +struct SimulatedAnnealingSolverSystem +{ + /** Give energy value for a state of system.*/ + double energy() const; + /** Function which change the state of system (random perturbation).*/ + void changeState(); + /** Function to reverse to the previous state. Can be called once only after changeState(). */ + void reverseState(); +}; +#endif // CV_DOXYGEN + +/** @brief The class implements simulated annealing for optimization. + +@cite Kirkpatrick83 for details + +@param solverSystem optimization system (see SimulatedAnnealingSolverSystem) +@param initialTemperature initial temperature +@param finalTemperature final temperature +@param coolingRatio temperature step multiplies +@param iterationsPerStep number of iterations per temperature changing step +@param lastTemperature optional output for last used temperature +@param rngEnergy specify custom random numbers generator (cv::theRNG() by default) +*/ +template +int simulatedAnnealingSolver(SimulatedAnnealingSolverSystem& solverSystem, + double initialTemperature, double finalTemperature, double coolingRatio, + size_t iterationsPerStep, + CV_OUT double* lastTemperature = NULL, + cv::RNG& rngEnergy = cv::theRNG() +); + +//! @} ml + +} +} + +#include + +#endif // __cplusplus +#endif // OPENCV_ML_HPP + +/* End of file. */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/ml/ml.hpp b/third_party/opencv/uos/sw_64/include/opencv2/ml/ml.hpp new file mode 100644 index 00000000..f6f9cd8f --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/ml/ml.hpp @@ -0,0 +1,48 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifdef __OPENCV_BUILD +#error this is a compatibility header which should not be used inside the OpenCV library +#endif + +#include "opencv2/ml.hpp" diff --git a/third_party/opencv/uos/sw_64/include/opencv2/ml/ml.inl.hpp b/third_party/opencv/uos/sw_64/include/opencv2/ml/ml.inl.hpp new file mode 100644 index 00000000..dc9c7839 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/ml/ml.inl.hpp @@ -0,0 +1,60 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
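The inline definition below drives a caller-supplied state object through the classic accept/reject
annealing loop. A minimal sketch of such a caller (hypothetical names; a toy 1-D quadratic is
minimized by random perturbation):

#include <opencv2/ml.hpp>

// Hypothetical example state: minimize f(x) = x^2 by random perturbation.
struct QuadraticSystem
{
    double x = 5.0, prev = 5.0;
    cv::RNG rng;
    double energy() const { return x * x; }                          // value to be minimized
    void changeState()  { prev = x; x += rng.uniform(-1.0, 1.0); }   // random move
    void reverseState() { x = prev; }                                // undo the last move
};

int runToyAnnealing()
{
    QuadraticSystem sys;
    // initial T = 100, final T = 0.1, cooling ratio = 0.95, 10 iterations per temperature step
    return cv::ml::simulatedAnnealingSolver(sys, 100.0, 0.1, 0.95, 10);
}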
+ +#ifndef OPENCV_ML_INL_HPP +#define OPENCV_ML_INL_HPP + +namespace cv { namespace ml { + +// declared in ml.hpp +template +int simulatedAnnealingSolver(SimulatedAnnealingSolverSystem& solverSystem, + double initialTemperature, double finalTemperature, double coolingRatio, + size_t iterationsPerStep, + CV_OUT double* lastTemperature, + cv::RNG& rngEnergy +) +{ + CV_Assert(finalTemperature > 0); + CV_Assert(initialTemperature > finalTemperature); + CV_Assert(iterationsPerStep > 0); + CV_Assert(coolingRatio < 1.0f); + double Ti = initialTemperature; + double previousEnergy = solverSystem.energy(); + int exchange = 0; + while (Ti > finalTemperature) + { + for (size_t i = 0; i < iterationsPerStep; i++) + { + solverSystem.changeState(); + double newEnergy = solverSystem.energy(); + if (newEnergy < previousEnergy) + { + previousEnergy = newEnergy; + exchange++; + } + else + { + double r = rngEnergy.uniform(0.0, 1.0); + if (r < std::exp(-(newEnergy - previousEnergy) / Ti)) + { + previousEnergy = newEnergy; + exchange++; + } + else + { + solverSystem.reverseState(); + } + } + } + Ti *= coolingRatio; + } + if (lastTemperature) + *lastTemperature = Ti; + return exchange; +} + +}} //namespace + +#endif // OPENCV_ML_INL_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/objdetect.hpp b/third_party/opencv/uos/sw_64/include/opencv2/objdetect.hpp new file mode 100644 index 00000000..0387b102 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/objdetect.hpp @@ -0,0 +1,840 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_OBJDETECT_HPP +#define OPENCV_OBJDETECT_HPP + +#include "opencv2/core.hpp" + +/** +@defgroup objdetect Object Detection + +Haar Feature-based Cascade Classifier for Object Detection +---------------------------------------------------------- + +The object detector described below has been initially proposed by Paul Viola @cite Viola01 and +improved by Rainer Lienhart @cite Lienhart02 . + +First, a classifier (namely a *cascade of boosted classifiers working with haar-like features*) is +trained with a few hundred sample views of a particular object (i.e., a face or a car), called +positive examples, that are scaled to the same size (say, 20x20), and negative examples - arbitrary +images of the same size. + +After a classifier is trained, it can be applied to a region of interest (of the same size as used +during the training) in an input image. The classifier outputs a "1" if the region is likely to show +the object (i.e., face/car), and "0" otherwise. To search for the object in the whole image one can +move the search window across the image and check every location using the classifier. The +classifier is designed so that it can be easily "resized" in order to be able to find the objects of +interest at different sizes, which is more efficient than resizing the image itself. So, to find an +object of an unknown size in the image the scan procedure should be done several times at different +scales. + +The word "cascade" in the classifier name means that the resultant classifier consists of several +simpler classifiers (*stages*) that are applied subsequently to a region of interest until at some +stage the candidate is rejected or all the stages are passed. The word "boosted" means that the +classifiers at every stage of the cascade are complex themselves and they are built out of basic +classifiers using one of four different boosting techniques (weighted voting). Currently Discrete +Adaboost, Real Adaboost, Gentle Adaboost and Logitboost are supported. The basic classifiers are +decision-tree classifiers with at least 2 leaves. Haar-like features are the input to the basic +classifiers, and are calculated as described below. The current algorithm uses the following +Haar-like features: + +![image](pics/haarfeatures.png) + +The feature used in a particular classifier is specified by its shape (1a, 2b etc.), position within +the region of interest and the scale (this scale is not the same as the scale used at the detection +stage, though these two scales are multiplied). For example, in the case of the third line feature +(2c) the response is calculated as the difference between the sum of image pixels under the +rectangle covering the whole feature (including the two white stripes and the black stripe in the +middle) and the sum of the image pixels under the black stripe multiplied by 3 in order to +compensate for the differences in the size of areas. 
The sums of pixel values over a rectangular +regions are calculated rapidly using integral images (see below and the integral description). + +To see the object detector at work, have a look at the facedetect demo: + + +The following reference is for the detection part only. There is a separate application called +opencv_traincascade that can train a cascade of boosted classifiers from a set of samples. + +@note In the new C++ interface it is also possible to use LBP (local binary pattern) features in +addition to Haar-like features. .. [Viola01] Paul Viola and Michael J. Jones. Rapid Object Detection +using a Boosted Cascade of Simple Features. IEEE CVPR, 2001. The paper is available online at + + +@{ + @defgroup objdetect_c C API +@} + */ + +typedef struct CvHaarClassifierCascade CvHaarClassifierCascade; + +namespace cv +{ + +//! @addtogroup objdetect +//! @{ + +///////////////////////////// Object Detection //////////////////////////// + +//! class for grouping object candidates, detected by Cascade Classifier, HOG etc. +//! instance of the class is to be passed to cv::partition (see cxoperations.hpp) +class CV_EXPORTS SimilarRects +{ +public: + SimilarRects(double _eps) : eps(_eps) {} + inline bool operator()(const Rect& r1, const Rect& r2) const + { + double delta = eps * ((std::min)(r1.width, r2.width) + (std::min)(r1.height, r2.height)) * 0.5; + return std::abs(r1.x - r2.x) <= delta && + std::abs(r1.y - r2.y) <= delta && + std::abs(r1.x + r1.width - r2.x - r2.width) <= delta && + std::abs(r1.y + r1.height - r2.y - r2.height) <= delta; + } + double eps; +}; + +/** @brief Groups the object candidate rectangles. + +@param rectList Input/output vector of rectangles. Output vector includes retained and grouped +rectangles. (The Python list is not modified in place.) +@param groupThreshold Minimum possible number of rectangles minus 1. The threshold is used in a +group of rectangles to retain it. +@param eps Relative difference between sides of the rectangles to merge them into a group. + +The function is a wrapper for the generic function partition . It clusters all the input rectangles +using the rectangle equivalence criteria that combines rectangles with similar sizes and similar +locations. The similarity is defined by eps. When eps=0 , no clustering is done at all. If +\f$\texttt{eps}\rightarrow +\inf\f$ , all the rectangles are put in one cluster. Then, the small +clusters containing less than or equal to groupThreshold rectangles are rejected. In each other +cluster, the average rectangle is computed and put into the output rectangle list. 
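
A minimal call might look like this (a sketch; `boxes` is assumed to already hold the raw candidate
rectangles produced by a detector, and runDetector() is a hypothetical helper):
@code
std::vector<cv::Rect> boxes = runDetector(image);
cv::groupRectangles(boxes, 2, 0.2);   // keep only clusters of at least 3 similar rectangles
@endcode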
+ */ +CV_EXPORTS void groupRectangles(std::vector& rectList, int groupThreshold, double eps = 0.2); +/** @overload */ +CV_EXPORTS_W void groupRectangles(CV_IN_OUT std::vector& rectList, CV_OUT std::vector& weights, + int groupThreshold, double eps = 0.2); +/** @overload */ +CV_EXPORTS void groupRectangles(std::vector& rectList, int groupThreshold, + double eps, std::vector* weights, std::vector* levelWeights ); +/** @overload */ +CV_EXPORTS void groupRectangles(std::vector& rectList, std::vector& rejectLevels, + std::vector& levelWeights, int groupThreshold, double eps = 0.2); +/** @overload */ +CV_EXPORTS void groupRectangles_meanshift(std::vector& rectList, std::vector& foundWeights, + std::vector& foundScales, + double detectThreshold = 0.0, Size winDetSize = Size(64, 128)); + +template<> CV_EXPORTS void DefaultDeleter::operator ()(CvHaarClassifierCascade* obj) const; + +enum { CASCADE_DO_CANNY_PRUNING = 1, + CASCADE_SCALE_IMAGE = 2, + CASCADE_FIND_BIGGEST_OBJECT = 4, + CASCADE_DO_ROUGH_SEARCH = 8 + }; + +class CV_EXPORTS_W BaseCascadeClassifier : public Algorithm +{ +public: + virtual ~BaseCascadeClassifier(); + virtual bool empty() const CV_OVERRIDE = 0; + virtual bool load( const String& filename ) = 0; + virtual void detectMultiScale( InputArray image, + CV_OUT std::vector& objects, + double scaleFactor, + int minNeighbors, int flags, + Size minSize, Size maxSize ) = 0; + + virtual void detectMultiScale( InputArray image, + CV_OUT std::vector& objects, + CV_OUT std::vector& numDetections, + double scaleFactor, + int minNeighbors, int flags, + Size minSize, Size maxSize ) = 0; + + virtual void detectMultiScale( InputArray image, + CV_OUT std::vector& objects, + CV_OUT std::vector& rejectLevels, + CV_OUT std::vector& levelWeights, + double scaleFactor, + int minNeighbors, int flags, + Size minSize, Size maxSize, + bool outputRejectLevels ) = 0; + + virtual bool isOldFormatCascade() const = 0; + virtual Size getOriginalWindowSize() const = 0; + virtual int getFeatureType() const = 0; + virtual void* getOldCascade() = 0; + + class CV_EXPORTS MaskGenerator + { + public: + virtual ~MaskGenerator() {} + virtual Mat generateMask(const Mat& src)=0; + virtual void initializeMask(const Mat& /*src*/) { } + }; + virtual void setMaskGenerator(const Ptr& maskGenerator) = 0; + virtual Ptr getMaskGenerator() = 0; +}; + +/** @example samples/cpp/facedetect.cpp +This program demonstrates usage of the Cascade classifier class +\image html Cascade_Classifier_Tutorial_Result_Haar.jpg "Sample screenshot" width=321 height=254 +*/ +/** @brief Cascade classifier class for object detection. + */ +class CV_EXPORTS_W CascadeClassifier +{ +public: + CV_WRAP CascadeClassifier(); + /** @brief Loads a classifier from a file. + + @param filename Name of the file from which the classifier is loaded. + */ + CV_WRAP CascadeClassifier(const String& filename); + ~CascadeClassifier(); + /** @brief Checks whether the classifier has been loaded. + */ + CV_WRAP bool empty() const; + /** @brief Loads a classifier from a file. + + @param filename Name of the file from which the classifier is loaded. The file may contain an old + HAAR classifier trained by the haartraining application or a new cascade classifier trained by the + traincascade application. + */ + CV_WRAP bool load( const String& filename ); + /** @brief Reads a classifier from a FileStorage node. + + @note The file may contain a new cascade classifier (trained traincascade application) only. 
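
    For example (a sketch; "cascade.xml" is a placeholder path to a traincascade-format file):
    @code
    cv::FileStorage fs("cascade.xml", cv::FileStorage::READ);
    cv::CascadeClassifier classifier;
    bool ok = classifier.read(fs.getFirstTopLevelNode());
    @endcode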
+ */ + CV_WRAP bool read( const FileNode& node ); + + /** @brief Detects objects of different sizes in the input image. The detected objects are returned as a list + of rectangles. + + @param image Matrix of the type CV_8U containing an image where objects are detected. + @param objects Vector of rectangles where each rectangle contains the detected object, the + rectangles may be partially outside the original image. + @param scaleFactor Parameter specifying how much the image size is reduced at each image scale. + @param minNeighbors Parameter specifying how many neighbors each candidate rectangle should have + to retain it. + @param flags Parameter with the same meaning for an old cascade as in the function + cvHaarDetectObjects. It is not used for a new cascade. + @param minSize Minimum possible object size. Objects smaller than that are ignored. + @param maxSize Maximum possible object size. Objects larger than that are ignored. If `maxSize == minSize` model is evaluated on single scale. + + The function is parallelized with the TBB library. + + @note + - (Python) A face detection example using cascade classifiers can be found at + opencv_source_code/samples/python/facedetect.py + */ + CV_WRAP void detectMultiScale( InputArray image, + CV_OUT std::vector& objects, + double scaleFactor = 1.1, + int minNeighbors = 3, int flags = 0, + Size minSize = Size(), + Size maxSize = Size() ); + + /** @overload + @param image Matrix of the type CV_8U containing an image where objects are detected. + @param objects Vector of rectangles where each rectangle contains the detected object, the + rectangles may be partially outside the original image. + @param numDetections Vector of detection numbers for the corresponding objects. An object's number + of detections is the number of neighboring positively classified rectangles that were joined + together to form the object. + @param scaleFactor Parameter specifying how much the image size is reduced at each image scale. + @param minNeighbors Parameter specifying how many neighbors each candidate rectangle should have + to retain it. + @param flags Parameter with the same meaning for an old cascade as in the function + cvHaarDetectObjects. It is not used for a new cascade. + @param minSize Minimum possible object size. Objects smaller than that are ignored. + @param maxSize Maximum possible object size. Objects larger than that are ignored. If `maxSize == minSize` model is evaluated on single scale. + */ + CV_WRAP_AS(detectMultiScale2) void detectMultiScale( InputArray image, + CV_OUT std::vector& objects, + CV_OUT std::vector& numDetections, + double scaleFactor=1.1, + int minNeighbors=3, int flags=0, + Size minSize=Size(), + Size maxSize=Size() ); + + /** @overload + This function allows you to retrieve the final stage decision certainty of classification. + For this, one needs to set `outputRejectLevels` on true and provide the `rejectLevels` and `levelWeights` parameter. + For each resulting detection, `levelWeights` will then contain the certainty of classification at the final stage. + This value can then be used to separate strong from weaker classifications. 
+ + A code sample on how to use it efficiently can be found below: + @code + Mat img; + vector weights; + vector levels; + vector detections; + CascadeClassifier model("/path/to/your/model.xml"); + model.detectMultiScale(img, detections, levels, weights, 1.1, 3, 0, Size(), Size(), true); + cerr << "Detection " << detections[0] << " with weight " << weights[0] << endl; + @endcode + */ + CV_WRAP_AS(detectMultiScale3) void detectMultiScale( InputArray image, + CV_OUT std::vector& objects, + CV_OUT std::vector& rejectLevels, + CV_OUT std::vector& levelWeights, + double scaleFactor = 1.1, + int minNeighbors = 3, int flags = 0, + Size minSize = Size(), + Size maxSize = Size(), + bool outputRejectLevels = false ); + + CV_WRAP bool isOldFormatCascade() const; + CV_WRAP Size getOriginalWindowSize() const; + CV_WRAP int getFeatureType() const; + void* getOldCascade(); + + CV_WRAP static bool convert(const String& oldcascade, const String& newcascade); + + void setMaskGenerator(const Ptr& maskGenerator); + Ptr getMaskGenerator(); + + Ptr cc; +}; + +CV_EXPORTS Ptr createFaceDetectionMaskGenerator(); + +//////////////// HOG (Histogram-of-Oriented-Gradients) Descriptor and Object Detector ////////////// + +//! struct for detection region of interest (ROI) +struct DetectionROI +{ + //! scale(size) of the bounding box + double scale; + //! set of requested locations to be evaluated + std::vector locations; + //! vector that will contain confidence values for each location + std::vector confidences; +}; + +/**@brief Implementation of HOG (Histogram of Oriented Gradients) descriptor and object detector. + +the HOG descriptor algorithm introduced by Navneet Dalal and Bill Triggs @cite Dalal2005 . + +useful links: + +https://hal.inria.fr/inria-00548512/document/ + +https://en.wikipedia.org/wiki/Histogram_of_oriented_gradients + +https://software.intel.com/en-us/ipp-dev-reference-histogram-of-oriented-gradients-hog-descriptor + +http://www.learnopencv.com/histogram-of-oriented-gradients + +http://www.learnopencv.com/handwritten-digits-classification-an-opencv-c-python-tutorial + + */ +struct CV_EXPORTS_W HOGDescriptor +{ +public: + enum { L2Hys = 0 //!< Default histogramNormType + }; + enum { DEFAULT_NLEVELS = 64 //!< Default nlevels value. + }; + /**@brief Creates the HOG descriptor and detector with default params. + + aqual to HOGDescriptor(Size(64,128), Size(16,16), Size(8,8), Size(8,8), 9, 1 ) + */ + CV_WRAP HOGDescriptor() : winSize(64,128), blockSize(16,16), blockStride(8,8), + cellSize(8,8), nbins(9), derivAperture(1), winSigma(-1), + histogramNormType(HOGDescriptor::L2Hys), L2HysThreshold(0.2), gammaCorrection(true), + free_coef(-1.f), nlevels(HOGDescriptor::DEFAULT_NLEVELS), signedGradient(false) + {} + + /** @overload + @param _winSize sets winSize with given value. + @param _blockSize sets blockSize with given value. + @param _blockStride sets blockStride with given value. + @param _cellSize sets cellSize with given value. + @param _nbins sets nbins with given value. + @param _derivAperture sets derivAperture with given value. + @param _winSigma sets winSigma with given value. + @param _histogramNormType sets histogramNormType with given value. + @param _L2HysThreshold sets L2HysThreshold with given value. + @param _gammaCorrection sets gammaCorrection with given value. + @param _nlevels sets nlevels with given value. + @param _signedGradient sets signedGradient with given value. 
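
    For instance, a descriptor with the standard 64x128 people-detection window geometry can be
    constructed explicitly (a sketch using the default people detector shipped with OpenCV):
    @code
    cv::HOGDescriptor hog(cv::Size(64, 128), cv::Size(16, 16), cv::Size(8, 8),
                          cv::Size(8, 8), 9);
    hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());
    @endcode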
+ */ + CV_WRAP HOGDescriptor(Size _winSize, Size _blockSize, Size _blockStride, + Size _cellSize, int _nbins, int _derivAperture=1, double _winSigma=-1, + int _histogramNormType=HOGDescriptor::L2Hys, + double _L2HysThreshold=0.2, bool _gammaCorrection=false, + int _nlevels=HOGDescriptor::DEFAULT_NLEVELS, bool _signedGradient=false) + : winSize(_winSize), blockSize(_blockSize), blockStride(_blockStride), cellSize(_cellSize), + nbins(_nbins), derivAperture(_derivAperture), winSigma(_winSigma), + histogramNormType(_histogramNormType), L2HysThreshold(_L2HysThreshold), + gammaCorrection(_gammaCorrection), free_coef(-1.f), nlevels(_nlevels), signedGradient(_signedGradient) + {} + + /** @overload + @param filename the file name containing HOGDescriptor properties and coefficients of the trained classifier + */ + CV_WRAP HOGDescriptor(const String& filename) + { + load(filename); + } + + /** @overload + @param d the HOGDescriptor which cloned to create a new one. + */ + HOGDescriptor(const HOGDescriptor& d) + { + d.copyTo(*this); + } + + /**@brief Default destructor. + */ + virtual ~HOGDescriptor() {} + + /**@brief Returns the number of coefficients required for the classification. + */ + CV_WRAP size_t getDescriptorSize() const; + + /** @brief Checks if detector size equal to descriptor size. + */ + CV_WRAP bool checkDetectorSize() const; + + /** @brief Returns winSigma value + */ + CV_WRAP double getWinSigma() const; + + /**@example samples/cpp/peopledetect.cpp + */ + /**@brief Sets coefficients for the linear SVM classifier. + @param _svmdetector coefficients for the linear SVM classifier. + */ + CV_WRAP virtual void setSVMDetector(InputArray _svmdetector); + + /** @brief Reads HOGDescriptor parameters from a file node. + @param fn File node + */ + virtual bool read(FileNode& fn); + + /** @brief Stores HOGDescriptor parameters in a file storage. + @param fs File storage + @param objname Object name + */ + virtual void write(FileStorage& fs, const String& objname) const; + + /** @brief loads coefficients for the linear SVM classifier from a file + @param filename Name of the file to read. + @param objname The optional name of the node to read (if empty, the first top-level node will be used). + */ + CV_WRAP virtual bool load(const String& filename, const String& objname = String()); + + /** @brief saves coefficients for the linear SVM classifier to a file + @param filename File name + @param objname Object name + */ + CV_WRAP virtual void save(const String& filename, const String& objname = String()) const; + + /** @brief clones the HOGDescriptor + @param c cloned HOGDescriptor + */ + virtual void copyTo(HOGDescriptor& c) const; + + /**@example samples/cpp/train_HOG.cpp + */ + /** @brief Computes HOG descriptors of given image. + @param img Matrix of the type CV_8U containing an image where HOG features will be calculated. + @param descriptors Matrix of the type CV_32F + @param winStride Window stride. It must be a multiple of block stride. + @param padding Padding + @param locations Vector of Point + */ + CV_WRAP virtual void compute(InputArray img, + CV_OUT std::vector& descriptors, + Size winStride = Size(), Size padding = Size(), + const std::vector& locations = std::vector()) const; + + /** @brief Performs object detection without a multi-scale window. + @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected. + @param foundLocations Vector of point where each point contains left-top corner point of detected object boundaries. 
+ @param weights Vector that will contain confidence values for each detected object. + @param hitThreshold Threshold for the distance between features and SVM classifying plane. + Usually it is 0 and should be specified in the detector coefficients (as the last free coefficient). + But if the free coefficient is omitted (which is allowed), you can specify it manually here. + @param winStride Window stride. It must be a multiple of block stride. + @param padding Padding + @param searchLocations Vector of Point includes set of requested locations to be evaluated. + */ + CV_WRAP virtual void detect(const Mat& img, CV_OUT std::vector& foundLocations, + CV_OUT std::vector& weights, + double hitThreshold = 0, Size winStride = Size(), + Size padding = Size(), + const std::vector& searchLocations = std::vector()) const; + + /** @brief Performs object detection without a multi-scale window. + @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected. + @param foundLocations Vector of point where each point contains left-top corner point of detected object boundaries. + @param hitThreshold Threshold for the distance between features and SVM classifying plane. + Usually it is 0 and should be specified in the detector coefficients (as the last free coefficient). + But if the free coefficient is omitted (which is allowed), you can specify it manually here. + @param winStride Window stride. It must be a multiple of block stride. + @param padding Padding + @param searchLocations Vector of Point includes locations to search. + */ + virtual void detect(const Mat& img, CV_OUT std::vector& foundLocations, + double hitThreshold = 0, Size winStride = Size(), + Size padding = Size(), + const std::vector& searchLocations=std::vector()) const; + + /** @brief Detects objects of different sizes in the input image. The detected objects are returned as a list + of rectangles. + @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected. + @param foundLocations Vector of rectangles where each rectangle contains the detected object. + @param foundWeights Vector that will contain confidence values for each detected object. + @param hitThreshold Threshold for the distance between features and SVM classifying plane. + Usually it is 0 and should be specified in the detector coefficients (as the last free coefficient). + But if the free coefficient is omitted (which is allowed), you can specify it manually here. + @param winStride Window stride. It must be a multiple of block stride. + @param padding Padding + @param scale Coefficient of the detection window increase. + @param finalThreshold Final threshold + @param useMeanshiftGrouping indicates grouping algorithm + */ + CV_WRAP virtual void detectMultiScale(InputArray img, CV_OUT std::vector& foundLocations, + CV_OUT std::vector& foundWeights, double hitThreshold = 0, + Size winStride = Size(), Size padding = Size(), double scale = 1.05, + double finalThreshold = 2.0,bool useMeanshiftGrouping = false) const; + + /** @brief Detects objects of different sizes in the input image. The detected objects are returned as a list + of rectangles. + @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected. + @param foundLocations Vector of rectangles where each rectangle contains the detected object. + @param hitThreshold Threshold for the distance between features and SVM classifying plane. 
+ Usually it is 0 and should be specified in the detector coefficients (as the last free coefficient). + But if the free coefficient is omitted (which is allowed), you can specify it manually here. + @param winStride Window stride. It must be a multiple of block stride. + @param padding Padding + @param scale Coefficient of the detection window increase. + @param finalThreshold Final threshold + @param useMeanshiftGrouping indicates grouping algorithm + */ + virtual void detectMultiScale(InputArray img, CV_OUT std::vector& foundLocations, + double hitThreshold = 0, Size winStride = Size(), + Size padding = Size(), double scale = 1.05, + double finalThreshold = 2.0, bool useMeanshiftGrouping = false) const; + + /** @brief Computes gradients and quantized gradient orientations. + @param img Matrix contains the image to be computed + @param grad Matrix of type CV_32FC2 contains computed gradients + @param angleOfs Matrix of type CV_8UC2 contains quantized gradient orientations + @param paddingTL Padding from top-left + @param paddingBR Padding from bottom-right + */ + CV_WRAP virtual void computeGradient(const Mat& img, CV_OUT Mat& grad, CV_OUT Mat& angleOfs, + Size paddingTL = Size(), Size paddingBR = Size()) const; + + /** @brief Returns coefficients of the classifier trained for people detection (for 64x128 windows). + */ + CV_WRAP static std::vector getDefaultPeopleDetector(); + + /**@example samples/tapi/hog.cpp + */ + /** @brief Returns coefficients of the classifier trained for people detection (for 48x96 windows). + */ + CV_WRAP static std::vector getDaimlerPeopleDetector(); + + //! Detection window size. Align to block size and block stride. Default value is Size(64,128). + CV_PROP Size winSize; + + //! Block size in pixels. Align to cell size. Default value is Size(16,16). + CV_PROP Size blockSize; + + //! Block stride. It must be a multiple of cell size. Default value is Size(8,8). + CV_PROP Size blockStride; + + //! Cell size. Default value is Size(8,8). + CV_PROP Size cellSize; + + //! Number of bins used in the calculation of histogram of gradients. Default value is 9. + CV_PROP int nbins; + + //! not documented + CV_PROP int derivAperture; + + //! Gaussian smoothing window parameter. + CV_PROP double winSigma; + + //! histogramNormType + CV_PROP int histogramNormType; + + //! L2-Hys normalization method shrinkage. + CV_PROP double L2HysThreshold; + + //! Flag to specify whether the gamma correction preprocessing is required or not. + CV_PROP bool gammaCorrection; + + //! coefficients for the linear SVM classifier. + CV_PROP std::vector svmDetector; + + //! coefficients for the linear SVM classifier used when OpenCL is enabled + UMat oclSvmDetector; + + //! not documented + float free_coef; + + //! Maximum number of detection window increases. Default value is 64 + CV_PROP int nlevels; + + //! Indicates signed gradient will be used or not + CV_PROP bool signedGradient; + + /** @brief evaluate specified ROI and return confidence value for each location + @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected. + @param locations Vector of Point + @param foundLocations Vector of Point where each Point is detected object's top-left point. + @param confidences confidences + @param hitThreshold Threshold for the distance between features and SVM classifying plane. Usually + it is 0 and should be specified in the detector coefficients (as the last free coefficient). 
But if + the free coefficient is omitted (which is allowed), you can specify it manually here + @param winStride winStride + @param padding padding + */ + virtual void detectROI(const cv::Mat& img, const std::vector &locations, + CV_OUT std::vector& foundLocations, CV_OUT std::vector& confidences, + double hitThreshold = 0, cv::Size winStride = Size(), + cv::Size padding = Size()) const; + + /** @brief evaluate specified ROI and return confidence value for each location in multiple scales + @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected. + @param foundLocations Vector of rectangles where each rectangle contains the detected object. + @param locations Vector of DetectionROI + @param hitThreshold Threshold for the distance between features and SVM classifying plane. Usually it is 0 and should be specified + in the detector coefficients (as the last free coefficient). But if the free coefficient is omitted (which is allowed), you can specify it manually here. + @param groupThreshold Minimum possible number of rectangles minus 1. The threshold is used in a group of rectangles to retain it. + */ + virtual void detectMultiScaleROI(const cv::Mat& img, + CV_OUT std::vector& foundLocations, + std::vector& locations, + double hitThreshold = 0, + int groupThreshold = 0) const; + + /** @brief read/parse Dalal's alt model file + @param modelfile Path of Dalal's alt model file. + */ + void readALTModel(String modelfile); + + /** @brief Groups the object candidate rectangles. + @param rectList Input/output vector of rectangles. Output vector includes retained and grouped rectangles. (The Python list is not modified in place.) + @param weights Input/output vector of weights of rectangles. Output vector includes weights of retained and grouped rectangles. (The Python list is not modified in place.) + @param groupThreshold Minimum possible number of rectangles minus 1. The threshold is used in a group of rectangles to retain it. + @param eps Relative difference between sides of the rectangles to merge them into a group. + */ + void groupRectangles(std::vector& rectList, std::vector& weights, int groupThreshold, double eps) const; +}; + +class CV_EXPORTS_W QRCodeDetector +{ +public: + CV_WRAP QRCodeDetector(); + ~QRCodeDetector(); + + /** @brief sets the epsilon used during the horizontal scan of QR code stop marker detection. + @param epsX Epsilon neighborhood, which allows you to determine the horizontal pattern + of the scheme 1:1:3:1:1 according to QR code standard. + */ + CV_WRAP void setEpsX(double epsX); + /** @brief sets the epsilon used during the vertical scan of QR code stop marker detection. + @param epsY Epsilon neighborhood, which allows you to determine the vertical pattern + of the scheme 1:1:3:1:1 according to QR code standard. + */ + CV_WRAP void setEpsY(double epsY); + + /** @brief Detects QR code in image and returns the quadrangle containing the code. + @param img grayscale or color (BGR) image containing (or not) QR code. + @param points Output vector of vertices of the minimum-area quadrangle containing the code. + */ + CV_WRAP bool detect(InputArray img, OutputArray points) const; + + /** @brief Decodes QR code in image once it's found by the detect() method. + + Returns UTF8-encoded output string or empty string if the code cannot be decoded. + @param img grayscale or color (BGR) image containing QR code. + @param points Quadrangle vertices found by detect() method (or some other algorithm). 
+ @param straight_qrcode The optional output image containing rectified and binarized QR code + */ + CV_WRAP cv::String decode(InputArray img, InputArray points, OutputArray straight_qrcode = noArray()); + + /** @brief Decodes QR code on a curved surface in image once it's found by the detect() method. + + Returns UTF8-encoded output string or empty string if the code cannot be decoded. + @param img grayscale or color (BGR) image containing QR code. + @param points Quadrangle vertices found by detect() method (or some other algorithm). + @param straight_qrcode The optional output image containing rectified and binarized QR code + */ + CV_WRAP cv::String decodeCurved(InputArray img, InputArray points, OutputArray straight_qrcode = noArray()); + + /** @brief Both detects and decodes QR code + + @param img grayscale or color (BGR) image containing QR code. + @param points optional output array of vertices of the found QR code quadrangle. Will be empty if not found. + @param straight_qrcode The optional output image containing rectified and binarized QR code + */ + CV_WRAP cv::String detectAndDecode(InputArray img, OutputArray points=noArray(), + OutputArray straight_qrcode = noArray()); + + /** @brief Both detects and decodes QR code on a curved surface + + @param img grayscale or color (BGR) image containing QR code. + @param points optional output array of vertices of the found QR code quadrangle. Will be empty if not found. + @param straight_qrcode The optional output image containing rectified and binarized QR code + */ + CV_WRAP cv::String detectAndDecodeCurved(InputArray img, OutputArray points=noArray(), + OutputArray straight_qrcode = noArray()); + + /** @brief Detects QR codes in image and returns the vector of the quadrangles containing the codes. + @param img grayscale or color (BGR) image containing (or not) QR codes. + @param points Output vector of vector of vertices of the minimum-area quadrangle containing the codes. + */ + CV_WRAP + bool detectMulti(InputArray img, OutputArray points) const; + + /** @brief Decodes QR codes in image once it's found by the detect() method. + @param img grayscale or color (BGR) image containing QR codes. + @param decoded_info UTF8-encoded output vector of string or empty vector of string if the codes cannot be decoded. + @param points vector of Quadrangle vertices found by detect() method (or some other algorithm). + @param straight_qrcode The optional output vector of images containing rectified and binarized QR codes + */ + CV_WRAP + bool decodeMulti( + InputArray img, InputArray points, + CV_OUT std::vector& decoded_info, + OutputArrayOfArrays straight_qrcode = noArray() + ) const; + + /** @brief Both detects and decodes QR codes + @param img grayscale or color (BGR) image containing QR codes. + @param decoded_info UTF8-encoded output vector of string or empty vector of string if the codes cannot be decoded. + @param points optional output vector of vertices of the found QR code quadrangles. Will be empty if not found. 
+ @param straight_qrcode The optional output vector of images containing rectified and binarized QR codes + */ + CV_WRAP + bool detectAndDecodeMulti( + InputArray img, CV_OUT std::vector& decoded_info, + OutputArray points = noArray(), + OutputArrayOfArrays straight_qrcode = noArray() + ) const; + + +#ifndef CV_DOXYGEN // COMPATIBILITY + inline bool decodeMulti( + InputArray img, InputArray points, + CV_OUT std::vector& decoded_info, + OutputArrayOfArrays straight_qrcode = noArray() + ) const + { + std::vector decoded_info_; + bool res = decodeMulti(img, points, decoded_info_, straight_qrcode); + decoded_info.resize(decoded_info_.size()); + for (size_t i = 0; i < decoded_info.size(); ++i) + { + cv::String s; std::swap(s, decoded_info_[i]); + decoded_info[i] = s; + } + return res; + } + + inline bool detectAndDecodeMulti( + InputArray img, CV_OUT std::vector& decoded_info, + OutputArray points = noArray(), + OutputArrayOfArrays straight_qrcode = noArray() + ) const + { + std::vector decoded_info_; + bool res = detectAndDecodeMulti(img, decoded_info_, points, straight_qrcode); + decoded_info.resize(decoded_info_.size()); + for (size_t i = 0; i < decoded_info.size(); ++i) + { + cv::String s; std::swap(s, decoded_info_[i]); + decoded_info[i] = s; + } + return res; + } +#endif + +protected: + struct Impl; + Ptr p; +}; + +/** @brief Detect QR code in image and return minimum area of quadrangle that describes QR code. + @param in Matrix of the type CV_8UC1 containing an image where QR code are detected. + @param points Output vector of vertices of a quadrangle of minimal area that describes QR code. + @param eps_x Epsilon neighborhood, which allows you to determine the horizontal pattern of the scheme 1:1:3:1:1 according to QR code standard. + @param eps_y Epsilon neighborhood, which allows you to determine the vertical pattern of the scheme 1:1:3:1:1 according to QR code standard. + */ +CV_EXPORTS bool detectQRCode(InputArray in, std::vector &points, double eps_x = 0.2, double eps_y = 0.1); + +/** @brief Decode QR code in image and return text that is encrypted in QR code. + @param in Matrix of the type CV_8UC1 containing an image where QR code are detected. + @param points Input vector of vertices of a quadrangle of minimal area that describes QR code. + @param decoded_info String information that is encrypted in QR code. + @param straight_qrcode Matrix of the type CV_8UC1 containing an binary straight QR code. + */ +CV_EXPORTS bool decodeQRCode(InputArray in, InputArray points, std::string &decoded_info, OutputArray straight_qrcode = noArray()); + +/** @brief Decode QR code on a curved surface in image and return text that is encrypted in QR code. + @param in Matrix of the type CV_8UC1 containing an image where QR code are detected. + @param points Input vector of vertices of a quadrangle of minimal area that describes QR code. + @param decoded_info String information that is encrypted in QR code. + @param straight_qrcode Matrix of the type CV_8UC1 containing an binary straight QR code. + */ +CV_EXPORTS bool decodeCurvedQRCode(InputArray in, InputArray points, std::string &decoded_info, OutputArray straight_qrcode = noArray()); + +//! 
@} objdetect +} + +#include "opencv2/objdetect/detection_based_tracker.hpp" + +#ifndef DISABLE_OPENCV_24_COMPATIBILITY +#include "opencv2/objdetect/objdetect_c.h" +#endif + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/objdetect/detection_based_tracker.hpp b/third_party/opencv/uos/sw_64/include/opencv2/objdetect/detection_based_tracker.hpp new file mode 100644 index 00000000..07dd5874 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/objdetect/detection_based_tracker.hpp @@ -0,0 +1,227 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_OBJDETECT_DBT_HPP +#define OPENCV_OBJDETECT_DBT_HPP + +#include + +// After this condition removal update blacklist for bindings: modules/python/common.cmake +#if defined(__linux__) || defined(LINUX) || defined(__APPLE__) || defined(__ANDROID__) || \ + defined(CV_CXX11) + +#include + +namespace cv +{ + +//! @addtogroup objdetect +//! 
@{ + +class CV_EXPORTS DetectionBasedTracker +{ + public: + struct CV_EXPORTS Parameters + { + int maxTrackLifetime; + int minDetectionPeriod; //the minimal time between run of the big object detector (on the whole frame) in ms (1000 mean 1 sec), default=0 + + Parameters(); + }; + + class IDetector + { + public: + IDetector(): + minObjSize(96, 96), + maxObjSize(INT_MAX, INT_MAX), + minNeighbours(2), + scaleFactor(1.1f) + {} + + virtual void detect(const cv::Mat& image, std::vector& objects) = 0; + + void setMinObjectSize(const cv::Size& min) + { + minObjSize = min; + } + void setMaxObjectSize(const cv::Size& max) + { + maxObjSize = max; + } + cv::Size getMinObjectSize() const + { + return minObjSize; + } + cv::Size getMaxObjectSize() const + { + return maxObjSize; + } + float getScaleFactor() + { + return scaleFactor; + } + void setScaleFactor(float value) + { + scaleFactor = value; + } + int getMinNeighbours() + { + return minNeighbours; + } + void setMinNeighbours(int value) + { + minNeighbours = value; + } + virtual ~IDetector() {} + + protected: + cv::Size minObjSize; + cv::Size maxObjSize; + int minNeighbours; + float scaleFactor; + }; + + DetectionBasedTracker(cv::Ptr mainDetector, cv::Ptr trackingDetector, const Parameters& params); + virtual ~DetectionBasedTracker(); + + virtual bool run(); + virtual void stop(); + virtual void resetTracking(); + + virtual void process(const cv::Mat& imageGray); + + bool setParameters(const Parameters& params); + const Parameters& getParameters() const; + + + typedef std::pair Object; + virtual void getObjects(std::vector& result) const; + virtual void getObjects(std::vector& result) const; + + enum ObjectStatus + { + DETECTED_NOT_SHOWN_YET, + DETECTED, + DETECTED_TEMPORARY_LOST, + WRONG_OBJECT + }; + struct ExtObject + { + int id; + cv::Rect location; + ObjectStatus status; + ExtObject(int _id, cv::Rect _location, ObjectStatus _status) + :id(_id), location(_location), status(_status) + { + } + }; + virtual void getObjects(std::vector& result) const; + + + virtual int addObject(const cv::Rect& location); //returns id of the new object + + protected: + class SeparateDetectionWork; + cv::Ptr separateDetectionWork; + friend void* workcycleObjectDetectorFunction(void* p); + + struct InnerParameters + { + int numLastPositionsToTrack; + int numStepsToWaitBeforeFirstShow; + int numStepsToTrackWithoutDetectingIfObjectHasNotBeenShown; + int numStepsToShowWithoutDetecting; + + float coeffTrackingWindowSize; + float coeffObjectSizeToTrack; + float coeffObjectSpeedUsingInPrediction; + + InnerParameters(); + }; + Parameters parameters; + InnerParameters innerParameters; + + struct TrackedObject + { + typedef std::vector PositionsVector; + + PositionsVector lastPositions; + + int numDetectedFrames; + int numFramesNotDetected; + int id; + + TrackedObject(const cv::Rect& rect):numDetectedFrames(1), numFramesNotDetected(0) + { + lastPositions.push_back(rect); + id=getNextId(); + }; + + static int getNextId() + { + static int _id=0; + return _id++; + } + }; + + int numTrackedSteps; + std::vector trackedObjects; + + std::vector weightsPositionsSmoothing; + std::vector weightsSizesSmoothing; + + cv::Ptr cascadeForTracking; + + void updateTrackedObjects(const std::vector& detectedObjects); + cv::Rect calcTrackedObjectPositionToShow(int i) const; + cv::Rect calcTrackedObjectPositionToShow(int i, ObjectStatus& status) const; + void detectInRegion(const cv::Mat& img, const cv::Rect& r, std::vector& detectedObjectsInRegions); +}; + +//! 
@} objdetect + +} //end of cv namespace +#endif + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/objdetect/objdetect.hpp b/third_party/opencv/uos/sw_64/include/opencv2/objdetect/objdetect.hpp new file mode 100644 index 00000000..3ee284f4 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/objdetect/objdetect.hpp @@ -0,0 +1,48 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifdef __OPENCV_BUILD +#error this is a compatibility header which should not be used inside the OpenCV library +#endif + +#include "opencv2/objdetect.hpp" diff --git a/third_party/opencv/uos/sw_64/include/opencv2/objdetect/objdetect_c.h b/third_party/opencv/uos/sw_64/include/opencv2/objdetect/objdetect_c.h new file mode 100644 index 00000000..67dc2f4e --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/objdetect/objdetect_c.h @@ -0,0 +1,166 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. 
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_OBJDETECT_C_H +#define OPENCV_OBJDETECT_C_H + +#include "opencv2/core/core_c.h" + +#ifdef __cplusplus +#include +#include + +extern "C" { +#endif + +/** @addtogroup objdetect_c + @{ + */ + +/****************************************************************************************\ +* Haar-like Object Detection functions * +\****************************************************************************************/ + +#define CV_HAAR_MAGIC_VAL 0x42500000 +#define CV_TYPE_NAME_HAAR "opencv-haar-classifier" + +#define CV_IS_HAAR_CLASSIFIER( haar ) \ + ((haar) != NULL && \ + (((const CvHaarClassifierCascade*)(haar))->flags & CV_MAGIC_MASK)==CV_HAAR_MAGIC_VAL) + +#define CV_HAAR_FEATURE_MAX 3 +#define CV_HAAR_STAGE_MAX 1000 + +typedef struct CvHaarFeature +{ + int tilted; + struct + { + CvRect r; + float weight; + } rect[CV_HAAR_FEATURE_MAX]; +} CvHaarFeature; + +typedef struct CvHaarClassifier +{ + int count; + CvHaarFeature* haar_feature; + float* threshold; + int* left; + int* right; + float* alpha; +} CvHaarClassifier; + +typedef struct CvHaarStageClassifier +{ + int count; + float threshold; + CvHaarClassifier* classifier; + + int next; + int child; + int parent; +} CvHaarStageClassifier; + +typedef struct CvHidHaarClassifierCascade CvHidHaarClassifierCascade; + +typedef struct CvHaarClassifierCascade +{ + int flags; + int count; + CvSize orig_window_size; + CvSize real_window_size; + double scale; + CvHaarStageClassifier* stage_classifier; + CvHidHaarClassifierCascade* hid_cascade; +} CvHaarClassifierCascade; + +typedef struct CvAvgComp +{ + CvRect rect; + int neighbors; +} CvAvgComp; + +/* Loads haar classifier cascade from a directory. 
+ It is obsolete: convert your cascade to xml and use cvLoad instead */ +CVAPI(CvHaarClassifierCascade*) cvLoadHaarClassifierCascade( + const char* directory, CvSize orig_window_size); + +CVAPI(void) cvReleaseHaarClassifierCascade( CvHaarClassifierCascade** cascade ); + +#define CV_HAAR_DO_CANNY_PRUNING 1 +#define CV_HAAR_SCALE_IMAGE 2 +#define CV_HAAR_FIND_BIGGEST_OBJECT 4 +#define CV_HAAR_DO_ROUGH_SEARCH 8 + +CVAPI(CvSeq*) cvHaarDetectObjects( const CvArr* image, + CvHaarClassifierCascade* cascade, CvMemStorage* storage, + double scale_factor CV_DEFAULT(1.1), + int min_neighbors CV_DEFAULT(3), int flags CV_DEFAULT(0), + CvSize min_size CV_DEFAULT(cvSize(0,0)), CvSize max_size CV_DEFAULT(cvSize(0,0))); + +/* sets images for haar classifier cascade */ +CVAPI(void) cvSetImagesForHaarClassifierCascade( CvHaarClassifierCascade* cascade, + const CvArr* sum, const CvArr* sqsum, + const CvArr* tilted_sum, double scale ); + +/* runs the cascade on the specified window */ +CVAPI(int) cvRunHaarClassifierCascade( const CvHaarClassifierCascade* cascade, + CvPoint pt, int start_stage CV_DEFAULT(0)); + +/** @} objdetect_c */ + +#ifdef __cplusplus +} + +CV_EXPORTS CvSeq* cvHaarDetectObjectsForROC( const CvArr* image, + CvHaarClassifierCascade* cascade, CvMemStorage* storage, + std::vector& rejectLevels, std::vector& levelWeightds, + double scale_factor = 1.1, + int min_neighbors = 3, int flags = 0, + CvSize min_size = cvSize(0, 0), CvSize max_size = cvSize(0, 0), + bool outputRejectLevels = false ); + +#endif + +#endif /* OPENCV_OBJDETECT_C_H */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/opencv.hpp b/third_party/opencv/uos/sw_64/include/opencv2/opencv.hpp new file mode 100644 index 00000000..40481587 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/opencv.hpp @@ -0,0 +1,139 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2010, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_ALL_HPP +#define OPENCV_ALL_HPP + +// File that defines what modules where included during the build of OpenCV +// These are purely the defines of the correct HAVE_OPENCV_modulename values +#include "opencv2/opencv_modules.hpp" + +// Then the list of defines is checked to include the correct headers +// Core library is always included --> without no OpenCV functionality available +#include "opencv2/core.hpp" + +// Then the optional modules are checked +#ifdef HAVE_OPENCV_CALIB3D +#include "opencv2/calib3d.hpp" +#endif +#ifdef HAVE_OPENCV_FEATURES2D +#include "opencv2/features2d.hpp" +#endif +#ifdef HAVE_OPENCV_DNN +#include "opencv2/dnn.hpp" +#endif +#ifdef HAVE_OPENCV_FLANN +#include "opencv2/flann.hpp" +#endif +#ifdef HAVE_OPENCV_HIGHGUI +#include "opencv2/highgui.hpp" +#endif +#ifdef HAVE_OPENCV_IMGCODECS +#include "opencv2/imgcodecs.hpp" +#endif +#ifdef HAVE_OPENCV_IMGPROC +#include "opencv2/imgproc.hpp" +#endif +#ifdef HAVE_OPENCV_ML +#include "opencv2/ml.hpp" +#endif +#ifdef HAVE_OPENCV_OBJDETECT +#include "opencv2/objdetect.hpp" +#endif +#ifdef HAVE_OPENCV_PHOTO +#include "opencv2/photo.hpp" +#endif +#ifdef HAVE_OPENCV_SHAPE +#include "opencv2/shape.hpp" +#endif +#ifdef HAVE_OPENCV_STITCHING +#include "opencv2/stitching.hpp" +#endif +#ifdef HAVE_OPENCV_SUPERRES +#include "opencv2/superres.hpp" +#endif +#ifdef HAVE_OPENCV_VIDEO +#include "opencv2/video.hpp" +#endif +#ifdef HAVE_OPENCV_VIDEOIO +#include "opencv2/videoio.hpp" +#endif +#ifdef HAVE_OPENCV_VIDEOSTAB +#include "opencv2/videostab.hpp" +#endif +#ifdef HAVE_OPENCV_VIZ +#include "opencv2/viz.hpp" +#endif + +// Finally CUDA specific entries are checked and added +#ifdef HAVE_OPENCV_CUDAARITHM +#include "opencv2/cudaarithm.hpp" +#endif +#ifdef HAVE_OPENCV_CUDABGSEGM +#include "opencv2/cudabgsegm.hpp" +#endif +#ifdef HAVE_OPENCV_CUDACODEC +#include "opencv2/cudacodec.hpp" +#endif +#ifdef HAVE_OPENCV_CUDAFEATURES2D +#include "opencv2/cudafeatures2d.hpp" +#endif +#ifdef HAVE_OPENCV_CUDAFILTERS +#include "opencv2/cudafilters.hpp" +#endif +#ifdef HAVE_OPENCV_CUDAIMGPROC +#include "opencv2/cudaimgproc.hpp" +#endif +#ifdef HAVE_OPENCV_CUDAOBJDETECT +#include "opencv2/cudaobjdetect.hpp" +#endif +#ifdef HAVE_OPENCV_CUDAOPTFLOW +#include "opencv2/cudaoptflow.hpp" +#endif +#ifdef HAVE_OPENCV_CUDASTEREO +#include "opencv2/cudastereo.hpp" +#endif +#ifdef HAVE_OPENCV_CUDAWARPING +#include "opencv2/cudawarping.hpp" +#endif + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/opencv_modules.hpp b/third_party/opencv/uos/sw_64/include/opencv2/opencv_modules.hpp new file mode 100644 index 00000000..e14f3ec7 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/opencv_modules.hpp @@ -0,0 +1,31 @@ +/* + * ** File generated automatically, do not modify ** + * + * This file defines the list of modules available in current build configuration + * + * +*/ + +// This definition means that OpenCV is built with enabled non-free code. 
+// For example, patented algorithms for non-profit/non-commercial use only. +/* #undef OPENCV_ENABLE_NONFREE */ + +#define HAVE_OPENCV_CALIB3D +#define HAVE_OPENCV_CORE +#define HAVE_OPENCV_DNN +#define HAVE_OPENCV_FEATURES2D +#define HAVE_OPENCV_FLANN +#define HAVE_OPENCV_HIGHGUI +#define HAVE_OPENCV_IMGCODECS +#define HAVE_OPENCV_IMGPROC +#define HAVE_OPENCV_ML +#define HAVE_OPENCV_OBJDETECT +#define HAVE_OPENCV_PHOTO +#define HAVE_OPENCV_SHAPE +#define HAVE_OPENCV_STITCHING +#define HAVE_OPENCV_SUPERRES +#define HAVE_OPENCV_VIDEO +#define HAVE_OPENCV_VIDEOIO +#define HAVE_OPENCV_VIDEOSTAB + + diff --git a/third_party/opencv/uos/sw_64/include/opencv2/photo.hpp b/third_party/opencv/uos/sw_64/include/opencv2/photo.hpp new file mode 100644 index 00000000..a457295a --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/photo.hpp @@ -0,0 +1,862 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_PHOTO_HPP +#define OPENCV_PHOTO_HPP + +#include "opencv2/core.hpp" +#include "opencv2/imgproc.hpp" + +/** +@defgroup photo Computational Photography + +This module includes photo processing algorithms +@{ + @defgroup photo_inpaint Inpainting + @defgroup photo_denoise Denoising + @defgroup photo_hdr HDR imaging + +This section describes high dynamic range imaging algorithms namely tonemapping, exposure alignment, +camera calibration with multiple exposures and exposure fusion. 
+ + @defgroup photo_decolor Contrast Preserving Decolorization + +Useful links: + +http://www.cse.cuhk.edu.hk/leojia/projects/color2gray/index.html + + @defgroup photo_clone Seamless Cloning + +Useful links: + +https://www.learnopencv.com/seamless-cloning-using-opencv-python-cpp + + @defgroup photo_render Non-Photorealistic Rendering + +Useful links: + +http://www.inf.ufrgs.br/~eslgastal/DomainTransform + +https://www.learnopencv.com/non-photorealistic-rendering-using-opencv-python-c/ + + @defgroup photo_c C API +@} + */ + +namespace cv +{ + +//! @addtogroup photo +//! @{ + +//! @addtogroup photo_inpaint +//! @{ +//! the inpainting algorithm +enum +{ + INPAINT_NS = 0, //!< Use Navier-Stokes based method + INPAINT_TELEA = 1 //!< Use the algorithm proposed by Alexandru Telea @cite Telea04 +}; + +/** @brief Restores the selected region in an image using the region neighborhood. + +@param src Input 8-bit, 16-bit unsigned or 32-bit float 1-channel or 8-bit 3-channel image. +@param inpaintMask Inpainting mask, 8-bit 1-channel image. Non-zero pixels indicate the area that +needs to be inpainted. +@param dst Output image with the same size and type as src . +@param inpaintRadius Radius of a circular neighborhood of each point inpainted that is considered +by the algorithm. +@param flags Inpainting method that could be cv::INPAINT_NS or cv::INPAINT_TELEA + +The function reconstructs the selected image area from the pixel near the area boundary. The +function may be used to remove dust and scratches from a scanned photo, or to remove undesirable +objects from still images or video. See for more details. + +@note + - An example using the inpainting technique can be found at + opencv_source_code/samples/cpp/inpaint.cpp + - (Python) An example using the inpainting technique can be found at + opencv_source_code/samples/python/inpaint.py + */ +CV_EXPORTS_W void inpaint( InputArray src, InputArray inpaintMask, + OutputArray dst, double inpaintRadius, int flags ); + +//! @} photo_inpaint + +//! @addtogroup photo_denoise +//! @{ + +/** @brief Perform image denoising using Non-local Means Denoising algorithm + with several computational +optimizations. Noise expected to be a gaussian white noise + +@param src Input 8-bit 1-channel, 2-channel, 3-channel or 4-channel image. +@param dst Output image with the same size and type as src . +@param templateWindowSize Size in pixels of the template patch that is used to compute weights. +Should be odd. Recommended value 7 pixels +@param searchWindowSize Size in pixels of the window that is used to compute weighted average for +given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater +denoising time. Recommended value 21 pixels +@param h Parameter regulating filter strength. Big h value perfectly removes noise but also +removes image details, smaller h value preserves details but also preserves some noise + +This function expected to be applied to grayscale images. For colored images look at +fastNlMeansDenoisingColored. Advanced usage of this functions can be manual denoising of colored +image in different colorspaces. Such approach is used in fastNlMeansDenoisingColored by converting +image to CIELAB colorspace and then separately denoise L and AB components with different h +parameter. 
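(Editorial aside, not part of photo.hpp or of this diff: the comments above document cv::inpaint and the non-local-means denoising family. A minimal C++ sketch of how these entry points are typically called follows; the file names, the mask image, and the parameter choices are invented for illustration only.)

#include <opencv2/imgcodecs.hpp>
#include <opencv2/photo.hpp>

int main()
{
    // Grayscale non-local-means denoising, using the "recommended" window sizes
    // from the documentation above (template patch 7, search window 21).
    cv::Mat noisy = cv::imread("scan.png", cv::IMREAD_GRAYSCALE);        // hypothetical input
    cv::Mat denoised;
    cv::fastNlMeansDenoising(noisy, denoised, 3.0f, 7, 21);

    // Inpainting: non-zero pixels in the mask mark the region to reconstruct.
    cv::Mat damaged = cv::imread("photo.jpg");                           // 8-bit 3-channel
    cv::Mat mask    = cv::imread("scratches.png", cv::IMREAD_GRAYSCALE); // hypothetical mask
    cv::Mat restored;
    cv::inpaint(damaged, mask, restored, 3.0, cv::INPAINT_TELEA);

    cv::imwrite("denoised.png", denoised);
    cv::imwrite("restored.png", restored);
    return 0;
}

(The larger h, the stronger the smoothing; per the comments above, h around 10 is usually enough for visible noise at the cost of fine detail.)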
+ */ +CV_EXPORTS_W void fastNlMeansDenoising( InputArray src, OutputArray dst, float h = 3, + int templateWindowSize = 7, int searchWindowSize = 21); + +/** @brief Perform image denoising using Non-local Means Denoising algorithm + with several computational +optimizations. Noise expected to be a gaussian white noise + +@param src Input 8-bit or 16-bit (only with NORM_L1) 1-channel, +2-channel, 3-channel or 4-channel image. +@param dst Output image with the same size and type as src . +@param templateWindowSize Size in pixels of the template patch that is used to compute weights. +Should be odd. Recommended value 7 pixels +@param searchWindowSize Size in pixels of the window that is used to compute weighted average for +given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater +denoising time. Recommended value 21 pixels +@param h Array of parameters regulating filter strength, either one +parameter applied to all channels or one per channel in dst. Big h value +perfectly removes noise but also removes image details, smaller h +value preserves details but also preserves some noise +@param normType Type of norm used for weight calculation. Can be either NORM_L2 or NORM_L1 + +This function expected to be applied to grayscale images. For colored images look at +fastNlMeansDenoisingColored. Advanced usage of this functions can be manual denoising of colored +image in different colorspaces. Such approach is used in fastNlMeansDenoisingColored by converting +image to CIELAB colorspace and then separately denoise L and AB components with different h +parameter. + */ +CV_EXPORTS_W void fastNlMeansDenoising( InputArray src, OutputArray dst, + const std::vector& h, + int templateWindowSize = 7, int searchWindowSize = 21, + int normType = NORM_L2); + +/** @brief Modification of fastNlMeansDenoising function for colored images + +@param src Input 8-bit 3-channel image. +@param dst Output image with the same size and type as src . +@param templateWindowSize Size in pixels of the template patch that is used to compute weights. +Should be odd. Recommended value 7 pixels +@param searchWindowSize Size in pixels of the window that is used to compute weighted average for +given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater +denoising time. Recommended value 21 pixels +@param h Parameter regulating filter strength for luminance component. Bigger h value perfectly +removes noise but also removes image details, smaller h value preserves details but also preserves +some noise +@param hColor The same as h but for color components. For most images value equals 10 +will be enough to remove colored noise and do not distort colors + +The function converts image to CIELAB colorspace and then separately denoise L and AB components +with given h parameters using fastNlMeansDenoising function. + */ +CV_EXPORTS_W void fastNlMeansDenoisingColored( InputArray src, OutputArray dst, + float h = 3, float hColor = 3, + int templateWindowSize = 7, int searchWindowSize = 21); + +/** @brief Modification of fastNlMeansDenoising function for images sequence where consecutive images have been +captured in small period of time. For example video. This version of the function is for grayscale +images or for manual manipulation with colorspaces. For more details see + + +@param srcImgs Input 8-bit 1-channel, 2-channel, 3-channel or +4-channel images sequence. All images should have the same type and +size. 
+@param imgToDenoiseIndex Target image to denoise index in srcImgs sequence +@param temporalWindowSize Number of surrounding images to use for target image denoising. Should +be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to +imgToDenoiseIndex - temporalWindowSize / 2 from srcImgs will be used to denoise +srcImgs[imgToDenoiseIndex] image. +@param dst Output image with the same size and type as srcImgs images. +@param templateWindowSize Size in pixels of the template patch that is used to compute weights. +Should be odd. Recommended value 7 pixels +@param searchWindowSize Size in pixels of the window that is used to compute weighted average for +given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater +denoising time. Recommended value 21 pixels +@param h Parameter regulating filter strength. Bigger h value +perfectly removes noise but also removes image details, smaller h +value preserves details but also preserves some noise + */ +CV_EXPORTS_W void fastNlMeansDenoisingMulti( InputArrayOfArrays srcImgs, OutputArray dst, + int imgToDenoiseIndex, int temporalWindowSize, + float h = 3, int templateWindowSize = 7, int searchWindowSize = 21); + +/** @brief Modification of fastNlMeansDenoising function for images sequence where consecutive images have been +captured in small period of time. For example video. This version of the function is for grayscale +images or for manual manipulation with colorspaces. For more details see + + +@param srcImgs Input 8-bit or 16-bit (only with NORM_L1) 1-channel, +2-channel, 3-channel or 4-channel images sequence. All images should +have the same type and size. +@param imgToDenoiseIndex Target image to denoise index in srcImgs sequence +@param temporalWindowSize Number of surrounding images to use for target image denoising. Should +be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to +imgToDenoiseIndex - temporalWindowSize / 2 from srcImgs will be used to denoise +srcImgs[imgToDenoiseIndex] image. +@param dst Output image with the same size and type as srcImgs images. +@param templateWindowSize Size in pixels of the template patch that is used to compute weights. +Should be odd. Recommended value 7 pixels +@param searchWindowSize Size in pixels of the window that is used to compute weighted average for +given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater +denoising time. Recommended value 21 pixels +@param h Array of parameters regulating filter strength, either one +parameter applied to all channels or one per channel in dst. Big h value +perfectly removes noise but also removes image details, smaller h +value preserves details but also preserves some noise +@param normType Type of norm used for weight calculation. Can be either NORM_L2 or NORM_L1 + */ +CV_EXPORTS_W void fastNlMeansDenoisingMulti( InputArrayOfArrays srcImgs, OutputArray dst, + int imgToDenoiseIndex, int temporalWindowSize, + const std::vector& h, + int templateWindowSize = 7, int searchWindowSize = 21, + int normType = NORM_L2); + +/** @brief Modification of fastNlMeansDenoisingMulti function for colored images sequences + +@param srcImgs Input 8-bit 3-channel images sequence. All images should have the same type and +size. +@param imgToDenoiseIndex Target image to denoise index in srcImgs sequence +@param temporalWindowSize Number of surrounding images to use for target image denoising. Should +be odd. 
Images from imgToDenoiseIndex - temporalWindowSize / 2 to +imgToDenoiseIndex - temporalWindowSize / 2 from srcImgs will be used to denoise +srcImgs[imgToDenoiseIndex] image. +@param dst Output image with the same size and type as srcImgs images. +@param templateWindowSize Size in pixels of the template patch that is used to compute weights. +Should be odd. Recommended value 7 pixels +@param searchWindowSize Size in pixels of the window that is used to compute weighted average for +given pixel. Should be odd. Affect performance linearly: greater searchWindowsSize - greater +denoising time. Recommended value 21 pixels +@param h Parameter regulating filter strength for luminance component. Bigger h value perfectly +removes noise but also removes image details, smaller h value preserves details but also preserves +some noise. +@param hColor The same as h but for color components. + +The function converts images to CIELAB colorspace and then separately denoise L and AB components +with given h parameters using fastNlMeansDenoisingMulti function. + */ +CV_EXPORTS_W void fastNlMeansDenoisingColoredMulti( InputArrayOfArrays srcImgs, OutputArray dst, + int imgToDenoiseIndex, int temporalWindowSize, + float h = 3, float hColor = 3, + int templateWindowSize = 7, int searchWindowSize = 21); + +/** @brief Primal-dual algorithm is an algorithm for solving special types of variational problems (that is, +finding a function to minimize some functional). As the image denoising, in particular, may be seen +as the variational problem, primal-dual algorithm then can be used to perform denoising and this is +exactly what is implemented. + +It should be noted, that this implementation was taken from the July 2013 blog entry +@cite MA13 , which also contained (slightly more general) ready-to-use source code on Python. +Subsequently, that code was rewritten on C++ with the usage of openCV by Vadim Pisarevsky at the end +of July 2013 and finally it was slightly adapted by later authors. + +Although the thorough discussion and justification of the algorithm involved may be found in +@cite ChambolleEtAl, it might make sense to skim over it here, following @cite MA13 . To begin +with, we consider the 1-byte gray-level images as the functions from the rectangular domain of +pixels (it may be seen as set +\f$\left\{(x,y)\in\mathbb{N}\times\mathbb{N}\mid 1\leq x\leq n,\;1\leq y\leq m\right\}\f$ for some +\f$m,\;n\in\mathbb{N}\f$) into \f$\{0,1,\dots,255\}\f$. We shall denote the noised images as \f$f_i\f$ and with +this view, given some image \f$x\f$ of the same size, we may measure how bad it is by the formula + +\f[\left\|\left\|\nabla x\right\|\right\| + \lambda\sum_i\left\|\left\|x-f_i\right\|\right\|\f] + +\f$\|\|\cdot\|\|\f$ here denotes \f$L_2\f$-norm and as you see, the first addend states that we want our +image to be smooth (ideally, having zero gradient, thus being constant) and the second states that +we want our result to be close to the observations we've got. If we treat \f$x\f$ as a function, this is +exactly the functional what we seek to minimize and here the Primal-Dual algorithm comes into play. + +@param observations This array should contain one or more noised versions of the image that is to +be restored. +@param result Here the denoised image will be stored. There is no need to do pre-allocation of +storage space, as it will be automatically allocated, if necessary. +@param lambda Corresponds to \f$\lambda\f$ in the formulas above. 
As it is enlarged, the smooth +(blurred) images are treated more favorably than detailed (but maybe more noised) ones. Roughly +speaking, as it becomes smaller, the result will be more blur but more sever outliers will be +removed. +@param niters Number of iterations that the algorithm will run. Of course, as more iterations as +better, but it is hard to quantitatively refine this statement, so just use the default and +increase it if the results are poor. + */ +CV_EXPORTS_W void denoise_TVL1(const std::vector& observations,Mat& result, double lambda=1.0, int niters=30); + +//! @} photo_denoise + +//! @addtogroup photo_hdr +//! @{ + +enum { LDR_SIZE = 256 }; + +/** @brief Base class for tonemapping algorithms - tools that are used to map HDR image to 8-bit range. + */ +class CV_EXPORTS_W Tonemap : public Algorithm +{ +public: + /** @brief Tonemaps image + + @param src source image - CV_32FC3 Mat (float 32 bits 3 channels) + @param dst destination image - CV_32FC3 Mat with values in [0, 1] range + */ + CV_WRAP virtual void process(InputArray src, OutputArray dst) = 0; + + CV_WRAP virtual float getGamma() const = 0; + CV_WRAP virtual void setGamma(float gamma) = 0; +}; + +/** @brief Creates simple linear mapper with gamma correction + +@param gamma positive value for gamma correction. Gamma value of 1.0 implies no correction, gamma +equal to 2.2f is suitable for most displays. +Generally gamma \> 1 brightens the image and gamma \< 1 darkens it. + */ +CV_EXPORTS_W Ptr createTonemap(float gamma = 1.0f); + +/** @brief Adaptive logarithmic mapping is a fast global tonemapping algorithm that scales the image in +logarithmic domain. + +Since it's a global operator the same function is applied to all the pixels, it is controlled by the +bias parameter. + +Optional saturation enhancement is possible as described in @cite FL02 . + +For more information see @cite DM03 . + */ +class CV_EXPORTS_W TonemapDrago : public Tonemap +{ +public: + + CV_WRAP virtual float getSaturation() const = 0; + CV_WRAP virtual void setSaturation(float saturation) = 0; + + CV_WRAP virtual float getBias() const = 0; + CV_WRAP virtual void setBias(float bias) = 0; +}; + +/** @brief Creates TonemapDrago object + +@param gamma gamma value for gamma correction. See createTonemap +@param saturation positive saturation enhancement value. 1.0 preserves saturation, values greater +than 1 increase saturation and values less than 1 decrease it. +@param bias value for bias function in [0, 1] range. Values from 0.7 to 0.9 usually give best +results, default value is 0.85. + */ +CV_EXPORTS_W Ptr createTonemapDrago(float gamma = 1.0f, float saturation = 1.0f, float bias = 0.85f); + + +/** @brief This is a global tonemapping operator that models human visual system. + +Mapping function is controlled by adaptation parameter, that is computed using light adaptation and +color adaptation. + +For more information see @cite RD05 . + */ +class CV_EXPORTS_W TonemapReinhard : public Tonemap +{ +public: + CV_WRAP virtual float getIntensity() const = 0; + CV_WRAP virtual void setIntensity(float intensity) = 0; + + CV_WRAP virtual float getLightAdaptation() const = 0; + CV_WRAP virtual void setLightAdaptation(float light_adapt) = 0; + + CV_WRAP virtual float getColorAdaptation() const = 0; + CV_WRAP virtual void setColorAdaptation(float color_adapt) = 0; +}; + +/** @brief Creates TonemapReinhard object + +@param gamma gamma value for gamma correction. See createTonemap +@param intensity result intensity in [-8, 8] range. 
Greater intensity produces brighter results. +@param light_adapt light adaptation in [0, 1] range. If 1 adaptation is based only on pixel +value, if 0 it's global, otherwise it's a weighted mean of this two cases. +@param color_adapt chromatic adaptation in [0, 1] range. If 1 channels are treated independently, +if 0 adaptation level is the same for each channel. + */ +CV_EXPORTS_W Ptr +createTonemapReinhard(float gamma = 1.0f, float intensity = 0.0f, float light_adapt = 1.0f, float color_adapt = 0.0f); + +/** @brief This algorithm transforms image to contrast using gradients on all levels of gaussian pyramid, +transforms contrast values to HVS response and scales the response. After this the image is +reconstructed from new contrast values. + +For more information see @cite MM06 . + */ +class CV_EXPORTS_W TonemapMantiuk : public Tonemap +{ +public: + CV_WRAP virtual float getScale() const = 0; + CV_WRAP virtual void setScale(float scale) = 0; + + CV_WRAP virtual float getSaturation() const = 0; + CV_WRAP virtual void setSaturation(float saturation) = 0; +}; + +/** @brief Creates TonemapMantiuk object + +@param gamma gamma value for gamma correction. See createTonemap +@param scale contrast scale factor. HVS response is multiplied by this parameter, thus compressing +dynamic range. Values from 0.6 to 0.9 produce best results. +@param saturation saturation enhancement value. See createTonemapDrago + */ +CV_EXPORTS_W Ptr +createTonemapMantiuk(float gamma = 1.0f, float scale = 0.7f, float saturation = 1.0f); + +/** @brief The base class for algorithms that align images of the same scene with different exposures + */ +class CV_EXPORTS_W AlignExposures : public Algorithm +{ +public: + /** @brief Aligns images + + @param src vector of input images + @param dst vector of aligned images + @param times vector of exposure time values for each image + @param response 256x1 matrix with inverse camera response function for each pixel value, it should + have the same number of channels as images. + */ + CV_WRAP virtual void process(InputArrayOfArrays src, std::vector& dst, + InputArray times, InputArray response) = 0; +}; + +/** @brief This algorithm converts images to median threshold bitmaps (1 for pixels brighter than median +luminance and 0 otherwise) and than aligns the resulting bitmaps using bit operations. + +It is invariant to exposure, so exposure values and camera response are not necessary. + +In this implementation new image regions are filled with zeros. + +For more information see @cite GW03 . + */ +class CV_EXPORTS_W AlignMTB : public AlignExposures +{ +public: + CV_WRAP virtual void process(InputArrayOfArrays src, std::vector& dst, + InputArray times, InputArray response) CV_OVERRIDE = 0; + + /** @brief Short version of process, that doesn't take extra arguments. + + @param src vector of input images + @param dst vector of aligned images + */ + CV_WRAP virtual void process(InputArrayOfArrays src, std::vector& dst) = 0; + + /** @brief Calculates shift between two images, i. e. how to shift the second image to correspond it with the + first. + + @param img0 first image + @param img1 second image + */ + CV_WRAP virtual Point calculateShift(InputArray img0, InputArray img1) = 0; + /** @brief Helper function, that shift Mat filling new regions with zeros. 
+ + @param src input image + @param dst result image + @param shift shift value + */ + CV_WRAP virtual void shiftMat(InputArray src, OutputArray dst, const Point shift) = 0; + /** @brief Computes median threshold and exclude bitmaps of given image. + + @param img input image + @param tb median threshold bitmap + @param eb exclude bitmap + */ + CV_WRAP virtual void computeBitmaps(InputArray img, OutputArray tb, OutputArray eb) = 0; + + CV_WRAP virtual int getMaxBits() const = 0; + CV_WRAP virtual void setMaxBits(int max_bits) = 0; + + CV_WRAP virtual int getExcludeRange() const = 0; + CV_WRAP virtual void setExcludeRange(int exclude_range) = 0; + + CV_WRAP virtual bool getCut() const = 0; + CV_WRAP virtual void setCut(bool value) = 0; +}; + +/** @brief Creates AlignMTB object + +@param max_bits logarithm to the base 2 of maximal shift in each dimension. Values of 5 and 6 are +usually good enough (31 and 63 pixels shift respectively). +@param exclude_range range for exclusion bitmap that is constructed to suppress noise around the +median value. +@param cut if true cuts images, otherwise fills the new regions with zeros. + */ +CV_EXPORTS_W Ptr createAlignMTB(int max_bits = 6, int exclude_range = 4, bool cut = true); + +/** @brief The base class for camera response calibration algorithms. + */ +class CV_EXPORTS_W CalibrateCRF : public Algorithm +{ +public: + /** @brief Recovers inverse camera response. + + @param src vector of input images + @param dst 256x1 matrix with inverse camera response function + @param times vector of exposure time values for each image + */ + CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0; +}; + +/** @brief Inverse camera response function is extracted for each brightness value by minimizing an objective +function as linear system. Objective function is constructed using pixel values on the same position +in all images, extra term is added to make the result smoother. + +For more information see @cite DM97 . + */ +class CV_EXPORTS_W CalibrateDebevec : public CalibrateCRF +{ +public: + CV_WRAP virtual float getLambda() const = 0; + CV_WRAP virtual void setLambda(float lambda) = 0; + + CV_WRAP virtual int getSamples() const = 0; + CV_WRAP virtual void setSamples(int samples) = 0; + + CV_WRAP virtual bool getRandom() const = 0; + CV_WRAP virtual void setRandom(bool random) = 0; +}; + +/** @brief Creates CalibrateDebevec object + +@param samples number of pixel locations to use +@param lambda smoothness term weight. Greater values produce smoother results, but can alter the +response. +@param random if true sample pixel locations are chosen at random, otherwise they form a +rectangular grid. + */ +CV_EXPORTS_W Ptr createCalibrateDebevec(int samples = 70, float lambda = 10.0f, bool random = false); + +/** @brief Inverse camera response function is extracted for each brightness value by minimizing an objective +function as linear system. This algorithm uses all image pixels. + +For more information see @cite RB99 . + */ +class CV_EXPORTS_W CalibrateRobertson : public CalibrateCRF +{ +public: + CV_WRAP virtual int getMaxIter() const = 0; + CV_WRAP virtual void setMaxIter(int max_iter) = 0; + + CV_WRAP virtual float getThreshold() const = 0; + CV_WRAP virtual void setThreshold(float threshold) = 0; + + CV_WRAP virtual Mat getRadiance() const = 0; +}; + +/** @brief Creates CalibrateRobertson object + +@param max_iter maximal number of Gauss-Seidel solver iterations. 
+@param threshold target difference between results of two successive steps of the minimization. + */ +CV_EXPORTS_W Ptr createCalibrateRobertson(int max_iter = 30, float threshold = 0.01f); + +/** @brief The base class algorithms that can merge exposure sequence to a single image. + */ +class CV_EXPORTS_W MergeExposures : public Algorithm +{ +public: + /** @brief Merges images. + + @param src vector of input images + @param dst result image + @param times vector of exposure time values for each image + @param response 256x1 matrix with inverse camera response function for each pixel value, it should + have the same number of channels as images. + */ + CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, + InputArray times, InputArray response) = 0; +}; + +/** @brief The resulting HDR image is calculated as weighted average of the exposures considering exposure +values and camera response. + +For more information see @cite DM97 . + */ +class CV_EXPORTS_W MergeDebevec : public MergeExposures +{ +public: + CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, + InputArray times, InputArray response) CV_OVERRIDE = 0; + CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0; +}; + +/** @brief Creates MergeDebevec object + */ +CV_EXPORTS_W Ptr createMergeDebevec(); + +/** @brief Pixels are weighted using contrast, saturation and well-exposedness measures, than images are +combined using laplacian pyramids. + +The resulting image weight is constructed as weighted average of contrast, saturation and +well-exposedness measures. + +The resulting image doesn't require tonemapping and can be converted to 8-bit image by multiplying +by 255, but it's recommended to apply gamma correction and/or linear tonemapping. + +For more information see @cite MK07 . + */ +class CV_EXPORTS_W MergeMertens : public MergeExposures +{ +public: + CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, + InputArray times, InputArray response) CV_OVERRIDE = 0; + /** @brief Short version of process, that doesn't take extra arguments. + + @param src vector of input images + @param dst result image + */ + CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst) = 0; + + CV_WRAP virtual float getContrastWeight() const = 0; + CV_WRAP virtual void setContrastWeight(float contrast_weiht) = 0; + + CV_WRAP virtual float getSaturationWeight() const = 0; + CV_WRAP virtual void setSaturationWeight(float saturation_weight) = 0; + + CV_WRAP virtual float getExposureWeight() const = 0; + CV_WRAP virtual void setExposureWeight(float exposure_weight) = 0; +}; + +/** @brief Creates MergeMertens object + +@param contrast_weight contrast measure weight. See MergeMertens. +@param saturation_weight saturation measure weight +@param exposure_weight well-exposedness measure weight + */ +CV_EXPORTS_W Ptr +createMergeMertens(float contrast_weight = 1.0f, float saturation_weight = 1.0f, float exposure_weight = 0.0f); + +/** @brief The resulting HDR image is calculated as weighted average of the exposures considering exposure +values and camera response. + +For more information see @cite RB99 . 
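(Editorial aside, not part of the header or the diff: the calibrate/merge/tonemap classes declared above are normally chained into a single HDR pipeline. A minimal sketch under assumed inputs — the file names and exposure times below are invented — might look like this.)

#include <opencv2/imgcodecs.hpp>
#include <opencv2/photo.hpp>
#include <vector>

int main()
{
    // Hypothetical bracketed exposures of the same scene.
    std::vector<cv::Mat> exposures = { cv::imread("exp_1_30.jpg"),
                                       cv::imread("exp_1_8.jpg"),
                                       cv::imread("exp_1_2.jpg") };
    std::vector<float> times = { 1.0f / 30.0f, 1.0f / 8.0f, 1.0f / 2.0f };

    // 1) Recover the inverse camera response, 2) merge into an HDR radiance map,
    // 3) tonemap back to a displayable [0, 1] image and save it as 8-bit.
    cv::Mat response, hdr, ldr;
    cv::createCalibrateDebevec()->process(exposures, response, times);
    cv::createMergeDebevec()->process(exposures, hdr, times, response);
    cv::createTonemapDrago(1.0f, 1.0f, 0.85f)->process(hdr, ldr);

    ldr.convertTo(ldr, CV_8U, 255);
    cv::imwrite("hdr_result.png", ldr);
    return 0;
}

(MergeMertens, by contrast, fuses the exposures directly without exposure times or a response curve and already yields a near-displayable result, which is why createMergeMertens only takes the contrast/saturation/exposedness weights.)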
+ */ +class CV_EXPORTS_W MergeRobertson : public MergeExposures +{ +public: + CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, + InputArray times, InputArray response) CV_OVERRIDE = 0; + CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0; +}; + +/** @brief Creates MergeRobertson object + */ +CV_EXPORTS_W Ptr createMergeRobertson(); + +//! @} photo_hdr + +//! @addtogroup photo_decolor +//! @{ + +/** @brief Transforms a color image to a grayscale image. It is a basic tool in digital printing, stylized +black-and-white photograph rendering, and in many single channel image processing applications +@cite CL12 . + +@param src Input 8-bit 3-channel image. +@param grayscale Output 8-bit 1-channel image. +@param color_boost Output 8-bit 3-channel image. + +This function is to be applied on color images. + */ +CV_EXPORTS_W void decolor( InputArray src, OutputArray grayscale, OutputArray color_boost); + +//! @} photo_decolor + +//! @addtogroup photo_clone +//! @{ + + +//! seamlessClone algorithm flags +enum +{ + /** The power of the method is fully expressed when inserting objects with complex outlines into a new background*/ + NORMAL_CLONE = 1, + /** The classic method, color-based selection and alpha masking might be time consuming and often leaves an undesirable + halo. Seamless cloning, even averaged with the original image, is not effective. Mixed seamless cloning based on a loose selection proves effective.*/ + MIXED_CLONE = 2, + /** Monochrome transfer allows the user to easily replace certain features of one object by alternative features.*/ + MONOCHROME_TRANSFER = 3}; + + +/** @example samples/cpp/tutorial_code/photo/seamless_cloning/cloning_demo.cpp +An example using seamlessClone function +*/ +/** @brief Image editing tasks concern either global changes (color/intensity corrections, filters, +deformations) or local changes concerned to a selection. Here we are interested in achieving local +changes, ones that are restricted to a region manually selected (ROI), in a seamless and effortless +manner. The extent of the changes ranges from slight distortions to complete replacement by novel +content @cite PM03 . + +@param src Input 8-bit 3-channel image. +@param dst Input 8-bit 3-channel image. +@param mask Input 8-bit 1 or 3-channel image. +@param p Point in dst image where object is placed. +@param blend Output image with the same size and type as dst. +@param flags Cloning method that could be cv::NORMAL_CLONE, cv::MIXED_CLONE or cv::MONOCHROME_TRANSFER + */ +CV_EXPORTS_W void seamlessClone( InputArray src, InputArray dst, InputArray mask, Point p, + OutputArray blend, int flags); + +/** @brief Given an original color image, two differently colored versions of this image can be mixed +seamlessly. + +@param src Input 8-bit 3-channel image. +@param mask Input 8-bit 1 or 3-channel image. +@param dst Output image with the same size and type as src . +@param red_mul R-channel multiply factor. +@param green_mul G-channel multiply factor. +@param blue_mul B-channel multiply factor. + +Multiplication factor is between .5 to 2.5. + */ +CV_EXPORTS_W void colorChange(InputArray src, InputArray mask, OutputArray dst, float red_mul = 1.0f, + float green_mul = 1.0f, float blue_mul = 1.0f); + +/** @brief Applying an appropriate non-linear transformation to the gradient field inside the selection and +then integrating back with a Poisson solver, modifies locally the apparent illumination of an image. + +@param src Input 8-bit 3-channel image. 
+@param mask Input 8-bit 1 or 3-channel image. +@param dst Output image with the same size and type as src. +@param alpha Value ranges between 0-2. +@param beta Value ranges between 0-2. + +This is useful to highlight under-exposed foreground objects or to reduce specular reflections. + */ +CV_EXPORTS_W void illuminationChange(InputArray src, InputArray mask, OutputArray dst, + float alpha = 0.2f, float beta = 0.4f); + +/** @brief By retaining only the gradients at edge locations, before integrating with the Poisson solver, one +washes out the texture of the selected region, giving its contents a flat aspect. Here Canny Edge %Detector is used. + +@param src Input 8-bit 3-channel image. +@param mask Input 8-bit 1 or 3-channel image. +@param dst Output image with the same size and type as src. +@param low_threshold %Range from 0 to 100. +@param high_threshold Value \> 100. +@param kernel_size The size of the Sobel kernel to be used. + +@note +The algorithm assumes that the color of the source image is close to that of the destination. This +assumption means that when the colors don't match, the source image color gets tinted toward the +color of the destination image. + */ +CV_EXPORTS_W void textureFlattening(InputArray src, InputArray mask, OutputArray dst, + float low_threshold = 30, float high_threshold = 45, + int kernel_size = 3); + +//! @} photo_clone + +//! @addtogroup photo_render +//! @{ + +//! Edge preserving filters +enum +{ + RECURS_FILTER = 1, //!< Recursive Filtering + NORMCONV_FILTER = 2 //!< Normalized Convolution Filtering +}; + +/** @brief Filtering is the fundamental operation in image and video processing. Edge-preserving smoothing +filters are used in many different applications @cite EM11 . + +@param src Input 8-bit 3-channel image. +@param dst Output 8-bit 3-channel image. +@param flags Edge preserving filters: cv::RECURS_FILTER or cv::NORMCONV_FILTER +@param sigma_s %Range between 0 to 200. +@param sigma_r %Range between 0 to 1. + */ +CV_EXPORTS_W void edgePreservingFilter(InputArray src, OutputArray dst, int flags = 1, + float sigma_s = 60, float sigma_r = 0.4f); + +/** @brief This filter enhances the details of a particular image. + +@param src Input 8-bit 3-channel image. +@param dst Output image with the same size and type as src. +@param sigma_s %Range between 0 to 200. +@param sigma_r %Range between 0 to 1. + */ +CV_EXPORTS_W void detailEnhance(InputArray src, OutputArray dst, float sigma_s = 10, + float sigma_r = 0.15f); + +/** @example samples/cpp/tutorial_code/photo/non_photorealistic_rendering/npr_demo.cpp +An example using non-photorealistic line drawing functions +*/ +/** @brief Pencil-like non-photorealistic line drawing + +@param src Input 8-bit 3-channel image. +@param dst1 Output 8-bit 1-channel image. +@param dst2 Output image with the same size and type as src. +@param sigma_s %Range between 0 to 200. +@param sigma_r %Range between 0 to 1. +@param shade_factor %Range between 0 to 0.1. + */ +CV_EXPORTS_W void pencilSketch(InputArray src, OutputArray dst1, OutputArray dst2, + float sigma_s = 60, float sigma_r = 0.07f, float shade_factor = 0.02f); + +/** @brief Stylization aims to produce digital imagery with a wide variety of effects not focused on +photorealism. Edge-aware filters are ideal for stylization, as they can abstract regions of low +contrast while preserving, or enhancing, high-contrast features. + +@param src Input 8-bit 3-channel image. +@param dst Output image with the same size and type as src. +@param sigma_s %Range between 0 to 200. 
+@param sigma_r %Range between 0 to 1. + */ +CV_EXPORTS_W void stylization(InputArray src, OutputArray dst, float sigma_s = 60, + float sigma_r = 0.45f); + +//! @} photo_render + +//! @} photo + +} // cv + +#ifndef DISABLE_OPENCV_24_COMPATIBILITY +#include "opencv2/photo/photo_c.h" +#endif + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/photo/cuda.hpp b/third_party/opencv/uos/sw_64/include/opencv2/photo/cuda.hpp new file mode 100644 index 00000000..a2f38167 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/photo/cuda.hpp @@ -0,0 +1,132 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_PHOTO_CUDA_HPP +#define OPENCV_PHOTO_CUDA_HPP + +#include "opencv2/core/cuda.hpp" + +namespace cv { namespace cuda { + +//! @addtogroup photo_denoise +//! @{ + +/** @brief Performs pure non local means denoising without any simplification, and thus it is not fast. + +@param src Source image. Supports only CV_8UC1, CV_8UC2 and CV_8UC3. +@param dst Destination image. +@param h Filter sigma regulating filter strength for color. +@param search_window Size of search window. +@param block_size Size of block used for computing weights. +@param borderMode Border type. See borderInterpolate for details. BORDER_REFLECT101 , +BORDER_REPLICATE , BORDER_CONSTANT , BORDER_REFLECT and BORDER_WRAP are supported for now. +@param stream Stream for the asynchronous version. 
+ +@sa + fastNlMeansDenoising + */ +CV_EXPORTS void nonLocalMeans(InputArray src, OutputArray dst, + float h, + int search_window = 21, + int block_size = 7, + int borderMode = BORDER_DEFAULT, + Stream& stream = Stream::Null()); + +/** @brief Perform image denoising using Non-local Means Denoising algorithm + with several computational +optimizations. Noise expected to be a gaussian white noise + +@param src Input 8-bit 1-channel, 2-channel or 3-channel image. +@param dst Output image with the same size and type as src . +@param h Parameter regulating filter strength. Big h value perfectly removes noise but also +removes image details, smaller h value preserves details but also preserves some noise +@param search_window Size in pixels of the window that is used to compute weighted average for +given pixel. Should be odd. Affect performance linearly: greater search_window - greater +denoising time. Recommended value 21 pixels +@param block_size Size in pixels of the template patch that is used to compute weights. Should be +odd. Recommended value 7 pixels +@param stream Stream for the asynchronous invocations. + +This function expected to be applied to grayscale images. For colored images look at +FastNonLocalMeansDenoising::labMethod. + +@sa + fastNlMeansDenoising + */ +CV_EXPORTS void fastNlMeansDenoising(InputArray src, OutputArray dst, + float h, + int search_window = 21, + int block_size = 7, + Stream& stream = Stream::Null()); + +/** @brief Modification of fastNlMeansDenoising function for colored images + +@param src Input 8-bit 3-channel image. +@param dst Output image with the same size and type as src . +@param h_luminance Parameter regulating filter strength. Big h value perfectly removes noise but +also removes image details, smaller h value preserves details but also preserves some noise +@param photo_render float The same as h but for color components. For most images value equals 10 will be +enough to remove colored noise and do not distort colors +@param search_window Size in pixels of the window that is used to compute weighted average for +given pixel. Should be odd. Affect performance linearly: greater search_window - greater +denoising time. Recommended value 21 pixels +@param block_size Size in pixels of the template patch that is used to compute weights. Should be +odd. Recommended value 7 pixels +@param stream Stream for the asynchronous invocations. + +The function converts image to CIELAB colorspace and then separately denoise L and AB components +with given h parameters using FastNonLocalMeansDenoising::simpleMethod function. + +@sa + fastNlMeansDenoisingColored + */ +CV_EXPORTS void fastNlMeansDenoisingColored(InputArray src, OutputArray dst, + float h_luminance, float photo_render, + int search_window = 21, + int block_size = 7, + Stream& stream = Stream::Null()); + +//! @} photo + +}} // namespace cv { namespace cuda { + +#endif /* OPENCV_PHOTO_CUDA_HPP */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/photo/photo.hpp b/third_party/opencv/uos/sw_64/include/opencv2/photo/photo.hpp new file mode 100644 index 00000000..8af5e9f0 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/photo/photo.hpp @@ -0,0 +1,48 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifdef __OPENCV_BUILD +#error this is a compatibility header which should not be used inside the OpenCV library +#endif + +#include "opencv2/photo.hpp" diff --git a/third_party/opencv/uos/sw_64/include/opencv2/photo/photo_c.h b/third_party/opencv/uos/sw_64/include/opencv2/photo/photo_c.h new file mode 100644 index 00000000..cd623c19 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/photo/photo_c.h @@ -0,0 +1,74 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_PHOTO_C_H +#define OPENCV_PHOTO_C_H + +#include "opencv2/core/core_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @addtogroup photo_c + @{ + */ + +/* Inpainting algorithms */ +enum InpaintingModes +{ + CV_INPAINT_NS =0, + CV_INPAINT_TELEA =1 +}; + + +/* Inpaints the selected region in the image */ +CVAPI(void) cvInpaint( const CvArr* src, const CvArr* inpaint_mask, + CvArr* dst, double inpaintRange, int flags ); + +/** @} */ + +#ifdef __cplusplus +} //extern "C" +#endif + +#endif //OPENCV_PHOTO_C_H diff --git a/third_party/opencv/uos/sw_64/include/opencv2/shape.hpp b/third_party/opencv/uos/sw_64/include/opencv2/shape.hpp new file mode 100644 index 00000000..f302b6bb --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/shape.hpp @@ -0,0 +1,57 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2012, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_SHAPE_HPP +#define OPENCV_SHAPE_HPP + +#include "opencv2/shape/emdL1.hpp" +#include "opencv2/shape/shape_transformer.hpp" +#include "opencv2/shape/hist_cost.hpp" +#include "opencv2/shape/shape_distance.hpp" + +/** + @defgroup shape Shape Distance and Matching + */ + +#endif + +/* End of file. */ diff --git a/third_party/opencv/uos/sw_64/include/opencv2/shape/emdL1.hpp b/third_party/opencv/uos/sw_64/include/opencv2/shape/emdL1.hpp new file mode 100644 index 00000000..a15d68c2 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/shape/emdL1.hpp @@ -0,0 +1,72 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2012, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef OPENCV_EMD_L1_HPP +#define OPENCV_EMD_L1_HPP + +#include "opencv2/core.hpp" + +namespace cv +{ +/****************************************************************************************\ +* EMDL1 Function * +\****************************************************************************************/ + +//! @addtogroup shape +//! @{ + +/** @brief Computes the "minimal work" distance between two weighted point configurations base on the papers +"EMD-L1: An efficient and Robust Algorithm for comparing histogram-based descriptors", by Haibin +Ling and Kazunori Okuda; and "The Earth Mover's Distance is the Mallows Distance: Some Insights from +Statistics", by Elizaveta Levina and Peter Bickel. + +@param signature1 First signature, a single column floating-point matrix. Each row is the value of +the histogram in each bin. +@param signature2 Second signature of the same format and size as signature1. + */ +CV_EXPORTS float EMDL1(InputArray signature1, InputArray signature2); + +//! @} + +}//namespace cv + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/shape/hist_cost.hpp b/third_party/opencv/uos/sw_64/include/opencv2/shape/hist_cost.hpp new file mode 100644 index 00000000..21d0d680 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/shape/hist_cost.hpp @@ -0,0 +1,111 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+//
+//M*/
+
+#ifndef OPENCV_HIST_COST_HPP
+#define OPENCV_HIST_COST_HPP
+
+#include "opencv2/imgproc.hpp"
+
+namespace cv
+{
+
+//! @addtogroup shape
+//! @{
+
+/** @brief Abstract base class for histogram cost algorithms.
+ */
+class CV_EXPORTS_W HistogramCostExtractor : public Algorithm
+{
+public:
+ CV_WRAP virtual void buildCostMatrix(InputArray descriptors1, InputArray descriptors2, OutputArray costMatrix) = 0;
+
+ CV_WRAP virtual void setNDummies(int nDummies) = 0;
+ CV_WRAP virtual int getNDummies() const = 0;
+
+ CV_WRAP virtual void setDefaultCost(float defaultCost) = 0;
+ CV_WRAP virtual float getDefaultCost() const = 0;
+};
+
+/** @brief A norm based cost extraction. :
+ */
+class CV_EXPORTS_W NormHistogramCostExtractor : public HistogramCostExtractor
+{
+public:
+ CV_WRAP virtual void setNormFlag(int flag) = 0;
+ CV_WRAP virtual int getNormFlag() const = 0;
+};
+
+CV_EXPORTS_W Ptr<HistogramCostExtractor>
+ createNormHistogramCostExtractor(int flag=DIST_L2, int nDummies=25, float defaultCost=0.2f);
+
+/** @brief An EMD based cost extraction. :
+ */
+class CV_EXPORTS_W EMDHistogramCostExtractor : public HistogramCostExtractor
+{
+public:
+ CV_WRAP virtual void setNormFlag(int flag) = 0;
+ CV_WRAP virtual int getNormFlag() const = 0;
+};
+
+CV_EXPORTS_W Ptr<HistogramCostExtractor>
+ createEMDHistogramCostExtractor(int flag=DIST_L2, int nDummies=25, float defaultCost=0.2f);
+
+/** @brief A Chi based cost extraction. :
+ */
+class CV_EXPORTS_W ChiHistogramCostExtractor : public HistogramCostExtractor
+{};
+
+CV_EXPORTS_W Ptr<HistogramCostExtractor> createChiHistogramCostExtractor(int nDummies=25, float defaultCost=0.2f);
+
+/** @brief An EMD-L1 based cost extraction. :
+ */
+class CV_EXPORTS_W EMDL1HistogramCostExtractor : public HistogramCostExtractor
+{};
+
+CV_EXPORTS_W Ptr<HistogramCostExtractor>
+ createEMDL1HistogramCostExtractor(int nDummies=25, float defaultCost=0.2f);
+
+//! @}
+
+} // cv
+#endif
diff --git a/third_party/opencv/uos/sw_64/include/opencv2/shape/shape.hpp b/third_party/opencv/uos/sw_64/include/opencv2/shape/shape.hpp
new file mode 100644
index 00000000..5c4da3ce
--- /dev/null
+++ b/third_party/opencv/uos/sw_64/include/opencv2/shape/shape.hpp
@@ -0,0 +1,48 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifdef __OPENCV_BUILD +#error this is a compatibility header which should not be used inside the OpenCV library +#endif + +#include "opencv2/shape.hpp" diff --git a/third_party/opencv/uos/sw_64/include/opencv2/shape/shape_distance.hpp b/third_party/opencv/uos/sw_64/include/opencv2/shape/shape_distance.hpp new file mode 100644 index 00000000..725b56ac --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/shape/shape_distance.hpp @@ -0,0 +1,227 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef OPENCV_SHAPE_SHAPE_DISTANCE_HPP +#define OPENCV_SHAPE_SHAPE_DISTANCE_HPP +#include "opencv2/core.hpp" +#include "opencv2/shape/hist_cost.hpp" +#include "opencv2/shape/shape_transformer.hpp" + +namespace cv +{ + +//! @addtogroup shape +//! @{ + +/** @example samples/cpp/shape_example.cpp +An example using shape distance algorithm +*/ +/** @brief Abstract base class for shape distance algorithms. + */ +class CV_EXPORTS_W ShapeDistanceExtractor : public Algorithm +{ +public: + /** @brief Compute the shape distance between two shapes defined by its contours. + + @param contour1 Contour defining first shape. + @param contour2 Contour defining second shape. + */ + CV_WRAP virtual float computeDistance(InputArray contour1, InputArray contour2) = 0; +}; + +/***********************************************************************************/ +/***********************************************************************************/ +/***********************************************************************************/ +/** @brief Implementation of the Shape Context descriptor and matching algorithm + +proposed by Belongie et al. in "Shape Matching and Object Recognition Using Shape Contexts" (PAMI +2002). This implementation is packaged in a generic scheme, in order to allow you the +implementation of the common variations of the original pipeline. +*/ +class CV_EXPORTS_W ShapeContextDistanceExtractor : public ShapeDistanceExtractor +{ +public: + /** @brief Establish the number of angular bins for the Shape Context Descriptor used in the shape matching + pipeline. + + @param nAngularBins The number of angular bins in the shape context descriptor. + */ + CV_WRAP virtual void setAngularBins(int nAngularBins) = 0; + CV_WRAP virtual int getAngularBins() const = 0; + + /** @brief Establish the number of radial bins for the Shape Context Descriptor used in the shape matching + pipeline. + + @param nRadialBins The number of radial bins in the shape context descriptor. + */ + CV_WRAP virtual void setRadialBins(int nRadialBins) = 0; + CV_WRAP virtual int getRadialBins() const = 0; + + /** @brief Set the inner radius of the shape context descriptor. + + @param innerRadius The value of the inner radius. + */ + CV_WRAP virtual void setInnerRadius(float innerRadius) = 0; + CV_WRAP virtual float getInnerRadius() const = 0; + + /** @brief Set the outer radius of the shape context descriptor. + + @param outerRadius The value of the outer radius. + */ + CV_WRAP virtual void setOuterRadius(float outerRadius) = 0; + CV_WRAP virtual float getOuterRadius() const = 0; + + CV_WRAP virtual void setRotationInvariant(bool rotationInvariant) = 0; + CV_WRAP virtual bool getRotationInvariant() const = 0; + + /** @brief Set the weight of the shape context distance in the final value of the shape distance. The shape + context distance between two shapes is defined as the symmetric sum of shape context matching costs + over best matching points. The final value of the shape distance is a user-defined linear + combination of the shape context distance, an image appearance distance, and a bending energy. + + @param shapeContextWeight The weight of the shape context distance in the final distance value. + */ + CV_WRAP virtual void setShapeContextWeight(float shapeContextWeight) = 0; + CV_WRAP virtual float getShapeContextWeight() const = 0; + + /** @brief Set the weight of the Image Appearance cost in the final value of the shape distance. 
The image + appearance cost is defined as the sum of squared brightness differences in Gaussian windows around + corresponding image points. The final value of the shape distance is a user-defined linear + combination of the shape context distance, an image appearance distance, and a bending energy. If + this value is set to a number different from 0, is mandatory to set the images that correspond to + each shape. + + @param imageAppearanceWeight The weight of the appearance cost in the final distance value. + */ + CV_WRAP virtual void setImageAppearanceWeight(float imageAppearanceWeight) = 0; + CV_WRAP virtual float getImageAppearanceWeight() const = 0; + + /** @brief Set the weight of the Bending Energy in the final value of the shape distance. The bending energy + definition depends on what transformation is being used to align the shapes. The final value of the + shape distance is a user-defined linear combination of the shape context distance, an image + appearance distance, and a bending energy. + + @param bendingEnergyWeight The weight of the Bending Energy in the final distance value. + */ + CV_WRAP virtual void setBendingEnergyWeight(float bendingEnergyWeight) = 0; + CV_WRAP virtual float getBendingEnergyWeight() const = 0; + + /** @brief Set the images that correspond to each shape. This images are used in the calculation of the Image + Appearance cost. + + @param image1 Image corresponding to the shape defined by contours1. + @param image2 Image corresponding to the shape defined by contours2. + */ + CV_WRAP virtual void setImages(InputArray image1, InputArray image2) = 0; + CV_WRAP virtual void getImages(OutputArray image1, OutputArray image2) const = 0; + + CV_WRAP virtual void setIterations(int iterations) = 0; + CV_WRAP virtual int getIterations() const = 0; + + /** @brief Set the algorithm used for building the shape context descriptor cost matrix. + + @param comparer Smart pointer to a HistogramCostExtractor, an algorithm that defines the cost + matrix between descriptors. + */ + CV_WRAP virtual void setCostExtractor(Ptr comparer) = 0; + CV_WRAP virtual Ptr getCostExtractor() const = 0; + + /** @brief Set the value of the standard deviation for the Gaussian window for the image appearance cost. + + @param sigma Standard Deviation. + */ + CV_WRAP virtual void setStdDev(float sigma) = 0; + CV_WRAP virtual float getStdDev() const = 0; + + /** @brief Set the algorithm used for aligning the shapes. + + @param transformer Smart pointer to a ShapeTransformer, an algorithm that defines the aligning + transformation. + */ + CV_WRAP virtual void setTransformAlgorithm(Ptr transformer) = 0; + CV_WRAP virtual Ptr getTransformAlgorithm() const = 0; +}; + +/* Complete constructor */ +CV_EXPORTS_W Ptr + createShapeContextDistanceExtractor(int nAngularBins=12, int nRadialBins=4, + float innerRadius=0.2f, float outerRadius=2, int iterations=3, + const Ptr &comparer = createChiHistogramCostExtractor(), + const Ptr &transformer = createThinPlateSplineShapeTransformer()); + +/***********************************************************************************/ +/***********************************************************************************/ +/***********************************************************************************/ +/** @brief A simple Hausdorff distance measure between shapes defined by contours + +according to the paper "Comparing Images using the Hausdorff distance." by D.P. Huttenlocher, G.A. +Klanderman, and W.J. Rucklidge. (PAMI 1993). 
: + */ +class CV_EXPORTS_W HausdorffDistanceExtractor : public ShapeDistanceExtractor +{ +public: + /** @brief Set the norm used to compute the Hausdorff value between two shapes. It can be L1 or L2 norm. + + @param distanceFlag Flag indicating which norm is used to compute the Hausdorff distance + (NORM_L1, NORM_L2). + */ + CV_WRAP virtual void setDistanceFlag(int distanceFlag) = 0; + CV_WRAP virtual int getDistanceFlag() const = 0; + + /** @brief This method sets the rank proportion (or fractional value) that establish the Kth ranked value of + the partial Hausdorff distance. Experimentally had been shown that 0.6 is a good value to compare + shapes. + + @param rankProportion fractional value (between 0 and 1). + */ + CV_WRAP virtual void setRankProportion(float rankProportion) = 0; + CV_WRAP virtual float getRankProportion() const = 0; +}; + +/* Constructor */ +CV_EXPORTS_W Ptr createHausdorffDistanceExtractor(int distanceFlag=cv::NORM_L2, float rankProp=0.6f); + +//! @} + +} // cv +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/shape/shape_transformer.hpp b/third_party/opencv/uos/sw_64/include/opencv2/shape/shape_transformer.hpp new file mode 100644 index 00000000..3c3ce20c --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/shape/shape_transformer.hpp @@ -0,0 +1,132 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+//
+//M*/
+
+#ifndef OPENCV_SHAPE_SHAPE_TRANSFORM_HPP
+#define OPENCV_SHAPE_SHAPE_TRANSFORM_HPP
+#include <vector>
+#include "opencv2/core.hpp"
+#include "opencv2/imgproc.hpp"
+
+namespace cv
+{
+
+//! @addtogroup shape
+//! @{
+
+/** @brief Abstract base class for shape transformation algorithms.
+ */
+class CV_EXPORTS_W ShapeTransformer : public Algorithm
+{
+public:
+ /** @brief Estimate the transformation parameters of the current transformer algorithm, based on point matches.
+
+ @param transformingShape Contour defining first shape.
+ @param targetShape Contour defining second shape (Target).
+ @param matches Standard vector of Matches between points.
+ */
+ CV_WRAP virtual void estimateTransformation(InputArray transformingShape, InputArray targetShape,
+ std::vector<DMatch>& matches) = 0;
+
+ /** @brief Apply a transformation, given a pre-estimated transformation parameters.
+
+ @param input Contour (set of points) to apply the transformation.
+ @param output Output contour.
+ */
+ CV_WRAP virtual float applyTransformation(InputArray input, OutputArray output=noArray()) = 0;
+
+ /** @brief Apply a transformation, given a pre-estimated transformation parameters, to an Image.
+
+ @param transformingImage Input image.
+ @param output Output image.
+ @param flags Image interpolation method.
+ @param borderMode border style.
+ @param borderValue border value.
+ */
+ CV_WRAP virtual void warpImage(InputArray transformingImage, OutputArray output,
+ int flags=INTER_LINEAR, int borderMode=BORDER_CONSTANT,
+ const Scalar& borderValue=Scalar()) const = 0;
+};
+
+/***********************************************************************************/
+/***********************************************************************************/
+
+/** @brief Definition of the transformation
+
+occupied in the paper "Principal Warps: Thin-Plate Splines and Decomposition of Deformations", by
+F.L. Bookstein (PAMI 1989). :
+ */
+class CV_EXPORTS_W ThinPlateSplineShapeTransformer : public ShapeTransformer
+{
+public:
+ /** @brief Set the regularization parameter for relaxing the exact interpolation requirements of the TPS
+ algorithm.
+
+ @param beta value of the regularization parameter.
+ */
+ CV_WRAP virtual void setRegularizationParameter(double beta) = 0;
+ CV_WRAP virtual double getRegularizationParameter() const = 0;
+};
+
+/** Complete constructor */
+CV_EXPORTS_W Ptr<ThinPlateSplineShapeTransformer>
+ createThinPlateSplineShapeTransformer(double regularizationParameter=0);
+
+/***********************************************************************************/
+/***********************************************************************************/
+
+/** @brief Wrapper class for the OpenCV Affine Transformation algorithm. :
+ */
+class CV_EXPORTS_W AffineTransformer : public ShapeTransformer
+{
+public:
+ CV_WRAP virtual void setFullAffine(bool fullAffine) = 0;
+ CV_WRAP virtual bool getFullAffine() const = 0;
+};
+
+/** Complete constructor */
+CV_EXPORTS_W Ptr<AffineTransformer> createAffineTransformer(bool fullAffine);
+
+//! @}
+
+} // cv
+#endif
diff --git a/third_party/opencv/uos/sw_64/include/opencv2/stitching.hpp b/third_party/opencv/uos/sw_64/include/opencv2/stitching.hpp
new file mode 100644
index 00000000..52789156
--- /dev/null
+++ b/third_party/opencv/uos/sw_64/include/opencv2/stitching.hpp
@@ -0,0 +1,329 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_STITCHING_STITCHER_HPP +#define OPENCV_STITCHING_STITCHER_HPP + +#include "opencv2/core.hpp" +#include "opencv2/features2d.hpp" +#include "opencv2/stitching/warpers.hpp" +#include "opencv2/stitching/detail/matchers.hpp" +#include "opencv2/stitching/detail/motion_estimators.hpp" +#include "opencv2/stitching/detail/exposure_compensate.hpp" +#include "opencv2/stitching/detail/seam_finders.hpp" +#include "opencv2/stitching/detail/blenders.hpp" +#include "opencv2/stitching/detail/camera.hpp" + + +#if defined(Status) +# warning Detected X11 'Status' macro definition, it can cause build conflicts. Please, include this header before any X11 headers. +#endif + + +/** +@defgroup stitching Images stitching + +This figure illustrates the stitching module pipeline implemented in the Stitcher class. Using that +class it's possible to configure/remove some steps, i.e. adjust the stitching pipeline according to +the particular needs. All building blocks from the pipeline are available in the detail namespace, +one can combine and use them separately. + +The implemented stitching pipeline is very similar to the one proposed in @cite BL07 . + +![stitching pipeline](StitchingPipeline.jpg) + +Camera models +------------- + +There are currently 2 camera models implemented in stitching pipeline. 
+ +- _Homography model_ expecting perspective transformations between images + implemented in @ref cv::detail::BestOf2NearestMatcher cv::detail::HomographyBasedEstimator + cv::detail::BundleAdjusterReproj cv::detail::BundleAdjusterRay +- _Affine model_ expecting affine transformation with 6 DOF or 4 DOF implemented in + @ref cv::detail::AffineBestOf2NearestMatcher cv::detail::AffineBasedEstimator + cv::detail::BundleAdjusterAffine cv::detail::BundleAdjusterAffinePartial cv::AffineWarper + +Homography model is useful for creating photo panoramas captured by camera, +while affine-based model can be used to stitch scans and object captured by +specialized devices. Use @ref cv::Stitcher::create to get preconfigured pipeline for one +of those models. + +@note +Certain detailed settings of @ref cv::Stitcher might not make sense. Especially +you should not mix classes implementing affine model and classes implementing +Homography model, as they work with different transformations. + +@{ + @defgroup stitching_match Features Finding and Images Matching + @defgroup stitching_rotation Rotation Estimation + @defgroup stitching_autocalib Autocalibration + @defgroup stitching_warp Images Warping + @defgroup stitching_seam Seam Estimation + @defgroup stitching_exposure Exposure Compensation + @defgroup stitching_blend Image Blenders +@} + */ + +namespace cv { + +//! @addtogroup stitching +//! @{ + +/** @example samples/cpp/stitching.cpp +A basic example on image stitching +*/ + +/** @example samples/cpp/stitching_detailed.cpp +A detailed example on image stitching +*/ + +/** @brief High level image stitcher. + +It's possible to use this class without being aware of the entire stitching pipeline. However, to +be able to achieve higher stitching stability and quality of the final images at least being +familiar with the theory is recommended. + +@note + - A basic example on image stitching can be found at + opencv_source_code/samples/cpp/stitching.cpp + - A detailed example on image stitching can be found at + opencv_source_code/samples/cpp/stitching_detailed.cpp + */ +class CV_EXPORTS_W Stitcher +{ +public: + enum { ORIG_RESOL = -1 }; + enum Status + { + OK = 0, + ERR_NEED_MORE_IMGS = 1, + ERR_HOMOGRAPHY_EST_FAIL = 2, + ERR_CAMERA_PARAMS_ADJUST_FAIL = 3 + }; + enum Mode + { + /** Mode for creating photo panoramas. Expects images under perspective + transformation and projects resulting pano to sphere. + + @sa detail::BestOf2NearestMatcher SphericalWarper + */ + PANORAMA = 0, + /** Mode for composing scans. Expects images under affine transformation does + not compensate exposure by default. + + @sa detail::AffineBestOf2NearestMatcher AffineWarper + */ + SCANS = 1, + + }; + + // Stitcher() {} + /** @brief Creates a stitcher with the default parameters. + + @param try_use_gpu Flag indicating whether GPU should be used whenever it's possible. + @return Stitcher class instance. + */ + static Stitcher createDefault(bool try_use_gpu = false); + /** @brief Creates a Stitcher configured in one of the stitching modes. + + @param mode Scenario for stitcher operation. This is usually determined by source of images + to stitch and their transformation. Default parameters will be chosen for operation in given + scenario. + @param try_use_gpu Flag indicating whether GPU should be used whenever it's possible. + @return Stitcher class instance. 
+ */ + static Ptr create(Mode mode = PANORAMA, bool try_use_gpu = false); + + CV_WRAP double registrationResol() const { return registr_resol_; } + CV_WRAP void setRegistrationResol(double resol_mpx) { registr_resol_ = resol_mpx; } + + CV_WRAP double seamEstimationResol() const { return seam_est_resol_; } + CV_WRAP void setSeamEstimationResol(double resol_mpx) { seam_est_resol_ = resol_mpx; } + + CV_WRAP double compositingResol() const { return compose_resol_; } + CV_WRAP void setCompositingResol(double resol_mpx) { compose_resol_ = resol_mpx; } + + CV_WRAP double panoConfidenceThresh() const { return conf_thresh_; } + CV_WRAP void setPanoConfidenceThresh(double conf_thresh) { conf_thresh_ = conf_thresh; } + + CV_WRAP bool waveCorrection() const { return do_wave_correct_; } + CV_WRAP void setWaveCorrection(bool flag) { do_wave_correct_ = flag; } + + detail::WaveCorrectKind waveCorrectKind() const { return wave_correct_kind_; } + void setWaveCorrectKind(detail::WaveCorrectKind kind) { wave_correct_kind_ = kind; } + + Ptr featuresFinder() { return features_finder_; } + const Ptr featuresFinder() const { return features_finder_; } + void setFeaturesFinder(Ptr features_finder) + { features_finder_ = features_finder; } + + Ptr featuresMatcher() { return features_matcher_; } + const Ptr featuresMatcher() const { return features_matcher_; } + void setFeaturesMatcher(Ptr features_matcher) + { features_matcher_ = features_matcher; } + + const cv::UMat& matchingMask() const { return matching_mask_; } + void setMatchingMask(const cv::UMat &mask) + { + CV_Assert(mask.type() == CV_8U && mask.cols == mask.rows); + matching_mask_ = mask.clone(); + } + + Ptr bundleAdjuster() { return bundle_adjuster_; } + const Ptr bundleAdjuster() const { return bundle_adjuster_; } + void setBundleAdjuster(Ptr bundle_adjuster) + { bundle_adjuster_ = bundle_adjuster; } + + /* TODO OpenCV ABI 4.x + Ptr estimator() { return estimator_; } + const Ptr estimator() const { return estimator_; } + void setEstimator(Ptr estimator) + { estimator_ = estimator; } + */ + + Ptr warper() { return warper_; } + const Ptr warper() const { return warper_; } + void setWarper(Ptr creator) { warper_ = creator; } + + Ptr exposureCompensator() { return exposure_comp_; } + const Ptr exposureCompensator() const { return exposure_comp_; } + void setExposureCompensator(Ptr exposure_comp) + { exposure_comp_ = exposure_comp; } + + Ptr seamFinder() { return seam_finder_; } + const Ptr seamFinder() const { return seam_finder_; } + void setSeamFinder(Ptr seam_finder) { seam_finder_ = seam_finder; } + + Ptr blender() { return blender_; } + const Ptr blender() const { return blender_; } + void setBlender(Ptr b) { blender_ = b; } + + /** @overload */ + CV_WRAP Status estimateTransform(InputArrayOfArrays images); + /** @brief These functions try to match the given images and to estimate rotations of each camera. + + @note Use the functions only if you're aware of the stitching pipeline, otherwise use + Stitcher::stitch. + + @param images Input images. + @param rois Region of interest rectangles. + @return Status code. + */ + Status estimateTransform(InputArrayOfArrays images, const std::vector > &rois); + + /** @overload */ + CV_WRAP Status composePanorama(OutputArray pano); + /** @brief These functions try to compose the given images (or images stored internally from the other function + calls) into the final pano under the assumption that the image transformations were estimated + before. 
+ + @note Use the functions only if you're aware of the stitching pipeline, otherwise use + Stitcher::stitch. + + @param images Input images. + @param pano Final pano. + @return Status code. + */ + CV_WRAP Status composePanorama(InputArrayOfArrays images, OutputArray pano); + + /** @overload */ + CV_WRAP Status stitch(InputArrayOfArrays images, OutputArray pano); + /** @brief These functions try to stitch the given images. + + @param images Input images. + @param rois Region of interest rectangles. + @param pano Final pano. + @return Status code. + */ + Status stitch(InputArrayOfArrays images, const std::vector > &rois, OutputArray pano); + + std::vector component() const { return indices_; } + std::vector cameras() const { return cameras_; } + CV_WRAP double workScale() const { return work_scale_; } + +private: + //Stitcher() {} + + Status matchImages(); + Status estimateCameraParams(); + + double registr_resol_; + double seam_est_resol_; + double compose_resol_; + double conf_thresh_; + Ptr features_finder_; + Ptr features_matcher_; + cv::UMat matching_mask_; + Ptr bundle_adjuster_; + /* TODO OpenCV ABI 4.x + Ptr estimator_; + */ + bool do_wave_correct_; + detail::WaveCorrectKind wave_correct_kind_; + Ptr warper_; + Ptr exposure_comp_; + Ptr seam_finder_; + Ptr blender_; + + std::vector imgs_; + std::vector > rois_; + std::vector full_img_sizes_; + std::vector features_; + std::vector pairwise_matches_; + std::vector seam_est_imgs_; + std::vector indices_; + std::vector cameras_; + double work_scale_; + double seam_scale_; + double seam_work_aspect_; + double warped_image_scale_; +}; + +CV_EXPORTS_W Ptr createStitcher(bool try_use_gpu = false); +CV_EXPORTS_W Ptr createStitcherScans(bool try_use_gpu = false); + +//! @} stitching + +} // namespace cv + +#endif // OPENCV_STITCHING_STITCHER_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/autocalib.hpp b/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/autocalib.hpp new file mode 100644 index 00000000..19705e27 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/autocalib.hpp @@ -0,0 +1,86 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef OPENCV_STITCHING_AUTOCALIB_HPP
+#define OPENCV_STITCHING_AUTOCALIB_HPP
+
+#include "opencv2/core.hpp"
+#include "matchers.hpp"
+
+namespace cv {
+namespace detail {
+
+//! @addtogroup stitching_autocalib
+//! @{
+
+/** @brief Tries to estimate focal lengths from the given homography under the assumption that the camera
+undergoes rotations around its centre only.
+
+@param H Homography.
+@param f0 Estimated focal length along X axis.
+@param f1 Estimated focal length along Y axis.
+@param f0_ok True, if f0 was estimated successfully, false otherwise.
+@param f1_ok True, if f1 was estimated successfully, false otherwise.
+
+See "Construction of Panoramic Image Mosaics with Global and Local Alignment"
+by Heung-Yeung Shum and Richard Szeliski.
+ */
+void CV_EXPORTS focalsFromHomography(const Mat &H, double &f0, double &f1, bool &f0_ok, bool &f1_ok);
+
+/** @brief Estimates focal lengths for each given camera.
+
+@param features Features of images.
+@param pairwise_matches Matches between all image pairs.
+@param focals Estimated focal lengths for each camera.
+ */
+void CV_EXPORTS estimateFocal(const std::vector<ImageFeatures> &features,
+ const std::vector<MatchesInfo> &pairwise_matches,
+ std::vector<double> &focals);
+
+bool CV_EXPORTS calibrateRotatingCamera(const std::vector<Mat> &Hs, Mat &K);
+
+//! @} stitching_autocalib
+
+} // namespace detail
+} // namespace cv
+
+#endif // OPENCV_STITCHING_AUTOCALIB_HPP
diff --git a/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/blenders.hpp b/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/blenders.hpp
new file mode 100644
index 00000000..542f1e42
--- /dev/null
+++ b/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/blenders.hpp
@@ -0,0 +1,184 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_STITCHING_BLENDERS_HPP +#define OPENCV_STITCHING_BLENDERS_HPP + +#if defined(NO) +# warning Detected Apple 'NO' macro definition, it can cause build conflicts. Please, include this header before any Apple headers. +#endif + +#include "opencv2/core.hpp" +#include "opencv2/core/cuda.hpp" + +namespace cv { +namespace detail { + +//! @addtogroup stitching_blend +//! @{ + +/** @brief Base class for all blenders. + +Simple blender which puts one image over another +*/ +class CV_EXPORTS Blender +{ +public: + virtual ~Blender() {} + + enum { NO, FEATHER, MULTI_BAND }; + static Ptr createDefault(int type, bool try_gpu = false); + + /** @brief Prepares the blender for blending. + + @param corners Source images top-left corners + @param sizes Source image sizes + */ + void prepare(const std::vector &corners, const std::vector &sizes); + /** @overload */ + virtual void prepare(Rect dst_roi); + /** @brief Processes the image. + + @param img Source image + @param mask Source image mask + @param tl Source image top-left corners + */ + virtual void feed(InputArray img, InputArray mask, Point tl); + /** @brief Blends and returns the final pano. + + @param dst Final pano + @param dst_mask Final pano mask + */ + virtual void blend(InputOutputArray dst, InputOutputArray dst_mask); + +protected: + UMat dst_, dst_mask_; + Rect dst_roi_; +}; + +/** @brief Simple blender which mixes images at its borders. + */ +class CV_EXPORTS FeatherBlender : public Blender +{ +public: + FeatherBlender(float sharpness = 0.02f); + + float sharpness() const { return sharpness_; } + void setSharpness(float val) { sharpness_ = val; } + + void prepare(Rect dst_roi) CV_OVERRIDE; + void feed(InputArray img, InputArray mask, Point tl) CV_OVERRIDE; + void blend(InputOutputArray dst, InputOutputArray dst_mask) CV_OVERRIDE; + + //! Creates weight maps for fixed set of source images by their masks and top-left corners. + //! Final image can be obtained by simple weighting of the source images. + Rect createWeightMaps(const std::vector &masks, const std::vector &corners, + std::vector &weight_maps); + +private: + float sharpness_; + UMat weight_map_; + UMat dst_weight_map_; +}; + +inline FeatherBlender::FeatherBlender(float _sharpness) { setSharpness(_sharpness); } + +/** @brief Blender which uses multi-band blending algorithm (see @cite BA83). 
+ */ +class CV_EXPORTS MultiBandBlender : public Blender +{ +public: + MultiBandBlender(int try_gpu = false, int num_bands = 5, int weight_type = CV_32F); + + int numBands() const { return actual_num_bands_; } + void setNumBands(int val) { actual_num_bands_ = val; } + + void prepare(Rect dst_roi) CV_OVERRIDE; + void feed(InputArray img, InputArray mask, Point tl) CV_OVERRIDE; + void blend(InputOutputArray dst, InputOutputArray dst_mask) CV_OVERRIDE; + +private: + int actual_num_bands_, num_bands_; + std::vector dst_pyr_laplace_; + std::vector dst_band_weights_; + Rect dst_roi_final_; + bool can_use_gpu_; + int weight_type_; //CV_32F or CV_16S +#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING) + std::vector gpu_dst_pyr_laplace_; + std::vector gpu_dst_band_weights_; + std::vector gpu_tl_points_; + std::vector gpu_imgs_with_border_; + std::vector > gpu_weight_pyr_gauss_vec_; + std::vector > gpu_src_pyr_laplace_vec_; + std::vector > gpu_ups_; + cuda::GpuMat gpu_dst_mask_; + cuda::GpuMat gpu_mask_; + cuda::GpuMat gpu_img_; + cuda::GpuMat gpu_weight_map_; + cuda::GpuMat gpu_add_mask_; + int gpu_feed_idx_; + bool gpu_initialized_; +#endif +}; + + +////////////////////////////////////////////////////////////////////////////// +// Auxiliary functions + +void CV_EXPORTS normalizeUsingWeightMap(InputArray weight, InputOutputArray src); + +void CV_EXPORTS createWeightMap(InputArray mask, float sharpness, InputOutputArray weight); + +void CV_EXPORTS createLaplacePyr(InputArray img, int num_levels, std::vector& pyr); +void CV_EXPORTS createLaplacePyrGpu(InputArray img, int num_levels, std::vector& pyr); + +// Restores source image +void CV_EXPORTS restoreImageFromLaplacePyr(std::vector& pyr); +void CV_EXPORTS restoreImageFromLaplacePyrGpu(std::vector& pyr); + +//! @} + +} // namespace detail +} // namespace cv + +#endif // OPENCV_STITCHING_BLENDERS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/camera.hpp b/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/camera.hpp new file mode 100644 index 00000000..07c6b5b0 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/camera.hpp @@ -0,0 +1,78 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_STITCHING_CAMERA_HPP +#define OPENCV_STITCHING_CAMERA_HPP + +#include "opencv2/core.hpp" + +namespace cv { +namespace detail { + +//! @addtogroup stitching +//! @{ + +/** @brief Describes camera parameters. + +@note Translation is assumed to be zero during the whole stitching pipeline. : + */ +struct CV_EXPORTS CameraParams +{ + CameraParams(); + CameraParams(const CameraParams& other); + CameraParams& operator =(const CameraParams& other); + Mat K() const; + + double focal; // Focal length + double aspect; // Aspect ratio + double ppx; // Principal point X + double ppy; // Principal point Y + Mat R; // Rotation + Mat t; // Translation +}; + +//! @} + +} // namespace detail +} // namespace cv + +#endif // #ifndef OPENCV_STITCHING_CAMERA_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/exposure_compensate.hpp b/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/exposure_compensate.hpp new file mode 100644 index 00000000..6c99407c --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/exposure_compensate.hpp @@ -0,0 +1,136 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
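As a quick orientation for the CameraParams struct above, K() assembles the 3x3 intrinsic matrix from focal, aspect and the principal point. A small hedged sketch (values are placeholders):

#include "opencv2/stitching/detail/camera.hpp"

// Illustrative only: fill in camera parameters by hand and query the intrinsics.
cv::Mat exampleIntrinsics()
{
    cv::detail::CameraParams p;
    p.focal  = 800.0;   // focal length in pixels
    p.aspect = 1.0;     // fy = focal * aspect
    p.ppx    = 640.0;   // principal point x
    p.ppy    = 360.0;   // principal point y
    return p.K();       // 3x3 CV_64F intrinsic matrix
}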
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_STITCHING_EXPOSURE_COMPENSATE_HPP +#define OPENCV_STITCHING_EXPOSURE_COMPENSATE_HPP + +#if defined(NO) +# warning Detected Apple 'NO' macro definition, it can cause build conflicts. Please, include this header before any Apple headers. +#endif + +#include "opencv2/core.hpp" + +namespace cv { +namespace detail { + +//! @addtogroup stitching_exposure +//! @{ + +/** @brief Base class for all exposure compensators. + */ +class CV_EXPORTS ExposureCompensator +{ +public: + virtual ~ExposureCompensator() {} + + enum { NO, GAIN, GAIN_BLOCKS }; + static Ptr createDefault(int type); + + /** + @param corners Source image top-left corners + @param images Source images + @param masks Image masks to update (second value in pair specifies the value which should be used + to detect where image is) + */ + void feed(const std::vector &corners, const std::vector &images, + const std::vector &masks); + /** @overload */ + virtual void feed(const std::vector &corners, const std::vector &images, + const std::vector > &masks) = 0; + /** @brief Compensate exposure in the specified image. + + @param index Image index + @param corner Image top-left corner + @param image Image to process + @param mask Image mask + */ + virtual void apply(int index, Point corner, InputOutputArray image, InputArray mask) = 0; +}; + +/** @brief Stub exposure compensator which does nothing. + */ +class CV_EXPORTS NoExposureCompensator : public ExposureCompensator +{ +public: + void feed(const std::vector &/*corners*/, const std::vector &/*images*/, + const std::vector > &/*masks*/) CV_OVERRIDE { } + void apply(int /*index*/, Point /*corner*/, InputOutputArray /*image*/, InputArray /*mask*/) CV_OVERRIDE { } +}; + +/** @brief Exposure compensator which tries to remove exposure related artifacts by adjusting image +intensities, see @cite BL07 and @cite WJ10 for details. + */ +class CV_EXPORTS GainCompensator : public ExposureCompensator +{ +public: + void feed(const std::vector &corners, const std::vector &images, + const std::vector > &masks) CV_OVERRIDE; + void apply(int index, Point corner, InputOutputArray image, InputArray mask) CV_OVERRIDE; + std::vector gains() const; + +private: + Mat_ gains_; +}; + +/** @brief Exposure compensator which tries to remove exposure related artifacts by adjusting image block +intensities, see @cite UES01 for details. + */ +class CV_EXPORTS BlocksGainCompensator : public ExposureCompensator +{ +public: + BlocksGainCompensator(int bl_width = 32, int bl_height = 32) + : bl_width_(bl_width), bl_height_(bl_height) {} + void feed(const std::vector &corners, const std::vector &images, + const std::vector > &masks) CV_OVERRIDE; + void apply(int index, Point corner, InputOutputArray image, InputArray mask) CV_OVERRIDE; + +private: + int bl_width_, bl_height_; + std::vector gain_maps_; +}; + +//! 
@} + +} // namespace detail +} // namespace cv + +#endif // OPENCV_STITCHING_EXPOSURE_COMPENSATE_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/matchers.hpp b/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/matchers.hpp new file mode 100644 index 00000000..560af28d --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/matchers.hpp @@ -0,0 +1,370 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_STITCHING_MATCHERS_HPP +#define OPENCV_STITCHING_MATCHERS_HPP + +#include "opencv2/core.hpp" +#include "opencv2/features2d.hpp" + +#include "opencv2/opencv_modules.hpp" + +#ifdef HAVE_OPENCV_XFEATURES2D +# include "opencv2/xfeatures2d/cuda.hpp" +#endif + +namespace cv { +namespace detail { + +//! @addtogroup stitching_match +//! @{ + +/** @brief Structure containing image keypoints and descriptors. */ +struct CV_EXPORTS ImageFeatures +{ + int img_idx; + Size img_size; + std::vector keypoints; + UMat descriptors; +}; + +/** @brief Feature finders base class */ +class CV_EXPORTS FeaturesFinder +{ +public: + virtual ~FeaturesFinder() {} + /** @overload */ + void operator ()(InputArray image, ImageFeatures &features); + /** @brief Finds features in the given image. 
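A hedged sketch of how the exposure compensator declared in exposure_compensate.hpp above is typically driven; the helper name compensateAll is hypothetical, and images and masks are assumed to be the per-image UMats produced earlier in the stitching pipeline.

#include <vector>
#include "opencv2/stitching/detail/exposure_compensate.hpp"

// Illustrative only: estimate per-image gains, then compensate each image in place.
void compensateAll(const std::vector<cv::Point> &corners,
                   std::vector<cv::UMat> &images,
                   const std::vector<cv::UMat> &masks)
{
    cv::Ptr<cv::detail::ExposureCompensator> comp =
        cv::detail::ExposureCompensator::createDefault(cv::detail::ExposureCompensator::GAIN);

    comp->feed(corners, images, masks);   // estimates gains from overlapping regions

    for (size_t i = 0; i < images.size(); ++i)
        comp->apply(static_cast<int>(i), corners[i], images[i], masks[i]);
}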
+ + @param image Source image + @param features Found features + @param rois Regions of interest + + @sa detail::ImageFeatures, Rect_ + */ + void operator ()(InputArray image, ImageFeatures &features, const std::vector &rois); + /** @brief Finds features in the given images in parallel. + + @param images Source images + @param features Found features for each image + @param rois Regions of interest for each image + + @sa detail::ImageFeatures, Rect_ + */ + void operator ()(InputArrayOfArrays images, std::vector &features, + const std::vector > &rois); + /** @overload */ + void operator ()(InputArrayOfArrays images, std::vector &features); + /** @brief Frees unused memory allocated before if there is any. */ + virtual void collectGarbage() {} + + /* TODO OpenCV ABI 4.x + reimplement this as public method similar to FeaturesMatcher and remove private function hack + @return True, if it's possible to use the same finder instance in parallel, false otherwise + bool isThreadSafe() const { return is_thread_safe_; } + */ + +protected: + /** @brief This method must implement features finding logic in order to make the wrappers + detail::FeaturesFinder::operator()_ work. + + @param image Source image + @param features Found features + + @sa detail::ImageFeatures */ + virtual void find(InputArray image, ImageFeatures &features) = 0; + /** @brief uses dynamic_cast to determine thread-safety + @return True, if it's possible to use the same finder instance in parallel, false otherwise + */ + bool isThreadSafe() const; +}; + +/** @brief SURF features finder. + +@sa detail::FeaturesFinder, SURF +*/ +class CV_EXPORTS SurfFeaturesFinder : public FeaturesFinder +{ +public: + SurfFeaturesFinder(double hess_thresh = 300., int num_octaves = 3, int num_layers = 4, + int num_octaves_descr = /*4*/3, int num_layers_descr = /*2*/4); + +private: + void find(InputArray image, ImageFeatures &features) CV_OVERRIDE; + + Ptr detector_; + Ptr extractor_; + Ptr surf; +}; + + +/** @brief SIFT features finder. + +@sa detail::FeaturesFinder, SIFT +*/ +class CV_EXPORTS SiftFeaturesFinder : public FeaturesFinder +{ +public: + SiftFeaturesFinder(); + +private: + void find(InputArray image, ImageFeatures &features) CV_OVERRIDE; + Ptr sift; +}; + +/** @brief ORB features finder. : + +@sa detail::FeaturesFinder, ORB +*/ +class CV_EXPORTS OrbFeaturesFinder : public FeaturesFinder +{ +public: + OrbFeaturesFinder(Size _grid_size = Size(3,1), int nfeatures=1500, float scaleFactor=1.3f, int nlevels=5); + +private: + void find(InputArray image, ImageFeatures &features) CV_OVERRIDE; + + Ptr orb; + Size grid_size; +}; + +/** @brief AKAZE features finder. 
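For the feature finders above, a minimal sketch using the ORB-based finder (the one that does not require the xfeatures2d contrib module). The helper name findOrbFeatures is hypothetical; an 8-bit input image is assumed.

#include "opencv2/stitching/detail/matchers.hpp"

// Illustrative only: extract keypoints and descriptors for one image.
cv::detail::ImageFeatures findOrbFeatures(const cv::Mat &img, int img_idx)
{
    cv::detail::OrbFeaturesFinder finder;   // grid-based ORB, default parameters
    cv::detail::ImageFeatures features;
    finder(img, features);                  // wrapper around the protected find()
    features.img_idx = img_idx;
    return features;
}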
: + +@sa detail::FeaturesFinder, AKAZE +*/ +class CV_EXPORTS AKAZEFeaturesFinder : public detail::FeaturesFinder +{ +public: + AKAZEFeaturesFinder(int descriptor_type = AKAZE::DESCRIPTOR_MLDB, + int descriptor_size = 0, + int descriptor_channels = 3, + float threshold = 0.001f, + int nOctaves = 4, + int nOctaveLayers = 4, + int diffusivity = KAZE::DIFF_PM_G2); + +private: + void find(InputArray image, ImageFeatures &features) CV_OVERRIDE; + + Ptr akaze; +}; + +#ifdef HAVE_OPENCV_XFEATURES2D +class CV_EXPORTS SurfFeaturesFinderGpu : public FeaturesFinder +{ +public: + SurfFeaturesFinderGpu(double hess_thresh = 300., int num_octaves = 3, int num_layers = 4, + int num_octaves_descr = 4, int num_layers_descr = 2); + + void collectGarbage() CV_OVERRIDE; + +private: + void find(InputArray image, ImageFeatures &features) CV_OVERRIDE; + + cuda::GpuMat image_; + cuda::GpuMat gray_image_; + cuda::SURF_CUDA surf_; + cuda::GpuMat keypoints_; + cuda::GpuMat descriptors_; + int num_octaves_, num_layers_; + int num_octaves_descr_, num_layers_descr_; +}; +#endif + +/** @brief Structure containing information about matches between two images. + +It's assumed that there is a transformation between those images. Transformation may be +homography or affine transformation based on selected matcher. + +@sa detail::FeaturesMatcher +*/ +struct CV_EXPORTS MatchesInfo +{ + MatchesInfo(); + MatchesInfo(const MatchesInfo &other); + MatchesInfo& operator =(const MatchesInfo &other); + + int src_img_idx, dst_img_idx; //!< Images indices (optional) + std::vector matches; + std::vector inliers_mask; //!< Geometrically consistent matches mask + int num_inliers; //!< Number of geometrically consistent matches + Mat H; //!< Estimated transformation + double confidence; //!< Confidence two images are from the same panorama +}; + +/** @brief Feature matchers base class. */ +class CV_EXPORTS FeaturesMatcher +{ +public: + virtual ~FeaturesMatcher() {} + + /** @overload + @param features1 First image features + @param features2 Second image features + @param matches_info Found matches + */ + void operator ()(const ImageFeatures &features1, const ImageFeatures &features2, + MatchesInfo& matches_info) { match(features1, features2, matches_info); } + + /** @brief Performs images matching. + + @param features Features of the source images + @param pairwise_matches Found pairwise matches + @param mask Mask indicating which image pairs must be matched + + The function is parallelized with the TBB library. + + @sa detail::MatchesInfo + */ + void operator ()(const std::vector &features, std::vector &pairwise_matches, + const cv::UMat &mask = cv::UMat()); + + /** @return True, if it's possible to use the same matcher instance in parallel, false otherwise + */ + bool isThreadSafe() const { return is_thread_safe_; } + + /** @brief Frees unused memory allocated before if there is any. + */ + virtual void collectGarbage() {} + +protected: + FeaturesMatcher(bool is_thread_safe = false) : is_thread_safe_(is_thread_safe) {} + + /** @brief This method must implement matching logic in order to make the wrappers + detail::FeaturesMatcher::operator()_ work. 
+ + @param features1 first image features + @param features2 second image features + @param matches_info found matches + */ + virtual void match(const ImageFeatures &features1, const ImageFeatures &features2, + MatchesInfo& matches_info) = 0; + + bool is_thread_safe_; +}; + +/** @brief Features matcher which finds two best matches for each feature and leaves the best one only if the +ratio between descriptor distances is greater than the threshold match_conf + +@sa detail::FeaturesMatcher + */ +class CV_EXPORTS BestOf2NearestMatcher : public FeaturesMatcher +{ +public: + /** @brief Constructs a "best of 2 nearest" matcher. + + @param try_use_gpu Should try to use GPU or not + @param match_conf Match distances ration threshold + @param num_matches_thresh1 Minimum number of matches required for the 2D projective transform + estimation used in the inliers classification step + @param num_matches_thresh2 Minimum number of matches required for the 2D projective transform + re-estimation on inliers + */ + BestOf2NearestMatcher(bool try_use_gpu = false, float match_conf = 0.3f, int num_matches_thresh1 = 6, + int num_matches_thresh2 = 6); + + void collectGarbage() CV_OVERRIDE; + +protected: + void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo &matches_info) CV_OVERRIDE; + + int num_matches_thresh1_; + int num_matches_thresh2_; + Ptr impl_; +}; + +class CV_EXPORTS BestOf2NearestRangeMatcher : public BestOf2NearestMatcher +{ +public: + BestOf2NearestRangeMatcher(int range_width = 5, bool try_use_gpu = false, float match_conf = 0.3f, + int num_matches_thresh1 = 6, int num_matches_thresh2 = 6); + + void operator ()(const std::vector &features, std::vector &pairwise_matches, + const cv::UMat &mask = cv::UMat()); + + +protected: + int range_width_; +}; + +/** @brief Features matcher similar to cv::detail::BestOf2NearestMatcher which +finds two best matches for each feature and leaves the best one only if the +ratio between descriptor distances is greater than the threshold match_conf. + +Unlike cv::detail::BestOf2NearestMatcher this matcher uses affine +transformation (affine transformation estimate will be placed in matches_info). + +@sa cv::detail::FeaturesMatcher cv::detail::BestOf2NearestMatcher + */ +class CV_EXPORTS AffineBestOf2NearestMatcher : public BestOf2NearestMatcher +{ +public: + /** @brief Constructs a "best of 2 nearest" matcher that expects affine transformation + between images + + @param full_affine whether to use full affine transformation with 6 degress of freedom or reduced + transformation with 4 degrees of freedom using only rotation, translation and uniform scaling + @param try_use_gpu Should try to use GPU or not + @param match_conf Match distances ration threshold + @param num_matches_thresh1 Minimum number of matches required for the 2D affine transform + estimation used in the inliers classification step + + @sa cv::estimateAffine2D cv::estimateAffinePartial2D + */ + AffineBestOf2NearestMatcher(bool full_affine = false, bool try_use_gpu = false, + float match_conf = 0.3f, int num_matches_thresh1 = 6) : + BestOf2NearestMatcher(try_use_gpu, match_conf, num_matches_thresh1, num_matches_thresh1), + full_affine_(full_affine) {} + +protected: + void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo &matches_info) CV_OVERRIDE; + + bool full_affine_; +}; + +//! 
@} stitching_match + +} // namespace detail +} // namespace cv + +#endif // OPENCV_STITCHING_MATCHERS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/motion_estimators.hpp b/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/motion_estimators.hpp new file mode 100644 index 00000000..40f12c32 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/motion_estimators.hpp @@ -0,0 +1,359 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_STITCHING_MOTION_ESTIMATORS_HPP +#define OPENCV_STITCHING_MOTION_ESTIMATORS_HPP + +#include "opencv2/core.hpp" +#include "matchers.hpp" +#include "util.hpp" +#include "camera.hpp" + +namespace cv { +namespace detail { + +//! @addtogroup stitching_rotation +//! @{ + +/** @brief Rotation estimator base class. + +It takes features of all images, pairwise matches between all images and estimates rotations of all +cameras. + +@note The coordinate system origin is implementation-dependent, but you can always normalize the +rotations in respect to the first camera, for instance. : + */ +class CV_EXPORTS Estimator +{ +public: + virtual ~Estimator() {} + + /** @brief Estimates camera parameters. 
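A hedged sketch of pairwise matching with the matcher declared in matchers.hpp above; the helper name matchAll is hypothetical, and the features are assumed to come from one of the finders in the same header.

#include <vector>
#include "opencv2/stitching/detail/matchers.hpp"

// Illustrative only: match all image pairs, then release the matcher's temporaries.
std::vector<cv::detail::MatchesInfo>
matchAll(const std::vector<cv::detail::ImageFeatures> &features)
{
    cv::detail::BestOf2NearestMatcher matcher(false /*try_use_gpu*/, 0.3f /*match_conf*/);
    std::vector<cv::detail::MatchesInfo> pairwise_matches;
    matcher(features, pairwise_matches);   // parallelized over image pairs
    matcher.collectGarbage();
    return pairwise_matches;
}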
+ + @param features Features of images + @param pairwise_matches Pairwise matches of images + @param cameras Estimated camera parameters + @return True in case of success, false otherwise + */ + bool operator ()(const std::vector &features, + const std::vector &pairwise_matches, + std::vector &cameras) + { return estimate(features, pairwise_matches, cameras); } + +protected: + /** @brief This method must implement camera parameters estimation logic in order to make the wrapper + detail::Estimator::operator()_ work. + + @param features Features of images + @param pairwise_matches Pairwise matches of images + @param cameras Estimated camera parameters + @return True in case of success, false otherwise + */ + virtual bool estimate(const std::vector &features, + const std::vector &pairwise_matches, + std::vector &cameras) = 0; +}; + +/** @brief Homography based rotation estimator. + */ +class CV_EXPORTS HomographyBasedEstimator : public Estimator +{ +public: + HomographyBasedEstimator(bool is_focals_estimated = false) + : is_focals_estimated_(is_focals_estimated) {} + +private: + virtual bool estimate(const std::vector &features, + const std::vector &pairwise_matches, + std::vector &cameras) CV_OVERRIDE; + + bool is_focals_estimated_; +}; + +/** @brief Affine transformation based estimator. + +This estimator uses pairwise transformations estimated by matcher to estimate +final transformation for each camera. + +@sa cv::detail::HomographyBasedEstimator + */ +class CV_EXPORTS AffineBasedEstimator : public Estimator +{ +private: + virtual bool estimate(const std::vector &features, + const std::vector &pairwise_matches, + std::vector &cameras) CV_OVERRIDE; +}; + +/** @brief Base class for all camera parameters refinement methods. + */ +class CV_EXPORTS BundleAdjusterBase : public Estimator +{ +public: + const Mat refinementMask() const { return refinement_mask_.clone(); } + void setRefinementMask(const Mat &mask) + { + CV_Assert(mask.type() == CV_8U && mask.size() == Size(3, 3)); + refinement_mask_ = mask.clone(); + } + + double confThresh() const { return conf_thresh_; } + void setConfThresh(double conf_thresh) { conf_thresh_ = conf_thresh; } + + TermCriteria termCriteria() { return term_criteria_; } + void setTermCriteria(const TermCriteria& term_criteria) { term_criteria_ = term_criteria; } + +protected: + /** @brief Construct a bundle adjuster base instance. + + @param num_params_per_cam Number of parameters per camera + @param num_errs_per_measurement Number of error terms (components) per match + */ + BundleAdjusterBase(int num_params_per_cam, int num_errs_per_measurement) + : num_images_(0), total_num_matches_(0), + num_params_per_cam_(num_params_per_cam), + num_errs_per_measurement_(num_errs_per_measurement), + features_(0), pairwise_matches_(0), conf_thresh_(0) + { + setRefinementMask(Mat::ones(3, 3, CV_8U)); + setConfThresh(1.); + setTermCriteria(TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 1000, DBL_EPSILON)); + } + + // Runs bundle adjustment + virtual bool estimate(const std::vector &features, + const std::vector &pairwise_matches, + std::vector &cameras) CV_OVERRIDE; + + /** @brief Sets initial camera parameter to refine. + + @param cameras Camera parameters + */ + virtual void setUpInitialCameraParams(const std::vector &cameras) = 0; + /** @brief Gets the refined camera parameters. + + @param cameras Refined camera parameters + */ + virtual void obtainRefinedCameraParams(std::vector &cameras) const = 0; + /** @brief Calculates error vector. 
+ + @param err Error column-vector of length total_num_matches \* num_errs_per_measurement + */ + virtual void calcError(Mat &err) = 0; + /** @brief Calculates the cost function jacobian. + + @param jac Jacobian matrix of dimensions + (total_num_matches \* num_errs_per_measurement) x (num_images \* num_params_per_cam) + */ + virtual void calcJacobian(Mat &jac) = 0; + + // 3x3 8U mask, where 0 means don't refine respective parameter, != 0 means refine + Mat refinement_mask_; + + int num_images_; + int total_num_matches_; + + int num_params_per_cam_; + int num_errs_per_measurement_; + + const ImageFeatures *features_; + const MatchesInfo *pairwise_matches_; + + // Threshold to filter out poorly matched image pairs + double conf_thresh_; + + //Levenberg-Marquardt algorithm termination criteria + TermCriteria term_criteria_; + + // Camera parameters matrix (CV_64F) + Mat cam_params_; + + // Connected images pairs + std::vector > edges_; +}; + + +/** @brief Stub bundle adjuster that does nothing. + */ +class CV_EXPORTS NoBundleAdjuster : public BundleAdjusterBase +{ +public: + NoBundleAdjuster() : BundleAdjusterBase(0, 0) {} + +private: + bool estimate(const std::vector &, const std::vector &, + std::vector &) CV_OVERRIDE + { + return true; + } + void setUpInitialCameraParams(const std::vector &) CV_OVERRIDE {} + void obtainRefinedCameraParams(std::vector &) const CV_OVERRIDE {} + void calcError(Mat &) CV_OVERRIDE {} + void calcJacobian(Mat &) CV_OVERRIDE {} +}; + + +/** @brief Implementation of the camera parameters refinement algorithm which minimizes sum of the reprojection +error squares + +It can estimate focal length, aspect ratio, principal point. +You can affect only on them via the refinement mask. + */ +class CV_EXPORTS BundleAdjusterReproj : public BundleAdjusterBase +{ +public: + BundleAdjusterReproj() : BundleAdjusterBase(7, 2) {} + +private: + void setUpInitialCameraParams(const std::vector &cameras) CV_OVERRIDE; + void obtainRefinedCameraParams(std::vector &cameras) const CV_OVERRIDE; + void calcError(Mat &err) CV_OVERRIDE; + void calcJacobian(Mat &jac) CV_OVERRIDE; + + Mat err1_, err2_; +}; + + +/** @brief Implementation of the camera parameters refinement algorithm which minimizes sum of the distances +between the rays passing through the camera center and a feature. : + +It can estimate focal length. It ignores the refinement mask for now. + */ +class CV_EXPORTS BundleAdjusterRay : public BundleAdjusterBase +{ +public: + BundleAdjusterRay() : BundleAdjusterBase(4, 3) {} + +private: + void setUpInitialCameraParams(const std::vector &cameras) CV_OVERRIDE; + void obtainRefinedCameraParams(std::vector &cameras) const CV_OVERRIDE; + void calcError(Mat &err) CV_OVERRIDE; + void calcJacobian(Mat &jac) CV_OVERRIDE; + + Mat err1_, err2_; +}; + + +/** @brief Bundle adjuster that expects affine transformation +represented in homogeneous coordinates in R for each camera param. Implements +camera parameters refinement algorithm which minimizes sum of the reprojection +error squares + +It estimates all transformation parameters. Refinement mask is ignored. 
+ +@sa AffineBasedEstimator AffineBestOf2NearestMatcher BundleAdjusterAffinePartial + */ +class CV_EXPORTS BundleAdjusterAffine : public BundleAdjusterBase +{ +public: + BundleAdjusterAffine() : BundleAdjusterBase(6, 2) {} + +private: + void setUpInitialCameraParams(const std::vector &cameras) CV_OVERRIDE; + void obtainRefinedCameraParams(std::vector &cameras) const CV_OVERRIDE; + void calcError(Mat &err) CV_OVERRIDE; + void calcJacobian(Mat &jac) CV_OVERRIDE; + + Mat err1_, err2_; +}; + + +/** @brief Bundle adjuster that expects affine transformation with 4 DOF +represented in homogeneous coordinates in R for each camera param. Implements +camera parameters refinement algorithm which minimizes sum of the reprojection +error squares + +It estimates all transformation parameters. Refinement mask is ignored. + +@sa AffineBasedEstimator AffineBestOf2NearestMatcher BundleAdjusterAffine + */ +class CV_EXPORTS BundleAdjusterAffinePartial : public BundleAdjusterBase +{ +public: + BundleAdjusterAffinePartial() : BundleAdjusterBase(4, 2) {} + +private: + void setUpInitialCameraParams(const std::vector &cameras) CV_OVERRIDE; + void obtainRefinedCameraParams(std::vector &cameras) const CV_OVERRIDE; + void calcError(Mat &err) CV_OVERRIDE; + void calcJacobian(Mat &jac) CV_OVERRIDE; + + Mat err1_, err2_; +}; + + +enum WaveCorrectKind +{ + WAVE_CORRECT_HORIZ, + WAVE_CORRECT_VERT +}; + +/** @brief Tries to make panorama more horizontal (or vertical). + +@param rmats Camera rotation matrices. +@param kind Correction kind, see detail::WaveCorrectKind. + */ +void CV_EXPORTS waveCorrect(std::vector &rmats, WaveCorrectKind kind); + + +////////////////////////////////////////////////////////////////////////////// +// Auxiliary functions + +// Returns matches graph representation in DOT language +String CV_EXPORTS matchesGraphAsString(std::vector &pathes, std::vector &pairwise_matches, + float conf_threshold); + +std::vector CV_EXPORTS leaveBiggestComponent( + std::vector &features, + std::vector &pairwise_matches, + float conf_threshold); + +void CV_EXPORTS findMaxSpanningTree( + int num_images, const std::vector &pairwise_matches, + Graph &span_tree, std::vector ¢ers); + +//! @} stitching_rotation + +} // namespace detail +} // namespace cv + +#endif // OPENCV_STITCHING_MOTION_ESTIMATORS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/seam_finders.hpp b/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/seam_finders.hpp new file mode 100644 index 00000000..904f0ecb --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/seam_finders.hpp @@ -0,0 +1,285 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
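To tie the estimators above together, a hedged end-to-end sketch: homography-based initialization, ray-space bundle adjustment, then wave correction. The helper name estimateCameras and the threshold value are illustrative, not recommendations.

#include <vector>
#include "opencv2/stitching/detail/motion_estimators.hpp"

// Illustrative only: refine camera rotations for a set of matched images.
bool estimateCameras(const std::vector<cv::detail::ImageFeatures> &features,
                     const std::vector<cv::detail::MatchesInfo> &pairwise_matches,
                     std::vector<cv::detail::CameraParams> &cameras)
{
    cv::detail::HomographyBasedEstimator estimator;
    if (!estimator(features, pairwise_matches, cameras))
        return false;

    for (size_t i = 0; i < cameras.size(); ++i)
        cameras[i].R.convertTo(cameras[i].R, CV_32F);   // adjusters expect CV_32F rotations

    cv::detail::BundleAdjusterRay adjuster;
    adjuster.setConfThresh(1.0);
    if (!adjuster(features, pairwise_matches, cameras))
        return false;

    std::vector<cv::Mat> rmats;
    for (size_t i = 0; i < cameras.size(); ++i)
        rmats.push_back(cameras[i].R.clone());
    cv::detail::waveCorrect(rmats, cv::detail::WAVE_CORRECT_HORIZ);
    for (size_t i = 0; i < cameras.size(); ++i)
        cameras[i].R = rmats[i];
    return true;
}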
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_STITCHING_SEAM_FINDERS_HPP +#define OPENCV_STITCHING_SEAM_FINDERS_HPP + +#include +#include "opencv2/core.hpp" +#include "opencv2/opencv_modules.hpp" + +namespace cv { +namespace detail { + +//! @addtogroup stitching_seam +//! @{ + +/** @brief Base class for a seam estimator. + */ +class CV_EXPORTS SeamFinder +{ +public: + virtual ~SeamFinder() {} + /** @brief Estimates seams. + + @param src Source images + @param corners Source image top-left corners + @param masks Source image masks to update + */ + virtual void find(const std::vector &src, const std::vector &corners, + std::vector &masks) = 0; +}; + +/** @brief Stub seam estimator which does nothing. + */ +class CV_EXPORTS NoSeamFinder : public SeamFinder +{ +public: + void find(const std::vector&, const std::vector&, std::vector&) CV_OVERRIDE {} +}; + +/** @brief Base class for all pairwise seam estimators. + */ +class CV_EXPORTS PairwiseSeamFinder : public SeamFinder +{ +public: + virtual void find(const std::vector &src, const std::vector &corners, + std::vector &masks) CV_OVERRIDE; + +protected: + void run(); + /** @brief Resolves masks intersection of two specified images in the given ROI. + + @param first First image index + @param second Second image index + @param roi Region of interest + */ + virtual void findInPair(size_t first, size_t second, Rect roi) = 0; + + std::vector images_; + std::vector sizes_; + std::vector corners_; + std::vector masks_; +}; + +/** @brief Voronoi diagram-based seam estimator. 
+ */ +class CV_EXPORTS VoronoiSeamFinder : public PairwiseSeamFinder +{ +public: + virtual void find(const std::vector &src, const std::vector &corners, + std::vector &masks) CV_OVERRIDE; + virtual void find(const std::vector &size, const std::vector &corners, + std::vector &masks); +private: + void findInPair(size_t first, size_t second, Rect roi) CV_OVERRIDE; +}; + + +class CV_EXPORTS DpSeamFinder : public SeamFinder +{ +public: + enum CostFunction { COLOR, COLOR_GRAD }; + + DpSeamFinder(CostFunction costFunc = COLOR); + + CostFunction costFunction() const { return costFunc_; } + void setCostFunction(CostFunction val) { costFunc_ = val; } + + virtual void find(const std::vector &src, const std::vector &corners, + std::vector &masks) CV_OVERRIDE; + +private: + enum ComponentState + { + FIRST = 1, SECOND = 2, INTERS = 4, + INTERS_FIRST = INTERS | FIRST, + INTERS_SECOND = INTERS | SECOND + }; + + class ImagePairLess + { + public: + ImagePairLess(const std::vector &images, const std::vector &corners) + : src_(&images[0]), corners_(&corners[0]) {} + + bool operator() (const std::pair &l, const std::pair &r) const + { + Point c1 = corners_[l.first] + Point(src_[l.first].cols / 2, src_[l.first].rows / 2); + Point c2 = corners_[l.second] + Point(src_[l.second].cols / 2, src_[l.second].rows / 2); + int d1 = (c1 - c2).dot(c1 - c2); + + c1 = corners_[r.first] + Point(src_[r.first].cols / 2, src_[r.first].rows / 2); + c2 = corners_[r.second] + Point(src_[r.second].cols / 2, src_[r.second].rows / 2); + int d2 = (c1 - c2).dot(c1 - c2); + + return d1 < d2; + } + + private: + const Mat *src_; + const Point *corners_; + }; + + class ClosePoints + { + public: + ClosePoints(int minDist) : minDist_(minDist) {} + + bool operator() (const Point &p1, const Point &p2) const + { + int dist2 = (p1.x-p2.x) * (p1.x-p2.x) + (p1.y-p2.y) * (p1.y-p2.y); + return dist2 < minDist_ * minDist_; + } + + private: + int minDist_; + }; + + void process( + const Mat &image1, const Mat &image2, Point tl1, Point tl2, Mat &mask1, Mat &mask2); + + void findComponents(); + + void findEdges(); + + void resolveConflicts( + const Mat &image1, const Mat &image2, Point tl1, Point tl2, Mat &mask1, Mat &mask2); + + void computeGradients(const Mat &image1, const Mat &image2); + + bool hasOnlyOneNeighbor(int comp); + + bool closeToContour(int y, int x, const Mat_ &contourMask); + + bool getSeamTips(int comp1, int comp2, Point &p1, Point &p2); + + void computeCosts( + const Mat &image1, const Mat &image2, Point tl1, Point tl2, + int comp, Mat_ &costV, Mat_ &costH); + + bool estimateSeam( + const Mat &image1, const Mat &image2, Point tl1, Point tl2, int comp, + Point p1, Point p2, std::vector &seam, bool &isHorizontal); + + void updateLabelsUsingSeam( + int comp1, int comp2, const std::vector &seam, bool isHorizontalSeam); + + CostFunction costFunc_; + + // processing images pair data + Point unionTl_, unionBr_; + Size unionSize_; + Mat_ mask1_, mask2_; + Mat_ contour1mask_, contour2mask_; + Mat_ gradx1_, grady1_; + Mat_ gradx2_, grady2_; + + // components data + int ncomps_; + Mat_ labels_; + std::vector states_; + std::vector tls_, brs_; + std::vector > contours_; + std::set > edges_; +}; + +/** @brief Base class for all minimum graph-cut-based seam estimators. + */ +class CV_EXPORTS GraphCutSeamFinderBase +{ +public: + enum CostType { COST_COLOR, COST_COLOR_GRAD }; +}; + +/** @brief Minimum graph cut-based seam estimator. See details in @cite V03 . 
+ */ +class CV_EXPORTS GraphCutSeamFinder : public GraphCutSeamFinderBase, public SeamFinder +{ +public: + GraphCutSeamFinder(int cost_type = COST_COLOR_GRAD, float terminal_cost = 10000.f, + float bad_region_penalty = 1000.f); + + ~GraphCutSeamFinder(); + + void find(const std::vector &src, const std::vector &corners, + std::vector &masks) CV_OVERRIDE; + +private: + // To avoid GCGraph dependency + class Impl; + Ptr impl_; +}; + + +#ifdef HAVE_OPENCV_CUDALEGACY +class CV_EXPORTS GraphCutSeamFinderGpu : public GraphCutSeamFinderBase, public PairwiseSeamFinder +{ +public: + GraphCutSeamFinderGpu(int cost_type = COST_COLOR_GRAD, float terminal_cost = 10000.f, + float bad_region_penalty = 1000.f) + : cost_type_(cost_type), terminal_cost_(terminal_cost), + bad_region_penalty_(bad_region_penalty) {} + + void find(const std::vector &src, const std::vector &corners, + std::vector &masks) CV_OVERRIDE; + void findInPair(size_t first, size_t second, Rect roi) CV_OVERRIDE; + +private: + void setGraphWeightsColor(const cv::Mat &img1, const cv::Mat &img2, const cv::Mat &mask1, const cv::Mat &mask2, + cv::Mat &terminals, cv::Mat &leftT, cv::Mat &rightT, cv::Mat &top, cv::Mat &bottom); + void setGraphWeightsColorGrad(const cv::Mat &img1, const cv::Mat &img2, const cv::Mat &dx1, const cv::Mat &dx2, + const cv::Mat &dy1, const cv::Mat &dy2, const cv::Mat &mask1, const cv::Mat &mask2, + cv::Mat &terminals, cv::Mat &leftT, cv::Mat &rightT, cv::Mat &top, cv::Mat &bottom); + std::vector dx_, dy_; + int cost_type_; + float terminal_cost_; + float bad_region_penalty_; +}; +#endif + +//! @} + +} // namespace detail +} // namespace cv + +#endif // OPENCV_STITCHING_SEAM_FINDERS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/timelapsers.hpp b/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/timelapsers.hpp new file mode 100644 index 00000000..74d797ed --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/timelapsers.hpp @@ -0,0 +1,91 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
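A short sketch of the seam-finder interface declared in seam_finders.hpp above. It is illustrative only; the helper name findSeams is hypothetical, and the warped images are assumed to have been converted to CV_32F, which is what the color-based cost functions work on.

#include <vector>
#include "opencv2/stitching/detail/seam_finders.hpp"

// Illustrative only: estimate seams and update the warped masks in place.
void findSeams(const std::vector<cv::UMat> &images_warped_f,   // CV_32F warped images
               const std::vector<cv::Point> &corners,
               std::vector<cv::UMat> &masks_warped)
{
    cv::Ptr<cv::detail::SeamFinder> finder =
        cv::makePtr<cv::detail::GraphCutSeamFinder>(
            cv::detail::GraphCutSeamFinder::COST_COLOR_GRAD);
    finder->find(images_warped_f, corners, masks_warped);
}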
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + + +#ifndef OPENCV_STITCHING_TIMELAPSERS_HPP +#define OPENCV_STITCHING_TIMELAPSERS_HPP + +#include "opencv2/core.hpp" + +namespace cv { +namespace detail { + +//! @addtogroup stitching +//! @{ + +// Base Timelapser class, takes a sequence of images, applies appropriate shift, stores result in dst_. + +class CV_EXPORTS Timelapser +{ +public: + + enum {AS_IS, CROP}; + + virtual ~Timelapser() {} + + static Ptr createDefault(int type); + + virtual void initialize(const std::vector &corners, const std::vector &sizes); + virtual void process(InputArray img, InputArray mask, Point tl); + virtual const UMat& getDst() {return dst_;} + +protected: + + virtual bool test_point(Point pt); + + UMat dst_; + Rect dst_roi_; +}; + + +class CV_EXPORTS TimelapserCrop : public Timelapser +{ +public: + virtual void initialize(const std::vector &corners, const std::vector &sizes) CV_OVERRIDE; +}; + +//! @} + +} // namespace detail +} // namespace cv + +#endif // OPENCV_STITCHING_TIMELAPSERS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/util.hpp b/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/util.hpp new file mode 100644 index 00000000..78301b85 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/util.hpp @@ -0,0 +1,121 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
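A small hedged sketch of the Timelapser declared in timelapsers.hpp above, which simply places each warped frame onto a fixed common canvas. The helper name makeFrames is hypothetical, and the warped images are assumed to be CV_16S as elsewhere in the pipeline.

#include <vector>
#include "opencv2/stitching/detail/timelapsers.hpp"

// Illustrative only: composite one output frame per warped input image.
void makeFrames(const std::vector<cv::Mat> &images_warped_s,   // CV_16S warped images
                const std::vector<cv::Mat> &masks_warped,
                const std::vector<cv::Point> &corners,
                const std::vector<cv::Size> &sizes)
{
    cv::Ptr<cv::detail::Timelapser> timelapser =
        cv::detail::Timelapser::createDefault(cv::detail::Timelapser::AS_IS);
    timelapser->initialize(corners, sizes);

    for (size_t i = 0; i < images_warped_s.size(); ++i)
    {
        timelapser->process(images_warped_s[i], masks_warped[i], corners[i]);
        cv::Mat frame;
        timelapser->getDst().copyTo(frame);   // one frame per input image
        // frame would typically be converted to CV_8U and written out here
    }
}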
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_STITCHING_UTIL_HPP +#define OPENCV_STITCHING_UTIL_HPP + +#include +#include "opencv2/core.hpp" + +namespace cv { +namespace detail { + +//! @addtogroup stitching +//! @{ + +class CV_EXPORTS DisjointSets +{ +public: + DisjointSets(int elem_count = 0) { createOneElemSets(elem_count); } + + void createOneElemSets(int elem_count); + int findSetByElem(int elem); + int mergeSets(int set1, int set2); + + std::vector parent; + std::vector size; + +private: + std::vector rank_; +}; + + +struct CV_EXPORTS GraphEdge +{ + GraphEdge(int from, int to, float weight); + bool operator <(const GraphEdge& other) const { return weight < other.weight; } + bool operator >(const GraphEdge& other) const { return weight > other.weight; } + + int from, to; + float weight; +}; + +inline GraphEdge::GraphEdge(int _from, int _to, float _weight) : from(_from), to(_to), weight(_weight) {} + + +class CV_EXPORTS Graph +{ +public: + Graph(int num_vertices = 0) { create(num_vertices); } + void create(int num_vertices) { edges_.assign(num_vertices, std::list()); } + int numVertices() const { return static_cast(edges_.size()); } + void addEdge(int from, int to, float weight); + template B forEach(B body) const; + template B walkBreadthFirst(int from, B body) const; + +private: + std::vector< std::list > edges_; +}; + + +////////////////////////////////////////////////////////////////////////////// +// Auxiliary functions + +CV_EXPORTS bool overlapRoi(Point tl1, Point tl2, Size sz1, Size sz2, Rect &roi); +CV_EXPORTS Rect resultRoi(const std::vector &corners, const std::vector &images); +CV_EXPORTS Rect resultRoi(const std::vector &corners, const std::vector &sizes); +CV_EXPORTS Rect resultRoiIntersection(const std::vector &corners, const std::vector &sizes); +CV_EXPORTS Point resultTl(const std::vector &corners); + +// Returns random 'count' element subset of the {0,1,...,size-1} set +CV_EXPORTS void selectRandomSubset(int count, int size, std::vector &subset); + +CV_EXPORTS int& stitchingLogLevel(); + +//! @} + +} // namespace detail +} // namespace cv + +#include "util_inl.hpp" + +#endif // OPENCV_STITCHING_UTIL_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/util_inl.hpp b/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/util_inl.hpp new file mode 100644 index 00000000..dafab8b8 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/util_inl.hpp @@ -0,0 +1,131 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
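The small graph utilities in util.hpp above back the spanning-tree and connected-component logic used elsewhere in the module; a hedged sketch of their public surface (graphDemo is a hypothetical name):

#include <iostream>
#include "opencv2/stitching/detail/util.hpp"

// Illustrative only: build a tiny weighted graph, walk it, and merge disjoint sets.
void graphDemo()
{
    cv::detail::Graph g(3);
    g.addEdge(0, 1, 0.5f);     // directed edges
    g.addEdge(1, 2, 0.25f);

    g.walkBreadthFirst(0, [](const cv::detail::GraphEdge &e)
    {
        std::cout << e.from << " -> " << e.to << " (w=" << e.weight << ")\n";
    });

    cv::detail::DisjointSets ds(3);
    ds.mergeSets(ds.findSetByElem(0), ds.findSetByElem(1));
    std::cout << "0 and 1 share a set: "
              << (ds.findSetByElem(0) == ds.findSetByElem(1)) << "\n";
}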
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_STITCHING_UTIL_INL_HPP +#define OPENCV_STITCHING_UTIL_INL_HPP + +#include +#include "opencv2/core.hpp" +#include "util.hpp" // Make your IDE see declarations + +//! @cond IGNORED + +namespace cv { +namespace detail { + +template +B Graph::forEach(B body) const +{ + for (int i = 0; i < numVertices(); ++i) + { + std::list::const_iterator edge = edges_[i].begin(); + for (; edge != edges_[i].end(); ++edge) + body(*edge); + } + return body; +} + + +template +B Graph::walkBreadthFirst(int from, B body) const +{ + std::vector was(numVertices(), false); + std::queue vertices; + + was[from] = true; + vertices.push(from); + + while (!vertices.empty()) + { + int vertex = vertices.front(); + vertices.pop(); + + std::list::const_iterator edge = edges_[vertex].begin(); + for (; edge != edges_[vertex].end(); ++edge) + { + if (!was[edge->to]) + { + body(*edge); + was[edge->to] = true; + vertices.push(edge->to); + } + } + } + + return body; +} + + +////////////////////////////////////////////////////////////////////////////// +// Some auxiliary math functions + +static inline +float normL2(const Point3f& a) +{ + return a.x * a.x + a.y * a.y + a.z * a.z; +} + + +static inline +float normL2(const Point3f& a, const Point3f& b) +{ + return normL2(a - b); +} + + +static inline +double normL2sq(const Mat &r) +{ + return r.dot(r); +} + + +static inline int sqr(int x) { return x * x; } +static inline float sqr(float x) { return x * x; } +static inline double sqr(double x) { return x * x; } + +} // namespace detail +} // namespace cv + +//! 
@endcond + +#endif // OPENCV_STITCHING_UTIL_INL_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/warpers.hpp b/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/warpers.hpp new file mode 100644 index 00000000..263cb010 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/warpers.hpp @@ -0,0 +1,651 @@ + /*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_STITCHING_WARPERS_HPP +#define OPENCV_STITCHING_WARPERS_HPP + +#include "opencv2/core.hpp" +#include "opencv2/core/cuda.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/opencv_modules.hpp" + +namespace cv { +namespace detail { + +//! @addtogroup stitching_warp +//! @{ + +/** @brief Rotation-only model image warper interface. + */ +class CV_EXPORTS RotationWarper +{ +public: + virtual ~RotationWarper() {} + + /** @brief Projects the image point. + + @param pt Source point + @param K Camera intrinsic parameters + @param R Camera rotation matrix + @return Projected point + */ + virtual Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R) = 0; + + /** @brief Builds the projection maps according to the given camera data. 
+ + @param src_size Source image size + @param K Camera intrinsic parameters + @param R Camera rotation matrix + @param xmap Projection map for the x axis + @param ymap Projection map for the y axis + @return Projected image minimum bounding box + */ + virtual Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) = 0; + + /** @brief Projects the image. + + @param src Source image + @param K Camera intrinsic parameters + @param R Camera rotation matrix + @param interp_mode Interpolation mode + @param border_mode Border extrapolation mode + @param dst Projected image + @return Project image top-left corner + */ + virtual Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, + OutputArray dst) = 0; + + /** @brief Projects the image backward. + + @param src Projected image + @param K Camera intrinsic parameters + @param R Camera rotation matrix + @param interp_mode Interpolation mode + @param border_mode Border extrapolation mode + @param dst_size Backward-projected image size + @param dst Backward-projected image + */ + virtual void warpBackward(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, + Size dst_size, OutputArray dst) = 0; + + /** + @param src_size Source image bounding box + @param K Camera intrinsic parameters + @param R Camera rotation matrix + @return Projected image minimum bounding box + */ + virtual Rect warpRoi(Size src_size, InputArray K, InputArray R) = 0; + + virtual float getScale() const { return 1.f; } + virtual void setScale(float) {} +}; + +/** @brief Base class for warping logic implementation. + */ +struct CV_EXPORTS ProjectorBase +{ + void setCameraParams(InputArray K = Mat::eye(3, 3, CV_32F), + InputArray R = Mat::eye(3, 3, CV_32F), + InputArray T = Mat::zeros(3, 1, CV_32F)); + + float scale; + float k[9]; + float rinv[9]; + float r_kinv[9]; + float k_rinv[9]; + float t[3]; +}; + +/** @brief Base class for rotation-based warper using a detail::ProjectorBase_ derived class. + */ +template +class CV_EXPORTS_TEMPLATE RotationWarperBase : public RotationWarper +{ +public: + Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R) CV_OVERRIDE; + + Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) CV_OVERRIDE; + + Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, + OutputArray dst) CV_OVERRIDE; + + void warpBackward(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, + Size dst_size, OutputArray dst) CV_OVERRIDE; + + Rect warpRoi(Size src_size, InputArray K, InputArray R) CV_OVERRIDE; + + float getScale() const CV_OVERRIDE{ return projector_.scale; } + void setScale(float val) CV_OVERRIDE { projector_.scale = val; } + +protected: + + // Detects ROI of the destination image. It's correct for any projection. + virtual void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br); + + // Detects ROI of the destination image by walking over image border. + // Correctness for any projection isn't guaranteed. + void detectResultRoiByBorder(Size src_size, Point &dst_tl, Point &dst_br); + + P projector_; +}; + + +struct CV_EXPORTS PlaneProjector : ProjectorBase +{ + void mapForward(float x, float y, float &u, float &v); + void mapBackward(float u, float v, float &x, float &y); +}; + +/** @brief Warper that maps an image onto the z = 1 plane. 
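For the rotation warper interface above, a hedged sketch of the typical forward-warp call; the helper name warpOne is hypothetical, and K and R are assumed to be 3x3 CV_32F matrices coming from the camera estimation step.

#include "opencv2/stitching/detail/warpers.hpp"

// Illustrative only: forward-warp one image through any RotationWarper implementation.
cv::Point warpOne(cv::detail::RotationWarper &warper,
                  const cv::Mat &img, const cv::Mat &K, const cv::Mat &R,
                  cv::Mat &img_warped)
{
    // Returns the top-left corner of the warped image in the panorama frame.
    return warper.warp(img, K, R, cv::INTER_LINEAR, cv::BORDER_REFLECT, img_warped);
}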
+ */ +class CV_EXPORTS PlaneWarper : public RotationWarperBase +{ +public: + /** @brief Construct an instance of the plane warper class. + + @param scale Projected image scale multiplier + */ + PlaneWarper(float scale = 1.f) { projector_.scale = scale; } + + Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R) CV_OVERRIDE; + Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R, InputArray T); + + virtual Rect buildMaps(Size src_size, InputArray K, InputArray R, InputArray T, OutputArray xmap, OutputArray ymap); + Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) CV_OVERRIDE; + + Point warp(InputArray src, InputArray K, InputArray R, + int interp_mode, int border_mode, OutputArray dst) CV_OVERRIDE; + virtual Point warp(InputArray src, InputArray K, InputArray R, InputArray T, int interp_mode, int border_mode, + OutputArray dst); + + Rect warpRoi(Size src_size, InputArray K, InputArray R) CV_OVERRIDE; + Rect warpRoi(Size src_size, InputArray K, InputArray R, InputArray T); + +protected: + void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) CV_OVERRIDE; +}; + + +/** @brief Affine warper that uses rotations and translations + + Uses affine transformation in homogeneous coordinates to represent both rotation and + translation in camera rotation matrix. + */ +class CV_EXPORTS AffineWarper : public PlaneWarper +{ +public: + /** @brief Construct an instance of the affine warper class. + + @param scale Projected image scale multiplier + */ + AffineWarper(float scale = 1.f) : PlaneWarper(scale) {} + + /** @brief Projects the image point. + + @param pt Source point + @param K Camera intrinsic parameters + @param H Camera extrinsic parameters + @return Projected point + */ + Point2f warpPoint(const Point2f &pt, InputArray K, InputArray H) CV_OVERRIDE; + + /** @brief Builds the projection maps according to the given camera data. + + @param src_size Source image size + @param K Camera intrinsic parameters + @param H Camera extrinsic parameters + @param xmap Projection map for the x axis + @param ymap Projection map for the y axis + @return Projected image minimum bounding box + */ + Rect buildMaps(Size src_size, InputArray K, InputArray H, OutputArray xmap, OutputArray ymap) CV_OVERRIDE; + + /** @brief Projects the image. + + @param src Source image + @param K Camera intrinsic parameters + @param H Camera extrinsic parameters + @param interp_mode Interpolation mode + @param border_mode Border extrapolation mode + @param dst Projected image + @return Project image top-left corner + */ + Point warp(InputArray src, InputArray K, InputArray H, + int interp_mode, int border_mode, OutputArray dst) CV_OVERRIDE; + + /** + @param src_size Source image bounding box + @param K Camera intrinsic parameters + @param H Camera extrinsic parameters + @return Projected image minimum bounding box + */ + Rect warpRoi(Size src_size, InputArray K, InputArray H) CV_OVERRIDE; + +protected: + /** @brief Extracts rotation and translation matrices from matrix H representing + affine transformation in homogeneous coordinates + */ + void getRTfromHomogeneous(InputArray H, Mat &R, Mat &T); +}; + + +struct CV_EXPORTS SphericalProjector : ProjectorBase +{ + void mapForward(float x, float y, float &u, float &v); + void mapBackward(float u, float v, float &x, float &y); +}; + + +/** @brief Warper that maps an image onto the unit sphere located at the origin. + + Projects image onto unit sphere with origin at (0, 0, 0) and radius scale, measured in pixels. 
+ A 360 panorama would therefore have a resulting width of 2 * scale * PI pixels. + Poles are located at (0, -1, 0) and (0, 1, 0) points. +*/ +class CV_EXPORTS SphericalWarper : public RotationWarperBase +{ +public: + /** @brief Construct an instance of the spherical warper class. + + @param scale Radius of the projected sphere, in pixels. An image spanning the + whole sphere will have a width of 2 * scale * PI pixels. + */ + SphericalWarper(float scale) { projector_.scale = scale; } + + Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) CV_OVERRIDE; + Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, OutputArray dst) CV_OVERRIDE; +protected: + void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) CV_OVERRIDE; +}; + + +struct CV_EXPORTS CylindricalProjector : ProjectorBase +{ + void mapForward(float x, float y, float &u, float &v); + void mapBackward(float u, float v, float &x, float &y); +}; + + +/** @brief Warper that maps an image onto the x\*x + z\*z = 1 cylinder. + */ +class CV_EXPORTS CylindricalWarper : public RotationWarperBase +{ +public: + /** @brief Construct an instance of the cylindrical warper class. + + @param scale Projected image scale multiplier + */ + CylindricalWarper(float scale) { projector_.scale = scale; } + + Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) CV_OVERRIDE; + Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, OutputArray dst) CV_OVERRIDE; +protected: + void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) CV_OVERRIDE + { + RotationWarperBase::detectResultRoiByBorder(src_size, dst_tl, dst_br); + } +}; + + +struct CV_EXPORTS FisheyeProjector : ProjectorBase +{ + void mapForward(float x, float y, float &u, float &v); + void mapBackward(float u, float v, float &x, float &y); +}; + + +class CV_EXPORTS FisheyeWarper : public RotationWarperBase +{ +public: + FisheyeWarper(float scale) { projector_.scale = scale; } +}; + + +struct CV_EXPORTS StereographicProjector : ProjectorBase +{ + void mapForward(float x, float y, float &u, float &v); + void mapBackward(float u, float v, float &x, float &y); +}; + + +class CV_EXPORTS StereographicWarper : public RotationWarperBase +{ +public: + StereographicWarper(float scale) { projector_.scale = scale; } +}; + + +struct CV_EXPORTS CompressedRectilinearProjector : ProjectorBase +{ + float a, b; + + void mapForward(float x, float y, float &u, float &v); + void mapBackward(float u, float v, float &x, float &y); +}; + + +class CV_EXPORTS CompressedRectilinearWarper : public RotationWarperBase +{ +public: + CompressedRectilinearWarper(float scale, float A = 1, float B = 1) + { + projector_.a = A; + projector_.b = B; + projector_.scale = scale; + } +}; + + +struct CV_EXPORTS CompressedRectilinearPortraitProjector : ProjectorBase +{ + float a, b; + + void mapForward(float x, float y, float &u, float &v); + void mapBackward(float u, float v, float &x, float &y); +}; + + +class CV_EXPORTS CompressedRectilinearPortraitWarper : public RotationWarperBase +{ +public: + CompressedRectilinearPortraitWarper(float scale, float A = 1, float B = 1) + { + projector_.a = A; + projector_.b = B; + projector_.scale = scale; + } +}; + + +struct CV_EXPORTS PaniniProjector : ProjectorBase +{ + float a, b; + + void mapForward(float x, float y, float &u, float &v); + void mapBackward(float u, float v, float &x, float &y); +}; + + +class CV_EXPORTS PaniniWarper : 
public RotationWarperBase +{ +public: + PaniniWarper(float scale, float A = 1, float B = 1) + { + projector_.a = A; + projector_.b = B; + projector_.scale = scale; + } +}; + + +struct CV_EXPORTS PaniniPortraitProjector : ProjectorBase +{ + float a, b; + + void mapForward(float x, float y, float &u, float &v); + void mapBackward(float u, float v, float &x, float &y); +}; + + +class CV_EXPORTS PaniniPortraitWarper : public RotationWarperBase +{ +public: + PaniniPortraitWarper(float scale, float A = 1, float B = 1) + { + projector_.a = A; + projector_.b = B; + projector_.scale = scale; + } + +}; + + +struct CV_EXPORTS MercatorProjector : ProjectorBase +{ + void mapForward(float x, float y, float &u, float &v); + void mapBackward(float u, float v, float &x, float &y); +}; + + +class CV_EXPORTS MercatorWarper : public RotationWarperBase +{ +public: + MercatorWarper(float scale) { projector_.scale = scale; } +}; + + +struct CV_EXPORTS TransverseMercatorProjector : ProjectorBase +{ + void mapForward(float x, float y, float &u, float &v); + void mapBackward(float u, float v, float &x, float &y); +}; + + +class CV_EXPORTS TransverseMercatorWarper : public RotationWarperBase +{ +public: + TransverseMercatorWarper(float scale) { projector_.scale = scale; } +}; + + +class CV_EXPORTS PlaneWarperGpu : public PlaneWarper +{ +public: + PlaneWarperGpu(float scale = 1.f) : PlaneWarper(scale) {} + + Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) CV_OVERRIDE + { + Rect result = buildMaps(src_size, K, R, d_xmap_, d_ymap_); + d_xmap_.download(xmap); + d_ymap_.download(ymap); + return result; + } + + Rect buildMaps(Size src_size, InputArray K, InputArray R, InputArray T, OutputArray xmap, OutputArray ymap) CV_OVERRIDE + { + Rect result = buildMaps(src_size, K, R, T, d_xmap_, d_ymap_); + d_xmap_.download(xmap); + d_ymap_.download(ymap); + return result; + } + + Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, + OutputArray dst) CV_OVERRIDE + { + d_src_.upload(src); + Point result = warp(d_src_, K, R, interp_mode, border_mode, d_dst_); + d_dst_.download(dst); + return result; + } + + Point warp(InputArray src, InputArray K, InputArray R, InputArray T, int interp_mode, int border_mode, + OutputArray dst) CV_OVERRIDE + { + d_src_.upload(src); + Point result = warp(d_src_, K, R, T, interp_mode, border_mode, d_dst_); + d_dst_.download(dst); + return result; + } + + Rect buildMaps(Size src_size, InputArray K, InputArray R, cuda::GpuMat & xmap, cuda::GpuMat & ymap); + + Rect buildMaps(Size src_size, InputArray K, InputArray R, InputArray T, cuda::GpuMat & xmap, cuda::GpuMat & ymap); + + Point warp(const cuda::GpuMat & src, InputArray K, InputArray R, int interp_mode, int border_mode, + cuda::GpuMat & dst); + + Point warp(const cuda::GpuMat & src, InputArray K, InputArray R, InputArray T, int interp_mode, int border_mode, + cuda::GpuMat & dst); + +private: + cuda::GpuMat d_xmap_, d_ymap_, d_src_, d_dst_; +}; + + +class CV_EXPORTS SphericalWarperGpu : public SphericalWarper +{ +public: + SphericalWarperGpu(float scale) : SphericalWarper(scale) {} + + Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) CV_OVERRIDE + { + Rect result = buildMaps(src_size, K, R, d_xmap_, d_ymap_); + d_xmap_.download(xmap); + d_ymap_.download(ymap); + return result; + } + + Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, + OutputArray dst) CV_OVERRIDE + { + d_src_.upload(src); 
+ Point result = warp(d_src_, K, R, interp_mode, border_mode, d_dst_); + d_dst_.download(dst); + return result; + } + + Rect buildMaps(Size src_size, InputArray K, InputArray R, cuda::GpuMat & xmap, cuda::GpuMat & ymap); + + Point warp(const cuda::GpuMat & src, InputArray K, InputArray R, int interp_mode, int border_mode, + cuda::GpuMat & dst); + +private: + cuda::GpuMat d_xmap_, d_ymap_, d_src_, d_dst_; +}; + + +class CV_EXPORTS CylindricalWarperGpu : public CylindricalWarper +{ +public: + CylindricalWarperGpu(float scale) : CylindricalWarper(scale) {} + + Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) CV_OVERRIDE + { + Rect result = buildMaps(src_size, K, R, d_xmap_, d_ymap_); + d_xmap_.download(xmap); + d_ymap_.download(ymap); + return result; + } + + Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, + OutputArray dst) CV_OVERRIDE + { + d_src_.upload(src); + Point result = warp(d_src_, K, R, interp_mode, border_mode, d_dst_); + d_dst_.download(dst); + return result; + } + + Rect buildMaps(Size src_size, InputArray K, InputArray R, cuda::GpuMat & xmap, cuda::GpuMat & ymap); + + Point warp(const cuda::GpuMat & src, InputArray K, InputArray R, int interp_mode, int border_mode, + cuda::GpuMat & dst); + +private: + cuda::GpuMat d_xmap_, d_ymap_, d_src_, d_dst_; +}; + + +struct CV_EXPORTS SphericalPortraitProjector : ProjectorBase +{ + void mapForward(float x, float y, float &u, float &v); + void mapBackward(float u, float v, float &x, float &y); +}; + + +// Projects image onto unit sphere with origin at (0, 0, 0). +// Poles are located NOT at (0, -1, 0) and (0, 1, 0) points, BUT at (1, 0, 0) and (-1, 0, 0) points. +class CV_EXPORTS SphericalPortraitWarper : public RotationWarperBase +{ +public: + SphericalPortraitWarper(float scale) { projector_.scale = scale; } + +protected: + void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) CV_OVERRIDE; +}; + +struct CV_EXPORTS CylindricalPortraitProjector : ProjectorBase +{ + void mapForward(float x, float y, float &u, float &v); + void mapBackward(float u, float v, float &x, float &y); +}; + + +class CV_EXPORTS CylindricalPortraitWarper : public RotationWarperBase +{ +public: + CylindricalPortraitWarper(float scale) { projector_.scale = scale; } + +protected: + void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) CV_OVERRIDE + { + RotationWarperBase::detectResultRoiByBorder(src_size, dst_tl, dst_br); + } +}; + +struct CV_EXPORTS PlanePortraitProjector : ProjectorBase +{ + void mapForward(float x, float y, float &u, float &v); + void mapBackward(float u, float v, float &x, float &y); +}; + + +class CV_EXPORTS PlanePortraitWarper : public RotationWarperBase +{ +public: + PlanePortraitWarper(float scale) { projector_.scale = scale; } + +protected: + void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) CV_OVERRIDE + { + RotationWarperBase::detectResultRoiByBorder(src_size, dst_tl, dst_br); + } +}; + +//! 
@} stitching_warp + +} // namespace detail +} // namespace cv + +#include "warpers_inl.hpp" + +#endif // OPENCV_STITCHING_WARPERS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/warpers_inl.hpp b/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/warpers_inl.hpp new file mode 100644 index 00000000..f4a19d9c --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/stitching/detail/warpers_inl.hpp @@ -0,0 +1,774 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_STITCHING_WARPERS_INL_HPP +#define OPENCV_STITCHING_WARPERS_INL_HPP + +#include "opencv2/core.hpp" +#include "warpers.hpp" // Make your IDE see declarations +#include + +//! @cond IGNORED + +namespace cv { +namespace detail { + +template +Point2f RotationWarperBase

<P>::warpPoint(const Point2f &pt, InputArray K, InputArray R)
+{
+    projector_.setCameraParams(K, R);
+    Point2f uv;
+    projector_.mapForward(pt.x, pt.y, uv.x, uv.y);
+    return uv;
+}
+
+
+template <class P>
+Rect RotationWarperBase<P>::buildMaps(Size src_size, InputArray K, InputArray R, OutputArray _xmap, OutputArray _ymap)
+{
+    projector_.setCameraParams(K, R);
+
+    Point dst_tl, dst_br;
+    detectResultRoi(src_size, dst_tl, dst_br);
+
+    _xmap.create(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, CV_32F);
+    _ymap.create(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, CV_32F);
+
+    Mat xmap = _xmap.getMat(), ymap = _ymap.getMat();
+
+    float x, y;
+    for (int v = dst_tl.y; v <= dst_br.y; ++v)
+    {
+        for (int u = dst_tl.x; u <= dst_br.x; ++u)
+        {
+            projector_.mapBackward(static_cast<float>(u), static_cast<float>(v), x, y);
+            xmap.at<float>(v - dst_tl.y, u - dst_tl.x) = x;
+            ymap.at<float>(v - dst_tl.y, u - dst_tl.x) = y;
+        }
+    }
+
+    return Rect(dst_tl, dst_br);
+}
+
+
+template <class P>
+Point RotationWarperBase<P>::warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
+                                  OutputArray dst)
+{
+    UMat xmap, ymap;
+    Rect dst_roi = buildMaps(src.size(), K, R, xmap, ymap);
+
+    dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type());
+    remap(src, dst, xmap, ymap, interp_mode, border_mode);
+
+    return dst_roi.tl();
+}
+
+
+template <class P>
+void RotationWarperBase<P>::warpBackward(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
+                                         Size dst_size, OutputArray dst)
+{
+    projector_.setCameraParams(K, R);
+
+    Point src_tl, src_br;
+    detectResultRoi(dst_size, src_tl, src_br);
+
+    Size size = src.size();
+    CV_Assert(src_br.x - src_tl.x + 1 == size.width && src_br.y - src_tl.y + 1 == size.height);
+
+    Mat xmap(dst_size, CV_32F);
+    Mat ymap(dst_size, CV_32F);
+
+    float u, v;
+    for (int y = 0; y < dst_size.height; ++y)
+    {
+        for (int x = 0; x < dst_size.width; ++x)
+        {
+            projector_.mapForward(static_cast<float>(x), static_cast<float>(y), u, v);
+            xmap.at<float>(y, x) = u - src_tl.x;
+            ymap.at<float>(y, x) = v - src_tl.y;
+        }
+    }
+
+    dst.create(dst_size, src.type());
+    remap(src, dst, xmap, ymap, interp_mode, border_mode);
+}
+
+
+template <class P>
+Rect RotationWarperBase<P>::warpRoi(Size src_size, InputArray K, InputArray R)
+{
+    projector_.setCameraParams(K, R);
+
+    Point dst_tl, dst_br;
+    detectResultRoi(src_size, dst_tl, dst_br);
+
+    return Rect(dst_tl, Point(dst_br.x + 1, dst_br.y + 1));
+}
+
+
+template <class P>
+void RotationWarperBase<P>::detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br)
+{
+    float tl_uf = (std::numeric_limits<float>::max)();
+    float tl_vf = (std::numeric_limits<float>::max)();
+    float br_uf = -(std::numeric_limits<float>::max)();
+    float br_vf = -(std::numeric_limits<float>::max)();
+
+    float u, v;
+    for (int y = 0; y < src_size.height; ++y)
+    {
+        for (int x = 0; x < src_size.width; ++x)
+        {
+            projector_.mapForward(static_cast<float>(x), static_cast<float>(y), u, v);
+            tl_uf = (std::min)(tl_uf, u); tl_vf = (std::min)(tl_vf, v);
+            br_uf = (std::max)(br_uf, u); br_vf = (std::max)(br_vf, v);
+        }
+    }
+
+    dst_tl.x = static_cast<int>(tl_uf);
+    dst_tl.y = static_cast<int>(tl_vf);
+    dst_br.x = static_cast<int>(br_uf);
+    dst_br.y = static_cast<int>(br_vf);
+}
+
+
+template <class P>
+void RotationWarperBase<P>

::detectResultRoiByBorder(Size src_size, Point &dst_tl, Point &dst_br) +{ + float tl_uf = (std::numeric_limits::max)(); + float tl_vf = (std::numeric_limits::max)(); + float br_uf = -(std::numeric_limits::max)(); + float br_vf = -(std::numeric_limits::max)(); + + float u, v; + for (float x = 0; x < src_size.width; ++x) + { + projector_.mapForward(static_cast(x), 0, u, v); + tl_uf = (std::min)(tl_uf, u); tl_vf = (std::min)(tl_vf, v); + br_uf = (std::max)(br_uf, u); br_vf = (std::max)(br_vf, v); + + projector_.mapForward(static_cast(x), static_cast(src_size.height - 1), u, v); + tl_uf = (std::min)(tl_uf, u); tl_vf = (std::min)(tl_vf, v); + br_uf = (std::max)(br_uf, u); br_vf = (std::max)(br_vf, v); + } + for (int y = 0; y < src_size.height; ++y) + { + projector_.mapForward(0, static_cast(y), u, v); + tl_uf = (std::min)(tl_uf, u); tl_vf = (std::min)(tl_vf, v); + br_uf = (std::max)(br_uf, u); br_vf = (std::max)(br_vf, v); + + projector_.mapForward(static_cast(src_size.width - 1), static_cast(y), u, v); + tl_uf = (std::min)(tl_uf, u); tl_vf = (std::min)(tl_vf, v); + br_uf = (std::max)(br_uf, u); br_vf = (std::max)(br_vf, v); + } + + dst_tl.x = static_cast(tl_uf); + dst_tl.y = static_cast(tl_vf); + dst_br.x = static_cast(br_uf); + dst_br.y = static_cast(br_vf); +} + + +inline +void PlaneProjector::mapForward(float x, float y, float &u, float &v) +{ + float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2]; + float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5]; + float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8]; + + x_ = t[0] + x_ / z_ * (1 - t[2]); + y_ = t[1] + y_ / z_ * (1 - t[2]); + + u = scale * x_; + v = scale * y_; +} + + +inline +void PlaneProjector::mapBackward(float u, float v, float &x, float &y) +{ + u = u / scale - t[0]; + v = v / scale - t[1]; + + float z; + x = k_rinv[0] * u + k_rinv[1] * v + k_rinv[2] * (1 - t[2]); + y = k_rinv[3] * u + k_rinv[4] * v + k_rinv[5] * (1 - t[2]); + z = k_rinv[6] * u + k_rinv[7] * v + k_rinv[8] * (1 - t[2]); + + x /= z; + y /= z; +} + + +inline +void SphericalProjector::mapForward(float x, float y, float &u, float &v) +{ + float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2]; + float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5]; + float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8]; + + u = scale * atan2f(x_, z_); + float w = y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_); + v = scale * (static_cast(CV_PI) - acosf(w == w ? 
w : 0)); +} + + +inline +void SphericalProjector::mapBackward(float u, float v, float &x, float &y) +{ + u /= scale; + v /= scale; + + float sinv = sinf(static_cast(CV_PI) - v); + float x_ = sinv * sinf(u); + float y_ = cosf(static_cast(CV_PI) - v); + float z_ = sinv * cosf(u); + + float z; + x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_; + y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_; + z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_; + + if (z > 0) { x /= z; y /= z; } + else x = y = -1; +} + + +inline +void CylindricalProjector::mapForward(float x, float y, float &u, float &v) +{ + float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2]; + float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5]; + float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8]; + + u = scale * atan2f(x_, z_); + v = scale * y_ / sqrtf(x_ * x_ + z_ * z_); +} + + +inline +void CylindricalProjector::mapBackward(float u, float v, float &x, float &y) +{ + u /= scale; + v /= scale; + + float x_ = sinf(u); + float y_ = v; + float z_ = cosf(u); + + float z; + x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_; + y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_; + z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_; + + if (z > 0) { x /= z; y /= z; } + else x = y = -1; +} + +inline +void FisheyeProjector::mapForward(float x, float y, float &u, float &v) +{ + float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2]; + float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5]; + float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8]; + + float u_ = atan2f(x_, z_); + float v_ = (float)CV_PI - acosf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_)); + + u = scale * v_ * cosf(u_); + v = scale * v_ * sinf(u_); +} + +inline +void FisheyeProjector::mapBackward(float u, float v, float &x, float &y) +{ + u /= scale; + v /= scale; + + float u_ = atan2f(v, u); + float v_ = sqrtf(u*u + v*v); + + float sinv = sinf((float)CV_PI - v_); + float x_ = sinv * sinf(u_); + float y_ = cosf((float)CV_PI - v_); + float z_ = sinv * cosf(u_); + + float z; + x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_; + y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_; + z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_; + + if (z > 0) { x /= z; y /= z; } + else x = y = -1; +} + +inline +void StereographicProjector::mapForward(float x, float y, float &u, float &v) +{ + float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2]; + float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5]; + float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8]; + + float u_ = atan2f(x_, z_); + float v_ = (float)CV_PI - acosf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_)); + + float r = sinf(v_) / (1 - cosf(v_)); + + u = scale * r * cos(u_); + v = scale * r * sin(u_); +} + +inline +void StereographicProjector::mapBackward(float u, float v, float &x, float &y) +{ + u /= scale; + v /= scale; + + float u_ = atan2f(v, u); + float r = sqrtf(u*u + v*v); + float v_ = 2 * atanf(1.f / r); + + float sinv = sinf((float)CV_PI - v_); + float x_ = sinv * sinf(u_); + float y_ = cosf((float)CV_PI - v_); + float z_ = sinv * cosf(u_); + + float z; + x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_; + y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_; + z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_; + + if (z > 0) { x /= z; y /= z; } + else x = y = -1; +} + +inline +void CompressedRectilinearProjector::mapForward(float x, float y, float &u, float &v) +{ + float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2]; + float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5]; + float z_ = r_kinv[6] * x + 
r_kinv[7] * y + r_kinv[8]; + + float u_ = atan2f(x_, z_); + float v_ = asinf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_)); + + u = scale * a * tanf(u_ / a); + v = scale * b * tanf(v_) / cosf(u_); +} + +inline +void CompressedRectilinearProjector::mapBackward(float u, float v, float &x, float &y) +{ + u /= scale; + v /= scale; + + float aatg = a * atanf(u / a); + float u_ = aatg; + float v_ = atanf(v * cosf(aatg) / b); + + float cosv = cosf(v_); + float x_ = cosv * sinf(u_); + float y_ = sinf(v_); + float z_ = cosv * cosf(u_); + + float z; + x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_; + y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_; + z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_; + + if (z > 0) { x /= z; y /= z; } + else x = y = -1; +} + +inline +void CompressedRectilinearPortraitProjector::mapForward(float x, float y, float &u, float &v) +{ + float y_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2]; + float x_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5]; + float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8]; + + float u_ = atan2f(x_, z_); + float v_ = asinf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_)); + + u = - scale * a * tanf(u_ / a); + v = scale * b * tanf(v_) / cosf(u_); +} + +inline +void CompressedRectilinearPortraitProjector::mapBackward(float u, float v, float &x, float &y) +{ + u /= - scale; + v /= scale; + + float aatg = a * atanf(u / a); + float u_ = aatg; + float v_ = atanf(v * cosf( aatg ) / b); + + float cosv = cosf(v_); + float y_ = cosv * sinf(u_); + float x_ = sinf(v_); + float z_ = cosv * cosf(u_); + + float z; + x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_; + y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_; + z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_; + + if (z > 0) { x /= z; y /= z; } + else x = y = -1; +} + +inline +void PaniniProjector::mapForward(float x, float y, float &u, float &v) +{ + float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2]; + float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5]; + float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8]; + + float u_ = atan2f(x_, z_); + float v_ = asinf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_)); + + float tg = a * tanf(u_ / a); + u = scale * tg; + + float sinu = sinf(u_); + if ( fabs(sinu) < 1E-7 ) + v = scale * b * tanf(v_); + else + v = scale * b * tg * tanf(v_) / sinu; +} + +inline +void PaniniProjector::mapBackward(float u, float v, float &x, float &y) +{ + u /= scale; + v /= scale; + + float lamda = a * atanf(u / a); + float u_ = lamda; + + float v_; + if ( fabs(lamda) > 1E-7) + v_ = atanf(v * sinf(lamda) / (b * a * tanf(lamda / a))); + else + v_ = atanf(v / b); + + float cosv = cosf(v_); + float x_ = cosv * sinf(u_); + float y_ = sinf(v_); + float z_ = cosv * cosf(u_); + + float z; + x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_; + y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_; + z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_; + + if (z > 0) { x /= z; y /= z; } + else x = y = -1; +} + +inline +void PaniniPortraitProjector::mapForward(float x, float y, float &u, float &v) +{ + float y_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2]; + float x_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5]; + float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8]; + + float u_ = atan2f(x_, z_); + float v_ = asinf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_)); + + float tg = a * tanf(u_ / a); + u = - scale * tg; + + float sinu = sinf( u_ ); + if ( fabs(sinu) < 1E-7 ) + v = scale * b * tanf(v_); + else + v = scale * b * tg * tanf(v_) / sinu; +} + +inline +void 
PaniniPortraitProjector::mapBackward(float u, float v, float &x, float &y) +{ + u /= - scale; + v /= scale; + + float lamda = a * atanf(u / a); + float u_ = lamda; + + float v_; + if ( fabs(lamda) > 1E-7) + v_ = atanf(v * sinf(lamda) / (b * a * tanf(lamda/a))); + else + v_ = atanf(v / b); + + float cosv = cosf(v_); + float y_ = cosv * sinf(u_); + float x_ = sinf(v_); + float z_ = cosv * cosf(u_); + + float z; + x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_; + y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_; + z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_; + + if (z > 0) { x /= z; y /= z; } + else x = y = -1; +} + +inline +void MercatorProjector::mapForward(float x, float y, float &u, float &v) +{ + float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2]; + float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5]; + float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8]; + + float u_ = atan2f(x_, z_); + float v_ = asinf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_)); + + u = scale * u_; + v = scale * logf( tanf( (float)(CV_PI/4) + v_/2 ) ); +} + +inline +void MercatorProjector::mapBackward(float u, float v, float &x, float &y) +{ + u /= scale; + v /= scale; + + float v_ = atanf( sinhf(v) ); + float u_ = u; + + float cosv = cosf(v_); + float x_ = cosv * sinf(u_); + float y_ = sinf(v_); + float z_ = cosv * cosf(u_); + + float z; + x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_; + y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_; + z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_; + + if (z > 0) { x /= z; y /= z; } + else x = y = -1; +} + +inline +void TransverseMercatorProjector::mapForward(float x, float y, float &u, float &v) +{ + float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2]; + float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5]; + float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8]; + + float u_ = atan2f(x_, z_); + float v_ = asinf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_)); + + float B = cosf(v_) * sinf(u_); + + u = scale / 2 * logf( (1+B) / (1-B) ); + v = scale * atan2f(tanf(v_), cosf(u_)); +} + +inline +void TransverseMercatorProjector::mapBackward(float u, float v, float &x, float &y) +{ + u /= scale; + v /= scale; + + float v_ = asinf( sinf(v) / coshf(u) ); + float u_ = atan2f( sinhf(u), cos(v) ); + + float cosv = cosf(v_); + float x_ = cosv * sinf(u_); + float y_ = sinf(v_); + float z_ = cosv * cosf(u_); + + float z; + x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_; + y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_; + z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_; + + if (z > 0) { x /= z; y /= z; } + else x = y = -1; +} + +inline +void SphericalPortraitProjector::mapForward(float x, float y, float &u0, float &v0) +{ + float x0_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2]; + float y0_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5]; + float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8]; + + float x_ = y0_; + float y_ = x0_; + float u, v; + + u = scale * atan2f(x_, z_); + v = scale * (static_cast(CV_PI) - acosf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_))); + + u0 = -u;//v; + v0 = v;//u; +} + + +inline +void SphericalPortraitProjector::mapBackward(float u0, float v0, float &x, float &y) +{ + float u, v; + u = -u0;//v0; + v = v0;//u0; + + u /= scale; + v /= scale; + + float sinv = sinf(static_cast(CV_PI) - v); + float x0_ = sinv * sinf(u); + float y0_ = cosf(static_cast(CV_PI) - v); + float z_ = sinv * cosf(u); + + float x_ = y0_; + float y_ = x0_; + + float z; + x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_; + y = k_rinv[3] * x_ + k_rinv[4] * 
y_ + k_rinv[5] * z_; + z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_; + + if (z > 0) { x /= z; y /= z; } + else x = y = -1; +} + +inline +void CylindricalPortraitProjector::mapForward(float x, float y, float &u0, float &v0) +{ + float x0_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2]; + float y0_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5]; + float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8]; + + float x_ = y0_; + float y_ = x0_; + float u, v; + + u = scale * atan2f(x_, z_); + v = scale * y_ / sqrtf(x_ * x_ + z_ * z_); + + u0 = -u;//v; + v0 = v;//u; +} + + +inline +void CylindricalPortraitProjector::mapBackward(float u0, float v0, float &x, float &y) +{ + float u, v; + u = -u0;//v0; + v = v0;//u0; + + u /= scale; + v /= scale; + + float x0_ = sinf(u); + float y0_ = v; + float z_ = cosf(u); + + float x_ = y0_; + float y_ = x0_; + + float z; + x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_; + y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_; + z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_; + + if (z > 0) { x /= z; y /= z; } + else x = y = -1; +} + +inline +void PlanePortraitProjector::mapForward(float x, float y, float &u0, float &v0) +{ + float x0_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2]; + float y0_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5]; + float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8]; + + float x_ = y0_; + float y_ = x0_; + + x_ = t[0] + x_ / z_ * (1 - t[2]); + y_ = t[1] + y_ / z_ * (1 - t[2]); + + float u,v; + u = scale * x_; + v = scale * y_; + + u0 = -u; + v0 = v; +} + + +inline +void PlanePortraitProjector::mapBackward(float u0, float v0, float &x, float &y) +{ + float u, v; + u = -u0; + v = v0; + + u = u / scale - t[0]; + v = v / scale - t[1]; + + float z; + x = k_rinv[0] * v + k_rinv[1] * u + k_rinv[2] * (1 - t[2]); + y = k_rinv[3] * v + k_rinv[4] * u + k_rinv[5] * (1 - t[2]); + z = k_rinv[6] * v + k_rinv[7] * u + k_rinv[8] * (1 - t[2]); + + x /= z; + y /= z; +} + + +} // namespace detail +} // namespace cv + +//! @endcond + +#endif // OPENCV_STITCHING_WARPERS_INL_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/stitching/warpers.hpp b/third_party/opencv/uos/sw_64/include/opencv2/stitching/warpers.hpp new file mode 100644 index 00000000..cf7699c0 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/stitching/warpers.hpp @@ -0,0 +1,192 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
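A hedged usage sketch (editorial note, not part of the vendored header): the detail::RotationWarper implementations declared above are typically driven as below. K and R are placeholders for a 3x3 CV_32F intrinsic and rotation matrix estimated elsewhere; setCameraParams asserts that type.

#include <opencv2/stitching/detail/warpers.hpp>

// Warp one image onto the unit sphere and return the warped image.
static cv::Mat warpToSphere(const cv::Mat& src, const cv::Mat& K, const cv::Mat& R, float scale)
{
    cv::detail::SphericalWarper warper(scale);  // scale = sphere radius in pixels
    cv::Mat dst;
    // warp() builds the backward maps via buildMaps() and remaps src;
    // the returned Point is the top-left corner in panorama coordinates.
    warper.warp(src, K, R, cv::INTER_LINEAR, cv::BORDER_CONSTANT, dst);
    return dst;
}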
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_STITCHING_WARPER_CREATORS_HPP +#define OPENCV_STITCHING_WARPER_CREATORS_HPP + +#include "opencv2/stitching/detail/warpers.hpp" + +namespace cv { + +//! @addtogroup stitching_warp +//! @{ + +/** @brief Image warper factories base class. + */ +class WarperCreator +{ +public: + virtual ~WarperCreator() {} + virtual Ptr create(float scale) const = 0; +}; + +/** @brief Plane warper factory class. + @sa detail::PlaneWarper + */ +class PlaneWarper : public WarperCreator +{ +public: + Ptr create(float scale) const CV_OVERRIDE { return makePtr(scale); } +}; + +/** @brief Affine warper factory class. + @sa detail::AffineWarper + */ +class AffineWarper : public WarperCreator +{ +public: + Ptr create(float scale) const CV_OVERRIDE { return makePtr(scale); } +}; + +/** @brief Cylindrical warper factory class. 
+@sa detail::CylindricalWarper +*/ +class CylindricalWarper: public WarperCreator +{ +public: + Ptr create(float scale) const CV_OVERRIDE { return makePtr(scale); } +}; + +/** @brief Spherical warper factory class */ +class SphericalWarper: public WarperCreator +{ +public: + Ptr create(float scale) const CV_OVERRIDE { return makePtr(scale); } +}; + +class FisheyeWarper : public WarperCreator +{ +public: + Ptr create(float scale) const CV_OVERRIDE { return makePtr(scale); } +}; + +class StereographicWarper: public WarperCreator +{ +public: + Ptr create(float scale) const CV_OVERRIDE { return makePtr(scale); } +}; + +class CompressedRectilinearWarper: public WarperCreator +{ + float a, b; +public: + CompressedRectilinearWarper(float A = 1, float B = 1) + { + a = A; b = B; + } + Ptr create(float scale) const CV_OVERRIDE { return makePtr(scale, a, b); } +}; + +class CompressedRectilinearPortraitWarper: public WarperCreator +{ + float a, b; +public: + CompressedRectilinearPortraitWarper(float A = 1, float B = 1) + { + a = A; b = B; + } + Ptr create(float scale) const CV_OVERRIDE { return makePtr(scale, a, b); } +}; + +class PaniniWarper: public WarperCreator +{ + float a, b; +public: + PaniniWarper(float A = 1, float B = 1) + { + a = A; b = B; + } + Ptr create(float scale) const CV_OVERRIDE { return makePtr(scale, a, b); } +}; + +class PaniniPortraitWarper: public WarperCreator +{ + float a, b; +public: + PaniniPortraitWarper(float A = 1, float B = 1) + { + a = A; b = B; + } + Ptr create(float scale) const CV_OVERRIDE { return makePtr(scale, a, b); } +}; + +class MercatorWarper: public WarperCreator +{ +public: + Ptr create(float scale) const CV_OVERRIDE { return makePtr(scale); } +}; + +class TransverseMercatorWarper: public WarperCreator +{ +public: + Ptr create(float scale) const CV_OVERRIDE { return makePtr(scale); } +}; + + + +#ifdef HAVE_OPENCV_CUDAWARPING +class PlaneWarperGpu: public WarperCreator +{ +public: + Ptr create(float scale) const CV_OVERRIDE { return makePtr(scale); } +}; + + +class CylindricalWarperGpu: public WarperCreator +{ +public: + Ptr create(float scale) const CV_OVERRIDE { return makePtr(scale); } +}; + + +class SphericalWarperGpu: public WarperCreator +{ +public: + Ptr create(float scale) const CV_OVERRIDE { return makePtr(scale); } +}; +#endif + +//! @} stitching_warp + +} // namespace cv + +#endif // OPENCV_STITCHING_WARPER_CREATORS_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/superres.hpp b/third_party/opencv/uos/sw_64/include/opencv2/superres.hpp new file mode 100644 index 00000000..16c11acd --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/superres.hpp @@ -0,0 +1,207 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
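A hedged sketch (editorial note, not part of the vendored header): the WarperCreator factories above exist mainly so that cv::Stitcher can select a projection; one plausible wiring is:

#include <opencv2/stitching.hpp>

static void chooseCylindricalProjection(cv::Ptr<cv::Stitcher> stitcher)
{
    // The stitcher calls create(scale) on the factory once it has estimated
    // its working scale, producing the matching detail:: warper.
    stitcher->setWarper(cv::makePtr<cv::CylindricalWarper>());
}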
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_SUPERRES_HPP +#define OPENCV_SUPERRES_HPP + +#include "opencv2/core.hpp" +#include "opencv2/superres/optical_flow.hpp" + +/** + @defgroup superres Super Resolution + +The Super Resolution module contains a set of functions and classes that can be used to solve the +problem of resolution enhancement. There are a few methods implemented, most of them are described in +the papers @cite Farsiu03 and @cite Mitzel09 . + + */ + +namespace cv +{ + namespace superres + { + +//! @addtogroup superres +//! @{ + + class CV_EXPORTS FrameSource + { + public: + virtual ~FrameSource(); + + virtual void nextFrame(OutputArray frame) = 0; + virtual void reset() = 0; + }; + + CV_EXPORTS Ptr createFrameSource_Empty(); + + CV_EXPORTS Ptr createFrameSource_Video(const String& fileName); + CV_EXPORTS Ptr createFrameSource_Video_CUDA(const String& fileName); + + CV_EXPORTS Ptr createFrameSource_Camera(int deviceId = 0); + + /** @brief Base class for Super Resolution algorithms. + + The class is only used to define the common interface for the whole family of Super Resolution + algorithms. + */ + class CV_EXPORTS SuperResolution : public cv::Algorithm, public FrameSource + { + public: + /** @brief Set input frame source for Super Resolution algorithm. + + @param frameSource Input frame source + */ + void setInput(const Ptr& frameSource); + + /** @brief Process next frame from input and return output result. + + @param frame Output result + */ + void nextFrame(OutputArray frame) CV_OVERRIDE; + void reset() CV_OVERRIDE; + + /** @brief Clear all inner buffers. + */ + virtual void collectGarbage(); + + //! @brief Scale factor + /** @see setScale */ + virtual int getScale() const = 0; + /** @copybrief getScale @see getScale */ + virtual void setScale(int val) = 0; + + //! @brief Iterations count + /** @see setIterations */ + virtual int getIterations() const = 0; + /** @copybrief getIterations @see getIterations */ + virtual void setIterations(int val) = 0; + + //! 
@brief Asymptotic value of steepest descent method + /** @see setTau */ + virtual double getTau() const = 0; + /** @copybrief getTau @see getTau */ + virtual void setTau(double val) = 0; + + //! @brief Weight parameter to balance data term and smoothness term + /** @see setLabmda */ + virtual double getLabmda() const = 0; + /** @copybrief getLabmda @see getLabmda */ + virtual void setLabmda(double val) = 0; + + //! @brief Parameter of spacial distribution in Bilateral-TV + /** @see setAlpha */ + virtual double getAlpha() const = 0; + /** @copybrief getAlpha @see getAlpha */ + virtual void setAlpha(double val) = 0; + + //! @brief Kernel size of Bilateral-TV filter + /** @see setKernelSize */ + virtual int getKernelSize() const = 0; + /** @copybrief getKernelSize @see getKernelSize */ + virtual void setKernelSize(int val) = 0; + + //! @brief Gaussian blur kernel size + /** @see setBlurKernelSize */ + virtual int getBlurKernelSize() const = 0; + /** @copybrief getBlurKernelSize @see getBlurKernelSize */ + virtual void setBlurKernelSize(int val) = 0; + + //! @brief Gaussian blur sigma + /** @see setBlurSigma */ + virtual double getBlurSigma() const = 0; + /** @copybrief getBlurSigma @see getBlurSigma */ + virtual void setBlurSigma(double val) = 0; + + //! @brief Radius of the temporal search area + /** @see setTemporalAreaRadius */ + virtual int getTemporalAreaRadius() const = 0; + /** @copybrief getTemporalAreaRadius @see getTemporalAreaRadius */ + virtual void setTemporalAreaRadius(int val) = 0; + + //! @brief Dense optical flow algorithm + /** @see setOpticalFlow */ + virtual Ptr getOpticalFlow() const = 0; + /** @copybrief getOpticalFlow @see getOpticalFlow */ + virtual void setOpticalFlow(const Ptr &val) = 0; + + protected: + SuperResolution(); + + virtual void initImpl(Ptr& frameSource) = 0; + virtual void processImpl(Ptr& frameSource, OutputArray output) = 0; + + bool isUmat_; + + private: + Ptr frameSource_; + bool firstCall_; + }; + + /** @brief Create Bilateral TV-L1 Super Resolution. + + This class implements Super Resolution algorithm described in the papers @cite Farsiu03 and + @cite Mitzel09 . + + Here are important members of the class that control the algorithm, which you can set after + constructing the class instance: + + - **int scale** Scale factor. + - **int iterations** Iteration count. + - **double tau** Asymptotic value of steepest descent method. + - **double lambda** Weight parameter to balance data term and smoothness term. + - **double alpha** Parameter of spacial distribution in Bilateral-TV. + - **int btvKernelSize** Kernel size of Bilateral-TV filter. + - **int blurKernelSize** Gaussian blur kernel size. + - **double blurSigma** Gaussian blur sigma. + - **int temporalAreaRadius** Radius of the temporal search area. + - **Ptr\ opticalFlow** Dense optical flow algorithm. + */ + CV_EXPORTS Ptr createSuperResolution_BTVL1(); + CV_EXPORTS Ptr createSuperResolution_BTVL1_CUDA(); + +//! @} superres + + } +} + +#endif // OPENCV_SUPERRES_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/superres/optical_flow.hpp b/third_party/opencv/uos/sw_64/include/opencv2/superres/optical_flow.hpp new file mode 100644 index 00000000..07e7ca9c --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/superres/optical_flow.hpp @@ -0,0 +1,203 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
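A hedged sketch (editorial note, not part of the vendored header) of the super-resolution pipeline declared above; the file name is a placeholder and the parameter values are illustrative only:

#include <opencv2/superres.hpp>

static void runBtvl1()
{
    cv::Ptr<cv::superres::SuperResolution> sr = cv::superres::createSuperResolution_BTVL1();
    sr->setScale(2);        // upscaling factor
    sr->setIterations(50);  // steepest-descent iterations per output frame
    sr->setInput(cv::superres::createFrameSource_Video("input.avi"));

    cv::Mat frame;
    sr->nextFrame(frame);   // produces one super-resolved frame
}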
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_SUPERRES_OPTICAL_FLOW_HPP +#define OPENCV_SUPERRES_OPTICAL_FLOW_HPP + +#include "opencv2/core.hpp" + +namespace cv +{ + namespace superres + { + +//! @addtogroup superres +//! 
@{ + + class CV_EXPORTS DenseOpticalFlowExt : public cv::Algorithm + { + public: + virtual void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2 = noArray()) = 0; + virtual void collectGarbage() = 0; + }; + + + class CV_EXPORTS FarnebackOpticalFlow : public virtual DenseOpticalFlowExt + { + public: + /** @see setPyrScale */ + virtual double getPyrScale() const = 0; + /** @copybrief getPyrScale @see getPyrScale */ + virtual void setPyrScale(double val) = 0; + /** @see setLevelsNumber */ + virtual int getLevelsNumber() const = 0; + /** @copybrief getLevelsNumber @see getLevelsNumber */ + virtual void setLevelsNumber(int val) = 0; + /** @see setWindowSize */ + virtual int getWindowSize() const = 0; + /** @copybrief getWindowSize @see getWindowSize */ + virtual void setWindowSize(int val) = 0; + /** @see setIterations */ + virtual int getIterations() const = 0; + /** @copybrief getIterations @see getIterations */ + virtual void setIterations(int val) = 0; + /** @see setPolyN */ + virtual int getPolyN() const = 0; + /** @copybrief getPolyN @see getPolyN */ + virtual void setPolyN(int val) = 0; + /** @see setPolySigma */ + virtual double getPolySigma() const = 0; + /** @copybrief getPolySigma @see getPolySigma */ + virtual void setPolySigma(double val) = 0; + /** @see setFlags */ + virtual int getFlags() const = 0; + /** @copybrief getFlags @see getFlags */ + virtual void setFlags(int val) = 0; + }; + CV_EXPORTS Ptr createOptFlow_Farneback(); + CV_EXPORTS Ptr createOptFlow_Farneback_CUDA(); + + +// CV_EXPORTS Ptr createOptFlow_Simple(); + + + class CV_EXPORTS DualTVL1OpticalFlow : public virtual DenseOpticalFlowExt + { + public: + /** @see setTau */ + virtual double getTau() const = 0; + /** @copybrief getTau @see getTau */ + virtual void setTau(double val) = 0; + /** @see setLambda */ + virtual double getLambda() const = 0; + /** @copybrief getLambda @see getLambda */ + virtual void setLambda(double val) = 0; + /** @see setTheta */ + virtual double getTheta() const = 0; + /** @copybrief getTheta @see getTheta */ + virtual void setTheta(double val) = 0; + /** @see setScalesNumber */ + virtual int getScalesNumber() const = 0; + /** @copybrief getScalesNumber @see getScalesNumber */ + virtual void setScalesNumber(int val) = 0; + /** @see setWarpingsNumber */ + virtual int getWarpingsNumber() const = 0; + /** @copybrief getWarpingsNumber @see getWarpingsNumber */ + virtual void setWarpingsNumber(int val) = 0; + /** @see setEpsilon */ + virtual double getEpsilon() const = 0; + /** @copybrief getEpsilon @see getEpsilon */ + virtual void setEpsilon(double val) = 0; + /** @see setIterations */ + virtual int getIterations() const = 0; + /** @copybrief getIterations @see getIterations */ + virtual void setIterations(int val) = 0; + /** @see setUseInitialFlow */ + virtual bool getUseInitialFlow() const = 0; + /** @copybrief getUseInitialFlow @see getUseInitialFlow */ + virtual void setUseInitialFlow(bool val) = 0; + }; + CV_EXPORTS Ptr createOptFlow_DualTVL1(); + CV_EXPORTS Ptr createOptFlow_DualTVL1_CUDA(); + + + class CV_EXPORTS BroxOpticalFlow : public virtual DenseOpticalFlowExt + { + public: + //! @brief Flow smoothness + /** @see setAlpha */ + virtual double getAlpha() const = 0; + /** @copybrief getAlpha @see getAlpha */ + virtual void setAlpha(double val) = 0; + //! @brief Gradient constancy importance + /** @see setGamma */ + virtual double getGamma() const = 0; + /** @copybrief getGamma @see getGamma */ + virtual void setGamma(double val) = 0; + //! 
@brief Pyramid scale factor + /** @see setScaleFactor */ + virtual double getScaleFactor() const = 0; + /** @copybrief getScaleFactor @see getScaleFactor */ + virtual void setScaleFactor(double val) = 0; + //! @brief Number of lagged non-linearity iterations (inner loop) + /** @see setInnerIterations */ + virtual int getInnerIterations() const = 0; + /** @copybrief getInnerIterations @see getInnerIterations */ + virtual void setInnerIterations(int val) = 0; + //! @brief Number of warping iterations (number of pyramid levels) + /** @see setOuterIterations */ + virtual int getOuterIterations() const = 0; + /** @copybrief getOuterIterations @see getOuterIterations */ + virtual void setOuterIterations(int val) = 0; + //! @brief Number of linear system solver iterations + /** @see setSolverIterations */ + virtual int getSolverIterations() const = 0; + /** @copybrief getSolverIterations @see getSolverIterations */ + virtual void setSolverIterations(int val) = 0; + }; + CV_EXPORTS Ptr createOptFlow_Brox_CUDA(); + + + class PyrLKOpticalFlow : public virtual DenseOpticalFlowExt + { + public: + /** @see setWindowSize */ + virtual int getWindowSize() const = 0; + /** @copybrief getWindowSize @see getWindowSize */ + virtual void setWindowSize(int val) = 0; + /** @see setMaxLevel */ + virtual int getMaxLevel() const = 0; + /** @copybrief getMaxLevel @see getMaxLevel */ + virtual void setMaxLevel(int val) = 0; + /** @see setIterations */ + virtual int getIterations() const = 0; + /** @copybrief getIterations @see getIterations */ + virtual void setIterations(int val) = 0; + }; + CV_EXPORTS Ptr createOptFlow_PyrLK_CUDA(); + +//! @} + + } +} + +#endif // OPENCV_SUPERRES_OPTICAL_FLOW_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/video.hpp b/third_party/opencv/uos/sw_64/include/opencv2/video.hpp new file mode 100644 index 00000000..aa644a93 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/video.hpp @@ -0,0 +1,63 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
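A hedged sketch (editorial note, not part of the vendored header): plugging one of the DenseOpticalFlowExt implementations above into the super-resolution pipeline; the level count is an illustrative value:

#include <opencv2/superres.hpp>
#include <opencv2/superres/optical_flow.hpp>

static void useFarnebackFlow(cv::Ptr<cv::superres::SuperResolution> sr)
{
    cv::Ptr<cv::superres::FarnebackOpticalFlow> flow =
        cv::superres::createOptFlow_Farneback();
    flow->setLevelsNumber(4);  // pyramid depth
    sr->setOpticalFlow(flow);  // any DenseOpticalFlowExt works here
}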
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_VIDEO_HPP +#define OPENCV_VIDEO_HPP + +/** + @defgroup video Video Analysis + @{ + @defgroup video_motion Motion Analysis + @defgroup video_track Object Tracking + @defgroup video_c C API + @} +*/ + +#include "opencv2/video/tracking.hpp" +#include "opencv2/video/background_segm.hpp" + +#ifndef DISABLE_OPENCV_24_COMPATIBILITY +#include "opencv2/video/tracking_c.h" +#endif + +#endif //OPENCV_VIDEO_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/video/background_segm.hpp b/third_party/opencv/uos/sw_64/include/opencv2/video/background_segm.hpp new file mode 100644 index 00000000..e1dfa15a --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/video/background_segm.hpp @@ -0,0 +1,317 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_BACKGROUND_SEGM_HPP +#define OPENCV_BACKGROUND_SEGM_HPP + +#include "opencv2/core.hpp" + +namespace cv +{ + +//! @addtogroup video_motion +//! @{ + +/** @brief Base class for background/foreground segmentation. : + +The class is only used to define the common interface for the whole family of background/foreground +segmentation algorithms. + */ +class CV_EXPORTS_W BackgroundSubtractor : public Algorithm +{ +public: + /** @brief Computes a foreground mask. + + @param image Next video frame. + @param fgmask The output foreground mask as an 8-bit binary image. + @param learningRate The value between 0 and 1 that indicates how fast the background model is + learnt. Negative parameter value makes the algorithm to use some automatically chosen learning + rate. 0 means that the background model is not updated at all, 1 means that the background model + is completely reinitialized from the last frame. + */ + CV_WRAP virtual void apply(InputArray image, OutputArray fgmask, double learningRate=-1) = 0; + + /** @brief Computes a background image. + + @param backgroundImage The output background image. + + @note Sometimes the background image can be very blurry, as it contain the average background + statistics. + */ + CV_WRAP virtual void getBackgroundImage(OutputArray backgroundImage) const = 0; +}; + + +/** @brief Gaussian Mixture-based Background/Foreground Segmentation Algorithm. + +The class implements the Gaussian mixture model background subtraction described in @cite Zivkovic2004 +and @cite Zivkovic2006 . + */ +class CV_EXPORTS_W BackgroundSubtractorMOG2 : public BackgroundSubtractor +{ +public: + /** @brief Returns the number of last frames that affect the background model + */ + CV_WRAP virtual int getHistory() const = 0; + /** @brief Sets the number of last frames that affect the background model + */ + CV_WRAP virtual void setHistory(int history) = 0; + + /** @brief Returns the number of gaussian components in the background model + */ + CV_WRAP virtual int getNMixtures() const = 0; + /** @brief Sets the number of gaussian components in the background model. + + The model needs to be reinitalized to reserve memory. + */ + CV_WRAP virtual void setNMixtures(int nmixtures) = 0;//needs reinitialization! + + /** @brief Returns the "background ratio" parameter of the algorithm + + If a foreground pixel keeps semi-constant value for about backgroundRatio\*history frames, it's + considered background and added to the model as a center of a new component. It corresponds to TB + parameter in the paper. + */ + CV_WRAP virtual double getBackgroundRatio() const = 0; + /** @brief Sets the "background ratio" parameter of the algorithm + */ + CV_WRAP virtual void setBackgroundRatio(double ratio) = 0; + + /** @brief Returns the variance threshold for the pixel-model match + + The main threshold on the squared Mahalanobis distance to decide if the sample is well described by + the background model or not. Related to Cthr from the paper. 
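
// Illustrative sketch only (not part of the vendored header): typical use of the
// BackgroundSubtractor interface documented above. The choice of a MOG2 instance and the
// helper name subtractExample are assumptions for the example.
#include <opencv2/video.hpp>

void subtractExample(const cv::Mat& frame, cv::Ptr<cv::BackgroundSubtractor>& subtractor)
{
    cv::Mat fgMask, background;
    subtractor->apply(frame, fgMask);            // learningRate defaults to -1 (automatic)
    subtractor->getBackgroundImage(background);  // current background estimate (may be blurry)
}
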
+ */ + CV_WRAP virtual double getVarThreshold() const = 0; + /** @brief Sets the variance threshold for the pixel-model match + */ + CV_WRAP virtual void setVarThreshold(double varThreshold) = 0; + + /** @brief Returns the variance threshold for the pixel-model match used for new mixture component generation + + Threshold for the squared Mahalanobis distance that helps decide when a sample is close to the + existing components (corresponds to Tg in the paper). If a pixel is not close to any component, it + is considered foreground or added as a new component. 3 sigma =\> Tg=3\*3=9 is default. A smaller Tg + value generates more components. A higher Tg value may result in a small number of components but + they can grow too large. + */ + CV_WRAP virtual double getVarThresholdGen() const = 0; + /** @brief Sets the variance threshold for the pixel-model match used for new mixture component generation + */ + CV_WRAP virtual void setVarThresholdGen(double varThresholdGen) = 0; + + /** @brief Returns the initial variance of each gaussian component + */ + CV_WRAP virtual double getVarInit() const = 0; + /** @brief Sets the initial variance of each gaussian component + */ + CV_WRAP virtual void setVarInit(double varInit) = 0; + + CV_WRAP virtual double getVarMin() const = 0; + CV_WRAP virtual void setVarMin(double varMin) = 0; + + CV_WRAP virtual double getVarMax() const = 0; + CV_WRAP virtual void setVarMax(double varMax) = 0; + + /** @brief Returns the complexity reduction threshold + + This parameter defines the number of samples needed to accept to prove the component exists. CT=0.05 + is a default value for all the samples. By setting CT=0 you get an algorithm very similar to the + standard Stauffer&Grimson algorithm. + */ + CV_WRAP virtual double getComplexityReductionThreshold() const = 0; + /** @brief Sets the complexity reduction threshold + */ + CV_WRAP virtual void setComplexityReductionThreshold(double ct) = 0; + + /** @brief Returns the shadow detection flag + + If true, the algorithm detects shadows and marks them. See createBackgroundSubtractorMOG2 for + details. + */ + CV_WRAP virtual bool getDetectShadows() const = 0; + /** @brief Enables or disables shadow detection + */ + CV_WRAP virtual void setDetectShadows(bool detectShadows) = 0; + + /** @brief Returns the shadow value + + Shadow value is the value used to mark shadows in the foreground mask. Default value is 127. Value 0 + in the mask always means background, 255 means foreground. + */ + CV_WRAP virtual int getShadowValue() const = 0; + /** @brief Sets the shadow value + */ + CV_WRAP virtual void setShadowValue(int value) = 0; + + /** @brief Returns the shadow threshold + + A shadow is detected if pixel is a darker version of the background. The shadow threshold (Tau in + the paper) is a threshold defining how much darker the shadow can be. Tau= 0.5 means that if a pixel + is more than twice darker then it is not shadow. See Prati, Mikic, Trivedi and Cucchiara, + *Detecting Moving Shadows...*, IEEE PAMI,2003. + */ + CV_WRAP virtual double getShadowThreshold() const = 0; + /** @brief Sets the shadow threshold + */ + CV_WRAP virtual void setShadowThreshold(double threshold) = 0; + + /** @brief Computes a foreground mask. + + @param image Next video frame. Floating point frame will be used without scaling and should be in range \f$[0,255]\f$. + @param fgmask The output foreground mask as an 8-bit binary image. + @param learningRate The value between 0 and 1 that indicates how fast the background model is + learnt. 
Negative parameter value makes the algorithm to use some automatically chosen learning + rate. 0 means that the background model is not updated at all, 1 means that the background model + is completely reinitialized from the last frame. + */ + CV_WRAP virtual void apply(InputArray image, OutputArray fgmask, double learningRate=-1) CV_OVERRIDE = 0; +}; + +/** @brief Creates MOG2 Background Subtractor + +@param history Length of the history. +@param varThreshold Threshold on the squared Mahalanobis distance between the pixel and the model +to decide whether a pixel is well described by the background model. This parameter does not +affect the background update. +@param detectShadows If true, the algorithm will detect shadows and mark them. It decreases the +speed a bit, so if you do not need this feature, set the parameter to false. + */ +CV_EXPORTS_W Ptr + createBackgroundSubtractorMOG2(int history=500, double varThreshold=16, + bool detectShadows=true); + +/** @brief K-nearest neighbours - based Background/Foreground Segmentation Algorithm. + +The class implements the K-nearest neighbours background subtraction described in @cite Zivkovic2006 . +Very efficient if number of foreground pixels is low. + */ +class CV_EXPORTS_W BackgroundSubtractorKNN : public BackgroundSubtractor +{ +public: + /** @brief Returns the number of last frames that affect the background model + */ + CV_WRAP virtual int getHistory() const = 0; + /** @brief Sets the number of last frames that affect the background model + */ + CV_WRAP virtual void setHistory(int history) = 0; + + /** @brief Returns the number of data samples in the background model + */ + CV_WRAP virtual int getNSamples() const = 0; + /** @brief Sets the number of data samples in the background model. + + The model needs to be reinitalized to reserve memory. + */ + CV_WRAP virtual void setNSamples(int _nN) = 0;//needs reinitialization! + + /** @brief Returns the threshold on the squared distance between the pixel and the sample + + The threshold on the squared distance between the pixel and the sample to decide whether a pixel is + close to a data sample. + */ + CV_WRAP virtual double getDist2Threshold() const = 0; + /** @brief Sets the threshold on the squared distance + */ + CV_WRAP virtual void setDist2Threshold(double _dist2Threshold) = 0; + + /** @brief Returns the number of neighbours, the k in the kNN. + + K is the number of samples that need to be within dist2Threshold in order to decide that that + pixel is matching the kNN background model. + */ + CV_WRAP virtual int getkNNSamples() const = 0; + /** @brief Sets the k in the kNN. How many nearest neighbours need to match. + */ + CV_WRAP virtual void setkNNSamples(int _nkNN) = 0; + + /** @brief Returns the shadow detection flag + + If true, the algorithm detects shadows and marks them. See createBackgroundSubtractorKNN for + details. + */ + CV_WRAP virtual bool getDetectShadows() const = 0; + /** @brief Enables or disables shadow detection + */ + CV_WRAP virtual void setDetectShadows(bool detectShadows) = 0; + + /** @brief Returns the shadow value + + Shadow value is the value used to mark shadows in the foreground mask. Default value is 127. Value 0 + in the mask always means background, 255 means foreground. + */ + CV_WRAP virtual int getShadowValue() const = 0; + /** @brief Sets the shadow value + */ + CV_WRAP virtual void setShadowValue(int value) = 0; + + /** @brief Returns the shadow threshold + + A shadow is detected if pixel is a darker version of the background. 
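
// Illustrative sketch only (not part of the vendored header): creating and tuning a MOG2
// subtractor through the factory documented above. The parameter values are arbitrary
// examples, not recommended defaults.
#include <opencv2/video.hpp>

cv::Ptr<cv::BackgroundSubtractorMOG2> makeTunedMog2()
{
    // history = 300 frames, varThreshold = 25, shadow detection enabled
    cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 =
        cv::createBackgroundSubtractorMOG2(300, 25.0, true);
    mog2->setShadowValue(127);       // shadows marked with 127 in the foreground mask
    mog2->setShadowThreshold(0.5);   // Tau from the paper
    return mog2;
}
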
The shadow threshold (Tau in + the paper) is a threshold defining how much darker the shadow can be. Tau= 0.5 means that if a pixel + is more than twice darker then it is not shadow. See Prati, Mikic, Trivedi and Cucchiara, + *Detecting Moving Shadows...*, IEEE PAMI,2003. + */ + CV_WRAP virtual double getShadowThreshold() const = 0; + /** @brief Sets the shadow threshold + */ + CV_WRAP virtual void setShadowThreshold(double threshold) = 0; +}; + +/** @brief Creates KNN Background Subtractor + +@param history Length of the history. +@param dist2Threshold Threshold on the squared distance between the pixel and the sample to decide +whether a pixel is close to that sample. This parameter does not affect the background update. +@param detectShadows If true, the algorithm will detect shadows and mark them. It decreases the +speed a bit, so if you do not need this feature, set the parameter to false. + */ +CV_EXPORTS_W Ptr + createBackgroundSubtractorKNN(int history=500, double dist2Threshold=400.0, + bool detectShadows=true); + +//! @} video_motion + +} // cv + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/video/tracking.hpp b/third_party/opencv/uos/sw_64/include/opencv2/video/tracking.hpp new file mode 100644 index 00000000..9f344be2 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/video/tracking.hpp @@ -0,0 +1,654 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef OPENCV_TRACKING_HPP +#define OPENCV_TRACKING_HPP + +#include "opencv2/core.hpp" +#include "opencv2/imgproc.hpp" + +namespace cv +{ + +//! @addtogroup video_track +//! @{ + +enum { OPTFLOW_USE_INITIAL_FLOW = 4, + OPTFLOW_LK_GET_MIN_EIGENVALS = 8, + OPTFLOW_FARNEBACK_GAUSSIAN = 256 + }; + +/** @brief Finds an object center, size, and orientation. + +@param probImage Back projection of the object histogram. See calcBackProject. +@param window Initial search window. +@param criteria Stop criteria for the underlying meanShift. +returns +(in old interfaces) Number of iterations CAMSHIFT took to converge +The function implements the CAMSHIFT object tracking algorithm @cite Bradski98 . First, it finds an +object center using meanShift and then adjusts the window size and finds the optimal rotation. The +function returns the rotated rectangle structure that includes the object position, size, and +orientation. The next position of the search window can be obtained with RotatedRect::boundingRect() + +See the OpenCV sample camshiftdemo.c that tracks colored objects. + +@note +- (Python) A sample explaining the camshift tracking algorithm can be found at + opencv_source_code/samples/python/camshift.py + */ +CV_EXPORTS_W RotatedRect CamShift( InputArray probImage, CV_IN_OUT Rect& window, + TermCriteria criteria ); +/** @example samples/cpp/camshiftdemo.cpp +An example using the mean-shift tracking algorithm +*/ + +/** @brief Finds an object on a back projection image. + +@param probImage Back projection of the object histogram. See calcBackProject for details. +@param window Initial search window. +@param criteria Stop criteria for the iterative search algorithm. +returns +: Number of iterations CAMSHIFT took to converge. +The function implements the iterative object search algorithm. It takes the input back projection of +an object and the initial position. The mass center in window of the back projection image is +computed and the search window center shifts to the mass center. The procedure is repeated until the +specified number of iterations criteria.maxCount is done or until the window center shifts by less +than criteria.epsilon. The algorithm is used inside CamShift and, unlike CamShift , the search +window size or orientation do not change during the search. You can simply pass the output of +calcBackProject to this function. But better results can be obtained if you pre-filter the back +projection and remove the noise. For example, you can do this by retrieving connected components +with findContours , throwing away contours with small area ( contourArea ), and rendering the +remaining contours with drawContours. + + */ +CV_EXPORTS_W int meanShift( InputArray probImage, CV_IN_OUT Rect& window, TermCriteria criteria ); + +/** @brief Constructs the image pyramid which can be passed to calcOpticalFlowPyrLK. + +@param img 8-bit input image. +@param pyramid output pyramid. +@param winSize window size of optical flow algorithm. Must be not less than winSize argument of +calcOpticalFlowPyrLK. It is needed to calculate required padding for pyramid levels. +@param maxLevel 0-based maximal pyramid level number. +@param withDerivatives set to precompute gradients for the every pyramid level. If pyramid is +constructed without the gradients then calcOpticalFlowPyrLK will calculate them internally. +@param pyrBorder the border mode for pyramid layers. +@param derivBorder the border mode for gradients. +@param tryReuseInputImage put ROI of input image into the pyramid if possible. 
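
// Illustrative sketch only (not part of the vendored header): tracking with the meanShift
// and CamShift functions documented above. The back-projection image is assumed to come
// from calcBackProject of the target histogram; initialWindow is a hypothetical starting ROI.
#include <opencv2/video/tracking.hpp>

cv::RotatedRect trackOneFrame(const cv::Mat& backProjection, cv::Rect& initialWindow)
{
    cv::TermCriteria criteria(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 10, 1.0);
    // meanShift only updates the window position ...
    cv::meanShift(backProjection, initialWindow, criteria);
    // ... while CamShift additionally adapts the window size and orientation.
    return cv::CamShift(backProjection, initialWindow, criteria);
}
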
You can pass false +to force data copying. +@return number of levels in constructed pyramid. Can be less than maxLevel. + */ +CV_EXPORTS_W int buildOpticalFlowPyramid( InputArray img, OutputArrayOfArrays pyramid, + Size winSize, int maxLevel, bool withDerivatives = true, + int pyrBorder = BORDER_REFLECT_101, + int derivBorder = BORDER_CONSTANT, + bool tryReuseInputImage = true ); + +/** @example samples/cpp/lkdemo.cpp +An example using the Lucas-Kanade optical flow algorithm +*/ + +/** @brief Calculates an optical flow for a sparse feature set using the iterative Lucas-Kanade method with +pyramids. + +@param prevImg first 8-bit input image or pyramid constructed by buildOpticalFlowPyramid. +@param nextImg second input image or pyramid of the same size and the same type as prevImg. +@param prevPts vector of 2D points for which the flow needs to be found; point coordinates must be +single-precision floating-point numbers. +@param nextPts output vector of 2D points (with single-precision floating-point coordinates) +containing the calculated new positions of input features in the second image; when +OPTFLOW_USE_INITIAL_FLOW flag is passed, the vector must have the same size as in the input. +@param status output status vector (of unsigned chars); each element of the vector is set to 1 if +the flow for the corresponding features has been found, otherwise, it is set to 0. +@param err output vector of errors; each element of the vector is set to an error for the +corresponding feature, type of the error measure can be set in flags parameter; if the flow wasn't +found then the error is not defined (use the status parameter to find such cases). +@param winSize size of the search window at each pyramid level. +@param maxLevel 0-based maximal pyramid level number; if set to 0, pyramids are not used (single +level), if set to 1, two levels are used, and so on; if pyramids are passed to input then +algorithm will use as many levels as pyramids have but no more than maxLevel. +@param criteria parameter, specifying the termination criteria of the iterative search algorithm +(after the specified maximum number of iterations criteria.maxCount or when the search window +moves by less than criteria.epsilon. +@param flags operation flags: + - **OPTFLOW_USE_INITIAL_FLOW** uses initial estimations, stored in nextPts; if the flag is + not set, then prevPts is copied to nextPts and is considered the initial estimate. + - **OPTFLOW_LK_GET_MIN_EIGENVALS** use minimum eigen values as an error measure (see + minEigThreshold description); if the flag is not set, then L1 distance between patches + around the original and a moved point, divided by number of pixels in a window, is used as a + error measure. +@param minEigThreshold the algorithm calculates the minimum eigen value of a 2x2 normal matrix of +optical flow equations (this matrix is called a spatial gradient matrix in @cite Bouguet00), divided +by number of pixels in a window; if this value is less than minEigThreshold, then a corresponding +feature is filtered out and its flow is not processed, so it allows to remove bad points and get a +performance boost. + +The function implements a sparse iterative version of the Lucas-Kanade optical flow in pyramids. See +@cite Bouguet00 . The function is parallelized with the TBB library. 
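
// Illustrative sketch only (not part of the vendored header): sparse Lucas-Kanade flow with
// an explicitly built pyramid, as described above. Using goodFeaturesToTrack as the point
// source is an assumption; any set of 2D points works.
#include <opencv2/video/tracking.hpp>
#include <opencv2/imgproc.hpp>

void sparseLkExample(const cv::Mat& prevGray, const cv::Mat& nextGray)
{
    std::vector<cv::Point2f> prevPts, nextPts;
    cv::goodFeaturesToTrack(prevGray, prevPts, 200, 0.01, 7);   // points to track

    // Optional: pre-build the pyramid of the first image so it can be reused across calls.
    std::vector<cv::Mat> prevPyr;
    cv::buildOpticalFlowPyramid(prevGray, prevPyr, cv::Size(21, 21), 3);

    std::vector<uchar> status;   // 1 if the flow for the corresponding point was found
    std::vector<float> err;
    cv::calcOpticalFlowPyrLK(prevPyr, nextGray, prevPts, nextPts, status, err,
                             cv::Size(21, 21), 3);
}
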
+ +@note + +- An example using the Lucas-Kanade optical flow algorithm can be found at + opencv_source_code/samples/cpp/lkdemo.cpp +- (Python) An example using the Lucas-Kanade optical flow algorithm can be found at + opencv_source_code/samples/python/lk_track.py +- (Python) An example using the Lucas-Kanade tracker for homography matching can be found at + opencv_source_code/samples/python/lk_homography.py + */ +CV_EXPORTS_W void calcOpticalFlowPyrLK( InputArray prevImg, InputArray nextImg, + InputArray prevPts, InputOutputArray nextPts, + OutputArray status, OutputArray err, + Size winSize = Size(21,21), int maxLevel = 3, + TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01), + int flags = 0, double minEigThreshold = 1e-4 ); + +/** @brief Computes a dense optical flow using the Gunnar Farneback's algorithm. + +@param prev first 8-bit single-channel input image. +@param next second input image of the same size and the same type as prev. +@param flow computed flow image that has the same size as prev and type CV_32FC2. +@param pyr_scale parameter, specifying the image scale (\<1) to build pyramids for each image; +pyr_scale=0.5 means a classical pyramid, where each next layer is twice smaller than the previous +one. +@param levels number of pyramid layers including the initial image; levels=1 means that no extra +layers are created and only the original images are used. +@param winsize averaging window size; larger values increase the algorithm robustness to image +noise and give more chances for fast motion detection, but yield more blurred motion field. +@param iterations number of iterations the algorithm does at each pyramid level. +@param poly_n size of the pixel neighborhood used to find polynomial expansion in each pixel; +larger values mean that the image will be approximated with smoother surfaces, yielding more +robust algorithm and more blurred motion field, typically poly_n =5 or 7. +@param poly_sigma standard deviation of the Gaussian that is used to smooth derivatives used as a +basis for the polynomial expansion; for poly_n=5, you can set poly_sigma=1.1, for poly_n=7, a +good value would be poly_sigma=1.5. +@param flags operation flags that can be a combination of the following: + - **OPTFLOW_USE_INITIAL_FLOW** uses the input flow as an initial flow approximation. + - **OPTFLOW_FARNEBACK_GAUSSIAN** uses the Gaussian \f$\texttt{winsize}\times\texttt{winsize}\f$ + filter instead of a box filter of the same size for optical flow estimation; usually, this + option gives z more accurate flow than with a box filter, at the cost of lower speed; + normally, winsize for a Gaussian window should be set to a larger value to achieve the same + level of robustness. 
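
// Illustrative sketch only (not part of the vendored header): a dense Farneback flow call
// with a commonly used parameter set (pyr_scale 0.5, 3 levels, winsize 15, 3 iterations,
// poly_n 5, poly_sigma 1.2). The values are examples, not defaults of the function.
#include <opencv2/video/tracking.hpp>

cv::Mat denseFarnebackExample(const cv::Mat& prevGray, const cv::Mat& nextGray)
{
    cv::Mat flow;   // CV_32FC2, per-pixel (dx, dy) displacement
    cv::calcOpticalFlowFarneback(prevGray, nextGray, flow,
                                 0.5, 3, 15, 3, 5, 1.2, 0);
    return flow;
}
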
+ +The function finds an optical flow for each prev pixel using the @cite Farneback2003 algorithm so that + +\f[\texttt{prev} (y,x) \sim \texttt{next} ( y + \texttt{flow} (y,x)[1], x + \texttt{flow} (y,x)[0])\f] + +@note + +- An example using the optical flow algorithm described by Gunnar Farneback can be found at + opencv_source_code/samples/cpp/fback.cpp +- (Python) An example using the optical flow algorithm described by Gunnar Farneback can be + found at opencv_source_code/samples/python/opt_flow.py + */ +CV_EXPORTS_W void calcOpticalFlowFarneback( InputArray prev, InputArray next, InputOutputArray flow, + double pyr_scale, int levels, int winsize, + int iterations, int poly_n, double poly_sigma, + int flags ); + +/** @brief Computes an optimal affine transformation between two 2D point sets. + +@param src First input 2D point set stored in std::vector or Mat, or an image stored in Mat. +@param dst Second input 2D point set of the same size and the same type as A, or another image. +@param fullAffine If true, the function finds an optimal affine transformation with no additional +restrictions (6 degrees of freedom). Otherwise, the class of transformations to choose from is +limited to combinations of translation, rotation, and uniform scaling (4 degrees of freedom). + +The function finds an optimal affine transform *[A|b]* (a 2 x 3 floating-point matrix) that +approximates best the affine transformation between: + +* Two point sets +* Two raster images. In this case, the function first finds some features in the src image and + finds the corresponding features in dst image. After that, the problem is reduced to the first + case. +In case of point sets, the problem is formulated as follows: you need to find a 2x2 matrix *A* and +2x1 vector *b* so that: + +\f[[A^*|b^*] = arg \min _{[A|b]} \sum _i \| \texttt{dst}[i] - A { \texttt{src}[i]}^T - b \| ^2\f] +where src[i] and dst[i] are the i-th points in src and dst, respectively +\f$[A|b]\f$ can be either arbitrary (when fullAffine=true ) or have a form of +\f[\begin{bmatrix} a_{11} & a_{12} & b_1 \\ -a_{12} & a_{11} & b_2 \end{bmatrix}\f] +when fullAffine=false. + +@sa +estimateAffine2D, estimateAffinePartial2D, getAffineTransform, getPerspectiveTransform, findHomography + */ +CV_EXPORTS_W Mat estimateRigidTransform( InputArray src, InputArray dst, bool fullAffine); +CV_EXPORTS_W Mat estimateRigidTransform( InputArray src, InputArray dst, bool fullAffine, int ransacMaxIters, double ransacGoodRatio, + int ransacSize0); + + +enum +{ + MOTION_TRANSLATION = 0, + MOTION_EUCLIDEAN = 1, + MOTION_AFFINE = 2, + MOTION_HOMOGRAPHY = 3 +}; + +/** @brief Computes the Enhanced Correlation Coefficient value between two images @cite EP08 . + +@param templateImage single-channel template image; CV_8U or CV_32F array. +@param inputImage single-channel input image to be warped to provide an image similar to + templateImage, same type as templateImage. +@param inputMask An optional mask to indicate valid values of inputImage. + +@sa +findTransformECC + */ + +CV_EXPORTS_W double computeECC(InputArray templateImage, InputArray inputImage, InputArray inputMask = noArray()); + +/** @example samples/cpp/image_alignment.cpp +An example using the image alignment ECC algorithm +*/ + +/** @brief Finds the geometric transform (warp) between two images in terms of the ECC criterion @cite EP08 . + +@param templateImage single-channel template image; CV_8U or CV_32F array. 
+@param inputImage single-channel input image which should be warped with the final warpMatrix in +order to provide an image similar to templateImage, same type as templateImage. +@param warpMatrix floating-point \f$2\times 3\f$ or \f$3\times 3\f$ mapping matrix (warp). +@param motionType parameter, specifying the type of motion: + - **MOTION_TRANSLATION** sets a translational motion model; warpMatrix is \f$2\times 3\f$ with + the first \f$2\times 2\f$ part being the unity matrix and the rest two parameters being + estimated. + - **MOTION_EUCLIDEAN** sets a Euclidean (rigid) transformation as motion model; three + parameters are estimated; warpMatrix is \f$2\times 3\f$. + - **MOTION_AFFINE** sets an affine motion model (DEFAULT); six parameters are estimated; + warpMatrix is \f$2\times 3\f$. + - **MOTION_HOMOGRAPHY** sets a homography as a motion model; eight parameters are + estimated;\`warpMatrix\` is \f$3\times 3\f$. +@param criteria parameter, specifying the termination criteria of the ECC algorithm; +criteria.epsilon defines the threshold of the increment in the correlation coefficient between two +iterations (a negative criteria.epsilon makes criteria.maxcount the only termination criterion). +Default values are shown in the declaration above. +@param inputMask An optional mask to indicate valid values of inputImage. +@param gaussFiltSize An optional value indicating size of gaussian blur filter; (DEFAULT: 5) + +The function estimates the optimum transformation (warpMatrix) with respect to ECC criterion +(@cite EP08), that is + +\f[\texttt{warpMatrix} = \arg\max_{W} \texttt{ECC}(\texttt{templateImage}(x,y),\texttt{inputImage}(x',y'))\f] + +where + +\f[\begin{bmatrix} x' \\ y' \end{bmatrix} = W \cdot \begin{bmatrix} x \\ y \\ 1 \end{bmatrix}\f] + +(the equation holds with homogeneous coordinates for homography). It returns the final enhanced +correlation coefficient, that is the correlation coefficient between the template image and the +final warped input image. When a \f$3\times 3\f$ matrix is given with motionType =0, 1 or 2, the third +row is ignored. + +Unlike findHomography and estimateRigidTransform, the function findTransformECC implements an +area-based alignment that builds on intensity similarities. In essence, the function updates the +initial transformation that roughly aligns the images. If this information is missing, the identity +warp (unity matrix) is used as an initialization. Note that if images undergo strong +displacements/rotations, an initial transformation that roughly aligns the images is necessary +(e.g., a simple euclidean/similarity transform that allows for the images showing the same image +content approximately). Use inverse warping in the second image to take an image close to the first +one, i.e. use the flag WARP_INVERSE_MAP with warpAffine or warpPerspective. See also the OpenCV +sample image_alignment.cpp that demonstrates the use of the function. Note that the function throws +an exception if algorithm does not converges. 
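
// Illustrative sketch only (not part of the vendored header): ECC-based alignment as
// described above. The images are assumed to be single-channel CV_8U or CV_32F of equal
// size; the warp is initialized with the identity, as the text recommends when no rough
// alignment is known.
#include <opencv2/video/tracking.hpp>
#include <opencv2/imgproc.hpp>

cv::Mat alignWithEcc(const cv::Mat& templateImg, const cv::Mat& inputImg)
{
    cv::Mat warp = cv::Mat::eye(2, 3, CV_32F);   // identity init for MOTION_AFFINE
    cv::TermCriteria criteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 50, 1e-4);
    double ecc = cv::findTransformECC(templateImg, inputImg, warp,
                                      cv::MOTION_AFFINE, criteria);
    (void)ecc;  // final correlation coefficient between template and warped input
    // Warp the input towards the template with inverse mapping, as suggested above.
    cv::Mat aligned;
    cv::warpAffine(inputImg, aligned, warp, templateImg.size(),
                   cv::INTER_LINEAR + cv::WARP_INVERSE_MAP);
    return aligned;
}
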
+ +@sa +computeECC, estimateAffine2D, estimateAffinePartial2D, findHomography + */ +CV_EXPORTS_W double findTransformECC( InputArray templateImage, InputArray inputImage, + InputOutputArray warpMatrix, int motionType, + TermCriteria criteria, + InputArray inputMask, int gaussFiltSize); + +/** @overload */ +CV_EXPORTS +double findTransformECC(InputArray templateImage, InputArray inputImage, + InputOutputArray warpMatrix, int motionType = MOTION_AFFINE, + TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 50, 0.001), + InputArray inputMask = noArray()); + +/** @example samples/cpp/kalman.cpp +An example using the standard Kalman filter +*/ + +/** @brief Kalman filter class. + +The class implements a standard Kalman filter , +@cite Welch95 . However, you can modify transitionMatrix, controlMatrix, and measurementMatrix to get +an extended Kalman filter functionality. +@note In C API when CvKalman\* kalmanFilter structure is not needed anymore, it should be released +with cvReleaseKalman(&kalmanFilter) + */ +class CV_EXPORTS_W KalmanFilter +{ +public: + CV_WRAP KalmanFilter(); + /** @overload + @param dynamParams Dimensionality of the state. + @param measureParams Dimensionality of the measurement. + @param controlParams Dimensionality of the control vector. + @param type Type of the created matrices that should be CV_32F or CV_64F. + */ + CV_WRAP KalmanFilter( int dynamParams, int measureParams, int controlParams = 0, int type = CV_32F ); + + /** @brief Re-initializes Kalman filter. The previous content is destroyed. + + @param dynamParams Dimensionality of the state. + @param measureParams Dimensionality of the measurement. + @param controlParams Dimensionality of the control vector. + @param type Type of the created matrices that should be CV_32F or CV_64F. + */ + void init( int dynamParams, int measureParams, int controlParams = 0, int type = CV_32F ); + + /** @brief Computes a predicted state. + + @param control The optional input control + */ + CV_WRAP const Mat& predict( const Mat& control = Mat() ); + + /** @brief Updates the predicted state from the measurement. + + @param measurement The measured system parameters + */ + CV_WRAP const Mat& correct( const Mat& measurement ); + + CV_PROP_RW Mat statePre; //!< predicted state (x'(k)): x(k)=A*x(k-1)+B*u(k) + CV_PROP_RW Mat statePost; //!< corrected state (x(k)): x(k)=x'(k)+K(k)*(z(k)-H*x'(k)) + CV_PROP_RW Mat transitionMatrix; //!< state transition matrix (A) + CV_PROP_RW Mat controlMatrix; //!< control matrix (B) (not used if there is no control) + CV_PROP_RW Mat measurementMatrix; //!< measurement matrix (H) + CV_PROP_RW Mat processNoiseCov; //!< process noise covariance matrix (Q) + CV_PROP_RW Mat measurementNoiseCov;//!< measurement noise covariance matrix (R) + CV_PROP_RW Mat errorCovPre; //!< priori error estimate covariance matrix (P'(k)): P'(k)=A*P(k-1)*At + Q)*/ + CV_PROP_RW Mat gain; //!< Kalman gain matrix (K(k)): K(k)=P'(k)*Ht*inv(H*P'(k)*Ht+R) + CV_PROP_RW Mat errorCovPost; //!< posteriori error estimate covariance matrix (P(k)): P(k)=(I-K(k)*H)*P'(k) + + // temporary matrices + Mat temp1; + Mat temp2; + Mat temp3; + Mat temp4; + Mat temp5; +}; + + +class CV_EXPORTS_W DenseOpticalFlow : public Algorithm +{ +public: + /** @brief Calculates an optical flow. + + @param I0 first 8-bit single-channel input image. + @param I1 second input image of the same size and the same type as prev. + @param flow computed flow image that has the same size as prev and type CV_32FC2. 
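
// Illustrative sketch only (not part of the vendored header): a constant-velocity 2D
// tracker built on the KalmanFilter class declared above. The state layout (x, y, vx, vy)
// and the noise magnitudes are assumptions for the example.
#include <opencv2/video/tracking.hpp>

cv::KalmanFilter makeConstantVelocityKf()
{
    cv::KalmanFilter kf(4, 2, 0, CV_32F);            // 4 state dims, 2 measured dims
    kf.transitionMatrix = (cv::Mat_<float>(4, 4) <<  // x += vx, y += vy per step
        1, 0, 1, 0,
        0, 1, 0, 1,
        0, 0, 1, 0,
        0, 0, 0, 1);
    cv::setIdentity(kf.measurementMatrix);                          // we observe (x, y)
    cv::setIdentity(kf.processNoiseCov, cv::Scalar::all(1e-4));     // Q
    cv::setIdentity(kf.measurementNoiseCov, cv::Scalar::all(1e-1)); // R
    cv::setIdentity(kf.errorCovPost, cv::Scalar::all(1.0));         // P(0)
    return kf;
}
// Per frame: const cv::Mat& prediction = kf.predict();
//            kf.correct(measurement);   // measurement is a 2x1 CV_32F Mat
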
+ */ + CV_WRAP virtual void calc( InputArray I0, InputArray I1, InputOutputArray flow ) = 0; + /** @brief Releases all inner buffers. + */ + CV_WRAP virtual void collectGarbage() = 0; +}; + +/** @brief Base interface for sparse optical flow algorithms. + */ +class CV_EXPORTS_W SparseOpticalFlow : public Algorithm +{ +public: + /** @brief Calculates a sparse optical flow. + + @param prevImg First input image. + @param nextImg Second input image of the same size and the same type as prevImg. + @param prevPts Vector of 2D points for which the flow needs to be found. + @param nextPts Output vector of 2D points containing the calculated new positions of input features in the second image. + @param status Output status vector. Each element of the vector is set to 1 if the + flow for the corresponding features has been found. Otherwise, it is set to 0. + @param err Optional output vector that contains error response for each point (inverse confidence). + */ + CV_WRAP virtual void calc(InputArray prevImg, InputArray nextImg, + InputArray prevPts, InputOutputArray nextPts, + OutputArray status, + OutputArray err = cv::noArray()) = 0; +}; + +/** @brief "Dual TV L1" Optical Flow Algorithm. + +The class implements the "Dual TV L1" optical flow algorithm described in @cite Zach2007 and +@cite Javier2012 . +Here are important members of the class that control the algorithm, which you can set after +constructing the class instance: + +- member double tau + Time step of the numerical scheme. + +- member double lambda + Weight parameter for the data term, attachment parameter. This is the most relevant + parameter, which determines the smoothness of the output. The smaller this parameter is, + the smoother the solutions we obtain. It depends on the range of motions of the images, so + its value should be adapted to each image sequence. + +- member double theta + Weight parameter for (u - v)\^2, tightness parameter. It serves as a link between the + attachment and the regularization terms. In theory, it should have a small value in order + to maintain both parts in correspondence. The method is stable for a large range of values + of this parameter. + +- member int nscales + Number of scales used to create the pyramid of images. + +- member int warps + Number of warpings per scale. Represents the number of times that I1(x+u0) and grad( + I1(x+u0) ) are computed per scale. This is a parameter that assures the stability of the + method. It also affects the running time, so it is a compromise between speed and + accuracy. + +- member double epsilon + Stopping criterion threshold used in the numerical scheme, which is a trade-off between + precision and running time. A small value will yield more accurate solutions at the + expense of a slower convergence. + +- member int iterations + Stopping criterion iterations number used in the numerical scheme. + +C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow". +Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation". +*/ +class CV_EXPORTS_W DualTVL1OpticalFlow : public DenseOpticalFlow +{ +public: + //! @brief Time step of the numerical scheme + /** @see setTau */ + CV_WRAP virtual double getTau() const = 0; + /** @copybrief getTau @see getTau */ + CV_WRAP virtual void setTau(double val) = 0; + //! 
@brief Weight parameter for the data term, attachment parameter + /** @see setLambda */ + CV_WRAP virtual double getLambda() const = 0; + /** @copybrief getLambda @see getLambda */ + CV_WRAP virtual void setLambda(double val) = 0; + //! @brief Weight parameter for (u - v)^2, tightness parameter + /** @see setTheta */ + CV_WRAP virtual double getTheta() const = 0; + /** @copybrief getTheta @see getTheta */ + CV_WRAP virtual void setTheta(double val) = 0; + //! @brief coefficient for additional illumination variation term + /** @see setGamma */ + CV_WRAP virtual double getGamma() const = 0; + /** @copybrief getGamma @see getGamma */ + CV_WRAP virtual void setGamma(double val) = 0; + //! @brief Number of scales used to create the pyramid of images + /** @see setScalesNumber */ + CV_WRAP virtual int getScalesNumber() const = 0; + /** @copybrief getScalesNumber @see getScalesNumber */ + CV_WRAP virtual void setScalesNumber(int val) = 0; + //! @brief Number of warpings per scale + /** @see setWarpingsNumber */ + CV_WRAP virtual int getWarpingsNumber() const = 0; + /** @copybrief getWarpingsNumber @see getWarpingsNumber */ + CV_WRAP virtual void setWarpingsNumber(int val) = 0; + //! @brief Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time + /** @see setEpsilon */ + CV_WRAP virtual double getEpsilon() const = 0; + /** @copybrief getEpsilon @see getEpsilon */ + CV_WRAP virtual void setEpsilon(double val) = 0; + //! @brief Inner iterations (between outlier filtering) used in the numerical scheme + /** @see setInnerIterations */ + CV_WRAP virtual int getInnerIterations() const = 0; + /** @copybrief getInnerIterations @see getInnerIterations */ + CV_WRAP virtual void setInnerIterations(int val) = 0; + //! @brief Outer iterations (number of inner loops) used in the numerical scheme + /** @see setOuterIterations */ + CV_WRAP virtual int getOuterIterations() const = 0; + /** @copybrief getOuterIterations @see getOuterIterations */ + CV_WRAP virtual void setOuterIterations(int val) = 0; + //! @brief Use initial flow + /** @see setUseInitialFlow */ + CV_WRAP virtual bool getUseInitialFlow() const = 0; + /** @copybrief getUseInitialFlow @see getUseInitialFlow */ + CV_WRAP virtual void setUseInitialFlow(bool val) = 0; + //! @brief Step between scales (<1) + /** @see setScaleStep */ + CV_WRAP virtual double getScaleStep() const = 0; + /** @copybrief getScaleStep @see getScaleStep */ + CV_WRAP virtual void setScaleStep(double val) = 0; + //! @brief Median filter kernel size (1 = no filter) (3 or 5) + /** @see setMedianFiltering */ + CV_WRAP virtual int getMedianFiltering() const = 0; + /** @copybrief getMedianFiltering @see getMedianFiltering */ + CV_WRAP virtual void setMedianFiltering(int val) = 0; + + /** @brief Creates instance of cv::DualTVL1OpticalFlow*/ + CV_WRAP static Ptr create( + double tau = 0.25, + double lambda = 0.15, + double theta = 0.3, + int nscales = 5, + int warps = 5, + double epsilon = 0.01, + int innnerIterations = 30, + int outerIterations = 10, + double scaleStep = 0.8, + double gamma = 0.0, + int medianFiltering = 5, + bool useInitialFlow = false); +}; + +/** @brief Creates instance of cv::DenseOpticalFlow +*/ +CV_EXPORTS_W Ptr createOptFlow_DualTVL1(); + +/** @brief Class computing a dense optical flow using the Gunnar Farneback's algorithm. 
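
// Illustrative sketch only (not part of the vendored header): using the DualTVL1OpticalFlow
// factory declared above through the generic DenseOpticalFlow interface. The lambda value is
// an arbitrary example of the "most relevant parameter" mentioned in the text.
#include <opencv2/video/tracking.hpp>

cv::Mat dualTvl1Example(const cv::Mat& prevGray, const cv::Mat& nextGray)
{
    cv::Ptr<cv::DualTVL1OpticalFlow> tvl1 = cv::DualTVL1OpticalFlow::create();
    tvl1->setLambda(0.1);          // smaller lambda gives a smoother flow field
    cv::Mat flow;                  // CV_32FC2 output
    tvl1->calc(prevGray, nextGray, flow);
    return flow;
}
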
+ */ +class CV_EXPORTS_W FarnebackOpticalFlow : public DenseOpticalFlow +{ +public: + CV_WRAP virtual int getNumLevels() const = 0; + CV_WRAP virtual void setNumLevels(int numLevels) = 0; + + CV_WRAP virtual double getPyrScale() const = 0; + CV_WRAP virtual void setPyrScale(double pyrScale) = 0; + + CV_WRAP virtual bool getFastPyramids() const = 0; + CV_WRAP virtual void setFastPyramids(bool fastPyramids) = 0; + + CV_WRAP virtual int getWinSize() const = 0; + CV_WRAP virtual void setWinSize(int winSize) = 0; + + CV_WRAP virtual int getNumIters() const = 0; + CV_WRAP virtual void setNumIters(int numIters) = 0; + + CV_WRAP virtual int getPolyN() const = 0; + CV_WRAP virtual void setPolyN(int polyN) = 0; + + CV_WRAP virtual double getPolySigma() const = 0; + CV_WRAP virtual void setPolySigma(double polySigma) = 0; + + CV_WRAP virtual int getFlags() const = 0; + CV_WRAP virtual void setFlags(int flags) = 0; + + CV_WRAP static Ptr create( + int numLevels = 5, + double pyrScale = 0.5, + bool fastPyramids = false, + int winSize = 13, + int numIters = 10, + int polyN = 5, + double polySigma = 1.1, + int flags = 0); +}; + + +/** @brief Class used for calculating a sparse optical flow. + +The class can calculate an optical flow for a sparse feature set using the +iterative Lucas-Kanade method with pyramids. + +@sa calcOpticalFlowPyrLK + +*/ +class CV_EXPORTS_W SparsePyrLKOpticalFlow : public SparseOpticalFlow +{ +public: + CV_WRAP virtual Size getWinSize() const = 0; + CV_WRAP virtual void setWinSize(Size winSize) = 0; + + CV_WRAP virtual int getMaxLevel() const = 0; + CV_WRAP virtual void setMaxLevel(int maxLevel) = 0; + + CV_WRAP virtual TermCriteria getTermCriteria() const = 0; + CV_WRAP virtual void setTermCriteria(TermCriteria& crit) = 0; + + CV_WRAP virtual int getFlags() const = 0; + CV_WRAP virtual void setFlags(int flags) = 0; + + CV_WRAP virtual double getMinEigThreshold() const = 0; + CV_WRAP virtual void setMinEigThreshold(double minEigThreshold) = 0; + + CV_WRAP static Ptr create( + Size winSize = Size(21, 21), + int maxLevel = 3, TermCriteria crit = + TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01), + int flags = 0, + double minEigThreshold = 1e-4); +}; + +//! @} video_track + +} // cv + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/video/tracking_c.h b/third_party/opencv/uos/sw_64/include/opencv2/video/tracking_c.h new file mode 100644 index 00000000..3e32fbd0 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/video/tracking_c.h @@ -0,0 +1,232 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
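
// Illustrative sketch only (not part of the vendored header): the object-oriented wrappers
// declared above (FarnebackOpticalFlow / SparsePyrLKOpticalFlow) expose the same algorithms
// as the free functions while keeping internal buffers; parameter values are arbitrary examples.
#include <opencv2/video/tracking.hpp>

void classBasedFlowExample(const cv::Mat& prevGray, const cv::Mat& nextGray)
{
    cv::Ptr<cv::FarnebackOpticalFlow> fb = cv::FarnebackOpticalFlow::create();
    fb->setNumLevels(3);
    fb->setWinSize(15);
    cv::Mat flow;
    fb->calc(prevGray, nextGray, flow);   // dense CV_32FC2 flow
}
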
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_TRACKING_C_H +#define OPENCV_TRACKING_C_H + +#include "opencv2/imgproc/types_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @addtogroup video_c + @{ +*/ + +/****************************************************************************************\ +* Motion Analysis * +\****************************************************************************************/ + +/************************************ optical flow ***************************************/ + +#define CV_LKFLOW_PYR_A_READY 1 +#define CV_LKFLOW_PYR_B_READY 2 +#define CV_LKFLOW_INITIAL_GUESSES 4 +#define CV_LKFLOW_GET_MIN_EIGENVALS 8 + +/* It is Lucas & Kanade method, modified to use pyramids. + Also it does several iterations to get optical flow for + every point at every pyramid level. + Calculates optical flow between two images for certain set of points (i.e. + it is a "sparse" optical flow, which is opposite to the previous 3 methods) */ +CVAPI(void) cvCalcOpticalFlowPyrLK( const CvArr* prev, const CvArr* curr, + CvArr* prev_pyr, CvArr* curr_pyr, + const CvPoint2D32f* prev_features, + CvPoint2D32f* curr_features, + int count, + CvSize win_size, + int level, + char* status, + float* track_error, + CvTermCriteria criteria, + int flags ); + + +/* Modification of a previous sparse optical flow algorithm to calculate + affine flow */ +CVAPI(void) cvCalcAffineFlowPyrLK( const CvArr* prev, const CvArr* curr, + CvArr* prev_pyr, CvArr* curr_pyr, + const CvPoint2D32f* prev_features, + CvPoint2D32f* curr_features, + float* matrices, int count, + CvSize win_size, int level, + char* status, float* track_error, + CvTermCriteria criteria, int flags ); + +/* Estimate rigid transformation between 2 images or 2 point sets */ +CVAPI(int) cvEstimateRigidTransform( const CvArr* A, const CvArr* B, + CvMat* M, int full_affine ); + +/* Estimate optical flow for each pixel using the two-frame G. 
Farneback algorithm */ +CVAPI(void) cvCalcOpticalFlowFarneback( const CvArr* prev, const CvArr* next, + CvArr* flow, double pyr_scale, int levels, + int winsize, int iterations, int poly_n, + double poly_sigma, int flags ); + +/********************************* motion templates *************************************/ + +/****************************************************************************************\ +* All the motion template functions work only with single channel images. * +* Silhouette image must have depth IPL_DEPTH_8U or IPL_DEPTH_8S * +* Motion history image must have depth IPL_DEPTH_32F, * +* Gradient mask - IPL_DEPTH_8U or IPL_DEPTH_8S, * +* Motion orientation image - IPL_DEPTH_32F * +* Segmentation mask - IPL_DEPTH_32F * +* All the angles are in degrees, all the times are in milliseconds * +\****************************************************************************************/ + +/* Updates motion history image given motion silhouette */ +CVAPI(void) cvUpdateMotionHistory( const CvArr* silhouette, CvArr* mhi, + double timestamp, double duration ); + +/* Calculates gradient of the motion history image and fills + a mask indicating where the gradient is valid */ +CVAPI(void) cvCalcMotionGradient( const CvArr* mhi, CvArr* mask, CvArr* orientation, + double delta1, double delta2, + int aperture_size CV_DEFAULT(3)); + +/* Calculates average motion direction within a selected motion region + (region can be selected by setting ROIs and/or by composing a valid gradient mask + with the region mask) */ +CVAPI(double) cvCalcGlobalOrientation( const CvArr* orientation, const CvArr* mask, + const CvArr* mhi, double timestamp, + double duration ); + +/* Splits a motion history image into a few parts corresponding to separate independent motions + (e.g. left hand, right hand) */ +CVAPI(CvSeq*) cvSegmentMotion( const CvArr* mhi, CvArr* seg_mask, + CvMemStorage* storage, + double timestamp, double seg_thresh ); + +/****************************************************************************************\ +* Tracking * +\****************************************************************************************/ + +/* Implements CAMSHIFT algorithm - determines object position, size and orientation + from the object histogram back project (extension of meanshift) */ +CVAPI(int) cvCamShift( const CvArr* prob_image, CvRect window, + CvTermCriteria criteria, CvConnectedComp* comp, + CvBox2D* box CV_DEFAULT(NULL) ); + +/* Implements MeanShift algorithm - determines object position + from the object histogram back project */ +CVAPI(int) cvMeanShift( const CvArr* prob_image, CvRect window, + CvTermCriteria criteria, CvConnectedComp* comp ); + +/* +standard Kalman filter (in G. Welch' and G. 
Bishop's notation): + + x(k)=A*x(k-1)+B*u(k)+w(k) p(w)~N(0,Q) + z(k)=H*x(k)+v(k), p(v)~N(0,R) +*/ +typedef struct CvKalman +{ + int MP; /* number of measurement vector dimensions */ + int DP; /* number of state vector dimensions */ + int CP; /* number of control vector dimensions */ + + /* backward compatibility fields */ +#if 1 + float* PosterState; /* =state_pre->data.fl */ + float* PriorState; /* =state_post->data.fl */ + float* DynamMatr; /* =transition_matrix->data.fl */ + float* MeasurementMatr; /* =measurement_matrix->data.fl */ + float* MNCovariance; /* =measurement_noise_cov->data.fl */ + float* PNCovariance; /* =process_noise_cov->data.fl */ + float* KalmGainMatr; /* =gain->data.fl */ + float* PriorErrorCovariance;/* =error_cov_pre->data.fl */ + float* PosterErrorCovariance;/* =error_cov_post->data.fl */ + float* Temp1; /* temp1->data.fl */ + float* Temp2; /* temp2->data.fl */ +#endif + + CvMat* state_pre; /* predicted state (x'(k)): + x(k)=A*x(k-1)+B*u(k) */ + CvMat* state_post; /* corrected state (x(k)): + x(k)=x'(k)+K(k)*(z(k)-H*x'(k)) */ + CvMat* transition_matrix; /* state transition matrix (A) */ + CvMat* control_matrix; /* control matrix (B) + (it is not used if there is no control)*/ + CvMat* measurement_matrix; /* measurement matrix (H) */ + CvMat* process_noise_cov; /* process noise covariance matrix (Q) */ + CvMat* measurement_noise_cov; /* measurement noise covariance matrix (R) */ + CvMat* error_cov_pre; /* priori error estimate covariance matrix (P'(k)): + P'(k)=A*P(k-1)*At + Q)*/ + CvMat* gain; /* Kalman gain matrix (K(k)): + K(k)=P'(k)*Ht*inv(H*P'(k)*Ht+R)*/ + CvMat* error_cov_post; /* posteriori error estimate covariance matrix (P(k)): + P(k)=(I-K(k)*H)*P'(k) */ + CvMat* temp1; /* temporary matrices */ + CvMat* temp2; + CvMat* temp3; + CvMat* temp4; + CvMat* temp5; +} CvKalman; + +/* Creates Kalman filter and sets A, B, Q, R and state to some initial values */ +CVAPI(CvKalman*) cvCreateKalman( int dynam_params, int measure_params, + int control_params CV_DEFAULT(0)); + +/* Releases Kalman filter state */ +CVAPI(void) cvReleaseKalman( CvKalman** kalman); + +/* Updates Kalman filter by time (predicts future state of the system) */ +CVAPI(const CvMat*) cvKalmanPredict( CvKalman* kalman, + const CvMat* control CV_DEFAULT(NULL)); + +/* Updates Kalman filter by measurement + (corrects state of the system and internal matrices) */ +CVAPI(const CvMat*) cvKalmanCorrect( CvKalman* kalman, const CvMat* measurement ); + +#define cvKalmanUpdateByTime cvKalmanPredict +#define cvKalmanUpdateByMeasurement cvKalmanCorrect + +/** @} video_c */ + +#ifdef __cplusplus +} // extern "C" +#endif + + +#endif // OPENCV_TRACKING_C_H diff --git a/third_party/opencv/uos/sw_64/include/opencv2/video/video.hpp b/third_party/opencv/uos/sw_64/include/opencv2/video/video.hpp new file mode 100644 index 00000000..8267b85d --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/video/video.hpp @@ -0,0 +1,48 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. 
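
// Illustrative sketch only (not part of the vendored header): the legacy C interface to the
// Kalman filter declared above. The 4-state / 2-measurement layout mirrors the C++ example
// earlier and is an assumption for the example.
#include <opencv2/video/tracking_c.h>

void legacyKalmanExample()
{
    CvKalman* kalman = cvCreateKalman(4, 2, 0);               // state dims, measurement dims
    const CvMat* prediction = cvKalmanPredict(kalman, NULL);  // time update (predict)
    (void)prediction;
    // ... fill a 2x1 CV_32FC1 measurement and call cvKalmanCorrect(kalman, &measurement) ...
    cvReleaseKalman(&kalman);                                  // frees the structure
}
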
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifdef __OPENCV_BUILD +#error this is a compatibility header which should not be used inside the OpenCV library +#endif + +#include "opencv2/video.hpp" diff --git a/third_party/opencv/uos/sw_64/include/opencv2/videoio.hpp b/third_party/opencv/uos/sw_64/include/opencv2/videoio.hpp new file mode 100644 index 00000000..aa247dd8 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/videoio.hpp @@ -0,0 +1,1006 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_VIDEOIO_HPP +#define OPENCV_VIDEOIO_HPP + +#include "opencv2/core.hpp" + +/** + @defgroup videoio Video I/O + + @brief Read and write video or images sequence with OpenCV + + ### See also: + - @ref videoio_overview + - Tutorials: @ref tutorial_table_of_content_videoio + @{ + @defgroup videoio_flags_base Flags for video I/O + @defgroup videoio_flags_others Additional flags for video I/O API backends + @defgroup videoio_c C API for video I/O + @defgroup videoio_ios iOS glue for video I/O + @defgroup videoio_winrt WinRT glue for video I/O + @defgroup videoio_registry Query I/O API backends registry + @} +*/ + +////////////////////////////////// video io ///////////////////////////////// + +typedef struct CvCapture CvCapture; +typedef struct CvVideoWriter CvVideoWriter; + +namespace cv +{ + +//! @addtogroup videoio +//! @{ + +//! @addtogroup videoio_flags_base +//! @{ + + +/** @brief %VideoCapture API backends identifier. + +Select preferred API for a capture object. +To be used in the VideoCapture::VideoCapture() constructor or VideoCapture::open() + +@note Backends are available only if they have been built with your OpenCV binaries. +See @ref videoio_overview for more information. 
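A hedged sketch of the backend-preference idea this note describes, forcing one backend and falling back to automatic selection; the helper name openClip and the file name clip.mp4 are illustrative assumptions:

    #include <opencv2/videoio.hpp>

    bool openClip(cv::VideoCapture& cap)
    {
        if (cap.open("clip.mp4", cv::CAP_FFMPEG))      // ask for the FFMPEG backend explicitly
            return true;
        return cap.open("clip.mp4", cv::CAP_ANY);      // fall back to automatic backend selection
    }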
+*/ +enum VideoCaptureAPIs { + CAP_ANY = 0, //!< Auto detect == 0 + CAP_VFW = 200, //!< Video For Windows (platform native) + CAP_V4L = 200, //!< V4L/V4L2 capturing support via libv4l + CAP_V4L2 = CAP_V4L, //!< Same as CAP_V4L + CAP_FIREWIRE = 300, //!< IEEE 1394 drivers + CAP_FIREWARE = CAP_FIREWIRE, //!< Same as CAP_FIREWIRE + CAP_IEEE1394 = CAP_FIREWIRE, //!< Same as CAP_FIREWIRE + CAP_DC1394 = CAP_FIREWIRE, //!< Same as CAP_FIREWIRE + CAP_CMU1394 = CAP_FIREWIRE, //!< Same as CAP_FIREWIRE + CAP_QT = 500, //!< QuickTime + CAP_UNICAP = 600, //!< Unicap drivers + CAP_DSHOW = 700, //!< DirectShow (via videoInput) + CAP_PVAPI = 800, //!< PvAPI, Prosilica GigE SDK + CAP_OPENNI = 900, //!< OpenNI (for Kinect) + CAP_OPENNI_ASUS = 910, //!< OpenNI (for Asus Xtion) + CAP_ANDROID = 1000, //!< Android - not used + CAP_XIAPI = 1100, //!< XIMEA Camera API + CAP_AVFOUNDATION = 1200, //!< AVFoundation framework for iOS (OS X Lion will have the same API) + CAP_GIGANETIX = 1300, //!< Smartek Giganetix GigEVisionSDK + CAP_MSMF = 1400, //!< Microsoft Media Foundation (via videoInput) + CAP_WINRT = 1410, //!< Microsoft Windows Runtime using Media Foundation + CAP_INTELPERC = 1500, //!< Intel Perceptual Computing SDK + CAP_OPENNI2 = 1600, //!< OpenNI2 (for Kinect) + CAP_OPENNI2_ASUS = 1610, //!< OpenNI2 (for Asus Xtion and Occipital Structure sensors) + CAP_GPHOTO2 = 1700, //!< gPhoto2 connection + CAP_GSTREAMER = 1800, //!< GStreamer + CAP_FFMPEG = 1900, //!< Open and record video file or stream using the FFMPEG library + CAP_IMAGES = 2000, //!< OpenCV Image Sequence (e.g. img_%02d.jpg) + CAP_ARAVIS = 2100, //!< Aravis SDK + CAP_OPENCV_MJPEG = 2200, //!< Built-in OpenCV MotionJPEG codec + CAP_INTEL_MFX = 2300, //!< Intel MediaSDK + CAP_XINE = 2400, //!< XINE engine (Linux) + }; + +/** @brief %VideoCapture generic properties identifier. + + Reading / writing properties involves many layers. Some unexpected result might happens along this chain. + Effective behaviour depends from device hardware, driver and API Backend. + @sa videoio_flags_others, VideoCapture::get(), VideoCapture::set() +*/ +enum VideoCaptureProperties { + CAP_PROP_POS_MSEC =0, //!< Current position of the video file in milliseconds. + CAP_PROP_POS_FRAMES =1, //!< 0-based index of the frame to be decoded/captured next. + CAP_PROP_POS_AVI_RATIO =2, //!< Relative position of the video file: 0=start of the film, 1=end of the film. + CAP_PROP_FRAME_WIDTH =3, //!< Width of the frames in the video stream. + CAP_PROP_FRAME_HEIGHT =4, //!< Height of the frames in the video stream. + CAP_PROP_FPS =5, //!< Frame rate. + CAP_PROP_FOURCC =6, //!< 4-character code of codec. see VideoWriter::fourcc . + CAP_PROP_FRAME_COUNT =7, //!< Number of frames in the video file. + CAP_PROP_FORMAT =8, //!< Format of the %Mat objects (see Mat::type()) returned by VideoCapture::retrieve(). + //!< Set value -1 to fetch undecoded RAW video streams (as Mat 8UC1). + CAP_PROP_MODE =9, //!< Backend-specific value indicating the current capture mode. + CAP_PROP_BRIGHTNESS =10, //!< Brightness of the image (only for those cameras that support). + CAP_PROP_CONTRAST =11, //!< Contrast of the image (only for cameras). + CAP_PROP_SATURATION =12, //!< Saturation of the image (only for cameras). + CAP_PROP_HUE =13, //!< Hue of the image (only for cameras). + CAP_PROP_GAIN =14, //!< Gain of the image (only for those cameras that support). + CAP_PROP_EXPOSURE =15, //!< Exposure (only for those cameras that support). 
+    CAP_PROP_CONVERT_RGB =16, //!< Boolean flag indicating whether images should be converted to RGB.
+ //!< *GStreamer note*: The flag is ignored in case if custom pipeline is used. It's user responsibility to interpret pipeline output. + CAP_PROP_WHITE_BALANCE_BLUE_U =17, //!< Currently unsupported. + CAP_PROP_RECTIFICATION =18, //!< Rectification flag for stereo cameras (note: only supported by DC1394 v 2.x backend currently). + CAP_PROP_MONOCHROME =19, + CAP_PROP_SHARPNESS =20, + CAP_PROP_AUTO_EXPOSURE =21, //!< DC1394: exposure control done by camera, user can adjust reference level using this feature. + CAP_PROP_GAMMA =22, + CAP_PROP_TEMPERATURE =23, + CAP_PROP_TRIGGER =24, + CAP_PROP_TRIGGER_DELAY =25, + CAP_PROP_WHITE_BALANCE_RED_V =26, + CAP_PROP_ZOOM =27, + CAP_PROP_FOCUS =28, + CAP_PROP_GUID =29, + CAP_PROP_ISO_SPEED =30, + CAP_PROP_BACKLIGHT =32, + CAP_PROP_PAN =33, + CAP_PROP_TILT =34, + CAP_PROP_ROLL =35, + CAP_PROP_IRIS =36, + CAP_PROP_SETTINGS =37, //!< Pop up video/camera filter dialog (note: only supported by DSHOW backend currently. The property value is ignored) + CAP_PROP_BUFFERSIZE =38, + CAP_PROP_AUTOFOCUS =39, + CAP_PROP_SAR_NUM =40, //!< Sample aspect ratio: num/den (num) + CAP_PROP_SAR_DEN =41, //!< Sample aspect ratio: num/den (den) + CAP_PROP_BACKEND =42, //!< Current backend (enum VideoCaptureAPIs). Read-only property + CAP_PROP_CHANNEL =43, //!< Video input or Channel Number (only for those cameras that support) + CAP_PROP_AUTO_WB =44, //!< enable/ disable auto white-balance + CAP_PROP_WB_TEMPERATURE=45, //!< white-balance color temperature + CAP_PROP_CODEC_PIXEL_FORMAT =46, //!< (read-only) codec's pixel format. 4-character code - see VideoWriter::fourcc . Subset of [AV_PIX_FMT_*](https://github.com/FFmpeg/FFmpeg/blob/master/libavcodec/raw.c) or -1 if unknown + CAP_PROP_BITRATE =47, //!< (read-only) Video bitrate in kbits/s + CAP_PROP_ORIENTATION_META=48, //!< (read-only) Frame rotation defined by stream meta (applicable for FFmpeg back-end only) + CAP_PROP_ORIENTATION_AUTO=49, //!< if true - rotates output frames of CvCapture considering video file's metadata (applicable for FFmpeg back-end only) (https://github.com/opencv/opencv/issues/15499) + CAP_PROP_OPEN_TIMEOUT_MSEC=53, + CAP_PROP_READ_TIMEOUT_MSEC=54, +#ifndef CV_DOXYGEN + CV__CAP_PROP_LATEST +#endif + }; + + +/** @brief Generic camera output modes identifier. +@note Currently, these are supported through the libv4l backend only. +*/ +enum VideoCaptureModes { + CAP_MODE_BGR = 0, //!< BGR24 (default) + CAP_MODE_RGB = 1, //!< RGB24 + CAP_MODE_GRAY = 2, //!< Y8 + CAP_MODE_YUYV = 3 //!< YUYV + }; + +/** @brief %VideoWriter generic properties identifier. + @sa VideoWriter::get(), VideoWriter::set() +*/ +enum VideoWriterProperties { + VIDEOWRITER_PROP_QUALITY = 1, //!< Current quality (0..100%) of the encoded videostream. Can be adjusted dynamically in some codecs. + VIDEOWRITER_PROP_FRAMEBYTES = 2, //!< (Read-only): Size of just encoded video frame. Note that the encoding order may be different from representation order. + VIDEOWRITER_PROP_NSTRIPES = 3 //!< Number of stripes for parallel encoding. -1 for auto detection. +}; + +//! @} videoio_flags_base + +//! @addtogroup videoio_flags_others +//! @{ + +/** @name IEEE 1394 drivers + @{ +*/ + +/** @brief Modes of the IEEE 1394 controlling registers +(can be: auto, manual, auto single push, absolute Latter allowed with any other mode) +every feature can have only one mode turned on at a time +*/ +enum { CAP_PROP_DC1394_OFF = -4, //!< turn the feature off (not controlled manually nor automatically). 
+ CAP_PROP_DC1394_MODE_MANUAL = -3, //!< set automatically when a value of the feature is set by the user. + CAP_PROP_DC1394_MODE_AUTO = -2, + CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO = -1, + CAP_PROP_DC1394_MAX = 31 + }; + +//! @} IEEE 1394 drivers + +/** @name OpenNI (for Kinect) + @{ +*/ + +//! OpenNI map generators +enum { CAP_OPENNI_DEPTH_GENERATOR = 1 << 31, + CAP_OPENNI_IMAGE_GENERATOR = 1 << 30, + CAP_OPENNI_IR_GENERATOR = 1 << 29, + CAP_OPENNI_GENERATORS_MASK = CAP_OPENNI_DEPTH_GENERATOR + CAP_OPENNI_IMAGE_GENERATOR + CAP_OPENNI_IR_GENERATOR + }; + +//! Properties of cameras available through OpenNI backend +enum { CAP_PROP_OPENNI_OUTPUT_MODE = 100, + CAP_PROP_OPENNI_FRAME_MAX_DEPTH = 101, //!< In mm + CAP_PROP_OPENNI_BASELINE = 102, //!< In mm + CAP_PROP_OPENNI_FOCAL_LENGTH = 103, //!< In pixels + CAP_PROP_OPENNI_REGISTRATION = 104, //!< Flag that synchronizes the remapping depth map to image map + //!< by changing depth generator's view point (if the flag is "on") or + //!< sets this view point to its normal one (if the flag is "off"). + CAP_PROP_OPENNI_REGISTRATION_ON = CAP_PROP_OPENNI_REGISTRATION, + CAP_PROP_OPENNI_APPROX_FRAME_SYNC = 105, + CAP_PROP_OPENNI_MAX_BUFFER_SIZE = 106, + CAP_PROP_OPENNI_CIRCLE_BUFFER = 107, + CAP_PROP_OPENNI_MAX_TIME_DURATION = 108, + CAP_PROP_OPENNI_GENERATOR_PRESENT = 109, + CAP_PROP_OPENNI2_SYNC = 110, + CAP_PROP_OPENNI2_MIRROR = 111 + }; + +//! OpenNI shortcuts +enum { CAP_OPENNI_IMAGE_GENERATOR_PRESENT = CAP_OPENNI_IMAGE_GENERATOR + CAP_PROP_OPENNI_GENERATOR_PRESENT, + CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE = CAP_OPENNI_IMAGE_GENERATOR + CAP_PROP_OPENNI_OUTPUT_MODE, + CAP_OPENNI_DEPTH_GENERATOR_PRESENT = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_GENERATOR_PRESENT, + CAP_OPENNI_DEPTH_GENERATOR_BASELINE = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_BASELINE, + CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_FOCAL_LENGTH, + CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_REGISTRATION, + CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION, + CAP_OPENNI_IR_GENERATOR_PRESENT = CAP_OPENNI_IR_GENERATOR + CAP_PROP_OPENNI_GENERATOR_PRESENT, + }; + +//! OpenNI data given from depth generator +enum { CAP_OPENNI_DEPTH_MAP = 0, //!< Depth values in mm (CV_16UC1) + CAP_OPENNI_POINT_CLOUD_MAP = 1, //!< XYZ in meters (CV_32FC3) + CAP_OPENNI_DISPARITY_MAP = 2, //!< Disparity in pixels (CV_8UC1) + CAP_OPENNI_DISPARITY_MAP_32F = 3, //!< Disparity in pixels (CV_32FC1) + CAP_OPENNI_VALID_DEPTH_MASK = 4, //!< CV_8UC1 + + CAP_OPENNI_BGR_IMAGE = 5, //!< Data given from RGB image generator + CAP_OPENNI_GRAY_IMAGE = 6, //!< Data given from RGB image generator + + CAP_OPENNI_IR_IMAGE = 7 //!< Data given from IR image generator + }; + +//! Supported output modes of OpenNI image generator +enum { CAP_OPENNI_VGA_30HZ = 0, + CAP_OPENNI_SXGA_15HZ = 1, + CAP_OPENNI_SXGA_30HZ = 2, + CAP_OPENNI_QVGA_30HZ = 3, + CAP_OPENNI_QVGA_60HZ = 4 + }; + +//! @} OpenNI + +/** @name GStreamer + @{ +*/ + +enum { CAP_PROP_GSTREAMER_QUEUE_LENGTH = 200 //!< Default is 1 + }; + +//! @} GStreamer + +/** @name PvAPI, Prosilica GigE SDK + @{ +*/ + +//! PVAPI +enum { CAP_PROP_PVAPI_MULTICASTIP = 300, //!< IP for enable multicast master mode. 0 for disable multicast. + CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE = 301, //!< FrameStartTriggerMode: Determines how a frame is initiated. + CAP_PROP_PVAPI_DECIMATIONHORIZONTAL = 302, //!< Horizontal sub-sampling of the image. 
+ CAP_PROP_PVAPI_DECIMATIONVERTICAL = 303, //!< Vertical sub-sampling of the image. + CAP_PROP_PVAPI_BINNINGX = 304, //!< Horizontal binning factor. + CAP_PROP_PVAPI_BINNINGY = 305, //!< Vertical binning factor. + CAP_PROP_PVAPI_PIXELFORMAT = 306 //!< Pixel format. + }; + +//! PVAPI: FrameStartTriggerMode +enum { CAP_PVAPI_FSTRIGMODE_FREERUN = 0, //!< Freerun + CAP_PVAPI_FSTRIGMODE_SYNCIN1 = 1, //!< SyncIn1 + CAP_PVAPI_FSTRIGMODE_SYNCIN2 = 2, //!< SyncIn2 + CAP_PVAPI_FSTRIGMODE_FIXEDRATE = 3, //!< FixedRate + CAP_PVAPI_FSTRIGMODE_SOFTWARE = 4 //!< Software + }; + +//! PVAPI: DecimationHorizontal, DecimationVertical +enum { CAP_PVAPI_DECIMATION_OFF = 1, //!< Off + CAP_PVAPI_DECIMATION_2OUTOF4 = 2, //!< 2 out of 4 decimation + CAP_PVAPI_DECIMATION_2OUTOF8 = 4, //!< 2 out of 8 decimation + CAP_PVAPI_DECIMATION_2OUTOF16 = 8 //!< 2 out of 16 decimation + }; + +//! PVAPI: PixelFormat +enum { CAP_PVAPI_PIXELFORMAT_MONO8 = 1, //!< Mono8 + CAP_PVAPI_PIXELFORMAT_MONO16 = 2, //!< Mono16 + CAP_PVAPI_PIXELFORMAT_BAYER8 = 3, //!< Bayer8 + CAP_PVAPI_PIXELFORMAT_BAYER16 = 4, //!< Bayer16 + CAP_PVAPI_PIXELFORMAT_RGB24 = 5, //!< Rgb24 + CAP_PVAPI_PIXELFORMAT_BGR24 = 6, //!< Bgr24 + CAP_PVAPI_PIXELFORMAT_RGBA32 = 7, //!< Rgba32 + CAP_PVAPI_PIXELFORMAT_BGRA32 = 8, //!< Bgra32 + }; + +//! @} PvAPI + +/** @name XIMEA Camera API + @{ +*/ + +//! Properties of cameras available through XIMEA SDK backend +enum { CAP_PROP_XI_DOWNSAMPLING = 400, //!< Change image resolution by binning or skipping. + CAP_PROP_XI_DATA_FORMAT = 401, //!< Output data format. + CAP_PROP_XI_OFFSET_X = 402, //!< Horizontal offset from the origin to the area of interest (in pixels). + CAP_PROP_XI_OFFSET_Y = 403, //!< Vertical offset from the origin to the area of interest (in pixels). + CAP_PROP_XI_TRG_SOURCE = 404, //!< Defines source of trigger. + CAP_PROP_XI_TRG_SOFTWARE = 405, //!< Generates an internal trigger. PRM_TRG_SOURCE must be set to TRG_SOFTWARE. + CAP_PROP_XI_GPI_SELECTOR = 406, //!< Selects general purpose input. + CAP_PROP_XI_GPI_MODE = 407, //!< Set general purpose input mode. + CAP_PROP_XI_GPI_LEVEL = 408, //!< Get general purpose level. + CAP_PROP_XI_GPO_SELECTOR = 409, //!< Selects general purpose output. + CAP_PROP_XI_GPO_MODE = 410, //!< Set general purpose output mode. + CAP_PROP_XI_LED_SELECTOR = 411, //!< Selects camera signalling LED. + CAP_PROP_XI_LED_MODE = 412, //!< Define camera signalling LED functionality. + CAP_PROP_XI_MANUAL_WB = 413, //!< Calculates White Balance(must be called during acquisition). + CAP_PROP_XI_AUTO_WB = 414, //!< Automatic white balance. + CAP_PROP_XI_AEAG = 415, //!< Automatic exposure/gain. + CAP_PROP_XI_EXP_PRIORITY = 416, //!< Exposure priority (0.5 - exposure 50%, gain 50%). + CAP_PROP_XI_AE_MAX_LIMIT = 417, //!< Maximum limit of exposure in AEAG procedure. + CAP_PROP_XI_AG_MAX_LIMIT = 418, //!< Maximum limit of gain in AEAG procedure. + CAP_PROP_XI_AEAG_LEVEL = 419, //!< Average intensity of output signal AEAG should achieve(in %). + CAP_PROP_XI_TIMEOUT = 420, //!< Image capture timeout in milliseconds. + CAP_PROP_XI_EXPOSURE = 421, //!< Exposure time in microseconds. + CAP_PROP_XI_EXPOSURE_BURST_COUNT = 422, //!< Sets the number of times of exposure in one frame. + CAP_PROP_XI_GAIN_SELECTOR = 423, //!< Gain selector for parameter Gain allows to select different type of gains. + CAP_PROP_XI_GAIN = 424, //!< Gain in dB. + CAP_PROP_XI_DOWNSAMPLING_TYPE = 426, //!< Change image downsampling type. + CAP_PROP_XI_BINNING_SELECTOR = 427, //!< Binning engine selector. 
+ CAP_PROP_XI_BINNING_VERTICAL = 428, //!< Vertical Binning - number of vertical photo-sensitive cells to combine together. + CAP_PROP_XI_BINNING_HORIZONTAL = 429, //!< Horizontal Binning - number of horizontal photo-sensitive cells to combine together. + CAP_PROP_XI_BINNING_PATTERN = 430, //!< Binning pattern type. + CAP_PROP_XI_DECIMATION_SELECTOR = 431, //!< Decimation engine selector. + CAP_PROP_XI_DECIMATION_VERTICAL = 432, //!< Vertical Decimation - vertical sub-sampling of the image - reduces the vertical resolution of the image by the specified vertical decimation factor. + CAP_PROP_XI_DECIMATION_HORIZONTAL = 433, //!< Horizontal Decimation - horizontal sub-sampling of the image - reduces the horizontal resolution of the image by the specified vertical decimation factor. + CAP_PROP_XI_DECIMATION_PATTERN = 434, //!< Decimation pattern type. + CAP_PROP_XI_TEST_PATTERN_GENERATOR_SELECTOR = 587, //!< Selects which test pattern generator is controlled by the TestPattern feature. + CAP_PROP_XI_TEST_PATTERN = 588, //!< Selects which test pattern type is generated by the selected generator. + CAP_PROP_XI_IMAGE_DATA_FORMAT = 435, //!< Output data format. + CAP_PROP_XI_SHUTTER_TYPE = 436, //!< Change sensor shutter type(CMOS sensor). + CAP_PROP_XI_SENSOR_TAPS = 437, //!< Number of taps. + CAP_PROP_XI_AEAG_ROI_OFFSET_X = 439, //!< Automatic exposure/gain ROI offset X. + CAP_PROP_XI_AEAG_ROI_OFFSET_Y = 440, //!< Automatic exposure/gain ROI offset Y. + CAP_PROP_XI_AEAG_ROI_WIDTH = 441, //!< Automatic exposure/gain ROI Width. + CAP_PROP_XI_AEAG_ROI_HEIGHT = 442, //!< Automatic exposure/gain ROI Height. + CAP_PROP_XI_BPC = 445, //!< Correction of bad pixels. + CAP_PROP_XI_WB_KR = 448, //!< White balance red coefficient. + CAP_PROP_XI_WB_KG = 449, //!< White balance green coefficient. + CAP_PROP_XI_WB_KB = 450, //!< White balance blue coefficient. + CAP_PROP_XI_WIDTH = 451, //!< Width of the Image provided by the device (in pixels). + CAP_PROP_XI_HEIGHT = 452, //!< Height of the Image provided by the device (in pixels). + CAP_PROP_XI_REGION_SELECTOR = 589, //!< Selects Region in Multiple ROI which parameters are set by width, height, ... ,region mode. + CAP_PROP_XI_REGION_MODE = 595, //!< Activates/deactivates Region selected by Region Selector. + CAP_PROP_XI_LIMIT_BANDWIDTH = 459, //!< Set/get bandwidth(datarate)(in Megabits). + CAP_PROP_XI_SENSOR_DATA_BIT_DEPTH = 460, //!< Sensor output data bit depth. + CAP_PROP_XI_OUTPUT_DATA_BIT_DEPTH = 461, //!< Device output data bit depth. + CAP_PROP_XI_IMAGE_DATA_BIT_DEPTH = 462, //!< bitdepth of data returned by function xiGetImage. + CAP_PROP_XI_OUTPUT_DATA_PACKING = 463, //!< Device output data packing (or grouping) enabled. Packing could be enabled if output_data_bit_depth > 8 and packing capability is available. + CAP_PROP_XI_OUTPUT_DATA_PACKING_TYPE = 464, //!< Data packing type. Some cameras supports only specific packing type. + CAP_PROP_XI_IS_COOLED = 465, //!< Returns 1 for cameras that support cooling. + CAP_PROP_XI_COOLING = 466, //!< Start camera cooling. + CAP_PROP_XI_TARGET_TEMP = 467, //!< Set sensor target temperature for cooling. + CAP_PROP_XI_CHIP_TEMP = 468, //!< Camera sensor temperature. + CAP_PROP_XI_HOUS_TEMP = 469, //!< Camera housing temperature. + CAP_PROP_XI_HOUS_BACK_SIDE_TEMP = 590, //!< Camera housing back side temperature. + CAP_PROP_XI_SENSOR_BOARD_TEMP = 596, //!< Camera sensor board temperature. + CAP_PROP_XI_CMS = 470, //!< Mode of color management system. 
+ CAP_PROP_XI_APPLY_CMS = 471, //!< Enable applying of CMS profiles to xiGetImage (see XI_PRM_INPUT_CMS_PROFILE, XI_PRM_OUTPUT_CMS_PROFILE). + CAP_PROP_XI_IMAGE_IS_COLOR = 474, //!< Returns 1 for color cameras. + CAP_PROP_XI_COLOR_FILTER_ARRAY = 475, //!< Returns color filter array type of RAW data. + CAP_PROP_XI_GAMMAY = 476, //!< Luminosity gamma. + CAP_PROP_XI_GAMMAC = 477, //!< Chromaticity gamma. + CAP_PROP_XI_SHARPNESS = 478, //!< Sharpness Strength. + CAP_PROP_XI_CC_MATRIX_00 = 479, //!< Color Correction Matrix element [0][0]. + CAP_PROP_XI_CC_MATRIX_01 = 480, //!< Color Correction Matrix element [0][1]. + CAP_PROP_XI_CC_MATRIX_02 = 481, //!< Color Correction Matrix element [0][2]. + CAP_PROP_XI_CC_MATRIX_03 = 482, //!< Color Correction Matrix element [0][3]. + CAP_PROP_XI_CC_MATRIX_10 = 483, //!< Color Correction Matrix element [1][0]. + CAP_PROP_XI_CC_MATRIX_11 = 484, //!< Color Correction Matrix element [1][1]. + CAP_PROP_XI_CC_MATRIX_12 = 485, //!< Color Correction Matrix element [1][2]. + CAP_PROP_XI_CC_MATRIX_13 = 486, //!< Color Correction Matrix element [1][3]. + CAP_PROP_XI_CC_MATRIX_20 = 487, //!< Color Correction Matrix element [2][0]. + CAP_PROP_XI_CC_MATRIX_21 = 488, //!< Color Correction Matrix element [2][1]. + CAP_PROP_XI_CC_MATRIX_22 = 489, //!< Color Correction Matrix element [2][2]. + CAP_PROP_XI_CC_MATRIX_23 = 490, //!< Color Correction Matrix element [2][3]. + CAP_PROP_XI_CC_MATRIX_30 = 491, //!< Color Correction Matrix element [3][0]. + CAP_PROP_XI_CC_MATRIX_31 = 492, //!< Color Correction Matrix element [3][1]. + CAP_PROP_XI_CC_MATRIX_32 = 493, //!< Color Correction Matrix element [3][2]. + CAP_PROP_XI_CC_MATRIX_33 = 494, //!< Color Correction Matrix element [3][3]. + CAP_PROP_XI_DEFAULT_CC_MATRIX = 495, //!< Set default Color Correction Matrix. + CAP_PROP_XI_TRG_SELECTOR = 498, //!< Selects the type of trigger. + CAP_PROP_XI_ACQ_FRAME_BURST_COUNT = 499, //!< Sets number of frames acquired by burst. This burst is used only if trigger is set to FrameBurstStart. + CAP_PROP_XI_DEBOUNCE_EN = 507, //!< Enable/Disable debounce to selected GPI. + CAP_PROP_XI_DEBOUNCE_T0 = 508, //!< Debounce time (x * 10us). + CAP_PROP_XI_DEBOUNCE_T1 = 509, //!< Debounce time (x * 10us). + CAP_PROP_XI_DEBOUNCE_POL = 510, //!< Debounce polarity (pol = 1 t0 - falling edge, t1 - rising edge). + CAP_PROP_XI_LENS_MODE = 511, //!< Status of lens control interface. This shall be set to XI_ON before any Lens operations. + CAP_PROP_XI_LENS_APERTURE_VALUE = 512, //!< Current lens aperture value in stops. Examples: 2.8, 4, 5.6, 8, 11. + CAP_PROP_XI_LENS_FOCUS_MOVEMENT_VALUE = 513, //!< Lens current focus movement value to be used by XI_PRM_LENS_FOCUS_MOVE in motor steps. + CAP_PROP_XI_LENS_FOCUS_MOVE = 514, //!< Moves lens focus motor by steps set in XI_PRM_LENS_FOCUS_MOVEMENT_VALUE. + CAP_PROP_XI_LENS_FOCUS_DISTANCE = 515, //!< Lens focus distance in cm. + CAP_PROP_XI_LENS_FOCAL_LENGTH = 516, //!< Lens focal distance in mm. + CAP_PROP_XI_LENS_FEATURE_SELECTOR = 517, //!< Selects the current feature which is accessible by XI_PRM_LENS_FEATURE. + CAP_PROP_XI_LENS_FEATURE = 518, //!< Allows access to lens feature value currently selected by XI_PRM_LENS_FEATURE_SELECTOR. + CAP_PROP_XI_DEVICE_MODEL_ID = 521, //!< Returns device model id. + CAP_PROP_XI_DEVICE_SN = 522, //!< Returns device serial number. + CAP_PROP_XI_IMAGE_DATA_FORMAT_RGB32_ALPHA = 529, //!< The alpha channel of RGB32 output image format. 
+ CAP_PROP_XI_IMAGE_PAYLOAD_SIZE = 530, //!< Buffer size in bytes sufficient for output image returned by xiGetImage. + CAP_PROP_XI_TRANSPORT_PIXEL_FORMAT = 531, //!< Current format of pixels on transport layer. + CAP_PROP_XI_SENSOR_CLOCK_FREQ_HZ = 532, //!< Sensor clock frequency in Hz. + CAP_PROP_XI_SENSOR_CLOCK_FREQ_INDEX = 533, //!< Sensor clock frequency index. Sensor with selected frequencies have possibility to set the frequency only by this index. + CAP_PROP_XI_SENSOR_OUTPUT_CHANNEL_COUNT = 534, //!< Number of output channels from sensor used for data transfer. + CAP_PROP_XI_FRAMERATE = 535, //!< Define framerate in Hz. + CAP_PROP_XI_COUNTER_SELECTOR = 536, //!< Select counter. + CAP_PROP_XI_COUNTER_VALUE = 537, //!< Counter status. + CAP_PROP_XI_ACQ_TIMING_MODE = 538, //!< Type of sensor frames timing. + CAP_PROP_XI_AVAILABLE_BANDWIDTH = 539, //!< Calculate and returns available interface bandwidth(int Megabits). + CAP_PROP_XI_BUFFER_POLICY = 540, //!< Data move policy. + CAP_PROP_XI_LUT_EN = 541, //!< Activates LUT. + CAP_PROP_XI_LUT_INDEX = 542, //!< Control the index (offset) of the coefficient to access in the LUT. + CAP_PROP_XI_LUT_VALUE = 543, //!< Value at entry LUTIndex of the LUT. + CAP_PROP_XI_TRG_DELAY = 544, //!< Specifies the delay in microseconds (us) to apply after the trigger reception before activating it. + CAP_PROP_XI_TS_RST_MODE = 545, //!< Defines how time stamp reset engine will be armed. + CAP_PROP_XI_TS_RST_SOURCE = 546, //!< Defines which source will be used for timestamp reset. Writing this parameter will trigger settings of engine (arming). + CAP_PROP_XI_IS_DEVICE_EXIST = 547, //!< Returns 1 if camera connected and works properly. + CAP_PROP_XI_ACQ_BUFFER_SIZE = 548, //!< Acquisition buffer size in buffer_size_unit. Default bytes. + CAP_PROP_XI_ACQ_BUFFER_SIZE_UNIT = 549, //!< Acquisition buffer size unit in bytes. Default 1. E.g. Value 1024 means that buffer_size is in KiBytes. + CAP_PROP_XI_ACQ_TRANSPORT_BUFFER_SIZE = 550, //!< Acquisition transport buffer size in bytes. + CAP_PROP_XI_BUFFERS_QUEUE_SIZE = 551, //!< Queue of field/frame buffers. + CAP_PROP_XI_ACQ_TRANSPORT_BUFFER_COMMIT = 552, //!< Number of buffers to commit to low level. + CAP_PROP_XI_RECENT_FRAME = 553, //!< GetImage returns most recent frame. + CAP_PROP_XI_DEVICE_RESET = 554, //!< Resets the camera to default state. + CAP_PROP_XI_COLUMN_FPN_CORRECTION = 555, //!< Correction of column FPN. + CAP_PROP_XI_ROW_FPN_CORRECTION = 591, //!< Correction of row FPN. + CAP_PROP_XI_SENSOR_MODE = 558, //!< Current sensor mode. Allows to select sensor mode by one integer. Setting of this parameter affects: image dimensions and downsampling. + CAP_PROP_XI_HDR = 559, //!< Enable High Dynamic Range feature. + CAP_PROP_XI_HDR_KNEEPOINT_COUNT = 560, //!< The number of kneepoints in the PWLR. + CAP_PROP_XI_HDR_T1 = 561, //!< Position of first kneepoint(in % of XI_PRM_EXPOSURE). + CAP_PROP_XI_HDR_T2 = 562, //!< Position of second kneepoint (in % of XI_PRM_EXPOSURE). + CAP_PROP_XI_KNEEPOINT1 = 563, //!< Value of first kneepoint (% of sensor saturation). + CAP_PROP_XI_KNEEPOINT2 = 564, //!< Value of second kneepoint (% of sensor saturation). + CAP_PROP_XI_IMAGE_BLACK_LEVEL = 565, //!< Last image black level counts. Can be used for Offline processing to recall it. + CAP_PROP_XI_HW_REVISION = 571, //!< Returns hardware revision number. + CAP_PROP_XI_DEBUG_LEVEL = 572, //!< Set debug level. + CAP_PROP_XI_AUTO_BANDWIDTH_CALCULATION = 573, //!< Automatic bandwidth calculation. 
+ CAP_PROP_XI_FFS_FILE_ID = 594, //!< File number. + CAP_PROP_XI_FFS_FILE_SIZE = 580, //!< Size of file. + CAP_PROP_XI_FREE_FFS_SIZE = 581, //!< Size of free camera FFS. + CAP_PROP_XI_USED_FFS_SIZE = 582, //!< Size of used camera FFS. + CAP_PROP_XI_FFS_ACCESS_KEY = 583, //!< Setting of key enables file operations on some cameras. + CAP_PROP_XI_SENSOR_FEATURE_SELECTOR = 585, //!< Selects the current feature which is accessible by XI_PRM_SENSOR_FEATURE_VALUE. + CAP_PROP_XI_SENSOR_FEATURE_VALUE = 586, //!< Allows access to sensor feature value currently selected by XI_PRM_SENSOR_FEATURE_SELECTOR. + }; + +//! @} XIMEA + +/** @name AVFoundation framework for iOS + @{ +*/ + +//! Properties of cameras available through AVFOUNDATION backend +enum { CAP_PROP_IOS_DEVICE_FOCUS = 9001, + CAP_PROP_IOS_DEVICE_EXPOSURE = 9002, + CAP_PROP_IOS_DEVICE_FLASH = 9003, + CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004, + CAP_PROP_IOS_DEVICE_TORCH = 9005 + }; + +//! @} AVFoundation framework for iOS + + +/** @name Smartek Giganetix GigEVisionSDK + @{ +*/ + +//! Properties of cameras available through Smartek Giganetix Ethernet Vision backend +/* --- Vladimir Litvinenko (litvinenko.vladimir@gmail.com) --- */ +enum { CAP_PROP_GIGA_FRAME_OFFSET_X = 10001, + CAP_PROP_GIGA_FRAME_OFFSET_Y = 10002, + CAP_PROP_GIGA_FRAME_WIDTH_MAX = 10003, + CAP_PROP_GIGA_FRAME_HEIGH_MAX = 10004, + CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005, + CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006 + }; + +//! @} Smartek + +/** @name Intel Perceptual Computing SDK + @{ +*/ +enum { CAP_PROP_INTELPERC_PROFILE_COUNT = 11001, + CAP_PROP_INTELPERC_PROFILE_IDX = 11002, + CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE = 11003, + CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE = 11004, + CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD = 11005, + CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ = 11006, + CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT = 11007 + }; + +//! Intel Perceptual Streams +enum { CAP_INTELPERC_DEPTH_GENERATOR = 1 << 29, + CAP_INTELPERC_IMAGE_GENERATOR = 1 << 28, + CAP_INTELPERC_GENERATORS_MASK = CAP_INTELPERC_DEPTH_GENERATOR + CAP_INTELPERC_IMAGE_GENERATOR + }; + +enum { CAP_INTELPERC_DEPTH_MAP = 0, //!< Each pixel is a 16-bit integer. The value indicates the distance from an object to the camera's XY plane or the Cartesian depth. + CAP_INTELPERC_UVDEPTH_MAP = 1, //!< Each pixel contains two 32-bit floating point values in the range of 0-1, representing the mapping of depth coordinates to the color coordinates. + CAP_INTELPERC_IR_MAP = 2, //!< Each pixel is a 16-bit integer. The value indicates the intensity of the reflected laser beam. + CAP_INTELPERC_IMAGE = 3 + }; + +//! @} Intel Perceptual + +/** @name gPhoto2 connection + @{ +*/ + +/** @brief gPhoto2 properties + +If `propertyId` is less than 0 then work on widget with that __additive inversed__ camera setting ID +Get IDs by using CAP_PROP_GPHOTO2_WIDGET_ENUMERATE. +@see CvCaptureCAM_GPHOTO2 for more info +*/ +enum { CAP_PROP_GPHOTO2_PREVIEW = 17001, //!< Capture only preview from liveview mode. + CAP_PROP_GPHOTO2_WIDGET_ENUMERATE = 17002, //!< Readonly, returns (const char *). + CAP_PROP_GPHOTO2_RELOAD_CONFIG = 17003, //!< Trigger, only by set. Reload camera settings. + CAP_PROP_GPHOTO2_RELOAD_ON_CHANGE = 17004, //!< Reload all settings on set. + CAP_PROP_GPHOTO2_COLLECT_MSGS = 17005, //!< Collect messages with details. + CAP_PROP_GPHOTO2_FLUSH_MSGS = 17006, //!< Readonly, returns (const char *). + CAP_PROP_SPEED = 17007, //!< Exposure speed. Can be readonly, depends on camera program. 
+ CAP_PROP_APERTURE = 17008, //!< Aperture. Can be readonly, depends on camera program. + CAP_PROP_EXPOSUREPROGRAM = 17009, //!< Camera exposure program. + CAP_PROP_VIEWFINDER = 17010 //!< Enter liveview mode. + }; + +//! @} gPhoto2 + + +/** @name Images backend + @{ +*/ + +/** @brief Images backend properties + +*/ +enum { CAP_PROP_IMAGES_BASE = 18000, + CAP_PROP_IMAGES_LAST = 19000 // excluding + }; + +//! @} Images + +//! @} videoio_flags_others + + +class IVideoCapture; + +/** @brief Class for video capturing from video files, image sequences or cameras. + +The class provides C++ API for capturing video from cameras or for reading video files and image sequences. + +Here is how the class can be used: +@include samples/cpp/videocapture_basic.cpp + +@note In @ref videoio_c "C API" the black-box structure `CvCapture` is used instead of %VideoCapture. +@note +- (C++) A basic sample on using the %VideoCapture interface can be found at + `OPENCV_SOURCE_CODE/samples/cpp/videocapture_starter.cpp` +- (Python) A basic sample on using the %VideoCapture interface can be found at + `OPENCV_SOURCE_CODE/samples/python/video.py` +- (Python) A multi threaded video processing sample can be found at + `OPENCV_SOURCE_CODE/samples/python/video_threaded.py` +- (Python) %VideoCapture sample showcasing some features of the Video4Linux2 backend + `OPENCV_SOURCE_CODE/samples/python/video_v4l2.py` + */ +class CV_EXPORTS_W VideoCapture +{ +public: + /** @brief Default constructor + @note In @ref videoio_c "C API", when you finished working with video, release CvCapture structure with + cvReleaseCapture(), or use Ptr\ that calls cvReleaseCapture() automatically in the + destructor. + */ + CV_WRAP VideoCapture(); + + /** @overload + @brief Open video file or image file sequence or a capturing device or a IP video stream for video capturing + + Same as VideoCapture(const String& filename, int apiPreference) but using default Capture API backends + */ + CV_WRAP VideoCapture(const String& filename); + + /** @overload + @brief Open video file or a capturing device or a IP video stream for video capturing with API Preference + + @param filename it can be: + - name of video file (eg. `video.avi`) + - or image sequence (eg. `img_%02d.jpg`, which will read samples like `img_00.jpg, img_01.jpg, img_02.jpg, ...`) + - or URL of video stream (eg. `protocol://host:port/script_name?script_params|auth`) + - or GStreamer pipeline string in gst-launch tool format in case if GStreamer is used as backend + Note that each video stream or IP camera feed has its own URL scheme. Please refer to the + documentation of source stream to know the right URL. + @param apiPreference preferred Capture API backends to use. Can be used to enforce a specific reader + implementation if multiple are available: e.g. cv::CAP_FFMPEG or cv::CAP_IMAGES or cv::CAP_DSHOW. + + @sa cv::VideoCaptureAPIs + */ + CV_WRAP VideoCapture(const String& filename, int apiPreference); + + /** @overload + @brief Open a camera for video capturing + + @param index camera_id + domain_offset (CAP_*) id of the video capturing device to open. To open default camera using default backend just pass 0. + Use a `domain_offset` to enforce a specific reader implementation if multiple are available like cv::CAP_FFMPEG or cv::CAP_IMAGES or cv::CAP_DSHOW. + e.g. 
to open Camera 1 using the MS Media Foundation API use `index = 1 + cv::CAP_MSMF` + + @sa cv::VideoCaptureAPIs + */ + CV_WRAP VideoCapture(int index); + + /** @overload + @brief Opens a camera for video capturing + + @param index id of the video capturing device to open. To open default camera using default backend just pass 0. + (to backward compatibility usage of camera_id + domain_offset (CAP_*) is valid when apiPreference is CAP_ANY) + @param apiPreference preferred Capture API backends to use. Can be used to enforce a specific reader + implementation if multiple are available: e.g. cv::CAP_DSHOW or cv::CAP_MSMF or cv::CAP_V4L2. + + @sa cv::VideoCaptureAPIs + */ + CV_WRAP VideoCapture(int index, int apiPreference); + + /** @brief Default destructor + + The method first calls VideoCapture::release to close the already opened file or camera. + */ + virtual ~VideoCapture(); + + /** @brief Open video file or a capturing device or a IP video stream for video capturing + + @overload + + Parameters are same as the constructor VideoCapture(const String& filename) + @return `true` if the file has been successfully opened + + The method first calls VideoCapture::release to close the already opened file or camera. + */ + CV_WRAP virtual bool open(const String& filename); + + /** @brief Open a camera for video capturing + + @overload + + Parameters are same as the constructor VideoCapture(int index) + @return `true` if the camera has been successfully opened. + + The method first calls VideoCapture::release to close the already opened file or camera. + */ + CV_WRAP virtual bool open(int index); + + /** @brief Open a camera for video capturing + + @overload + + Parameters are similar as the constructor VideoCapture(int index),except it takes an additional argument apiPreference. + Definitely, is same as open(int index) where `index=cameraNum + apiPreference` + @return `true` if the camera has been successfully opened. + */ + CV_WRAP bool open(int cameraNum, int apiPreference); + + /** @brief Returns true if video capturing has been initialized already. + + If the previous call to VideoCapture constructor or VideoCapture::open() succeeded, the method returns + true. + */ + CV_WRAP virtual bool isOpened() const; + + /** @brief Closes video file or capturing device. + + The method is automatically called by subsequent VideoCapture::open and by VideoCapture + destructor. + + The C function also deallocates memory and clears \*capture pointer. + */ + CV_WRAP virtual void release(); + + /** @brief Grabs the next frame from video file or capturing device. + + @return `true` (non-zero) in the case of success. + + The method/function grabs the next frame from video file or camera and returns true (non-zero) in + the case of success. + + The primary use of the function is in multi-camera environments, especially when the cameras do not + have hardware synchronization. That is, you call VideoCapture::grab() for each camera and after that + call the slower method VideoCapture::retrieve() to decode and get frame from each camera. This way + the overhead on demosaicing or motion jpeg decompression etc. is eliminated and the retrieved frames + from different cameras will be closer in time. + + Also, when a connected camera is multi-head (for example, a stereo camera or a Kinect device), the + correct way of retrieving data from it is to call VideoCapture::grab() first and then call + VideoCapture::retrieve() one or more times with different values of the channel parameter. 
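A short sketch of that grab-then-retrieve pattern for two cameras, together with the set/get round trip discussed further below; the helper name, device indices and requested frame width are illustrative assumptions:

    #include <opencv2/videoio.hpp>

    void grabSynchronizedPair()
    {
        cv::VideoCapture cam0(0), cam1(1);                 // device indices are assumptions
        cam0.set(cv::CAP_PROP_FRAME_WIDTH, 640);           // a request only; the backend may ignore it
        double w = cam0.get(cv::CAP_PROP_FRAME_WIDTH);     // read back the value actually in effect
        (void)w;
        cv::Mat f0, f1;
        if (cam0.grab() && cam1.grab())                    // grab both frames first, close in time
        {
            cam0.retrieve(f0);                             // decode afterwards, once per camera
            cam1.retrieve(f1);
        }
    }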
+ + @ref tutorial_kinect_openni + */ + CV_WRAP virtual bool grab(); + + /** @brief Decodes and returns the grabbed video frame. + + @param [out] image the video frame is returned here. If no frames has been grabbed the image will be empty. + @param flag it could be a frame index or a driver specific flag + @return `false` if no frames has been grabbed + + The method decodes and returns the just grabbed frame. If no frames has been grabbed + (camera has been disconnected, or there are no more frames in video file), the method returns false + and the function returns an empty image (with %cv::Mat, test it with Mat::empty()). + + @sa read() + + @note In @ref videoio_c "C API", functions cvRetrieveFrame() and cv.RetrieveFrame() return image stored inside the video + capturing structure. It is not allowed to modify or release the image! You can copy the frame using + cvCloneImage and then do whatever you want with the copy. + */ + CV_WRAP virtual bool retrieve(OutputArray image, int flag = 0); + + /** @brief Stream operator to read the next video frame. + @sa read() + */ + virtual VideoCapture& operator >> (CV_OUT Mat& image); + + /** @overload + @sa read() + */ + virtual VideoCapture& operator >> (CV_OUT UMat& image); + + /** @brief Grabs, decodes and returns the next video frame. + + @param [out] image the video frame is returned here. If no frames has been grabbed the image will be empty. + @return `false` if no frames has been grabbed + + The method/function combines VideoCapture::grab() and VideoCapture::retrieve() in one call. This is the + most convenient method for reading video files or capturing data from decode and returns the just + grabbed frame. If no frames has been grabbed (camera has been disconnected, or there are no more + frames in video file), the method returns false and the function returns empty image (with %cv::Mat, test it with Mat::empty()). + + @note In @ref videoio_c "C API", functions cvRetrieveFrame() and cv.RetrieveFrame() return image stored inside the video + capturing structure. It is not allowed to modify or release the image! You can copy the frame using + cvCloneImage and then do whatever you want with the copy. + */ + CV_WRAP virtual bool read(OutputArray image); + + /** @brief Sets a property in the VideoCapture. + + @param propId Property identifier from cv::VideoCaptureProperties (eg. cv::CAP_PROP_POS_MSEC, cv::CAP_PROP_POS_FRAMES, ...) + or one from @ref videoio_flags_others + @param value Value of the property. + @return `true` if the property is supported by backend used by the VideoCapture instance. + @note Even if it returns `true` this doesn't ensure that the property + value has been accepted by the capture device. See note in VideoCapture::get() + */ + CV_WRAP virtual bool set(int propId, double value); + + /** @brief Returns the specified VideoCapture property + + @param propId Property identifier from cv::VideoCaptureProperties (eg. cv::CAP_PROP_POS_MSEC, cv::CAP_PROP_POS_FRAMES, ...) + or one from @ref videoio_flags_others + @return Value for the specified property. Value 0 is returned when querying a property that is + not supported by the backend used by the VideoCapture instance. + + @note Reading / writing properties involves many layers. Some unexpected result might happens + along this chain. + @code{.txt} + VideoCapture -> API Backend -> Operating System -> Device Driver -> Device Hardware + @endcode + The returned value might be different from what really used by the device or it could be encoded + using device dependent rules (eg. 
steps or percentage). Effective behaviour depends from device + driver and API Backend + + */ + CV_WRAP virtual double get(int propId) const; + + /** @brief Open video file or a capturing device or a IP video stream for video capturing with API Preference + + @overload + + Parameters are same as the constructor VideoCapture(const String& filename, int apiPreference) + @return `true` if the file has been successfully opened + + The method first calls VideoCapture::release to close the already opened file or camera. + */ + CV_WRAP virtual bool open(const String& filename, int apiPreference); + + /** @brief Returns used backend API name + + @note Stream should be opened. + */ + CV_WRAP String getBackendName() const; + +protected: + Ptr cap; + Ptr icap; +}; + +class IVideoWriter; + +/** @example samples/cpp/tutorial_code/videoio/video-write/video-write.cpp +Check @ref tutorial_video_write "the corresponding tutorial" for more details +*/ + +/** @example samples/cpp/videowriter_basic.cpp +An example using VideoCapture and VideoWriter class +*/ + +/** @brief Video writer class. + +The class provides C++ API for writing video files or image sequences. +*/ +class CV_EXPORTS_W VideoWriter +{ +public: + /** @brief Default constructors + + The constructors/functions initialize video writers. + - On Linux FFMPEG is used to write videos; + - On Windows FFMPEG or VFW is used; + - On MacOSX QTKit is used. + */ + CV_WRAP VideoWriter(); + + /** @overload + @param filename Name of the output video file. + @param fourcc 4-character code of codec used to compress the frames. For example, + VideoWriter::fourcc('P','I','M','1') is a MPEG-1 codec, VideoWriter::fourcc('M','J','P','G') is a + motion-jpeg codec etc. List of codes can be obtained at [Video Codecs by + FOURCC](http://www.fourcc.org/codecs.php) page. FFMPEG backend with MP4 container natively uses + other values as fourcc code: see [ObjectType](http://mp4ra.org/#/codecs), + so you may receive a warning message from OpenCV about fourcc code conversion. + @param fps Framerate of the created video stream. + @param frameSize Size of the video frames. + @param isColor If it is not zero, the encoder will expect and encode color frames, otherwise it + will work with grayscale frames (the flag is currently supported on Windows only). + + @b Tips: + - With some backends `fourcc=-1` pops up the codec selection dialog from the system. + - To save image sequence use a proper filename (eg. `img_%02d.jpg`) and `fourcc=0` + OR `fps=0`. Use uncompressed image format (eg. `img_%02d.BMP`) to save raw frames. + - Most codecs are lossy. If you want lossless video file you need to use a lossless codecs + (eg. FFMPEG FFV1, Huffman HFYU, Lagarith LAGS, etc...) + - If FFMPEG is enabled, using `codec=0; fps=0;` you can create an uncompressed (raw) video file. + */ + CV_WRAP VideoWriter(const String& filename, int fourcc, double fps, + Size frameSize, bool isColor = true); + + /** @overload + The `apiPreference` parameter allows to specify API backends to use. Can be used to enforce a specific reader implementation + if multiple are available: e.g. cv::CAP_FFMPEG or cv::CAP_GSTREAMER. + */ + CV_WRAP VideoWriter(const String& filename, int apiPreference, int fourcc, double fps, + Size frameSize, bool isColor = true); + + /** @brief Default destructor + + The method first calls VideoWriter::release to close the already opened file. + */ + virtual ~VideoWriter(); + + /** @brief Initializes or reinitializes video writer. + + The method opens video writer. 
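A hedged sketch of opening a writer the way the constructor documentation above describes; the output name, codec, frame rate and frame size are illustrative assumptions:

    #include <opencv2/videoio.hpp>

    void writeTestClip()
    {
        cv::VideoWriter writer("out.avi", cv::VideoWriter::fourcc('M','J','P','G'),
                               30.0, cv::Size(640, 480), /*isColor=*/true);
        if (!writer.isOpened())
            return;
        cv::Mat frame(480, 640, CV_8UC3, cv::Scalar(0, 0, 0));  // frames must match the declared size
        for (int i = 0; i < 30; ++i)
            writer.write(frame);                                 // writer.release() runs in the destructor
    }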
Parameters are the same as in the constructor + VideoWriter::VideoWriter. + @return `true` if video writer has been successfully initialized + + The method first calls VideoWriter::release to close the already opened file. + */ + CV_WRAP virtual bool open(const String& filename, int fourcc, double fps, + Size frameSize, bool isColor = true); + + /** @overload + */ + CV_WRAP bool open(const String& filename, int apiPreference, int fourcc, double fps, + Size frameSize, bool isColor = true); + + /** @brief Returns true if video writer has been successfully initialized. + */ + CV_WRAP virtual bool isOpened() const; + + /** @brief Closes the video writer. + + The method is automatically called by subsequent VideoWriter::open and by the VideoWriter + destructor. + */ + CV_WRAP virtual void release(); + + /** @brief Stream operator to write the next video frame. + @sa write + */ + virtual VideoWriter& operator << (const Mat& image); + + /** @brief Writes the next video frame + + @param image The written frame. In general, color images are expected in BGR format. + + The function/method writes the specified image to video file. It must have the same size as has + been specified when opening the video writer. + */ + CV_WRAP virtual void write(const Mat& image); + + /** @brief Sets a property in the VideoWriter. + + @param propId Property identifier from cv::VideoWriterProperties (eg. cv::VIDEOWRITER_PROP_QUALITY) + or one of @ref videoio_flags_others + + @param value Value of the property. + @return `true` if the property is supported by the backend used by the VideoWriter instance. + */ + CV_WRAP virtual bool set(int propId, double value); + + /** @brief Returns the specified VideoWriter property + + @param propId Property identifier from cv::VideoWriterProperties (eg. cv::VIDEOWRITER_PROP_QUALITY) + or one of @ref videoio_flags_others + + @return Value for the specified property. Value 0 is returned when querying a property that is + not supported by the backend used by the VideoWriter instance. + */ + CV_WRAP virtual double get(int propId) const; + + /** @brief Concatenates 4 chars to a fourcc code + + @return a fourcc code + + This static method constructs the fourcc code of the codec to be used in the constructor + VideoWriter::VideoWriter or VideoWriter::open. + */ + CV_WRAP static int fourcc(char c1, char c2, char c3, char c4); + + /** @brief Returns used backend API name + + @note Stream should be opened. + */ + CV_WRAP String getBackendName() const; + +protected: + Ptr writer; + Ptr iwriter; + + static Ptr create(const String& filename, int fourcc, double fps, + Size frameSize, bool isColor = true); +}; + +//! @cond IGNORED +template<> CV_EXPORTS void DefaultDeleter::operator ()(CvCapture* obj) const; +template<> CV_EXPORTS void DefaultDeleter::operator ()(CvVideoWriter* obj) const; +//! @endcond IGNORED + +//! @} videoio + +} // cv + +#endif //OPENCV_VIDEOIO_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/videoio/cap_ios.h b/third_party/opencv/uos/sw_64/include/opencv2/videoio/cap_ios.h new file mode 100644 index 00000000..207ad46c --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/videoio/cap_ios.h @@ -0,0 +1,150 @@ +/* For iOS video I/O + * by Eduard Feicho on 29/07/12 + * Copyright 2012. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#import +#import +#import +#import +#include "opencv2/core.hpp" + +//! @addtogroup videoio_ios +//! @{ + +/////////////////////////////////////// CvAbstractCamera ///////////////////////////////////// + +@class CvAbstractCamera; + +CV_EXPORTS @interface CvAbstractCamera : NSObject +{ + UIDeviceOrientation currentDeviceOrientation; + + BOOL cameraAvailable; +} + +@property (nonatomic, strong) AVCaptureSession* captureSession; +@property (nonatomic, strong) AVCaptureConnection* videoCaptureConnection; + +@property (nonatomic, readonly) BOOL running; +@property (nonatomic, readonly) BOOL captureSessionLoaded; + +@property (nonatomic, assign) int defaultFPS; +@property (nonatomic, readonly) AVCaptureVideoPreviewLayer *captureVideoPreviewLayer; +@property (nonatomic, assign) AVCaptureDevicePosition defaultAVCaptureDevicePosition; +@property (nonatomic, assign) AVCaptureVideoOrientation defaultAVCaptureVideoOrientation; +@property (nonatomic, assign) BOOL useAVCaptureVideoPreviewLayer; +@property (nonatomic, strong) NSString *const defaultAVCaptureSessionPreset; + +@property (nonatomic, assign) int imageWidth; +@property (nonatomic, assign) int imageHeight; + +@property (nonatomic, strong) UIView* parentView; + +- CV_UNUSED(start); +- CV_UNUSED(stop); +- CV_UNUSED(switchCameras); + +- (id)initWithParentView:(UIView*)parent; + +- CV_UNUSED(createCaptureOutput); +- CV_UNUSED(createVideoPreviewLayer); +- CV_UNUSED(updateOrientation); + +- CV_UNUSED(lockFocus); +- CV_UNUSED(unlockFocus); +- CV_UNUSED(lockExposure); +- CV_UNUSED(unlockExposure); +- CV_UNUSED(lockBalance); +- CV_UNUSED(unlockBalance); + +@end + +///////////////////////////////// CvVideoCamera /////////////////////////////////////////// + +@class CvVideoCamera; + +CV_EXPORTS @protocol CvVideoCameraDelegate + +#ifdef __cplusplus +// delegate method for processing image frames +- (void)processImage:(cv::Mat&)image; +#endif + +@end + +CV_EXPORTS @interface CvVideoCamera : CvAbstractCamera +{ + AVCaptureVideoDataOutput *videoDataOutput; + + dispatch_queue_t videoDataOutputQueue; + CALayer *customPreviewLayer; + + CMTime lastSampleTime; + +} + +@property (nonatomic, weak) id delegate; +@property (nonatomic, assign) BOOL grayscaleMode; + +@property (nonatomic, assign) BOOL recordVideo; +@property (nonatomic, 
assign) BOOL rotateVideo; +@property (nonatomic, strong) AVAssetWriterInput* recordAssetWriterInput; +@property (nonatomic, strong) AVAssetWriterInputPixelBufferAdaptor* recordPixelBufferAdaptor; +@property (nonatomic, strong) AVAssetWriter* recordAssetWriter; + +- (void)adjustLayoutToInterfaceOrientation:(UIInterfaceOrientation)interfaceOrientation; +- CV_UNUSED(layoutPreviewLayer); +- CV_UNUSED(saveVideo); +- (NSURL *)videoFileURL; +- (NSString *)videoFileString; + + +@end + +///////////////////////////////// CvPhotoCamera /////////////////////////////////////////// + +@class CvPhotoCamera; + +CV_EXPORTS @protocol CvPhotoCameraDelegate + +- (void)photoCamera:(CvPhotoCamera*)photoCamera capturedImage:(UIImage *)image; +- (void)photoCameraCancel:(CvPhotoCamera*)photoCamera; + +@end + +CV_EXPORTS @interface CvPhotoCamera : CvAbstractCamera +{ + AVCaptureStillImageOutput *stillImageOutput; +} + +@property (nonatomic, weak) id delegate; + +- CV_UNUSED(takePicture); + +@end + +//! @} videoio_ios diff --git a/third_party/opencv/uos/sw_64/include/opencv2/videoio/registry.hpp b/third_party/opencv/uos/sw_64/include/opencv2/videoio/registry.hpp new file mode 100644 index 00000000..7404c681 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/videoio/registry.hpp @@ -0,0 +1,44 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#ifndef OPENCV_VIDEOIO_REGISTRY_HPP +#define OPENCV_VIDEOIO_REGISTRY_HPP + +#include + +namespace cv { namespace videoio_registry { +/** @addtogroup videoio_registry +This section contains API description how to query/configure available Video I/O backends. + +Runtime configuration options: +- enable debug mode: `OPENCV_VIDEOIO_DEBUG=1` +- change backend priority: `OPENCV_VIDEOIO_PRIORITY_=9999` +- disable backend: `OPENCV_VIDEOIO_PRIORITY_=0` +- specify list of backends with high priority (>100000): `OPENCV_VIDEOIO_PRIORITY_LIST=FFMPEG,GSTREAMER` + +@{ + */ + + +/** @brief Returns backend API name or "unknown" +@param api backend ID (#VideoCaptureAPIs) +*/ +CV_EXPORTS_W cv::String getBackendName(VideoCaptureAPIs api); + +/** @brief Returns list of all builtin backends */ +CV_EXPORTS_W std::vector getBackends(); + +/** @brief Returns list of available backends which works via `cv::VideoCapture(int index)` */ +CV_EXPORTS_W std::vector getCameraBackends(); + +/** @brief Returns list of available backends which works via `cv::VideoCapture(filename)` */ +CV_EXPORTS_W std::vector getStreamBackends(); + +/** @brief Returns list of available backends which works via `cv::VideoWriter()` */ +CV_EXPORTS_W std::vector getWriterBackends(); + +//! @} +}} // namespace + +#endif // OPENCV_VIDEOIO_REGISTRY_HPP diff --git a/third_party/opencv/uos/sw_64/include/opencv2/videoio/videoio.hpp b/third_party/opencv/uos/sw_64/include/opencv2/videoio/videoio.hpp new file mode 100644 index 00000000..ec84cf7a --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/videoio/videoio.hpp @@ -0,0 +1,48 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
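A small sketch of querying the registry API declared in videoio/registry.hpp above; printing the backend names is an assumed use for illustration, not taken from the header:

    #include <opencv2/videoio/registry.hpp>
    #include <iostream>

    int main()
    {
        // list every camera capture backend compiled into this OpenCV build
        for (cv::VideoCaptureAPIs api : cv::videoio_registry::getCameraBackends())
            std::cout << cv::videoio_registry::getBackendName(api).c_str() << std::endl;
        return 0;
    }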
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Copyright (C) 2013, OpenCV Foundation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifdef __OPENCV_BUILD +#error this is a compatibility header which should not be used inside the OpenCV library +#endif + +#include "opencv2/videoio.hpp" diff --git a/third_party/opencv/uos/sw_64/include/opencv2/videoio/videoio_c.h b/third_party/opencv/uos/sw_64/include/opencv2/videoio/videoio_c.h new file mode 100644 index 00000000..ffbe2e35 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/videoio/videoio_c.h @@ -0,0 +1,587 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_VIDEOIO_H +#define OPENCV_VIDEOIO_H + +#include "opencv2/core/core_c.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + @addtogroup videoio_c + @{ +*/ + +/****************************************************************************************\ +* Working with Video Files and Cameras * +\****************************************************************************************/ + +/** @brief "black box" capture structure + +In C++ use cv::VideoCapture +*/ +typedef struct CvCapture CvCapture; + +/** @brief start capturing frames from video file +*/ +CVAPI(CvCapture*) cvCreateFileCapture( const char* filename ); + +/** @brief start capturing frames from video file. allows specifying a preferred API to use +*/ +CVAPI(CvCapture*) cvCreateFileCaptureWithPreference( const char* filename , int apiPreference); + +enum +{ + CV_CAP_ANY =0, // autodetect + + CV_CAP_MIL =100, // MIL proprietary drivers + + CV_CAP_VFW =200, // platform native + CV_CAP_V4L =200, + CV_CAP_V4L2 =200, + + CV_CAP_FIREWARE =300, // IEEE 1394 drivers + CV_CAP_FIREWIRE =300, + CV_CAP_IEEE1394 =300, + CV_CAP_DC1394 =300, + CV_CAP_CMU1394 =300, + + CV_CAP_STEREO =400, // TYZX proprietary drivers + CV_CAP_TYZX =400, + CV_TYZX_LEFT =400, + CV_TYZX_RIGHT =401, + CV_TYZX_COLOR =402, + CV_TYZX_Z =403, + + CV_CAP_QT =500, // QuickTime + + CV_CAP_UNICAP =600, // Unicap drivers + + CV_CAP_DSHOW =700, // DirectShow (via videoInput) + CV_CAP_MSMF =1400, // Microsoft Media Foundation (via videoInput) + + CV_CAP_PVAPI =800, // PvAPI, Prosilica GigE SDK + + CV_CAP_OPENNI =900, // OpenNI (for Kinect) + CV_CAP_OPENNI_ASUS =910, // OpenNI (for Asus Xtion) + + CV_CAP_ANDROID =1000, // Android - not used + CV_CAP_ANDROID_BACK =CV_CAP_ANDROID+99, // Android back camera - not used + CV_CAP_ANDROID_FRONT =CV_CAP_ANDROID+98, // Android front camera - not used + + CV_CAP_XIAPI =1100, // XIMEA Camera API + + CV_CAP_AVFOUNDATION = 1200, // AVFoundation framework for iOS (OS X Lion will have the same API) + + CV_CAP_GIGANETIX = 1300, // Smartek Giganetix GigEVisionSDK + + CV_CAP_INTELPERC = 1500, // Intel Perceptual Computing + + CV_CAP_OPENNI2 = 1600, // OpenNI2 (for Kinect) + CV_CAP_GPHOTO2 = 1700, + CV_CAP_GSTREAMER = 1800, // GStreamer + CV_CAP_FFMPEG = 1900, // FFMPEG + CV_CAP_IMAGES = 2000, // OpenCV Image Sequence (e.g. img_%02d.jpg) + + CV_CAP_ARAVIS = 2100 // Aravis GigE SDK +}; + +/** @brief start capturing frames from camera: index = camera_index + domain_offset (CV_CAP_*) +*/ +CVAPI(CvCapture*) cvCreateCameraCapture( int index ); + +/** @brief grab a frame, return 1 on success, 0 on fail. 
+ + this function is thought to be fast +*/ +CVAPI(int) cvGrabFrame( CvCapture* capture ); + +/** @brief get the frame grabbed with cvGrabFrame(..) + + This function may apply some frame processing like + frame decompression, flipping etc. + @warning !!!DO NOT RELEASE or MODIFY the retrieved frame!!! +*/ +CVAPI(IplImage*) cvRetrieveFrame( CvCapture* capture, int streamIdx CV_DEFAULT(0) ); + +/** @brief Just a combination of cvGrabFrame and cvRetrieveFrame + + @warning !!!DO NOT RELEASE or MODIFY the retrieved frame!!! +*/ +CVAPI(IplImage*) cvQueryFrame( CvCapture* capture ); + +/** @brief stop capturing/reading and free resources +*/ +CVAPI(void) cvReleaseCapture( CvCapture** capture ); + +enum +{ + // modes of the controlling registers (can be: auto, manual, auto single push, absolute Latter allowed with any other mode) + // every feature can have only one mode turned on at a time + CV_CAP_PROP_DC1394_OFF = -4, //turn the feature off (not controlled manually nor automatically) + CV_CAP_PROP_DC1394_MODE_MANUAL = -3, //set automatically when a value of the feature is set by the user + CV_CAP_PROP_DC1394_MODE_AUTO = -2, + CV_CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO = -1, + CV_CAP_PROP_POS_MSEC =0, + CV_CAP_PROP_POS_FRAMES =1, + CV_CAP_PROP_POS_AVI_RATIO =2, + CV_CAP_PROP_FRAME_WIDTH =3, + CV_CAP_PROP_FRAME_HEIGHT =4, + CV_CAP_PROP_FPS =5, + CV_CAP_PROP_FOURCC =6, + CV_CAP_PROP_FRAME_COUNT =7, + CV_CAP_PROP_FORMAT =8, + CV_CAP_PROP_MODE =9, + CV_CAP_PROP_BRIGHTNESS =10, + CV_CAP_PROP_CONTRAST =11, + CV_CAP_PROP_SATURATION =12, + CV_CAP_PROP_HUE =13, + CV_CAP_PROP_GAIN =14, + CV_CAP_PROP_EXPOSURE =15, + CV_CAP_PROP_CONVERT_RGB =16, + CV_CAP_PROP_WHITE_BALANCE_BLUE_U =17, + CV_CAP_PROP_RECTIFICATION =18, + CV_CAP_PROP_MONOCHROME =19, + CV_CAP_PROP_SHARPNESS =20, + CV_CAP_PROP_AUTO_EXPOSURE =21, // exposure control done by camera, + // user can adjust reference level + // using this feature + CV_CAP_PROP_GAMMA =22, + CV_CAP_PROP_TEMPERATURE =23, + CV_CAP_PROP_TRIGGER =24, + CV_CAP_PROP_TRIGGER_DELAY =25, + CV_CAP_PROP_WHITE_BALANCE_RED_V =26, + CV_CAP_PROP_ZOOM =27, + CV_CAP_PROP_FOCUS =28, + CV_CAP_PROP_GUID =29, + CV_CAP_PROP_ISO_SPEED =30, + CV_CAP_PROP_MAX_DC1394 =31, + CV_CAP_PROP_BACKLIGHT =32, + CV_CAP_PROP_PAN =33, + CV_CAP_PROP_TILT =34, + CV_CAP_PROP_ROLL =35, + CV_CAP_PROP_IRIS =36, + CV_CAP_PROP_SETTINGS =37, + CV_CAP_PROP_BUFFERSIZE =38, + CV_CAP_PROP_AUTOFOCUS =39, + CV_CAP_PROP_SAR_NUM =40, + CV_CAP_PROP_SAR_DEN =41, + + CV_CAP_PROP_AUTOGRAB =1024, // property for videoio class CvCapture_Android only + CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING=1025, // readonly, tricky property, returns cpnst char* indeed + CV_CAP_PROP_PREVIEW_FORMAT=1026, // readonly, tricky property, returns cpnst char* indeed + + // OpenNI map generators + CV_CAP_OPENNI_DEPTH_GENERATOR = 1 << 31, + CV_CAP_OPENNI_IMAGE_GENERATOR = 1 << 30, + CV_CAP_OPENNI_IR_GENERATOR = 1 << 29, + CV_CAP_OPENNI_GENERATORS_MASK = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_OPENNI_IR_GENERATOR, + + // Properties of cameras available through OpenNI interfaces + CV_CAP_PROP_OPENNI_OUTPUT_MODE = 100, + CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH = 101, // in mm + CV_CAP_PROP_OPENNI_BASELINE = 102, // in mm + CV_CAP_PROP_OPENNI_FOCAL_LENGTH = 103, // in pixels + CV_CAP_PROP_OPENNI_REGISTRATION = 104, // flag + CV_CAP_PROP_OPENNI_REGISTRATION_ON = CV_CAP_PROP_OPENNI_REGISTRATION, // flag that synchronizes the remapping depth map to image map + // by changing depth generator's view point (if the flag is "on") or + // sets 
this view point to its normal one (if the flag is "off"). + CV_CAP_PROP_OPENNI_APPROX_FRAME_SYNC = 105, + CV_CAP_PROP_OPENNI_MAX_BUFFER_SIZE = 106, + CV_CAP_PROP_OPENNI_CIRCLE_BUFFER = 107, + CV_CAP_PROP_OPENNI_MAX_TIME_DURATION = 108, + + CV_CAP_PROP_OPENNI_GENERATOR_PRESENT = 109, + CV_CAP_PROP_OPENNI2_SYNC = 110, + CV_CAP_PROP_OPENNI2_MIRROR = 111, + + CV_CAP_OPENNI_IMAGE_GENERATOR_PRESENT = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_GENERATOR_PRESENT, + CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_OUTPUT_MODE, + CV_CAP_OPENNI_DEPTH_GENERATOR_PRESENT = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_GENERATOR_PRESENT, + CV_CAP_OPENNI_DEPTH_GENERATOR_BASELINE = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_BASELINE, + CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_FOCAL_LENGTH, + CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_REGISTRATION, + CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION, + CV_CAP_OPENNI_IR_GENERATOR_PRESENT = CV_CAP_OPENNI_IR_GENERATOR + CV_CAP_PROP_OPENNI_GENERATOR_PRESENT, + + // Properties of cameras available through GStreamer interface + CV_CAP_GSTREAMER_QUEUE_LENGTH = 200, // default is 1 + + // PVAPI + CV_CAP_PROP_PVAPI_MULTICASTIP = 300, // ip for anable multicast master mode. 0 for disable multicast + CV_CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE = 301, // FrameStartTriggerMode: Determines how a frame is initiated + CV_CAP_PROP_PVAPI_DECIMATIONHORIZONTAL = 302, // Horizontal sub-sampling of the image + CV_CAP_PROP_PVAPI_DECIMATIONVERTICAL = 303, // Vertical sub-sampling of the image + CV_CAP_PROP_PVAPI_BINNINGX = 304, // Horizontal binning factor + CV_CAP_PROP_PVAPI_BINNINGY = 305, // Vertical binning factor + CV_CAP_PROP_PVAPI_PIXELFORMAT = 306, // Pixel format + + // Properties of cameras available through XIMEA SDK interface + CV_CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping. + CV_CAP_PROP_XI_DATA_FORMAT = 401, // Output data format. + CV_CAP_PROP_XI_OFFSET_X = 402, // Horizontal offset from the origin to the area of interest (in pixels). + CV_CAP_PROP_XI_OFFSET_Y = 403, // Vertical offset from the origin to the area of interest (in pixels). + CV_CAP_PROP_XI_TRG_SOURCE = 404, // Defines source of trigger. + CV_CAP_PROP_XI_TRG_SOFTWARE = 405, // Generates an internal trigger. PRM_TRG_SOURCE must be set to TRG_SOFTWARE. + CV_CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input + CV_CAP_PROP_XI_GPI_MODE = 407, // Set general purpose input mode + CV_CAP_PROP_XI_GPI_LEVEL = 408, // Get general purpose level + CV_CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output + CV_CAP_PROP_XI_GPO_MODE = 410, // Set general purpose output mode + CV_CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED + CV_CAP_PROP_XI_LED_MODE = 412, // Define camera signalling LED functionality + CV_CAP_PROP_XI_MANUAL_WB = 413, // Calculates White Balance(must be called during acquisition) + CV_CAP_PROP_XI_AUTO_WB = 414, // Automatic white balance + CV_CAP_PROP_XI_AEAG = 415, // Automatic exposure/gain + CV_CAP_PROP_XI_EXP_PRIORITY = 416, // Exposure priority (0.5 - exposure 50%, gain 50%). 
+ CV_CAP_PROP_XI_AE_MAX_LIMIT = 417, // Maximum limit of exposure in AEAG procedure + CV_CAP_PROP_XI_AG_MAX_LIMIT = 418, // Maximum limit of gain in AEAG procedure + CV_CAP_PROP_XI_AEAG_LEVEL = 419, // Average intensity of output signal AEAG should achieve(in %) + CV_CAP_PROP_XI_TIMEOUT = 420, // Image capture timeout in milliseconds + CV_CAP_PROP_XI_EXPOSURE = 421, // Exposure time in microseconds + CV_CAP_PROP_XI_EXPOSURE_BURST_COUNT = 422, // Sets the number of times of exposure in one frame. + CV_CAP_PROP_XI_GAIN_SELECTOR = 423, // Gain selector for parameter Gain allows to select different type of gains. + CV_CAP_PROP_XI_GAIN = 424, // Gain in dB + CV_CAP_PROP_XI_DOWNSAMPLING_TYPE = 426, // Change image downsampling type. + CV_CAP_PROP_XI_BINNING_SELECTOR = 427, // Binning engine selector. + CV_CAP_PROP_XI_BINNING_VERTICAL = 428, // Vertical Binning - number of vertical photo-sensitive cells to combine together. + CV_CAP_PROP_XI_BINNING_HORIZONTAL = 429, // Horizontal Binning - number of horizontal photo-sensitive cells to combine together. + CV_CAP_PROP_XI_BINNING_PATTERN = 430, // Binning pattern type. + CV_CAP_PROP_XI_DECIMATION_SELECTOR = 431, // Decimation engine selector. + CV_CAP_PROP_XI_DECIMATION_VERTICAL = 432, // Vertical Decimation - vertical sub-sampling of the image - reduces the vertical resolution of the image by the specified vertical decimation factor. + CV_CAP_PROP_XI_DECIMATION_HORIZONTAL = 433, // Horizontal Decimation - horizontal sub-sampling of the image - reduces the horizontal resolution of the image by the specified vertical decimation factor. + CV_CAP_PROP_XI_DECIMATION_PATTERN = 434, // Decimation pattern type. + CV_CAP_PROP_XI_TEST_PATTERN_GENERATOR_SELECTOR = 587, // Selects which test pattern generator is controlled by the TestPattern feature. + CV_CAP_PROP_XI_TEST_PATTERN = 588, // Selects which test pattern type is generated by the selected generator. + CV_CAP_PROP_XI_IMAGE_DATA_FORMAT = 435, // Output data format. + CV_CAP_PROP_XI_SHUTTER_TYPE = 436, // Change sensor shutter type(CMOS sensor). + CV_CAP_PROP_XI_SENSOR_TAPS = 437, // Number of taps + CV_CAP_PROP_XI_AEAG_ROI_OFFSET_X = 439, // Automatic exposure/gain ROI offset X + CV_CAP_PROP_XI_AEAG_ROI_OFFSET_Y = 440, // Automatic exposure/gain ROI offset Y + CV_CAP_PROP_XI_AEAG_ROI_WIDTH = 441, // Automatic exposure/gain ROI Width + CV_CAP_PROP_XI_AEAG_ROI_HEIGHT = 442, // Automatic exposure/gain ROI Height + CV_CAP_PROP_XI_BPC = 445, // Correction of bad pixels + CV_CAP_PROP_XI_WB_KR = 448, // White balance red coefficient + CV_CAP_PROP_XI_WB_KG = 449, // White balance green coefficient + CV_CAP_PROP_XI_WB_KB = 450, // White balance blue coefficient + CV_CAP_PROP_XI_WIDTH = 451, // Width of the Image provided by the device (in pixels). + CV_CAP_PROP_XI_HEIGHT = 452, // Height of the Image provided by the device (in pixels). + CV_CAP_PROP_XI_REGION_SELECTOR = 589, // Selects Region in Multiple ROI which parameters are set by width, height, ... ,region mode + CV_CAP_PROP_XI_REGION_MODE = 595, // Activates/deactivates Region selected by Region Selector + CV_CAP_PROP_XI_LIMIT_BANDWIDTH = 459, // Set/get bandwidth(datarate)(in Megabits) + CV_CAP_PROP_XI_SENSOR_DATA_BIT_DEPTH = 460, // Sensor output data bit depth. + CV_CAP_PROP_XI_OUTPUT_DATA_BIT_DEPTH = 461, // Device output data bit depth. + CV_CAP_PROP_XI_IMAGE_DATA_BIT_DEPTH = 462, // bitdepth of data returned by function xiGetImage + CV_CAP_PROP_XI_OUTPUT_DATA_PACKING = 463, // Device output data packing (or grouping) enabled. 
Packing could be enabled if output_data_bit_depth > 8 and packing capability is available. + CV_CAP_PROP_XI_OUTPUT_DATA_PACKING_TYPE = 464, // Data packing type. Some cameras supports only specific packing type. + CV_CAP_PROP_XI_IS_COOLED = 465, // Returns 1 for cameras that support cooling. + CV_CAP_PROP_XI_COOLING = 466, // Start camera cooling. + CV_CAP_PROP_XI_TARGET_TEMP = 467, // Set sensor target temperature for cooling. + CV_CAP_PROP_XI_CHIP_TEMP = 468, // Camera sensor temperature + CV_CAP_PROP_XI_HOUS_TEMP = 469, // Camera housing temperature + CV_CAP_PROP_XI_HOUS_BACK_SIDE_TEMP = 590, // Camera housing back side temperature + CV_CAP_PROP_XI_SENSOR_BOARD_TEMP = 596, // Camera sensor board temperature + CV_CAP_PROP_XI_CMS = 470, // Mode of color management system. + CV_CAP_PROP_XI_APPLY_CMS = 471, // Enable applying of CMS profiles to xiGetImage (see XI_PRM_INPUT_CMS_PROFILE, XI_PRM_OUTPUT_CMS_PROFILE). + CV_CAP_PROP_XI_IMAGE_IS_COLOR = 474, // Returns 1 for color cameras. + CV_CAP_PROP_XI_COLOR_FILTER_ARRAY = 475, // Returns color filter array type of RAW data. + CV_CAP_PROP_XI_GAMMAY = 476, // Luminosity gamma + CV_CAP_PROP_XI_GAMMAC = 477, // Chromaticity gamma + CV_CAP_PROP_XI_SHARPNESS = 478, // Sharpness Strength + CV_CAP_PROP_XI_CC_MATRIX_00 = 479, // Color Correction Matrix element [0][0] + CV_CAP_PROP_XI_CC_MATRIX_01 = 480, // Color Correction Matrix element [0][1] + CV_CAP_PROP_XI_CC_MATRIX_02 = 481, // Color Correction Matrix element [0][2] + CV_CAP_PROP_XI_CC_MATRIX_03 = 482, // Color Correction Matrix element [0][3] + CV_CAP_PROP_XI_CC_MATRIX_10 = 483, // Color Correction Matrix element [1][0] + CV_CAP_PROP_XI_CC_MATRIX_11 = 484, // Color Correction Matrix element [1][1] + CV_CAP_PROP_XI_CC_MATRIX_12 = 485, // Color Correction Matrix element [1][2] + CV_CAP_PROP_XI_CC_MATRIX_13 = 486, // Color Correction Matrix element [1][3] + CV_CAP_PROP_XI_CC_MATRIX_20 = 487, // Color Correction Matrix element [2][0] + CV_CAP_PROP_XI_CC_MATRIX_21 = 488, // Color Correction Matrix element [2][1] + CV_CAP_PROP_XI_CC_MATRIX_22 = 489, // Color Correction Matrix element [2][2] + CV_CAP_PROP_XI_CC_MATRIX_23 = 490, // Color Correction Matrix element [2][3] + CV_CAP_PROP_XI_CC_MATRIX_30 = 491, // Color Correction Matrix element [3][0] + CV_CAP_PROP_XI_CC_MATRIX_31 = 492, // Color Correction Matrix element [3][1] + CV_CAP_PROP_XI_CC_MATRIX_32 = 493, // Color Correction Matrix element [3][2] + CV_CAP_PROP_XI_CC_MATRIX_33 = 494, // Color Correction Matrix element [3][3] + CV_CAP_PROP_XI_DEFAULT_CC_MATRIX = 495, // Set default Color Correction Matrix + CV_CAP_PROP_XI_TRG_SELECTOR = 498, // Selects the type of trigger. + CV_CAP_PROP_XI_ACQ_FRAME_BURST_COUNT = 499, // Sets number of frames acquired by burst. This burst is used only if trigger is set to FrameBurstStart + CV_CAP_PROP_XI_DEBOUNCE_EN = 507, // Enable/Disable debounce to selected GPI + CV_CAP_PROP_XI_DEBOUNCE_T0 = 508, // Debounce time (x * 10us) + CV_CAP_PROP_XI_DEBOUNCE_T1 = 509, // Debounce time (x * 10us) + CV_CAP_PROP_XI_DEBOUNCE_POL = 510, // Debounce polarity (pol = 1 t0 - falling edge, t1 - rising edge) + CV_CAP_PROP_XI_LENS_MODE = 511, // Status of lens control interface. This shall be set to XI_ON before any Lens operations. + CV_CAP_PROP_XI_LENS_APERTURE_VALUE = 512, // Current lens aperture value in stops. Examples: 2.8, 4, 5.6, 8, 11 + CV_CAP_PROP_XI_LENS_FOCUS_MOVEMENT_VALUE = 513, // Lens current focus movement value to be used by XI_PRM_LENS_FOCUS_MOVE in motor steps. 
+ CV_CAP_PROP_XI_LENS_FOCUS_MOVE = 514, // Moves lens focus motor by steps set in XI_PRM_LENS_FOCUS_MOVEMENT_VALUE. + CV_CAP_PROP_XI_LENS_FOCUS_DISTANCE = 515, // Lens focus distance in cm. + CV_CAP_PROP_XI_LENS_FOCAL_LENGTH = 516, // Lens focal distance in mm. + CV_CAP_PROP_XI_LENS_FEATURE_SELECTOR = 517, // Selects the current feature which is accessible by XI_PRM_LENS_FEATURE. + CV_CAP_PROP_XI_LENS_FEATURE = 518, // Allows access to lens feature value currently selected by XI_PRM_LENS_FEATURE_SELECTOR. + CV_CAP_PROP_XI_DEVICE_MODEL_ID = 521, // Return device model id + CV_CAP_PROP_XI_DEVICE_SN = 522, // Return device serial number + CV_CAP_PROP_XI_IMAGE_DATA_FORMAT_RGB32_ALPHA = 529, // The alpha channel of RGB32 output image format. + CV_CAP_PROP_XI_IMAGE_PAYLOAD_SIZE = 530, // Buffer size in bytes sufficient for output image returned by xiGetImage + CV_CAP_PROP_XI_TRANSPORT_PIXEL_FORMAT = 531, // Current format of pixels on transport layer. + CV_CAP_PROP_XI_SENSOR_CLOCK_FREQ_HZ = 532, // Sensor clock frequency in Hz. + CV_CAP_PROP_XI_SENSOR_CLOCK_FREQ_INDEX = 533, // Sensor clock frequency index. Sensor with selected frequencies have possibility to set the frequency only by this index. + CV_CAP_PROP_XI_SENSOR_OUTPUT_CHANNEL_COUNT = 534, // Number of output channels from sensor used for data transfer. + CV_CAP_PROP_XI_FRAMERATE = 535, // Define framerate in Hz + CV_CAP_PROP_XI_COUNTER_SELECTOR = 536, // Select counter + CV_CAP_PROP_XI_COUNTER_VALUE = 537, // Counter status + CV_CAP_PROP_XI_ACQ_TIMING_MODE = 538, // Type of sensor frames timing. + CV_CAP_PROP_XI_AVAILABLE_BANDWIDTH = 539, // Calculate and return available interface bandwidth(int Megabits) + CV_CAP_PROP_XI_BUFFER_POLICY = 540, // Data move policy + CV_CAP_PROP_XI_LUT_EN = 541, // Activates LUT. + CV_CAP_PROP_XI_LUT_INDEX = 542, // Control the index (offset) of the coefficient to access in the LUT. + CV_CAP_PROP_XI_LUT_VALUE = 543, // Value at entry LUTIndex of the LUT + CV_CAP_PROP_XI_TRG_DELAY = 544, // Specifies the delay in microseconds (us) to apply after the trigger reception before activating it. + CV_CAP_PROP_XI_TS_RST_MODE = 545, // Defines how time stamp reset engine will be armed + CV_CAP_PROP_XI_TS_RST_SOURCE = 546, // Defines which source will be used for timestamp reset. Writing this parameter will trigger settings of engine (arming) + CV_CAP_PROP_XI_IS_DEVICE_EXIST = 547, // Returns 1 if camera connected and works properly. + CV_CAP_PROP_XI_ACQ_BUFFER_SIZE = 548, // Acquisition buffer size in buffer_size_unit. Default bytes. + CV_CAP_PROP_XI_ACQ_BUFFER_SIZE_UNIT = 549, // Acquisition buffer size unit in bytes. Default 1. E.g. Value 1024 means that buffer_size is in KiBytes + CV_CAP_PROP_XI_ACQ_TRANSPORT_BUFFER_SIZE = 550, // Acquisition transport buffer size in bytes + CV_CAP_PROP_XI_BUFFERS_QUEUE_SIZE = 551, // Queue of field/frame buffers + CV_CAP_PROP_XI_ACQ_TRANSPORT_BUFFER_COMMIT = 552, // Number of buffers to commit to low level + CV_CAP_PROP_XI_RECENT_FRAME = 553, // GetImage returns most recent frame + CV_CAP_PROP_XI_DEVICE_RESET = 554, // Resets the camera to default state. + CV_CAP_PROP_XI_COLUMN_FPN_CORRECTION = 555, // Correction of column FPN + CV_CAP_PROP_XI_ROW_FPN_CORRECTION = 591, // Correction of row FPN + CV_CAP_PROP_XI_SENSOR_MODE = 558, // Current sensor mode. Allows to select sensor mode by one integer. Setting of this parameter affects: image dimensions and downsampling. + CV_CAP_PROP_XI_HDR = 559, // Enable High Dynamic Range feature. 
+ CV_CAP_PROP_XI_HDR_KNEEPOINT_COUNT = 560, // The number of kneepoints in the PWLR. + CV_CAP_PROP_XI_HDR_T1 = 561, // position of first kneepoint(in % of XI_PRM_EXPOSURE) + CV_CAP_PROP_XI_HDR_T2 = 562, // position of second kneepoint (in % of XI_PRM_EXPOSURE) + CV_CAP_PROP_XI_KNEEPOINT1 = 563, // value of first kneepoint (% of sensor saturation) + CV_CAP_PROP_XI_KNEEPOINT2 = 564, // value of second kneepoint (% of sensor saturation) + CV_CAP_PROP_XI_IMAGE_BLACK_LEVEL = 565, // Last image black level counts. Can be used for Offline processing to recall it. + CV_CAP_PROP_XI_HW_REVISION = 571, // Returns hardware revision number. + CV_CAP_PROP_XI_DEBUG_LEVEL = 572, // Set debug level + CV_CAP_PROP_XI_AUTO_BANDWIDTH_CALCULATION = 573, // Automatic bandwidth calculation, + CV_CAP_PROP_XI_FFS_FILE_ID = 594, // File number. + CV_CAP_PROP_XI_FFS_FILE_SIZE = 580, // Size of file. + CV_CAP_PROP_XI_FREE_FFS_SIZE = 581, // Size of free camera FFS. + CV_CAP_PROP_XI_USED_FFS_SIZE = 582, // Size of used camera FFS. + CV_CAP_PROP_XI_FFS_ACCESS_KEY = 583, // Setting of key enables file operations on some cameras. + CV_CAP_PROP_XI_SENSOR_FEATURE_SELECTOR = 585, // Selects the current feature which is accessible by XI_PRM_SENSOR_FEATURE_VALUE. + CV_CAP_PROP_XI_SENSOR_FEATURE_VALUE = 586, // Allows access to sensor feature value currently selected by XI_PRM_SENSOR_FEATURE_SELECTOR. + + + // Properties for Android cameras + CV_CAP_PROP_ANDROID_FLASH_MODE = 8001, + CV_CAP_PROP_ANDROID_FOCUS_MODE = 8002, + CV_CAP_PROP_ANDROID_WHITE_BALANCE = 8003, + CV_CAP_PROP_ANDROID_ANTIBANDING = 8004, + CV_CAP_PROP_ANDROID_FOCAL_LENGTH = 8005, + CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR = 8006, + CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL = 8007, + CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR = 8008, + CV_CAP_PROP_ANDROID_EXPOSE_LOCK = 8009, + CV_CAP_PROP_ANDROID_WHITEBALANCE_LOCK = 8010, + + // Properties of cameras available through AVFOUNDATION interface + CV_CAP_PROP_IOS_DEVICE_FOCUS = 9001, + CV_CAP_PROP_IOS_DEVICE_EXPOSURE = 9002, + CV_CAP_PROP_IOS_DEVICE_FLASH = 9003, + CV_CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004, + CV_CAP_PROP_IOS_DEVICE_TORCH = 9005, + + // Properties of cameras available through Smartek Giganetix Ethernet Vision interface + /* --- Vladimir Litvinenko (litvinenko.vladimir@gmail.com) --- */ + CV_CAP_PROP_GIGA_FRAME_OFFSET_X = 10001, + CV_CAP_PROP_GIGA_FRAME_OFFSET_Y = 10002, + CV_CAP_PROP_GIGA_FRAME_WIDTH_MAX = 10003, + CV_CAP_PROP_GIGA_FRAME_HEIGH_MAX = 10004, + CV_CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005, + CV_CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006, + + CV_CAP_PROP_INTELPERC_PROFILE_COUNT = 11001, + CV_CAP_PROP_INTELPERC_PROFILE_IDX = 11002, + CV_CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE = 11003, + CV_CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE = 11004, + CV_CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD = 11005, + CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ = 11006, + CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT = 11007, + + // Intel PerC streams + CV_CAP_INTELPERC_DEPTH_GENERATOR = 1 << 29, + CV_CAP_INTELPERC_IMAGE_GENERATOR = 1 << 28, + CV_CAP_INTELPERC_GENERATORS_MASK = CV_CAP_INTELPERC_DEPTH_GENERATOR + CV_CAP_INTELPERC_IMAGE_GENERATOR +}; + +// Generic camera output modes. +// Currently, these are supported through the libv4l interface only. +enum +{ + CV_CAP_MODE_BGR = 0, // BGR24 (default) + CV_CAP_MODE_RGB = 1, // RGB24 + CV_CAP_MODE_GRAY = 2, // Y8 + CV_CAP_MODE_YUYV = 3 // YUYV +}; + +enum +{ + // Data given from depth generator. 
+ CV_CAP_OPENNI_DEPTH_MAP = 0, // Depth values in mm (CV_16UC1) + CV_CAP_OPENNI_POINT_CLOUD_MAP = 1, // XYZ in meters (CV_32FC3) + CV_CAP_OPENNI_DISPARITY_MAP = 2, // Disparity in pixels (CV_8UC1) + CV_CAP_OPENNI_DISPARITY_MAP_32F = 3, // Disparity in pixels (CV_32FC1) + CV_CAP_OPENNI_VALID_DEPTH_MASK = 4, // CV_8UC1 + + // Data given from RGB image generator. + CV_CAP_OPENNI_BGR_IMAGE = 5, + CV_CAP_OPENNI_GRAY_IMAGE = 6, + + // Data given from IR image generator. + CV_CAP_OPENNI_IR_IMAGE = 7 +}; + +// Supported output modes of OpenNI image generator +enum +{ + CV_CAP_OPENNI_VGA_30HZ = 0, + CV_CAP_OPENNI_SXGA_15HZ = 1, + CV_CAP_OPENNI_SXGA_30HZ = 2, + CV_CAP_OPENNI_QVGA_30HZ = 3, + CV_CAP_OPENNI_QVGA_60HZ = 4 +}; + +enum +{ + CV_CAP_INTELPERC_DEPTH_MAP = 0, // Each pixel is a 16-bit integer. The value indicates the distance from an object to the camera's XY plane or the Cartesian depth. + CV_CAP_INTELPERC_UVDEPTH_MAP = 1, // Each pixel contains two 32-bit floating point values in the range of 0-1, representing the mapping of depth coordinates to the color coordinates. + CV_CAP_INTELPERC_IR_MAP = 2, // Each pixel is a 16-bit integer. The value indicates the intensity of the reflected laser beam. + CV_CAP_INTELPERC_IMAGE = 3 +}; + +// gPhoto2 properties, if propertyId is less than 0 then work on widget with that __additive inversed__ camera setting ID +// Get IDs by using CAP_PROP_GPHOTO2_WIDGET_ENUMERATE. +// @see CvCaptureCAM_GPHOTO2 for more info +enum +{ + CV_CAP_PROP_GPHOTO2_PREVIEW = 17001, // Capture only preview from liveview mode. + CV_CAP_PROP_GPHOTO2_WIDGET_ENUMERATE = 17002, // Readonly, returns (const char *). + CV_CAP_PROP_GPHOTO2_RELOAD_CONFIG = 17003, // Trigger, only by set. Reload camera settings. + CV_CAP_PROP_GPHOTO2_RELOAD_ON_CHANGE = 17004, // Reload all settings on set. + CV_CAP_PROP_GPHOTO2_COLLECT_MSGS = 17005, // Collect messages with details. + CV_CAP_PROP_GPHOTO2_FLUSH_MSGS = 17006, // Readonly, returns (const char *). + CV_CAP_PROP_SPEED = 17007, // Exposure speed. Can be readonly, depends on camera program. + CV_CAP_PROP_APERTURE = 17008, // Aperture. Can be readonly, depends on camera program. + CV_CAP_PROP_EXPOSUREPROGRAM = 17009, // Camera exposure program. + CV_CAP_PROP_VIEWFINDER = 17010 // Enter liveview mode. +}; + +/** @brief retrieve capture properties +*/ +CVAPI(double) cvGetCaptureProperty( CvCapture* capture, int property_id ); +/** @brief set capture properties +*/ +CVAPI(int) cvSetCaptureProperty( CvCapture* capture, int property_id, double value ); + +/** @brief Return the type of the capturer (eg, ::CV_CAP_VFW, ::CV_CAP_UNICAP) + +It is unknown if created with ::CV_CAP_ANY +*/ +CVAPI(int) cvGetCaptureDomain( CvCapture* capture); + +/** @brief "black box" video file writer structure + +In C++ use cv::VideoWriter +*/ +typedef struct CvVideoWriter CvVideoWriter; + +//! Macro to construct the fourcc code of the codec. Same as CV_FOURCC() +#define CV_FOURCC_MACRO(c1, c2, c3, c4) (((c1) & 255) + (((c2) & 255) << 8) + (((c3) & 255) << 16) + (((c4) & 255) << 24)) + +/** @brief Constructs the fourcc code of the codec function + +Simply call it with 4 chars fourcc code like `CV_FOURCC('I', 'Y', 'U', 'V')` + +List of codes can be obtained at [Video Codecs by FOURCC](http://www.fourcc.org/codecs.php) page. +FFMPEG backend with MP4 container natively uses other values as fourcc code: +see [ObjectType](http://mp4ra.org/#/codecs). +*/ +CV_INLINE int CV_FOURCC(char c1, char c2, char c3, char c4) +{ + return CV_FOURCC_MACRO(c1, c2, c3, c4); +} + +//! 
(Windows only) Open Codec Selection Dialog +#define CV_FOURCC_PROMPT -1 +//! (Linux only) Use default codec for specified filename +#define CV_FOURCC_DEFAULT CV_FOURCC('I', 'Y', 'U', 'V') + +/** @brief initialize video file writer +*/ +CVAPI(CvVideoWriter*) cvCreateVideoWriter( const char* filename, int fourcc, + double fps, CvSize frame_size, + int is_color CV_DEFAULT(1)); + +/** @brief write frame to video file +*/ +CVAPI(int) cvWriteFrame( CvVideoWriter* writer, const IplImage* image ); + +/** @brief close video file writer +*/ +CVAPI(void) cvReleaseVideoWriter( CvVideoWriter** writer ); + +// *************************************************************************************** +//! @name Obsolete functions/synonyms +//! @{ +#define cvCaptureFromCAM cvCreateCameraCapture //!< @deprecated use cvCreateCameraCapture() instead +#define cvCaptureFromFile cvCreateFileCapture //!< @deprecated use cvCreateFileCapture() instead +#define cvCaptureFromAVI cvCaptureFromFile //!< @deprecated use cvCreateFileCapture() instead +#define cvCreateAVIWriter cvCreateVideoWriter //!< @deprecated use cvCreateVideoWriter() instead +#define cvWriteToAVI cvWriteFrame //!< @deprecated use cvWriteFrame() instead +//! @} Obsolete... + +//! @} videoio_c + +#ifdef __cplusplus +} +#endif + +#endif //OPENCV_VIDEOIO_H diff --git a/third_party/opencv/uos/sw_64/include/opencv2/videostab.hpp b/third_party/opencv/uos/sw_64/include/opencv2/videostab.hpp new file mode 100644 index 00000000..ca3f5ade --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/videostab.hpp @@ -0,0 +1,81 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
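To make the legacy C interface declared above concrete, here is a short transcoding sketch; the file names and the 640x480 frame size are placeholders, and error checking is omitted:

#include "opencv2/videoio/videoio_c.h"

void transcode(void)
{
    CvCapture* cap = cvCreateFileCapture("in.avi");                   /* placeholder path */
    double fps = cvGetCaptureProperty(cap, CV_CAP_PROP_FPS);
    CvVideoWriter* writer = cvCreateVideoWriter("out.avi",            /* placeholder path */
                                                CV_FOURCC('M', 'J', 'P', 'G'),
                                                fps, cvSize(640, 480), 1);
    IplImage* frame;
    while ((frame = cvQueryFrame(cap)) != NULL)                       /* frame must not be released or modified */
        cvWriteFrame(writer, frame);
    cvReleaseVideoWriter(&writer);
    cvReleaseCapture(&cap);
}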
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_VIDEOSTAB_HPP +#define OPENCV_VIDEOSTAB_HPP + +/** + @defgroup videostab Video Stabilization + +The video stabilization module contains a set of functions and classes that can be used to solve the +problem of video stabilization. There are a few methods implemented, most of them are described in +the papers @cite OF06 and @cite G11 . However, there are some extensions and deviations from the original +paper methods. + +### References + + 1. "Full-Frame Video Stabilization with Motion Inpainting" + Yasuyuki Matsushita, Eyal Ofek, Weina Ge, Xiaoou Tang, Senior Member, and Heung-Yeung Shum + 2. "Auto-Directed Video Stabilization with Robust L1 Optimal Camera Paths" + Matthias Grundmann, Vivek Kwatra, Irfan Essa + + @{ + @defgroup videostab_motion Global Motion Estimation + +The video stabilization module contains a set of functions and classes for global motion estimation +between point clouds or between images. In the last case features are extracted and matched +internally. For the sake of convenience the motion estimation functions are wrapped into classes. +Both the functions and the classes are available. + + @defgroup videostab_marching Fast Marching Method + +The Fast Marching Method @cite Telea04 is used in of the video stabilization routines to do motion and +color inpainting. The method is implemented is a flexible way and it's made public for other users. + + @} + +*/ + +#include "opencv2/videostab/stabilizer.hpp" +#include "opencv2/videostab/ring_buffer.hpp" + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/videostab/deblurring.hpp b/third_party/opencv/uos/sw_64/include/opencv2/videostab/deblurring.hpp new file mode 100644 index 00000000..c6656401 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/videostab/deblurring.hpp @@ -0,0 +1,116 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
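A minimal end-to-end sketch of the stabilization pipeline described in the module documentation above, using the default one-pass stabilizer pulled in through stabilizer.hpp (the input path is a placeholder and all estimator/filter settings are whatever the defaults provide):

#include <opencv2/videostab.hpp>

void stabilizeFile()
{
    using namespace cv::videostab;
    OnePassStabilizer stabilizer;                                        // default motion estimator and motion filter
    stabilizer.setFrameSource(makePtr<VideoFileSource>("shaky.avi"));    // placeholder input file
    for (cv::Mat frame = stabilizer.nextFrame(); !frame.empty(); frame = stabilizer.nextFrame())
    {
        // each non-empty frame is the stabilized counterpart of the next input frame
    }
}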
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_VIDEOSTAB_DEBLURRING_HPP +#define OPENCV_VIDEOSTAB_DEBLURRING_HPP + +#include +#include "opencv2/core.hpp" + +namespace cv +{ +namespace videostab +{ + +//! @addtogroup videostab +//! @{ + +CV_EXPORTS float calcBlurriness(const Mat &frame); + +class CV_EXPORTS DeblurerBase +{ +public: + DeblurerBase() : radius_(0), frames_(0), motions_(0), blurrinessRates_(0) {} + + virtual ~DeblurerBase() {} + + virtual void setRadius(int val) { radius_ = val; } + virtual int radius() const { return radius_; } + + virtual void deblur(int idx, Mat &frame) = 0; + + + // data from stabilizer + + virtual void setFrames(const std::vector &val) { frames_ = &val; } + virtual const std::vector& frames() const { return *frames_; } + + virtual void setMotions(const std::vector &val) { motions_ = &val; } + virtual const std::vector& motions() const { return *motions_; } + + virtual void setBlurrinessRates(const std::vector &val) { blurrinessRates_ = &val; } + virtual const std::vector& blurrinessRates() const { return *blurrinessRates_; } + +protected: + int radius_; + const std::vector *frames_; + const std::vector *motions_; + const std::vector *blurrinessRates_; +}; + +class CV_EXPORTS NullDeblurer : public DeblurerBase +{ +public: + virtual void deblur(int /*idx*/, Mat &/*frame*/) CV_OVERRIDE {} +}; + +class CV_EXPORTS WeightingDeblurer : public DeblurerBase +{ +public: + WeightingDeblurer(); + + void setSensitivity(float val) { sensitivity_ = val; } + float sensitivity() const { return sensitivity_; } + + virtual void deblur(int idx, Mat &frame) CV_OVERRIDE; + +private: + float sensitivity_; + Mat_ bSum_, gSum_, rSum_, wSum_; +}; + +//! @} + +} // namespace videostab +} // namespace cv + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/videostab/fast_marching.hpp b/third_party/opencv/uos/sw_64/include/opencv2/videostab/fast_marching.hpp new file mode 100644 index 00000000..43f8e4a7 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/videostab/fast_marching.hpp @@ -0,0 +1,121 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. 
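As a small illustration of calcBlurriness declared above, one might rank candidate frames by the measure; treating a smaller value as the sharper frame is an assumption of this sketch, not something the header states:

#include <vector>
#include <opencv2/videostab/deblurring.hpp>

int pickSharpestFrame(const std::vector<cv::Mat> &frames)
{
    int best = 0;
    float bestScore = cv::videostab::calcBlurriness(frames[0]);
    for (size_t i = 1; i < frames.size(); ++i)
    {
        float score = cv::videostab::calcBlurriness(frames[i]);
        if (score < bestScore) { bestScore = score; best = (int)i; }     // keep the lowest blurriness score
    }
    return best;
}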
+// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_VIDEOSTAB_FAST_MARCHING_HPP +#define OPENCV_VIDEOSTAB_FAST_MARCHING_HPP + +#include +#include +#include +#include "opencv2/core.hpp" + +namespace cv +{ +namespace videostab +{ + +//! @addtogroup videostab_marching +//! @{ + +/** @brief Describes the Fast Marching Method implementation. + + See http://iwi.eldoc.ub.rug.nl/FILES/root/2004/JGraphToolsTelea/2004JGraphToolsTelea.pdf + */ +class CV_EXPORTS FastMarchingMethod +{ +public: + FastMarchingMethod() : inf_(1e6f), size_(0) {} + + /** @brief Template method that runs the Fast Marching Method. + + @param mask Image mask. 0 value indicates that the pixel value must be inpainted, 255 indicates + that the pixel value is known, other values aren't acceptable. + @param inpaint Inpainting functor that overloads void operator ()(int x, int y). + @return Inpainting functor. + */ + template + Inpaint run(const Mat &mask, Inpaint inpaint); + + /** + @return Distance map that's created during working of the method. + */ + Mat distanceMap() const { return dist_; } + +private: + enum { INSIDE = 0, BAND = 1, KNOWN = 255 }; + + struct DXY + { + float dist; + int x, y; + + DXY() : dist(0), x(0), y(0) {} + DXY(float _dist, int _x, int _y) : dist(_dist), x(_x), y(_y) {} + bool operator <(const DXY &dxy) const { return dist < dxy.dist; } + }; + + float solve(int x1, int y1, int x2, int y2) const; + int& indexOf(const DXY &dxy) { return index_(dxy.y, dxy.x); } + + void heapUp(int idx); + void heapDown(int idx); + void heapAdd(const DXY &dxy); + void heapRemoveMin(); + + float inf_; + + cv::Mat_ flag_; // flag map + cv::Mat_ dist_; // distance map + + cv::Mat_ index_; // index of point in the narrow band + std::vector narrowBand_; // narrow band heap + int size_; // narrow band size +}; + +//! 
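The run() template above accepts any functor exposing operator()(int x, int y); the following purely illustrative sketch only counts the pixels the method visits, where a real inpainter would fill them:

#include <opencv2/videostab/fast_marching.hpp>

struct CountingInpainter
{
    int visited = 0;
    void operator()(int /*x*/, int /*y*/) { ++visited; }      // a real inpainter would reconstruct pixel (x, y) here
};

int countInpaintedPixels(const cv::Mat &mask)                 // mask: CV_8U, 0 = to be inpainted, 255 = known
{
    cv::videostab::FastMarchingMethod fmm;
    return fmm.run(mask, CountingInpainter()).visited;        // run() returns the functor by value
}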
@} + +} // namespace videostab +} // namespace cv + +#include "fast_marching_inl.hpp" + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/videostab/fast_marching_inl.hpp b/third_party/opencv/uos/sw_64/include/opencv2/videostab/fast_marching_inl.hpp new file mode 100644 index 00000000..fdd488aa --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/videostab/fast_marching_inl.hpp @@ -0,0 +1,165 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef OPENCV_VIDEOSTAB_FAST_MARCHING_INL_HPP +#define OPENCV_VIDEOSTAB_FAST_MARCHING_INL_HPP + +#include "opencv2/videostab/fast_marching.hpp" + +namespace cv +{ +namespace videostab +{ + +template +Inpaint FastMarchingMethod::run(const cv::Mat &mask, Inpaint inpaint) +{ + using namespace cv; + + CV_Assert(mask.type() == CV_8U); + + static const int lut[4][2] = {{-1,0}, {0,-1}, {1,0}, {0,1}}; + + mask.copyTo(flag_); + flag_.create(mask.size()); + dist_.create(mask.size()); + index_.create(mask.size()); + narrowBand_.clear(); + size_ = 0; + + // init + for (int y = 0; y < flag_.rows; ++y) + { + for (int x = 0; x < flag_.cols; ++x) + { + if (flag_(y,x) == KNOWN) + dist_(y,x) = 0.f; + else + { + int n = 0; + int nunknown = 0; + + for (int i = 0; i < 4; ++i) + { + int xn = x + lut[i][0]; + int yn = y + lut[i][1]; + + if (xn >= 0 && xn < flag_.cols && yn >= 0 && yn < flag_.rows) + { + n++; + if (flag_(yn,xn) != KNOWN) + nunknown++; + } + } + + if (n>0 && nunknown == n) + { + dist_(y,x) = inf_; + flag_(y,x) = INSIDE; + } + else + { + dist_(y,x) = 0.f; + flag_(y,x) = BAND; + inpaint(x, y); + + narrowBand_.push_back(DXY(0.f,x,y)); + index_(y,x) = size_++; + } + } + } + } + + // make heap + for (int i = size_/2-1; i >= 0; --i) + heapDown(i); + + // main cycle + while (size_ > 0) + { + int x = narrowBand_[0].x; + int y = narrowBand_[0].y; + heapRemoveMin(); + + flag_(y,x) = KNOWN; + for (int n = 0; n < 4; ++n) + { + int xn = x + lut[n][0]; + int yn = y + lut[n][1]; + + if (xn >= 0 && xn < flag_.cols && yn >= 0 && yn < flag_.rows && flag_(yn,xn) != KNOWN) + { + dist_(yn,xn) = std::min(std::min(solve(xn-1, yn, xn, yn-1), solve(xn+1, yn, xn, yn-1)), + std::min(solve(xn-1, yn, xn, yn+1), solve(xn+1, yn, xn, yn+1))); + + if (flag_(yn,xn) == INSIDE) + { + flag_(yn,xn) = BAND; + inpaint(xn, yn); + heapAdd(DXY(dist_(yn,xn),xn,yn)); + } + else + { + int i = index_(yn,xn); + if (dist_(yn,xn) < narrowBand_[i].dist) + { + narrowBand_[i].dist = dist_(yn,xn); + heapUp(i); + } + // works better if it's commented out + /*else if (dist(yn,xn) > narrowBand[i].dist) + { + narrowBand[i].dist = dist(yn,xn); + heapDown(i); + }*/ + } + } + } + } + + return inpaint; +} + +} // namespace videostab +} // namespace cv + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/videostab/frame_source.hpp b/third_party/opencv/uos/sw_64/include/opencv2/videostab/frame_source.hpp new file mode 100644 index 00000000..171c637c --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/videostab/frame_source.hpp @@ -0,0 +1,94 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_VIDEOSTAB_FRAME_SOURCE_HPP +#define OPENCV_VIDEOSTAB_FRAME_SOURCE_HPP + +#include +#include "opencv2/core.hpp" + +namespace cv +{ +namespace videostab +{ + +//! @addtogroup videostab +//! @{ + +class CV_EXPORTS IFrameSource +{ +public: + virtual ~IFrameSource() {} + virtual void reset() = 0; + virtual Mat nextFrame() = 0; +}; + +class CV_EXPORTS NullFrameSource : public IFrameSource +{ +public: + virtual void reset() CV_OVERRIDE {} + virtual Mat nextFrame() CV_OVERRIDE { return Mat(); } +}; + +class CV_EXPORTS VideoFileSource : public IFrameSource +{ +public: + VideoFileSource(const String &path, bool volatileFrame = false); + + virtual void reset() CV_OVERRIDE; + virtual Mat nextFrame() CV_OVERRIDE; + + int width(); + int height(); + int count(); + double fps(); + +private: + Ptr impl; +}; + +//! @} + +} // namespace videostab +} // namespace cv + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/videostab/global_motion.hpp b/third_party/opencv/uos/sw_64/include/opencv2/videostab/global_motion.hpp new file mode 100644 index 00000000..fedca2cf --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/videostab/global_motion.hpp @@ -0,0 +1,300 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
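A short sketch of pulling frames from the VideoFileSource declared above (the path is a placeholder; an empty Mat marks end of stream, per the IFrameSource contract):

#include <opencv2/videostab/frame_source.hpp>

void drainVideo()
{
    cv::videostab::VideoFileSource source("input.avi");       // placeholder path
    int processed = 0;
    for (cv::Mat frame = source.nextFrame(); !frame.empty(); frame = source.nextFrame())
        ++processed;                                           // process or stabilize the frame here
    // source.width(), source.height(), source.fps() and source.count() report the stream's properties
}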
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_VIDEOSTAB_GLOBAL_MOTION_HPP +#define OPENCV_VIDEOSTAB_GLOBAL_MOTION_HPP + +#include +#include +#include "opencv2/core.hpp" +#include "opencv2/features2d.hpp" +#include "opencv2/opencv_modules.hpp" +#include "opencv2/videostab/optical_flow.hpp" +#include "opencv2/videostab/motion_core.hpp" +#include "opencv2/videostab/outlier_rejection.hpp" + +#ifdef HAVE_OPENCV_CUDAIMGPROC +# include "opencv2/cudaimgproc.hpp" +#endif + +namespace cv +{ +namespace videostab +{ + +//! @addtogroup videostab_motion +//! @{ + +/** @brief Estimates best global motion between two 2D point clouds in the least-squares sense. + +@note Works in-place and changes input point arrays. + +@param points0 Source set of 2D points (32F). +@param points1 Destination set of 2D points (32F). +@param model Motion model (up to MM_AFFINE). +@param rmse Final root-mean-square error. +@return 3x3 2D transformation matrix (32F). + */ +CV_EXPORTS Mat estimateGlobalMotionLeastSquares( + InputOutputArray points0, InputOutputArray points1, int model = MM_AFFINE, + float *rmse = 0); + +/** @brief Estimates best global motion between two 2D point clouds robustly (using RANSAC method). + +@param points0 Source set of 2D points (32F). +@param points1 Destination set of 2D points (32F). +@param model Motion model. See cv::videostab::MotionModel. +@param params RANSAC method parameters. See videostab::RansacParams. +@param rmse Final root-mean-square error. +@param ninliers Final number of inliers. + */ +CV_EXPORTS Mat estimateGlobalMotionRansac( + InputArray points0, InputArray points1, int model = MM_AFFINE, + const RansacParams ¶ms = RansacParams::default2dMotion(MM_AFFINE), + float *rmse = 0, int *ninliers = 0); + +/** @brief Base class for all global motion estimation methods. + */ +class CV_EXPORTS MotionEstimatorBase +{ +public: + virtual ~MotionEstimatorBase() {} + + /** @brief Sets motion model. + + @param val Motion model. See cv::videostab::MotionModel. + */ + virtual void setMotionModel(MotionModel val) { motionModel_ = val; } + + /** + @return Motion model. See cv::videostab::MotionModel. + */ + virtual MotionModel motionModel() const { return motionModel_; } + + /** @brief Estimates global motion between two 2D point clouds. + + @param points0 Source set of 2D points (32F). + @param points1 Destination set of 2D points (32F). 
+ @param ok Indicates whether motion was estimated successfully. + @return 3x3 2D transformation matrix (32F). + */ + virtual Mat estimate(InputArray points0, InputArray points1, bool *ok = 0) = 0; + +protected: + MotionEstimatorBase(MotionModel model) { setMotionModel(model); } + +private: + MotionModel motionModel_; +}; + +/** @brief Describes a robust RANSAC-based global 2D motion estimation method which minimizes L2 error. + */ +class CV_EXPORTS MotionEstimatorRansacL2 : public MotionEstimatorBase +{ +public: + MotionEstimatorRansacL2(MotionModel model = MM_AFFINE); + + void setRansacParams(const RansacParams &val) { ransacParams_ = val; } + RansacParams ransacParams() const { return ransacParams_; } + + void setMinInlierRatio(float val) { minInlierRatio_ = val; } + float minInlierRatio() const { return minInlierRatio_; } + + virtual Mat estimate(InputArray points0, InputArray points1, bool *ok = 0) CV_OVERRIDE; + +private: + RansacParams ransacParams_; + float minInlierRatio_; +}; + +/** @brief Describes a global 2D motion estimation method which minimizes L1 error. + +@note To be able to use this method you must build OpenCV with CLP library support. : + */ +class CV_EXPORTS MotionEstimatorL1 : public MotionEstimatorBase +{ +public: + MotionEstimatorL1(MotionModel model = MM_AFFINE); + + virtual Mat estimate(InputArray points0, InputArray points1, bool *ok = 0) CV_OVERRIDE; + +private: + std::vector obj_, collb_, colub_; + std::vector elems_, rowlb_, rowub_; + std::vector rows_, cols_; + + void set(int row, int col, double coef) + { + rows_.push_back(row); + cols_.push_back(col); + elems_.push_back(coef); + } +}; + +/** @brief Base class for global 2D motion estimation methods which take frames as input. + */ +class CV_EXPORTS ImageMotionEstimatorBase +{ +public: + virtual ~ImageMotionEstimatorBase() {} + + virtual void setMotionModel(MotionModel val) { motionModel_ = val; } + virtual MotionModel motionModel() const { return motionModel_; } + + virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0) = 0; + +protected: + ImageMotionEstimatorBase(MotionModel model) { setMotionModel(model); } + +private: + MotionModel motionModel_; +}; + +class CV_EXPORTS FromFileMotionReader : public ImageMotionEstimatorBase +{ +public: + FromFileMotionReader(const String &path); + + virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0) CV_OVERRIDE; + +private: + std::ifstream file_; +}; + +class CV_EXPORTS ToFileMotionWriter : public ImageMotionEstimatorBase +{ +public: + ToFileMotionWriter(const String &path, Ptr estimator); + + virtual void setMotionModel(MotionModel val) CV_OVERRIDE { motionEstimator_->setMotionModel(val); } + virtual MotionModel motionModel() const CV_OVERRIDE { return motionEstimator_->motionModel(); } + + virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0) CV_OVERRIDE; + +private: + std::ofstream file_; + Ptr motionEstimator_; +}; + +/** @brief Describes a global 2D motion estimation method which uses keypoints detection and optical flow for +matching. 
+ */ +class CV_EXPORTS KeypointBasedMotionEstimator : public ImageMotionEstimatorBase +{ +public: + KeypointBasedMotionEstimator(Ptr estimator); + + virtual void setMotionModel(MotionModel val) CV_OVERRIDE { motionEstimator_->setMotionModel(val); } + virtual MotionModel motionModel() const CV_OVERRIDE { return motionEstimator_->motionModel(); } + + void setDetector(Ptr val) { detector_ = val; } + Ptr detector() const { return detector_; } + + void setOpticalFlowEstimator(Ptr val) { optFlowEstimator_ = val; } + Ptr opticalFlowEstimator() const { return optFlowEstimator_; } + + void setOutlierRejector(Ptr val) { outlierRejector_ = val; } + Ptr outlierRejector() const { return outlierRejector_; } + + virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0) CV_OVERRIDE; + Mat estimate(InputArray frame0, InputArray frame1, bool *ok = 0); + +private: + Ptr motionEstimator_; + Ptr detector_; + Ptr optFlowEstimator_; + Ptr outlierRejector_; + + std::vector status_; + std::vector keypointsPrev_; + std::vector pointsPrev_, points_; + std::vector pointsPrevGood_, pointsGood_; +}; + +#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW) + +class CV_EXPORTS KeypointBasedMotionEstimatorGpu : public ImageMotionEstimatorBase +{ +public: + KeypointBasedMotionEstimatorGpu(Ptr estimator); + + virtual void setMotionModel(MotionModel val) CV_OVERRIDE { motionEstimator_->setMotionModel(val); } + virtual MotionModel motionModel() const CV_OVERRIDE { return motionEstimator_->motionModel(); } + + void setOutlierRejector(Ptr val) { outlierRejector_ = val; } + Ptr outlierRejector() const { return outlierRejector_; } + + virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0) CV_OVERRIDE; + Mat estimate(const cuda::GpuMat &frame0, const cuda::GpuMat &frame1, bool *ok = 0); + +private: + Ptr motionEstimator_; + Ptr detector_; + SparsePyrLkOptFlowEstimatorGpu optFlowEstimator_; + Ptr outlierRejector_; + + cuda::GpuMat frame0_, grayFrame0_, frame1_; + cuda::GpuMat pointsPrev_, points_; + cuda::GpuMat status_; + + Mat hostPointsPrev_, hostPoints_; + std::vector hostPointsPrevTmp_, hostPointsTmp_; + std::vector rejectionStatus_; +}; + +#endif // defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW) + +/** @brief Computes motion between two frames assuming that all the intermediate motions are known. + +@param from Source frame index. +@param to Destination frame index. +@param motions Pair-wise motions. motions[i] denotes motion from the frame i to the frame i+1 +@return Motion from the Source frame to the Destination frame. + */ +CV_EXPORTS Mat getMotion(int from, int to, const std::vector &motions); + +//! @} + +} // namespace videostab +} // namespace cv + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/videostab/inpainting.hpp b/third_party/opencv/uos/sw_64/include/opencv2/videostab/inpainting.hpp new file mode 100644 index 00000000..9c123f02 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/videostab/inpainting.hpp @@ -0,0 +1,212 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
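As an editor's sketch (not part of the vendored header), the keypoint-based estimator and getMotion() declared above can be combined roughly as follows; the image paths are hypothetical:

#include <vector>
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/videostab/global_motion.hpp"

int main()
{
    using namespace cv;
    using namespace cv::videostab;

    // Hypothetical input frames; any three consecutive 8-bit frames would do.
    Mat frame0 = imread("frame0.png");
    Mat frame1 = imread("frame1.png");
    Mat frame2 = imread("frame2.png");

    // Keypoint detection + LK optical flow feeding a RANSAC affine estimator.
    Ptr<MotionEstimatorRansacL2> base = makePtr<MotionEstimatorRansacL2>(MM_AFFINE);
    Ptr<KeypointBasedMotionEstimator> est = makePtr<KeypointBasedMotionEstimator>(base);
    est->setDetector(GFTTDetector::create(500));

    bool ok = false;
    std::vector<Mat> motions;
    motions.push_back(est->estimate(frame0, frame1, &ok));   // motion 0 -> 1
    motions.push_back(est->estimate(frame1, frame2, &ok));   // motion 1 -> 2

    // Compose the pair-wise motions into the transformation from frame 0 to frame 2.
    Mat M02 = getMotion(0, 2, motions);
    return 0;
}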
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_VIDEOSTAB_INPAINTINT_HPP +#define OPENCV_VIDEOSTAB_INPAINTINT_HPP + +#include +#include "opencv2/core.hpp" +#include "opencv2/videostab/optical_flow.hpp" +#include "opencv2/videostab/fast_marching.hpp" +#include "opencv2/videostab/global_motion.hpp" +#include "opencv2/photo.hpp" + +namespace cv +{ +namespace videostab +{ + +//! @addtogroup videostab +//! 
@{ + +class CV_EXPORTS InpainterBase +{ +public: + InpainterBase() + : radius_(0), motionModel_(MM_UNKNOWN), frames_(0), motions_(0), + stabilizedFrames_(0), stabilizationMotions_(0) {} + + virtual ~InpainterBase() {} + + virtual void setRadius(int val) { radius_ = val; } + virtual int radius() const { return radius_; } + + virtual void setMotionModel(MotionModel val) { motionModel_ = val; } + virtual MotionModel motionModel() const { return motionModel_; } + + virtual void inpaint(int idx, Mat &frame, Mat &mask) = 0; + + + // data from stabilizer + + virtual void setFrames(const std::vector &val) { frames_ = &val; } + virtual const std::vector& frames() const { return *frames_; } + + virtual void setMotions(const std::vector &val) { motions_ = &val; } + virtual const std::vector& motions() const { return *motions_; } + + virtual void setStabilizedFrames(const std::vector &val) { stabilizedFrames_ = &val; } + virtual const std::vector& stabilizedFrames() const { return *stabilizedFrames_; } + + virtual void setStabilizationMotions(const std::vector &val) { stabilizationMotions_ = &val; } + virtual const std::vector& stabilizationMotions() const { return *stabilizationMotions_; } + +protected: + int radius_; + MotionModel motionModel_; + const std::vector *frames_; + const std::vector *motions_; + const std::vector *stabilizedFrames_; + const std::vector *stabilizationMotions_; +}; + +class CV_EXPORTS NullInpainter : public InpainterBase +{ +public: + virtual void inpaint(int /*idx*/, Mat &/*frame*/, Mat &/*mask*/) CV_OVERRIDE {} +}; + +class CV_EXPORTS InpaintingPipeline : public InpainterBase +{ +public: + void pushBack(Ptr inpainter) { inpainters_.push_back(inpainter); } + bool empty() const { return inpainters_.empty(); } + + virtual void setRadius(int val) CV_OVERRIDE; + virtual void setMotionModel(MotionModel val) CV_OVERRIDE; + virtual void setFrames(const std::vector &val) CV_OVERRIDE; + virtual void setMotions(const std::vector &val) CV_OVERRIDE; + virtual void setStabilizedFrames(const std::vector &val) CV_OVERRIDE; + virtual void setStabilizationMotions(const std::vector &val) CV_OVERRIDE; + + virtual void inpaint(int idx, Mat &frame, Mat &mask) CV_OVERRIDE; + +private: + std::vector > inpainters_; +}; + +class CV_EXPORTS ConsistentMosaicInpainter : public InpainterBase +{ +public: + ConsistentMosaicInpainter(); + + void setStdevThresh(float val) { stdevThresh_ = val; } + float stdevThresh() const { return stdevThresh_; } + + virtual void inpaint(int idx, Mat &frame, Mat &mask) CV_OVERRIDE; + +private: + float stdevThresh_; +}; + +class CV_EXPORTS MotionInpainter : public InpainterBase +{ +public: + MotionInpainter(); + + void setOptFlowEstimator(Ptr val) { optFlowEstimator_ = val; } + Ptr optFlowEstimator() const { return optFlowEstimator_; } + + void setFlowErrorThreshold(float val) { flowErrorThreshold_ = val; } + float flowErrorThreshold() const { return flowErrorThreshold_; } + + void setDistThreshold(float val) { distThresh_ = val; } + float distThresh() const { return distThresh_; } + + void setBorderMode(int val) { borderMode_ = val; } + int borderMode() const { return borderMode_; } + + virtual void inpaint(int idx, Mat &frame, Mat &mask) CV_OVERRIDE; + +private: + FastMarchingMethod fmm_; + Ptr optFlowEstimator_; + float flowErrorThreshold_; + float distThresh_; + int borderMode_; + + Mat frame1_, transformedFrame1_; + Mat_ grayFrame_, transformedGrayFrame1_; + Mat_ mask1_, transformedMask1_; + Mat_ flowX_, flowY_, flowErrors_; + Mat_ flowMask_; +}; + +class CV_EXPORTS 
ColorAverageInpainter : public InpainterBase +{ +public: + virtual void inpaint(int idx, Mat &frame, Mat &mask) CV_OVERRIDE; + +private: + FastMarchingMethod fmm_; +}; + +class CV_EXPORTS ColorInpainter : public InpainterBase +{ +public: + ColorInpainter(int method = INPAINT_TELEA, double radius = 2.); + + virtual void inpaint(int idx, Mat &frame, Mat &mask) CV_OVERRIDE; + +private: + int method_; + double radius_; + Mat invMask_; +}; + +inline ColorInpainter::ColorInpainter(int _method, double _radius) + : method_(_method), radius_(_radius) {} + +CV_EXPORTS void calcFlowMask( + const Mat &flowX, const Mat &flowY, const Mat &errors, float maxError, + const Mat &mask0, const Mat &mask1, Mat &flowMask); + +CV_EXPORTS void completeFrameAccordingToFlow( + const Mat &flowMask, const Mat &flowX, const Mat &flowY, const Mat &frame1, const Mat &mask1, + float distThresh, Mat& frame0, Mat &mask0); + +//! @} + +} // namespace videostab +} // namespace cv + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/videostab/log.hpp b/third_party/opencv/uos/sw_64/include/opencv2/videostab/log.hpp new file mode 100644 index 00000000..73e70499 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/videostab/log.hpp @@ -0,0 +1,80 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_VIDEOSTAB_LOG_HPP +#define OPENCV_VIDEOSTAB_LOG_HPP + +#include "opencv2/core.hpp" + +namespace cv +{ +namespace videostab +{ + +//! 
@addtogroup videostab +//! @{ + +class CV_EXPORTS ILog +{ +public: + virtual ~ILog() {} + virtual void print(const char *format, ...) = 0; +}; + +class CV_EXPORTS NullLog : public ILog +{ +public: + virtual void print(const char * /*format*/, ...) CV_OVERRIDE {} +}; + +class CV_EXPORTS LogToStdout : public ILog +{ +public: + virtual void print(const char *format, ...) CV_OVERRIDE; +}; + +//! @} + +} // namespace videostab +} // namespace cv + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/videostab/motion_core.hpp b/third_party/opencv/uos/sw_64/include/opencv2/videostab/motion_core.hpp new file mode 100644 index 00000000..4525cc7b --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/videostab/motion_core.hpp @@ -0,0 +1,129 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_VIDEOSTAB_MOTION_CORE_HPP +#define OPENCV_VIDEOSTAB_MOTION_CORE_HPP + +#include +#include "opencv2/core.hpp" + +namespace cv +{ +namespace videostab +{ + +//! @addtogroup videostab_motion +//! @{ + +/** @brief Describes motion model between two point clouds. + */ +enum MotionModel +{ + MM_TRANSLATION = 0, + MM_TRANSLATION_AND_SCALE = 1, + MM_ROTATION = 2, + MM_RIGID = 3, + MM_SIMILARITY = 4, + MM_AFFINE = 5, + MM_HOMOGRAPHY = 6, + MM_UNKNOWN = 7 +}; + +/** @brief Describes RANSAC method parameters. 
+ */ +struct CV_EXPORTS RansacParams +{ + int size; //!< subset size + float thresh; //!< max error to classify as inlier + float eps; //!< max outliers ratio + float prob; //!< probability of success + + RansacParams() : size(0), thresh(0), eps(0), prob(0) {} + /** @brief Constructor + @param size Subset size. + @param thresh Maximum re-projection error value to classify as inlier. + @param eps Maximum ratio of incorrect correspondences. + @param prob Required success probability. + */ + RansacParams(int size, float thresh, float eps, float prob); + + /** + @return Number of iterations that'll be performed by RANSAC method. + */ + int niters() const + { + return static_cast( + std::ceil(std::log(1 - prob) / std::log(1 - std::pow(1 - eps, size)))); + } + + /** + @param model Motion model. See cv::videostab::MotionModel. + @return Default RANSAC method parameters for the given motion model. + */ + static RansacParams default2dMotion(MotionModel model) + { + CV_Assert(model < MM_UNKNOWN); + if (model == MM_TRANSLATION) + return RansacParams(1, 0.5f, 0.5f, 0.99f); + if (model == MM_TRANSLATION_AND_SCALE) + return RansacParams(2, 0.5f, 0.5f, 0.99f); + if (model == MM_ROTATION) + return RansacParams(1, 0.5f, 0.5f, 0.99f); + if (model == MM_RIGID) + return RansacParams(2, 0.5f, 0.5f, 0.99f); + if (model == MM_SIMILARITY) + return RansacParams(2, 0.5f, 0.5f, 0.99f); + if (model == MM_AFFINE) + return RansacParams(3, 0.5f, 0.5f, 0.99f); + return RansacParams(4, 0.5f, 0.5f, 0.99f); + } +}; + +inline RansacParams::RansacParams(int _size, float _thresh, float _eps, float _prob) + : size(_size), thresh(_thresh), eps(_eps), prob(_prob) {} + +//! @} + +} // namespace videostab +} // namespace cv + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/videostab/motion_stabilizing.hpp b/third_party/opencv/uos/sw_64/include/opencv2/videostab/motion_stabilizing.hpp new file mode 100644 index 00000000..c50095bb --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/videostab/motion_stabilizing.hpp @@ -0,0 +1,174 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
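For the default affine parameters above (subset size 3, outlier ratio 0.5, success probability 0.99), niters() evaluates to ceil(log(0.01) / log(1 - 0.5^3)) ≈ ceil(34.5) = 35. A small check of that arithmetic (editor's sketch, not part of the vendored header):

#include <iostream>
#include "opencv2/videostab/motion_core.hpp"

int main()
{
    using namespace cv::videostab;

    RansacParams p = RansacParams::default2dMotion(MM_AFFINE);   // size=3, eps=0.5, prob=0.99
    std::cout << p.niters() << std::endl;                        // expected output: 35
    return 0;
}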
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_VIDEOSTAB_MOTION_STABILIZING_HPP +#define OPENCV_VIDEOSTAB_MOTION_STABILIZING_HPP + +#include +#include +#include "opencv2/core.hpp" +#include "opencv2/videostab/global_motion.hpp" + +namespace cv +{ +namespace videostab +{ + +//! @addtogroup videostab_motion +//! @{ + +class CV_EXPORTS IMotionStabilizer +{ +public: + virtual ~IMotionStabilizer() {} + + //! assumes that [0, size-1) is in or equals to [range.first, range.second) + virtual void stabilize( + int size, const std::vector &motions, std::pair range, + Mat *stabilizationMotions) = 0; +}; + +class CV_EXPORTS MotionStabilizationPipeline : public IMotionStabilizer +{ +public: + void pushBack(Ptr stabilizer) { stabilizers_.push_back(stabilizer); } + bool empty() const { return stabilizers_.empty(); } + + virtual void stabilize( + int size, const std::vector &motions, std::pair range, + Mat *stabilizationMotions) CV_OVERRIDE; + +private: + std::vector > stabilizers_; +}; + +class CV_EXPORTS MotionFilterBase : public IMotionStabilizer +{ +public: + virtual ~MotionFilterBase() {} + + virtual Mat stabilize( + int idx, const std::vector &motions, std::pair range) = 0; + + virtual void stabilize( + int size, const std::vector &motions, std::pair range, + Mat *stabilizationMotions) CV_OVERRIDE; +}; + +class CV_EXPORTS GaussianMotionFilter : public MotionFilterBase +{ +public: + GaussianMotionFilter(int radius = 15, float stdev = -1.f); + + void setParams(int radius, float stdev = -1.f); + int radius() const { return radius_; } + float stdev() const { return stdev_; } + + virtual Mat stabilize( + int idx, const std::vector &motions, std::pair range) CV_OVERRIDE; + +private: + int radius_; + float stdev_; + std::vector weight_; +}; + +inline GaussianMotionFilter::GaussianMotionFilter(int _radius, float _stdev) { setParams(_radius, _stdev); } + +class CV_EXPORTS LpMotionStabilizer : public IMotionStabilizer +{ +public: + LpMotionStabilizer(MotionModel model = MM_SIMILARITY); + + void setMotionModel(MotionModel val) { model_ = val; } + MotionModel motionModel() const { return model_; } + + void setFrameSize(Size val) { frameSize_ = val; } + Size frameSize() const { return frameSize_; } + + void setTrimRatio(float val) { trimRatio_ = val; } + float trimRatio() const { return trimRatio_; } + + void setWeight1(float val) { w1_ = val; } + float weight1() const { return w1_; } + + void setWeight2(float val) { w2_ = val; } + float weight2() const { return w2_; } + + void setWeight3(float val) { w3_ = val; } + float weight3() const { return w3_; } + + void setWeight4(float val) { w4_ = val; } + float weight4() const { return w4_; } + + virtual void stabilize( + int size, const std::vector &motions, std::pair range, + Mat 
*stabilizationMotions) CV_OVERRIDE; + +private: + MotionModel model_; + Size frameSize_; + float trimRatio_; + float w1_, w2_, w3_, w4_; + + std::vector obj_, collb_, colub_; + std::vector rows_, cols_; + std::vector elems_, rowlb_, rowub_; + + void set(int row, int col, double coef) + { + rows_.push_back(row); + cols_.push_back(col); + elems_.push_back(coef); + } +}; + +CV_EXPORTS Mat ensureInclusionConstraint(const Mat &M, Size size, float trimRatio); + +CV_EXPORTS float estimateOptimalTrimRatio(const Mat &M, Size size); + +//! @} + +} // namespace videostab +} // namespace + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/videostab/optical_flow.hpp b/third_party/opencv/uos/sw_64/include/opencv2/videostab/optical_flow.hpp new file mode 100644 index 00000000..5e06941d --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/videostab/optical_flow.hpp @@ -0,0 +1,150 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_VIDEOSTAB_OPTICAL_FLOW_HPP +#define OPENCV_VIDEOSTAB_OPTICAL_FLOW_HPP + +#include "opencv2/core.hpp" +#include "opencv2/opencv_modules.hpp" + +#ifdef HAVE_OPENCV_CUDAOPTFLOW + #include "opencv2/cudaoptflow.hpp" +#endif + +namespace cv +{ +namespace videostab +{ + +//! @addtogroup videostab +//! 
@{ + +class CV_EXPORTS ISparseOptFlowEstimator +{ +public: + virtual ~ISparseOptFlowEstimator() {} + virtual void run( + InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1, + OutputArray status, OutputArray errors) = 0; +}; + +class CV_EXPORTS IDenseOptFlowEstimator +{ +public: + virtual ~IDenseOptFlowEstimator() {} + virtual void run( + InputArray frame0, InputArray frame1, InputOutputArray flowX, InputOutputArray flowY, + OutputArray errors) = 0; +}; + +class CV_EXPORTS PyrLkOptFlowEstimatorBase +{ +public: + PyrLkOptFlowEstimatorBase() { setWinSize(Size(21, 21)); setMaxLevel(3); } + + virtual void setWinSize(Size val) { winSize_ = val; } + virtual Size winSize() const { return winSize_; } + + virtual void setMaxLevel(int val) { maxLevel_ = val; } + virtual int maxLevel() const { return maxLevel_; } + virtual ~PyrLkOptFlowEstimatorBase() {} + +protected: + Size winSize_; + int maxLevel_; +}; + +class CV_EXPORTS SparsePyrLkOptFlowEstimator + : public PyrLkOptFlowEstimatorBase, public ISparseOptFlowEstimator +{ +public: + virtual void run( + InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1, + OutputArray status, OutputArray errors) CV_OVERRIDE; +}; + +#ifdef HAVE_OPENCV_CUDAOPTFLOW + +class CV_EXPORTS SparsePyrLkOptFlowEstimatorGpu + : public PyrLkOptFlowEstimatorBase, public ISparseOptFlowEstimator +{ +public: + SparsePyrLkOptFlowEstimatorGpu(); + + virtual void run( + InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1, + OutputArray status, OutputArray errors) CV_OVERRIDE; + + void run(const cuda::GpuMat &frame0, const cuda::GpuMat &frame1, const cuda::GpuMat &points0, cuda::GpuMat &points1, + cuda::GpuMat &status, cuda::GpuMat &errors); + + void run(const cuda::GpuMat &frame0, const cuda::GpuMat &frame1, const cuda::GpuMat &points0, cuda::GpuMat &points1, + cuda::GpuMat &status); + +private: + Ptr optFlowEstimator_; + cuda::GpuMat frame0_, frame1_, points0_, points1_, status_, errors_; +}; + +class CV_EXPORTS DensePyrLkOptFlowEstimatorGpu + : public PyrLkOptFlowEstimatorBase, public IDenseOptFlowEstimator +{ +public: + DensePyrLkOptFlowEstimatorGpu(); + + virtual void run( + InputArray frame0, InputArray frame1, InputOutputArray flowX, InputOutputArray flowY, + OutputArray errors) CV_OVERRIDE; + +private: + Ptr optFlowEstimator_; + cuda::GpuMat frame0_, frame1_, flowX_, flowY_, errors_; +}; + +#endif + +//! @} + +} // namespace videostab +} // namespace cv + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/videostab/outlier_rejection.hpp b/third_party/opencv/uos/sw_64/include/opencv2/videostab/outlier_rejection.hpp new file mode 100644 index 00000000..1d29896b --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/videostab/outlier_rejection.hpp @@ -0,0 +1,101 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
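A short sketch of driving the sparse pyramidal Lucas-Kanade wrapper declared above on two frames (editor's illustration, not part of the vendored header; the image paths are hypothetical):

#include <vector>
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videostab/optical_flow.hpp"

int main()
{
    using namespace cv;
    using namespace cv::videostab;

    Mat frame0 = imread("frame0.png", IMREAD_GRAYSCALE);
    Mat frame1 = imread("frame1.png", IMREAD_GRAYSCALE);

    // Seed points to track from the first frame.
    std::vector<Point2f> points0, points1;
    goodFeaturesToTrack(frame0, points0, 300, 0.01, 8);

    SparsePyrLkOptFlowEstimator flow;
    flow.setWinSize(Size(21, 21));
    flow.setMaxLevel(3);

    std::vector<uchar> status;
    std::vector<float> errors;
    flow.run(frame0, frame1, points0, points1, status, errors);
    // status[i] != 0 marks points that were tracked into frame1 successfully.
    return 0;
}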
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_VIDEOSTAB_OUTLIER_REJECTION_HPP +#define OPENCV_VIDEOSTAB_OUTLIER_REJECTION_HPP + +#include +#include "opencv2/core.hpp" +#include "opencv2/videostab/motion_core.hpp" + +namespace cv +{ +namespace videostab +{ + +//! @addtogroup videostab +//! @{ + +class CV_EXPORTS IOutlierRejector +{ +public: + virtual ~IOutlierRejector() {} + + virtual void process( + Size frameSize, InputArray points0, InputArray points1, OutputArray mask) = 0; +}; + +class CV_EXPORTS NullOutlierRejector : public IOutlierRejector +{ +public: + virtual void process( + Size frameSize, InputArray points0, InputArray points1, OutputArray mask) CV_OVERRIDE; +}; + +class CV_EXPORTS TranslationBasedLocalOutlierRejector : public IOutlierRejector +{ +public: + TranslationBasedLocalOutlierRejector(); + + void setCellSize(Size val) { cellSize_ = val; } + Size cellSize() const { return cellSize_; } + + void setRansacParams(RansacParams val) { ransacParams_ = val; } + RansacParams ransacParams() const { return ransacParams_; } + + virtual void process( + Size frameSize, InputArray points0, InputArray points1, OutputArray mask) CV_OVERRIDE; + +private: + Size cellSize_; + RansacParams ransacParams_; + + typedef std::vector Cell; + std::vector grid_; +}; + +//! @} + +} // namespace videostab +} // namespace cv + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/videostab/ring_buffer.hpp b/third_party/opencv/uos/sw_64/include/opencv2/videostab/ring_buffer.hpp new file mode 100644 index 00000000..55d52444 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/videostab/ring_buffer.hpp @@ -0,0 +1,72 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
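The grid-based rejector above is typically plugged into a keypoint-based motion estimator; a brief sketch (editor's illustration, not part of the vendored header):

#include "opencv2/videostab/global_motion.hpp"
#include "opencv2/videostab/outlier_rejection.hpp"

int main()
{
    using namespace cv;
    using namespace cv::videostab;

    Ptr<TranslationBasedLocalOutlierRejector> rejector =
            makePtr<TranslationBasedLocalOutlierRejector>();
    rejector->setCellSize(Size(50, 50));                                   // grid cell in pixels
    rejector->setRansacParams(RansacParams::default2dMotion(MM_TRANSLATION));

    // Locally inconsistent matches are discarded before the global motion is fitted.
    Ptr<KeypointBasedMotionEstimator> est = makePtr<KeypointBasedMotionEstimator>(
            makePtr<MotionEstimatorRansacL2>(MM_AFFINE));
    est->setOutlierRejector(rejector);
    return 0;
}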
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_VIDEOSTAB_RING_BUFFER_HPP +#define OPENCV_VIDEOSTAB_RING_BUFFER_HPP + +#include +#include "opencv2/imgproc.hpp" + +namespace cv +{ +namespace videostab +{ + +//! @addtogroup videostab +//! @{ + +template inline T& at(int idx, std::vector &items) +{ + return items[cv::borderInterpolate(idx, static_cast(items.size()), cv::BORDER_WRAP)]; +} + +template inline const T& at(int idx, const std::vector &items) +{ + return items[cv::borderInterpolate(idx, static_cast(items.size()), cv::BORDER_WRAP)]; +} + +//! @} + +} // namespace videostab +} // namespace cv + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/videostab/stabilizer.hpp b/third_party/opencv/uos/sw_64/include/opencv2/videostab/stabilizer.hpp new file mode 100644 index 00000000..634a0aa9 --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/videostab/stabilizer.hpp @@ -0,0 +1,200 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
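The two at() helpers above simply wrap out-of-range indices around the vector via cv::borderInterpolate with BORDER_WRAP, e.g. (editor's sketch, not part of the vendored header):

#include <iostream>
#include <vector>
#include "opencv2/videostab/ring_buffer.hpp"

int main()
{
    std::vector<int> items;
    items.push_back(10);
    items.push_back(20);
    items.push_back(30);

    // Index -1 wraps to the last element, index 3 wraps back to the first one.
    std::cout << cv::videostab::at(-1, items) << " "
              << cv::videostab::at(3, items) << std::endl;   // prints "30 10"
    return 0;
}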
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_VIDEOSTAB_STABILIZER_HPP +#define OPENCV_VIDEOSTAB_STABILIZER_HPP + +#include +#include +#include "opencv2/core.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/videostab/global_motion.hpp" +#include "opencv2/videostab/motion_stabilizing.hpp" +#include "opencv2/videostab/frame_source.hpp" +#include "opencv2/videostab/log.hpp" +#include "opencv2/videostab/inpainting.hpp" +#include "opencv2/videostab/deblurring.hpp" +#include "opencv2/videostab/wobble_suppression.hpp" + +namespace cv +{ +namespace videostab +{ + +//! @addtogroup videostab +//! 
@{ + +class CV_EXPORTS StabilizerBase +{ +public: + virtual ~StabilizerBase() {} + + void setLog(Ptr ilog) { log_ = ilog; } + Ptr log() const { return log_; } + + void setRadius(int val) { radius_ = val; } + int radius() const { return radius_; } + + void setFrameSource(Ptr val) { frameSource_ = val; } + Ptr frameSource() const { return frameSource_; } + + void setMotionEstimator(Ptr val) { motionEstimator_ = val; } + Ptr motionEstimator() const { return motionEstimator_; } + + void setDeblurer(Ptr val) { deblurer_ = val; } + Ptr deblurrer() const { return deblurer_; } + + void setTrimRatio(float val) { trimRatio_ = val; } + float trimRatio() const { return trimRatio_; } + + void setCorrectionForInclusion(bool val) { doCorrectionForInclusion_ = val; } + bool doCorrectionForInclusion() const { return doCorrectionForInclusion_; } + + void setBorderMode(int val) { borderMode_ = val; } + int borderMode() const { return borderMode_; } + + void setInpainter(Ptr val) { inpainter_ = val; } + Ptr inpainter() const { return inpainter_; } + +protected: + StabilizerBase(); + + void reset(); + Mat nextStabilizedFrame(); + bool doOneIteration(); + virtual void setUp(const Mat &firstFrame); + virtual Mat estimateMotion() = 0; + virtual Mat estimateStabilizationMotion() = 0; + void stabilizeFrame(); + virtual Mat postProcessFrame(const Mat &frame); + void logProcessingTime(); + + Ptr log_; + Ptr frameSource_; + Ptr motionEstimator_; + Ptr deblurer_; + Ptr inpainter_; + int radius_; + float trimRatio_; + bool doCorrectionForInclusion_; + int borderMode_; + + Size frameSize_; + Mat frameMask_; + int curPos_; + int curStabilizedPos_; + bool doDeblurring_; + Mat preProcessedFrame_; + bool doInpainting_; + Mat inpaintingMask_; + Mat finalFrame_; + std::vector frames_; + std::vector motions_; // motions_[i] is the motion from i-th to i+1-th frame + std::vector blurrinessRates_; + std::vector stabilizedFrames_; + std::vector stabilizedMasks_; + std::vector stabilizationMotions_; + clock_t processingStartTime_; +}; + +class CV_EXPORTS OnePassStabilizer : public StabilizerBase, public IFrameSource +{ +public: + OnePassStabilizer(); + + void setMotionFilter(Ptr val) { motionFilter_ = val; } + Ptr motionFilter() const { return motionFilter_; } + + virtual void reset() CV_OVERRIDE; + virtual Mat nextFrame() CV_OVERRIDE { return nextStabilizedFrame(); } + +protected: + virtual void setUp(const Mat &firstFrame) CV_OVERRIDE; + virtual Mat estimateMotion() CV_OVERRIDE; + virtual Mat estimateStabilizationMotion() CV_OVERRIDE; + virtual Mat postProcessFrame(const Mat &frame) CV_OVERRIDE; + + Ptr motionFilter_; +}; + +class CV_EXPORTS TwoPassStabilizer : public StabilizerBase, public IFrameSource +{ +public: + TwoPassStabilizer(); + + void setMotionStabilizer(Ptr val) { motionStabilizer_ = val; } + Ptr motionStabilizer() const { return motionStabilizer_; } + + void setWobbleSuppressor(Ptr val) { wobbleSuppressor_ = val; } + Ptr wobbleSuppressor() const { return wobbleSuppressor_; } + + void setEstimateTrimRatio(bool val) { mustEstTrimRatio_ = val; } + bool mustEstimateTrimaRatio() const { return mustEstTrimRatio_; } + + virtual void reset() CV_OVERRIDE; + virtual Mat nextFrame() CV_OVERRIDE; + +protected: + void runPrePassIfNecessary(); + + virtual void setUp(const Mat &firstFrame) CV_OVERRIDE; + virtual Mat estimateMotion() CV_OVERRIDE; + virtual Mat estimateStabilizationMotion() CV_OVERRIDE; + virtual Mat postProcessFrame(const Mat &frame) CV_OVERRIDE; + + Ptr motionStabilizer_; + Ptr wobbleSuppressor_; + bool 
mustEstTrimRatio_; + + int frameCount_; + bool isPrePassDone_; + bool doWobbleSuppression_; + std::vector motions2_; + Mat suppressedFrame_; +}; + +//! @} + +} // namespace videostab +} // namespace cv + +#endif diff --git a/third_party/opencv/uos/sw_64/include/opencv2/videostab/wobble_suppression.hpp b/third_party/opencv/uos/sw_64/include/opencv2/videostab/wobble_suppression.hpp new file mode 100644 index 00000000..d60ae6dd --- /dev/null +++ b/third_party/opencv/uos/sw_64/include/opencv2/videostab/wobble_suppression.hpp @@ -0,0 +1,140 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef OPENCV_VIDEOSTAB_WOBBLE_SUPPRESSION_HPP +#define OPENCV_VIDEOSTAB_WOBBLE_SUPPRESSION_HPP + +#include +#include "opencv2/core.hpp" +#include "opencv2/core/cuda.hpp" +#include "opencv2/videostab/global_motion.hpp" +#include "opencv2/videostab/log.hpp" + +namespace cv +{ +namespace videostab +{ + +//! @addtogroup videostab +//! 
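A typical single-pass stabilization loop built from the classes above might look like this (editor's sketch, not part of the vendored header; it assumes the VideoFileSource class from the included frame_source.hpp, and the video path is hypothetical):

#include "opencv2/core.hpp"
#include "opencv2/videostab/stabilizer.hpp"
#include "opencv2/videostab/frame_source.hpp"

int main()
{
    using namespace cv;
    using namespace cv::videostab;

    OnePassStabilizer stab;
    stab.setFrameSource(makePtr<VideoFileSource>("shaky.avi"));
    stab.setMotionEstimator(makePtr<KeypointBasedMotionEstimator>(
            makePtr<MotionEstimatorRansacL2>(MM_SIMILARITY)));
    stab.setMotionFilter(makePtr<GaussianMotionFilter>(15));   // Gaussian smoothing over 15 frames
    stab.setRadius(15);
    stab.setBorderMode(BORDER_REPLICATE);

    // nextFrame() returns an empty Mat once the source is exhausted.
    for (Mat frame = stab.nextFrame(); !frame.empty(); frame = stab.nextFrame())
    {
        // write or display the stabilized frame here
    }
    return 0;
}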
@{ + +class CV_EXPORTS WobbleSuppressorBase +{ +public: + WobbleSuppressorBase(); + + virtual ~WobbleSuppressorBase() {} + + void setMotionEstimator(Ptr val) { motionEstimator_ = val; } + Ptr motionEstimator() const { return motionEstimator_; } + + virtual void suppress(int idx, const Mat &frame, Mat &result) = 0; + + + // data from stabilizer + + virtual void setFrameCount(int val) { frameCount_ = val; } + virtual int frameCount() const { return frameCount_; } + + virtual void setMotions(const std::vector &val) { motions_ = &val; } + virtual const std::vector& motions() const { return *motions_; } + + virtual void setMotions2(const std::vector &val) { motions2_ = &val; } + virtual const std::vector& motions2() const { return *motions2_; } + + virtual void setStabilizationMotions(const std::vector &val) { stabilizationMotions_ = &val; } + virtual const std::vector& stabilizationMotions() const { return *stabilizationMotions_; } + +protected: + Ptr motionEstimator_; + int frameCount_; + const std::vector *motions_; + const std::vector *motions2_; + const std::vector *stabilizationMotions_; +}; + +class CV_EXPORTS NullWobbleSuppressor : public WobbleSuppressorBase +{ +public: + virtual void suppress(int idx, const Mat &frame, Mat &result) CV_OVERRIDE; +}; + +class CV_EXPORTS MoreAccurateMotionWobbleSuppressorBase : public WobbleSuppressorBase +{ +public: + virtual void setPeriod(int val) { period_ = val; } + virtual int period() const { return period_; } + +protected: + MoreAccurateMotionWobbleSuppressorBase() { setPeriod(30); } + + int period_; +}; + +class CV_EXPORTS MoreAccurateMotionWobbleSuppressor : public MoreAccurateMotionWobbleSuppressorBase +{ +public: + virtual void suppress(int idx, const Mat &frame, Mat &result) CV_OVERRIDE; + +private: + Mat_ mapx_, mapy_; +}; + +#if defined(HAVE_OPENCV_CUDAWARPING) +class CV_EXPORTS MoreAccurateMotionWobbleSuppressorGpu : public MoreAccurateMotionWobbleSuppressorBase +{ +public: + void suppress(int idx, const cuda::GpuMat &frame, cuda::GpuMat &result); + virtual void suppress(int idx, const Mat &frame, Mat &result) CV_OVERRIDE; + +private: + cuda::GpuMat frameDevice_, resultDevice_; + cuda::GpuMat mapx_, mapy_; +}; +#endif + +//! 
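The wobble suppressor above attaches to the two-pass stabilizer declared earlier in the same way; a brief sketch (editor's illustration, not part of the vendored header; VideoFileSource is assumed from frame_source.hpp and the path is hypothetical):

#include "opencv2/core.hpp"
#include "opencv2/videostab/stabilizer.hpp"
#include "opencv2/videostab/frame_source.hpp"

int main()
{
    using namespace cv;
    using namespace cv::videostab;

    TwoPassStabilizer stab;
    stab.setFrameSource(makePtr<VideoFileSource>("shaky.avi"));
    stab.setMotionStabilizer(makePtr<GaussianMotionFilter>(15));
    stab.setEstimateTrimRatio(true);

    Ptr<MoreAccurateMotionWobbleSuppressor> ws = makePtr<MoreAccurateMotionWobbleSuppressor>();
    ws->setPeriod(30);                  // key-frame period used by the suppressor (default 30)
    stab.setWobbleSuppressor(ws);

    for (Mat frame = stab.nextFrame(); !frame.empty(); frame = stab.nextFrame())
    {
        // consume the wobble-suppressed, stabilized frames here
    }
    return 0;
}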
@} + +} // namespace videostab +} // namespace cv + +#endif diff --git a/third_party/opencv/uos/sw_64/lib/libIlmImf.a b/third_party/opencv/uos/sw_64/lib/libIlmImf.a new file mode 100644 index 00000000..e5f6fd0a Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/libIlmImf.a differ diff --git a/third_party/opencv/uos/sw_64/lib/liblibjasper.a b/third_party/opencv/uos/sw_64/lib/liblibjasper.a new file mode 100644 index 00000000..323fb8c9 Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/liblibjasper.a differ diff --git a/third_party/opencv/uos/sw_64/lib/liblibjpeg-turbo.a b/third_party/opencv/uos/sw_64/lib/liblibjpeg-turbo.a new file mode 100644 index 00000000..3158fdfe Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/liblibjpeg-turbo.a differ diff --git a/third_party/opencv/uos/sw_64/lib/liblibpng.a b/third_party/opencv/uos/sw_64/lib/liblibpng.a new file mode 100644 index 00000000..cb80aaa0 Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/liblibpng.a differ diff --git a/third_party/opencv/uos/sw_64/lib/liblibprotobuf.a b/third_party/opencv/uos/sw_64/lib/liblibprotobuf.a new file mode 100644 index 00000000..839f7e8c Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/liblibprotobuf.a differ diff --git a/third_party/opencv/uos/sw_64/lib/liblibtiff.a b/third_party/opencv/uos/sw_64/lib/liblibtiff.a new file mode 100644 index 00000000..ed47a780 Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/liblibtiff.a differ diff --git a/third_party/opencv/uos/sw_64/lib/liblibwebp.a b/third_party/opencv/uos/sw_64/lib/liblibwebp.a new file mode 100644 index 00000000..c91e123d Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/liblibwebp.a differ diff --git a/third_party/opencv/uos/sw_64/lib/libopencv_calib3d.a b/third_party/opencv/uos/sw_64/lib/libopencv_calib3d.a new file mode 100644 index 00000000..02659ad8 Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/libopencv_calib3d.a differ diff --git a/third_party/opencv/uos/sw_64/lib/libopencv_core.a b/third_party/opencv/uos/sw_64/lib/libopencv_core.a new file mode 100644 index 00000000..ad238c58 Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/libopencv_core.a differ diff --git a/third_party/opencv/uos/sw_64/lib/libopencv_dnn.a b/third_party/opencv/uos/sw_64/lib/libopencv_dnn.a new file mode 100644 index 00000000..4b77f56b Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/libopencv_dnn.a differ diff --git a/third_party/opencv/uos/sw_64/lib/libopencv_features2d.a b/third_party/opencv/uos/sw_64/lib/libopencv_features2d.a new file mode 100644 index 00000000..f3ffb630 Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/libopencv_features2d.a differ diff --git a/third_party/opencv/uos/sw_64/lib/libopencv_flann.a b/third_party/opencv/uos/sw_64/lib/libopencv_flann.a new file mode 100644 index 00000000..092de73e Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/libopencv_flann.a differ diff --git a/third_party/opencv/uos/sw_64/lib/libopencv_highgui.a b/third_party/opencv/uos/sw_64/lib/libopencv_highgui.a new file mode 100644 index 00000000..ba40082b Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/libopencv_highgui.a differ diff --git a/third_party/opencv/uos/sw_64/lib/libopencv_imgcodecs.a b/third_party/opencv/uos/sw_64/lib/libopencv_imgcodecs.a new file mode 100644 index 00000000..bf8d5627 Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/libopencv_imgcodecs.a differ diff --git 
a/third_party/opencv/uos/sw_64/lib/libopencv_imgproc.a b/third_party/opencv/uos/sw_64/lib/libopencv_imgproc.a new file mode 100644 index 00000000..bed11cb3 Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/libopencv_imgproc.a differ diff --git a/third_party/opencv/uos/sw_64/lib/libopencv_ml.a b/third_party/opencv/uos/sw_64/lib/libopencv_ml.a new file mode 100644 index 00000000..317022f0 Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/libopencv_ml.a differ diff --git a/third_party/opencv/uos/sw_64/lib/libopencv_objdetect.a b/third_party/opencv/uos/sw_64/lib/libopencv_objdetect.a new file mode 100644 index 00000000..7a8e41ca Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/libopencv_objdetect.a differ diff --git a/third_party/opencv/uos/sw_64/lib/libopencv_photo.a b/third_party/opencv/uos/sw_64/lib/libopencv_photo.a new file mode 100644 index 00000000..8d5d2d86 Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/libopencv_photo.a differ diff --git a/third_party/opencv/uos/sw_64/lib/libopencv_shape.a b/third_party/opencv/uos/sw_64/lib/libopencv_shape.a new file mode 100644 index 00000000..88ab1894 Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/libopencv_shape.a differ diff --git a/third_party/opencv/uos/sw_64/lib/libopencv_stitching.a b/third_party/opencv/uos/sw_64/lib/libopencv_stitching.a new file mode 100644 index 00000000..03031bc4 Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/libopencv_stitching.a differ diff --git a/third_party/opencv/uos/sw_64/lib/libopencv_superres.a b/third_party/opencv/uos/sw_64/lib/libopencv_superres.a new file mode 100644 index 00000000..b0222411 Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/libopencv_superres.a differ diff --git a/third_party/opencv/uos/sw_64/lib/libopencv_video.a b/third_party/opencv/uos/sw_64/lib/libopencv_video.a new file mode 100644 index 00000000..a579e825 Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/libopencv_video.a differ diff --git a/third_party/opencv/uos/sw_64/lib/libopencv_videoio.a b/third_party/opencv/uos/sw_64/lib/libopencv_videoio.a new file mode 100644 index 00000000..5e94588a Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/libopencv_videoio.a differ diff --git a/third_party/opencv/uos/sw_64/lib/libopencv_videostab.a b/third_party/opencv/uos/sw_64/lib/libopencv_videostab.a new file mode 100644 index 00000000..e808d763 Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/libopencv_videostab.a differ diff --git a/third_party/opencv/uos/sw_64/lib/libquirc.a b/third_party/opencv/uos/sw_64/lib/libquirc.a new file mode 100644 index 00000000..6b0b91a3 Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/libquirc.a differ diff --git a/third_party/opencv/uos/sw_64/lib/libzlib.a b/third_party/opencv/uos/sw_64/lib/libzlib.a new file mode 100644 index 00000000..2f1e6437 Binary files /dev/null and b/third_party/opencv/uos/sw_64/lib/libzlib.a differ diff --git a/third_party/opencv/uos/sw_64/lib/pkgconfig/opencv.pc b/third_party/opencv/uos/sw_64/lib/pkgconfig/opencv.pc new file mode 100644 index 00000000..57493d6d --- /dev/null +++ b/third_party/opencv/uos/sw_64/lib/pkgconfig/opencv.pc @@ -0,0 +1,14 @@ +# Package Information for pkg-config + +prefix=/home/SW2023/sane/code_app/third_party/opencv/opencv-3.4.16/opencv-3.4.16/build/release +exec_prefix=${prefix} +libdir=${exec_prefix}/lib +includedir_old=${prefix}/include/opencv +includedir_new=${prefix}/include + +Name: OpenCV +Description: Open Source 
Computer Vision Library +Version: 3.4.16 +Libs: -L${exec_prefix}/lib -lopencv_dnn -lopencv_highgui -lopencv_ml -lopencv_objdetect -lopencv_shape -lopencv_stitching -lopencv_superres -lopencv_videostab -lopencv_calib3d -lopencv_videoio -lopencv_imgcodecs -lopencv_features2d -lopencv_video -lopencv_photo -lopencv_imgproc -lopencv_flann -lopencv_core +Libs.private: -L${exec_prefix}/share/OpenCV/3rdparty/lib -llibprotobuf -llibjpeg-turbo -llibwebp -llibpng -llibtiff -llibjasper -lIlmImf -lzlib -lquirc -ldl -lm -lpthread -lrt +Cflags: -I${includedir_old} -I${includedir_new} diff --git a/third_party/openssl/uos/sw_64/include/openssl/aes.h b/third_party/openssl/uos/sw_64/include/openssl/aes.h new file mode 100644 index 00000000..245c552a --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/aes.h @@ -0,0 +1,92 @@ +/* + * Copyright 2002-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_AES_H +# define HEADER_AES_H + +# include + +# include +# ifdef __cplusplus +extern "C" { +# endif + +# define AES_ENCRYPT 1 +# define AES_DECRYPT 0 + +/* + * Because array size can't be a const in C, the following two are macros. + * Both sizes are in bytes. + */ +# define AES_MAXNR 14 +# define AES_BLOCK_SIZE 16 + +/* This should be a hidden type, but EVP requires that the size be known */ +struct aes_key_st { +# ifdef AES_LONG + unsigned long rd_key[4 * (AES_MAXNR + 1)]; +# else + unsigned int rd_key[4 * (AES_MAXNR + 1)]; +# endif + int rounds; +}; +typedef struct aes_key_st AES_KEY; + +const char *AES_options(void); + +int AES_set_encrypt_key(const unsigned char *userKey, const int bits, + AES_KEY *key); +int AES_set_decrypt_key(const unsigned char *userKey, const int bits, + AES_KEY *key); + +void AES_encrypt(const unsigned char *in, unsigned char *out, + const AES_KEY *key); +void AES_decrypt(const unsigned char *in, unsigned char *out, + const AES_KEY *key); + +void AES_ecb_encrypt(const unsigned char *in, unsigned char *out, + const AES_KEY *key, const int enc); +void AES_cbc_encrypt(const unsigned char *in, unsigned char *out, + size_t length, const AES_KEY *key, + unsigned char *ivec, const int enc); +void AES_cfb128_encrypt(const unsigned char *in, unsigned char *out, + size_t length, const AES_KEY *key, + unsigned char *ivec, int *num, const int enc); +void AES_cfb1_encrypt(const unsigned char *in, unsigned char *out, + size_t length, const AES_KEY *key, + unsigned char *ivec, int *num, const int enc); +void AES_cfb8_encrypt(const unsigned char *in, unsigned char *out, + size_t length, const AES_KEY *key, + unsigned char *ivec, int *num, const int enc); +void AES_ofb128_encrypt(const unsigned char *in, unsigned char *out, + size_t length, const AES_KEY *key, + unsigned char *ivec, int *num); +/* NB: the IV is _two_ blocks long */ +void AES_ige_encrypt(const unsigned char *in, unsigned char *out, + size_t length, const AES_KEY *key, + unsigned char *ivec, const int enc); +/* NB: the IV is _four_ blocks long */ +void AES_bi_ige_encrypt(const unsigned char *in, unsigned char *out, + size_t length, const AES_KEY *key, + const AES_KEY *key2, const unsigned char *ivec, + const int enc); + +int AES_wrap_key(AES_KEY *key, const unsigned char *iv, + unsigned char *out, + const unsigned char *in, unsigned int inlen); +int 
AES_unwrap_key(AES_KEY *key, const unsigned char *iv, + unsigned char *out, + const unsigned char *in, unsigned int inlen); + + +# ifdef __cplusplus +} +# endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/asn1.h b/third_party/openssl/uos/sw_64/include/openssl/asn1.h new file mode 100644 index 00000000..9522eec1 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/asn1.h @@ -0,0 +1,886 @@ +/* + * Copyright 1995-2017 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_ASN1_H +# define HEADER_ASN1_H + +# include +# include +# include +# include +# include +# include +# include + +# include +# if OPENSSL_API_COMPAT < 0x10100000L +# include +# endif + +# ifdef OPENSSL_BUILD_SHLIBCRYPTO +# undef OPENSSL_EXTERN +# define OPENSSL_EXTERN OPENSSL_EXPORT +# endif + +#ifdef __cplusplus +extern "C" { +#endif + +# define V_ASN1_UNIVERSAL 0x00 +# define V_ASN1_APPLICATION 0x40 +# define V_ASN1_CONTEXT_SPECIFIC 0x80 +# define V_ASN1_PRIVATE 0xc0 + +# define V_ASN1_CONSTRUCTED 0x20 +# define V_ASN1_PRIMITIVE_TAG 0x1f +# define V_ASN1_PRIMATIVE_TAG /*compat*/ V_ASN1_PRIMITIVE_TAG + +# define V_ASN1_APP_CHOOSE -2/* let the recipient choose */ +# define V_ASN1_OTHER -3/* used in ASN1_TYPE */ +# define V_ASN1_ANY -4/* used in ASN1 template code */ + +# define V_ASN1_UNDEF -1 +/* ASN.1 tag values */ +# define V_ASN1_EOC 0 +# define V_ASN1_BOOLEAN 1 /**/ +# define V_ASN1_INTEGER 2 +# define V_ASN1_BIT_STRING 3 +# define V_ASN1_OCTET_STRING 4 +# define V_ASN1_NULL 5 +# define V_ASN1_OBJECT 6 +# define V_ASN1_OBJECT_DESCRIPTOR 7 +# define V_ASN1_EXTERNAL 8 +# define V_ASN1_REAL 9 +# define V_ASN1_ENUMERATED 10 +# define V_ASN1_UTF8STRING 12 +# define V_ASN1_SEQUENCE 16 +# define V_ASN1_SET 17 +# define V_ASN1_NUMERICSTRING 18 /**/ +# define V_ASN1_PRINTABLESTRING 19 +# define V_ASN1_T61STRING 20 +# define V_ASN1_TELETEXSTRING 20/* alias */ +# define V_ASN1_VIDEOTEXSTRING 21 /**/ +# define V_ASN1_IA5STRING 22 +# define V_ASN1_UTCTIME 23 +# define V_ASN1_GENERALIZEDTIME 24 /**/ +# define V_ASN1_GRAPHICSTRING 25 /**/ +# define V_ASN1_ISO64STRING 26 /**/ +# define V_ASN1_VISIBLESTRING 26/* alias */ +# define V_ASN1_GENERALSTRING 27 /**/ +# define V_ASN1_UNIVERSALSTRING 28 /**/ +# define V_ASN1_BMPSTRING 30 + +/* + * NB the constants below are used internally by ASN1_INTEGER + * and ASN1_ENUMERATED to indicate the sign. They are *not* on + * the wire tag values. 
+ */ + +# define V_ASN1_NEG 0x100 +# define V_ASN1_NEG_INTEGER (2 | V_ASN1_NEG) +# define V_ASN1_NEG_ENUMERATED (10 | V_ASN1_NEG) + +/* For use with d2i_ASN1_type_bytes() */ +# define B_ASN1_NUMERICSTRING 0x0001 +# define B_ASN1_PRINTABLESTRING 0x0002 +# define B_ASN1_T61STRING 0x0004 +# define B_ASN1_TELETEXSTRING 0x0004 +# define B_ASN1_VIDEOTEXSTRING 0x0008 +# define B_ASN1_IA5STRING 0x0010 +# define B_ASN1_GRAPHICSTRING 0x0020 +# define B_ASN1_ISO64STRING 0x0040 +# define B_ASN1_VISIBLESTRING 0x0040 +# define B_ASN1_GENERALSTRING 0x0080 +# define B_ASN1_UNIVERSALSTRING 0x0100 +# define B_ASN1_OCTET_STRING 0x0200 +# define B_ASN1_BIT_STRING 0x0400 +# define B_ASN1_BMPSTRING 0x0800 +# define B_ASN1_UNKNOWN 0x1000 +# define B_ASN1_UTF8STRING 0x2000 +# define B_ASN1_UTCTIME 0x4000 +# define B_ASN1_GENERALIZEDTIME 0x8000 +# define B_ASN1_SEQUENCE 0x10000 +/* For use with ASN1_mbstring_copy() */ +# define MBSTRING_FLAG 0x1000 +# define MBSTRING_UTF8 (MBSTRING_FLAG) +# define MBSTRING_ASC (MBSTRING_FLAG|1) +# define MBSTRING_BMP (MBSTRING_FLAG|2) +# define MBSTRING_UNIV (MBSTRING_FLAG|4) +# define SMIME_OLDMIME 0x400 +# define SMIME_CRLFEOL 0x800 +# define SMIME_STREAM 0x1000 + struct X509_algor_st; +DEFINE_STACK_OF(X509_ALGOR) + +# define ASN1_STRING_FLAG_BITS_LEFT 0x08/* Set if 0x07 has bits left value */ +/* + * This indicates that the ASN1_STRING is not a real value but just a place + * holder for the location where indefinite length constructed data should be + * inserted in the memory buffer + */ +# define ASN1_STRING_FLAG_NDEF 0x010 + +/* + * This flag is used by the CMS code to indicate that a string is not + * complete and is a place holder for content when it had all been accessed. + * The flag will be reset when content has been written to it. + */ + +# define ASN1_STRING_FLAG_CONT 0x020 +/* + * This flag is used by ASN1 code to indicate an ASN1_STRING is an MSTRING + * type. + */ +# define ASN1_STRING_FLAG_MSTRING 0x040 +/* String is embedded and only content should be freed */ +# define ASN1_STRING_FLAG_EMBED 0x080 +/* String should be parsed in RFC 5280's time format */ +# define ASN1_STRING_FLAG_X509_TIME 0x100 +/* This is the base type that holds just about everything :-) */ +struct asn1_string_st { + int length; + int type; + unsigned char *data; + /* + * The value of the following field depends on the type being held. It + * is mostly being used for BIT_STRING so if the input data has a + * non-zero 'unused bits' value, it will be handled correctly + */ + long flags; +}; + +/* + * ASN1_ENCODING structure: this is used to save the received encoding of an + * ASN1 type. This is useful to get round problems with invalid encodings + * which can break signatures. + */ + +typedef struct ASN1_ENCODING_st { + unsigned char *enc; /* DER encoding */ + long len; /* Length of encoding */ + int modified; /* set to 1 if 'enc' is invalid */ +} ASN1_ENCODING; + +/* Used with ASN1 LONG type: if a long is set to this it is omitted */ +# define ASN1_LONG_UNDEF 0x7fffffffL + +# define STABLE_FLAGS_MALLOC 0x01 +/* + * A zero passed to ASN1_STRING_TABLE_new_add for the flags is interpreted + * as "don't change" and STABLE_FLAGS_MALLOC is always set. By setting + * STABLE_FLAGS_MALLOC only we can clear the existing value. Use the alias + * STABLE_FLAGS_CLEAR to reflect this. 
+ */ +# define STABLE_FLAGS_CLEAR STABLE_FLAGS_MALLOC +# define STABLE_NO_MASK 0x02 +# define DIRSTRING_TYPE \ + (B_ASN1_PRINTABLESTRING|B_ASN1_T61STRING|B_ASN1_BMPSTRING|B_ASN1_UTF8STRING) +# define PKCS9STRING_TYPE (DIRSTRING_TYPE|B_ASN1_IA5STRING) + +typedef struct asn1_string_table_st { + int nid; + long minsize; + long maxsize; + unsigned long mask; + unsigned long flags; +} ASN1_STRING_TABLE; + +DEFINE_STACK_OF(ASN1_STRING_TABLE) + +/* size limits: this stuff is taken straight from RFC2459 */ + +# define ub_name 32768 +# define ub_common_name 64 +# define ub_locality_name 128 +# define ub_state_name 128 +# define ub_organization_name 64 +# define ub_organization_unit_name 64 +# define ub_title 64 +# define ub_email_address 128 + +/* + * Declarations for template structures: for full definitions see asn1t.h + */ +typedef struct ASN1_TEMPLATE_st ASN1_TEMPLATE; +typedef struct ASN1_TLC_st ASN1_TLC; +/* This is just an opaque pointer */ +typedef struct ASN1_VALUE_st ASN1_VALUE; + +/* Declare ASN1 functions: the implement macro in in asn1t.h */ + +# define DECLARE_ASN1_FUNCTIONS(type) DECLARE_ASN1_FUNCTIONS_name(type, type) + +# define DECLARE_ASN1_ALLOC_FUNCTIONS(type) \ + DECLARE_ASN1_ALLOC_FUNCTIONS_name(type, type) + +# define DECLARE_ASN1_FUNCTIONS_name(type, name) \ + DECLARE_ASN1_ALLOC_FUNCTIONS_name(type, name) \ + DECLARE_ASN1_ENCODE_FUNCTIONS(type, name, name) + +# define DECLARE_ASN1_FUNCTIONS_fname(type, itname, name) \ + DECLARE_ASN1_ALLOC_FUNCTIONS_name(type, name) \ + DECLARE_ASN1_ENCODE_FUNCTIONS(type, itname, name) + +# define DECLARE_ASN1_ENCODE_FUNCTIONS(type, itname, name) \ + type *d2i_##name(type **a, const unsigned char **in, long len); \ + int i2d_##name(type *a, unsigned char **out); \ + DECLARE_ASN1_ITEM(itname) + +# define DECLARE_ASN1_ENCODE_FUNCTIONS_const(type, name) \ + type *d2i_##name(type **a, const unsigned char **in, long len); \ + int i2d_##name(const type *a, unsigned char **out); \ + DECLARE_ASN1_ITEM(name) + +# define DECLARE_ASN1_NDEF_FUNCTION(name) \ + int i2d_##name##_NDEF(name *a, unsigned char **out); + +# define DECLARE_ASN1_FUNCTIONS_const(name) \ + DECLARE_ASN1_ALLOC_FUNCTIONS(name) \ + DECLARE_ASN1_ENCODE_FUNCTIONS_const(name, name) + +# define DECLARE_ASN1_ALLOC_FUNCTIONS_name(type, name) \ + type *name##_new(void); \ + void name##_free(type *a); + +# define DECLARE_ASN1_PRINT_FUNCTION(stname) \ + DECLARE_ASN1_PRINT_FUNCTION_fname(stname, stname) + +# define DECLARE_ASN1_PRINT_FUNCTION_fname(stname, fname) \ + int fname##_print_ctx(BIO *out, stname *x, int indent, \ + const ASN1_PCTX *pctx); + +# define D2I_OF(type) type *(*)(type **,const unsigned char **,long) +# define I2D_OF(type) int (*)(type *,unsigned char **) +# define I2D_OF_const(type) int (*)(const type *,unsigned char **) + +# define CHECKED_D2I_OF(type, d2i) \ + ((d2i_of_void*) (1 ? d2i : ((D2I_OF(type))0))) +# define CHECKED_I2D_OF(type, i2d) \ + ((i2d_of_void*) (1 ? i2d : ((I2D_OF(type))0))) +# define CHECKED_NEW_OF(type, xnew) \ + ((void *(*)(void)) (1 ? xnew : ((type *(*)(void))0))) +# define CHECKED_PTR_OF(type, p) \ + ((void*) (1 ? p : (type*)0)) +# define CHECKED_PPTR_OF(type, p) \ + ((void**) (1 ? 
p : (type**)0)) + +# define TYPEDEF_D2I_OF(type) typedef type *d2i_of_##type(type **,const unsigned char **,long) +# define TYPEDEF_I2D_OF(type) typedef int i2d_of_##type(type *,unsigned char **) +# define TYPEDEF_D2I2D_OF(type) TYPEDEF_D2I_OF(type); TYPEDEF_I2D_OF(type) + +TYPEDEF_D2I2D_OF(void); + +/*- + * The following macros and typedefs allow an ASN1_ITEM + * to be embedded in a structure and referenced. Since + * the ASN1_ITEM pointers need to be globally accessible + * (possibly from shared libraries) they may exist in + * different forms. On platforms that support it the + * ASN1_ITEM structure itself will be globally exported. + * Other platforms will export a function that returns + * an ASN1_ITEM pointer. + * + * To handle both cases transparently the macros below + * should be used instead of hard coding an ASN1_ITEM + * pointer in a structure. + * + * The structure will look like this: + * + * typedef struct SOMETHING_st { + * ... + * ASN1_ITEM_EXP *iptr; + * ... + * } SOMETHING; + * + * It would be initialised as e.g.: + * + * SOMETHING somevar = {...,ASN1_ITEM_ref(X509),...}; + * + * and the actual pointer extracted with: + * + * const ASN1_ITEM *it = ASN1_ITEM_ptr(somevar.iptr); + * + * Finally an ASN1_ITEM pointer can be extracted from an + * appropriate reference with: ASN1_ITEM_rptr(X509). This + * would be used when a function takes an ASN1_ITEM * argument. + * + */ + +# ifndef OPENSSL_EXPORT_VAR_AS_FUNCTION + +/* ASN1_ITEM pointer exported type */ +typedef const ASN1_ITEM ASN1_ITEM_EXP; + +/* Macro to obtain ASN1_ITEM pointer from exported type */ +# define ASN1_ITEM_ptr(iptr) (iptr) + +/* Macro to include ASN1_ITEM pointer from base type */ +# define ASN1_ITEM_ref(iptr) (&(iptr##_it)) + +# define ASN1_ITEM_rptr(ref) (&(ref##_it)) + +# define DECLARE_ASN1_ITEM(name) \ + OPENSSL_EXTERN const ASN1_ITEM name##_it; + +# else + +/* + * Platforms that can't easily handle shared global variables are declared as + * functions returning ASN1_ITEM pointers. + */ + +/* ASN1_ITEM pointer exported type */ +typedef const ASN1_ITEM *ASN1_ITEM_EXP (void); + +/* Macro to obtain ASN1_ITEM pointer from exported type */ +# define ASN1_ITEM_ptr(iptr) (iptr()) + +/* Macro to include ASN1_ITEM pointer from base type */ +# define ASN1_ITEM_ref(iptr) (iptr##_it) + +# define ASN1_ITEM_rptr(ref) (ref##_it()) + +# define DECLARE_ASN1_ITEM(name) \ + const ASN1_ITEM * name##_it(void); + +# endif + +/* Parameters used by ASN1_STRING_print_ex() */ + +/* + * These determine which characters to escape: RFC2253 special characters, + * control characters and MSB set characters + */ + +# define ASN1_STRFLGS_ESC_2253 1 +# define ASN1_STRFLGS_ESC_CTRL 2 +# define ASN1_STRFLGS_ESC_MSB 4 + +/* + * This flag determines how we do escaping: normally RC2253 backslash only, + * set this to use backslash and quote. + */ + +# define ASN1_STRFLGS_ESC_QUOTE 8 + +/* These three flags are internal use only. */ + +/* Character is a valid PrintableString character */ +# define CHARTYPE_PRINTABLESTRING 0x10 +/* Character needs escaping if it is the first character */ +# define CHARTYPE_FIRST_ESC_2253 0x20 +/* Character needs escaping if it is the last character */ +# define CHARTYPE_LAST_ESC_2253 0x40 + +/* + * NB the internal flags are safely reused below by flags handled at the top + * level. 
+ */ + +/* + * If this is set we convert all character strings to UTF8 first + */ + +# define ASN1_STRFLGS_UTF8_CONVERT 0x10 + +/* + * If this is set we don't attempt to interpret content: just assume all + * strings are 1 byte per character. This will produce some pretty odd + * looking output! + */ + +# define ASN1_STRFLGS_IGNORE_TYPE 0x20 + +/* If this is set we include the string type in the output */ +# define ASN1_STRFLGS_SHOW_TYPE 0x40 + +/* + * This determines which strings to display and which to 'dump' (hex dump of + * content octets or DER encoding). We can only dump non character strings or + * everything. If we don't dump 'unknown' they are interpreted as character + * strings with 1 octet per character and are subject to the usual escaping + * options. + */ + +# define ASN1_STRFLGS_DUMP_ALL 0x80 +# define ASN1_STRFLGS_DUMP_UNKNOWN 0x100 + +/* + * These determine what 'dumping' does, we can dump the content octets or the + * DER encoding: both use the RFC2253 #XXXXX notation. + */ + +# define ASN1_STRFLGS_DUMP_DER 0x200 + +/* + * This flag specifies that RC2254 escaping shall be performed. + */ +#define ASN1_STRFLGS_ESC_2254 0x400 + +/* + * All the string flags consistent with RFC2253, escaping control characters + * isn't essential in RFC2253 but it is advisable anyway. + */ + +# define ASN1_STRFLGS_RFC2253 (ASN1_STRFLGS_ESC_2253 | \ + ASN1_STRFLGS_ESC_CTRL | \ + ASN1_STRFLGS_ESC_MSB | \ + ASN1_STRFLGS_UTF8_CONVERT | \ + ASN1_STRFLGS_DUMP_UNKNOWN | \ + ASN1_STRFLGS_DUMP_DER) + +DEFINE_STACK_OF(ASN1_INTEGER) + +DEFINE_STACK_OF(ASN1_GENERALSTRING) + +DEFINE_STACK_OF(ASN1_UTF8STRING) + +typedef struct asn1_type_st { + int type; + union { + char *ptr; + ASN1_BOOLEAN boolean; + ASN1_STRING *asn1_string; + ASN1_OBJECT *object; + ASN1_INTEGER *integer; + ASN1_ENUMERATED *enumerated; + ASN1_BIT_STRING *bit_string; + ASN1_OCTET_STRING *octet_string; + ASN1_PRINTABLESTRING *printablestring; + ASN1_T61STRING *t61string; + ASN1_IA5STRING *ia5string; + ASN1_GENERALSTRING *generalstring; + ASN1_BMPSTRING *bmpstring; + ASN1_UNIVERSALSTRING *universalstring; + ASN1_UTCTIME *utctime; + ASN1_GENERALIZEDTIME *generalizedtime; + ASN1_VISIBLESTRING *visiblestring; + ASN1_UTF8STRING *utf8string; + /* + * set and sequence are left complete and still contain the set or + * sequence bytes + */ + ASN1_STRING *set; + ASN1_STRING *sequence; + ASN1_VALUE *asn1_value; + } value; +} ASN1_TYPE; + +DEFINE_STACK_OF(ASN1_TYPE) + +typedef STACK_OF(ASN1_TYPE) ASN1_SEQUENCE_ANY; + +DECLARE_ASN1_ENCODE_FUNCTIONS_const(ASN1_SEQUENCE_ANY, ASN1_SEQUENCE_ANY) +DECLARE_ASN1_ENCODE_FUNCTIONS_const(ASN1_SEQUENCE_ANY, ASN1_SET_ANY) + +/* This is used to contain a list of bit names */ +typedef struct BIT_STRING_BITNAME_st { + int bitnum; + const char *lname; + const char *sname; +} BIT_STRING_BITNAME; + +# define B_ASN1_TIME \ + B_ASN1_UTCTIME | \ + B_ASN1_GENERALIZEDTIME + +# define B_ASN1_PRINTABLE \ + B_ASN1_NUMERICSTRING| \ + B_ASN1_PRINTABLESTRING| \ + B_ASN1_T61STRING| \ + B_ASN1_IA5STRING| \ + B_ASN1_BIT_STRING| \ + B_ASN1_UNIVERSALSTRING|\ + B_ASN1_BMPSTRING|\ + B_ASN1_UTF8STRING|\ + B_ASN1_SEQUENCE|\ + B_ASN1_UNKNOWN + +# define B_ASN1_DIRECTORYSTRING \ + B_ASN1_PRINTABLESTRING| \ + B_ASN1_TELETEXSTRING|\ + B_ASN1_BMPSTRING|\ + B_ASN1_UNIVERSALSTRING|\ + B_ASN1_UTF8STRING + +# define B_ASN1_DISPLAYTEXT \ + B_ASN1_IA5STRING| \ + B_ASN1_VISIBLESTRING| \ + B_ASN1_BMPSTRING|\ + B_ASN1_UTF8STRING + +DECLARE_ASN1_FUNCTIONS_fname(ASN1_TYPE, ASN1_ANY, ASN1_TYPE) + +int ASN1_TYPE_get(const ASN1_TYPE *a); +void 
ASN1_TYPE_set(ASN1_TYPE *a, int type, void *value); +int ASN1_TYPE_set1(ASN1_TYPE *a, int type, const void *value); +int ASN1_TYPE_cmp(const ASN1_TYPE *a, const ASN1_TYPE *b); + +ASN1_TYPE *ASN1_TYPE_pack_sequence(const ASN1_ITEM *it, void *s, ASN1_TYPE **t); +void *ASN1_TYPE_unpack_sequence(const ASN1_ITEM *it, const ASN1_TYPE *t); + +ASN1_OBJECT *ASN1_OBJECT_new(void); +void ASN1_OBJECT_free(ASN1_OBJECT *a); +int i2d_ASN1_OBJECT(const ASN1_OBJECT *a, unsigned char **pp); +ASN1_OBJECT *d2i_ASN1_OBJECT(ASN1_OBJECT **a, const unsigned char **pp, + long length); + +DECLARE_ASN1_ITEM(ASN1_OBJECT) + +DEFINE_STACK_OF(ASN1_OBJECT) + +ASN1_STRING *ASN1_STRING_new(void); +void ASN1_STRING_free(ASN1_STRING *a); +void ASN1_STRING_clear_free(ASN1_STRING *a); +int ASN1_STRING_copy(ASN1_STRING *dst, const ASN1_STRING *str); +ASN1_STRING *ASN1_STRING_dup(const ASN1_STRING *a); +ASN1_STRING *ASN1_STRING_type_new(int type); +int ASN1_STRING_cmp(const ASN1_STRING *a, const ASN1_STRING *b); + /* + * Since this is used to store all sorts of things, via macros, for now, + * make its data void * + */ +int ASN1_STRING_set(ASN1_STRING *str, const void *data, int len); +void ASN1_STRING_set0(ASN1_STRING *str, void *data, int len); +int ASN1_STRING_length(const ASN1_STRING *x); +void ASN1_STRING_length_set(ASN1_STRING *x, int n); +int ASN1_STRING_type(const ASN1_STRING *x); +DEPRECATEDIN_1_1_0(unsigned char *ASN1_STRING_data(ASN1_STRING *x)) +const unsigned char *ASN1_STRING_get0_data(const ASN1_STRING *x); + +DECLARE_ASN1_FUNCTIONS(ASN1_BIT_STRING) +int ASN1_BIT_STRING_set(ASN1_BIT_STRING *a, unsigned char *d, int length); +int ASN1_BIT_STRING_set_bit(ASN1_BIT_STRING *a, int n, int value); +int ASN1_BIT_STRING_get_bit(const ASN1_BIT_STRING *a, int n); +int ASN1_BIT_STRING_check(const ASN1_BIT_STRING *a, + const unsigned char *flags, int flags_len); + +int ASN1_BIT_STRING_name_print(BIO *out, ASN1_BIT_STRING *bs, + BIT_STRING_BITNAME *tbl, int indent); +int ASN1_BIT_STRING_num_asc(const char *name, BIT_STRING_BITNAME *tbl); +int ASN1_BIT_STRING_set_asc(ASN1_BIT_STRING *bs, const char *name, int value, + BIT_STRING_BITNAME *tbl); + +DECLARE_ASN1_FUNCTIONS(ASN1_INTEGER) +ASN1_INTEGER *d2i_ASN1_UINTEGER(ASN1_INTEGER **a, const unsigned char **pp, + long length); +ASN1_INTEGER *ASN1_INTEGER_dup(const ASN1_INTEGER *x); +int ASN1_INTEGER_cmp(const ASN1_INTEGER *x, const ASN1_INTEGER *y); + +DECLARE_ASN1_FUNCTIONS(ASN1_ENUMERATED) + +int ASN1_UTCTIME_check(const ASN1_UTCTIME *a); +ASN1_UTCTIME *ASN1_UTCTIME_set(ASN1_UTCTIME *s, time_t t); +ASN1_UTCTIME *ASN1_UTCTIME_adj(ASN1_UTCTIME *s, time_t t, + int offset_day, long offset_sec); +int ASN1_UTCTIME_set_string(ASN1_UTCTIME *s, const char *str); +int ASN1_UTCTIME_cmp_time_t(const ASN1_UTCTIME *s, time_t t); + +int ASN1_GENERALIZEDTIME_check(const ASN1_GENERALIZEDTIME *a); +ASN1_GENERALIZEDTIME *ASN1_GENERALIZEDTIME_set(ASN1_GENERALIZEDTIME *s, + time_t t); +ASN1_GENERALIZEDTIME *ASN1_GENERALIZEDTIME_adj(ASN1_GENERALIZEDTIME *s, + time_t t, int offset_day, + long offset_sec); +int ASN1_GENERALIZEDTIME_set_string(ASN1_GENERALIZEDTIME *s, const char *str); + +int ASN1_TIME_diff(int *pday, int *psec, + const ASN1_TIME *from, const ASN1_TIME *to); + +DECLARE_ASN1_FUNCTIONS(ASN1_OCTET_STRING) +ASN1_OCTET_STRING *ASN1_OCTET_STRING_dup(const ASN1_OCTET_STRING *a); +int ASN1_OCTET_STRING_cmp(const ASN1_OCTET_STRING *a, + const ASN1_OCTET_STRING *b); +int ASN1_OCTET_STRING_set(ASN1_OCTET_STRING *str, const unsigned char *data, + int len); + 
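[Editor's note, not part of the diff] The declarations above cover allocation and accessor functions for the core ASN.1 string and integer types (ASN1_OCTET_STRING, ASN1_INTEGER, ASN1_STRING_length, and friends). As orientation for readers of this vendored header, here is a minimal usage sketch that calls only functions declared in the text above; it assumes the usual OpenSSL 1.1.x behaviour that the *_free functions tolerate NULL.

#include <stdio.h>
#include <openssl/asn1.h>

/* Minimal sketch: exercise a few of the ASN1_* declarations above.
 * Error handling is reduced to a single cleanup path for brevity. */
int asn1_demo(void)
{
    ASN1_OCTET_STRING *oct = ASN1_OCTET_STRING_new();
    ASN1_INTEGER *num = ASN1_INTEGER_new();
    int ok = 0;

    if (oct == NULL || num == NULL)
        goto end;

    /* Copy raw bytes into the OCTET STRING and a long into the INTEGER. */
    if (!ASN1_OCTET_STRING_set(oct, (const unsigned char *)"payload", 7))
        goto end;
    if (!ASN1_INTEGER_set(num, 42))
        goto end;

    printf("octets: %d bytes, integer: %ld\n",
           ASN1_STRING_length(oct), ASN1_INTEGER_get(num));
    ok = 1;

 end:
    ASN1_OCTET_STRING_free(oct);   /* NULL-safe in OpenSSL 1.1.x */
    ASN1_INTEGER_free(num);
    return ok;
}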
+DECLARE_ASN1_FUNCTIONS(ASN1_VISIBLESTRING) +DECLARE_ASN1_FUNCTIONS(ASN1_UNIVERSALSTRING) +DECLARE_ASN1_FUNCTIONS(ASN1_UTF8STRING) +DECLARE_ASN1_FUNCTIONS(ASN1_NULL) +DECLARE_ASN1_FUNCTIONS(ASN1_BMPSTRING) + +int UTF8_getc(const unsigned char *str, int len, unsigned long *val); +int UTF8_putc(unsigned char *str, int len, unsigned long value); + +DECLARE_ASN1_FUNCTIONS_name(ASN1_STRING, ASN1_PRINTABLE) + +DECLARE_ASN1_FUNCTIONS_name(ASN1_STRING, DIRECTORYSTRING) +DECLARE_ASN1_FUNCTIONS_name(ASN1_STRING, DISPLAYTEXT) +DECLARE_ASN1_FUNCTIONS(ASN1_PRINTABLESTRING) +DECLARE_ASN1_FUNCTIONS(ASN1_T61STRING) +DECLARE_ASN1_FUNCTIONS(ASN1_IA5STRING) +DECLARE_ASN1_FUNCTIONS(ASN1_GENERALSTRING) +DECLARE_ASN1_FUNCTIONS(ASN1_UTCTIME) +DECLARE_ASN1_FUNCTIONS(ASN1_GENERALIZEDTIME) +DECLARE_ASN1_FUNCTIONS(ASN1_TIME) + +DECLARE_ASN1_ITEM(ASN1_OCTET_STRING_NDEF) + +ASN1_TIME *ASN1_TIME_set(ASN1_TIME *s, time_t t); +ASN1_TIME *ASN1_TIME_adj(ASN1_TIME *s, time_t t, + int offset_day, long offset_sec); +int ASN1_TIME_check(const ASN1_TIME *t); +ASN1_GENERALIZEDTIME *ASN1_TIME_to_generalizedtime(const ASN1_TIME *t, + ASN1_GENERALIZEDTIME **out); +int ASN1_TIME_set_string(ASN1_TIME *s, const char *str); +int ASN1_TIME_set_string_X509(ASN1_TIME *s, const char *str); +int ASN1_TIME_to_tm(const ASN1_TIME *s, struct tm *tm); +int ASN1_TIME_normalize(ASN1_TIME *s); +int ASN1_TIME_cmp_time_t(const ASN1_TIME *s, time_t t); +int ASN1_TIME_compare(const ASN1_TIME *a, const ASN1_TIME *b); + +int i2a_ASN1_INTEGER(BIO *bp, const ASN1_INTEGER *a); +int a2i_ASN1_INTEGER(BIO *bp, ASN1_INTEGER *bs, char *buf, int size); +int i2a_ASN1_ENUMERATED(BIO *bp, const ASN1_ENUMERATED *a); +int a2i_ASN1_ENUMERATED(BIO *bp, ASN1_ENUMERATED *bs, char *buf, int size); +int i2a_ASN1_OBJECT(BIO *bp, const ASN1_OBJECT *a); +int a2i_ASN1_STRING(BIO *bp, ASN1_STRING *bs, char *buf, int size); +int i2a_ASN1_STRING(BIO *bp, const ASN1_STRING *a, int type); +int i2t_ASN1_OBJECT(char *buf, int buf_len, const ASN1_OBJECT *a); + +int a2d_ASN1_OBJECT(unsigned char *out, int olen, const char *buf, int num); +ASN1_OBJECT *ASN1_OBJECT_create(int nid, unsigned char *data, int len, + const char *sn, const char *ln); + +int ASN1_INTEGER_get_int64(int64_t *pr, const ASN1_INTEGER *a); +int ASN1_INTEGER_set_int64(ASN1_INTEGER *a, int64_t r); +int ASN1_INTEGER_get_uint64(uint64_t *pr, const ASN1_INTEGER *a); +int ASN1_INTEGER_set_uint64(ASN1_INTEGER *a, uint64_t r); + +int ASN1_INTEGER_set(ASN1_INTEGER *a, long v); +long ASN1_INTEGER_get(const ASN1_INTEGER *a); +ASN1_INTEGER *BN_to_ASN1_INTEGER(const BIGNUM *bn, ASN1_INTEGER *ai); +BIGNUM *ASN1_INTEGER_to_BN(const ASN1_INTEGER *ai, BIGNUM *bn); + +int ASN1_ENUMERATED_get_int64(int64_t *pr, const ASN1_ENUMERATED *a); +int ASN1_ENUMERATED_set_int64(ASN1_ENUMERATED *a, int64_t r); + + +int ASN1_ENUMERATED_set(ASN1_ENUMERATED *a, long v); +long ASN1_ENUMERATED_get(const ASN1_ENUMERATED *a); +ASN1_ENUMERATED *BN_to_ASN1_ENUMERATED(const BIGNUM *bn, ASN1_ENUMERATED *ai); +BIGNUM *ASN1_ENUMERATED_to_BN(const ASN1_ENUMERATED *ai, BIGNUM *bn); + +/* General */ +/* given a string, return the correct type, max is the maximum length */ +int ASN1_PRINTABLE_type(const unsigned char *s, int max); + +unsigned long ASN1_tag2bit(int tag); + +/* SPECIALS */ +int ASN1_get_object(const unsigned char **pp, long *plength, int *ptag, + int *pclass, long omax); +int ASN1_check_infinite_end(unsigned char **p, long len); +int ASN1_const_check_infinite_end(const unsigned char **p, long len); +void ASN1_put_object(unsigned char **pp, int 
constructed, int length, + int tag, int xclass); +int ASN1_put_eoc(unsigned char **pp); +int ASN1_object_size(int constructed, int length, int tag); + +/* Used to implement other functions */ +void *ASN1_dup(i2d_of_void *i2d, d2i_of_void *d2i, void *x); + +# define ASN1_dup_of(type,i2d,d2i,x) \ + ((type*)ASN1_dup(CHECKED_I2D_OF(type, i2d), \ + CHECKED_D2I_OF(type, d2i), \ + CHECKED_PTR_OF(type, x))) + +# define ASN1_dup_of_const(type,i2d,d2i,x) \ + ((type*)ASN1_dup(CHECKED_I2D_OF(const type, i2d), \ + CHECKED_D2I_OF(type, d2i), \ + CHECKED_PTR_OF(const type, x))) + +void *ASN1_item_dup(const ASN1_ITEM *it, void *x); + +/* ASN1 alloc/free macros for when a type is only used internally */ + +# define M_ASN1_new_of(type) (type *)ASN1_item_new(ASN1_ITEM_rptr(type)) +# define M_ASN1_free_of(x, type) \ + ASN1_item_free(CHECKED_PTR_OF(type, x), ASN1_ITEM_rptr(type)) + +# ifndef OPENSSL_NO_STDIO +void *ASN1_d2i_fp(void *(*xnew) (void), d2i_of_void *d2i, FILE *in, void **x); + +# define ASN1_d2i_fp_of(type,xnew,d2i,in,x) \ + ((type*)ASN1_d2i_fp(CHECKED_NEW_OF(type, xnew), \ + CHECKED_D2I_OF(type, d2i), \ + in, \ + CHECKED_PPTR_OF(type, x))) + +void *ASN1_item_d2i_fp(const ASN1_ITEM *it, FILE *in, void *x); +int ASN1_i2d_fp(i2d_of_void *i2d, FILE *out, void *x); + +# define ASN1_i2d_fp_of(type,i2d,out,x) \ + (ASN1_i2d_fp(CHECKED_I2D_OF(type, i2d), \ + out, \ + CHECKED_PTR_OF(type, x))) + +# define ASN1_i2d_fp_of_const(type,i2d,out,x) \ + (ASN1_i2d_fp(CHECKED_I2D_OF(const type, i2d), \ + out, \ + CHECKED_PTR_OF(const type, x))) + +int ASN1_item_i2d_fp(const ASN1_ITEM *it, FILE *out, void *x); +int ASN1_STRING_print_ex_fp(FILE *fp, const ASN1_STRING *str, unsigned long flags); +# endif + +int ASN1_STRING_to_UTF8(unsigned char **out, const ASN1_STRING *in); + +void *ASN1_d2i_bio(void *(*xnew) (void), d2i_of_void *d2i, BIO *in, void **x); + +# define ASN1_d2i_bio_of(type,xnew,d2i,in,x) \ + ((type*)ASN1_d2i_bio( CHECKED_NEW_OF(type, xnew), \ + CHECKED_D2I_OF(type, d2i), \ + in, \ + CHECKED_PPTR_OF(type, x))) + +void *ASN1_item_d2i_bio(const ASN1_ITEM *it, BIO *in, void *x); +int ASN1_i2d_bio(i2d_of_void *i2d, BIO *out, unsigned char *x); + +# define ASN1_i2d_bio_of(type,i2d,out,x) \ + (ASN1_i2d_bio(CHECKED_I2D_OF(type, i2d), \ + out, \ + CHECKED_PTR_OF(type, x))) + +# define ASN1_i2d_bio_of_const(type,i2d,out,x) \ + (ASN1_i2d_bio(CHECKED_I2D_OF(const type, i2d), \ + out, \ + CHECKED_PTR_OF(const type, x))) + +int ASN1_item_i2d_bio(const ASN1_ITEM *it, BIO *out, void *x); +int ASN1_UTCTIME_print(BIO *fp, const ASN1_UTCTIME *a); +int ASN1_GENERALIZEDTIME_print(BIO *fp, const ASN1_GENERALIZEDTIME *a); +int ASN1_TIME_print(BIO *fp, const ASN1_TIME *a); +int ASN1_STRING_print(BIO *bp, const ASN1_STRING *v); +int ASN1_STRING_print_ex(BIO *out, const ASN1_STRING *str, unsigned long flags); +int ASN1_buf_print(BIO *bp, const unsigned char *buf, size_t buflen, int off); +int ASN1_bn_print(BIO *bp, const char *number, const BIGNUM *num, + unsigned char *buf, int off); +int ASN1_parse(BIO *bp, const unsigned char *pp, long len, int indent); +int ASN1_parse_dump(BIO *bp, const unsigned char *pp, long len, int indent, + int dump); +const char *ASN1_tag2str(int tag); + +/* Used to load and write Netscape format cert */ + +int ASN1_UNIVERSALSTRING_to_string(ASN1_UNIVERSALSTRING *s); + +int ASN1_TYPE_set_octetstring(ASN1_TYPE *a, unsigned char *data, int len); +int ASN1_TYPE_get_octetstring(const ASN1_TYPE *a, unsigned char *data, int max_len); +int ASN1_TYPE_set_int_octetstring(ASN1_TYPE *a, long num, + unsigned char 
*data, int len); +int ASN1_TYPE_get_int_octetstring(const ASN1_TYPE *a, long *num, + unsigned char *data, int max_len); + +void *ASN1_item_unpack(const ASN1_STRING *oct, const ASN1_ITEM *it); + +ASN1_STRING *ASN1_item_pack(void *obj, const ASN1_ITEM *it, + ASN1_OCTET_STRING **oct); + +void ASN1_STRING_set_default_mask(unsigned long mask); +int ASN1_STRING_set_default_mask_asc(const char *p); +unsigned long ASN1_STRING_get_default_mask(void); +int ASN1_mbstring_copy(ASN1_STRING **out, const unsigned char *in, int len, + int inform, unsigned long mask); +int ASN1_mbstring_ncopy(ASN1_STRING **out, const unsigned char *in, int len, + int inform, unsigned long mask, + long minsize, long maxsize); + +ASN1_STRING *ASN1_STRING_set_by_NID(ASN1_STRING **out, + const unsigned char *in, int inlen, + int inform, int nid); +ASN1_STRING_TABLE *ASN1_STRING_TABLE_get(int nid); +int ASN1_STRING_TABLE_add(int, long, long, unsigned long, unsigned long); +void ASN1_STRING_TABLE_cleanup(void); + +/* ASN1 template functions */ + +/* Old API compatible functions */ +ASN1_VALUE *ASN1_item_new(const ASN1_ITEM *it); +void ASN1_item_free(ASN1_VALUE *val, const ASN1_ITEM *it); +ASN1_VALUE *ASN1_item_d2i(ASN1_VALUE **val, const unsigned char **in, + long len, const ASN1_ITEM *it); +int ASN1_item_i2d(ASN1_VALUE *val, unsigned char **out, const ASN1_ITEM *it); +int ASN1_item_ndef_i2d(ASN1_VALUE *val, unsigned char **out, + const ASN1_ITEM *it); + +void ASN1_add_oid_module(void); +void ASN1_add_stable_module(void); + +ASN1_TYPE *ASN1_generate_nconf(const char *str, CONF *nconf); +ASN1_TYPE *ASN1_generate_v3(const char *str, X509V3_CTX *cnf); +int ASN1_str2mask(const char *str, unsigned long *pmask); + +/* ASN1 Print flags */ + +/* Indicate missing OPTIONAL fields */ +# define ASN1_PCTX_FLAGS_SHOW_ABSENT 0x001 +/* Mark start and end of SEQUENCE */ +# define ASN1_PCTX_FLAGS_SHOW_SEQUENCE 0x002 +/* Mark start and end of SEQUENCE/SET OF */ +# define ASN1_PCTX_FLAGS_SHOW_SSOF 0x004 +/* Show the ASN1 type of primitives */ +# define ASN1_PCTX_FLAGS_SHOW_TYPE 0x008 +/* Don't show ASN1 type of ANY */ +# define ASN1_PCTX_FLAGS_NO_ANY_TYPE 0x010 +/* Don't show ASN1 type of MSTRINGs */ +# define ASN1_PCTX_FLAGS_NO_MSTRING_TYPE 0x020 +/* Don't show field names in SEQUENCE */ +# define ASN1_PCTX_FLAGS_NO_FIELD_NAME 0x040 +/* Show structure names of each SEQUENCE field */ +# define ASN1_PCTX_FLAGS_SHOW_FIELD_STRUCT_NAME 0x080 +/* Don't show structure name even at top level */ +# define ASN1_PCTX_FLAGS_NO_STRUCT_NAME 0x100 + +int ASN1_item_print(BIO *out, ASN1_VALUE *ifld, int indent, + const ASN1_ITEM *it, const ASN1_PCTX *pctx); +ASN1_PCTX *ASN1_PCTX_new(void); +void ASN1_PCTX_free(ASN1_PCTX *p); +unsigned long ASN1_PCTX_get_flags(const ASN1_PCTX *p); +void ASN1_PCTX_set_flags(ASN1_PCTX *p, unsigned long flags); +unsigned long ASN1_PCTX_get_nm_flags(const ASN1_PCTX *p); +void ASN1_PCTX_set_nm_flags(ASN1_PCTX *p, unsigned long flags); +unsigned long ASN1_PCTX_get_cert_flags(const ASN1_PCTX *p); +void ASN1_PCTX_set_cert_flags(ASN1_PCTX *p, unsigned long flags); +unsigned long ASN1_PCTX_get_oid_flags(const ASN1_PCTX *p); +void ASN1_PCTX_set_oid_flags(ASN1_PCTX *p, unsigned long flags); +unsigned long ASN1_PCTX_get_str_flags(const ASN1_PCTX *p); +void ASN1_PCTX_set_str_flags(ASN1_PCTX *p, unsigned long flags); + +ASN1_SCTX *ASN1_SCTX_new(int (*scan_cb) (ASN1_SCTX *ctx)); +void ASN1_SCTX_free(ASN1_SCTX *p); +const ASN1_ITEM *ASN1_SCTX_get_item(ASN1_SCTX *p); +const ASN1_TEMPLATE *ASN1_SCTX_get_template(ASN1_SCTX *p); +unsigned long 
ASN1_SCTX_get_flags(ASN1_SCTX *p); +void ASN1_SCTX_set_app_data(ASN1_SCTX *p, void *data); +void *ASN1_SCTX_get_app_data(ASN1_SCTX *p); + +const BIO_METHOD *BIO_f_asn1(void); + +BIO *BIO_new_NDEF(BIO *out, ASN1_VALUE *val, const ASN1_ITEM *it); + +int i2d_ASN1_bio_stream(BIO *out, ASN1_VALUE *val, BIO *in, int flags, + const ASN1_ITEM *it); +int PEM_write_bio_ASN1_stream(BIO *out, ASN1_VALUE *val, BIO *in, int flags, + const char *hdr, const ASN1_ITEM *it); +int SMIME_write_ASN1(BIO *bio, ASN1_VALUE *val, BIO *data, int flags, + int ctype_nid, int econt_nid, + STACK_OF(X509_ALGOR) *mdalgs, const ASN1_ITEM *it); +ASN1_VALUE *SMIME_read_ASN1(BIO *bio, BIO **bcont, const ASN1_ITEM *it); +int SMIME_crlf_copy(BIO *in, BIO *out, int flags); +int SMIME_text(BIO *in, BIO *out); + +const ASN1_ITEM *ASN1_ITEM_lookup(const char *name); +const ASN1_ITEM *ASN1_ITEM_get(size_t i); + +# ifdef __cplusplus +} +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/asn1_mac.h b/third_party/openssl/uos/sw_64/include/openssl/asn1_mac.h new file mode 100644 index 00000000..7ac1782a --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/asn1_mac.h @@ -0,0 +1,10 @@ +/* + * Copyright 2015-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#error "This file is obsolete; please update your software." diff --git a/third_party/openssl/uos/sw_64/include/openssl/asn1err.h b/third_party/openssl/uos/sw_64/include/openssl/asn1err.h new file mode 100644 index 00000000..e1ad1fef --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/asn1err.h @@ -0,0 +1,256 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_ASN1ERR_H +# define HEADER_ASN1ERR_H + +# include + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_ASN1_strings(void); + +/* + * ASN1 function codes. 
+ */ +# define ASN1_F_A2D_ASN1_OBJECT 100 +# define ASN1_F_A2I_ASN1_INTEGER 102 +# define ASN1_F_A2I_ASN1_STRING 103 +# define ASN1_F_APPEND_EXP 176 +# define ASN1_F_ASN1_BIO_INIT 113 +# define ASN1_F_ASN1_BIT_STRING_SET_BIT 183 +# define ASN1_F_ASN1_CB 177 +# define ASN1_F_ASN1_CHECK_TLEN 104 +# define ASN1_F_ASN1_COLLECT 106 +# define ASN1_F_ASN1_D2I_EX_PRIMITIVE 108 +# define ASN1_F_ASN1_D2I_FP 109 +# define ASN1_F_ASN1_D2I_READ_BIO 107 +# define ASN1_F_ASN1_DIGEST 184 +# define ASN1_F_ASN1_DO_ADB 110 +# define ASN1_F_ASN1_DO_LOCK 233 +# define ASN1_F_ASN1_DUP 111 +# define ASN1_F_ASN1_ENC_SAVE 115 +# define ASN1_F_ASN1_EX_C2I 204 +# define ASN1_F_ASN1_FIND_END 190 +# define ASN1_F_ASN1_GENERALIZEDTIME_ADJ 216 +# define ASN1_F_ASN1_GENERATE_V3 178 +# define ASN1_F_ASN1_GET_INT64 224 +# define ASN1_F_ASN1_GET_OBJECT 114 +# define ASN1_F_ASN1_GET_UINT64 225 +# define ASN1_F_ASN1_I2D_BIO 116 +# define ASN1_F_ASN1_I2D_FP 117 +# define ASN1_F_ASN1_ITEM_D2I_FP 206 +# define ASN1_F_ASN1_ITEM_DUP 191 +# define ASN1_F_ASN1_ITEM_EMBED_D2I 120 +# define ASN1_F_ASN1_ITEM_EMBED_NEW 121 +# define ASN1_F_ASN1_ITEM_EX_I2D 144 +# define ASN1_F_ASN1_ITEM_FLAGS_I2D 118 +# define ASN1_F_ASN1_ITEM_I2D_BIO 192 +# define ASN1_F_ASN1_ITEM_I2D_FP 193 +# define ASN1_F_ASN1_ITEM_PACK 198 +# define ASN1_F_ASN1_ITEM_SIGN 195 +# define ASN1_F_ASN1_ITEM_SIGN_CTX 220 +# define ASN1_F_ASN1_ITEM_UNPACK 199 +# define ASN1_F_ASN1_ITEM_VERIFY 197 +# define ASN1_F_ASN1_MBSTRING_NCOPY 122 +# define ASN1_F_ASN1_OBJECT_NEW 123 +# define ASN1_F_ASN1_OUTPUT_DATA 214 +# define ASN1_F_ASN1_PCTX_NEW 205 +# define ASN1_F_ASN1_PRIMITIVE_NEW 119 +# define ASN1_F_ASN1_SCTX_NEW 221 +# define ASN1_F_ASN1_SIGN 128 +# define ASN1_F_ASN1_STR2TYPE 179 +# define ASN1_F_ASN1_STRING_GET_INT64 227 +# define ASN1_F_ASN1_STRING_GET_UINT64 230 +# define ASN1_F_ASN1_STRING_SET 186 +# define ASN1_F_ASN1_STRING_TABLE_ADD 129 +# define ASN1_F_ASN1_STRING_TO_BN 228 +# define ASN1_F_ASN1_STRING_TYPE_NEW 130 +# define ASN1_F_ASN1_TEMPLATE_EX_D2I 132 +# define ASN1_F_ASN1_TEMPLATE_NEW 133 +# define ASN1_F_ASN1_TEMPLATE_NOEXP_D2I 131 +# define ASN1_F_ASN1_TIME_ADJ 217 +# define ASN1_F_ASN1_TYPE_GET_INT_OCTETSTRING 134 +# define ASN1_F_ASN1_TYPE_GET_OCTETSTRING 135 +# define ASN1_F_ASN1_UTCTIME_ADJ 218 +# define ASN1_F_ASN1_VERIFY 137 +# define ASN1_F_B64_READ_ASN1 209 +# define ASN1_F_B64_WRITE_ASN1 210 +# define ASN1_F_BIO_NEW_NDEF 208 +# define ASN1_F_BITSTR_CB 180 +# define ASN1_F_BN_TO_ASN1_STRING 229 +# define ASN1_F_C2I_ASN1_BIT_STRING 189 +# define ASN1_F_C2I_ASN1_INTEGER 194 +# define ASN1_F_C2I_ASN1_OBJECT 196 +# define ASN1_F_C2I_IBUF 226 +# define ASN1_F_C2I_UINT64_INT 101 +# define ASN1_F_COLLECT_DATA 140 +# define ASN1_F_D2I_ASN1_OBJECT 147 +# define ASN1_F_D2I_ASN1_UINTEGER 150 +# define ASN1_F_D2I_AUTOPRIVATEKEY 207 +# define ASN1_F_D2I_PRIVATEKEY 154 +# define ASN1_F_D2I_PUBLICKEY 155 +# define ASN1_F_DO_BUF 142 +# define ASN1_F_DO_CREATE 124 +# define ASN1_F_DO_DUMP 125 +# define ASN1_F_DO_TCREATE 222 +# define ASN1_F_I2A_ASN1_OBJECT 126 +# define ASN1_F_I2D_ASN1_BIO_STREAM 211 +# define ASN1_F_I2D_ASN1_OBJECT 143 +# define ASN1_F_I2D_DSA_PUBKEY 161 +# define ASN1_F_I2D_EC_PUBKEY 181 +# define ASN1_F_I2D_PRIVATEKEY 163 +# define ASN1_F_I2D_PUBLICKEY 164 +# define ASN1_F_I2D_RSA_PUBKEY 165 +# define ASN1_F_LONG_C2I 166 +# define ASN1_F_NDEF_PREFIX 127 +# define ASN1_F_NDEF_SUFFIX 136 +# define ASN1_F_OID_MODULE_INIT 174 +# define ASN1_F_PARSE_TAGGING 182 +# define ASN1_F_PKCS5_PBE2_SET_IV 167 +# define ASN1_F_PKCS5_PBE2_SET_SCRYPT 231 +# 
define ASN1_F_PKCS5_PBE_SET 202 +# define ASN1_F_PKCS5_PBE_SET0_ALGOR 215 +# define ASN1_F_PKCS5_PBKDF2_SET 219 +# define ASN1_F_PKCS5_SCRYPT_SET 232 +# define ASN1_F_SMIME_READ_ASN1 212 +# define ASN1_F_SMIME_TEXT 213 +# define ASN1_F_STABLE_GET 138 +# define ASN1_F_STBL_MODULE_INIT 223 +# define ASN1_F_UINT32_C2I 105 +# define ASN1_F_UINT32_NEW 139 +# define ASN1_F_UINT64_C2I 112 +# define ASN1_F_UINT64_NEW 141 +# define ASN1_F_X509_CRL_ADD0_REVOKED 169 +# define ASN1_F_X509_INFO_NEW 170 +# define ASN1_F_X509_NAME_ENCODE 203 +# define ASN1_F_X509_NAME_EX_D2I 158 +# define ASN1_F_X509_NAME_EX_NEW 171 +# define ASN1_F_X509_PKEY_NEW 173 + +/* + * ASN1 reason codes. + */ +# define ASN1_R_ADDING_OBJECT 171 +# define ASN1_R_ASN1_PARSE_ERROR 203 +# define ASN1_R_ASN1_SIG_PARSE_ERROR 204 +# define ASN1_R_AUX_ERROR 100 +# define ASN1_R_BAD_OBJECT_HEADER 102 +# define ASN1_R_BAD_TEMPLATE 230 +# define ASN1_R_BMPSTRING_IS_WRONG_LENGTH 214 +# define ASN1_R_BN_LIB 105 +# define ASN1_R_BOOLEAN_IS_WRONG_LENGTH 106 +# define ASN1_R_BUFFER_TOO_SMALL 107 +# define ASN1_R_CIPHER_HAS_NO_OBJECT_IDENTIFIER 108 +# define ASN1_R_CONTEXT_NOT_INITIALISED 217 +# define ASN1_R_DATA_IS_WRONG 109 +# define ASN1_R_DECODE_ERROR 110 +# define ASN1_R_DEPTH_EXCEEDED 174 +# define ASN1_R_DIGEST_AND_KEY_TYPE_NOT_SUPPORTED 198 +# define ASN1_R_ENCODE_ERROR 112 +# define ASN1_R_ERROR_GETTING_TIME 173 +# define ASN1_R_ERROR_LOADING_SECTION 172 +# define ASN1_R_ERROR_SETTING_CIPHER_PARAMS 114 +# define ASN1_R_EXPECTING_AN_INTEGER 115 +# define ASN1_R_EXPECTING_AN_OBJECT 116 +# define ASN1_R_EXPLICIT_LENGTH_MISMATCH 119 +# define ASN1_R_EXPLICIT_TAG_NOT_CONSTRUCTED 120 +# define ASN1_R_FIELD_MISSING 121 +# define ASN1_R_FIRST_NUM_TOO_LARGE 122 +# define ASN1_R_HEADER_TOO_LONG 123 +# define ASN1_R_ILLEGAL_BITSTRING_FORMAT 175 +# define ASN1_R_ILLEGAL_BOOLEAN 176 +# define ASN1_R_ILLEGAL_CHARACTERS 124 +# define ASN1_R_ILLEGAL_FORMAT 177 +# define ASN1_R_ILLEGAL_HEX 178 +# define ASN1_R_ILLEGAL_IMPLICIT_TAG 179 +# define ASN1_R_ILLEGAL_INTEGER 180 +# define ASN1_R_ILLEGAL_NEGATIVE_VALUE 226 +# define ASN1_R_ILLEGAL_NESTED_TAGGING 181 +# define ASN1_R_ILLEGAL_NULL 125 +# define ASN1_R_ILLEGAL_NULL_VALUE 182 +# define ASN1_R_ILLEGAL_OBJECT 183 +# define ASN1_R_ILLEGAL_OPTIONAL_ANY 126 +# define ASN1_R_ILLEGAL_OPTIONS_ON_ITEM_TEMPLATE 170 +# define ASN1_R_ILLEGAL_PADDING 221 +# define ASN1_R_ILLEGAL_TAGGED_ANY 127 +# define ASN1_R_ILLEGAL_TIME_VALUE 184 +# define ASN1_R_ILLEGAL_ZERO_CONTENT 222 +# define ASN1_R_INTEGER_NOT_ASCII_FORMAT 185 +# define ASN1_R_INTEGER_TOO_LARGE_FOR_LONG 128 +# define ASN1_R_INVALID_BIT_STRING_BITS_LEFT 220 +# define ASN1_R_INVALID_BMPSTRING_LENGTH 129 +# define ASN1_R_INVALID_DIGIT 130 +# define ASN1_R_INVALID_MIME_TYPE 205 +# define ASN1_R_INVALID_MODIFIER 186 +# define ASN1_R_INVALID_NUMBER 187 +# define ASN1_R_INVALID_OBJECT_ENCODING 216 +# define ASN1_R_INVALID_SCRYPT_PARAMETERS 227 +# define ASN1_R_INVALID_SEPARATOR 131 +# define ASN1_R_INVALID_STRING_TABLE_VALUE 218 +# define ASN1_R_INVALID_UNIVERSALSTRING_LENGTH 133 +# define ASN1_R_INVALID_UTF8STRING 134 +# define ASN1_R_INVALID_VALUE 219 +# define ASN1_R_LIST_ERROR 188 +# define ASN1_R_MIME_NO_CONTENT_TYPE 206 +# define ASN1_R_MIME_PARSE_ERROR 207 +# define ASN1_R_MIME_SIG_PARSE_ERROR 208 +# define ASN1_R_MISSING_EOC 137 +# define ASN1_R_MISSING_SECOND_NUMBER 138 +# define ASN1_R_MISSING_VALUE 189 +# define ASN1_R_MSTRING_NOT_UNIVERSAL 139 +# define ASN1_R_MSTRING_WRONG_TAG 140 +# define ASN1_R_NESTED_ASN1_STRING 197 +# define 
ASN1_R_NESTED_TOO_DEEP 201 +# define ASN1_R_NON_HEX_CHARACTERS 141 +# define ASN1_R_NOT_ASCII_FORMAT 190 +# define ASN1_R_NOT_ENOUGH_DATA 142 +# define ASN1_R_NO_CONTENT_TYPE 209 +# define ASN1_R_NO_MATCHING_CHOICE_TYPE 143 +# define ASN1_R_NO_MULTIPART_BODY_FAILURE 210 +# define ASN1_R_NO_MULTIPART_BOUNDARY 211 +# define ASN1_R_NO_SIG_CONTENT_TYPE 212 +# define ASN1_R_NULL_IS_WRONG_LENGTH 144 +# define ASN1_R_OBJECT_NOT_ASCII_FORMAT 191 +# define ASN1_R_ODD_NUMBER_OF_CHARS 145 +# define ASN1_R_SECOND_NUMBER_TOO_LARGE 147 +# define ASN1_R_SEQUENCE_LENGTH_MISMATCH 148 +# define ASN1_R_SEQUENCE_NOT_CONSTRUCTED 149 +# define ASN1_R_SEQUENCE_OR_SET_NEEDS_CONFIG 192 +# define ASN1_R_SHORT_LINE 150 +# define ASN1_R_SIG_INVALID_MIME_TYPE 213 +# define ASN1_R_STREAMING_NOT_SUPPORTED 202 +# define ASN1_R_STRING_TOO_LONG 151 +# define ASN1_R_STRING_TOO_SHORT 152 +# define ASN1_R_THE_ASN1_OBJECT_IDENTIFIER_IS_NOT_KNOWN_FOR_THIS_MD 154 +# define ASN1_R_TIME_NOT_ASCII_FORMAT 193 +# define ASN1_R_TOO_LARGE 223 +# define ASN1_R_TOO_LONG 155 +# define ASN1_R_TOO_SMALL 224 +# define ASN1_R_TYPE_NOT_CONSTRUCTED 156 +# define ASN1_R_TYPE_NOT_PRIMITIVE 195 +# define ASN1_R_UNEXPECTED_EOC 159 +# define ASN1_R_UNIVERSALSTRING_IS_WRONG_LENGTH 215 +# define ASN1_R_UNKNOWN_FORMAT 160 +# define ASN1_R_UNKNOWN_MESSAGE_DIGEST_ALGORITHM 161 +# define ASN1_R_UNKNOWN_OBJECT_TYPE 162 +# define ASN1_R_UNKNOWN_PUBLIC_KEY_TYPE 163 +# define ASN1_R_UNKNOWN_SIGNATURE_ALGORITHM 199 +# define ASN1_R_UNKNOWN_TAG 194 +# define ASN1_R_UNSUPPORTED_ANY_DEFINED_BY_TYPE 164 +# define ASN1_R_UNSUPPORTED_CIPHER 228 +# define ASN1_R_UNSUPPORTED_PUBLIC_KEY_TYPE 167 +# define ASN1_R_UNSUPPORTED_TYPE 196 +# define ASN1_R_WRONG_INTEGER_TYPE 225 +# define ASN1_R_WRONG_PUBLIC_KEY_TYPE 200 +# define ASN1_R_WRONG_TAG 168 + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/asn1t.h b/third_party/openssl/uos/sw_64/include/openssl/asn1t.h new file mode 100644 index 00000000..a450ba0d --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/asn1t.h @@ -0,0 +1,945 @@ +/* + * Copyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_ASN1T_H +# define HEADER_ASN1T_H + +# include +# include +# include + +# ifdef OPENSSL_BUILD_SHLIBCRYPTO +# undef OPENSSL_EXTERN +# define OPENSSL_EXTERN OPENSSL_EXPORT +# endif + +/* ASN1 template defines, structures and functions */ + +#ifdef __cplusplus +extern "C" { +#endif + +# ifndef OPENSSL_EXPORT_VAR_AS_FUNCTION + +/* Macro to obtain ASN1_ADB pointer from a type (only used internally) */ +# define ASN1_ADB_ptr(iptr) ((const ASN1_ADB *)(iptr)) + +/* Macros for start and end of ASN1_ITEM definition */ + +# define ASN1_ITEM_start(itname) \ + const ASN1_ITEM itname##_it = { + +# define static_ASN1_ITEM_start(itname) \ + static const ASN1_ITEM itname##_it = { + +# define ASN1_ITEM_end(itname) \ + }; + +# else + +/* Macro to obtain ASN1_ADB pointer from a type (only used internally) */ +# define ASN1_ADB_ptr(iptr) ((const ASN1_ADB *)((iptr)())) + +/* Macros for start and end of ASN1_ITEM definition */ + +# define ASN1_ITEM_start(itname) \ + const ASN1_ITEM * itname##_it(void) \ + { \ + static const ASN1_ITEM local_it = { + +# define static_ASN1_ITEM_start(itname) \ + static ASN1_ITEM_start(itname) + +# define ASN1_ITEM_end(itname) \ + }; \ + return &local_it; \ + } + +# endif + +/* Macros to aid ASN1 template writing */ + +# define ASN1_ITEM_TEMPLATE(tname) \ + static const ASN1_TEMPLATE tname##_item_tt + +# define ASN1_ITEM_TEMPLATE_END(tname) \ + ;\ + ASN1_ITEM_start(tname) \ + ASN1_ITYPE_PRIMITIVE,\ + -1,\ + &tname##_item_tt,\ + 0,\ + NULL,\ + 0,\ + #tname \ + ASN1_ITEM_end(tname) +# define static_ASN1_ITEM_TEMPLATE_END(tname) \ + ;\ + static_ASN1_ITEM_start(tname) \ + ASN1_ITYPE_PRIMITIVE,\ + -1,\ + &tname##_item_tt,\ + 0,\ + NULL,\ + 0,\ + #tname \ + ASN1_ITEM_end(tname) + +/* This is a ASN1 type which just embeds a template */ + +/*- + * This pair helps declare a SEQUENCE. We can do: + * + * ASN1_SEQUENCE(stname) = { + * ... SEQUENCE components ... + * } ASN1_SEQUENCE_END(stname) + * + * This will produce an ASN1_ITEM called stname_it + * for a structure called stname. + * + * If you want the same structure but a different + * name then use: + * + * ASN1_SEQUENCE(itname) = { + * ... SEQUENCE components ... + * } ASN1_SEQUENCE_END_name(stname, itname) + * + * This will create an item called itname_it using + * a structure called stname. 
+ */ + +# define ASN1_SEQUENCE(tname) \ + static const ASN1_TEMPLATE tname##_seq_tt[] + +# define ASN1_SEQUENCE_END(stname) ASN1_SEQUENCE_END_name(stname, stname) + +# define static_ASN1_SEQUENCE_END(stname) static_ASN1_SEQUENCE_END_name(stname, stname) + +# define ASN1_SEQUENCE_END_name(stname, tname) \ + ;\ + ASN1_ITEM_start(tname) \ + ASN1_ITYPE_SEQUENCE,\ + V_ASN1_SEQUENCE,\ + tname##_seq_tt,\ + sizeof(tname##_seq_tt) / sizeof(ASN1_TEMPLATE),\ + NULL,\ + sizeof(stname),\ + #tname \ + ASN1_ITEM_end(tname) + +# define static_ASN1_SEQUENCE_END_name(stname, tname) \ + ;\ + static_ASN1_ITEM_start(tname) \ + ASN1_ITYPE_SEQUENCE,\ + V_ASN1_SEQUENCE,\ + tname##_seq_tt,\ + sizeof(tname##_seq_tt) / sizeof(ASN1_TEMPLATE),\ + NULL,\ + sizeof(stname),\ + #stname \ + ASN1_ITEM_end(tname) + +# define ASN1_NDEF_SEQUENCE(tname) \ + ASN1_SEQUENCE(tname) + +# define ASN1_NDEF_SEQUENCE_cb(tname, cb) \ + ASN1_SEQUENCE_cb(tname, cb) + +# define ASN1_SEQUENCE_cb(tname, cb) \ + static const ASN1_AUX tname##_aux = {NULL, 0, 0, 0, cb, 0}; \ + ASN1_SEQUENCE(tname) + +# define ASN1_BROKEN_SEQUENCE(tname) \ + static const ASN1_AUX tname##_aux = {NULL, ASN1_AFLG_BROKEN, 0, 0, 0, 0}; \ + ASN1_SEQUENCE(tname) + +# define ASN1_SEQUENCE_ref(tname, cb) \ + static const ASN1_AUX tname##_aux = {NULL, ASN1_AFLG_REFCOUNT, offsetof(tname, references), offsetof(tname, lock), cb, 0}; \ + ASN1_SEQUENCE(tname) + +# define ASN1_SEQUENCE_enc(tname, enc, cb) \ + static const ASN1_AUX tname##_aux = {NULL, ASN1_AFLG_ENCODING, 0, 0, cb, offsetof(tname, enc)}; \ + ASN1_SEQUENCE(tname) + +# define ASN1_NDEF_SEQUENCE_END(tname) \ + ;\ + ASN1_ITEM_start(tname) \ + ASN1_ITYPE_NDEF_SEQUENCE,\ + V_ASN1_SEQUENCE,\ + tname##_seq_tt,\ + sizeof(tname##_seq_tt) / sizeof(ASN1_TEMPLATE),\ + NULL,\ + sizeof(tname),\ + #tname \ + ASN1_ITEM_end(tname) +# define static_ASN1_NDEF_SEQUENCE_END(tname) \ + ;\ + static_ASN1_ITEM_start(tname) \ + ASN1_ITYPE_NDEF_SEQUENCE,\ + V_ASN1_SEQUENCE,\ + tname##_seq_tt,\ + sizeof(tname##_seq_tt) / sizeof(ASN1_TEMPLATE),\ + NULL,\ + sizeof(tname),\ + #tname \ + ASN1_ITEM_end(tname) + +# define ASN1_BROKEN_SEQUENCE_END(stname) ASN1_SEQUENCE_END_ref(stname, stname) +# define static_ASN1_BROKEN_SEQUENCE_END(stname) \ + static_ASN1_SEQUENCE_END_ref(stname, stname) + +# define ASN1_SEQUENCE_END_enc(stname, tname) ASN1_SEQUENCE_END_ref(stname, tname) + +# define ASN1_SEQUENCE_END_cb(stname, tname) ASN1_SEQUENCE_END_ref(stname, tname) +# define static_ASN1_SEQUENCE_END_cb(stname, tname) static_ASN1_SEQUENCE_END_ref(stname, tname) + +# define ASN1_SEQUENCE_END_ref(stname, tname) \ + ;\ + ASN1_ITEM_start(tname) \ + ASN1_ITYPE_SEQUENCE,\ + V_ASN1_SEQUENCE,\ + tname##_seq_tt,\ + sizeof(tname##_seq_tt) / sizeof(ASN1_TEMPLATE),\ + &tname##_aux,\ + sizeof(stname),\ + #tname \ + ASN1_ITEM_end(tname) +# define static_ASN1_SEQUENCE_END_ref(stname, tname) \ + ;\ + static_ASN1_ITEM_start(tname) \ + ASN1_ITYPE_SEQUENCE,\ + V_ASN1_SEQUENCE,\ + tname##_seq_tt,\ + sizeof(tname##_seq_tt) / sizeof(ASN1_TEMPLATE),\ + &tname##_aux,\ + sizeof(stname),\ + #stname \ + ASN1_ITEM_end(tname) + +# define ASN1_NDEF_SEQUENCE_END_cb(stname, tname) \ + ;\ + ASN1_ITEM_start(tname) \ + ASN1_ITYPE_NDEF_SEQUENCE,\ + V_ASN1_SEQUENCE,\ + tname##_seq_tt,\ + sizeof(tname##_seq_tt) / sizeof(ASN1_TEMPLATE),\ + &tname##_aux,\ + sizeof(stname),\ + #stname \ + ASN1_ITEM_end(tname) + +/*- + * This pair helps declare a CHOICE type. We can do: + * + * ASN1_CHOICE(chname) = { + * ... CHOICE options ... 
+ * ASN1_CHOICE_END(chname) + * + * This will produce an ASN1_ITEM called chname_it + * for a structure called chname. The structure + * definition must look like this: + * typedef struct { + * int type; + * union { + * ASN1_SOMETHING *opt1; + * ASN1_SOMEOTHER *opt2; + * } value; + * } chname; + * + * the name of the selector must be 'type'. + * to use an alternative selector name use the + * ASN1_CHOICE_END_selector() version. + */ + +# define ASN1_CHOICE(tname) \ + static const ASN1_TEMPLATE tname##_ch_tt[] + +# define ASN1_CHOICE_cb(tname, cb) \ + static const ASN1_AUX tname##_aux = {NULL, 0, 0, 0, cb, 0}; \ + ASN1_CHOICE(tname) + +# define ASN1_CHOICE_END(stname) ASN1_CHOICE_END_name(stname, stname) + +# define static_ASN1_CHOICE_END(stname) static_ASN1_CHOICE_END_name(stname, stname) + +# define ASN1_CHOICE_END_name(stname, tname) ASN1_CHOICE_END_selector(stname, tname, type) + +# define static_ASN1_CHOICE_END_name(stname, tname) static_ASN1_CHOICE_END_selector(stname, tname, type) + +# define ASN1_CHOICE_END_selector(stname, tname, selname) \ + ;\ + ASN1_ITEM_start(tname) \ + ASN1_ITYPE_CHOICE,\ + offsetof(stname,selname) ,\ + tname##_ch_tt,\ + sizeof(tname##_ch_tt) / sizeof(ASN1_TEMPLATE),\ + NULL,\ + sizeof(stname),\ + #stname \ + ASN1_ITEM_end(tname) + +# define static_ASN1_CHOICE_END_selector(stname, tname, selname) \ + ;\ + static_ASN1_ITEM_start(tname) \ + ASN1_ITYPE_CHOICE,\ + offsetof(stname,selname) ,\ + tname##_ch_tt,\ + sizeof(tname##_ch_tt) / sizeof(ASN1_TEMPLATE),\ + NULL,\ + sizeof(stname),\ + #stname \ + ASN1_ITEM_end(tname) + +# define ASN1_CHOICE_END_cb(stname, tname, selname) \ + ;\ + ASN1_ITEM_start(tname) \ + ASN1_ITYPE_CHOICE,\ + offsetof(stname,selname) ,\ + tname##_ch_tt,\ + sizeof(tname##_ch_tt) / sizeof(ASN1_TEMPLATE),\ + &tname##_aux,\ + sizeof(stname),\ + #stname \ + ASN1_ITEM_end(tname) + +/* This helps with the template wrapper form of ASN1_ITEM */ + +# define ASN1_EX_TEMPLATE_TYPE(flags, tag, name, type) { \ + (flags), (tag), 0,\ + #name, ASN1_ITEM_ref(type) } + +/* These help with SEQUENCE or CHOICE components */ + +/* used to declare other types */ + +# define ASN1_EX_TYPE(flags, tag, stname, field, type) { \ + (flags), (tag), offsetof(stname, field),\ + #field, ASN1_ITEM_ref(type) } + +/* implicit and explicit helper macros */ + +# define ASN1_IMP_EX(stname, field, type, tag, ex) \ + ASN1_EX_TYPE(ASN1_TFLG_IMPLICIT | (ex), tag, stname, field, type) + +# define ASN1_EXP_EX(stname, field, type, tag, ex) \ + ASN1_EX_TYPE(ASN1_TFLG_EXPLICIT | (ex), tag, stname, field, type) + +/* Any defined by macros: the field used is in the table itself */ + +# ifndef OPENSSL_EXPORT_VAR_AS_FUNCTION +# define ASN1_ADB_OBJECT(tblname) { ASN1_TFLG_ADB_OID, -1, 0, #tblname, (const ASN1_ITEM *)&(tblname##_adb) } +# define ASN1_ADB_INTEGER(tblname) { ASN1_TFLG_ADB_INT, -1, 0, #tblname, (const ASN1_ITEM *)&(tblname##_adb) } +# else +# define ASN1_ADB_OBJECT(tblname) { ASN1_TFLG_ADB_OID, -1, 0, #tblname, tblname##_adb } +# define ASN1_ADB_INTEGER(tblname) { ASN1_TFLG_ADB_INT, -1, 0, #tblname, tblname##_adb } +# endif +/* Plain simple type */ +# define ASN1_SIMPLE(stname, field, type) ASN1_EX_TYPE(0,0, stname, field, type) +/* Embedded simple type */ +# define ASN1_EMBED(stname, field, type) ASN1_EX_TYPE(ASN1_TFLG_EMBED,0, stname, field, type) + +/* OPTIONAL simple type */ +# define ASN1_OPT(stname, field, type) ASN1_EX_TYPE(ASN1_TFLG_OPTIONAL, 0, stname, field, type) +# define ASN1_OPT_EMBED(stname, field, type) ASN1_EX_TYPE(ASN1_TFLG_OPTIONAL|ASN1_TFLG_EMBED, 0, stname, 
field, type) + +/* IMPLICIT tagged simple type */ +# define ASN1_IMP(stname, field, type, tag) ASN1_IMP_EX(stname, field, type, tag, 0) +# define ASN1_IMP_EMBED(stname, field, type, tag) ASN1_IMP_EX(stname, field, type, tag, ASN1_TFLG_EMBED) + +/* IMPLICIT tagged OPTIONAL simple type */ +# define ASN1_IMP_OPT(stname, field, type, tag) ASN1_IMP_EX(stname, field, type, tag, ASN1_TFLG_OPTIONAL) +# define ASN1_IMP_OPT_EMBED(stname, field, type, tag) ASN1_IMP_EX(stname, field, type, tag, ASN1_TFLG_OPTIONAL|ASN1_TFLG_EMBED) + +/* Same as above but EXPLICIT */ + +# define ASN1_EXP(stname, field, type, tag) ASN1_EXP_EX(stname, field, type, tag, 0) +# define ASN1_EXP_EMBED(stname, field, type, tag) ASN1_EXP_EX(stname, field, type, tag, ASN1_TFLG_EMBED) +# define ASN1_EXP_OPT(stname, field, type, tag) ASN1_EXP_EX(stname, field, type, tag, ASN1_TFLG_OPTIONAL) +# define ASN1_EXP_OPT_EMBED(stname, field, type, tag) ASN1_EXP_EX(stname, field, type, tag, ASN1_TFLG_OPTIONAL|ASN1_TFLG_EMBED) + +/* SEQUENCE OF type */ +# define ASN1_SEQUENCE_OF(stname, field, type) \ + ASN1_EX_TYPE(ASN1_TFLG_SEQUENCE_OF, 0, stname, field, type) + +/* OPTIONAL SEQUENCE OF */ +# define ASN1_SEQUENCE_OF_OPT(stname, field, type) \ + ASN1_EX_TYPE(ASN1_TFLG_SEQUENCE_OF|ASN1_TFLG_OPTIONAL, 0, stname, field, type) + +/* Same as above but for SET OF */ + +# define ASN1_SET_OF(stname, field, type) \ + ASN1_EX_TYPE(ASN1_TFLG_SET_OF, 0, stname, field, type) + +# define ASN1_SET_OF_OPT(stname, field, type) \ + ASN1_EX_TYPE(ASN1_TFLG_SET_OF|ASN1_TFLG_OPTIONAL, 0, stname, field, type) + +/* Finally compound types of SEQUENCE, SET, IMPLICIT, EXPLICIT and OPTIONAL */ + +# define ASN1_IMP_SET_OF(stname, field, type, tag) \ + ASN1_IMP_EX(stname, field, type, tag, ASN1_TFLG_SET_OF) + +# define ASN1_EXP_SET_OF(stname, field, type, tag) \ + ASN1_EXP_EX(stname, field, type, tag, ASN1_TFLG_SET_OF) + +# define ASN1_IMP_SET_OF_OPT(stname, field, type, tag) \ + ASN1_IMP_EX(stname, field, type, tag, ASN1_TFLG_SET_OF|ASN1_TFLG_OPTIONAL) + +# define ASN1_EXP_SET_OF_OPT(stname, field, type, tag) \ + ASN1_EXP_EX(stname, field, type, tag, ASN1_TFLG_SET_OF|ASN1_TFLG_OPTIONAL) + +# define ASN1_IMP_SEQUENCE_OF(stname, field, type, tag) \ + ASN1_IMP_EX(stname, field, type, tag, ASN1_TFLG_SEQUENCE_OF) + +# define ASN1_IMP_SEQUENCE_OF_OPT(stname, field, type, tag) \ + ASN1_IMP_EX(stname, field, type, tag, ASN1_TFLG_SEQUENCE_OF|ASN1_TFLG_OPTIONAL) + +# define ASN1_EXP_SEQUENCE_OF(stname, field, type, tag) \ + ASN1_EXP_EX(stname, field, type, tag, ASN1_TFLG_SEQUENCE_OF) + +# define ASN1_EXP_SEQUENCE_OF_OPT(stname, field, type, tag) \ + ASN1_EXP_EX(stname, field, type, tag, ASN1_TFLG_SEQUENCE_OF|ASN1_TFLG_OPTIONAL) + +/* EXPLICIT using indefinite length constructed form */ +# define ASN1_NDEF_EXP(stname, field, type, tag) \ + ASN1_EXP_EX(stname, field, type, tag, ASN1_TFLG_NDEF) + +/* EXPLICIT OPTIONAL using indefinite length constructed form */ +# define ASN1_NDEF_EXP_OPT(stname, field, type, tag) \ + ASN1_EXP_EX(stname, field, type, tag, ASN1_TFLG_OPTIONAL|ASN1_TFLG_NDEF) + +/* Macros for the ASN1_ADB structure */ + +# define ASN1_ADB(name) \ + static const ASN1_ADB_TABLE name##_adbtbl[] + +# ifndef OPENSSL_EXPORT_VAR_AS_FUNCTION + +# define ASN1_ADB_END(name, flags, field, adb_cb, def, none) \ + ;\ + static const ASN1_ADB name##_adb = {\ + flags,\ + offsetof(name, field),\ + adb_cb,\ + name##_adbtbl,\ + sizeof(name##_adbtbl) / sizeof(ASN1_ADB_TABLE),\ + def,\ + none\ + } + +# else + +# define ASN1_ADB_END(name, flags, field, adb_cb, def, none) \ + ;\ + static 
const ASN1_ITEM *name##_adb(void) \ + { \ + static const ASN1_ADB internal_adb = \ + {\ + flags,\ + offsetof(name, field),\ + adb_cb,\ + name##_adbtbl,\ + sizeof(name##_adbtbl) / sizeof(ASN1_ADB_TABLE),\ + def,\ + none\ + }; \ + return (const ASN1_ITEM *) &internal_adb; \ + } \ + void dummy_function(void) + +# endif + +# define ADB_ENTRY(val, template) {val, template} + +# define ASN1_ADB_TEMPLATE(name) \ + static const ASN1_TEMPLATE name##_tt + +/* + * This is the ASN1 template structure that defines a wrapper round the + * actual type. It determines the actual position of the field in the value + * structure, various flags such as OPTIONAL and the field name. + */ + +struct ASN1_TEMPLATE_st { + unsigned long flags; /* Various flags */ + long tag; /* tag, not used if no tagging */ + unsigned long offset; /* Offset of this field in structure */ + const char *field_name; /* Field name */ + ASN1_ITEM_EXP *item; /* Relevant ASN1_ITEM or ASN1_ADB */ +}; + +/* Macro to extract ASN1_ITEM and ASN1_ADB pointer from ASN1_TEMPLATE */ + +# define ASN1_TEMPLATE_item(t) (t->item_ptr) +# define ASN1_TEMPLATE_adb(t) (t->item_ptr) + +typedef struct ASN1_ADB_TABLE_st ASN1_ADB_TABLE; +typedef struct ASN1_ADB_st ASN1_ADB; + +struct ASN1_ADB_st { + unsigned long flags; /* Various flags */ + unsigned long offset; /* Offset of selector field */ + int (*adb_cb)(long *psel); /* Application callback */ + const ASN1_ADB_TABLE *tbl; /* Table of possible types */ + long tblcount; /* Number of entries in tbl */ + const ASN1_TEMPLATE *default_tt; /* Type to use if no match */ + const ASN1_TEMPLATE *null_tt; /* Type to use if selector is NULL */ +}; + +struct ASN1_ADB_TABLE_st { + long value; /* NID for an object or value for an int */ + const ASN1_TEMPLATE tt; /* item for this value */ +}; + +/* template flags */ + +/* Field is optional */ +# define ASN1_TFLG_OPTIONAL (0x1) + +/* Field is a SET OF */ +# define ASN1_TFLG_SET_OF (0x1 << 1) + +/* Field is a SEQUENCE OF */ +# define ASN1_TFLG_SEQUENCE_OF (0x2 << 1) + +/* + * Special case: this refers to a SET OF that will be sorted into DER order + * when encoded *and* the corresponding STACK will be modified to match the + * new order. + */ +# define ASN1_TFLG_SET_ORDER (0x3 << 1) + +/* Mask for SET OF or SEQUENCE OF */ +# define ASN1_TFLG_SK_MASK (0x3 << 1) + +/* + * These flags mean the tag should be taken from the tag field. If EXPLICIT + * then the underlying type is used for the inner tag. + */ + +/* IMPLICIT tagging */ +# define ASN1_TFLG_IMPTAG (0x1 << 3) + +/* EXPLICIT tagging, inner tag from underlying type */ +# define ASN1_TFLG_EXPTAG (0x2 << 3) + +# define ASN1_TFLG_TAG_MASK (0x3 << 3) + +/* context specific IMPLICIT */ +# define ASN1_TFLG_IMPLICIT (ASN1_TFLG_IMPTAG|ASN1_TFLG_CONTEXT) + +/* context specific EXPLICIT */ +# define ASN1_TFLG_EXPLICIT (ASN1_TFLG_EXPTAG|ASN1_TFLG_CONTEXT) + +/* + * If tagging is in force these determine the type of tag to use. Otherwise + * the tag is determined by the underlying type. These values reflect the + * actual octet format. + */ + +/* Universal tag */ +# define ASN1_TFLG_UNIVERSAL (0x0<<6) +/* Application tag */ +# define ASN1_TFLG_APPLICATION (0x1<<6) +/* Context specific tag */ +# define ASN1_TFLG_CONTEXT (0x2<<6) +/* Private tag */ +# define ASN1_TFLG_PRIVATE (0x3<<6) + +# define ASN1_TFLG_TAG_CLASS (0x3<<6) + +/* + * These are for ANY DEFINED BY type. 
In this case the 'item' field points to + * an ASN1_ADB structure which contains a table of values to decode the + * relevant type + */ + +# define ASN1_TFLG_ADB_MASK (0x3<<8) + +# define ASN1_TFLG_ADB_OID (0x1<<8) + +# define ASN1_TFLG_ADB_INT (0x1<<9) + +/* + * This flag when present in a SEQUENCE OF, SET OF or EXPLICIT causes + * indefinite length constructed encoding to be used if required. + */ + +# define ASN1_TFLG_NDEF (0x1<<11) + +/* Field is embedded and not a pointer */ +# define ASN1_TFLG_EMBED (0x1 << 12) + +/* This is the actual ASN1 item itself */ + +struct ASN1_ITEM_st { + char itype; /* The item type, primitive, SEQUENCE, CHOICE + * or extern */ + long utype; /* underlying type */ + const ASN1_TEMPLATE *templates; /* If SEQUENCE or CHOICE this contains + * the contents */ + long tcount; /* Number of templates if SEQUENCE or CHOICE */ + const void *funcs; /* functions that handle this type */ + long size; /* Structure size (usually) */ + const char *sname; /* Structure name */ +}; + +/*- + * These are values for the itype field and + * determine how the type is interpreted. + * + * For PRIMITIVE types the underlying type + * determines the behaviour if items is NULL. + * + * Otherwise templates must contain a single + * template and the type is treated in the + * same way as the type specified in the template. + * + * For SEQUENCE types the templates field points + * to the members, the size field is the + * structure size. + * + * For CHOICE types the templates field points + * to each possible member (typically a union) + * and the 'size' field is the offset of the + * selector. + * + * The 'funcs' field is used for application + * specific functions. + * + * The EXTERN type uses a new style d2i/i2d. + * The new style should be used where possible + * because it avoids things like the d2i IMPLICIT + * hack. + * + * MSTRING is a multiple string type, it is used + * for a CHOICE of character strings where the + * actual strings all occupy an ASN1_STRING + * structure. In this case the 'utype' field + * has a special meaning, it is used as a mask + * of acceptable types using the B_ASN1 constants. + * + * NDEF_SEQUENCE is the same as SEQUENCE except + * that it will use indefinite length constructed + * encoding if requested. 
+ * + */ + +# define ASN1_ITYPE_PRIMITIVE 0x0 + +# define ASN1_ITYPE_SEQUENCE 0x1 + +# define ASN1_ITYPE_CHOICE 0x2 + +# define ASN1_ITYPE_EXTERN 0x4 + +# define ASN1_ITYPE_MSTRING 0x5 + +# define ASN1_ITYPE_NDEF_SEQUENCE 0x6 + +/* + * Cache for ASN1 tag and length, so we don't keep re-reading it for things + * like CHOICE + */ + +struct ASN1_TLC_st { + char valid; /* Values below are valid */ + int ret; /* return value */ + long plen; /* length */ + int ptag; /* class value */ + int pclass; /* class value */ + int hdrlen; /* header length */ +}; + +/* Typedefs for ASN1 function pointers */ +typedef int ASN1_ex_d2i(ASN1_VALUE **pval, const unsigned char **in, long len, + const ASN1_ITEM *it, int tag, int aclass, char opt, + ASN1_TLC *ctx); + +typedef int ASN1_ex_i2d(ASN1_VALUE **pval, unsigned char **out, + const ASN1_ITEM *it, int tag, int aclass); +typedef int ASN1_ex_new_func(ASN1_VALUE **pval, const ASN1_ITEM *it); +typedef void ASN1_ex_free_func(ASN1_VALUE **pval, const ASN1_ITEM *it); + +typedef int ASN1_ex_print_func(BIO *out, ASN1_VALUE **pval, + int indent, const char *fname, + const ASN1_PCTX *pctx); + +typedef int ASN1_primitive_i2c(ASN1_VALUE **pval, unsigned char *cont, + int *putype, const ASN1_ITEM *it); +typedef int ASN1_primitive_c2i(ASN1_VALUE **pval, const unsigned char *cont, + int len, int utype, char *free_cont, + const ASN1_ITEM *it); +typedef int ASN1_primitive_print(BIO *out, ASN1_VALUE **pval, + const ASN1_ITEM *it, int indent, + const ASN1_PCTX *pctx); + +typedef struct ASN1_EXTERN_FUNCS_st { + void *app_data; + ASN1_ex_new_func *asn1_ex_new; + ASN1_ex_free_func *asn1_ex_free; + ASN1_ex_free_func *asn1_ex_clear; + ASN1_ex_d2i *asn1_ex_d2i; + ASN1_ex_i2d *asn1_ex_i2d; + ASN1_ex_print_func *asn1_ex_print; +} ASN1_EXTERN_FUNCS; + +typedef struct ASN1_PRIMITIVE_FUNCS_st { + void *app_data; + unsigned long flags; + ASN1_ex_new_func *prim_new; + ASN1_ex_free_func *prim_free; + ASN1_ex_free_func *prim_clear; + ASN1_primitive_c2i *prim_c2i; + ASN1_primitive_i2c *prim_i2c; + ASN1_primitive_print *prim_print; +} ASN1_PRIMITIVE_FUNCS; + +/* + * This is the ASN1_AUX structure: it handles various miscellaneous + * requirements. For example the use of reference counts and an informational + * callback. The "informational callback" is called at various points during + * the ASN1 encoding and decoding. It can be used to provide minor + * customisation of the structures used. This is most useful where the + * supplied routines *almost* do the right thing but need some extra help at + * a few points. If the callback returns zero then it is assumed a fatal + * error has occurred and the main operation should be abandoned. If major + * changes in the default behaviour are required then an external type is + * more appropriate. 
+ */ + +typedef int ASN1_aux_cb(int operation, ASN1_VALUE **in, const ASN1_ITEM *it, + void *exarg); + +typedef struct ASN1_AUX_st { + void *app_data; + int flags; + int ref_offset; /* Offset of reference value */ + int ref_lock; /* Lock type to use */ + ASN1_aux_cb *asn1_cb; + int enc_offset; /* Offset of ASN1_ENCODING structure */ +} ASN1_AUX; + +/* For print related callbacks exarg points to this structure */ +typedef struct ASN1_PRINT_ARG_st { + BIO *out; + int indent; + const ASN1_PCTX *pctx; +} ASN1_PRINT_ARG; + +/* For streaming related callbacks exarg points to this structure */ +typedef struct ASN1_STREAM_ARG_st { + /* BIO to stream through */ + BIO *out; + /* BIO with filters appended */ + BIO *ndef_bio; + /* Streaming I/O boundary */ + unsigned char **boundary; +} ASN1_STREAM_ARG; + +/* Flags in ASN1_AUX */ + +/* Use a reference count */ +# define ASN1_AFLG_REFCOUNT 1 +/* Save the encoding of structure (useful for signatures) */ +# define ASN1_AFLG_ENCODING 2 +/* The Sequence length is invalid */ +# define ASN1_AFLG_BROKEN 4 + +/* operation values for asn1_cb */ + +# define ASN1_OP_NEW_PRE 0 +# define ASN1_OP_NEW_POST 1 +# define ASN1_OP_FREE_PRE 2 +# define ASN1_OP_FREE_POST 3 +# define ASN1_OP_D2I_PRE 4 +# define ASN1_OP_D2I_POST 5 +# define ASN1_OP_I2D_PRE 6 +# define ASN1_OP_I2D_POST 7 +# define ASN1_OP_PRINT_PRE 8 +# define ASN1_OP_PRINT_POST 9 +# define ASN1_OP_STREAM_PRE 10 +# define ASN1_OP_STREAM_POST 11 +# define ASN1_OP_DETACHED_PRE 12 +# define ASN1_OP_DETACHED_POST 13 + +/* Macro to implement a primitive type */ +# define IMPLEMENT_ASN1_TYPE(stname) IMPLEMENT_ASN1_TYPE_ex(stname, stname, 0) +# define IMPLEMENT_ASN1_TYPE_ex(itname, vname, ex) \ + ASN1_ITEM_start(itname) \ + ASN1_ITYPE_PRIMITIVE, V_##vname, NULL, 0, NULL, ex, #itname \ + ASN1_ITEM_end(itname) + +/* Macro to implement a multi string type */ +# define IMPLEMENT_ASN1_MSTRING(itname, mask) \ + ASN1_ITEM_start(itname) \ + ASN1_ITYPE_MSTRING, mask, NULL, 0, NULL, sizeof(ASN1_STRING), #itname \ + ASN1_ITEM_end(itname) + +# define IMPLEMENT_EXTERN_ASN1(sname, tag, fptrs) \ + ASN1_ITEM_start(sname) \ + ASN1_ITYPE_EXTERN, \ + tag, \ + NULL, \ + 0, \ + &fptrs, \ + 0, \ + #sname \ + ASN1_ITEM_end(sname) + +/* Macro to implement standard functions in terms of ASN1_ITEM structures */ + +# define IMPLEMENT_ASN1_FUNCTIONS(stname) IMPLEMENT_ASN1_FUNCTIONS_fname(stname, stname, stname) + +# define IMPLEMENT_ASN1_FUNCTIONS_name(stname, itname) IMPLEMENT_ASN1_FUNCTIONS_fname(stname, itname, itname) + +# define IMPLEMENT_ASN1_FUNCTIONS_ENCODE_name(stname, itname) \ + IMPLEMENT_ASN1_FUNCTIONS_ENCODE_fname(stname, itname, itname) + +# define IMPLEMENT_STATIC_ASN1_ALLOC_FUNCTIONS(stname) \ + IMPLEMENT_ASN1_ALLOC_FUNCTIONS_pfname(static, stname, stname, stname) + +# define IMPLEMENT_ASN1_ALLOC_FUNCTIONS(stname) \ + IMPLEMENT_ASN1_ALLOC_FUNCTIONS_fname(stname, stname, stname) + +# define IMPLEMENT_ASN1_ALLOC_FUNCTIONS_pfname(pre, stname, itname, fname) \ + pre stname *fname##_new(void) \ + { \ + return (stname *)ASN1_item_new(ASN1_ITEM_rptr(itname)); \ + } \ + pre void fname##_free(stname *a) \ + { \ + ASN1_item_free((ASN1_VALUE *)a, ASN1_ITEM_rptr(itname)); \ + } + +# define IMPLEMENT_ASN1_ALLOC_FUNCTIONS_fname(stname, itname, fname) \ + stname *fname##_new(void) \ + { \ + return (stname *)ASN1_item_new(ASN1_ITEM_rptr(itname)); \ + } \ + void fname##_free(stname *a) \ + { \ + ASN1_item_free((ASN1_VALUE *)a, ASN1_ITEM_rptr(itname)); \ + } + +# define IMPLEMENT_ASN1_FUNCTIONS_fname(stname, itname, fname) \ + 
IMPLEMENT_ASN1_ENCODE_FUNCTIONS_fname(stname, itname, fname) \ + IMPLEMENT_ASN1_ALLOC_FUNCTIONS_fname(stname, itname, fname) + +# define IMPLEMENT_ASN1_ENCODE_FUNCTIONS_fname(stname, itname, fname) \ + stname *d2i_##fname(stname **a, const unsigned char **in, long len) \ + { \ + return (stname *)ASN1_item_d2i((ASN1_VALUE **)a, in, len, ASN1_ITEM_rptr(itname));\ + } \ + int i2d_##fname(stname *a, unsigned char **out) \ + { \ + return ASN1_item_i2d((ASN1_VALUE *)a, out, ASN1_ITEM_rptr(itname));\ + } + +# define IMPLEMENT_ASN1_NDEF_FUNCTION(stname) \ + int i2d_##stname##_NDEF(stname *a, unsigned char **out) \ + { \ + return ASN1_item_ndef_i2d((ASN1_VALUE *)a, out, ASN1_ITEM_rptr(stname));\ + } + +# define IMPLEMENT_STATIC_ASN1_ENCODE_FUNCTIONS(stname) \ + static stname *d2i_##stname(stname **a, \ + const unsigned char **in, long len) \ + { \ + return (stname *)ASN1_item_d2i((ASN1_VALUE **)a, in, len, \ + ASN1_ITEM_rptr(stname)); \ + } \ + static int i2d_##stname(stname *a, unsigned char **out) \ + { \ + return ASN1_item_i2d((ASN1_VALUE *)a, out, \ + ASN1_ITEM_rptr(stname)); \ + } + +/* + * This includes evil casts to remove const: they will go away when full ASN1 + * constification is done. + */ +# define IMPLEMENT_ASN1_ENCODE_FUNCTIONS_const_fname(stname, itname, fname) \ + stname *d2i_##fname(stname **a, const unsigned char **in, long len) \ + { \ + return (stname *)ASN1_item_d2i((ASN1_VALUE **)a, in, len, ASN1_ITEM_rptr(itname));\ + } \ + int i2d_##fname(const stname *a, unsigned char **out) \ + { \ + return ASN1_item_i2d((ASN1_VALUE *)a, out, ASN1_ITEM_rptr(itname));\ + } + +# define IMPLEMENT_ASN1_DUP_FUNCTION(stname) \ + stname * stname##_dup(stname *x) \ + { \ + return ASN1_item_dup(ASN1_ITEM_rptr(stname), x); \ + } + +# define IMPLEMENT_ASN1_PRINT_FUNCTION(stname) \ + IMPLEMENT_ASN1_PRINT_FUNCTION_fname(stname, stname, stname) + +# define IMPLEMENT_ASN1_PRINT_FUNCTION_fname(stname, itname, fname) \ + int fname##_print_ctx(BIO *out, stname *x, int indent, \ + const ASN1_PCTX *pctx) \ + { \ + return ASN1_item_print(out, (ASN1_VALUE *)x, indent, \ + ASN1_ITEM_rptr(itname), pctx); \ + } + +# define IMPLEMENT_ASN1_FUNCTIONS_const(name) \ + IMPLEMENT_ASN1_FUNCTIONS_const_fname(name, name, name) + +# define IMPLEMENT_ASN1_FUNCTIONS_const_fname(stname, itname, fname) \ + IMPLEMENT_ASN1_ENCODE_FUNCTIONS_const_fname(stname, itname, fname) \ + IMPLEMENT_ASN1_ALLOC_FUNCTIONS_fname(stname, itname, fname) + +/* external definitions for primitive types */ + +DECLARE_ASN1_ITEM(ASN1_BOOLEAN) +DECLARE_ASN1_ITEM(ASN1_TBOOLEAN) +DECLARE_ASN1_ITEM(ASN1_FBOOLEAN) +DECLARE_ASN1_ITEM(ASN1_SEQUENCE) +DECLARE_ASN1_ITEM(CBIGNUM) +DECLARE_ASN1_ITEM(BIGNUM) +DECLARE_ASN1_ITEM(INT32) +DECLARE_ASN1_ITEM(ZINT32) +DECLARE_ASN1_ITEM(UINT32) +DECLARE_ASN1_ITEM(ZUINT32) +DECLARE_ASN1_ITEM(INT64) +DECLARE_ASN1_ITEM(ZINT64) +DECLARE_ASN1_ITEM(UINT64) +DECLARE_ASN1_ITEM(ZUINT64) + +# if OPENSSL_API_COMPAT < 0x10200000L +/* + * LONG and ZLONG are strongly discouraged for use as stored data, as the + * underlying C type (long) differs in size depending on the architecture. + * They are designed with 32-bit longs in mind. 
+ */ +DECLARE_ASN1_ITEM(LONG) +DECLARE_ASN1_ITEM(ZLONG) +# endif + +DEFINE_STACK_OF(ASN1_VALUE) + +/* Functions used internally by the ASN1 code */ + +int ASN1_item_ex_new(ASN1_VALUE **pval, const ASN1_ITEM *it); +void ASN1_item_ex_free(ASN1_VALUE **pval, const ASN1_ITEM *it); + +int ASN1_item_ex_d2i(ASN1_VALUE **pval, const unsigned char **in, long len, + const ASN1_ITEM *it, int tag, int aclass, char opt, + ASN1_TLC *ctx); + +int ASN1_item_ex_i2d(ASN1_VALUE **pval, unsigned char **out, + const ASN1_ITEM *it, int tag, int aclass); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/async.h b/third_party/openssl/uos/sw_64/include/openssl/async.h new file mode 100644 index 00000000..7052b890 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/async.h @@ -0,0 +1,76 @@ +/* + * Copyright 2015-2018 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#include <stdlib.h> + +#ifndef HEADER_ASYNC_H +# define HEADER_ASYNC_H + +#if defined(_WIN32) +# if defined(BASETYPES) || defined(_WINDEF_H) +/* application has to include <windows.h> to use this */ +#define OSSL_ASYNC_FD HANDLE +#define OSSL_BAD_ASYNC_FD INVALID_HANDLE_VALUE +# endif +#else +#define OSSL_ASYNC_FD int +#define OSSL_BAD_ASYNC_FD -1 +#endif +# include <openssl/asyncerr.h> + + +# ifdef __cplusplus +extern "C" { +# endif + +typedef struct async_job_st ASYNC_JOB; +typedef struct async_wait_ctx_st ASYNC_WAIT_CTX; + +#define ASYNC_ERR 0 +#define ASYNC_NO_JOBS 1 +#define ASYNC_PAUSE 2 +#define ASYNC_FINISH 3 + +int ASYNC_init_thread(size_t max_size, size_t init_size); +void ASYNC_cleanup_thread(void); + +#ifdef OSSL_ASYNC_FD +ASYNC_WAIT_CTX *ASYNC_WAIT_CTX_new(void); +void ASYNC_WAIT_CTX_free(ASYNC_WAIT_CTX *ctx); +int ASYNC_WAIT_CTX_set_wait_fd(ASYNC_WAIT_CTX *ctx, const void *key, + OSSL_ASYNC_FD fd, + void *custom_data, + void (*cleanup)(ASYNC_WAIT_CTX *, const void *, + OSSL_ASYNC_FD, void *)); +int ASYNC_WAIT_CTX_get_fd(ASYNC_WAIT_CTX *ctx, const void *key, + OSSL_ASYNC_FD *fd, void **custom_data); +int ASYNC_WAIT_CTX_get_all_fds(ASYNC_WAIT_CTX *ctx, OSSL_ASYNC_FD *fd, + size_t *numfds); +int ASYNC_WAIT_CTX_get_changed_fds(ASYNC_WAIT_CTX *ctx, OSSL_ASYNC_FD *addfd, + size_t *numaddfds, OSSL_ASYNC_FD *delfd, + size_t *numdelfds); +int ASYNC_WAIT_CTX_clear_fd(ASYNC_WAIT_CTX *ctx, const void *key); +#endif + +int ASYNC_is_capable(void); + +int ASYNC_start_job(ASYNC_JOB **job, ASYNC_WAIT_CTX *ctx, int *ret, + int (*func)(void *), void *args, size_t size); +int ASYNC_pause_job(void); + +ASYNC_JOB *ASYNC_get_current_job(void); +ASYNC_WAIT_CTX *ASYNC_get_wait_ctx(ASYNC_JOB *job); +void ASYNC_block_pause(void); +void ASYNC_unblock_pause(void); + + +# ifdef __cplusplus +} +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/asyncerr.h b/third_party/openssl/uos/sw_64/include/openssl/asyncerr.h new file mode 100644 index 00000000..91afbbb2 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/asyncerr.h @@ -0,0 +1,42 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License.
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_ASYNCERR_H +# define HEADER_ASYNCERR_H + +# ifndef HEADER_SYMHACKS_H +# include <openssl/symhacks.h> +# endif + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_ASYNC_strings(void); + +/* + * ASYNC function codes. + */ +# define ASYNC_F_ASYNC_CTX_NEW 100 +# define ASYNC_F_ASYNC_INIT_THREAD 101 +# define ASYNC_F_ASYNC_JOB_NEW 102 +# define ASYNC_F_ASYNC_PAUSE_JOB 103 +# define ASYNC_F_ASYNC_START_FUNC 104 +# define ASYNC_F_ASYNC_START_JOB 105 +# define ASYNC_F_ASYNC_WAIT_CTX_SET_WAIT_FD 106 + +/* + * ASYNC reason codes. + */ +# define ASYNC_R_FAILED_TO_SET_POOL 101 +# define ASYNC_R_FAILED_TO_SWAP_CONTEXT 102 +# define ASYNC_R_INIT_FAILED 105 +# define ASYNC_R_INVALID_POOL_SIZE 103 + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/bio.h b/third_party/openssl/uos/sw_64/include/openssl/bio.h new file mode 100644 index 00000000..ae559a51 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/bio.h @@ -0,0 +1,801 @@ +/* + * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_BIO_H +# define HEADER_BIO_H + +# include <openssl/e_os2.h> + +# ifndef OPENSSL_NO_STDIO +# include <stdio.h> +# endif +# include <stdarg.h> + +# include <openssl/crypto.h> +# include <openssl/bioerr.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/* There are the classes of BIOs */ +# define BIO_TYPE_DESCRIPTOR 0x0100 /* socket, fd, connect or accept */ +# define BIO_TYPE_FILTER 0x0200 +# define BIO_TYPE_SOURCE_SINK 0x0400 + +/* These are the 'types' of BIOs */ +# define BIO_TYPE_NONE 0 +# define BIO_TYPE_MEM ( 1|BIO_TYPE_SOURCE_SINK) +# define BIO_TYPE_FILE ( 2|BIO_TYPE_SOURCE_SINK) + +# define BIO_TYPE_FD ( 4|BIO_TYPE_SOURCE_SINK|BIO_TYPE_DESCRIPTOR) +# define BIO_TYPE_SOCKET ( 5|BIO_TYPE_SOURCE_SINK|BIO_TYPE_DESCRIPTOR) +# define BIO_TYPE_NULL ( 6|BIO_TYPE_SOURCE_SINK) +# define BIO_TYPE_SSL ( 7|BIO_TYPE_FILTER) +# define BIO_TYPE_MD ( 8|BIO_TYPE_FILTER) +# define BIO_TYPE_BUFFER ( 9|BIO_TYPE_FILTER) +# define BIO_TYPE_CIPHER (10|BIO_TYPE_FILTER) +# define BIO_TYPE_BASE64 (11|BIO_TYPE_FILTER) +# define BIO_TYPE_CONNECT (12|BIO_TYPE_SOURCE_SINK|BIO_TYPE_DESCRIPTOR) +# define BIO_TYPE_ACCEPT (13|BIO_TYPE_SOURCE_SINK|BIO_TYPE_DESCRIPTOR) + +# define BIO_TYPE_NBIO_TEST (16|BIO_TYPE_FILTER)/* server proxy BIO */ +# define BIO_TYPE_NULL_FILTER (17|BIO_TYPE_FILTER) +# define BIO_TYPE_BIO (19|BIO_TYPE_SOURCE_SINK)/* half a BIO pair */ +# define BIO_TYPE_LINEBUFFER (20|BIO_TYPE_FILTER) +# define BIO_TYPE_DGRAM (21|BIO_TYPE_SOURCE_SINK|BIO_TYPE_DESCRIPTOR) +# define BIO_TYPE_ASN1 (22|BIO_TYPE_FILTER) +# define BIO_TYPE_COMP (23|BIO_TYPE_FILTER) +# ifndef OPENSSL_NO_SCTP +# define BIO_TYPE_DGRAM_SCTP (24|BIO_TYPE_SOURCE_SINK|BIO_TYPE_DESCRIPTOR) +# endif + +#define BIO_TYPE_START 128 + +/* + * BIO_FILENAME_READ|BIO_CLOSE to open or close on free.
+ * BIO_set_fp(in,stdin,BIO_NOCLOSE); + */ +# define BIO_NOCLOSE 0x00 +# define BIO_CLOSE 0x01 + +/* + * These are used in the following macros and are passed to BIO_ctrl() + */ +# define BIO_CTRL_RESET 1/* opt - rewind/zero etc */ +# define BIO_CTRL_EOF 2/* opt - are we at the eof */ +# define BIO_CTRL_INFO 3/* opt - extra tit-bits */ +# define BIO_CTRL_SET 4/* man - set the 'IO' type */ +# define BIO_CTRL_GET 5/* man - get the 'IO' type */ +# define BIO_CTRL_PUSH 6/* opt - internal, used to signify change */ +# define BIO_CTRL_POP 7/* opt - internal, used to signify change */ +# define BIO_CTRL_GET_CLOSE 8/* man - set the 'close' on free */ +# define BIO_CTRL_SET_CLOSE 9/* man - set the 'close' on free */ +# define BIO_CTRL_PENDING 10/* opt - is their more data buffered */ +# define BIO_CTRL_FLUSH 11/* opt - 'flush' buffered output */ +# define BIO_CTRL_DUP 12/* man - extra stuff for 'duped' BIO */ +# define BIO_CTRL_WPENDING 13/* opt - number of bytes still to write */ +# define BIO_CTRL_SET_CALLBACK 14/* opt - set callback function */ +# define BIO_CTRL_GET_CALLBACK 15/* opt - set callback function */ + +# define BIO_CTRL_PEEK 29/* BIO_f_buffer special */ +# define BIO_CTRL_SET_FILENAME 30/* BIO_s_file special */ + +/* dgram BIO stuff */ +# define BIO_CTRL_DGRAM_CONNECT 31/* BIO dgram special */ +# define BIO_CTRL_DGRAM_SET_CONNECTED 32/* allow for an externally connected + * socket to be passed in */ +# define BIO_CTRL_DGRAM_SET_RECV_TIMEOUT 33/* setsockopt, essentially */ +# define BIO_CTRL_DGRAM_GET_RECV_TIMEOUT 34/* getsockopt, essentially */ +# define BIO_CTRL_DGRAM_SET_SEND_TIMEOUT 35/* setsockopt, essentially */ +# define BIO_CTRL_DGRAM_GET_SEND_TIMEOUT 36/* getsockopt, essentially */ + +# define BIO_CTRL_DGRAM_GET_RECV_TIMER_EXP 37/* flag whether the last */ +# define BIO_CTRL_DGRAM_GET_SEND_TIMER_EXP 38/* I/O operation tiemd out */ + +/* #ifdef IP_MTU_DISCOVER */ +# define BIO_CTRL_DGRAM_MTU_DISCOVER 39/* set DF bit on egress packets */ +/* #endif */ + +# define BIO_CTRL_DGRAM_QUERY_MTU 40/* as kernel for current MTU */ +# define BIO_CTRL_DGRAM_GET_FALLBACK_MTU 47 +# define BIO_CTRL_DGRAM_GET_MTU 41/* get cached value for MTU */ +# define BIO_CTRL_DGRAM_SET_MTU 42/* set cached value for MTU. 
+ * want to use this if asking + * the kernel fails */ + +# define BIO_CTRL_DGRAM_MTU_EXCEEDED 43/* check whether the MTU was + * exceed in the previous write + * operation */ + +# define BIO_CTRL_DGRAM_GET_PEER 46 +# define BIO_CTRL_DGRAM_SET_PEER 44/* Destination for the data */ + +# define BIO_CTRL_DGRAM_SET_NEXT_TIMEOUT 45/* Next DTLS handshake timeout + * to adjust socket timeouts */ +# define BIO_CTRL_DGRAM_SET_DONT_FRAG 48 + +# define BIO_CTRL_DGRAM_GET_MTU_OVERHEAD 49 + +/* Deliberately outside of OPENSSL_NO_SCTP - used in bss_dgram.c */ +# define BIO_CTRL_DGRAM_SCTP_SET_IN_HANDSHAKE 50 +# ifndef OPENSSL_NO_SCTP +/* SCTP stuff */ +# define BIO_CTRL_DGRAM_SCTP_ADD_AUTH_KEY 51 +# define BIO_CTRL_DGRAM_SCTP_NEXT_AUTH_KEY 52 +# define BIO_CTRL_DGRAM_SCTP_AUTH_CCS_RCVD 53 +# define BIO_CTRL_DGRAM_SCTP_GET_SNDINFO 60 +# define BIO_CTRL_DGRAM_SCTP_SET_SNDINFO 61 +# define BIO_CTRL_DGRAM_SCTP_GET_RCVINFO 62 +# define BIO_CTRL_DGRAM_SCTP_SET_RCVINFO 63 +# define BIO_CTRL_DGRAM_SCTP_GET_PRINFO 64 +# define BIO_CTRL_DGRAM_SCTP_SET_PRINFO 65 +# define BIO_CTRL_DGRAM_SCTP_SAVE_SHUTDOWN 70 +# endif + +# define BIO_CTRL_DGRAM_SET_PEEK_MODE 71 + +/* modifiers */ +# define BIO_FP_READ 0x02 +# define BIO_FP_WRITE 0x04 +# define BIO_FP_APPEND 0x08 +# define BIO_FP_TEXT 0x10 + +# define BIO_FLAGS_READ 0x01 +# define BIO_FLAGS_WRITE 0x02 +# define BIO_FLAGS_IO_SPECIAL 0x04 +# define BIO_FLAGS_RWS (BIO_FLAGS_READ|BIO_FLAGS_WRITE|BIO_FLAGS_IO_SPECIAL) +# define BIO_FLAGS_SHOULD_RETRY 0x08 +# ifndef BIO_FLAGS_UPLINK +/* + * "UPLINK" flag denotes file descriptors provided by application. It + * defaults to 0, as most platforms don't require UPLINK interface. + */ +# define BIO_FLAGS_UPLINK 0 +# endif + +# define BIO_FLAGS_BASE64_NO_NL 0x100 + +/* + * This is used with memory BIOs: + * BIO_FLAGS_MEM_RDONLY means we shouldn't free up or change the data in any way; + * BIO_FLAGS_NONCLEAR_RST means we shouldn't clear data on reset. + */ +# define BIO_FLAGS_MEM_RDONLY 0x200 +# define BIO_FLAGS_NONCLEAR_RST 0x400 +# define BIO_FLAGS_IN_EOF 0x800 + +typedef union bio_addr_st BIO_ADDR; +typedef struct bio_addrinfo_st BIO_ADDRINFO; + +int BIO_get_new_index(void); +void BIO_set_flags(BIO *b, int flags); +int BIO_test_flags(const BIO *b, int flags); +void BIO_clear_flags(BIO *b, int flags); + +# define BIO_get_flags(b) BIO_test_flags(b, ~(0x0)) +# define BIO_set_retry_special(b) \ + BIO_set_flags(b, (BIO_FLAGS_IO_SPECIAL|BIO_FLAGS_SHOULD_RETRY)) +# define BIO_set_retry_read(b) \ + BIO_set_flags(b, (BIO_FLAGS_READ|BIO_FLAGS_SHOULD_RETRY)) +# define BIO_set_retry_write(b) \ + BIO_set_flags(b, (BIO_FLAGS_WRITE|BIO_FLAGS_SHOULD_RETRY)) + +/* These are normally used internally in BIOs */ +# define BIO_clear_retry_flags(b) \ + BIO_clear_flags(b, (BIO_FLAGS_RWS|BIO_FLAGS_SHOULD_RETRY)) +# define BIO_get_retry_flags(b) \ + BIO_test_flags(b, (BIO_FLAGS_RWS|BIO_FLAGS_SHOULD_RETRY)) + +/* These should be used by the application to tell why we should retry */ +# define BIO_should_read(a) BIO_test_flags(a, BIO_FLAGS_READ) +# define BIO_should_write(a) BIO_test_flags(a, BIO_FLAGS_WRITE) +# define BIO_should_io_special(a) BIO_test_flags(a, BIO_FLAGS_IO_SPECIAL) +# define BIO_retry_type(a) BIO_test_flags(a, BIO_FLAGS_RWS) +# define BIO_should_retry(a) BIO_test_flags(a, BIO_FLAGS_SHOULD_RETRY) + +/* + * The next three are used in conjunction with the BIO_should_io_special() + * condition. 
After this returns true, BIO *BIO_get_retry_BIO(BIO *bio, int + * *reason); will walk the BIO stack and return the 'reason' for the special + * and the offending BIO. Given a BIO, BIO_get_retry_reason(bio) will return + * the code. + */ +/* + * Returned from the SSL bio when the certificate retrieval code had an error + */ +# define BIO_RR_SSL_X509_LOOKUP 0x01 +/* Returned from the connect BIO when a connect would have blocked */ +# define BIO_RR_CONNECT 0x02 +/* Returned from the accept BIO when an accept would have blocked */ +# define BIO_RR_ACCEPT 0x03 + +/* These are passed by the BIO callback */ +# define BIO_CB_FREE 0x01 +# define BIO_CB_READ 0x02 +# define BIO_CB_WRITE 0x03 +# define BIO_CB_PUTS 0x04 +# define BIO_CB_GETS 0x05 +# define BIO_CB_CTRL 0x06 + +/* + * The callback is called before and after the underling operation, The + * BIO_CB_RETURN flag indicates if it is after the call + */ +# define BIO_CB_RETURN 0x80 +# define BIO_CB_return(a) ((a)|BIO_CB_RETURN) +# define BIO_cb_pre(a) (!((a)&BIO_CB_RETURN)) +# define BIO_cb_post(a) ((a)&BIO_CB_RETURN) + +typedef long (*BIO_callback_fn)(BIO *b, int oper, const char *argp, int argi, + long argl, long ret); +typedef long (*BIO_callback_fn_ex)(BIO *b, int oper, const char *argp, + size_t len, int argi, + long argl, int ret, size_t *processed); +BIO_callback_fn BIO_get_callback(const BIO *b); +void BIO_set_callback(BIO *b, BIO_callback_fn callback); + +BIO_callback_fn_ex BIO_get_callback_ex(const BIO *b); +void BIO_set_callback_ex(BIO *b, BIO_callback_fn_ex callback); + +char *BIO_get_callback_arg(const BIO *b); +void BIO_set_callback_arg(BIO *b, char *arg); + +typedef struct bio_method_st BIO_METHOD; + +const char *BIO_method_name(const BIO *b); +int BIO_method_type(const BIO *b); + +typedef int BIO_info_cb(BIO *, int, int); +typedef BIO_info_cb bio_info_cb; /* backward compatibility */ + +DEFINE_STACK_OF(BIO) + +/* Prefix and suffix callback in ASN1 BIO */ +typedef int asn1_ps_func (BIO *b, unsigned char **pbuf, int *plen, + void *parg); + +# ifndef OPENSSL_NO_SCTP +/* SCTP parameter structs */ +struct bio_dgram_sctp_sndinfo { + uint16_t snd_sid; + uint16_t snd_flags; + uint32_t snd_ppid; + uint32_t snd_context; +}; + +struct bio_dgram_sctp_rcvinfo { + uint16_t rcv_sid; + uint16_t rcv_ssn; + uint16_t rcv_flags; + uint32_t rcv_ppid; + uint32_t rcv_tsn; + uint32_t rcv_cumtsn; + uint32_t rcv_context; +}; + +struct bio_dgram_sctp_prinfo { + uint16_t pr_policy; + uint32_t pr_value; +}; +# endif + +/* + * #define BIO_CONN_get_param_hostname BIO_ctrl + */ + +# define BIO_C_SET_CONNECT 100 +# define BIO_C_DO_STATE_MACHINE 101 +# define BIO_C_SET_NBIO 102 +/* # define BIO_C_SET_PROXY_PARAM 103 */ +# define BIO_C_SET_FD 104 +# define BIO_C_GET_FD 105 +# define BIO_C_SET_FILE_PTR 106 +# define BIO_C_GET_FILE_PTR 107 +# define BIO_C_SET_FILENAME 108 +# define BIO_C_SET_SSL 109 +# define BIO_C_GET_SSL 110 +# define BIO_C_SET_MD 111 +# define BIO_C_GET_MD 112 +# define BIO_C_GET_CIPHER_STATUS 113 +# define BIO_C_SET_BUF_MEM 114 +# define BIO_C_GET_BUF_MEM_PTR 115 +# define BIO_C_GET_BUFF_NUM_LINES 116 +# define BIO_C_SET_BUFF_SIZE 117 +# define BIO_C_SET_ACCEPT 118 +# define BIO_C_SSL_MODE 119 +# define BIO_C_GET_MD_CTX 120 +/* # define BIO_C_GET_PROXY_PARAM 121 */ +# define BIO_C_SET_BUFF_READ_DATA 122/* data to read first */ +# define BIO_C_GET_CONNECT 123 +# define BIO_C_GET_ACCEPT 124 +# define BIO_C_SET_SSL_RENEGOTIATE_BYTES 125 +# define BIO_C_GET_SSL_NUM_RENEGOTIATES 126 +# define BIO_C_SET_SSL_RENEGOTIATE_TIMEOUT 127 +# define 
BIO_C_FILE_SEEK 128 +# define BIO_C_GET_CIPHER_CTX 129 +# define BIO_C_SET_BUF_MEM_EOF_RETURN 130/* return end of input + * value */ +# define BIO_C_SET_BIND_MODE 131 +# define BIO_C_GET_BIND_MODE 132 +# define BIO_C_FILE_TELL 133 +# define BIO_C_GET_SOCKS 134 +# define BIO_C_SET_SOCKS 135 + +# define BIO_C_SET_WRITE_BUF_SIZE 136/* for BIO_s_bio */ +# define BIO_C_GET_WRITE_BUF_SIZE 137 +# define BIO_C_MAKE_BIO_PAIR 138 +# define BIO_C_DESTROY_BIO_PAIR 139 +# define BIO_C_GET_WRITE_GUARANTEE 140 +# define BIO_C_GET_READ_REQUEST 141 +# define BIO_C_SHUTDOWN_WR 142 +# define BIO_C_NREAD0 143 +# define BIO_C_NREAD 144 +# define BIO_C_NWRITE0 145 +# define BIO_C_NWRITE 146 +# define BIO_C_RESET_READ_REQUEST 147 +# define BIO_C_SET_MD_CTX 148 + +# define BIO_C_SET_PREFIX 149 +# define BIO_C_GET_PREFIX 150 +# define BIO_C_SET_SUFFIX 151 +# define BIO_C_GET_SUFFIX 152 + +# define BIO_C_SET_EX_ARG 153 +# define BIO_C_GET_EX_ARG 154 + +# define BIO_C_SET_CONNECT_MODE 155 + +# define BIO_set_app_data(s,arg) BIO_set_ex_data(s,0,arg) +# define BIO_get_app_data(s) BIO_get_ex_data(s,0) + +# define BIO_set_nbio(b,n) BIO_ctrl(b,BIO_C_SET_NBIO,(n),NULL) + +# ifndef OPENSSL_NO_SOCK +/* IP families we support, for BIO_s_connect() and BIO_s_accept() */ +/* Note: the underlying operating system may not support some of them */ +# define BIO_FAMILY_IPV4 4 +# define BIO_FAMILY_IPV6 6 +# define BIO_FAMILY_IPANY 256 + +/* BIO_s_connect() */ +# define BIO_set_conn_hostname(b,name) BIO_ctrl(b,BIO_C_SET_CONNECT,0, \ + (char *)(name)) +# define BIO_set_conn_port(b,port) BIO_ctrl(b,BIO_C_SET_CONNECT,1, \ + (char *)(port)) +# define BIO_set_conn_address(b,addr) BIO_ctrl(b,BIO_C_SET_CONNECT,2, \ + (char *)(addr)) +# define BIO_set_conn_ip_family(b,f) BIO_int_ctrl(b,BIO_C_SET_CONNECT,3,f) +# define BIO_get_conn_hostname(b) ((const char *)BIO_ptr_ctrl(b,BIO_C_GET_CONNECT,0)) +# define BIO_get_conn_port(b) ((const char *)BIO_ptr_ctrl(b,BIO_C_GET_CONNECT,1)) +# define BIO_get_conn_address(b) ((const BIO_ADDR *)BIO_ptr_ctrl(b,BIO_C_GET_CONNECT,2)) +# define BIO_get_conn_ip_family(b) BIO_ctrl(b,BIO_C_GET_CONNECT,3,NULL) +# define BIO_set_conn_mode(b,n) BIO_ctrl(b,BIO_C_SET_CONNECT_MODE,(n),NULL) + +/* BIO_s_accept() */ +# define BIO_set_accept_name(b,name) BIO_ctrl(b,BIO_C_SET_ACCEPT,0, \ + (char *)(name)) +# define BIO_set_accept_port(b,port) BIO_ctrl(b,BIO_C_SET_ACCEPT,1, \ + (char *)(port)) +# define BIO_get_accept_name(b) ((const char *)BIO_ptr_ctrl(b,BIO_C_GET_ACCEPT,0)) +# define BIO_get_accept_port(b) ((const char *)BIO_ptr_ctrl(b,BIO_C_GET_ACCEPT,1)) +# define BIO_get_peer_name(b) ((const char *)BIO_ptr_ctrl(b,BIO_C_GET_ACCEPT,2)) +# define BIO_get_peer_port(b) ((const char *)BIO_ptr_ctrl(b,BIO_C_GET_ACCEPT,3)) +/* #define BIO_set_nbio(b,n) BIO_ctrl(b,BIO_C_SET_NBIO,(n),NULL) */ +# define BIO_set_nbio_accept(b,n) BIO_ctrl(b,BIO_C_SET_ACCEPT,2,(n)?(void *)"a":NULL) +# define BIO_set_accept_bios(b,bio) BIO_ctrl(b,BIO_C_SET_ACCEPT,3, \ + (char *)(bio)) +# define BIO_set_accept_ip_family(b,f) BIO_int_ctrl(b,BIO_C_SET_ACCEPT,4,f) +# define BIO_get_accept_ip_family(b) BIO_ctrl(b,BIO_C_GET_ACCEPT,4,NULL) + +/* Aliases kept for backward compatibility */ +# define BIO_BIND_NORMAL 0 +# define BIO_BIND_REUSEADDR BIO_SOCK_REUSEADDR +# define BIO_BIND_REUSEADDR_IF_UNUSED BIO_SOCK_REUSEADDR +# define BIO_set_bind_mode(b,mode) BIO_ctrl(b,BIO_C_SET_BIND_MODE,mode,NULL) +# define BIO_get_bind_mode(b) BIO_ctrl(b,BIO_C_GET_BIND_MODE,0,NULL) + +/* BIO_s_accept() and BIO_s_connect() */ +# define BIO_do_connect(b) BIO_do_handshake(b) +# 
define BIO_do_accept(b) BIO_do_handshake(b) +# endif /* OPENSSL_NO_SOCK */ + +# define BIO_do_handshake(b) BIO_ctrl(b,BIO_C_DO_STATE_MACHINE,0,NULL) + +/* BIO_s_datagram(), BIO_s_fd(), BIO_s_socket(), BIO_s_accept() and BIO_s_connect() */ +# define BIO_set_fd(b,fd,c) BIO_int_ctrl(b,BIO_C_SET_FD,c,fd) +# define BIO_get_fd(b,c) BIO_ctrl(b,BIO_C_GET_FD,0,(char *)(c)) + +/* BIO_s_file() */ +# define BIO_set_fp(b,fp,c) BIO_ctrl(b,BIO_C_SET_FILE_PTR,c,(char *)(fp)) +# define BIO_get_fp(b,fpp) BIO_ctrl(b,BIO_C_GET_FILE_PTR,0,(char *)(fpp)) + +/* BIO_s_fd() and BIO_s_file() */ +# define BIO_seek(b,ofs) (int)BIO_ctrl(b,BIO_C_FILE_SEEK,ofs,NULL) +# define BIO_tell(b) (int)BIO_ctrl(b,BIO_C_FILE_TELL,0,NULL) + +/* + * name is cast to lose const, but might be better to route through a + * function so we can do it safely + */ +# ifdef CONST_STRICT +/* + * If you are wondering why this isn't defined, its because CONST_STRICT is + * purely a compile-time kludge to allow const to be checked. + */ +int BIO_read_filename(BIO *b, const char *name); +# else +# define BIO_read_filename(b,name) (int)BIO_ctrl(b,BIO_C_SET_FILENAME, \ + BIO_CLOSE|BIO_FP_READ,(char *)(name)) +# endif +# define BIO_write_filename(b,name) (int)BIO_ctrl(b,BIO_C_SET_FILENAME, \ + BIO_CLOSE|BIO_FP_WRITE,name) +# define BIO_append_filename(b,name) (int)BIO_ctrl(b,BIO_C_SET_FILENAME, \ + BIO_CLOSE|BIO_FP_APPEND,name) +# define BIO_rw_filename(b,name) (int)BIO_ctrl(b,BIO_C_SET_FILENAME, \ + BIO_CLOSE|BIO_FP_READ|BIO_FP_WRITE,name) + +/* + * WARNING WARNING, this ups the reference count on the read bio of the SSL + * structure. This is because the ssl read BIO is now pointed to by the + * next_bio field in the bio. So when you free the BIO, make sure you are + * doing a BIO_free_all() to catch the underlying BIO. 
+ */ +# define BIO_set_ssl(b,ssl,c) BIO_ctrl(b,BIO_C_SET_SSL,c,(char *)(ssl)) +# define BIO_get_ssl(b,sslp) BIO_ctrl(b,BIO_C_GET_SSL,0,(char *)(sslp)) +# define BIO_set_ssl_mode(b,client) BIO_ctrl(b,BIO_C_SSL_MODE,client,NULL) +# define BIO_set_ssl_renegotiate_bytes(b,num) \ + BIO_ctrl(b,BIO_C_SET_SSL_RENEGOTIATE_BYTES,num,NULL) +# define BIO_get_num_renegotiates(b) \ + BIO_ctrl(b,BIO_C_GET_SSL_NUM_RENEGOTIATES,0,NULL) +# define BIO_set_ssl_renegotiate_timeout(b,seconds) \ + BIO_ctrl(b,BIO_C_SET_SSL_RENEGOTIATE_TIMEOUT,seconds,NULL) + +/* defined in evp.h */ +/* #define BIO_set_md(b,md) BIO_ctrl(b,BIO_C_SET_MD,1,(char *)(md)) */ + +# define BIO_get_mem_data(b,pp) BIO_ctrl(b,BIO_CTRL_INFO,0,(char *)(pp)) +# define BIO_set_mem_buf(b,bm,c) BIO_ctrl(b,BIO_C_SET_BUF_MEM,c,(char *)(bm)) +# define BIO_get_mem_ptr(b,pp) BIO_ctrl(b,BIO_C_GET_BUF_MEM_PTR,0, \ + (char *)(pp)) +# define BIO_set_mem_eof_return(b,v) \ + BIO_ctrl(b,BIO_C_SET_BUF_MEM_EOF_RETURN,v,NULL) + +/* For the BIO_f_buffer() type */ +# define BIO_get_buffer_num_lines(b) BIO_ctrl(b,BIO_C_GET_BUFF_NUM_LINES,0,NULL) +# define BIO_set_buffer_size(b,size) BIO_ctrl(b,BIO_C_SET_BUFF_SIZE,size,NULL) +# define BIO_set_read_buffer_size(b,size) BIO_int_ctrl(b,BIO_C_SET_BUFF_SIZE,size,0) +# define BIO_set_write_buffer_size(b,size) BIO_int_ctrl(b,BIO_C_SET_BUFF_SIZE,size,1) +# define BIO_set_buffer_read_data(b,buf,num) BIO_ctrl(b,BIO_C_SET_BUFF_READ_DATA,num,buf) + +/* Don't use the next one unless you know what you are doing :-) */ +# define BIO_dup_state(b,ret) BIO_ctrl(b,BIO_CTRL_DUP,0,(char *)(ret)) + +# define BIO_reset(b) (int)BIO_ctrl(b,BIO_CTRL_RESET,0,NULL) +# define BIO_eof(b) (int)BIO_ctrl(b,BIO_CTRL_EOF,0,NULL) +# define BIO_set_close(b,c) (int)BIO_ctrl(b,BIO_CTRL_SET_CLOSE,(c),NULL) +# define BIO_get_close(b) (int)BIO_ctrl(b,BIO_CTRL_GET_CLOSE,0,NULL) +# define BIO_pending(b) (int)BIO_ctrl(b,BIO_CTRL_PENDING,0,NULL) +# define BIO_wpending(b) (int)BIO_ctrl(b,BIO_CTRL_WPENDING,0,NULL) +/* ...pending macros have inappropriate return type */ +size_t BIO_ctrl_pending(BIO *b); +size_t BIO_ctrl_wpending(BIO *b); +# define BIO_flush(b) (int)BIO_ctrl(b,BIO_CTRL_FLUSH,0,NULL) +# define BIO_get_info_callback(b,cbp) (int)BIO_ctrl(b,BIO_CTRL_GET_CALLBACK,0, \ + cbp) +# define BIO_set_info_callback(b,cb) (int)BIO_callback_ctrl(b,BIO_CTRL_SET_CALLBACK,cb) + +/* For the BIO_f_buffer() type */ +# define BIO_buffer_get_num_lines(b) BIO_ctrl(b,BIO_CTRL_GET,0,NULL) +# define BIO_buffer_peek(b,s,l) BIO_ctrl(b,BIO_CTRL_PEEK,(l),(s)) + +/* For BIO_s_bio() */ +# define BIO_set_write_buf_size(b,size) (int)BIO_ctrl(b,BIO_C_SET_WRITE_BUF_SIZE,size,NULL) +# define BIO_get_write_buf_size(b,size) (size_t)BIO_ctrl(b,BIO_C_GET_WRITE_BUF_SIZE,size,NULL) +# define BIO_make_bio_pair(b1,b2) (int)BIO_ctrl(b1,BIO_C_MAKE_BIO_PAIR,0,b2) +# define BIO_destroy_bio_pair(b) (int)BIO_ctrl(b,BIO_C_DESTROY_BIO_PAIR,0,NULL) +# define BIO_shutdown_wr(b) (int)BIO_ctrl(b, BIO_C_SHUTDOWN_WR, 0, NULL) +/* macros with inappropriate type -- but ...pending macros use int too: */ +# define BIO_get_write_guarantee(b) (int)BIO_ctrl(b,BIO_C_GET_WRITE_GUARANTEE,0,NULL) +# define BIO_get_read_request(b) (int)BIO_ctrl(b,BIO_C_GET_READ_REQUEST,0,NULL) +size_t BIO_ctrl_get_write_guarantee(BIO *b); +size_t BIO_ctrl_get_read_request(BIO *b); +int BIO_ctrl_reset_read_request(BIO *b); + +/* ctrl macros for dgram */ +# define BIO_ctrl_dgram_connect(b,peer) \ + (int)BIO_ctrl(b,BIO_CTRL_DGRAM_CONNECT,0, (char *)(peer)) +# define BIO_ctrl_set_connected(b,peer) \ + (int)BIO_ctrl(b, 
BIO_CTRL_DGRAM_SET_CONNECTED, 0, (char *)(peer)) +# define BIO_dgram_recv_timedout(b) \ + (int)BIO_ctrl(b, BIO_CTRL_DGRAM_GET_RECV_TIMER_EXP, 0, NULL) +# define BIO_dgram_send_timedout(b) \ + (int)BIO_ctrl(b, BIO_CTRL_DGRAM_GET_SEND_TIMER_EXP, 0, NULL) +# define BIO_dgram_get_peer(b,peer) \ + (int)BIO_ctrl(b, BIO_CTRL_DGRAM_GET_PEER, 0, (char *)(peer)) +# define BIO_dgram_set_peer(b,peer) \ + (int)BIO_ctrl(b, BIO_CTRL_DGRAM_SET_PEER, 0, (char *)(peer)) +# define BIO_dgram_get_mtu_overhead(b) \ + (unsigned int)BIO_ctrl((b), BIO_CTRL_DGRAM_GET_MTU_OVERHEAD, 0, NULL) + +#define BIO_get_ex_new_index(l, p, newf, dupf, freef) \ + CRYPTO_get_ex_new_index(CRYPTO_EX_INDEX_BIO, l, p, newf, dupf, freef) +int BIO_set_ex_data(BIO *bio, int idx, void *data); +void *BIO_get_ex_data(BIO *bio, int idx); +uint64_t BIO_number_read(BIO *bio); +uint64_t BIO_number_written(BIO *bio); + +/* For BIO_f_asn1() */ +int BIO_asn1_set_prefix(BIO *b, asn1_ps_func *prefix, + asn1_ps_func *prefix_free); +int BIO_asn1_get_prefix(BIO *b, asn1_ps_func **pprefix, + asn1_ps_func **pprefix_free); +int BIO_asn1_set_suffix(BIO *b, asn1_ps_func *suffix, + asn1_ps_func *suffix_free); +int BIO_asn1_get_suffix(BIO *b, asn1_ps_func **psuffix, + asn1_ps_func **psuffix_free); + +const BIO_METHOD *BIO_s_file(void); +BIO *BIO_new_file(const char *filename, const char *mode); +# ifndef OPENSSL_NO_STDIO +BIO *BIO_new_fp(FILE *stream, int close_flag); +# endif +BIO *BIO_new(const BIO_METHOD *type); +int BIO_free(BIO *a); +void BIO_set_data(BIO *a, void *ptr); +void *BIO_get_data(BIO *a); +void BIO_set_init(BIO *a, int init); +int BIO_get_init(BIO *a); +void BIO_set_shutdown(BIO *a, int shut); +int BIO_get_shutdown(BIO *a); +void BIO_vfree(BIO *a); +int BIO_up_ref(BIO *a); +int BIO_read(BIO *b, void *data, int dlen); +int BIO_read_ex(BIO *b, void *data, size_t dlen, size_t *readbytes); +int BIO_gets(BIO *bp, char *buf, int size); +int BIO_write(BIO *b, const void *data, int dlen); +int BIO_write_ex(BIO *b, const void *data, size_t dlen, size_t *written); +int BIO_puts(BIO *bp, const char *buf); +int BIO_indent(BIO *b, int indent, int max); +long BIO_ctrl(BIO *bp, int cmd, long larg, void *parg); +long BIO_callback_ctrl(BIO *b, int cmd, BIO_info_cb *fp); +void *BIO_ptr_ctrl(BIO *bp, int cmd, long larg); +long BIO_int_ctrl(BIO *bp, int cmd, long larg, int iarg); +BIO *BIO_push(BIO *b, BIO *append); +BIO *BIO_pop(BIO *b); +void BIO_free_all(BIO *a); +BIO *BIO_find_type(BIO *b, int bio_type); +BIO *BIO_next(BIO *b); +void BIO_set_next(BIO *b, BIO *next); +BIO *BIO_get_retry_BIO(BIO *bio, int *reason); +int BIO_get_retry_reason(BIO *bio); +void BIO_set_retry_reason(BIO *bio, int reason); +BIO *BIO_dup_chain(BIO *in); + +int BIO_nread0(BIO *bio, char **buf); +int BIO_nread(BIO *bio, char **buf, int num); +int BIO_nwrite0(BIO *bio, char **buf); +int BIO_nwrite(BIO *bio, char **buf, int num); + +long BIO_debug_callback(BIO *bio, int cmd, const char *argp, int argi, + long argl, long ret); + +const BIO_METHOD *BIO_s_mem(void); +const BIO_METHOD *BIO_s_secmem(void); +BIO *BIO_new_mem_buf(const void *buf, int len); +# ifndef OPENSSL_NO_SOCK +const BIO_METHOD *BIO_s_socket(void); +const BIO_METHOD *BIO_s_connect(void); +const BIO_METHOD *BIO_s_accept(void); +# endif +const BIO_METHOD *BIO_s_fd(void); +const BIO_METHOD *BIO_s_log(void); +const BIO_METHOD *BIO_s_bio(void); +const BIO_METHOD *BIO_s_null(void); +const BIO_METHOD *BIO_f_null(void); +const BIO_METHOD *BIO_f_buffer(void); +const BIO_METHOD *BIO_f_linebuffer(void); +const BIO_METHOD 
*BIO_f_nbio_test(void); +# ifndef OPENSSL_NO_DGRAM +const BIO_METHOD *BIO_s_datagram(void); +int BIO_dgram_non_fatal_error(int error); +BIO *BIO_new_dgram(int fd, int close_flag); +# ifndef OPENSSL_NO_SCTP +const BIO_METHOD *BIO_s_datagram_sctp(void); +BIO *BIO_new_dgram_sctp(int fd, int close_flag); +int BIO_dgram_is_sctp(BIO *bio); +int BIO_dgram_sctp_notification_cb(BIO *b, + void (*handle_notifications) (BIO *bio, + void *context, + void *buf), + void *context); +int BIO_dgram_sctp_wait_for_dry(BIO *b); +int BIO_dgram_sctp_msg_waiting(BIO *b); +# endif +# endif + +# ifndef OPENSSL_NO_SOCK +int BIO_sock_should_retry(int i); +int BIO_sock_non_fatal_error(int error); +# endif + +int BIO_fd_should_retry(int i); +int BIO_fd_non_fatal_error(int error); +int BIO_dump_cb(int (*cb) (const void *data, size_t len, void *u), + void *u, const char *s, int len); +int BIO_dump_indent_cb(int (*cb) (const void *data, size_t len, void *u), + void *u, const char *s, int len, int indent); +int BIO_dump(BIO *b, const char *bytes, int len); +int BIO_dump_indent(BIO *b, const char *bytes, int len, int indent); +# ifndef OPENSSL_NO_STDIO +int BIO_dump_fp(FILE *fp, const char *s, int len); +int BIO_dump_indent_fp(FILE *fp, const char *s, int len, int indent); +# endif +int BIO_hex_string(BIO *out, int indent, int width, unsigned char *data, + int datalen); + +# ifndef OPENSSL_NO_SOCK +BIO_ADDR *BIO_ADDR_new(void); +int BIO_ADDR_rawmake(BIO_ADDR *ap, int family, + const void *where, size_t wherelen, unsigned short port); +void BIO_ADDR_free(BIO_ADDR *); +void BIO_ADDR_clear(BIO_ADDR *ap); +int BIO_ADDR_family(const BIO_ADDR *ap); +int BIO_ADDR_rawaddress(const BIO_ADDR *ap, void *p, size_t *l); +unsigned short BIO_ADDR_rawport(const BIO_ADDR *ap); +char *BIO_ADDR_hostname_string(const BIO_ADDR *ap, int numeric); +char *BIO_ADDR_service_string(const BIO_ADDR *ap, int numeric); +char *BIO_ADDR_path_string(const BIO_ADDR *ap); + +const BIO_ADDRINFO *BIO_ADDRINFO_next(const BIO_ADDRINFO *bai); +int BIO_ADDRINFO_family(const BIO_ADDRINFO *bai); +int BIO_ADDRINFO_socktype(const BIO_ADDRINFO *bai); +int BIO_ADDRINFO_protocol(const BIO_ADDRINFO *bai); +const BIO_ADDR *BIO_ADDRINFO_address(const BIO_ADDRINFO *bai); +void BIO_ADDRINFO_free(BIO_ADDRINFO *bai); + +enum BIO_hostserv_priorities { + BIO_PARSE_PRIO_HOST, BIO_PARSE_PRIO_SERV +}; +int BIO_parse_hostserv(const char *hostserv, char **host, char **service, + enum BIO_hostserv_priorities hostserv_prio); +enum BIO_lookup_type { + BIO_LOOKUP_CLIENT, BIO_LOOKUP_SERVER +}; +int BIO_lookup(const char *host, const char *service, + enum BIO_lookup_type lookup_type, + int family, int socktype, BIO_ADDRINFO **res); +int BIO_lookup_ex(const char *host, const char *service, + int lookup_type, int family, int socktype, int protocol, + BIO_ADDRINFO **res); +int BIO_sock_error(int sock); +int BIO_socket_ioctl(int fd, long type, void *arg); +int BIO_socket_nbio(int fd, int mode); +int BIO_sock_init(void); +# if OPENSSL_API_COMPAT < 0x10100000L +# define BIO_sock_cleanup() while(0) continue +# endif +int BIO_set_tcp_ndelay(int sock, int turn_on); + +DEPRECATEDIN_1_1_0(struct hostent *BIO_gethostbyname(const char *name)) +DEPRECATEDIN_1_1_0(int BIO_get_port(const char *str, unsigned short *port_ptr)) +DEPRECATEDIN_1_1_0(int BIO_get_host_ip(const char *str, unsigned char *ip)) +DEPRECATEDIN_1_1_0(int BIO_get_accept_socket(char *host_port, int mode)) +DEPRECATEDIN_1_1_0(int BIO_accept(int sock, char **ip_port)) + +union BIO_sock_info_u { + BIO_ADDR *addr; +}; +enum BIO_sock_info_type 
{ + BIO_SOCK_INFO_ADDRESS +}; +int BIO_sock_info(int sock, + enum BIO_sock_info_type type, union BIO_sock_info_u *info); + +# define BIO_SOCK_REUSEADDR 0x01 +# define BIO_SOCK_V6_ONLY 0x02 +# define BIO_SOCK_KEEPALIVE 0x04 +# define BIO_SOCK_NONBLOCK 0x08 +# define BIO_SOCK_NODELAY 0x10 + +int BIO_socket(int domain, int socktype, int protocol, int options); +int BIO_connect(int sock, const BIO_ADDR *addr, int options); +int BIO_bind(int sock, const BIO_ADDR *addr, int options); +int BIO_listen(int sock, const BIO_ADDR *addr, int options); +int BIO_accept_ex(int accept_sock, BIO_ADDR *addr, int options); +int BIO_closesocket(int sock); + +BIO *BIO_new_socket(int sock, int close_flag); +BIO *BIO_new_connect(const char *host_port); +BIO *BIO_new_accept(const char *host_port); +# endif /* OPENSSL_NO_SOCK*/ + +BIO *BIO_new_fd(int fd, int close_flag); + +int BIO_new_bio_pair(BIO **bio1, size_t writebuf1, + BIO **bio2, size_t writebuf2); +/* + * If successful, returns 1 and in *bio1, *bio2 two BIO pair endpoints. + * Otherwise returns 0 and sets *bio1 and *bio2 to NULL. Size 0 uses default + * value. + */ + +void BIO_copy_next_retry(BIO *b); + +/* + * long BIO_ghbn_ctrl(int cmd,int iarg,char *parg); + */ + +# define ossl_bio__attr__(x) +# if defined(__GNUC__) && defined(__STDC_VERSION__) \ + && !defined(__APPLE__) + /* + * Because we support the 'z' modifier, which made its appearance in C99, + * we can't use __attribute__ with pre C99 dialects. + */ +# if __STDC_VERSION__ >= 199901L +# undef ossl_bio__attr__ +# define ossl_bio__attr__ __attribute__ +# if __GNUC__*10 + __GNUC_MINOR__ >= 44 +# define ossl_bio__printf__ __gnu_printf__ +# else +# define ossl_bio__printf__ __printf__ +# endif +# endif +# endif +int BIO_printf(BIO *bio, const char *format, ...) +ossl_bio__attr__((__format__(ossl_bio__printf__, 2, 3))); +int BIO_vprintf(BIO *bio, const char *format, va_list args) +ossl_bio__attr__((__format__(ossl_bio__printf__, 2, 0))); +int BIO_snprintf(char *buf, size_t n, const char *format, ...) 
+ossl_bio__attr__((__format__(ossl_bio__printf__, 3, 4))); +int BIO_vsnprintf(char *buf, size_t n, const char *format, va_list args) +ossl_bio__attr__((__format__(ossl_bio__printf__, 3, 0))); +# undef ossl_bio__attr__ +# undef ossl_bio__printf__ + + +BIO_METHOD *BIO_meth_new(int type, const char *name); +void BIO_meth_free(BIO_METHOD *biom); +int (*BIO_meth_get_write(const BIO_METHOD *biom)) (BIO *, const char *, int); +int (*BIO_meth_get_write_ex(const BIO_METHOD *biom)) (BIO *, const char *, size_t, + size_t *); +int BIO_meth_set_write(BIO_METHOD *biom, + int (*write) (BIO *, const char *, int)); +int BIO_meth_set_write_ex(BIO_METHOD *biom, + int (*bwrite) (BIO *, const char *, size_t, size_t *)); +int (*BIO_meth_get_read(const BIO_METHOD *biom)) (BIO *, char *, int); +int (*BIO_meth_get_read_ex(const BIO_METHOD *biom)) (BIO *, char *, size_t, size_t *); +int BIO_meth_set_read(BIO_METHOD *biom, + int (*read) (BIO *, char *, int)); +int BIO_meth_set_read_ex(BIO_METHOD *biom, + int (*bread) (BIO *, char *, size_t, size_t *)); +int (*BIO_meth_get_puts(const BIO_METHOD *biom)) (BIO *, const char *); +int BIO_meth_set_puts(BIO_METHOD *biom, + int (*puts) (BIO *, const char *)); +int (*BIO_meth_get_gets(const BIO_METHOD *biom)) (BIO *, char *, int); +int BIO_meth_set_gets(BIO_METHOD *biom, + int (*gets) (BIO *, char *, int)); +long (*BIO_meth_get_ctrl(const BIO_METHOD *biom)) (BIO *, int, long, void *); +int BIO_meth_set_ctrl(BIO_METHOD *biom, + long (*ctrl) (BIO *, int, long, void *)); +int (*BIO_meth_get_create(const BIO_METHOD *bion)) (BIO *); +int BIO_meth_set_create(BIO_METHOD *biom, int (*create) (BIO *)); +int (*BIO_meth_get_destroy(const BIO_METHOD *biom)) (BIO *); +int BIO_meth_set_destroy(BIO_METHOD *biom, int (*destroy) (BIO *)); +long (*BIO_meth_get_callback_ctrl(const BIO_METHOD *biom)) + (BIO *, int, BIO_info_cb *); +int BIO_meth_set_callback_ctrl(BIO_METHOD *biom, + long (*callback_ctrl) (BIO *, int, + BIO_info_cb *)); + +# ifdef __cplusplus +} +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/bioerr.h b/third_party/openssl/uos/sw_64/include/openssl/bioerr.h new file mode 100644 index 00000000..46e2c96e --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/bioerr.h @@ -0,0 +1,124 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_BIOERR_H +# define HEADER_BIOERR_H + +# ifndef HEADER_SYMHACKS_H +# include +# endif + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_BIO_strings(void); + +/* + * BIO function codes. 
+ */ +# define BIO_F_ACPT_STATE 100 +# define BIO_F_ADDRINFO_WRAP 148 +# define BIO_F_ADDR_STRINGS 134 +# define BIO_F_BIO_ACCEPT 101 +# define BIO_F_BIO_ACCEPT_EX 137 +# define BIO_F_BIO_ACCEPT_NEW 152 +# define BIO_F_BIO_ADDR_NEW 144 +# define BIO_F_BIO_BIND 147 +# define BIO_F_BIO_CALLBACK_CTRL 131 +# define BIO_F_BIO_CONNECT 138 +# define BIO_F_BIO_CONNECT_NEW 153 +# define BIO_F_BIO_CTRL 103 +# define BIO_F_BIO_GETS 104 +# define BIO_F_BIO_GET_HOST_IP 106 +# define BIO_F_BIO_GET_NEW_INDEX 102 +# define BIO_F_BIO_GET_PORT 107 +# define BIO_F_BIO_LISTEN 139 +# define BIO_F_BIO_LOOKUP 135 +# define BIO_F_BIO_LOOKUP_EX 143 +# define BIO_F_BIO_MAKE_PAIR 121 +# define BIO_F_BIO_METH_NEW 146 +# define BIO_F_BIO_NEW 108 +# define BIO_F_BIO_NEW_DGRAM_SCTP 145 +# define BIO_F_BIO_NEW_FILE 109 +# define BIO_F_BIO_NEW_MEM_BUF 126 +# define BIO_F_BIO_NREAD 123 +# define BIO_F_BIO_NREAD0 124 +# define BIO_F_BIO_NWRITE 125 +# define BIO_F_BIO_NWRITE0 122 +# define BIO_F_BIO_PARSE_HOSTSERV 136 +# define BIO_F_BIO_PUTS 110 +# define BIO_F_BIO_READ 111 +# define BIO_F_BIO_READ_EX 105 +# define BIO_F_BIO_READ_INTERN 120 +# define BIO_F_BIO_SOCKET 140 +# define BIO_F_BIO_SOCKET_NBIO 142 +# define BIO_F_BIO_SOCK_INFO 141 +# define BIO_F_BIO_SOCK_INIT 112 +# define BIO_F_BIO_WRITE 113 +# define BIO_F_BIO_WRITE_EX 119 +# define BIO_F_BIO_WRITE_INTERN 128 +# define BIO_F_BUFFER_CTRL 114 +# define BIO_F_CONN_CTRL 127 +# define BIO_F_CONN_STATE 115 +# define BIO_F_DGRAM_SCTP_NEW 149 +# define BIO_F_DGRAM_SCTP_READ 132 +# define BIO_F_DGRAM_SCTP_WRITE 133 +# define BIO_F_DOAPR_OUTCH 150 +# define BIO_F_FILE_CTRL 116 +# define BIO_F_FILE_READ 130 +# define BIO_F_LINEBUFFER_CTRL 129 +# define BIO_F_LINEBUFFER_NEW 151 +# define BIO_F_MEM_WRITE 117 +# define BIO_F_NBIOF_NEW 154 +# define BIO_F_SLG_WRITE 155 +# define BIO_F_SSL_NEW 118 + +/* + * BIO reason codes. 
+ */ +# define BIO_R_ACCEPT_ERROR 100 +# define BIO_R_ADDRINFO_ADDR_IS_NOT_AF_INET 141 +# define BIO_R_AMBIGUOUS_HOST_OR_SERVICE 129 +# define BIO_R_BAD_FOPEN_MODE 101 +# define BIO_R_BROKEN_PIPE 124 +# define BIO_R_CONNECT_ERROR 103 +# define BIO_R_GETHOSTBYNAME_ADDR_IS_NOT_AF_INET 107 +# define BIO_R_GETSOCKNAME_ERROR 132 +# define BIO_R_GETSOCKNAME_TRUNCATED_ADDRESS 133 +# define BIO_R_GETTING_SOCKTYPE 134 +# define BIO_R_INVALID_ARGUMENT 125 +# define BIO_R_INVALID_SOCKET 135 +# define BIO_R_IN_USE 123 +# define BIO_R_LENGTH_TOO_LONG 102 +# define BIO_R_LISTEN_V6_ONLY 136 +# define BIO_R_LOOKUP_RETURNED_NOTHING 142 +# define BIO_R_MALFORMED_HOST_OR_SERVICE 130 +# define BIO_R_NBIO_CONNECT_ERROR 110 +# define BIO_R_NO_ACCEPT_ADDR_OR_SERVICE_SPECIFIED 143 +# define BIO_R_NO_HOSTNAME_OR_SERVICE_SPECIFIED 144 +# define BIO_R_NO_PORT_DEFINED 113 +# define BIO_R_NO_SUCH_FILE 128 +# define BIO_R_NULL_PARAMETER 115 +# define BIO_R_UNABLE_TO_BIND_SOCKET 117 +# define BIO_R_UNABLE_TO_CREATE_SOCKET 118 +# define BIO_R_UNABLE_TO_KEEPALIVE 137 +# define BIO_R_UNABLE_TO_LISTEN_SOCKET 119 +# define BIO_R_UNABLE_TO_NODELAY 138 +# define BIO_R_UNABLE_TO_REUSEADDR 139 +# define BIO_R_UNAVAILABLE_IP_FAMILY 145 +# define BIO_R_UNINITIALIZED 120 +# define BIO_R_UNKNOWN_INFO_TYPE 140 +# define BIO_R_UNSUPPORTED_IP_FAMILY 146 +# define BIO_R_UNSUPPORTED_METHOD 121 +# define BIO_R_UNSUPPORTED_PROTOCOL_FAMILY 131 +# define BIO_R_WRITE_TO_READ_ONLY_BIO 126 +# define BIO_R_WSASTARTUP 122 + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/blowfish.h b/third_party/openssl/uos/sw_64/include/openssl/blowfish.h new file mode 100644 index 00000000..cd3e460e --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/blowfish.h @@ -0,0 +1,61 @@ +/* + * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_BLOWFISH_H +# define HEADER_BLOWFISH_H + +# include + +# ifndef OPENSSL_NO_BF +# include +# ifdef __cplusplus +extern "C" { +# endif + +# define BF_ENCRYPT 1 +# define BF_DECRYPT 0 + +/*- + * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + * ! BF_LONG has to be at least 32 bits wide. ! + * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
+ */ +# define BF_LONG unsigned int + +# define BF_ROUNDS 16 +# define BF_BLOCK 8 + +typedef struct bf_key_st { + BF_LONG P[BF_ROUNDS + 2]; + BF_LONG S[4 * 256]; +} BF_KEY; + +void BF_set_key(BF_KEY *key, int len, const unsigned char *data); + +void BF_encrypt(BF_LONG *data, const BF_KEY *key); +void BF_decrypt(BF_LONG *data, const BF_KEY *key); + +void BF_ecb_encrypt(const unsigned char *in, unsigned char *out, + const BF_KEY *key, int enc); +void BF_cbc_encrypt(const unsigned char *in, unsigned char *out, long length, + const BF_KEY *schedule, unsigned char *ivec, int enc); +void BF_cfb64_encrypt(const unsigned char *in, unsigned char *out, + long length, const BF_KEY *schedule, + unsigned char *ivec, int *num, int enc); +void BF_ofb64_encrypt(const unsigned char *in, unsigned char *out, + long length, const BF_KEY *schedule, + unsigned char *ivec, int *num); +const char *BF_options(void); + +# ifdef __cplusplus +} +# endif +# endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/bn.h b/third_party/openssl/uos/sw_64/include/openssl/bn.h new file mode 100644 index 00000000..d8776604 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/bn.h @@ -0,0 +1,539 @@ +/* + * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved. + * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_BN_H +# define HEADER_BN_H + +# include <openssl/e_os2.h> +# ifndef OPENSSL_NO_STDIO +# include <stdio.h> +# endif +# include <openssl/opensslconf.h> +# include <openssl/ossl_typ.h> +# include <openssl/crypto.h> +# include <openssl/bnerr.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * 64-bit processor with LP64 ABI + */ +# ifdef SIXTY_FOUR_BIT_LONG +# define BN_ULONG unsigned long +# define BN_BYTES 8 +# endif + +/* + * 64-bit processor other than LP64 ABI + */ +# ifdef SIXTY_FOUR_BIT +# define BN_ULONG unsigned long long +# define BN_BYTES 8 +# endif + +# ifdef THIRTY_TWO_BIT +# define BN_ULONG unsigned int +# define BN_BYTES 4 +# endif + +# define BN_BITS2 (BN_BYTES * 8) +# define BN_BITS (BN_BITS2 * 2) +# define BN_TBIT ((BN_ULONG)1 << (BN_BITS2 - 1)) + +# define BN_FLG_MALLOCED 0x01 +# define BN_FLG_STATIC_DATA 0x02 + +/* + * avoid leaking exponent information through timing, + * BN_mod_exp_mont() will call BN_mod_exp_mont_consttime, + * BN_div() will call BN_div_no_branch, + * BN_mod_inverse() will call bn_mod_inverse_no_branch. + */ +# define BN_FLG_CONSTTIME 0x04 +# define BN_FLG_SECURE 0x08 + +# if OPENSSL_API_COMPAT < 0x00908000L +/* deprecated name for the flag */ +# define BN_FLG_EXP_CONSTTIME BN_FLG_CONSTTIME +# define BN_FLG_FREE 0x8000 /* used for debugging */ +# endif + +void BN_set_flags(BIGNUM *b, int n); +int BN_get_flags(const BIGNUM *b, int n); + +/* Values for |top| in BN_rand() */ +#define BN_RAND_TOP_ANY -1 +#define BN_RAND_TOP_ONE 0 +#define BN_RAND_TOP_TWO 1 + +/* Values for |bottom| in BN_rand() */ +#define BN_RAND_BOTTOM_ANY 0 +#define BN_RAND_BOTTOM_ODD 1 + +/* + * get a clone of a BIGNUM with changed flags, for *temporary* use only (the + * two BIGNUMs cannot be used in parallel!). Also only for *read only* use. The + * value |dest| should be a newly allocated BIGNUM obtained via BN_new() that + * has not been otherwise initialised or used.
+ */ +void BN_with_flags(BIGNUM *dest, const BIGNUM *b, int flags); + +/* Wrapper function to make using BN_GENCB easier */ +int BN_GENCB_call(BN_GENCB *cb, int a, int b); + +BN_GENCB *BN_GENCB_new(void); +void BN_GENCB_free(BN_GENCB *cb); + +/* Populate a BN_GENCB structure with an "old"-style callback */ +void BN_GENCB_set_old(BN_GENCB *gencb, void (*callback) (int, int, void *), + void *cb_arg); + +/* Populate a BN_GENCB structure with a "new"-style callback */ +void BN_GENCB_set(BN_GENCB *gencb, int (*callback) (int, int, BN_GENCB *), + void *cb_arg); + +void *BN_GENCB_get_arg(BN_GENCB *cb); + +# define BN_prime_checks 0 /* default: select number of iterations based + * on the size of the number */ + +/* + * BN_prime_checks_for_size() returns the number of Miller-Rabin iterations + * that will be done for checking that a random number is probably prime. The + * error rate for accepting a composite number as prime depends on the size of + * the prime |b|. The error rates used are for calculating an RSA key with 2 primes, + * and so the level is what you would expect for a key of double the size of the + * prime. + * + * This table is generated using the algorithm of FIPS PUB 186-4 + * Digital Signature Standard (DSS), section F.1, page 117. + * (https://dx.doi.org/10.6028/NIST.FIPS.186-4) + * + * The following magma script was used to generate the output: + * securitybits:=125; + * k:=1024; + * for t:=1 to 65 do + * for M:=3 to Floor(2*Sqrt(k-1)-1) do + * S:=0; + * // Sum over m + * for m:=3 to M do + * s:=0; + * // Sum over j + * for j:=2 to m do + * s+:=(RealField(32)!2)^-(j+(k-1)/j); + * end for; + * S+:=2^(m-(m-1)*t)*s; + * end for; + * A:=2^(k-2-M*t); + * B:=8*(Pi(RealField(32))^2-6)/3*2^(k-2)*S; + * pkt:=2.00743*Log(2)*k*2^-k*(A+B); + * seclevel:=Floor(-Log(2,pkt)); + * if seclevel ge securitybits then + * printf "k: %5o, security: %o bits (t: %o, M: %o)\n",k,seclevel,t,M; + * break; + * end if; + * end for; + * if seclevel ge securitybits then break; end if; + * end for; + * + * It can be run online at: + * http://magma.maths.usyd.edu.au/calc + * + * And will output: + * k: 1024, security: 129 bits (t: 6, M: 23) + * + * k is the number of bits of the prime, securitybits is the level we want to + * reach. + * + * prime length | RSA key size | # MR tests | security level + * -------------+--------------|------------+--------------- + * (b) >= 6394 | >= 12788 | 3 | 256 bit + * (b) >= 3747 | >= 7494 | 3 | 192 bit + * (b) >= 1345 | >= 2690 | 4 | 128 bit + * (b) >= 1080 | >= 2160 | 5 | 128 bit + * (b) >= 852 | >= 1704 | 5 | 112 bit + * (b) >= 476 | >= 952 | 5 | 80 bit + * (b) >= 400 | >= 800 | 6 | 80 bit + * (b) >= 347 | >= 694 | 7 | 80 bit + * (b) >= 308 | >= 616 | 8 | 80 bit + * (b) >= 55 | >= 110 | 27 | 64 bit + * (b) >= 6 | >= 12 | 34 | 64 bit + */ + +# define BN_prime_checks_for_size(b) ((b) >= 3747 ? 3 : \ + (b) >= 1345 ? 4 : \ + (b) >= 476 ? 5 : \ + (b) >= 400 ? 6 : \ + (b) >= 347 ? 7 : \ + (b) >= 308 ? 8 : \ + (b) >= 55 ? 
27 : \ + /* b >= 6 */ 34) + +# define BN_num_bytes(a) ((BN_num_bits(a)+7)/8) + +int BN_abs_is_word(const BIGNUM *a, const BN_ULONG w); +int BN_is_zero(const BIGNUM *a); +int BN_is_one(const BIGNUM *a); +int BN_is_word(const BIGNUM *a, const BN_ULONG w); +int BN_is_odd(const BIGNUM *a); + +# define BN_one(a) (BN_set_word((a),1)) + +void BN_zero_ex(BIGNUM *a); + +# if OPENSSL_API_COMPAT >= 0x00908000L +# define BN_zero(a) BN_zero_ex(a) +# else +# define BN_zero(a) (BN_set_word((a),0)) +# endif + +const BIGNUM *BN_value_one(void); +char *BN_options(void); +BN_CTX *BN_CTX_new(void); +BN_CTX *BN_CTX_secure_new(void); +void BN_CTX_free(BN_CTX *c); +void BN_CTX_start(BN_CTX *ctx); +BIGNUM *BN_CTX_get(BN_CTX *ctx); +void BN_CTX_end(BN_CTX *ctx); +int BN_rand(BIGNUM *rnd, int bits, int top, int bottom); +int BN_priv_rand(BIGNUM *rnd, int bits, int top, int bottom); +int BN_rand_range(BIGNUM *rnd, const BIGNUM *range); +int BN_priv_rand_range(BIGNUM *rnd, const BIGNUM *range); +int BN_pseudo_rand(BIGNUM *rnd, int bits, int top, int bottom); +int BN_pseudo_rand_range(BIGNUM *rnd, const BIGNUM *range); +int BN_num_bits(const BIGNUM *a); +int BN_num_bits_word(BN_ULONG l); +int BN_security_bits(int L, int N); +BIGNUM *BN_new(void); +BIGNUM *BN_secure_new(void); +void BN_clear_free(BIGNUM *a); +BIGNUM *BN_copy(BIGNUM *a, const BIGNUM *b); +void BN_swap(BIGNUM *a, BIGNUM *b); +BIGNUM *BN_bin2bn(const unsigned char *s, int len, BIGNUM *ret); +int BN_bn2bin(const BIGNUM *a, unsigned char *to); +int BN_bn2binpad(const BIGNUM *a, unsigned char *to, int tolen); +BIGNUM *BN_lebin2bn(const unsigned char *s, int len, BIGNUM *ret); +int BN_bn2lebinpad(const BIGNUM *a, unsigned char *to, int tolen); +BIGNUM *BN_mpi2bn(const unsigned char *s, int len, BIGNUM *ret); +int BN_bn2mpi(const BIGNUM *a, unsigned char *to); +int BN_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b); +int BN_usub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b); +int BN_uadd(BIGNUM *r, const BIGNUM *a, const BIGNUM *b); +int BN_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b); +int BN_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx); +int BN_sqr(BIGNUM *r, const BIGNUM *a, BN_CTX *ctx); +/** BN_set_negative sets sign of a BIGNUM + * \param b pointer to the BIGNUM object + * \param n 0 if the BIGNUM b should be positive and a value != 0 otherwise + */ +void BN_set_negative(BIGNUM *b, int n); +/** BN_is_negative returns 1 if the BIGNUM is negative + * \param b pointer to the BIGNUM object + * \return 1 if a < 0 and 0 otherwise + */ +int BN_is_negative(const BIGNUM *b); + +int BN_div(BIGNUM *dv, BIGNUM *rem, const BIGNUM *m, const BIGNUM *d, + BN_CTX *ctx); +# define BN_mod(rem,m,d,ctx) BN_div(NULL,(rem),(m),(d),(ctx)) +int BN_nnmod(BIGNUM *r, const BIGNUM *m, const BIGNUM *d, BN_CTX *ctx); +int BN_mod_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m, + BN_CTX *ctx); +int BN_mod_add_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, + const BIGNUM *m); +int BN_mod_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m, + BN_CTX *ctx); +int BN_mod_sub_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, + const BIGNUM *m); +int BN_mod_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m, + BN_CTX *ctx); +int BN_mod_sqr(BIGNUM *r, const BIGNUM *a, const BIGNUM *m, BN_CTX *ctx); +int BN_mod_lshift1(BIGNUM *r, const BIGNUM *a, const BIGNUM *m, BN_CTX *ctx); +int BN_mod_lshift1_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *m); +int BN_mod_lshift(BIGNUM *r, const BIGNUM *a, int n, const BIGNUM *m, + BN_CTX 
*ctx); +int BN_mod_lshift_quick(BIGNUM *r, const BIGNUM *a, int n, const BIGNUM *m); + +BN_ULONG BN_mod_word(const BIGNUM *a, BN_ULONG w); +BN_ULONG BN_div_word(BIGNUM *a, BN_ULONG w); +int BN_mul_word(BIGNUM *a, BN_ULONG w); +int BN_add_word(BIGNUM *a, BN_ULONG w); +int BN_sub_word(BIGNUM *a, BN_ULONG w); +int BN_set_word(BIGNUM *a, BN_ULONG w); +BN_ULONG BN_get_word(const BIGNUM *a); + +int BN_cmp(const BIGNUM *a, const BIGNUM *b); +void BN_free(BIGNUM *a); +int BN_is_bit_set(const BIGNUM *a, int n); +int BN_lshift(BIGNUM *r, const BIGNUM *a, int n); +int BN_lshift1(BIGNUM *r, const BIGNUM *a); +int BN_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx); + +int BN_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, + const BIGNUM *m, BN_CTX *ctx); +int BN_mod_exp_mont(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, + const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx); +int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, + const BIGNUM *m, BN_CTX *ctx, + BN_MONT_CTX *in_mont); +int BN_mod_exp_mont_word(BIGNUM *r, BN_ULONG a, const BIGNUM *p, + const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx); +int BN_mod_exp2_mont(BIGNUM *r, const BIGNUM *a1, const BIGNUM *p1, + const BIGNUM *a2, const BIGNUM *p2, const BIGNUM *m, + BN_CTX *ctx, BN_MONT_CTX *m_ctx); +int BN_mod_exp_simple(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, + const BIGNUM *m, BN_CTX *ctx); + +int BN_mask_bits(BIGNUM *a, int n); +# ifndef OPENSSL_NO_STDIO +int BN_print_fp(FILE *fp, const BIGNUM *a); +# endif +int BN_print(BIO *bio, const BIGNUM *a); +int BN_reciprocal(BIGNUM *r, const BIGNUM *m, int len, BN_CTX *ctx); +int BN_rshift(BIGNUM *r, const BIGNUM *a, int n); +int BN_rshift1(BIGNUM *r, const BIGNUM *a); +void BN_clear(BIGNUM *a); +BIGNUM *BN_dup(const BIGNUM *a); +int BN_ucmp(const BIGNUM *a, const BIGNUM *b); +int BN_set_bit(BIGNUM *a, int n); +int BN_clear_bit(BIGNUM *a, int n); +char *BN_bn2hex(const BIGNUM *a); +char *BN_bn2dec(const BIGNUM *a); +int BN_hex2bn(BIGNUM **a, const char *str); +int BN_dec2bn(BIGNUM **a, const char *str); +int BN_asc2bn(BIGNUM **a, const char *str); +int BN_gcd(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx); +int BN_kronecker(const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx); /* returns + * -2 for + * error */ +BIGNUM *BN_mod_inverse(BIGNUM *ret, + const BIGNUM *a, const BIGNUM *n, BN_CTX *ctx); +BIGNUM *BN_mod_sqrt(BIGNUM *ret, + const BIGNUM *a, const BIGNUM *n, BN_CTX *ctx); + +void BN_consttime_swap(BN_ULONG swap, BIGNUM *a, BIGNUM *b, int nwords); + +/* Deprecated versions */ +DEPRECATEDIN_0_9_8(BIGNUM *BN_generate_prime(BIGNUM *ret, int bits, int safe, + const BIGNUM *add, + const BIGNUM *rem, + void (*callback) (int, int, + void *), + void *cb_arg)) +DEPRECATEDIN_0_9_8(int + BN_is_prime(const BIGNUM *p, int nchecks, + void (*callback) (int, int, void *), + BN_CTX *ctx, void *cb_arg)) +DEPRECATEDIN_0_9_8(int + BN_is_prime_fasttest(const BIGNUM *p, int nchecks, + void (*callback) (int, int, void *), + BN_CTX *ctx, void *cb_arg, + int do_trial_division)) + +/* Newer versions */ +int BN_generate_prime_ex(BIGNUM *ret, int bits, int safe, const BIGNUM *add, + const BIGNUM *rem, BN_GENCB *cb); +int BN_is_prime_ex(const BIGNUM *p, int nchecks, BN_CTX *ctx, BN_GENCB *cb); +int BN_is_prime_fasttest_ex(const BIGNUM *p, int nchecks, BN_CTX *ctx, + int do_trial_division, BN_GENCB *cb); + +int BN_X931_generate_Xpq(BIGNUM *Xp, BIGNUM *Xq, int nbits, BN_CTX *ctx); + +int BN_X931_derive_prime_ex(BIGNUM *p, BIGNUM *p1, BIGNUM *p2, + const BIGNUM *Xp, const 
BIGNUM *Xp1, + const BIGNUM *Xp2, const BIGNUM *e, BN_CTX *ctx, + BN_GENCB *cb); +int BN_X931_generate_prime_ex(BIGNUM *p, BIGNUM *p1, BIGNUM *p2, BIGNUM *Xp1, + BIGNUM *Xp2, const BIGNUM *Xp, const BIGNUM *e, + BN_CTX *ctx, BN_GENCB *cb); + +BN_MONT_CTX *BN_MONT_CTX_new(void); +int BN_mod_mul_montgomery(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, + BN_MONT_CTX *mont, BN_CTX *ctx); +int BN_to_montgomery(BIGNUM *r, const BIGNUM *a, BN_MONT_CTX *mont, + BN_CTX *ctx); +int BN_from_montgomery(BIGNUM *r, const BIGNUM *a, BN_MONT_CTX *mont, + BN_CTX *ctx); +void BN_MONT_CTX_free(BN_MONT_CTX *mont); +int BN_MONT_CTX_set(BN_MONT_CTX *mont, const BIGNUM *mod, BN_CTX *ctx); +BN_MONT_CTX *BN_MONT_CTX_copy(BN_MONT_CTX *to, BN_MONT_CTX *from); +BN_MONT_CTX *BN_MONT_CTX_set_locked(BN_MONT_CTX **pmont, CRYPTO_RWLOCK *lock, + const BIGNUM *mod, BN_CTX *ctx); + +/* BN_BLINDING flags */ +# define BN_BLINDING_NO_UPDATE 0x00000001 +# define BN_BLINDING_NO_RECREATE 0x00000002 + +BN_BLINDING *BN_BLINDING_new(const BIGNUM *A, const BIGNUM *Ai, BIGNUM *mod); +void BN_BLINDING_free(BN_BLINDING *b); +int BN_BLINDING_update(BN_BLINDING *b, BN_CTX *ctx); +int BN_BLINDING_convert(BIGNUM *n, BN_BLINDING *b, BN_CTX *ctx); +int BN_BLINDING_invert(BIGNUM *n, BN_BLINDING *b, BN_CTX *ctx); +int BN_BLINDING_convert_ex(BIGNUM *n, BIGNUM *r, BN_BLINDING *b, BN_CTX *); +int BN_BLINDING_invert_ex(BIGNUM *n, const BIGNUM *r, BN_BLINDING *b, + BN_CTX *); + +int BN_BLINDING_is_current_thread(BN_BLINDING *b); +void BN_BLINDING_set_current_thread(BN_BLINDING *b); +int BN_BLINDING_lock(BN_BLINDING *b); +int BN_BLINDING_unlock(BN_BLINDING *b); + +unsigned long BN_BLINDING_get_flags(const BN_BLINDING *); +void BN_BLINDING_set_flags(BN_BLINDING *, unsigned long); +BN_BLINDING *BN_BLINDING_create_param(BN_BLINDING *b, + const BIGNUM *e, BIGNUM *m, BN_CTX *ctx, + int (*bn_mod_exp) (BIGNUM *r, + const BIGNUM *a, + const BIGNUM *p, + const BIGNUM *m, + BN_CTX *ctx, + BN_MONT_CTX *m_ctx), + BN_MONT_CTX *m_ctx); + +DEPRECATEDIN_0_9_8(void BN_set_params(int mul, int high, int low, int mont)) +DEPRECATEDIN_0_9_8(int BN_get_params(int which)) /* 0, mul, 1 high, 2 low, 3 + * mont */ + +BN_RECP_CTX *BN_RECP_CTX_new(void); +void BN_RECP_CTX_free(BN_RECP_CTX *recp); +int BN_RECP_CTX_set(BN_RECP_CTX *recp, const BIGNUM *rdiv, BN_CTX *ctx); +int BN_mod_mul_reciprocal(BIGNUM *r, const BIGNUM *x, const BIGNUM *y, + BN_RECP_CTX *recp, BN_CTX *ctx); +int BN_mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, + const BIGNUM *m, BN_CTX *ctx); +int BN_div_recp(BIGNUM *dv, BIGNUM *rem, const BIGNUM *m, + BN_RECP_CTX *recp, BN_CTX *ctx); + +# ifndef OPENSSL_NO_EC2M + +/* + * Functions for arithmetic over binary polynomials represented by BIGNUMs. + * The BIGNUM::neg property of BIGNUMs representing binary polynomials is + * ignored. Note that input arguments are not const so that their bit arrays + * can be expanded to the appropriate size if needed. 
+ */ + +/* + * r = a + b + */ +int BN_GF2m_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b); +# define BN_GF2m_sub(r, a, b) BN_GF2m_add(r, a, b) +/* + * r=a mod p + */ +int BN_GF2m_mod(BIGNUM *r, const BIGNUM *a, const BIGNUM *p); +/* r = (a * b) mod p */ +int BN_GF2m_mod_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, + const BIGNUM *p, BN_CTX *ctx); +/* r = (a * a) mod p */ +int BN_GF2m_mod_sqr(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx); +/* r = (1 / b) mod p */ +int BN_GF2m_mod_inv(BIGNUM *r, const BIGNUM *b, const BIGNUM *p, BN_CTX *ctx); +/* r = (a / b) mod p */ +int BN_GF2m_mod_div(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, + const BIGNUM *p, BN_CTX *ctx); +/* r = (a ^ b) mod p */ +int BN_GF2m_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, + const BIGNUM *p, BN_CTX *ctx); +/* r = sqrt(a) mod p */ +int BN_GF2m_mod_sqrt(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, + BN_CTX *ctx); +/* r^2 + r = a mod p */ +int BN_GF2m_mod_solve_quad(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, + BN_CTX *ctx); +# define BN_GF2m_cmp(a, b) BN_ucmp((a), (b)) +/*- + * Some functions allow for representation of the irreducible polynomials + * as an unsigned int[], say p. The irreducible f(t) is then of the form: + * t^p[0] + t^p[1] + ... + t^p[k] + * where m = p[0] > p[1] > ... > p[k] = 0. + */ +/* r = a mod p */ +int BN_GF2m_mod_arr(BIGNUM *r, const BIGNUM *a, const int p[]); +/* r = (a * b) mod p */ +int BN_GF2m_mod_mul_arr(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, + const int p[], BN_CTX *ctx); +/* r = (a * a) mod p */ +int BN_GF2m_mod_sqr_arr(BIGNUM *r, const BIGNUM *a, const int p[], + BN_CTX *ctx); +/* r = (1 / b) mod p */ +int BN_GF2m_mod_inv_arr(BIGNUM *r, const BIGNUM *b, const int p[], + BN_CTX *ctx); +/* r = (a / b) mod p */ +int BN_GF2m_mod_div_arr(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, + const int p[], BN_CTX *ctx); +/* r = (a ^ b) mod p */ +int BN_GF2m_mod_exp_arr(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, + const int p[], BN_CTX *ctx); +/* r = sqrt(a) mod p */ +int BN_GF2m_mod_sqrt_arr(BIGNUM *r, const BIGNUM *a, + const int p[], BN_CTX *ctx); +/* r^2 + r = a mod p */ +int BN_GF2m_mod_solve_quad_arr(BIGNUM *r, const BIGNUM *a, + const int p[], BN_CTX *ctx); +int BN_GF2m_poly2arr(const BIGNUM *a, int p[], int max); +int BN_GF2m_arr2poly(const int p[], BIGNUM *a); + +# endif + +/* + * faster mod functions for the 'NIST primes' 0 <= a < p^2 + */ +int BN_nist_mod_192(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx); +int BN_nist_mod_224(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx); +int BN_nist_mod_256(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx); +int BN_nist_mod_384(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx); +int BN_nist_mod_521(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx); + +const BIGNUM *BN_get0_nist_prime_192(void); +const BIGNUM *BN_get0_nist_prime_224(void); +const BIGNUM *BN_get0_nist_prime_256(void); +const BIGNUM *BN_get0_nist_prime_384(void); +const BIGNUM *BN_get0_nist_prime_521(void); + +int (*BN_nist_mod_func(const BIGNUM *p)) (BIGNUM *r, const BIGNUM *a, + const BIGNUM *field, BN_CTX *ctx); + +int BN_generate_dsa_nonce(BIGNUM *out, const BIGNUM *range, + const BIGNUM *priv, const unsigned char *message, + size_t message_len, BN_CTX *ctx); + +/* Primes from RFC 2409 */ +BIGNUM *BN_get_rfc2409_prime_768(BIGNUM *bn); +BIGNUM *BN_get_rfc2409_prime_1024(BIGNUM *bn); + +/* Primes from RFC 3526 */ +BIGNUM *BN_get_rfc3526_prime_1536(BIGNUM *bn); +BIGNUM *BN_get_rfc3526_prime_2048(BIGNUM *bn); 
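For reference, a minimal usage sketch of the BIGNUM arithmetic and primality-testing API declared in the bn.h header added above. This is an illustration only, not part of the patch; it assumes the bundled sw_64 OpenSSL 1.1-series headers and static libcrypto from this change are on the include and link paths.

    #include <stdio.h>
    #include <openssl/bn.h>
    #include <openssl/crypto.h>

    int main(void)
    {
        BN_CTX *ctx = BN_CTX_new();
        BIGNUM *a = BN_new(), *b = BN_new(), *r = BN_new(), *p = BN_new();
        char *hex = NULL;
        int is_prime = -1;

        if (ctx == NULL || a == NULL || b == NULL || r == NULL || p == NULL)
            goto done;

        /* r = 1234567 * 89, printed in hexadecimal */
        BN_set_word(a, 1234567);
        BN_set_word(b, 89);
        if (!BN_mul(r, a, b, ctx))
            goto done;
        hex = BN_bn2hex(r);
        printf("a*b = 0x%s\n", hex);

        /* Generate a 256-bit (non-safe) prime and re-test it; passing
         * BN_prime_checks (0) picks the Miller-Rabin iteration count
         * from the size-based table documented above. */
        if (!BN_generate_prime_ex(p, 256, 0, NULL, NULL, NULL))
            goto done;
        is_prime = BN_is_prime_ex(p, BN_prime_checks, ctx, NULL);
        printf("is_prime = %d\n", is_prime);

    done:
        OPENSSL_free(hex);               /* OPENSSL_free(NULL) is a no-op */
        BN_free(a); BN_free(b); BN_free(r); BN_free(p);
        BN_CTX_free(ctx);
        return is_prime == 1 ? 0 : 1;
    }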
+BIGNUM *BN_get_rfc3526_prime_3072(BIGNUM *bn); +BIGNUM *BN_get_rfc3526_prime_4096(BIGNUM *bn); +BIGNUM *BN_get_rfc3526_prime_6144(BIGNUM *bn); +BIGNUM *BN_get_rfc3526_prime_8192(BIGNUM *bn); + +# if OPENSSL_API_COMPAT < 0x10100000L +# define get_rfc2409_prime_768 BN_get_rfc2409_prime_768 +# define get_rfc2409_prime_1024 BN_get_rfc2409_prime_1024 +# define get_rfc3526_prime_1536 BN_get_rfc3526_prime_1536 +# define get_rfc3526_prime_2048 BN_get_rfc3526_prime_2048 +# define get_rfc3526_prime_3072 BN_get_rfc3526_prime_3072 +# define get_rfc3526_prime_4096 BN_get_rfc3526_prime_4096 +# define get_rfc3526_prime_6144 BN_get_rfc3526_prime_6144 +# define get_rfc3526_prime_8192 BN_get_rfc3526_prime_8192 +# endif + +int BN_bntest_rand(BIGNUM *rnd, int bits, int top, int bottom); + + +# ifdef __cplusplus +} +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/bnerr.h b/third_party/openssl/uos/sw_64/include/openssl/bnerr.h new file mode 100644 index 00000000..5c83777f --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/bnerr.h @@ -0,0 +1,101 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2023 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_BNERR_H +# define HEADER_BNERR_H + +# ifndef HEADER_SYMHACKS_H +# include +# endif + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_BN_strings(void); + +/* + * BN function codes. + */ +# define BN_F_BNRAND 127 +# define BN_F_BNRAND_RANGE 138 +# define BN_F_BN_BLINDING_CONVERT_EX 100 +# define BN_F_BN_BLINDING_CREATE_PARAM 128 +# define BN_F_BN_BLINDING_INVERT_EX 101 +# define BN_F_BN_BLINDING_NEW 102 +# define BN_F_BN_BLINDING_UPDATE 103 +# define BN_F_BN_BN2DEC 104 +# define BN_F_BN_BN2HEX 105 +# define BN_F_BN_COMPUTE_WNAF 142 +# define BN_F_BN_CTX_GET 116 +# define BN_F_BN_CTX_NEW 106 +# define BN_F_BN_CTX_START 129 +# define BN_F_BN_DIV 107 +# define BN_F_BN_DIV_RECP 130 +# define BN_F_BN_EXP 123 +# define BN_F_BN_EXPAND_INTERNAL 120 +# define BN_F_BN_GENCB_NEW 143 +# define BN_F_BN_GENERATE_DSA_NONCE 140 +# define BN_F_BN_GENERATE_PRIME_EX 141 +# define BN_F_BN_GF2M_MOD 131 +# define BN_F_BN_GF2M_MOD_EXP 132 +# define BN_F_BN_GF2M_MOD_MUL 133 +# define BN_F_BN_GF2M_MOD_SOLVE_QUAD 134 +# define BN_F_BN_GF2M_MOD_SOLVE_QUAD_ARR 135 +# define BN_F_BN_GF2M_MOD_SQR 136 +# define BN_F_BN_GF2M_MOD_SQRT 137 +# define BN_F_BN_LSHIFT 145 +# define BN_F_BN_MOD_EXP2_MONT 118 +# define BN_F_BN_MOD_EXP_MONT 109 +# define BN_F_BN_MOD_EXP_MONT_CONSTTIME 124 +# define BN_F_BN_MOD_EXP_MONT_WORD 117 +# define BN_F_BN_MOD_EXP_RECP 125 +# define BN_F_BN_MOD_EXP_SIMPLE 126 +# define BN_F_BN_MOD_INVERSE 110 +# define BN_F_BN_MOD_INVERSE_NO_BRANCH 139 +# define BN_F_BN_MOD_LSHIFT_QUICK 119 +# define BN_F_BN_MOD_SQRT 121 +# define BN_F_BN_MONT_CTX_NEW 149 +# define BN_F_BN_MPI2BN 112 +# define BN_F_BN_NEW 113 +# define BN_F_BN_POOL_GET 147 +# define BN_F_BN_RAND 114 +# define BN_F_BN_RAND_RANGE 122 +# define BN_F_BN_RECP_CTX_NEW 150 +# define BN_F_BN_RSHIFT 146 +# define BN_F_BN_SET_WORDS 144 +# define BN_F_BN_STACK_PUSH 148 +# define BN_F_BN_USUB 115 +# define BN_F_OSSL_BN_RSA_DO_UNBLIND 151 + +/* + * BN reason codes. 
+ */ +# define BN_R_ARG2_LT_ARG3 100 +# define BN_R_BAD_RECIPROCAL 101 +# define BN_R_BIGNUM_TOO_LONG 114 +# define BN_R_BITS_TOO_SMALL 118 +# define BN_R_CALLED_WITH_EVEN_MODULUS 102 +# define BN_R_DIV_BY_ZERO 103 +# define BN_R_ENCODING_ERROR 104 +# define BN_R_EXPAND_ON_STATIC_BIGNUM_DATA 105 +# define BN_R_INPUT_NOT_REDUCED 110 +# define BN_R_INVALID_LENGTH 106 +# define BN_R_INVALID_RANGE 115 +# define BN_R_INVALID_SHIFT 119 +# define BN_R_NOT_A_SQUARE 111 +# define BN_R_NOT_INITIALIZED 107 +# define BN_R_NO_INVERSE 108 +# define BN_R_NO_SOLUTION 116 +# define BN_R_PRIVATE_KEY_TOO_LARGE 117 +# define BN_R_P_IS_NOT_PRIME 112 +# define BN_R_TOO_MANY_ITERATIONS 113 +# define BN_R_TOO_MANY_TEMPORARY_VARIABLES 109 + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/buffer.h b/third_party/openssl/uos/sw_64/include/openssl/buffer.h new file mode 100644 index 00000000..d2765766 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/buffer.h @@ -0,0 +1,58 @@ +/* + * Copyright 1995-2018 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_BUFFER_H +# define HEADER_BUFFER_H + +# include +# ifndef HEADER_CRYPTO_H +# include +# endif +# include + + +#ifdef __cplusplus +extern "C" { +#endif + +# include +# include + +/* + * These names are outdated as of OpenSSL 1.1; a future release + * will move them to be deprecated. + */ +# define BUF_strdup(s) OPENSSL_strdup(s) +# define BUF_strndup(s, size) OPENSSL_strndup(s, size) +# define BUF_memdup(data, size) OPENSSL_memdup(data, size) +# define BUF_strlcpy(dst, src, size) OPENSSL_strlcpy(dst, src, size) +# define BUF_strlcat(dst, src, size) OPENSSL_strlcat(dst, src, size) +# define BUF_strnlen(str, maxlen) OPENSSL_strnlen(str, maxlen) + +struct buf_mem_st { + size_t length; /* current number of bytes */ + char *data; + size_t max; /* size of buffer */ + unsigned long flags; +}; + +# define BUF_MEM_FLAG_SECURE 0x01 + +BUF_MEM *BUF_MEM_new(void); +BUF_MEM *BUF_MEM_new_ex(unsigned long flags); +void BUF_MEM_free(BUF_MEM *a); +size_t BUF_MEM_grow(BUF_MEM *str, size_t len); +size_t BUF_MEM_grow_clean(BUF_MEM *str, size_t len); +void BUF_reverse(unsigned char *out, const unsigned char *in, size_t siz); + + +# ifdef __cplusplus +} +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/buffererr.h b/third_party/openssl/uos/sw_64/include/openssl/buffererr.h new file mode 100644 index 00000000..04f6ff7a --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/buffererr.h @@ -0,0 +1,34 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_BUFERR_H +# define HEADER_BUFERR_H + +# ifndef HEADER_SYMHACKS_H +# include +# endif + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_BUF_strings(void); + +/* + * BUF function codes. + */ +# define BUF_F_BUF_MEM_GROW 100 +# define BUF_F_BUF_MEM_GROW_CLEAN 105 +# define BUF_F_BUF_MEM_NEW 101 + +/* + * BUF reason codes. 
+ */ + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/camellia.h b/third_party/openssl/uos/sw_64/include/openssl/camellia.h new file mode 100644 index 00000000..151f3c13 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/camellia.h @@ -0,0 +1,83 @@ +/* + * Copyright 2006-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_CAMELLIA_H +# define HEADER_CAMELLIA_H + +# include + +# ifndef OPENSSL_NO_CAMELLIA +# include +#ifdef __cplusplus +extern "C" { +#endif + +# define CAMELLIA_ENCRYPT 1 +# define CAMELLIA_DECRYPT 0 + +/* + * Because array size can't be a const in C, the following two are macros. + * Both sizes are in bytes. + */ + +/* This should be a hidden type, but EVP requires that the size be known */ + +# define CAMELLIA_BLOCK_SIZE 16 +# define CAMELLIA_TABLE_BYTE_LEN 272 +# define CAMELLIA_TABLE_WORD_LEN (CAMELLIA_TABLE_BYTE_LEN / 4) + +typedef unsigned int KEY_TABLE_TYPE[CAMELLIA_TABLE_WORD_LEN]; /* to match + * with WORD */ + +struct camellia_key_st { + union { + double d; /* ensures 64-bit align */ + KEY_TABLE_TYPE rd_key; + } u; + int grand_rounds; +}; +typedef struct camellia_key_st CAMELLIA_KEY; + +int Camellia_set_key(const unsigned char *userKey, const int bits, + CAMELLIA_KEY *key); + +void Camellia_encrypt(const unsigned char *in, unsigned char *out, + const CAMELLIA_KEY *key); +void Camellia_decrypt(const unsigned char *in, unsigned char *out, + const CAMELLIA_KEY *key); + +void Camellia_ecb_encrypt(const unsigned char *in, unsigned char *out, + const CAMELLIA_KEY *key, const int enc); +void Camellia_cbc_encrypt(const unsigned char *in, unsigned char *out, + size_t length, const CAMELLIA_KEY *key, + unsigned char *ivec, const int enc); +void Camellia_cfb128_encrypt(const unsigned char *in, unsigned char *out, + size_t length, const CAMELLIA_KEY *key, + unsigned char *ivec, int *num, const int enc); +void Camellia_cfb1_encrypt(const unsigned char *in, unsigned char *out, + size_t length, const CAMELLIA_KEY *key, + unsigned char *ivec, int *num, const int enc); +void Camellia_cfb8_encrypt(const unsigned char *in, unsigned char *out, + size_t length, const CAMELLIA_KEY *key, + unsigned char *ivec, int *num, const int enc); +void Camellia_ofb128_encrypt(const unsigned char *in, unsigned char *out, + size_t length, const CAMELLIA_KEY *key, + unsigned char *ivec, int *num); +void Camellia_ctr128_encrypt(const unsigned char *in, unsigned char *out, + size_t length, const CAMELLIA_KEY *key, + unsigned char ivec[CAMELLIA_BLOCK_SIZE], + unsigned char ecount_buf[CAMELLIA_BLOCK_SIZE], + unsigned int *num); + +# ifdef __cplusplus +} +# endif +# endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/cast.h b/third_party/openssl/uos/sw_64/include/openssl/cast.h new file mode 100644 index 00000000..2cc89ae0 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/cast.h @@ -0,0 +1,53 @@ +/* + * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_CAST_H +# define HEADER_CAST_H + +# include + +# ifndef OPENSSL_NO_CAST +# ifdef __cplusplus +extern "C" { +# endif + +# define CAST_ENCRYPT 1 +# define CAST_DECRYPT 0 + +# define CAST_LONG unsigned int + +# define CAST_BLOCK 8 +# define CAST_KEY_LENGTH 16 + +typedef struct cast_key_st { + CAST_LONG data[32]; + int short_key; /* Use reduced rounds for short key */ +} CAST_KEY; + +void CAST_set_key(CAST_KEY *key, int len, const unsigned char *data); +void CAST_ecb_encrypt(const unsigned char *in, unsigned char *out, + const CAST_KEY *key, int enc); +void CAST_encrypt(CAST_LONG *data, const CAST_KEY *key); +void CAST_decrypt(CAST_LONG *data, const CAST_KEY *key); +void CAST_cbc_encrypt(const unsigned char *in, unsigned char *out, + long length, const CAST_KEY *ks, unsigned char *iv, + int enc); +void CAST_cfb64_encrypt(const unsigned char *in, unsigned char *out, + long length, const CAST_KEY *schedule, + unsigned char *ivec, int *num, int enc); +void CAST_ofb64_encrypt(const unsigned char *in, unsigned char *out, + long length, const CAST_KEY *schedule, + unsigned char *ivec, int *num); + +# ifdef __cplusplus +} +# endif +# endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/cmac.h b/third_party/openssl/uos/sw_64/include/openssl/cmac.h new file mode 100644 index 00000000..3535a9ab --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/cmac.h @@ -0,0 +1,41 @@ +/* + * Copyright 2010-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_CMAC_H +# define HEADER_CMAC_H + +# ifndef OPENSSL_NO_CMAC + +#ifdef __cplusplus +extern "C" { +#endif + +# include + +/* Opaque */ +typedef struct CMAC_CTX_st CMAC_CTX; + +CMAC_CTX *CMAC_CTX_new(void); +void CMAC_CTX_cleanup(CMAC_CTX *ctx); +void CMAC_CTX_free(CMAC_CTX *ctx); +EVP_CIPHER_CTX *CMAC_CTX_get0_cipher_ctx(CMAC_CTX *ctx); +int CMAC_CTX_copy(CMAC_CTX *out, const CMAC_CTX *in); + +int CMAC_Init(CMAC_CTX *ctx, const void *key, size_t keylen, + const EVP_CIPHER *cipher, ENGINE *impl); +int CMAC_Update(CMAC_CTX *ctx, const void *data, size_t dlen); +int CMAC_Final(CMAC_CTX *ctx, unsigned char *out, size_t *poutlen); +int CMAC_resume(CMAC_CTX *ctx); + +#ifdef __cplusplus +} +#endif + +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/cms.h b/third_party/openssl/uos/sw_64/include/openssl/cms.h new file mode 100644 index 00000000..c7627968 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/cms.h @@ -0,0 +1,339 @@ +/* + * Copyright 2008-2019 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_CMS_H +# define HEADER_CMS_H + +# include + +# ifndef OPENSSL_NO_CMS +# include +# include +# include +# ifdef __cplusplus +extern "C" { +# endif + +typedef struct CMS_ContentInfo_st CMS_ContentInfo; +typedef struct CMS_SignerInfo_st CMS_SignerInfo; +typedef struct CMS_CertificateChoices CMS_CertificateChoices; +typedef struct CMS_RevocationInfoChoice_st CMS_RevocationInfoChoice; +typedef struct CMS_RecipientInfo_st CMS_RecipientInfo; +typedef struct CMS_ReceiptRequest_st CMS_ReceiptRequest; +typedef struct CMS_Receipt_st CMS_Receipt; +typedef struct CMS_RecipientEncryptedKey_st CMS_RecipientEncryptedKey; +typedef struct CMS_OtherKeyAttribute_st CMS_OtherKeyAttribute; + +DEFINE_STACK_OF(CMS_SignerInfo) +DEFINE_STACK_OF(CMS_RecipientEncryptedKey) +DEFINE_STACK_OF(CMS_RecipientInfo) +DEFINE_STACK_OF(CMS_RevocationInfoChoice) +DECLARE_ASN1_FUNCTIONS(CMS_ContentInfo) +DECLARE_ASN1_FUNCTIONS(CMS_ReceiptRequest) +DECLARE_ASN1_PRINT_FUNCTION(CMS_ContentInfo) + +# define CMS_SIGNERINFO_ISSUER_SERIAL 0 +# define CMS_SIGNERINFO_KEYIDENTIFIER 1 + +# define CMS_RECIPINFO_NONE -1 +# define CMS_RECIPINFO_TRANS 0 +# define CMS_RECIPINFO_AGREE 1 +# define CMS_RECIPINFO_KEK 2 +# define CMS_RECIPINFO_PASS 3 +# define CMS_RECIPINFO_OTHER 4 + +/* S/MIME related flags */ + +# define CMS_TEXT 0x1 +# define CMS_NOCERTS 0x2 +# define CMS_NO_CONTENT_VERIFY 0x4 +# define CMS_NO_ATTR_VERIFY 0x8 +# define CMS_NOSIGS \ + (CMS_NO_CONTENT_VERIFY|CMS_NO_ATTR_VERIFY) +# define CMS_NOINTERN 0x10 +# define CMS_NO_SIGNER_CERT_VERIFY 0x20 +# define CMS_NOVERIFY 0x20 +# define CMS_DETACHED 0x40 +# define CMS_BINARY 0x80 +# define CMS_NOATTR 0x100 +# define CMS_NOSMIMECAP 0x200 +# define CMS_NOOLDMIMETYPE 0x400 +# define CMS_CRLFEOL 0x800 +# define CMS_STREAM 0x1000 +# define CMS_NOCRL 0x2000 +# define CMS_PARTIAL 0x4000 +# define CMS_REUSE_DIGEST 0x8000 +# define CMS_USE_KEYID 0x10000 +# define CMS_DEBUG_DECRYPT 0x20000 +# define CMS_KEY_PARAM 0x40000 +# define CMS_ASCIICRLF 0x80000 + +const ASN1_OBJECT *CMS_get0_type(const CMS_ContentInfo *cms); + +BIO *CMS_dataInit(CMS_ContentInfo *cms, BIO *icont); +int CMS_dataFinal(CMS_ContentInfo *cms, BIO *bio); + +ASN1_OCTET_STRING **CMS_get0_content(CMS_ContentInfo *cms); +int CMS_is_detached(CMS_ContentInfo *cms); +int CMS_set_detached(CMS_ContentInfo *cms, int detached); + +# ifdef HEADER_PEM_H +DECLARE_PEM_rw_const(CMS, CMS_ContentInfo) +# endif +int CMS_stream(unsigned char ***boundary, CMS_ContentInfo *cms); +CMS_ContentInfo *d2i_CMS_bio(BIO *bp, CMS_ContentInfo **cms); +int i2d_CMS_bio(BIO *bp, CMS_ContentInfo *cms); + +BIO *BIO_new_CMS(BIO *out, CMS_ContentInfo *cms); +int i2d_CMS_bio_stream(BIO *out, CMS_ContentInfo *cms, BIO *in, int flags); +int PEM_write_bio_CMS_stream(BIO *out, CMS_ContentInfo *cms, BIO *in, + int flags); +CMS_ContentInfo *SMIME_read_CMS(BIO *bio, BIO **bcont); +int SMIME_write_CMS(BIO *bio, CMS_ContentInfo *cms, BIO *data, int flags); + +int CMS_final(CMS_ContentInfo *cms, BIO *data, BIO *dcont, + unsigned int flags); + +CMS_ContentInfo *CMS_sign(X509 *signcert, EVP_PKEY *pkey, + STACK_OF(X509) *certs, BIO *data, + unsigned int flags); + +CMS_ContentInfo *CMS_sign_receipt(CMS_SignerInfo *si, + X509 *signcert, EVP_PKEY *pkey, + STACK_OF(X509) *certs, unsigned int flags); + +int CMS_data(CMS_ContentInfo *cms, BIO *out, unsigned int flags); +CMS_ContentInfo *CMS_data_create(BIO *in, unsigned int flags); + +int 
CMS_digest_verify(CMS_ContentInfo *cms, BIO *dcont, BIO *out, + unsigned int flags); +CMS_ContentInfo *CMS_digest_create(BIO *in, const EVP_MD *md, + unsigned int flags); + +int CMS_EncryptedData_decrypt(CMS_ContentInfo *cms, + const unsigned char *key, size_t keylen, + BIO *dcont, BIO *out, unsigned int flags); + +CMS_ContentInfo *CMS_EncryptedData_encrypt(BIO *in, const EVP_CIPHER *cipher, + const unsigned char *key, + size_t keylen, unsigned int flags); + +int CMS_EncryptedData_set1_key(CMS_ContentInfo *cms, const EVP_CIPHER *ciph, + const unsigned char *key, size_t keylen); + +int CMS_verify(CMS_ContentInfo *cms, STACK_OF(X509) *certs, + X509_STORE *store, BIO *dcont, BIO *out, unsigned int flags); + +int CMS_verify_receipt(CMS_ContentInfo *rcms, CMS_ContentInfo *ocms, + STACK_OF(X509) *certs, + X509_STORE *store, unsigned int flags); + +STACK_OF(X509) *CMS_get0_signers(CMS_ContentInfo *cms); + +CMS_ContentInfo *CMS_encrypt(STACK_OF(X509) *certs, BIO *in, + const EVP_CIPHER *cipher, unsigned int flags); + +int CMS_decrypt(CMS_ContentInfo *cms, EVP_PKEY *pkey, X509 *cert, + BIO *dcont, BIO *out, unsigned int flags); + +int CMS_decrypt_set1_pkey(CMS_ContentInfo *cms, EVP_PKEY *pk, X509 *cert); +int CMS_decrypt_set1_key(CMS_ContentInfo *cms, + unsigned char *key, size_t keylen, + const unsigned char *id, size_t idlen); +int CMS_decrypt_set1_password(CMS_ContentInfo *cms, + unsigned char *pass, ossl_ssize_t passlen); + +STACK_OF(CMS_RecipientInfo) *CMS_get0_RecipientInfos(CMS_ContentInfo *cms); +int CMS_RecipientInfo_type(CMS_RecipientInfo *ri); +EVP_PKEY_CTX *CMS_RecipientInfo_get0_pkey_ctx(CMS_RecipientInfo *ri); +CMS_ContentInfo *CMS_EnvelopedData_create(const EVP_CIPHER *cipher); +CMS_RecipientInfo *CMS_add1_recipient_cert(CMS_ContentInfo *cms, + X509 *recip, unsigned int flags); +int CMS_RecipientInfo_set0_pkey(CMS_RecipientInfo *ri, EVP_PKEY *pkey); +int CMS_RecipientInfo_ktri_cert_cmp(CMS_RecipientInfo *ri, X509 *cert); +int CMS_RecipientInfo_ktri_get0_algs(CMS_RecipientInfo *ri, + EVP_PKEY **pk, X509 **recip, + X509_ALGOR **palg); +int CMS_RecipientInfo_ktri_get0_signer_id(CMS_RecipientInfo *ri, + ASN1_OCTET_STRING **keyid, + X509_NAME **issuer, + ASN1_INTEGER **sno); + +CMS_RecipientInfo *CMS_add0_recipient_key(CMS_ContentInfo *cms, int nid, + unsigned char *key, size_t keylen, + unsigned char *id, size_t idlen, + ASN1_GENERALIZEDTIME *date, + ASN1_OBJECT *otherTypeId, + ASN1_TYPE *otherType); + +int CMS_RecipientInfo_kekri_get0_id(CMS_RecipientInfo *ri, + X509_ALGOR **palg, + ASN1_OCTET_STRING **pid, + ASN1_GENERALIZEDTIME **pdate, + ASN1_OBJECT **potherid, + ASN1_TYPE **pothertype); + +int CMS_RecipientInfo_set0_key(CMS_RecipientInfo *ri, + unsigned char *key, size_t keylen); + +int CMS_RecipientInfo_kekri_id_cmp(CMS_RecipientInfo *ri, + const unsigned char *id, size_t idlen); + +int CMS_RecipientInfo_set0_password(CMS_RecipientInfo *ri, + unsigned char *pass, + ossl_ssize_t passlen); + +CMS_RecipientInfo *CMS_add0_recipient_password(CMS_ContentInfo *cms, + int iter, int wrap_nid, + int pbe_nid, + unsigned char *pass, + ossl_ssize_t passlen, + const EVP_CIPHER *kekciph); + +int CMS_RecipientInfo_decrypt(CMS_ContentInfo *cms, CMS_RecipientInfo *ri); +int CMS_RecipientInfo_encrypt(CMS_ContentInfo *cms, CMS_RecipientInfo *ri); + +int CMS_uncompress(CMS_ContentInfo *cms, BIO *dcont, BIO *out, + unsigned int flags); +CMS_ContentInfo *CMS_compress(BIO *in, int comp_nid, unsigned int flags); + +int CMS_set1_eContentType(CMS_ContentInfo *cms, const ASN1_OBJECT *oid); +const ASN1_OBJECT 
*CMS_get0_eContentType(CMS_ContentInfo *cms); + +CMS_CertificateChoices *CMS_add0_CertificateChoices(CMS_ContentInfo *cms); +int CMS_add0_cert(CMS_ContentInfo *cms, X509 *cert); +int CMS_add1_cert(CMS_ContentInfo *cms, X509 *cert); +STACK_OF(X509) *CMS_get1_certs(CMS_ContentInfo *cms); + +CMS_RevocationInfoChoice *CMS_add0_RevocationInfoChoice(CMS_ContentInfo *cms); +int CMS_add0_crl(CMS_ContentInfo *cms, X509_CRL *crl); +int CMS_add1_crl(CMS_ContentInfo *cms, X509_CRL *crl); +STACK_OF(X509_CRL) *CMS_get1_crls(CMS_ContentInfo *cms); + +int CMS_SignedData_init(CMS_ContentInfo *cms); +CMS_SignerInfo *CMS_add1_signer(CMS_ContentInfo *cms, + X509 *signer, EVP_PKEY *pk, const EVP_MD *md, + unsigned int flags); +EVP_PKEY_CTX *CMS_SignerInfo_get0_pkey_ctx(CMS_SignerInfo *si); +EVP_MD_CTX *CMS_SignerInfo_get0_md_ctx(CMS_SignerInfo *si); +STACK_OF(CMS_SignerInfo) *CMS_get0_SignerInfos(CMS_ContentInfo *cms); + +void CMS_SignerInfo_set1_signer_cert(CMS_SignerInfo *si, X509 *signer); +int CMS_SignerInfo_get0_signer_id(CMS_SignerInfo *si, + ASN1_OCTET_STRING **keyid, + X509_NAME **issuer, ASN1_INTEGER **sno); +int CMS_SignerInfo_cert_cmp(CMS_SignerInfo *si, X509 *cert); +int CMS_set1_signers_certs(CMS_ContentInfo *cms, STACK_OF(X509) *certs, + unsigned int flags); +void CMS_SignerInfo_get0_algs(CMS_SignerInfo *si, EVP_PKEY **pk, + X509 **signer, X509_ALGOR **pdig, + X509_ALGOR **psig); +ASN1_OCTET_STRING *CMS_SignerInfo_get0_signature(CMS_SignerInfo *si); +int CMS_SignerInfo_sign(CMS_SignerInfo *si); +int CMS_SignerInfo_verify(CMS_SignerInfo *si); +int CMS_SignerInfo_verify_content(CMS_SignerInfo *si, BIO *chain); + +int CMS_add_smimecap(CMS_SignerInfo *si, STACK_OF(X509_ALGOR) *algs); +int CMS_add_simple_smimecap(STACK_OF(X509_ALGOR) **algs, + int algnid, int keysize); +int CMS_add_standard_smimecap(STACK_OF(X509_ALGOR) **smcap); + +int CMS_signed_get_attr_count(const CMS_SignerInfo *si); +int CMS_signed_get_attr_by_NID(const CMS_SignerInfo *si, int nid, + int lastpos); +int CMS_signed_get_attr_by_OBJ(const CMS_SignerInfo *si, const ASN1_OBJECT *obj, + int lastpos); +X509_ATTRIBUTE *CMS_signed_get_attr(const CMS_SignerInfo *si, int loc); +X509_ATTRIBUTE *CMS_signed_delete_attr(CMS_SignerInfo *si, int loc); +int CMS_signed_add1_attr(CMS_SignerInfo *si, X509_ATTRIBUTE *attr); +int CMS_signed_add1_attr_by_OBJ(CMS_SignerInfo *si, + const ASN1_OBJECT *obj, int type, + const void *bytes, int len); +int CMS_signed_add1_attr_by_NID(CMS_SignerInfo *si, + int nid, int type, + const void *bytes, int len); +int CMS_signed_add1_attr_by_txt(CMS_SignerInfo *si, + const char *attrname, int type, + const void *bytes, int len); +void *CMS_signed_get0_data_by_OBJ(CMS_SignerInfo *si, const ASN1_OBJECT *oid, + int lastpos, int type); + +int CMS_unsigned_get_attr_count(const CMS_SignerInfo *si); +int CMS_unsigned_get_attr_by_NID(const CMS_SignerInfo *si, int nid, + int lastpos); +int CMS_unsigned_get_attr_by_OBJ(const CMS_SignerInfo *si, + const ASN1_OBJECT *obj, int lastpos); +X509_ATTRIBUTE *CMS_unsigned_get_attr(const CMS_SignerInfo *si, int loc); +X509_ATTRIBUTE *CMS_unsigned_delete_attr(CMS_SignerInfo *si, int loc); +int CMS_unsigned_add1_attr(CMS_SignerInfo *si, X509_ATTRIBUTE *attr); +int CMS_unsigned_add1_attr_by_OBJ(CMS_SignerInfo *si, + const ASN1_OBJECT *obj, int type, + const void *bytes, int len); +int CMS_unsigned_add1_attr_by_NID(CMS_SignerInfo *si, + int nid, int type, + const void *bytes, int len); +int CMS_unsigned_add1_attr_by_txt(CMS_SignerInfo *si, + const char *attrname, int type, + const void *bytes, 
int len); +void *CMS_unsigned_get0_data_by_OBJ(CMS_SignerInfo *si, ASN1_OBJECT *oid, + int lastpos, int type); + +int CMS_get1_ReceiptRequest(CMS_SignerInfo *si, CMS_ReceiptRequest **prr); +CMS_ReceiptRequest *CMS_ReceiptRequest_create0(unsigned char *id, int idlen, + int allorfirst, + STACK_OF(GENERAL_NAMES) + *receiptList, STACK_OF(GENERAL_NAMES) + *receiptsTo); +int CMS_add1_ReceiptRequest(CMS_SignerInfo *si, CMS_ReceiptRequest *rr); +void CMS_ReceiptRequest_get0_values(CMS_ReceiptRequest *rr, + ASN1_STRING **pcid, + int *pallorfirst, + STACK_OF(GENERAL_NAMES) **plist, + STACK_OF(GENERAL_NAMES) **prto); +int CMS_RecipientInfo_kari_get0_alg(CMS_RecipientInfo *ri, + X509_ALGOR **palg, + ASN1_OCTET_STRING **pukm); +STACK_OF(CMS_RecipientEncryptedKey) +*CMS_RecipientInfo_kari_get0_reks(CMS_RecipientInfo *ri); + +int CMS_RecipientInfo_kari_get0_orig_id(CMS_RecipientInfo *ri, + X509_ALGOR **pubalg, + ASN1_BIT_STRING **pubkey, + ASN1_OCTET_STRING **keyid, + X509_NAME **issuer, + ASN1_INTEGER **sno); + +int CMS_RecipientInfo_kari_orig_id_cmp(CMS_RecipientInfo *ri, X509 *cert); + +int CMS_RecipientEncryptedKey_get0_id(CMS_RecipientEncryptedKey *rek, + ASN1_OCTET_STRING **keyid, + ASN1_GENERALIZEDTIME **tm, + CMS_OtherKeyAttribute **other, + X509_NAME **issuer, ASN1_INTEGER **sno); +int CMS_RecipientEncryptedKey_cert_cmp(CMS_RecipientEncryptedKey *rek, + X509 *cert); +int CMS_RecipientInfo_kari_set0_pkey(CMS_RecipientInfo *ri, EVP_PKEY *pk); +EVP_CIPHER_CTX *CMS_RecipientInfo_kari_get0_ctx(CMS_RecipientInfo *ri); +int CMS_RecipientInfo_kari_decrypt(CMS_ContentInfo *cms, + CMS_RecipientInfo *ri, + CMS_RecipientEncryptedKey *rek); + +int CMS_SharedInfo_encode(unsigned char **pder, X509_ALGOR *kekalg, + ASN1_OCTET_STRING *ukm, int keylen); + +/* Backward compatibility for spelling errors. */ +# define CMS_R_UNKNOWN_DIGEST_ALGORITM CMS_R_UNKNOWN_DIGEST_ALGORITHM +# define CMS_R_UNSUPPORTED_RECPIENTINFO_TYPE \ + CMS_R_UNSUPPORTED_RECIPIENTINFO_TYPE + +# ifdef __cplusplus +} +# endif +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/cmserr.h b/third_party/openssl/uos/sw_64/include/openssl/cmserr.h new file mode 100644 index 00000000..d589f592 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/cmserr.h @@ -0,0 +1,203 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_CMSERR_H +# define HEADER_CMSERR_H + +# ifndef HEADER_SYMHACKS_H +# include +# endif + +# include + +# ifndef OPENSSL_NO_CMS + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_CMS_strings(void); + +/* + * CMS function codes. 
+ */ +# define CMS_F_CHECK_CONTENT 99 +# define CMS_F_CMS_ADD0_CERT 164 +# define CMS_F_CMS_ADD0_RECIPIENT_KEY 100 +# define CMS_F_CMS_ADD0_RECIPIENT_PASSWORD 165 +# define CMS_F_CMS_ADD1_RECEIPTREQUEST 158 +# define CMS_F_CMS_ADD1_RECIPIENT_CERT 101 +# define CMS_F_CMS_ADD1_SIGNER 102 +# define CMS_F_CMS_ADD1_SIGNINGTIME 103 +# define CMS_F_CMS_COMPRESS 104 +# define CMS_F_CMS_COMPRESSEDDATA_CREATE 105 +# define CMS_F_CMS_COMPRESSEDDATA_INIT_BIO 106 +# define CMS_F_CMS_COPY_CONTENT 107 +# define CMS_F_CMS_COPY_MESSAGEDIGEST 108 +# define CMS_F_CMS_DATA 109 +# define CMS_F_CMS_DATAFINAL 110 +# define CMS_F_CMS_DATAINIT 111 +# define CMS_F_CMS_DECRYPT 112 +# define CMS_F_CMS_DECRYPT_SET1_KEY 113 +# define CMS_F_CMS_DECRYPT_SET1_PASSWORD 166 +# define CMS_F_CMS_DECRYPT_SET1_PKEY 114 +# define CMS_F_CMS_DIGESTALGORITHM_FIND_CTX 115 +# define CMS_F_CMS_DIGESTALGORITHM_INIT_BIO 116 +# define CMS_F_CMS_DIGESTEDDATA_DO_FINAL 117 +# define CMS_F_CMS_DIGEST_VERIFY 118 +# define CMS_F_CMS_ENCODE_RECEIPT 161 +# define CMS_F_CMS_ENCRYPT 119 +# define CMS_F_CMS_ENCRYPTEDCONTENT_INIT 179 +# define CMS_F_CMS_ENCRYPTEDCONTENT_INIT_BIO 120 +# define CMS_F_CMS_ENCRYPTEDDATA_DECRYPT 121 +# define CMS_F_CMS_ENCRYPTEDDATA_ENCRYPT 122 +# define CMS_F_CMS_ENCRYPTEDDATA_SET1_KEY 123 +# define CMS_F_CMS_ENVELOPEDDATA_CREATE 124 +# define CMS_F_CMS_ENVELOPEDDATA_INIT_BIO 125 +# define CMS_F_CMS_ENVELOPED_DATA_INIT 126 +# define CMS_F_CMS_ENV_ASN1_CTRL 171 +# define CMS_F_CMS_FINAL 127 +# define CMS_F_CMS_GET0_CERTIFICATE_CHOICES 128 +# define CMS_F_CMS_GET0_CONTENT 129 +# define CMS_F_CMS_GET0_ECONTENT_TYPE 130 +# define CMS_F_CMS_GET0_ENVELOPED 131 +# define CMS_F_CMS_GET0_REVOCATION_CHOICES 132 +# define CMS_F_CMS_GET0_SIGNED 133 +# define CMS_F_CMS_MSGSIGDIGEST_ADD1 162 +# define CMS_F_CMS_RECEIPTREQUEST_CREATE0 159 +# define CMS_F_CMS_RECEIPT_VERIFY 160 +# define CMS_F_CMS_RECIPIENTINFO_DECRYPT 134 +# define CMS_F_CMS_RECIPIENTINFO_ENCRYPT 169 +# define CMS_F_CMS_RECIPIENTINFO_KARI_ENCRYPT 178 +# define CMS_F_CMS_RECIPIENTINFO_KARI_GET0_ALG 175 +# define CMS_F_CMS_RECIPIENTINFO_KARI_GET0_ORIG_ID 173 +# define CMS_F_CMS_RECIPIENTINFO_KARI_GET0_REKS 172 +# define CMS_F_CMS_RECIPIENTINFO_KARI_ORIG_ID_CMP 174 +# define CMS_F_CMS_RECIPIENTINFO_KEKRI_DECRYPT 135 +# define CMS_F_CMS_RECIPIENTINFO_KEKRI_ENCRYPT 136 +# define CMS_F_CMS_RECIPIENTINFO_KEKRI_GET0_ID 137 +# define CMS_F_CMS_RECIPIENTINFO_KEKRI_ID_CMP 138 +# define CMS_F_CMS_RECIPIENTINFO_KTRI_CERT_CMP 139 +# define CMS_F_CMS_RECIPIENTINFO_KTRI_DECRYPT 140 +# define CMS_F_CMS_RECIPIENTINFO_KTRI_ENCRYPT 141 +# define CMS_F_CMS_RECIPIENTINFO_KTRI_GET0_ALGS 142 +# define CMS_F_CMS_RECIPIENTINFO_KTRI_GET0_SIGNER_ID 143 +# define CMS_F_CMS_RECIPIENTINFO_PWRI_CRYPT 167 +# define CMS_F_CMS_RECIPIENTINFO_SET0_KEY 144 +# define CMS_F_CMS_RECIPIENTINFO_SET0_PASSWORD 168 +# define CMS_F_CMS_RECIPIENTINFO_SET0_PKEY 145 +# define CMS_F_CMS_SD_ASN1_CTRL 170 +# define CMS_F_CMS_SET1_IAS 176 +# define CMS_F_CMS_SET1_KEYID 177 +# define CMS_F_CMS_SET1_SIGNERIDENTIFIER 146 +# define CMS_F_CMS_SET_DETACHED 147 +# define CMS_F_CMS_SIGN 148 +# define CMS_F_CMS_SIGNED_DATA_INIT 149 +# define CMS_F_CMS_SIGNERINFO_CONTENT_SIGN 150 +# define CMS_F_CMS_SIGNERINFO_SIGN 151 +# define CMS_F_CMS_SIGNERINFO_VERIFY 152 +# define CMS_F_CMS_SIGNERINFO_VERIFY_CERT 153 +# define CMS_F_CMS_SIGNERINFO_VERIFY_CONTENT 154 +# define CMS_F_CMS_SIGN_RECEIPT 163 +# define CMS_F_CMS_SI_CHECK_ATTRIBUTES 183 +# define CMS_F_CMS_STREAM 155 +# define CMS_F_CMS_UNCOMPRESS 156 +# define CMS_F_CMS_VERIFY 157 
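Similarly, a minimal signing sketch against the CMS API declared in the cms.h header added above (illustration only, not part of the patch). The PEM and data file names are hypothetical placeholders, and error reporting is omitted for brevity.

    #include <openssl/bio.h>
    #include <openssl/pem.h>
    #include <openssl/cms.h>

    /* Produce an S/MIME (CMS SignedData) message from a data file,
     * signing with a certificate/key pair loaded from PEM files. */
    int sign_file(void)
    {
        BIO *certbio = BIO_new_file("signer.pem", "r");
        BIO *keybio  = BIO_new_file("signer.key", "r");
        BIO *data    = BIO_new_file("data.txt", "r");
        BIO *out     = BIO_new_file("data.smime", "w");
        X509 *cert = NULL;
        EVP_PKEY *key = NULL;
        CMS_ContentInfo *cms = NULL;
        int ret = 1;

        if (certbio == NULL || keybio == NULL || data == NULL || out == NULL)
            goto end;
        cert = PEM_read_bio_X509(certbio, NULL, NULL, NULL);
        key  = PEM_read_bio_PrivateKey(keybio, NULL, NULL, NULL);
        if (cert == NULL || key == NULL)
            goto end;

        /* CMS_STREAM defers reading the content until SMIME_write_CMS(). */
        cms = CMS_sign(cert, key, NULL, data, CMS_STREAM);
        if (cms != NULL && SMIME_write_CMS(out, cms, data, CMS_STREAM) == 1)
            ret = 0;

    end:
        CMS_ContentInfo_free(cms);
        X509_free(cert);
        EVP_PKEY_free(key);
        BIO_free(certbio); BIO_free(keybio); BIO_free(data); BIO_free(out);
        return ret;
    }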
+# define CMS_F_KEK_UNWRAP_KEY 180 + +/* + * CMS reason codes. + */ +# define CMS_R_ADD_SIGNER_ERROR 99 +# define CMS_R_ATTRIBUTE_ERROR 161 +# define CMS_R_CERTIFICATE_ALREADY_PRESENT 175 +# define CMS_R_CERTIFICATE_HAS_NO_KEYID 160 +# define CMS_R_CERTIFICATE_VERIFY_ERROR 100 +# define CMS_R_CIPHER_INITIALISATION_ERROR 101 +# define CMS_R_CIPHER_PARAMETER_INITIALISATION_ERROR 102 +# define CMS_R_CMS_DATAFINAL_ERROR 103 +# define CMS_R_CMS_LIB 104 +# define CMS_R_CONTENTIDENTIFIER_MISMATCH 170 +# define CMS_R_CONTENT_NOT_FOUND 105 +# define CMS_R_CONTENT_TYPE_MISMATCH 171 +# define CMS_R_CONTENT_TYPE_NOT_COMPRESSED_DATA 106 +# define CMS_R_CONTENT_TYPE_NOT_ENVELOPED_DATA 107 +# define CMS_R_CONTENT_TYPE_NOT_SIGNED_DATA 108 +# define CMS_R_CONTENT_VERIFY_ERROR 109 +# define CMS_R_CTRL_ERROR 110 +# define CMS_R_CTRL_FAILURE 111 +# define CMS_R_DECRYPT_ERROR 112 +# define CMS_R_ERROR_GETTING_PUBLIC_KEY 113 +# define CMS_R_ERROR_READING_MESSAGEDIGEST_ATTRIBUTE 114 +# define CMS_R_ERROR_SETTING_KEY 115 +# define CMS_R_ERROR_SETTING_RECIPIENTINFO 116 +# define CMS_R_INVALID_ENCRYPTED_KEY_LENGTH 117 +# define CMS_R_INVALID_KEY_ENCRYPTION_PARAMETER 176 +# define CMS_R_INVALID_KEY_LENGTH 118 +# define CMS_R_MD_BIO_INIT_ERROR 119 +# define CMS_R_MESSAGEDIGEST_ATTRIBUTE_WRONG_LENGTH 120 +# define CMS_R_MESSAGEDIGEST_WRONG_LENGTH 121 +# define CMS_R_MSGSIGDIGEST_ERROR 172 +# define CMS_R_MSGSIGDIGEST_VERIFICATION_FAILURE 162 +# define CMS_R_MSGSIGDIGEST_WRONG_LENGTH 163 +# define CMS_R_NEED_ONE_SIGNER 164 +# define CMS_R_NOT_A_SIGNED_RECEIPT 165 +# define CMS_R_NOT_ENCRYPTED_DATA 122 +# define CMS_R_NOT_KEK 123 +# define CMS_R_NOT_KEY_AGREEMENT 181 +# define CMS_R_NOT_KEY_TRANSPORT 124 +# define CMS_R_NOT_PWRI 177 +# define CMS_R_NOT_SUPPORTED_FOR_THIS_KEY_TYPE 125 +# define CMS_R_NO_CIPHER 126 +# define CMS_R_NO_CONTENT 127 +# define CMS_R_NO_CONTENT_TYPE 173 +# define CMS_R_NO_DEFAULT_DIGEST 128 +# define CMS_R_NO_DIGEST_SET 129 +# define CMS_R_NO_KEY 130 +# define CMS_R_NO_KEY_OR_CERT 174 +# define CMS_R_NO_MATCHING_DIGEST 131 +# define CMS_R_NO_MATCHING_RECIPIENT 132 +# define CMS_R_NO_MATCHING_SIGNATURE 166 +# define CMS_R_NO_MSGSIGDIGEST 167 +# define CMS_R_NO_PASSWORD 178 +# define CMS_R_NO_PRIVATE_KEY 133 +# define CMS_R_NO_PUBLIC_KEY 134 +# define CMS_R_NO_RECEIPT_REQUEST 168 +# define CMS_R_NO_SIGNERS 135 +# define CMS_R_PRIVATE_KEY_DOES_NOT_MATCH_CERTIFICATE 136 +# define CMS_R_RECEIPT_DECODE_ERROR 169 +# define CMS_R_RECIPIENT_ERROR 137 +# define CMS_R_SIGNER_CERTIFICATE_NOT_FOUND 138 +# define CMS_R_SIGNFINAL_ERROR 139 +# define CMS_R_SMIME_TEXT_ERROR 140 +# define CMS_R_STORE_INIT_ERROR 141 +# define CMS_R_TYPE_NOT_COMPRESSED_DATA 142 +# define CMS_R_TYPE_NOT_DATA 143 +# define CMS_R_TYPE_NOT_DIGESTED_DATA 144 +# define CMS_R_TYPE_NOT_ENCRYPTED_DATA 145 +# define CMS_R_TYPE_NOT_ENVELOPED_DATA 146 +# define CMS_R_UNABLE_TO_FINALIZE_CONTEXT 147 +# define CMS_R_UNKNOWN_CIPHER 148 +# define CMS_R_UNKNOWN_DIGEST_ALGORITHM 149 +# define CMS_R_UNKNOWN_ID 150 +# define CMS_R_UNSUPPORTED_COMPRESSION_ALGORITHM 151 +# define CMS_R_UNSUPPORTED_CONTENT_ENCRYPTION_ALGORITHM 194 +# define CMS_R_UNSUPPORTED_CONTENT_TYPE 152 +# define CMS_R_UNSUPPORTED_KEK_ALGORITHM 153 +# define CMS_R_UNSUPPORTED_KEY_ENCRYPTION_ALGORITHM 179 +# define CMS_R_UNSUPPORTED_RECIPIENTINFO_TYPE 155 +# define CMS_R_UNSUPPORTED_RECIPIENT_TYPE 154 +# define CMS_R_UNSUPPORTED_TYPE 156 +# define CMS_R_UNWRAP_ERROR 157 +# define CMS_R_UNWRAP_FAILURE 180 +# define CMS_R_VERIFICATION_FAILURE 158 +# define CMS_R_WRAP_ERROR 159 + +# 
endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/comp.h b/third_party/openssl/uos/sw_64/include/openssl/comp.h new file mode 100644 index 00000000..d814d3cf --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/comp.h @@ -0,0 +1,53 @@ +/* + * Copyright 2015-2018 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_COMP_H +# define HEADER_COMP_H + +# include + +# ifndef OPENSSL_NO_COMP +# include +# include +# ifdef __cplusplus +extern "C" { +# endif + + + +COMP_CTX *COMP_CTX_new(COMP_METHOD *meth); +const COMP_METHOD *COMP_CTX_get_method(const COMP_CTX *ctx); +int COMP_CTX_get_type(const COMP_CTX* comp); +int COMP_get_type(const COMP_METHOD *meth); +const char *COMP_get_name(const COMP_METHOD *meth); +void COMP_CTX_free(COMP_CTX *ctx); + +int COMP_compress_block(COMP_CTX *ctx, unsigned char *out, int olen, + unsigned char *in, int ilen); +int COMP_expand_block(COMP_CTX *ctx, unsigned char *out, int olen, + unsigned char *in, int ilen); + +COMP_METHOD *COMP_zlib(void); + +#if OPENSSL_API_COMPAT < 0x10100000L +#define COMP_zlib_cleanup() while(0) continue +#endif + +# ifdef HEADER_BIO_H +# ifdef ZLIB +const BIO_METHOD *BIO_f_zlib(void); +# endif +# endif + + +# ifdef __cplusplus +} +# endif +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/comperr.h b/third_party/openssl/uos/sw_64/include/openssl/comperr.h new file mode 100644 index 00000000..90231e9a --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/comperr.h @@ -0,0 +1,44 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_COMPERR_H +# define HEADER_COMPERR_H + +# ifndef HEADER_SYMHACKS_H +# include +# endif + +# include + +# ifndef OPENSSL_NO_COMP + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_COMP_strings(void); + +/* + * COMP function codes. + */ +# define COMP_F_BIO_ZLIB_FLUSH 99 +# define COMP_F_BIO_ZLIB_NEW 100 +# define COMP_F_BIO_ZLIB_READ 101 +# define COMP_F_BIO_ZLIB_WRITE 102 +# define COMP_F_COMP_CTX_NEW 103 + +/* + * COMP reason codes. + */ +# define COMP_R_ZLIB_DEFLATE_ERROR 99 +# define COMP_R_ZLIB_INFLATE_ERROR 100 +# define COMP_R_ZLIB_NOT_SUPPORTED 101 + +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/conf.h b/third_party/openssl/uos/sw_64/include/openssl/conf.h new file mode 100644 index 00000000..7336cd2f --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/conf.h @@ -0,0 +1,168 @@ +/* + * Copyright 1995-2018 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_CONF_H +# define HEADER_CONF_H + +# include +# include +# include +# include +# include +# include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + char *section; + char *name; + char *value; +} CONF_VALUE; + +DEFINE_STACK_OF(CONF_VALUE) +DEFINE_LHASH_OF(CONF_VALUE); + +struct conf_st; +struct conf_method_st; +typedef struct conf_method_st CONF_METHOD; + +struct conf_method_st { + const char *name; + CONF *(*create) (CONF_METHOD *meth); + int (*init) (CONF *conf); + int (*destroy) (CONF *conf); + int (*destroy_data) (CONF *conf); + int (*load_bio) (CONF *conf, BIO *bp, long *eline); + int (*dump) (const CONF *conf, BIO *bp); + int (*is_number) (const CONF *conf, char c); + int (*to_int) (const CONF *conf, char c); + int (*load) (CONF *conf, const char *name, long *eline); +}; + +/* Module definitions */ + +typedef struct conf_imodule_st CONF_IMODULE; +typedef struct conf_module_st CONF_MODULE; + +DEFINE_STACK_OF(CONF_MODULE) +DEFINE_STACK_OF(CONF_IMODULE) + +/* DSO module function typedefs */ +typedef int conf_init_func (CONF_IMODULE *md, const CONF *cnf); +typedef void conf_finish_func (CONF_IMODULE *md); + +# define CONF_MFLAGS_IGNORE_ERRORS 0x1 +# define CONF_MFLAGS_IGNORE_RETURN_CODES 0x2 +# define CONF_MFLAGS_SILENT 0x4 +# define CONF_MFLAGS_NO_DSO 0x8 +# define CONF_MFLAGS_IGNORE_MISSING_FILE 0x10 +# define CONF_MFLAGS_DEFAULT_SECTION 0x20 + +int CONF_set_default_method(CONF_METHOD *meth); +void CONF_set_nconf(CONF *conf, LHASH_OF(CONF_VALUE) *hash); +LHASH_OF(CONF_VALUE) *CONF_load(LHASH_OF(CONF_VALUE) *conf, const char *file, + long *eline); +# ifndef OPENSSL_NO_STDIO +LHASH_OF(CONF_VALUE) *CONF_load_fp(LHASH_OF(CONF_VALUE) *conf, FILE *fp, + long *eline); +# endif +LHASH_OF(CONF_VALUE) *CONF_load_bio(LHASH_OF(CONF_VALUE) *conf, BIO *bp, + long *eline); +STACK_OF(CONF_VALUE) *CONF_get_section(LHASH_OF(CONF_VALUE) *conf, + const char *section); +char *CONF_get_string(LHASH_OF(CONF_VALUE) *conf, const char *group, + const char *name); +long CONF_get_number(LHASH_OF(CONF_VALUE) *conf, const char *group, + const char *name); +void CONF_free(LHASH_OF(CONF_VALUE) *conf); +#ifndef OPENSSL_NO_STDIO +int CONF_dump_fp(LHASH_OF(CONF_VALUE) *conf, FILE *out); +#endif +int CONF_dump_bio(LHASH_OF(CONF_VALUE) *conf, BIO *out); + +DEPRECATEDIN_1_1_0(void OPENSSL_config(const char *config_name)) + +#if OPENSSL_API_COMPAT < 0x10100000L +# define OPENSSL_no_config() \ + OPENSSL_init_crypto(OPENSSL_INIT_NO_LOAD_CONFIG, NULL) +#endif + +/* + * New conf code. The semantics are different from the functions above. 
If + * that wasn't the case, the above functions would have been replaced + */ + +struct conf_st { + CONF_METHOD *meth; + void *meth_data; + LHASH_OF(CONF_VALUE) *data; +}; + +CONF *NCONF_new(CONF_METHOD *meth); +CONF_METHOD *NCONF_default(void); +CONF_METHOD *NCONF_WIN32(void); +void NCONF_free(CONF *conf); +void NCONF_free_data(CONF *conf); + +int NCONF_load(CONF *conf, const char *file, long *eline); +# ifndef OPENSSL_NO_STDIO +int NCONF_load_fp(CONF *conf, FILE *fp, long *eline); +# endif +int NCONF_load_bio(CONF *conf, BIO *bp, long *eline); +STACK_OF(CONF_VALUE) *NCONF_get_section(const CONF *conf, + const char *section); +char *NCONF_get_string(const CONF *conf, const char *group, const char *name); +int NCONF_get_number_e(const CONF *conf, const char *group, const char *name, + long *result); +#ifndef OPENSSL_NO_STDIO +int NCONF_dump_fp(const CONF *conf, FILE *out); +#endif +int NCONF_dump_bio(const CONF *conf, BIO *out); + +#define NCONF_get_number(c,g,n,r) NCONF_get_number_e(c,g,n,r) + +/* Module functions */ + +int CONF_modules_load(const CONF *cnf, const char *appname, + unsigned long flags); +int CONF_modules_load_file(const char *filename, const char *appname, + unsigned long flags); +void CONF_modules_unload(int all); +void CONF_modules_finish(void); +#if OPENSSL_API_COMPAT < 0x10100000L +# define CONF_modules_free() while(0) continue +#endif +int CONF_module_add(const char *name, conf_init_func *ifunc, + conf_finish_func *ffunc); + +const char *CONF_imodule_get_name(const CONF_IMODULE *md); +const char *CONF_imodule_get_value(const CONF_IMODULE *md); +void *CONF_imodule_get_usr_data(const CONF_IMODULE *md); +void CONF_imodule_set_usr_data(CONF_IMODULE *md, void *usr_data); +CONF_MODULE *CONF_imodule_get_module(const CONF_IMODULE *md); +unsigned long CONF_imodule_get_flags(const CONF_IMODULE *md); +void CONF_imodule_set_flags(CONF_IMODULE *md, unsigned long flags); +void *CONF_module_get_usr_data(CONF_MODULE *pmod); +void CONF_module_set_usr_data(CONF_MODULE *pmod, void *usr_data); + +char *CONF_get1_default_config_file(void); + +int CONF_parse_list(const char *list, int sep, int nospc, + int (*list_cb) (const char *elem, int len, void *usr), + void *arg); + +void OPENSSL_load_builtin_modules(void); + + +# ifdef __cplusplus +} +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/conf_api.h b/third_party/openssl/uos/sw_64/include/openssl/conf_api.h new file mode 100644 index 00000000..a0275ad7 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/conf_api.h @@ -0,0 +1,40 @@ +/* + * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_CONF_API_H +# define HEADER_CONF_API_H + +# include +# include + +#ifdef __cplusplus +extern "C" { +#endif + +/* Up until OpenSSL 0.9.5a, this was new_section */ +CONF_VALUE *_CONF_new_section(CONF *conf, const char *section); +/* Up until OpenSSL 0.9.5a, this was get_section */ +CONF_VALUE *_CONF_get_section(const CONF *conf, const char *section); +/* Up until OpenSSL 0.9.5a, this was CONF_get_section */ +STACK_OF(CONF_VALUE) *_CONF_get_section_values(const CONF *conf, + const char *section); + +int _CONF_add_string(CONF *conf, CONF_VALUE *section, CONF_VALUE *value); +char *_CONF_get_string(const CONF *conf, const char *section, + const char *name); +long _CONF_get_number(const CONF *conf, const char *section, + const char *name); + +int _CONF_new_data(CONF *conf); +void _CONF_free_data(CONF *conf); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/conferr.h b/third_party/openssl/uos/sw_64/include/openssl/conferr.h new file mode 100644 index 00000000..32b92291 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/conferr.h @@ -0,0 +1,76 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_CONFERR_H +# define HEADER_CONFERR_H + +# ifndef HEADER_SYMHACKS_H +# include +# endif + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_CONF_strings(void); + +/* + * CONF function codes. + */ +# define CONF_F_CONF_DUMP_FP 104 +# define CONF_F_CONF_LOAD 100 +# define CONF_F_CONF_LOAD_FP 103 +# define CONF_F_CONF_PARSE_LIST 119 +# define CONF_F_DEF_LOAD 120 +# define CONF_F_DEF_LOAD_BIO 121 +# define CONF_F_GET_NEXT_FILE 107 +# define CONF_F_MODULE_ADD 122 +# define CONF_F_MODULE_INIT 115 +# define CONF_F_MODULE_LOAD_DSO 117 +# define CONF_F_MODULE_RUN 118 +# define CONF_F_NCONF_DUMP_BIO 105 +# define CONF_F_NCONF_DUMP_FP 106 +# define CONF_F_NCONF_GET_NUMBER_E 112 +# define CONF_F_NCONF_GET_SECTION 108 +# define CONF_F_NCONF_GET_STRING 109 +# define CONF_F_NCONF_LOAD 113 +# define CONF_F_NCONF_LOAD_BIO 110 +# define CONF_F_NCONF_LOAD_FP 114 +# define CONF_F_NCONF_NEW 111 +# define CONF_F_PROCESS_INCLUDE 116 +# define CONF_F_SSL_MODULE_INIT 123 +# define CONF_F_STR_COPY 101 + +/* + * CONF reason codes. 
+ */ +# define CONF_R_ERROR_LOADING_DSO 110 +# define CONF_R_LIST_CANNOT_BE_NULL 115 +# define CONF_R_MISSING_CLOSE_SQUARE_BRACKET 100 +# define CONF_R_MISSING_EQUAL_SIGN 101 +# define CONF_R_MISSING_INIT_FUNCTION 112 +# define CONF_R_MODULE_INITIALIZATION_ERROR 109 +# define CONF_R_NO_CLOSE_BRACE 102 +# define CONF_R_NO_CONF 105 +# define CONF_R_NO_CONF_OR_ENVIRONMENT_VARIABLE 106 +# define CONF_R_NO_SECTION 107 +# define CONF_R_NO_SUCH_FILE 114 +# define CONF_R_NO_VALUE 108 +# define CONF_R_NUMBER_TOO_LARGE 121 +# define CONF_R_RECURSIVE_DIRECTORY_INCLUDE 111 +# define CONF_R_SSL_COMMAND_SECTION_EMPTY 117 +# define CONF_R_SSL_COMMAND_SECTION_NOT_FOUND 118 +# define CONF_R_SSL_SECTION_EMPTY 119 +# define CONF_R_SSL_SECTION_NOT_FOUND 120 +# define CONF_R_UNABLE_TO_CREATE_NEW_SECTION 103 +# define CONF_R_UNKNOWN_MODULE_NAME 113 +# define CONF_R_VARIABLE_EXPANSION_TOO_LONG 116 +# define CONF_R_VARIABLE_HAS_NO_VALUE 104 + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/crypto.h b/third_party/openssl/uos/sw_64/include/openssl/crypto.h new file mode 100644 index 00000000..7d0b5262 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/crypto.h @@ -0,0 +1,445 @@ +/* + * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_CRYPTO_H +# define HEADER_CRYPTO_H + +# include +# include + +# include + +# ifndef OPENSSL_NO_STDIO +# include +# endif + +# include +# include +# include +# include +# include + +# ifdef CHARSET_EBCDIC +# include +# endif + +/* + * Resolve problems on some operating systems with symbol names that clash + * one way or another + */ +# include + +# if OPENSSL_API_COMPAT < 0x10100000L +# include +# endif + +#ifdef __cplusplus +extern "C" { +#endif + +# if OPENSSL_API_COMPAT < 0x10100000L +# define SSLeay OpenSSL_version_num +# define SSLeay_version OpenSSL_version +# define SSLEAY_VERSION_NUMBER OPENSSL_VERSION_NUMBER +# define SSLEAY_VERSION OPENSSL_VERSION +# define SSLEAY_CFLAGS OPENSSL_CFLAGS +# define SSLEAY_BUILT_ON OPENSSL_BUILT_ON +# define SSLEAY_PLATFORM OPENSSL_PLATFORM +# define SSLEAY_DIR OPENSSL_DIR + +/* + * Old type for allocating dynamic locks. No longer used. Use the new thread + * API instead. + */ +typedef struct { + int dummy; +} CRYPTO_dynlock; + +# endif /* OPENSSL_API_COMPAT */ + +typedef void CRYPTO_RWLOCK; + +CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void); +int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock); +int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock); +int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock); +void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock); + +int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock); + +/* + * The following can be used to detect memory leaks in the library. If + * used, it turns on malloc checking + */ +# define CRYPTO_MEM_CHECK_OFF 0x0 /* Control only */ +# define CRYPTO_MEM_CHECK_ON 0x1 /* Control and mode bit */ +# define CRYPTO_MEM_CHECK_ENABLE 0x2 /* Control and mode bit */ +# define CRYPTO_MEM_CHECK_DISABLE 0x3 /* Control only */ + +struct crypto_ex_data_st { + STACK_OF(void) *sk; +}; +DEFINE_STACK_OF(void) + +/* + * Per class, we have a STACK of function pointers. 
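+ *
+ * A minimal usage sketch for the CRYPTO_RWLOCK / CRYPTO_atomic_add
+ * declarations earlier in this header (illustrative only, not part of the
+ * upstream header):
+ *
+ *   CRYPTO_RWLOCK *lock = CRYPTO_THREAD_lock_new();
+ *   int refcount = 0, result = 0;
+ *   if (lock != NULL && CRYPTO_atomic_add(&refcount, 1, &result, lock))
+ *       ...                      (result now holds the updated counter)
+ *   CRYPTO_THREAD_lock_free(lock);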
+ */ +# define CRYPTO_EX_INDEX_SSL 0 +# define CRYPTO_EX_INDEX_SSL_CTX 1 +# define CRYPTO_EX_INDEX_SSL_SESSION 2 +# define CRYPTO_EX_INDEX_X509 3 +# define CRYPTO_EX_INDEX_X509_STORE 4 +# define CRYPTO_EX_INDEX_X509_STORE_CTX 5 +# define CRYPTO_EX_INDEX_DH 6 +# define CRYPTO_EX_INDEX_DSA 7 +# define CRYPTO_EX_INDEX_EC_KEY 8 +# define CRYPTO_EX_INDEX_RSA 9 +# define CRYPTO_EX_INDEX_ENGINE 10 +# define CRYPTO_EX_INDEX_UI 11 +# define CRYPTO_EX_INDEX_BIO 12 +# define CRYPTO_EX_INDEX_APP 13 +# define CRYPTO_EX_INDEX_UI_METHOD 14 +# define CRYPTO_EX_INDEX_DRBG 15 +# define CRYPTO_EX_INDEX__COUNT 16 + +/* No longer needed, so this is a no-op */ +#define OPENSSL_malloc_init() while(0) continue + +int CRYPTO_mem_ctrl(int mode); + +# define OPENSSL_malloc(num) \ + CRYPTO_malloc(num, OPENSSL_FILE, OPENSSL_LINE) +# define OPENSSL_zalloc(num) \ + CRYPTO_zalloc(num, OPENSSL_FILE, OPENSSL_LINE) +# define OPENSSL_realloc(addr, num) \ + CRYPTO_realloc(addr, num, OPENSSL_FILE, OPENSSL_LINE) +# define OPENSSL_clear_realloc(addr, old_num, num) \ + CRYPTO_clear_realloc(addr, old_num, num, OPENSSL_FILE, OPENSSL_LINE) +# define OPENSSL_clear_free(addr, num) \ + CRYPTO_clear_free(addr, num, OPENSSL_FILE, OPENSSL_LINE) +# define OPENSSL_free(addr) \ + CRYPTO_free(addr, OPENSSL_FILE, OPENSSL_LINE) +# define OPENSSL_memdup(str, s) \ + CRYPTO_memdup((str), s, OPENSSL_FILE, OPENSSL_LINE) +# define OPENSSL_strdup(str) \ + CRYPTO_strdup(str, OPENSSL_FILE, OPENSSL_LINE) +# define OPENSSL_strndup(str, n) \ + CRYPTO_strndup(str, n, OPENSSL_FILE, OPENSSL_LINE) +# define OPENSSL_secure_malloc(num) \ + CRYPTO_secure_malloc(num, OPENSSL_FILE, OPENSSL_LINE) +# define OPENSSL_secure_zalloc(num) \ + CRYPTO_secure_zalloc(num, OPENSSL_FILE, OPENSSL_LINE) +# define OPENSSL_secure_free(addr) \ + CRYPTO_secure_free(addr, OPENSSL_FILE, OPENSSL_LINE) +# define OPENSSL_secure_clear_free(addr, num) \ + CRYPTO_secure_clear_free(addr, num, OPENSSL_FILE, OPENSSL_LINE) +# define OPENSSL_secure_actual_size(ptr) \ + CRYPTO_secure_actual_size(ptr) + +size_t OPENSSL_strlcpy(char *dst, const char *src, size_t siz); +size_t OPENSSL_strlcat(char *dst, const char *src, size_t siz); +size_t OPENSSL_strnlen(const char *str, size_t maxlen); +char *OPENSSL_buf2hexstr(const unsigned char *buffer, long len); +unsigned char *OPENSSL_hexstr2buf(const char *str, long *len); +int OPENSSL_hexchar2int(unsigned char c); + +# define OPENSSL_MALLOC_MAX_NELEMS(type) (((1U<<(sizeof(int)*8-1))-1)/sizeof(type)) + +unsigned long OpenSSL_version_num(void); +const char *OpenSSL_version(int type); +# define OPENSSL_VERSION 0 +# define OPENSSL_CFLAGS 1 +# define OPENSSL_BUILT_ON 2 +# define OPENSSL_PLATFORM 3 +# define OPENSSL_DIR 4 +# define OPENSSL_ENGINES_DIR 5 + +int OPENSSL_issetugid(void); + +typedef void CRYPTO_EX_new (void *parent, void *ptr, CRYPTO_EX_DATA *ad, + int idx, long argl, void *argp); +typedef void CRYPTO_EX_free (void *parent, void *ptr, CRYPTO_EX_DATA *ad, + int idx, long argl, void *argp); +typedef int CRYPTO_EX_dup (CRYPTO_EX_DATA *to, const CRYPTO_EX_DATA *from, + void *from_d, int idx, long argl, void *argp); +__owur int CRYPTO_get_ex_new_index(int class_index, long argl, void *argp, + CRYPTO_EX_new *new_func, CRYPTO_EX_dup *dup_func, + CRYPTO_EX_free *free_func); +/* No longer use an index. 
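+ *
+ * Hedged sketch of the ex_data index functions declared above (illustrative
+ * only; the CRYPTO_EX_INDEX_APP class and the NULL callbacks are assumptions):
+ *
+ *   int idx = CRYPTO_get_ex_new_index(CRYPTO_EX_INDEX_APP, 0, NULL,
+ *                                     NULL, NULL, NULL);
+ *   if (idx >= 0)
+ *       ...            (store per-object data via CRYPTO_set_ex_data(), below)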
*/ +int CRYPTO_free_ex_index(int class_index, int idx); + +/* + * Initialise/duplicate/free CRYPTO_EX_DATA variables corresponding to a + * given class (invokes whatever per-class callbacks are applicable) + */ +int CRYPTO_new_ex_data(int class_index, void *obj, CRYPTO_EX_DATA *ad); +int CRYPTO_dup_ex_data(int class_index, CRYPTO_EX_DATA *to, + const CRYPTO_EX_DATA *from); + +void CRYPTO_free_ex_data(int class_index, void *obj, CRYPTO_EX_DATA *ad); + +/* + * Get/set data in a CRYPTO_EX_DATA variable corresponding to a particular + * index (relative to the class type involved) + */ +int CRYPTO_set_ex_data(CRYPTO_EX_DATA *ad, int idx, void *val); +void *CRYPTO_get_ex_data(const CRYPTO_EX_DATA *ad, int idx); + +# if OPENSSL_API_COMPAT < 0x10100000L +/* + * This function cleans up all "ex_data" state. It mustn't be called under + * potential race-conditions. + */ +# define CRYPTO_cleanup_all_ex_data() while(0) continue + +/* + * The old locking functions have been removed completely without compatibility + * macros. This is because the old functions either could not properly report + * errors, or the returned error values were not clearly documented. + * Replacing the locking functions with no-ops would cause race condition + * issues in the affected applications. It is far better for them to fail at + * compile time. + * On the other hand, the locking callbacks are no longer used. Consequently, + * the callback management functions can be safely replaced with no-op macros. + */ +# define CRYPTO_num_locks() (1) +# define CRYPTO_set_locking_callback(func) +# define CRYPTO_get_locking_callback() (NULL) +# define CRYPTO_set_add_lock_callback(func) +# define CRYPTO_get_add_lock_callback() (NULL) + +/* + * These defines where used in combination with the old locking callbacks, + * they are not called anymore, but old code that's not called might still + * use them. 
+ */ +# define CRYPTO_LOCK 1 +# define CRYPTO_UNLOCK 2 +# define CRYPTO_READ 4 +# define CRYPTO_WRITE 8 + +/* This structure is no longer used */ +typedef struct crypto_threadid_st { + int dummy; +} CRYPTO_THREADID; +/* Only use CRYPTO_THREADID_set_[numeric|pointer]() within callbacks */ +# define CRYPTO_THREADID_set_numeric(id, val) +# define CRYPTO_THREADID_set_pointer(id, ptr) +# define CRYPTO_THREADID_set_callback(threadid_func) (0) +# define CRYPTO_THREADID_get_callback() (NULL) +# define CRYPTO_THREADID_current(id) +# define CRYPTO_THREADID_cmp(a, b) (-1) +# define CRYPTO_THREADID_cpy(dest, src) +# define CRYPTO_THREADID_hash(id) (0UL) + +# if OPENSSL_API_COMPAT < 0x10000000L +# define CRYPTO_set_id_callback(func) +# define CRYPTO_get_id_callback() (NULL) +# define CRYPTO_thread_id() (0UL) +# endif /* OPENSSL_API_COMPAT < 0x10000000L */ + +# define CRYPTO_set_dynlock_create_callback(dyn_create_function) +# define CRYPTO_set_dynlock_lock_callback(dyn_lock_function) +# define CRYPTO_set_dynlock_destroy_callback(dyn_destroy_function) +# define CRYPTO_get_dynlock_create_callback() (NULL) +# define CRYPTO_get_dynlock_lock_callback() (NULL) +# define CRYPTO_get_dynlock_destroy_callback() (NULL) +# endif /* OPENSSL_API_COMPAT < 0x10100000L */ + +int CRYPTO_set_mem_functions( + void *(*m) (size_t, const char *, int), + void *(*r) (void *, size_t, const char *, int), + void (*f) (void *, const char *, int)); +int CRYPTO_set_mem_debug(int flag); +void CRYPTO_get_mem_functions( + void *(**m) (size_t, const char *, int), + void *(**r) (void *, size_t, const char *, int), + void (**f) (void *, const char *, int)); + +void *CRYPTO_malloc(size_t num, const char *file, int line); +void *CRYPTO_zalloc(size_t num, const char *file, int line); +void *CRYPTO_memdup(const void *str, size_t siz, const char *file, int line); +char *CRYPTO_strdup(const char *str, const char *file, int line); +char *CRYPTO_strndup(const char *str, size_t s, const char *file, int line); +void CRYPTO_free(void *ptr, const char *file, int line); +void CRYPTO_clear_free(void *ptr, size_t num, const char *file, int line); +void *CRYPTO_realloc(void *addr, size_t num, const char *file, int line); +void *CRYPTO_clear_realloc(void *addr, size_t old_num, size_t num, + const char *file, int line); + +int CRYPTO_secure_malloc_init(size_t sz, int minsize); +int CRYPTO_secure_malloc_done(void); +void *CRYPTO_secure_malloc(size_t num, const char *file, int line); +void *CRYPTO_secure_zalloc(size_t num, const char *file, int line); +void CRYPTO_secure_free(void *ptr, const char *file, int line); +void CRYPTO_secure_clear_free(void *ptr, size_t num, + const char *file, int line); +int CRYPTO_secure_allocated(const void *ptr); +int CRYPTO_secure_malloc_initialized(void); +size_t CRYPTO_secure_actual_size(void *ptr); +size_t CRYPTO_secure_used(void); + +void OPENSSL_cleanse(void *ptr, size_t len); + +# ifndef OPENSSL_NO_CRYPTO_MDEBUG +# define OPENSSL_mem_debug_push(info) \ + CRYPTO_mem_debug_push(info, OPENSSL_FILE, OPENSSL_LINE) +# define OPENSSL_mem_debug_pop() \ + CRYPTO_mem_debug_pop() +int CRYPTO_mem_debug_push(const char *info, const char *file, int line); +int CRYPTO_mem_debug_pop(void); +void CRYPTO_get_alloc_counts(int *mcount, int *rcount, int *fcount); + +/*- + * Debugging functions (enabled by CRYPTO_set_mem_debug(1)) + * The flag argument has the following significance: + * 0: called before the actual memory allocation has taken place + * 1: called after the actual memory allocation has taken place + */ +void 
CRYPTO_mem_debug_malloc(void *addr, size_t num, int flag, + const char *file, int line); +void CRYPTO_mem_debug_realloc(void *addr1, void *addr2, size_t num, int flag, + const char *file, int line); +void CRYPTO_mem_debug_free(void *addr, int flag, + const char *file, int line); + +int CRYPTO_mem_leaks_cb(int (*cb) (const char *str, size_t len, void *u), + void *u); +# ifndef OPENSSL_NO_STDIO +int CRYPTO_mem_leaks_fp(FILE *); +# endif +int CRYPTO_mem_leaks(BIO *bio); +# endif + +/* die if we have to */ +ossl_noreturn void OPENSSL_die(const char *assertion, const char *file, int line); +# if OPENSSL_API_COMPAT < 0x10100000L +# define OpenSSLDie(f,l,a) OPENSSL_die((a),(f),(l)) +# endif +# define OPENSSL_assert(e) \ + (void)((e) ? 0 : (OPENSSL_die("assertion failed: " #e, OPENSSL_FILE, OPENSSL_LINE), 1)) + +int OPENSSL_isservice(void); + +int FIPS_mode(void); +int FIPS_mode_set(int r); + +void OPENSSL_init(void); +# ifdef OPENSSL_SYS_UNIX +void OPENSSL_fork_prepare(void); +void OPENSSL_fork_parent(void); +void OPENSSL_fork_child(void); +# endif + +struct tm *OPENSSL_gmtime(const time_t *timer, struct tm *result); +int OPENSSL_gmtime_adj(struct tm *tm, int offset_day, long offset_sec); +int OPENSSL_gmtime_diff(int *pday, int *psec, + const struct tm *from, const struct tm *to); + +/* + * CRYPTO_memcmp returns zero iff the |len| bytes at |a| and |b| are equal. + * It takes an amount of time dependent on |len|, but independent of the + * contents of |a| and |b|. Unlike memcmp, it cannot be used to put elements + * into a defined order as the return value when a != b is undefined, other + * than to be non-zero. + */ +int CRYPTO_memcmp(const void * in_a, const void * in_b, size_t len); + +/* Standard initialisation options */ +# define OPENSSL_INIT_NO_LOAD_CRYPTO_STRINGS 0x00000001L +# define OPENSSL_INIT_LOAD_CRYPTO_STRINGS 0x00000002L +# define OPENSSL_INIT_ADD_ALL_CIPHERS 0x00000004L +# define OPENSSL_INIT_ADD_ALL_DIGESTS 0x00000008L +# define OPENSSL_INIT_NO_ADD_ALL_CIPHERS 0x00000010L +# define OPENSSL_INIT_NO_ADD_ALL_DIGESTS 0x00000020L +# define OPENSSL_INIT_LOAD_CONFIG 0x00000040L +# define OPENSSL_INIT_NO_LOAD_CONFIG 0x00000080L +# define OPENSSL_INIT_ASYNC 0x00000100L +# define OPENSSL_INIT_ENGINE_RDRAND 0x00000200L +# define OPENSSL_INIT_ENGINE_DYNAMIC 0x00000400L +# define OPENSSL_INIT_ENGINE_OPENSSL 0x00000800L +# define OPENSSL_INIT_ENGINE_CRYPTODEV 0x00001000L +# define OPENSSL_INIT_ENGINE_CAPI 0x00002000L +# define OPENSSL_INIT_ENGINE_PADLOCK 0x00004000L +# define OPENSSL_INIT_ENGINE_AFALG 0x00008000L +/* OPENSSL_INIT_ZLIB 0x00010000L */ +# define OPENSSL_INIT_ATFORK 0x00020000L +/* OPENSSL_INIT_BASE_ONLY 0x00040000L */ +# define OPENSSL_INIT_NO_ATEXIT 0x00080000L +/* OPENSSL_INIT flag range 0xfff00000 reserved for OPENSSL_init_ssl() */ +/* Max OPENSSL_INIT flag value is 0x80000000 */ + +/* openssl and dasync not counted as builtin */ +# define OPENSSL_INIT_ENGINE_ALL_BUILTIN \ + (OPENSSL_INIT_ENGINE_RDRAND | OPENSSL_INIT_ENGINE_DYNAMIC \ + | OPENSSL_INIT_ENGINE_CRYPTODEV | OPENSSL_INIT_ENGINE_CAPI | \ + OPENSSL_INIT_ENGINE_PADLOCK) + + +/* Library initialisation functions */ +void OPENSSL_cleanup(void); +int OPENSSL_init_crypto(uint64_t opts, const OPENSSL_INIT_SETTINGS *settings); +int OPENSSL_atexit(void (*handler)(void)); +void OPENSSL_thread_stop(void); + +/* Low-level control of initialization */ +OPENSSL_INIT_SETTINGS *OPENSSL_INIT_new(void); +# ifndef OPENSSL_NO_STDIO +int OPENSSL_INIT_set_config_filename(OPENSSL_INIT_SETTINGS *settings, + const char *config_filename); 
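+
+/*
+ * A minimal initialisation sketch (not part of the upstream header; the
+ * file name "openssl.cnf" is an assumption) using the declarations above:
+ *
+ *   OPENSSL_INIT_SETTINGS *init = OPENSSL_INIT_new();
+ *   if (init != NULL
+ *           && OPENSSL_INIT_set_config_filename(init, "openssl.cnf")
+ *           && OPENSSL_init_crypto(OPENSSL_INIT_LOAD_CONFIG, init))
+ *       ...          (libcrypto is initialised with the given config file)
+ *   OPENSSL_INIT_free(init);
+ */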
+void OPENSSL_INIT_set_config_file_flags(OPENSSL_INIT_SETTINGS *settings, + unsigned long flags); +int OPENSSL_INIT_set_config_appname(OPENSSL_INIT_SETTINGS *settings, + const char *config_appname); +# endif +void OPENSSL_INIT_free(OPENSSL_INIT_SETTINGS *settings); + +# if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) +# if defined(_WIN32) +# if defined(BASETYPES) || defined(_WINDEF_H) +/* application has to include in order to use this */ +typedef DWORD CRYPTO_THREAD_LOCAL; +typedef DWORD CRYPTO_THREAD_ID; + +typedef LONG CRYPTO_ONCE; +# define CRYPTO_ONCE_STATIC_INIT 0 +# endif +# else +# include +typedef pthread_once_t CRYPTO_ONCE; +typedef pthread_key_t CRYPTO_THREAD_LOCAL; +typedef pthread_t CRYPTO_THREAD_ID; + +# define CRYPTO_ONCE_STATIC_INIT PTHREAD_ONCE_INIT +# endif +# endif + +# if !defined(CRYPTO_ONCE_STATIC_INIT) +typedef unsigned int CRYPTO_ONCE; +typedef unsigned int CRYPTO_THREAD_LOCAL; +typedef unsigned int CRYPTO_THREAD_ID; +# define CRYPTO_ONCE_STATIC_INIT 0 +# endif + +int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void)); + +int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *)); +void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key); +int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val); +int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key); + +CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void); +int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b); + + +# ifdef __cplusplus +} +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/cryptoerr.h b/third_party/openssl/uos/sw_64/include/openssl/cryptoerr.h new file mode 100644 index 00000000..3db5a4ee --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/cryptoerr.h @@ -0,0 +1,57 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_CRYPTOERR_H +# define HEADER_CRYPTOERR_H + +# ifndef HEADER_SYMHACKS_H +# include +# endif + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_CRYPTO_strings(void); + +/* + * CRYPTO function codes. + */ +# define CRYPTO_F_CMAC_CTX_NEW 120 +# define CRYPTO_F_CRYPTO_DUP_EX_DATA 110 +# define CRYPTO_F_CRYPTO_FREE_EX_DATA 111 +# define CRYPTO_F_CRYPTO_GET_EX_NEW_INDEX 100 +# define CRYPTO_F_CRYPTO_MEMDUP 115 +# define CRYPTO_F_CRYPTO_NEW_EX_DATA 112 +# define CRYPTO_F_CRYPTO_OCB128_COPY_CTX 121 +# define CRYPTO_F_CRYPTO_OCB128_INIT 122 +# define CRYPTO_F_CRYPTO_SET_EX_DATA 102 +# define CRYPTO_F_FIPS_MODE_SET 109 +# define CRYPTO_F_GET_AND_LOCK 113 +# define CRYPTO_F_OPENSSL_ATEXIT 114 +# define CRYPTO_F_OPENSSL_BUF2HEXSTR 117 +# define CRYPTO_F_OPENSSL_FOPEN 119 +# define CRYPTO_F_OPENSSL_HEXSTR2BUF 118 +# define CRYPTO_F_OPENSSL_INIT_CRYPTO 116 +# define CRYPTO_F_OPENSSL_LH_NEW 126 +# define CRYPTO_F_OPENSSL_SK_DEEP_COPY 127 +# define CRYPTO_F_OPENSSL_SK_DUP 128 +# define CRYPTO_F_PKEY_HMAC_INIT 123 +# define CRYPTO_F_PKEY_POLY1305_INIT 124 +# define CRYPTO_F_PKEY_SIPHASH_INIT 125 +# define CRYPTO_F_SK_RESERVE 129 + +/* + * CRYPTO reason codes. 
+ */ +# define CRYPTO_R_FIPS_MODE_NOT_SUPPORTED 101 +# define CRYPTO_R_ILLEGAL_HEX_DIGIT 102 +# define CRYPTO_R_ODD_NUMBER_OF_DIGITS 103 + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/ct.h b/third_party/openssl/uos/sw_64/include/openssl/ct.h new file mode 100644 index 00000000..ebdba34d --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/ct.h @@ -0,0 +1,474 @@ +/* + * Copyright 2016-2018 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_CT_H +# define HEADER_CT_H + +# include + +# ifndef OPENSSL_NO_CT +# include +# include +# include +# include +# ifdef __cplusplus +extern "C" { +# endif + + +/* Minimum RSA key size, from RFC6962 */ +# define SCT_MIN_RSA_BITS 2048 + +/* All hashes are SHA256 in v1 of Certificate Transparency */ +# define CT_V1_HASHLEN SHA256_DIGEST_LENGTH + +typedef enum { + CT_LOG_ENTRY_TYPE_NOT_SET = -1, + CT_LOG_ENTRY_TYPE_X509 = 0, + CT_LOG_ENTRY_TYPE_PRECERT = 1 +} ct_log_entry_type_t; + +typedef enum { + SCT_VERSION_NOT_SET = -1, + SCT_VERSION_V1 = 0 +} sct_version_t; + +typedef enum { + SCT_SOURCE_UNKNOWN, + SCT_SOURCE_TLS_EXTENSION, + SCT_SOURCE_X509V3_EXTENSION, + SCT_SOURCE_OCSP_STAPLED_RESPONSE +} sct_source_t; + +typedef enum { + SCT_VALIDATION_STATUS_NOT_SET, + SCT_VALIDATION_STATUS_UNKNOWN_LOG, + SCT_VALIDATION_STATUS_VALID, + SCT_VALIDATION_STATUS_INVALID, + SCT_VALIDATION_STATUS_UNVERIFIED, + SCT_VALIDATION_STATUS_UNKNOWN_VERSION +} sct_validation_status_t; + +DEFINE_STACK_OF(SCT) +DEFINE_STACK_OF(CTLOG) + +/****************************************** + * CT policy evaluation context functions * + ******************************************/ + +/* + * Creates a new, empty policy evaluation context. + * The caller is responsible for calling CT_POLICY_EVAL_CTX_free when finished + * with the CT_POLICY_EVAL_CTX. + */ +CT_POLICY_EVAL_CTX *CT_POLICY_EVAL_CTX_new(void); + +/* Deletes a policy evaluation context and anything it owns. */ +void CT_POLICY_EVAL_CTX_free(CT_POLICY_EVAL_CTX *ctx); + +/* Gets the peer certificate that the SCTs are for */ +X509* CT_POLICY_EVAL_CTX_get0_cert(const CT_POLICY_EVAL_CTX *ctx); + +/* + * Sets the certificate associated with the received SCTs. + * Increments the reference count of cert. + * Returns 1 on success, 0 otherwise. + */ +int CT_POLICY_EVAL_CTX_set1_cert(CT_POLICY_EVAL_CTX *ctx, X509 *cert); + +/* Gets the issuer of the aforementioned certificate */ +X509* CT_POLICY_EVAL_CTX_get0_issuer(const CT_POLICY_EVAL_CTX *ctx); + +/* + * Sets the issuer of the certificate associated with the received SCTs. + * Increments the reference count of issuer. + * Returns 1 on success, 0 otherwise. + */ +int CT_POLICY_EVAL_CTX_set1_issuer(CT_POLICY_EVAL_CTX *ctx, X509 *issuer); + +/* Gets the CT logs that are trusted sources of SCTs */ +const CTLOG_STORE *CT_POLICY_EVAL_CTX_get0_log_store(const CT_POLICY_EVAL_CTX *ctx); + +/* Sets the log store that is in use. It must outlive the CT_POLICY_EVAL_CTX. */ +void CT_POLICY_EVAL_CTX_set_shared_CTLOG_STORE(CT_POLICY_EVAL_CTX *ctx, + CTLOG_STORE *log_store); + +/* + * Gets the time, in milliseconds since the Unix epoch, that will be used as the + * current time when checking whether an SCT was issued in the future. + * Such SCTs will fail validation, as required by RFC6962. 
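+ *
+ * Hedged sketch of typical policy-context setup using the declarations above
+ * (illustrative only; "cert" is an assumed X509 handle):
+ *
+ *   CT_POLICY_EVAL_CTX *ctx = CT_POLICY_EVAL_CTX_new();
+ *   if (ctx != NULL && CT_POLICY_EVAL_CTX_set1_cert(ctx, cert))
+ *       CT_POLICY_EVAL_CTX_set_time(ctx, (uint64_t)time(NULL) * 1000);
+ *   ...
+ *   CT_POLICY_EVAL_CTX_free(ctx);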
+ */ +uint64_t CT_POLICY_EVAL_CTX_get_time(const CT_POLICY_EVAL_CTX *ctx); + +/* + * Sets the time to evaluate SCTs against, in milliseconds since the Unix epoch. + * If an SCT's timestamp is after this time, it will be interpreted as having + * been issued in the future. RFC6962 states that "TLS clients MUST reject SCTs + * whose timestamp is in the future", so an SCT will not validate in this case. + */ +void CT_POLICY_EVAL_CTX_set_time(CT_POLICY_EVAL_CTX *ctx, uint64_t time_in_ms); + +/***************** + * SCT functions * + *****************/ + +/* + * Creates a new, blank SCT. + * The caller is responsible for calling SCT_free when finished with the SCT. + */ +SCT *SCT_new(void); + +/* + * Creates a new SCT from some base64-encoded strings. + * The caller is responsible for calling SCT_free when finished with the SCT. + */ +SCT *SCT_new_from_base64(unsigned char version, + const char *logid_base64, + ct_log_entry_type_t entry_type, + uint64_t timestamp, + const char *extensions_base64, + const char *signature_base64); + +/* + * Frees the SCT and the underlying data structures. + */ +void SCT_free(SCT *sct); + +/* + * Free a stack of SCTs, and the underlying SCTs themselves. + * Intended to be compatible with X509V3_EXT_FREE. + */ +void SCT_LIST_free(STACK_OF(SCT) *a); + +/* + * Returns the version of the SCT. + */ +sct_version_t SCT_get_version(const SCT *sct); + +/* + * Set the version of an SCT. + * Returns 1 on success, 0 if the version is unrecognized. + */ +__owur int SCT_set_version(SCT *sct, sct_version_t version); + +/* + * Returns the log entry type of the SCT. + */ +ct_log_entry_type_t SCT_get_log_entry_type(const SCT *sct); + +/* + * Set the log entry type of an SCT. + * Returns 1 on success, 0 otherwise. + */ +__owur int SCT_set_log_entry_type(SCT *sct, ct_log_entry_type_t entry_type); + +/* + * Gets the ID of the log that an SCT came from. + * Ownership of the log ID remains with the SCT. + * Returns the length of the log ID. + */ +size_t SCT_get0_log_id(const SCT *sct, unsigned char **log_id); + +/* + * Set the log ID of an SCT to point directly to the *log_id specified. + * The SCT takes ownership of the specified pointer. + * Returns 1 on success, 0 otherwise. + */ +__owur int SCT_set0_log_id(SCT *sct, unsigned char *log_id, size_t log_id_len); + +/* + * Set the log ID of an SCT. + * This makes a copy of the log_id. + * Returns 1 on success, 0 otherwise. + */ +__owur int SCT_set1_log_id(SCT *sct, const unsigned char *log_id, + size_t log_id_len); + +/* + * Returns the timestamp for the SCT (epoch time in milliseconds). + */ +uint64_t SCT_get_timestamp(const SCT *sct); + +/* + * Set the timestamp of an SCT (epoch time in milliseconds). + */ +void SCT_set_timestamp(SCT *sct, uint64_t timestamp); + +/* + * Return the NID for the signature used by the SCT. + * For CT v1, this will be either NID_sha256WithRSAEncryption or + * NID_ecdsa_with_SHA256 (or NID_undef if incorrect/unset). + */ +int SCT_get_signature_nid(const SCT *sct); + +/* + * Set the signature type of an SCT + * For CT v1, this should be either NID_sha256WithRSAEncryption or + * NID_ecdsa_with_SHA256. + * Returns 1 on success, 0 otherwise. + */ +__owur int SCT_set_signature_nid(SCT *sct, int nid); + +/* + * Set *ext to point to the extension data for the SCT. ext must not be NULL. + * The SCT retains ownership of this pointer. + * Returns length of the data pointed to. 
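+ *
+ * A minimal sketch of creating an SCT with the setters declared above
+ * (illustrative only, not part of the upstream header; the timestamp value
+ * is arbitrary):
+ *
+ *   SCT *sct = SCT_new();
+ *   if (sct != NULL && SCT_set_version(sct, SCT_VERSION_V1)) {
+ *       SCT_set_timestamp(sct, 1234567890000ULL);
+ *       ...
+ *   }
+ *   SCT_free(sct);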
+ */ +size_t SCT_get0_extensions(const SCT *sct, unsigned char **ext); + +/* + * Set the extensions of an SCT to point directly to the *ext specified. + * The SCT takes ownership of the specified pointer. + */ +void SCT_set0_extensions(SCT *sct, unsigned char *ext, size_t ext_len); + +/* + * Set the extensions of an SCT. + * This takes a copy of the ext. + * Returns 1 on success, 0 otherwise. + */ +__owur int SCT_set1_extensions(SCT *sct, const unsigned char *ext, + size_t ext_len); + +/* + * Set *sig to point to the signature for the SCT. sig must not be NULL. + * The SCT retains ownership of this pointer. + * Returns length of the data pointed to. + */ +size_t SCT_get0_signature(const SCT *sct, unsigned char **sig); + +/* + * Set the signature of an SCT to point directly to the *sig specified. + * The SCT takes ownership of the specified pointer. + */ +void SCT_set0_signature(SCT *sct, unsigned char *sig, size_t sig_len); + +/* + * Set the signature of an SCT to be a copy of the *sig specified. + * Returns 1 on success, 0 otherwise. + */ +__owur int SCT_set1_signature(SCT *sct, const unsigned char *sig, + size_t sig_len); + +/* + * The origin of this SCT, e.g. TLS extension, OCSP response, etc. + */ +sct_source_t SCT_get_source(const SCT *sct); + +/* + * Set the origin of this SCT, e.g. TLS extension, OCSP response, etc. + * Returns 1 on success, 0 otherwise. + */ +__owur int SCT_set_source(SCT *sct, sct_source_t source); + +/* + * Returns a text string describing the validation status of |sct|. + */ +const char *SCT_validation_status_string(const SCT *sct); + +/* + * Pretty-prints an |sct| to |out|. + * It will be indented by the number of spaces specified by |indent|. + * If |logs| is not NULL, it will be used to lookup the CT log that the SCT came + * from, so that the log name can be printed. + */ +void SCT_print(const SCT *sct, BIO *out, int indent, const CTLOG_STORE *logs); + +/* + * Pretty-prints an |sct_list| to |out|. + * It will be indented by the number of spaces specified by |indent|. + * SCTs will be delimited by |separator|. + * If |logs| is not NULL, it will be used to lookup the CT log that each SCT + * came from, so that the log names can be printed. + */ +void SCT_LIST_print(const STACK_OF(SCT) *sct_list, BIO *out, int indent, + const char *separator, const CTLOG_STORE *logs); + +/* + * Gets the last result of validating this SCT. + * If it has not been validated yet, returns SCT_VALIDATION_STATUS_NOT_SET. + */ +sct_validation_status_t SCT_get_validation_status(const SCT *sct); + +/* + * Validates the given SCT with the provided context. + * Sets the "validation_status" field of the SCT. + * Returns 1 if the SCT is valid and the signature verifies. + * Returns 0 if the SCT is invalid or could not be verified. + * Returns -1 if an error occurs. + */ +__owur int SCT_validate(SCT *sct, const CT_POLICY_EVAL_CTX *ctx); + +/* + * Validates the given list of SCTs with the provided context. + * Sets the "validation_status" field of each SCT. + * Returns 1 if there are no invalid SCTs and all signatures verify. + * Returns 0 if at least one SCT is invalid or could not be verified. + * Returns a negative integer if an error occurs. + */ +__owur int SCT_LIST_validate(const STACK_OF(SCT) *scts, + CT_POLICY_EVAL_CTX *ctx); + + +/********************************* + * SCT parsing and serialisation * + *********************************/ + +/* + * Serialize (to TLS format) a stack of SCTs and return the length. + * "a" must not be NULL. 
+ * If "pp" is NULL, just return the length of what would have been serialized. + * If "pp" is not NULL and "*pp" is null, function will allocate a new pointer + * for data that caller is responsible for freeing (only if function returns + * successfully). + * If "pp" is NULL and "*pp" is not NULL, caller is responsible for ensuring + * that "*pp" is large enough to accept all of the serialized data. + * Returns < 0 on error, >= 0 indicating bytes written (or would have been) + * on success. + */ +__owur int i2o_SCT_LIST(const STACK_OF(SCT) *a, unsigned char **pp); + +/* + * Convert TLS format SCT list to a stack of SCTs. + * If "a" or "*a" is NULL, a new stack will be created that the caller is + * responsible for freeing (by calling SCT_LIST_free). + * "**pp" and "*pp" must not be NULL. + * Upon success, "*pp" will point to after the last bytes read, and a stack + * will be returned. + * Upon failure, a NULL pointer will be returned, and the position of "*pp" is + * not defined. + */ +STACK_OF(SCT) *o2i_SCT_LIST(STACK_OF(SCT) **a, const unsigned char **pp, + size_t len); + +/* + * Serialize (to DER format) a stack of SCTs and return the length. + * "a" must not be NULL. + * If "pp" is NULL, just returns the length of what would have been serialized. + * If "pp" is not NULL and "*pp" is null, function will allocate a new pointer + * for data that caller is responsible for freeing (only if function returns + * successfully). + * If "pp" is NULL and "*pp" is not NULL, caller is responsible for ensuring + * that "*pp" is large enough to accept all of the serialized data. + * Returns < 0 on error, >= 0 indicating bytes written (or would have been) + * on success. + */ +__owur int i2d_SCT_LIST(const STACK_OF(SCT) *a, unsigned char **pp); + +/* + * Parses an SCT list in DER format and returns it. + * If "a" or "*a" is NULL, a new stack will be created that the caller is + * responsible for freeing (by calling SCT_LIST_free). + * "**pp" and "*pp" must not be NULL. + * Upon success, "*pp" will point to after the last bytes read, and a stack + * will be returned. + * Upon failure, a NULL pointer will be returned, and the position of "*pp" is + * not defined. + */ +STACK_OF(SCT) *d2i_SCT_LIST(STACK_OF(SCT) **a, const unsigned char **pp, + long len); + +/* + * Serialize (to TLS format) an |sct| and write it to |out|. + * If |out| is null, no SCT will be output but the length will still be returned. + * If |out| points to a null pointer, a string will be allocated to hold the + * TLS-format SCT. It is the responsibility of the caller to free it. + * If |out| points to an allocated string, the TLS-format SCT will be written + * to it. + * The length of the SCT in TLS format will be returned. + */ +__owur int i2o_SCT(const SCT *sct, unsigned char **out); + +/* + * Parses an SCT in TLS format and returns it. + * If |psct| is not null, it will end up pointing to the parsed SCT. If it + * already points to a non-null pointer, the pointer will be free'd. + * |in| should be a pointer to a string containing the TLS-format SCT. + * |in| will be advanced to the end of the SCT if parsing succeeds. + * |len| should be the length of the SCT in |in|. + * Returns NULL if an error occurs. + * If the SCT is an unsupported version, only the SCT's 'sct' and 'sct_len' + * fields will be populated (with |in| and |len| respectively). 
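+ *
+ * Hedged serialisation sketch using i2o_SCT_LIST() from above (illustrative
+ * only; "scts" is an assumed, already-populated STACK_OF(SCT)):
+ *
+ *   unsigned char *buf = NULL;
+ *   int len = i2o_SCT_LIST(scts, &buf);
+ *   if (len >= 0) {
+ *       ...                              (buf holds len TLS-format bytes)
+ *       OPENSSL_free(buf);
+ *   }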
+ */ +SCT *o2i_SCT(SCT **psct, const unsigned char **in, size_t len); + +/******************** + * CT log functions * + ********************/ + +/* + * Creates a new CT log instance with the given |public_key| and |name|. + * Takes ownership of |public_key| but copies |name|. + * Returns NULL if malloc fails or if |public_key| cannot be converted to DER. + * Should be deleted by the caller using CTLOG_free when no longer needed. + */ +CTLOG *CTLOG_new(EVP_PKEY *public_key, const char *name); + +/* + * Creates a new CTLOG instance with the base64-encoded SubjectPublicKeyInfo DER + * in |pkey_base64|. The |name| is a string to help users identify this log. + * Returns 1 on success, 0 on failure. + * Should be deleted by the caller using CTLOG_free when no longer needed. + */ +int CTLOG_new_from_base64(CTLOG ** ct_log, + const char *pkey_base64, const char *name); + +/* + * Deletes a CT log instance and its fields. + */ +void CTLOG_free(CTLOG *log); + +/* Gets the name of the CT log */ +const char *CTLOG_get0_name(const CTLOG *log); +/* Gets the ID of the CT log */ +void CTLOG_get0_log_id(const CTLOG *log, const uint8_t **log_id, + size_t *log_id_len); +/* Gets the public key of the CT log */ +EVP_PKEY *CTLOG_get0_public_key(const CTLOG *log); + +/************************** + * CT log store functions * + **************************/ + +/* + * Creates a new CT log store. + * Should be deleted by the caller using CTLOG_STORE_free when no longer needed. + */ +CTLOG_STORE *CTLOG_STORE_new(void); + +/* + * Deletes a CT log store and all of the CT log instances held within. + */ +void CTLOG_STORE_free(CTLOG_STORE *store); + +/* + * Finds a CT log in the store based on its log ID. + * Returns the CT log, or NULL if no match is found. + */ +const CTLOG *CTLOG_STORE_get0_log_by_id(const CTLOG_STORE *store, + const uint8_t *log_id, + size_t log_id_len); + +/* + * Loads a CT log list into a |store| from a |file|. + * Returns 1 if loading is successful, or 0 otherwise. + */ +__owur int CTLOG_STORE_load_file(CTLOG_STORE *store, const char *file); + +/* + * Loads the default CT log list into a |store|. + * Returns 1 if loading is successful, or 0 otherwise. + */ +__owur int CTLOG_STORE_load_default_file(CTLOG_STORE *store); + +# ifdef __cplusplus +} +# endif +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/cterr.h b/third_party/openssl/uos/sw_64/include/openssl/cterr.h new file mode 100644 index 00000000..feb7bc56 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/cterr.h @@ -0,0 +1,80 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_CTERR_H +# define HEADER_CTERR_H + +# ifndef HEADER_SYMHACKS_H +# include +# endif + +# include + +# ifndef OPENSSL_NO_CT + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_CT_strings(void); + +/* + * CT function codes. 
+ */ +# define CT_F_CTLOG_NEW 117 +# define CT_F_CTLOG_NEW_FROM_BASE64 118 +# define CT_F_CTLOG_NEW_FROM_CONF 119 +# define CT_F_CTLOG_STORE_LOAD_CTX_NEW 122 +# define CT_F_CTLOG_STORE_LOAD_FILE 123 +# define CT_F_CTLOG_STORE_LOAD_LOG 130 +# define CT_F_CTLOG_STORE_NEW 131 +# define CT_F_CT_BASE64_DECODE 124 +# define CT_F_CT_POLICY_EVAL_CTX_NEW 133 +# define CT_F_CT_V1_LOG_ID_FROM_PKEY 125 +# define CT_F_I2O_SCT 107 +# define CT_F_I2O_SCT_LIST 108 +# define CT_F_I2O_SCT_SIGNATURE 109 +# define CT_F_O2I_SCT 110 +# define CT_F_O2I_SCT_LIST 111 +# define CT_F_O2I_SCT_SIGNATURE 112 +# define CT_F_SCT_CTX_NEW 126 +# define CT_F_SCT_CTX_VERIFY 128 +# define CT_F_SCT_NEW 100 +# define CT_F_SCT_NEW_FROM_BASE64 127 +# define CT_F_SCT_SET0_LOG_ID 101 +# define CT_F_SCT_SET1_EXTENSIONS 114 +# define CT_F_SCT_SET1_LOG_ID 115 +# define CT_F_SCT_SET1_SIGNATURE 116 +# define CT_F_SCT_SET_LOG_ENTRY_TYPE 102 +# define CT_F_SCT_SET_SIGNATURE_NID 103 +# define CT_F_SCT_SET_VERSION 104 + +/* + * CT reason codes. + */ +# define CT_R_BASE64_DECODE_ERROR 108 +# define CT_R_INVALID_LOG_ID_LENGTH 100 +# define CT_R_LOG_CONF_INVALID 109 +# define CT_R_LOG_CONF_INVALID_KEY 110 +# define CT_R_LOG_CONF_MISSING_DESCRIPTION 111 +# define CT_R_LOG_CONF_MISSING_KEY 112 +# define CT_R_LOG_KEY_INVALID 113 +# define CT_R_SCT_FUTURE_TIMESTAMP 116 +# define CT_R_SCT_INVALID 104 +# define CT_R_SCT_INVALID_SIGNATURE 107 +# define CT_R_SCT_LIST_INVALID 105 +# define CT_R_SCT_LOG_ID_MISMATCH 114 +# define CT_R_SCT_NOT_SET 106 +# define CT_R_SCT_UNSUPPORTED_VERSION 115 +# define CT_R_UNRECOGNIZED_SIGNATURE_NID 101 +# define CT_R_UNSUPPORTED_ENTRY_TYPE 102 +# define CT_R_UNSUPPORTED_VERSION 103 + +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/des.h b/third_party/openssl/uos/sw_64/include/openssl/des.h new file mode 100644 index 00000000..be4abbdf --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/des.h @@ -0,0 +1,174 @@ +/* + * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_DES_H +# define HEADER_DES_H + +# include + +# ifndef OPENSSL_NO_DES +# ifdef __cplusplus +extern "C" { +# endif +# include + +typedef unsigned int DES_LONG; + +# ifdef OPENSSL_BUILD_SHLIBCRYPTO +# undef OPENSSL_EXTERN +# define OPENSSL_EXTERN OPENSSL_EXPORT +# endif + +typedef unsigned char DES_cblock[8]; +typedef /* const */ unsigned char const_DES_cblock[8]; +/* + * With "const", gcc 2.8.1 on Solaris thinks that DES_cblock * and + * const_DES_cblock * are incompatible pointer types. 
+ */ + +typedef struct DES_ks { + union { + DES_cblock cblock; + /* + * make sure things are correct size on machines with 8 byte longs + */ + DES_LONG deslong[2]; + } ks[16]; +} DES_key_schedule; + +# define DES_KEY_SZ (sizeof(DES_cblock)) +# define DES_SCHEDULE_SZ (sizeof(DES_key_schedule)) + +# define DES_ENCRYPT 1 +# define DES_DECRYPT 0 + +# define DES_CBC_MODE 0 +# define DES_PCBC_MODE 1 + +# define DES_ecb2_encrypt(i,o,k1,k2,e) \ + DES_ecb3_encrypt((i),(o),(k1),(k2),(k1),(e)) + +# define DES_ede2_cbc_encrypt(i,o,l,k1,k2,iv,e) \ + DES_ede3_cbc_encrypt((i),(o),(l),(k1),(k2),(k1),(iv),(e)) + +# define DES_ede2_cfb64_encrypt(i,o,l,k1,k2,iv,n,e) \ + DES_ede3_cfb64_encrypt((i),(o),(l),(k1),(k2),(k1),(iv),(n),(e)) + +# define DES_ede2_ofb64_encrypt(i,o,l,k1,k2,iv,n) \ + DES_ede3_ofb64_encrypt((i),(o),(l),(k1),(k2),(k1),(iv),(n)) + +OPENSSL_DECLARE_GLOBAL(int, DES_check_key); /* defaults to false */ +# define DES_check_key OPENSSL_GLOBAL_REF(DES_check_key) + +const char *DES_options(void); +void DES_ecb3_encrypt(const_DES_cblock *input, DES_cblock *output, + DES_key_schedule *ks1, DES_key_schedule *ks2, + DES_key_schedule *ks3, int enc); +DES_LONG DES_cbc_cksum(const unsigned char *input, DES_cblock *output, + long length, DES_key_schedule *schedule, + const_DES_cblock *ivec); +/* DES_cbc_encrypt does not update the IV! Use DES_ncbc_encrypt instead. */ +void DES_cbc_encrypt(const unsigned char *input, unsigned char *output, + long length, DES_key_schedule *schedule, + DES_cblock *ivec, int enc); +void DES_ncbc_encrypt(const unsigned char *input, unsigned char *output, + long length, DES_key_schedule *schedule, + DES_cblock *ivec, int enc); +void DES_xcbc_encrypt(const unsigned char *input, unsigned char *output, + long length, DES_key_schedule *schedule, + DES_cblock *ivec, const_DES_cblock *inw, + const_DES_cblock *outw, int enc); +void DES_cfb_encrypt(const unsigned char *in, unsigned char *out, int numbits, + long length, DES_key_schedule *schedule, + DES_cblock *ivec, int enc); +void DES_ecb_encrypt(const_DES_cblock *input, DES_cblock *output, + DES_key_schedule *ks, int enc); + +/* + * This is the DES encryption function that gets called by just about every + * other DES routine in the library. You should not use this function except + * to implement 'modes' of DES. I say this because the functions that call + * this routine do the conversion from 'char *' to long, and this needs to be + * done to make sure 'non-aligned' memory access do not occur. The + * characters are loaded 'little endian'. Data is a pointer to 2 unsigned + * long's and ks is the DES_key_schedule to use. enc, is non zero specifies + * encryption, zero if decryption. + */ +void DES_encrypt1(DES_LONG *data, DES_key_schedule *ks, int enc); + +/* + * This functions is the same as DES_encrypt1() except that the DES initial + * permutation (IP) and final permutation (FP) have been left out. As for + * DES_encrypt1(), you should not use this function. It is used by the + * routines in the library that implement triple DES. IP() DES_encrypt2() + * DES_encrypt2() DES_encrypt2() FP() is the same as DES_encrypt1() + * DES_encrypt1() DES_encrypt1() except faster :-). 
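+ *
+ * For ordinary use, the higher-level mode functions above are the entry
+ * points. A hedged sketch (illustrative only; "key" is an assumed 8-byte
+ * DES_cblock, and the schedule is built with DES_set_key_checked(), which
+ * is declared further below):
+ *
+ *   DES_key_schedule ks;
+ *   DES_cblock iv = {0};
+ *   if (DES_set_key_checked(&key, &ks) == 0)
+ *       DES_ncbc_encrypt(in, out, len, &ks, &iv, DES_ENCRYPT);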
+ */ +void DES_encrypt2(DES_LONG *data, DES_key_schedule *ks, int enc); + +void DES_encrypt3(DES_LONG *data, DES_key_schedule *ks1, + DES_key_schedule *ks2, DES_key_schedule *ks3); +void DES_decrypt3(DES_LONG *data, DES_key_schedule *ks1, + DES_key_schedule *ks2, DES_key_schedule *ks3); +void DES_ede3_cbc_encrypt(const unsigned char *input, unsigned char *output, + long length, + DES_key_schedule *ks1, DES_key_schedule *ks2, + DES_key_schedule *ks3, DES_cblock *ivec, int enc); +void DES_ede3_cfb64_encrypt(const unsigned char *in, unsigned char *out, + long length, DES_key_schedule *ks1, + DES_key_schedule *ks2, DES_key_schedule *ks3, + DES_cblock *ivec, int *num, int enc); +void DES_ede3_cfb_encrypt(const unsigned char *in, unsigned char *out, + int numbits, long length, DES_key_schedule *ks1, + DES_key_schedule *ks2, DES_key_schedule *ks3, + DES_cblock *ivec, int enc); +void DES_ede3_ofb64_encrypt(const unsigned char *in, unsigned char *out, + long length, DES_key_schedule *ks1, + DES_key_schedule *ks2, DES_key_schedule *ks3, + DES_cblock *ivec, int *num); +char *DES_fcrypt(const char *buf, const char *salt, char *ret); +char *DES_crypt(const char *buf, const char *salt); +void DES_ofb_encrypt(const unsigned char *in, unsigned char *out, int numbits, + long length, DES_key_schedule *schedule, + DES_cblock *ivec); +void DES_pcbc_encrypt(const unsigned char *input, unsigned char *output, + long length, DES_key_schedule *schedule, + DES_cblock *ivec, int enc); +DES_LONG DES_quad_cksum(const unsigned char *input, DES_cblock output[], + long length, int out_count, DES_cblock *seed); +int DES_random_key(DES_cblock *ret); +void DES_set_odd_parity(DES_cblock *key); +int DES_check_key_parity(const_DES_cblock *key); +int DES_is_weak_key(const_DES_cblock *key); +/* + * DES_set_key (= set_key = DES_key_sched = key_sched) calls + * DES_set_key_checked if global variable DES_check_key is set, + * DES_set_key_unchecked otherwise. + */ +int DES_set_key(const_DES_cblock *key, DES_key_schedule *schedule); +int DES_key_sched(const_DES_cblock *key, DES_key_schedule *schedule); +int DES_set_key_checked(const_DES_cblock *key, DES_key_schedule *schedule); +void DES_set_key_unchecked(const_DES_cblock *key, DES_key_schedule *schedule); +void DES_string_to_key(const char *str, DES_cblock *key); +void DES_string_to_2keys(const char *str, DES_cblock *key1, DES_cblock *key2); +void DES_cfb64_encrypt(const unsigned char *in, unsigned char *out, + long length, DES_key_schedule *schedule, + DES_cblock *ivec, int *num, int enc); +void DES_ofb64_encrypt(const unsigned char *in, unsigned char *out, + long length, DES_key_schedule *schedule, + DES_cblock *ivec, int *num); + +# define DES_fixup_key_parity DES_set_odd_parity + +# ifdef __cplusplus +} +# endif +# endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/dh.h b/third_party/openssl/uos/sw_64/include/openssl/dh.h new file mode 100644 index 00000000..6c6ff363 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/dh.h @@ -0,0 +1,343 @@ +/* + * Copyright 1995-2023 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_DH_H +# define HEADER_DH_H + +# include + +# ifndef OPENSSL_NO_DH +# include +# include +# include +# include +# if OPENSSL_API_COMPAT < 0x10100000L +# include +# endif +# include + +# ifdef __cplusplus +extern "C" { +# endif + +# ifndef OPENSSL_DH_MAX_MODULUS_BITS +# define OPENSSL_DH_MAX_MODULUS_BITS 10000 +# endif +# ifndef OPENSSL_DH_CHECK_MAX_MODULUS_BITS +# define OPENSSL_DH_CHECK_MAX_MODULUS_BITS 32768 +# endif + +# define OPENSSL_DH_FIPS_MIN_MODULUS_BITS 1024 + +# define DH_FLAG_CACHE_MONT_P 0x01 + +# if OPENSSL_API_COMPAT < 0x10100000L +/* + * Does nothing. Previously this switched off constant time behaviour. + */ +# define DH_FLAG_NO_EXP_CONSTTIME 0x00 +# endif + +/* + * If this flag is set the DH method is FIPS compliant and can be used in + * FIPS mode. This is set in the validated module method. If an application + * sets this flag in its own methods it is its responsibility to ensure the + * result is compliant. + */ + +# define DH_FLAG_FIPS_METHOD 0x0400 + +/* + * If this flag is set the operations normally disabled in FIPS mode are + * permitted it is then the applications responsibility to ensure that the + * usage is compliant. + */ + +# define DH_FLAG_NON_FIPS_ALLOW 0x0400 + +/* Already defined in ossl_typ.h */ +/* typedef struct dh_st DH; */ +/* typedef struct dh_method DH_METHOD; */ + +DECLARE_ASN1_ITEM(DHparams) + +# define DH_GENERATOR_2 2 +/* #define DH_GENERATOR_3 3 */ +# define DH_GENERATOR_5 5 + +/* DH_check error codes */ +# define DH_CHECK_P_NOT_PRIME 0x01 +# define DH_CHECK_P_NOT_SAFE_PRIME 0x02 +# define DH_UNABLE_TO_CHECK_GENERATOR 0x04 +# define DH_NOT_SUITABLE_GENERATOR 0x08 +# define DH_CHECK_Q_NOT_PRIME 0x10 +# define DH_CHECK_INVALID_Q_VALUE 0x20 +# define DH_CHECK_INVALID_J_VALUE 0x40 + +/* DH_check_pub_key error codes */ +# define DH_CHECK_PUBKEY_TOO_SMALL 0x01 +# define DH_CHECK_PUBKEY_TOO_LARGE 0x02 +# define DH_CHECK_PUBKEY_INVALID 0x04 + +/* + * primes p where (p-1)/2 is prime too are called "safe"; we define this for + * backward compatibility: + */ +# define DH_CHECK_P_NOT_STRONG_PRIME DH_CHECK_P_NOT_SAFE_PRIME + +# define d2i_DHparams_fp(fp,x) \ + (DH *)ASN1_d2i_fp((char *(*)())DH_new, \ + (char *(*)())d2i_DHparams, \ + (fp), \ + (unsigned char **)(x)) +# define i2d_DHparams_fp(fp,x) \ + ASN1_i2d_fp(i2d_DHparams,(fp), (unsigned char *)(x)) +# define d2i_DHparams_bio(bp,x) \ + ASN1_d2i_bio_of(DH, DH_new, d2i_DHparams, bp, x) +# define i2d_DHparams_bio(bp,x) \ + ASN1_i2d_bio_of_const(DH,i2d_DHparams,bp,x) + +# define d2i_DHxparams_fp(fp,x) \ + (DH *)ASN1_d2i_fp((char *(*)())DH_new, \ + (char *(*)())d2i_DHxparams, \ + (fp), \ + (unsigned char **)(x)) +# define i2d_DHxparams_fp(fp,x) \ + ASN1_i2d_fp(i2d_DHxparams,(fp), (unsigned char *)(x)) +# define d2i_DHxparams_bio(bp,x) \ + ASN1_d2i_bio_of(DH, DH_new, d2i_DHxparams, bp, x) +# define i2d_DHxparams_bio(bp,x) \ + ASN1_i2d_bio_of_const(DH, i2d_DHxparams, bp, x) + +DH *DHparams_dup(DH *); + +const DH_METHOD *DH_OpenSSL(void); + +void DH_set_default_method(const DH_METHOD *meth); +const DH_METHOD *DH_get_default_method(void); +int DH_set_method(DH *dh, const DH_METHOD *meth); +DH *DH_new_method(ENGINE *engine); + +DH *DH_new(void); +void DH_free(DH *dh); +int DH_up_ref(DH *dh); +int DH_bits(const DH *dh); +int DH_size(const DH *dh); +int DH_security_bits(const DH *dh); +#define DH_get_ex_new_index(l, p, newf, dupf, freef) \ + 
CRYPTO_get_ex_new_index(CRYPTO_EX_INDEX_DH, l, p, newf, dupf, freef) +int DH_set_ex_data(DH *d, int idx, void *arg); +void *DH_get_ex_data(DH *d, int idx); + +/* Deprecated version */ +DEPRECATEDIN_0_9_8(DH *DH_generate_parameters(int prime_len, int generator, + void (*callback) (int, int, + void *), + void *cb_arg)) + +/* New version */ +int DH_generate_parameters_ex(DH *dh, int prime_len, int generator, + BN_GENCB *cb); + +int DH_check_params_ex(const DH *dh); +int DH_check_ex(const DH *dh); +int DH_check_pub_key_ex(const DH *dh, const BIGNUM *pub_key); +int DH_check_params(const DH *dh, int *ret); +int DH_check(const DH *dh, int *codes); +int DH_check_pub_key(const DH *dh, const BIGNUM *pub_key, int *codes); +int DH_generate_key(DH *dh); +int DH_compute_key(unsigned char *key, const BIGNUM *pub_key, DH *dh); +int DH_compute_key_padded(unsigned char *key, const BIGNUM *pub_key, DH *dh); +DH *d2i_DHparams(DH **a, const unsigned char **pp, long length); +int i2d_DHparams(const DH *a, unsigned char **pp); +DH *d2i_DHxparams(DH **a, const unsigned char **pp, long length); +int i2d_DHxparams(const DH *a, unsigned char **pp); +# ifndef OPENSSL_NO_STDIO +int DHparams_print_fp(FILE *fp, const DH *x); +# endif +int DHparams_print(BIO *bp, const DH *x); + +/* RFC 5114 parameters */ +DH *DH_get_1024_160(void); +DH *DH_get_2048_224(void); +DH *DH_get_2048_256(void); + +/* Named parameters, currently RFC7919 */ +DH *DH_new_by_nid(int nid); +int DH_get_nid(const DH *dh); + +# ifndef OPENSSL_NO_CMS +/* RFC2631 KDF */ +int DH_KDF_X9_42(unsigned char *out, size_t outlen, + const unsigned char *Z, size_t Zlen, + ASN1_OBJECT *key_oid, + const unsigned char *ukm, size_t ukmlen, const EVP_MD *md); +# endif + +void DH_get0_pqg(const DH *dh, + const BIGNUM **p, const BIGNUM **q, const BIGNUM **g); +int DH_set0_pqg(DH *dh, BIGNUM *p, BIGNUM *q, BIGNUM *g); +void DH_get0_key(const DH *dh, + const BIGNUM **pub_key, const BIGNUM **priv_key); +int DH_set0_key(DH *dh, BIGNUM *pub_key, BIGNUM *priv_key); +const BIGNUM *DH_get0_p(const DH *dh); +const BIGNUM *DH_get0_q(const DH *dh); +const BIGNUM *DH_get0_g(const DH *dh); +const BIGNUM *DH_get0_priv_key(const DH *dh); +const BIGNUM *DH_get0_pub_key(const DH *dh); +void DH_clear_flags(DH *dh, int flags); +int DH_test_flags(const DH *dh, int flags); +void DH_set_flags(DH *dh, int flags); +ENGINE *DH_get0_engine(DH *d); +long DH_get_length(const DH *dh); +int DH_set_length(DH *dh, long length); + +DH_METHOD *DH_meth_new(const char *name, int flags); +void DH_meth_free(DH_METHOD *dhm); +DH_METHOD *DH_meth_dup(const DH_METHOD *dhm); +const char *DH_meth_get0_name(const DH_METHOD *dhm); +int DH_meth_set1_name(DH_METHOD *dhm, const char *name); +int DH_meth_get_flags(const DH_METHOD *dhm); +int DH_meth_set_flags(DH_METHOD *dhm, int flags); +void *DH_meth_get0_app_data(const DH_METHOD *dhm); +int DH_meth_set0_app_data(DH_METHOD *dhm, void *app_data); +int (*DH_meth_get_generate_key(const DH_METHOD *dhm)) (DH *); +int DH_meth_set_generate_key(DH_METHOD *dhm, int (*generate_key) (DH *)); +int (*DH_meth_get_compute_key(const DH_METHOD *dhm)) + (unsigned char *key, const BIGNUM *pub_key, DH *dh); +int DH_meth_set_compute_key(DH_METHOD *dhm, + int (*compute_key) (unsigned char *key, const BIGNUM *pub_key, DH *dh)); +int (*DH_meth_get_bn_mod_exp(const DH_METHOD *dhm)) + (const DH *, BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *, + BN_CTX *, BN_MONT_CTX *); +int DH_meth_set_bn_mod_exp(DH_METHOD *dhm, + int (*bn_mod_exp) (const DH *, BIGNUM *, const BIGNUM *, const BIGNUM 
*, + const BIGNUM *, BN_CTX *, BN_MONT_CTX *)); +int (*DH_meth_get_init(const DH_METHOD *dhm))(DH *); +int DH_meth_set_init(DH_METHOD *dhm, int (*init)(DH *)); +int (*DH_meth_get_finish(const DH_METHOD *dhm)) (DH *); +int DH_meth_set_finish(DH_METHOD *dhm, int (*finish) (DH *)); +int (*DH_meth_get_generate_params(const DH_METHOD *dhm)) + (DH *, int, int, BN_GENCB *); +int DH_meth_set_generate_params(DH_METHOD *dhm, + int (*generate_params) (DH *, int, int, BN_GENCB *)); + + +# define EVP_PKEY_CTX_set_dh_paramgen_prime_len(ctx, len) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DH, EVP_PKEY_OP_PARAMGEN, \ + EVP_PKEY_CTRL_DH_PARAMGEN_PRIME_LEN, len, NULL) + +# define EVP_PKEY_CTX_set_dh_paramgen_subprime_len(ctx, len) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DH, EVP_PKEY_OP_PARAMGEN, \ + EVP_PKEY_CTRL_DH_PARAMGEN_SUBPRIME_LEN, len, NULL) + +# define EVP_PKEY_CTX_set_dh_paramgen_type(ctx, typ) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DH, EVP_PKEY_OP_PARAMGEN, \ + EVP_PKEY_CTRL_DH_PARAMGEN_TYPE, typ, NULL) + +# define EVP_PKEY_CTX_set_dh_paramgen_generator(ctx, gen) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DH, EVP_PKEY_OP_PARAMGEN, \ + EVP_PKEY_CTRL_DH_PARAMGEN_GENERATOR, gen, NULL) + +# define EVP_PKEY_CTX_set_dh_rfc5114(ctx, gen) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DHX, EVP_PKEY_OP_PARAMGEN, \ + EVP_PKEY_CTRL_DH_RFC5114, gen, NULL) + +# define EVP_PKEY_CTX_set_dhx_rfc5114(ctx, gen) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DHX, EVP_PKEY_OP_PARAMGEN, \ + EVP_PKEY_CTRL_DH_RFC5114, gen, NULL) + +# define EVP_PKEY_CTX_set_dh_nid(ctx, nid) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DH, \ + EVP_PKEY_OP_PARAMGEN | EVP_PKEY_OP_KEYGEN, \ + EVP_PKEY_CTRL_DH_NID, nid, NULL) + +# define EVP_PKEY_CTX_set_dh_pad(ctx, pad) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DH, EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_DH_PAD, pad, NULL) + +# define EVP_PKEY_CTX_set_dh_kdf_type(ctx, kdf) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DHX, \ + EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_DH_KDF_TYPE, kdf, NULL) + +# define EVP_PKEY_CTX_get_dh_kdf_type(ctx) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DHX, \ + EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_DH_KDF_TYPE, -2, NULL) + +# define EVP_PKEY_CTX_set0_dh_kdf_oid(ctx, oid) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DHX, \ + EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_DH_KDF_OID, 0, (void *)(oid)) + +# define EVP_PKEY_CTX_get0_dh_kdf_oid(ctx, poid) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DHX, \ + EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_GET_DH_KDF_OID, 0, (void *)(poid)) + +# define EVP_PKEY_CTX_set_dh_kdf_md(ctx, md) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DHX, \ + EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_DH_KDF_MD, 0, (void *)(md)) + +# define EVP_PKEY_CTX_get_dh_kdf_md(ctx, pmd) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DHX, \ + EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_GET_DH_KDF_MD, 0, (void *)(pmd)) + +# define EVP_PKEY_CTX_set_dh_kdf_outlen(ctx, len) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DHX, \ + EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_DH_KDF_OUTLEN, len, NULL) + +# define EVP_PKEY_CTX_get_dh_kdf_outlen(ctx, plen) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DHX, \ + EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_GET_DH_KDF_OUTLEN, 0, (void *)(plen)) + +# define EVP_PKEY_CTX_set0_dh_kdf_ukm(ctx, p, plen) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DHX, \ + EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_DH_KDF_UKM, plen, (void *)(p)) + +# define EVP_PKEY_CTX_get0_dh_kdf_ukm(ctx, p) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DHX, \ + EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_GET_DH_KDF_UKM, 0, (void *)(p)) + +# define EVP_PKEY_CTRL_DH_PARAMGEN_PRIME_LEN (EVP_PKEY_ALG_CTRL + 1) +# define EVP_PKEY_CTRL_DH_PARAMGEN_GENERATOR 
(EVP_PKEY_ALG_CTRL + 2) +# define EVP_PKEY_CTRL_DH_RFC5114 (EVP_PKEY_ALG_CTRL + 3) +# define EVP_PKEY_CTRL_DH_PARAMGEN_SUBPRIME_LEN (EVP_PKEY_ALG_CTRL + 4) +# define EVP_PKEY_CTRL_DH_PARAMGEN_TYPE (EVP_PKEY_ALG_CTRL + 5) +# define EVP_PKEY_CTRL_DH_KDF_TYPE (EVP_PKEY_ALG_CTRL + 6) +# define EVP_PKEY_CTRL_DH_KDF_MD (EVP_PKEY_ALG_CTRL + 7) +# define EVP_PKEY_CTRL_GET_DH_KDF_MD (EVP_PKEY_ALG_CTRL + 8) +# define EVP_PKEY_CTRL_DH_KDF_OUTLEN (EVP_PKEY_ALG_CTRL + 9) +# define EVP_PKEY_CTRL_GET_DH_KDF_OUTLEN (EVP_PKEY_ALG_CTRL + 10) +# define EVP_PKEY_CTRL_DH_KDF_UKM (EVP_PKEY_ALG_CTRL + 11) +# define EVP_PKEY_CTRL_GET_DH_KDF_UKM (EVP_PKEY_ALG_CTRL + 12) +# define EVP_PKEY_CTRL_DH_KDF_OID (EVP_PKEY_ALG_CTRL + 13) +# define EVP_PKEY_CTRL_GET_DH_KDF_OID (EVP_PKEY_ALG_CTRL + 14) +# define EVP_PKEY_CTRL_DH_NID (EVP_PKEY_ALG_CTRL + 15) +# define EVP_PKEY_CTRL_DH_PAD (EVP_PKEY_ALG_CTRL + 16) + +/* KDF types */ +# define EVP_PKEY_DH_KDF_NONE 1 +# ifndef OPENSSL_NO_CMS +# define EVP_PKEY_DH_KDF_X9_42 2 +# endif + + +# ifdef __cplusplus +} +# endif +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/dherr.h b/third_party/openssl/uos/sw_64/include/openssl/dherr.h new file mode 100644 index 00000000..528c8198 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/dherr.h @@ -0,0 +1,89 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2023 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_DHERR_H +# define HEADER_DHERR_H + +# ifndef HEADER_SYMHACKS_H +# include +# endif + +# include + +# ifndef OPENSSL_NO_DH + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_DH_strings(void); + +/* + * DH function codes. + */ +# define DH_F_COMPUTE_KEY 102 +# define DH_F_DHPARAMS_PRINT_FP 101 +# define DH_F_DH_BUILTIN_GENPARAMS 106 +# define DH_F_DH_CHECK 126 +# define DH_F_DH_CHECK_EX 121 +# define DH_F_DH_CHECK_PARAMS_EX 122 +# define DH_F_DH_CHECK_PUB_KEY_EX 123 +# define DH_F_DH_CMS_DECRYPT 114 +# define DH_F_DH_CMS_SET_PEERKEY 115 +# define DH_F_DH_CMS_SET_SHARED_INFO 116 +# define DH_F_DH_METH_DUP 117 +# define DH_F_DH_METH_NEW 118 +# define DH_F_DH_METH_SET1_NAME 119 +# define DH_F_DH_NEW_BY_NID 104 +# define DH_F_DH_NEW_METHOD 105 +# define DH_F_DH_PARAM_DECODE 107 +# define DH_F_DH_PKEY_PUBLIC_CHECK 124 +# define DH_F_DH_PRIV_DECODE 110 +# define DH_F_DH_PRIV_ENCODE 111 +# define DH_F_DH_PUB_DECODE 108 +# define DH_F_DH_PUB_ENCODE 109 +# define DH_F_DO_DH_PRINT 100 +# define DH_F_GENERATE_KEY 103 +# define DH_F_PKEY_DH_CTRL_STR 120 +# define DH_F_PKEY_DH_DERIVE 112 +# define DH_F_PKEY_DH_INIT 125 +# define DH_F_PKEY_DH_KEYGEN 113 + +/* + * DH reason codes. 
+ */ +# define DH_R_BAD_GENERATOR 101 +# define DH_R_BN_DECODE_ERROR 109 +# define DH_R_BN_ERROR 106 +# define DH_R_CHECK_INVALID_J_VALUE 115 +# define DH_R_CHECK_INVALID_Q_VALUE 116 +# define DH_R_CHECK_PUBKEY_INVALID 122 +# define DH_R_CHECK_PUBKEY_TOO_LARGE 123 +# define DH_R_CHECK_PUBKEY_TOO_SMALL 124 +# define DH_R_CHECK_P_NOT_PRIME 117 +# define DH_R_CHECK_P_NOT_SAFE_PRIME 118 +# define DH_R_CHECK_Q_NOT_PRIME 119 +# define DH_R_DECODE_ERROR 104 +# define DH_R_INVALID_PARAMETER_NAME 110 +# define DH_R_INVALID_PARAMETER_NID 114 +# define DH_R_INVALID_PUBKEY 102 +# define DH_R_KDF_PARAMETER_ERROR 112 +# define DH_R_KEYS_NOT_SET 108 +# define DH_R_MISSING_PUBKEY 125 +# define DH_R_MODULUS_TOO_LARGE 103 +# define DH_R_NOT_SUITABLE_GENERATOR 120 +# define DH_R_NO_PARAMETERS_SET 107 +# define DH_R_NO_PRIVATE_VALUE 100 +# define DH_R_PARAMETER_ENCODING_ERROR 105 +# define DH_R_PEER_KEY_ERROR 111 +# define DH_R_SHARED_INFO_ERROR 113 +# define DH_R_UNABLE_TO_CHECK_GENERATOR 121 + +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/dsa.h b/third_party/openssl/uos/sw_64/include/openssl/dsa.h new file mode 100644 index 00000000..6d8a18a4 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/dsa.h @@ -0,0 +1,244 @@ +/* + * Copyright 1995-2018 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_DSA_H +# define HEADER_DSA_H + +# include + +# ifndef OPENSSL_NO_DSA +# ifdef __cplusplus +extern "C" { +# endif +# include +# include +# include +# include +# include +# if OPENSSL_API_COMPAT < 0x10100000L +# include +# endif +# include + +# ifndef OPENSSL_DSA_MAX_MODULUS_BITS +# define OPENSSL_DSA_MAX_MODULUS_BITS 10000 +# endif + +# define OPENSSL_DSA_FIPS_MIN_MODULUS_BITS 1024 + +# define DSA_FLAG_CACHE_MONT_P 0x01 +# if OPENSSL_API_COMPAT < 0x10100000L +/* + * Does nothing. Previously this switched off constant time behaviour. + */ +# define DSA_FLAG_NO_EXP_CONSTTIME 0x00 +# endif + +/* + * If this flag is set the DSA method is FIPS compliant and can be used in + * FIPS mode. This is set in the validated module method. If an application + * sets this flag in its own methods it is its responsibility to ensure the + * result is compliant. + */ + +# define DSA_FLAG_FIPS_METHOD 0x0400 + +/* + * If this flag is set the operations normally disabled in FIPS mode are + * permitted it is then the applications responsibility to ensure that the + * usage is compliant. 
+ */ + +# define DSA_FLAG_NON_FIPS_ALLOW 0x0400 +# define DSA_FLAG_FIPS_CHECKED 0x0800 + +/* Already defined in ossl_typ.h */ +/* typedef struct dsa_st DSA; */ +/* typedef struct dsa_method DSA_METHOD; */ + +typedef struct DSA_SIG_st DSA_SIG; + +# define d2i_DSAparams_fp(fp,x) (DSA *)ASN1_d2i_fp((char *(*)())DSA_new, \ + (char *(*)())d2i_DSAparams,(fp),(unsigned char **)(x)) +# define i2d_DSAparams_fp(fp,x) ASN1_i2d_fp(i2d_DSAparams,(fp), \ + (unsigned char *)(x)) +# define d2i_DSAparams_bio(bp,x) ASN1_d2i_bio_of(DSA,DSA_new,d2i_DSAparams,bp,x) +# define i2d_DSAparams_bio(bp,x) ASN1_i2d_bio_of_const(DSA,i2d_DSAparams,bp,x) + +DSA *DSAparams_dup(DSA *x); +DSA_SIG *DSA_SIG_new(void); +void DSA_SIG_free(DSA_SIG *a); +int i2d_DSA_SIG(const DSA_SIG *a, unsigned char **pp); +DSA_SIG *d2i_DSA_SIG(DSA_SIG **v, const unsigned char **pp, long length); +void DSA_SIG_get0(const DSA_SIG *sig, const BIGNUM **pr, const BIGNUM **ps); +int DSA_SIG_set0(DSA_SIG *sig, BIGNUM *r, BIGNUM *s); + +DSA_SIG *DSA_do_sign(const unsigned char *dgst, int dlen, DSA *dsa); +int DSA_do_verify(const unsigned char *dgst, int dgst_len, + DSA_SIG *sig, DSA *dsa); + +const DSA_METHOD *DSA_OpenSSL(void); + +void DSA_set_default_method(const DSA_METHOD *); +const DSA_METHOD *DSA_get_default_method(void); +int DSA_set_method(DSA *dsa, const DSA_METHOD *); +const DSA_METHOD *DSA_get_method(DSA *d); + +DSA *DSA_new(void); +DSA *DSA_new_method(ENGINE *engine); +void DSA_free(DSA *r); +/* "up" the DSA object's reference count */ +int DSA_up_ref(DSA *r); +int DSA_size(const DSA *); +int DSA_bits(const DSA *d); +int DSA_security_bits(const DSA *d); + /* next 4 return -1 on error */ +DEPRECATEDIN_1_2_0(int DSA_sign_setup(DSA *dsa, BN_CTX *ctx_in, BIGNUM **kinvp, BIGNUM **rp)) +int DSA_sign(int type, const unsigned char *dgst, int dlen, + unsigned char *sig, unsigned int *siglen, DSA *dsa); +int DSA_verify(int type, const unsigned char *dgst, int dgst_len, + const unsigned char *sigbuf, int siglen, DSA *dsa); +#define DSA_get_ex_new_index(l, p, newf, dupf, freef) \ + CRYPTO_get_ex_new_index(CRYPTO_EX_INDEX_DSA, l, p, newf, dupf, freef) +int DSA_set_ex_data(DSA *d, int idx, void *arg); +void *DSA_get_ex_data(DSA *d, int idx); + +DSA *d2i_DSAPublicKey(DSA **a, const unsigned char **pp, long length); +DSA *d2i_DSAPrivateKey(DSA **a, const unsigned char **pp, long length); +DSA *d2i_DSAparams(DSA **a, const unsigned char **pp, long length); + +/* Deprecated version */ +DEPRECATEDIN_0_9_8(DSA *DSA_generate_parameters(int bits, + unsigned char *seed, + int seed_len, + int *counter_ret, + unsigned long *h_ret, void + (*callback) (int, int, + void *), + void *cb_arg)) + +/* New version */ +int DSA_generate_parameters_ex(DSA *dsa, int bits, + const unsigned char *seed, int seed_len, + int *counter_ret, unsigned long *h_ret, + BN_GENCB *cb); + +int DSA_generate_key(DSA *a); +int i2d_DSAPublicKey(const DSA *a, unsigned char **pp); +int i2d_DSAPrivateKey(const DSA *a, unsigned char **pp); +int i2d_DSAparams(const DSA *a, unsigned char **pp); + +int DSAparams_print(BIO *bp, const DSA *x); +int DSA_print(BIO *bp, const DSA *x, int off); +# ifndef OPENSSL_NO_STDIO +int DSAparams_print_fp(FILE *fp, const DSA *x); +int DSA_print_fp(FILE *bp, const DSA *x, int off); +# endif + +# define DSS_prime_checks 64 +/* + * Primality test according to FIPS PUB 186-4, Appendix C.3. Since we only + * have one value here we set the number of checks to 64 which is the 128 bit + * security level that is the highest level and valid for creating a 3072 bit + * DSA key. 
+ */ +# define DSA_is_prime(n, callback, cb_arg) \ + BN_is_prime(n, DSS_prime_checks, callback, NULL, cb_arg) + +# ifndef OPENSSL_NO_DH +/* + * Convert DSA structure (key or just parameters) into DH structure (be + * careful to avoid small subgroup attacks when using this!) + */ +DH *DSA_dup_DH(const DSA *r); +# endif + +# define EVP_PKEY_CTX_set_dsa_paramgen_bits(ctx, nbits) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DSA, EVP_PKEY_OP_PARAMGEN, \ + EVP_PKEY_CTRL_DSA_PARAMGEN_BITS, nbits, NULL) +# define EVP_PKEY_CTX_set_dsa_paramgen_q_bits(ctx, qbits) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DSA, EVP_PKEY_OP_PARAMGEN, \ + EVP_PKEY_CTRL_DSA_PARAMGEN_Q_BITS, qbits, NULL) +# define EVP_PKEY_CTX_set_dsa_paramgen_md(ctx, md) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DSA, EVP_PKEY_OP_PARAMGEN, \ + EVP_PKEY_CTRL_DSA_PARAMGEN_MD, 0, (void *)(md)) + +# define EVP_PKEY_CTRL_DSA_PARAMGEN_BITS (EVP_PKEY_ALG_CTRL + 1) +# define EVP_PKEY_CTRL_DSA_PARAMGEN_Q_BITS (EVP_PKEY_ALG_CTRL + 2) +# define EVP_PKEY_CTRL_DSA_PARAMGEN_MD (EVP_PKEY_ALG_CTRL + 3) + +void DSA_get0_pqg(const DSA *d, + const BIGNUM **p, const BIGNUM **q, const BIGNUM **g); +int DSA_set0_pqg(DSA *d, BIGNUM *p, BIGNUM *q, BIGNUM *g); +void DSA_get0_key(const DSA *d, + const BIGNUM **pub_key, const BIGNUM **priv_key); +int DSA_set0_key(DSA *d, BIGNUM *pub_key, BIGNUM *priv_key); +const BIGNUM *DSA_get0_p(const DSA *d); +const BIGNUM *DSA_get0_q(const DSA *d); +const BIGNUM *DSA_get0_g(const DSA *d); +const BIGNUM *DSA_get0_pub_key(const DSA *d); +const BIGNUM *DSA_get0_priv_key(const DSA *d); +void DSA_clear_flags(DSA *d, int flags); +int DSA_test_flags(const DSA *d, int flags); +void DSA_set_flags(DSA *d, int flags); +ENGINE *DSA_get0_engine(DSA *d); + +DSA_METHOD *DSA_meth_new(const char *name, int flags); +void DSA_meth_free(DSA_METHOD *dsam); +DSA_METHOD *DSA_meth_dup(const DSA_METHOD *dsam); +const char *DSA_meth_get0_name(const DSA_METHOD *dsam); +int DSA_meth_set1_name(DSA_METHOD *dsam, const char *name); +int DSA_meth_get_flags(const DSA_METHOD *dsam); +int DSA_meth_set_flags(DSA_METHOD *dsam, int flags); +void *DSA_meth_get0_app_data(const DSA_METHOD *dsam); +int DSA_meth_set0_app_data(DSA_METHOD *dsam, void *app_data); +DSA_SIG *(*DSA_meth_get_sign(const DSA_METHOD *dsam)) + (const unsigned char *, int, DSA *); +int DSA_meth_set_sign(DSA_METHOD *dsam, + DSA_SIG *(*sign) (const unsigned char *, int, DSA *)); +int (*DSA_meth_get_sign_setup(const DSA_METHOD *dsam)) + (DSA *, BN_CTX *, BIGNUM **, BIGNUM **); +int DSA_meth_set_sign_setup(DSA_METHOD *dsam, + int (*sign_setup) (DSA *, BN_CTX *, BIGNUM **, BIGNUM **)); +int (*DSA_meth_get_verify(const DSA_METHOD *dsam)) + (const unsigned char *, int, DSA_SIG *, DSA *); +int DSA_meth_set_verify(DSA_METHOD *dsam, + int (*verify) (const unsigned char *, int, DSA_SIG *, DSA *)); +int (*DSA_meth_get_mod_exp(const DSA_METHOD *dsam)) + (DSA *, BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *, + const BIGNUM *, const BIGNUM *, BN_CTX *, BN_MONT_CTX *); +int DSA_meth_set_mod_exp(DSA_METHOD *dsam, + int (*mod_exp) (DSA *, BIGNUM *, const BIGNUM *, const BIGNUM *, + const BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *, + BN_MONT_CTX *)); +int (*DSA_meth_get_bn_mod_exp(const DSA_METHOD *dsam)) + (DSA *, BIGNUM *, const BIGNUM *, const BIGNUM *, const BIGNUM *, + BN_CTX *, BN_MONT_CTX *); +int DSA_meth_set_bn_mod_exp(DSA_METHOD *dsam, + int (*bn_mod_exp) (DSA *, BIGNUM *, const BIGNUM *, const BIGNUM *, + const BIGNUM *, BN_CTX *, BN_MONT_CTX *)); +int (*DSA_meth_get_init(const DSA_METHOD *dsam))(DSA *); 
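For context while reviewing the vendored dsa.h above, here is a minimal usage sketch of the one-shot DSA sign/verify API it declares. This is illustrative only and not part of the patch; it uses only functions declared in the header shown here, assumes the vendored libcrypto is linked, and assumes SHA256()/SHA256_DIGEST_LENGTH from <openssl/sha.h> (not part of this hunk) are available. Error handling is reduced to early returns.

#include <stdio.h>
#include <openssl/dsa.h>
#include <openssl/sha.h>

int main(void)
{
    /* Hash a short message; DSA signs a digest, not the raw message. */
    unsigned char digest[SHA256_DIGEST_LENGTH];
    SHA256((const unsigned char *)"hello", 5, digest);

    DSA *dsa = DSA_new();
    if (dsa == NULL)
        return 1;

    /* Generate 2048-bit domain parameters, then a key pair.
     * Both calls are simply assumed to succeed in this sketch. */
    if (!DSA_generate_parameters_ex(dsa, 2048, NULL, 0, NULL, NULL, NULL) ||
        !DSA_generate_key(dsa)) {
        DSA_free(dsa);
        return 1;
    }

    /* One-shot sign and verify using the DSA_SIG structure API. */
    DSA_SIG *sig = DSA_do_sign(digest, (int)sizeof(digest), dsa);
    int ok = sig != NULL &&
             DSA_do_verify(digest, (int)sizeof(digest), sig, dsa) == 1;
    printf("verify: %s\n", ok ? "ok" : "failed");

    DSA_SIG_free(sig);
    DSA_free(dsa);
    return ok ? 0 : 1;
}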
+int DSA_meth_set_init(DSA_METHOD *dsam, int (*init)(DSA *)); +int (*DSA_meth_get_finish(const DSA_METHOD *dsam)) (DSA *); +int DSA_meth_set_finish(DSA_METHOD *dsam, int (*finish) (DSA *)); +int (*DSA_meth_get_paramgen(const DSA_METHOD *dsam)) + (DSA *, int, const unsigned char *, int, int *, unsigned long *, + BN_GENCB *); +int DSA_meth_set_paramgen(DSA_METHOD *dsam, + int (*paramgen) (DSA *, int, const unsigned char *, int, int *, + unsigned long *, BN_GENCB *)); +int (*DSA_meth_get_keygen(const DSA_METHOD *dsam)) (DSA *); +int DSA_meth_set_keygen(DSA_METHOD *dsam, int (*keygen) (DSA *)); + + +# ifdef __cplusplus +} +# endif +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/dsaerr.h b/third_party/openssl/uos/sw_64/include/openssl/dsaerr.h new file mode 100644 index 00000000..495a1ac8 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/dsaerr.h @@ -0,0 +1,72 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_DSAERR_H +# define HEADER_DSAERR_H + +# ifndef HEADER_SYMHACKS_H +# include +# endif + +# include + +# ifndef OPENSSL_NO_DSA + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_DSA_strings(void); + +/* + * DSA function codes. + */ +# define DSA_F_DSAPARAMS_PRINT 100 +# define DSA_F_DSAPARAMS_PRINT_FP 101 +# define DSA_F_DSA_BUILTIN_PARAMGEN 125 +# define DSA_F_DSA_BUILTIN_PARAMGEN2 126 +# define DSA_F_DSA_DO_SIGN 112 +# define DSA_F_DSA_DO_VERIFY 113 +# define DSA_F_DSA_METH_DUP 127 +# define DSA_F_DSA_METH_NEW 128 +# define DSA_F_DSA_METH_SET1_NAME 129 +# define DSA_F_DSA_NEW_METHOD 103 +# define DSA_F_DSA_PARAM_DECODE 119 +# define DSA_F_DSA_PRINT_FP 105 +# define DSA_F_DSA_PRIV_DECODE 115 +# define DSA_F_DSA_PRIV_ENCODE 116 +# define DSA_F_DSA_PUB_DECODE 117 +# define DSA_F_DSA_PUB_ENCODE 118 +# define DSA_F_DSA_SIGN 106 +# define DSA_F_DSA_SIGN_SETUP 107 +# define DSA_F_DSA_SIG_NEW 102 +# define DSA_F_OLD_DSA_PRIV_DECODE 122 +# define DSA_F_PKEY_DSA_CTRL 120 +# define DSA_F_PKEY_DSA_CTRL_STR 104 +# define DSA_F_PKEY_DSA_KEYGEN 121 + +/* + * DSA reason codes. + */ +# define DSA_R_BAD_Q_VALUE 102 +# define DSA_R_BN_DECODE_ERROR 108 +# define DSA_R_BN_ERROR 109 +# define DSA_R_DECODE_ERROR 104 +# define DSA_R_INVALID_DIGEST_TYPE 106 +# define DSA_R_INVALID_PARAMETERS 112 +# define DSA_R_MISSING_PARAMETERS 101 +# define DSA_R_MISSING_PRIVATE_KEY 111 +# define DSA_R_MODULUS_TOO_LARGE 103 +# define DSA_R_NO_PARAMETERS_SET 107 +# define DSA_R_PARAMETER_ENCODING_ERROR 105 +# define DSA_R_Q_NOT_PRIME 113 +# define DSA_R_SEED_LEN_SMALL 110 + +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/dtls1.h b/third_party/openssl/uos/sw_64/include/openssl/dtls1.h new file mode 100644 index 00000000..d55ca9c3 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/dtls1.h @@ -0,0 +1,55 @@ +/* + * Copyright 2005-2018 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_DTLS1_H +# define HEADER_DTLS1_H + +#ifdef __cplusplus +extern "C" { +#endif + +# define DTLS1_VERSION 0xFEFF +# define DTLS1_2_VERSION 0xFEFD +# define DTLS_MIN_VERSION DTLS1_VERSION +# define DTLS_MAX_VERSION DTLS1_2_VERSION +# define DTLS1_VERSION_MAJOR 0xFE + +# define DTLS1_BAD_VER 0x0100 + +/* Special value for method supporting multiple versions */ +# define DTLS_ANY_VERSION 0x1FFFF + +/* lengths of messages */ +/* + * Actually the max cookie length in DTLS is 255. But we can't change this now + * due to compatibility concerns. + */ +# define DTLS1_COOKIE_LENGTH 256 + +# define DTLS1_RT_HEADER_LENGTH 13 + +# define DTLS1_HM_HEADER_LENGTH 12 + +# define DTLS1_HM_BAD_FRAGMENT -2 +# define DTLS1_HM_FRAGMENT_RETRY -3 + +# define DTLS1_CCS_HEADER_LENGTH 1 + +# define DTLS1_AL_HEADER_LENGTH 2 + +/* Timeout multipliers */ +# define DTLS1_TMO_READ_COUNT 2 +# define DTLS1_TMO_WRITE_COUNT 2 + +# define DTLS1_TMO_ALERT_COUNT 12 + +#ifdef __cplusplus +} +#endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/e_os2.h b/third_party/openssl/uos/sw_64/include/openssl/e_os2.h new file mode 100644 index 00000000..5c88e519 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/e_os2.h @@ -0,0 +1,301 @@ +/* + * Copyright 1995-2021 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_E_OS2_H +# define HEADER_E_OS2_H + +# include + +#ifdef __cplusplus +extern "C" { +#endif + +/****************************************************************************** + * Detect operating systems. This probably needs completing. + * The result is that at least one OPENSSL_SYS_os macro should be defined. + * However, if none is defined, Unix is assumed. + **/ + +# define OPENSSL_SYS_UNIX + +/* --------------------- Microsoft operating systems ---------------------- */ + +/* + * Note that MSDOS actually denotes 32-bit environments running on top of + * MS-DOS, such as DJGPP one. + */ +# if defined(OPENSSL_SYS_MSDOS) +# undef OPENSSL_SYS_UNIX +# endif + +/* + * For 32 bit environment, there seems to be the CygWin environment and then + * all the others that try to do the same thing Microsoft does... + */ +/* + * UEFI lives here because it might be built with a Microsoft toolchain and + * we need to avoid the false positive match on Windows. 
+ */ +# if defined(OPENSSL_SYS_UEFI) +# undef OPENSSL_SYS_UNIX +# elif defined(OPENSSL_SYS_UWIN) +# undef OPENSSL_SYS_UNIX +# define OPENSSL_SYS_WIN32_UWIN +# else +# if defined(__CYGWIN__) || defined(OPENSSL_SYS_CYGWIN) +# define OPENSSL_SYS_WIN32_CYGWIN +# else +# if defined(_WIN32) || defined(OPENSSL_SYS_WIN32) +# undef OPENSSL_SYS_UNIX +# if !defined(OPENSSL_SYS_WIN32) +# define OPENSSL_SYS_WIN32 +# endif +# endif +# if defined(_WIN64) || defined(OPENSSL_SYS_WIN64) +# undef OPENSSL_SYS_UNIX +# if !defined(OPENSSL_SYS_WIN64) +# define OPENSSL_SYS_WIN64 +# endif +# endif +# if defined(OPENSSL_SYS_WINNT) +# undef OPENSSL_SYS_UNIX +# endif +# if defined(OPENSSL_SYS_WINCE) +# undef OPENSSL_SYS_UNIX +# endif +# endif +# endif + +/* Anything that tries to look like Microsoft is "Windows" */ +# if defined(OPENSSL_SYS_WIN32) || defined(OPENSSL_SYS_WIN64) || defined(OPENSSL_SYS_WINNT) || defined(OPENSSL_SYS_WINCE) +# undef OPENSSL_SYS_UNIX +# define OPENSSL_SYS_WINDOWS +# ifndef OPENSSL_SYS_MSDOS +# define OPENSSL_SYS_MSDOS +# endif +# endif + +/* + * DLL settings. This part is a bit tough, because it's up to the + * application implementor how he or she will link the application, so it + * requires some macro to be used. + */ +# ifdef OPENSSL_SYS_WINDOWS +# ifndef OPENSSL_OPT_WINDLL +# if defined(_WINDLL) /* This is used when building OpenSSL to + * indicate that DLL linkage should be used */ +# define OPENSSL_OPT_WINDLL +# endif +# endif +# endif + +/* ------------------------------- OpenVMS -------------------------------- */ +# if defined(__VMS) || defined(VMS) || defined(OPENSSL_SYS_VMS) +# if !defined(OPENSSL_SYS_VMS) +# undef OPENSSL_SYS_UNIX +# endif +# define OPENSSL_SYS_VMS +# if defined(__DECC) +# define OPENSSL_SYS_VMS_DECC +# elif defined(__DECCXX) +# define OPENSSL_SYS_VMS_DECC +# define OPENSSL_SYS_VMS_DECCXX +# else +# define OPENSSL_SYS_VMS_NODECC +# endif +# endif + +/* -------------------------------- Unix ---------------------------------- */ +# ifdef OPENSSL_SYS_UNIX +# if defined(linux) || defined(__linux__) && !defined(OPENSSL_SYS_LINUX) +# define OPENSSL_SYS_LINUX +# endif +# if defined(_AIX) && !defined(OPENSSL_SYS_AIX) +# define OPENSSL_SYS_AIX +# endif +# endif + +/* -------------------------------- VOS ----------------------------------- */ +# if defined(__VOS__) && !defined(OPENSSL_SYS_VOS) +# define OPENSSL_SYS_VOS +# ifdef __HPPA__ +# define OPENSSL_SYS_VOS_HPPA +# endif +# ifdef __IA32__ +# define OPENSSL_SYS_VOS_IA32 +# endif +# endif + +/** + * That's it for OS-specific stuff + *****************************************************************************/ + +/* Specials for I/O an exit */ +# ifdef OPENSSL_SYS_MSDOS +# define OPENSSL_UNISTD_IO +# define OPENSSL_DECLARE_EXIT extern void exit(int); +# else +# define OPENSSL_UNISTD_IO OPENSSL_UNISTD +# define OPENSSL_DECLARE_EXIT /* declared in unistd.h */ +# endif + +/*- + * OPENSSL_EXTERN is normally used to declare a symbol with possible extra + * attributes to handle its presence in a shared library. + * OPENSSL_EXPORT is used to define a symbol with extra possible attributes + * to make it visible in a shared library. + * Care needs to be taken when a header file is used both to declare and + * define symbols. 
Basically, for any library that exports some global + * variables, the following code must be present in the header file that + * declares them, before OPENSSL_EXTERN is used: + * + * #ifdef SOME_BUILD_FLAG_MACRO + * # undef OPENSSL_EXTERN + * # define OPENSSL_EXTERN OPENSSL_EXPORT + * #endif + * + * The default is to have OPENSSL_EXPORT and OPENSSL_EXTERN + * have some generally sensible values. + */ + +# if defined(OPENSSL_SYS_WINDOWS) && defined(OPENSSL_OPT_WINDLL) +# define OPENSSL_EXPORT extern __declspec(dllexport) +# define OPENSSL_EXTERN extern __declspec(dllimport) +# else +# define OPENSSL_EXPORT extern +# define OPENSSL_EXTERN extern +# endif + +/*- + * Macros to allow global variables to be reached through function calls when + * required (if a shared library version requires it, for example. + * The way it's done allows definitions like this: + * + * // in foobar.c + * OPENSSL_IMPLEMENT_GLOBAL(int,foobar,0) + * // in foobar.h + * OPENSSL_DECLARE_GLOBAL(int,foobar); + * #define foobar OPENSSL_GLOBAL_REF(foobar) + */ +# ifdef OPENSSL_EXPORT_VAR_AS_FUNCTION +# define OPENSSL_IMPLEMENT_GLOBAL(type,name,value) \ + type *_shadow_##name(void) \ + { static type _hide_##name=value; return &_hide_##name; } +# define OPENSSL_DECLARE_GLOBAL(type,name) type *_shadow_##name(void) +# define OPENSSL_GLOBAL_REF(name) (*(_shadow_##name())) +# else +# define OPENSSL_IMPLEMENT_GLOBAL(type,name,value) type _shadow_##name=value; +# define OPENSSL_DECLARE_GLOBAL(type,name) OPENSSL_EXPORT type _shadow_##name +# define OPENSSL_GLOBAL_REF(name) _shadow_##name +# endif + +# ifdef _WIN32 +# ifdef _WIN64 +# define ossl_ssize_t __int64 +# define OSSL_SSIZE_MAX _I64_MAX +# else +# define ossl_ssize_t int +# define OSSL_SSIZE_MAX INT_MAX +# endif +# endif + +# if defined(OPENSSL_SYS_UEFI) && !defined(ossl_ssize_t) +# define ossl_ssize_t INTN +# define OSSL_SSIZE_MAX MAX_INTN +# endif + +# ifndef ossl_ssize_t +# define ossl_ssize_t ssize_t +# if defined(SSIZE_MAX) +# define OSSL_SSIZE_MAX SSIZE_MAX +# elif defined(_POSIX_SSIZE_MAX) +# define OSSL_SSIZE_MAX _POSIX_SSIZE_MAX +# else +# define OSSL_SSIZE_MAX ((ssize_t)(SIZE_MAX>>1)) +# endif +# endif + +# ifdef DEBUG_UNUSED +# define __owur __attribute__((__warn_unused_result__)) +# else +# define __owur +# endif + +/* Standard integer types */ +# if defined(OPENSSL_SYS_UEFI) +typedef INT8 int8_t; +typedef UINT8 uint8_t; +typedef INT16 int16_t; +typedef UINT16 uint16_t; +typedef INT32 int32_t; +typedef UINT32 uint32_t; +typedef INT64 int64_t; +typedef UINT64 uint64_t; +# elif (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \ + defined(__osf__) || defined(__sgi) || defined(__hpux) || \ + defined(OPENSSL_SYS_VMS) || defined (__OpenBSD__) +# include +# elif defined(_MSC_VER) && _MSC_VER<1600 +/* + * minimally required typdefs for systems not supporting inttypes.h or + * stdint.h: currently just older VC++ + */ +typedef signed char int8_t; +typedef unsigned char uint8_t; +typedef short int16_t; +typedef unsigned short uint16_t; +typedef int int32_t; +typedef unsigned int uint32_t; +typedef __int64 int64_t; +typedef unsigned __int64 uint64_t; +# else +# include +# endif + +/* ossl_inline: portable inline definition usable in public headers */ +# if !defined(inline) && !defined(__cplusplus) +# if defined(__STDC_VERSION__) && __STDC_VERSION__>=199901L + /* just use inline */ +# define ossl_inline inline +# elif defined(__GNUC__) && __GNUC__>=2 +# define ossl_inline __inline__ +# elif defined(_MSC_VER) + /* + * Visual Studio: inline is available in C++ 
only, however + * __inline is available for C, see + * http://msdn.microsoft.com/en-us/library/z8y1yy88.aspx + */ +# define ossl_inline __inline +# else +# define ossl_inline +# endif +# else +# define ossl_inline inline +# endif + +# if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && \ + !defined(__cplusplus) +# define ossl_noreturn _Noreturn +# elif defined(__GNUC__) && __GNUC__ >= 2 +# define ossl_noreturn __attribute__((noreturn)) +# else +# define ossl_noreturn +# endif + +/* ossl_unused: portable unused attribute for use in public headers */ +# if defined(__GNUC__) +# define ossl_unused __attribute__((unused)) +# else +# define ossl_unused +# endif + +#ifdef __cplusplus +} +#endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/ebcdic.h b/third_party/openssl/uos/sw_64/include/openssl/ebcdic.h new file mode 100644 index 00000000..aa012855 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/ebcdic.h @@ -0,0 +1,33 @@ +/* + * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_EBCDIC_H +# define HEADER_EBCDIC_H + +# include + +#ifdef __cplusplus +extern "C" { +#endif + +/* Avoid name clashes with other applications */ +# define os_toascii _openssl_os_toascii +# define os_toebcdic _openssl_os_toebcdic +# define ebcdic2ascii _openssl_ebcdic2ascii +# define ascii2ebcdic _openssl_ascii2ebcdic + +extern const unsigned char os_toascii[256]; +extern const unsigned char os_toebcdic[256]; +void *ebcdic2ascii(void *dest, const void *srce, size_t count); +void *ascii2ebcdic(void *dest, const void *srce, size_t count); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/ec.h b/third_party/openssl/uos/sw_64/include/openssl/ec.h new file mode 100644 index 00000000..24baf53c --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/ec.h @@ -0,0 +1,1484 @@ +/* + * Copyright 2002-2021 The OpenSSL Project Authors. All Rights Reserved. + * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_EC_H +# define HEADER_EC_H + +# include + +# ifndef OPENSSL_NO_EC +# include +# include +# if OPENSSL_API_COMPAT < 0x10100000L +# include +# endif +# include +# ifdef __cplusplus +extern "C" { +# endif + +# ifndef OPENSSL_ECC_MAX_FIELD_BITS +# define OPENSSL_ECC_MAX_FIELD_BITS 661 +# endif + +/** Enum for the point conversion form as defined in X9.62 (ECDSA) + * for the encoding of a elliptic curve point (x,y) */ +typedef enum { + /** the point is encoded as z||x, where the octet z specifies + * which solution of the quadratic equation y is */ + POINT_CONVERSION_COMPRESSED = 2, + /** the point is encoded as z||x||y, where z is the octet 0x04 */ + POINT_CONVERSION_UNCOMPRESSED = 4, + /** the point is encoded as z||x||y, where the octet z specifies + * which solution of the quadratic equation y is */ + POINT_CONVERSION_HYBRID = 6 +} point_conversion_form_t; + +typedef struct ec_method_st EC_METHOD; +typedef struct ec_group_st EC_GROUP; +typedef struct ec_point_st EC_POINT; +typedef struct ecpk_parameters_st ECPKPARAMETERS; +typedef struct ec_parameters_st ECPARAMETERS; + +/********************************************************************/ +/* EC_METHODs for curves over GF(p) */ +/********************************************************************/ + +/** Returns the basic GFp ec methods which provides the basis for the + * optimized methods. + * \return EC_METHOD object + */ +const EC_METHOD *EC_GFp_simple_method(void); + +/** Returns GFp methods using montgomery multiplication. + * \return EC_METHOD object + */ +const EC_METHOD *EC_GFp_mont_method(void); + +/** Returns GFp methods using optimized methods for NIST recommended curves + * \return EC_METHOD object + */ +const EC_METHOD *EC_GFp_nist_method(void); + +# ifndef OPENSSL_NO_EC_NISTP_64_GCC_128 +/** Returns 64-bit optimized methods for nistp224 + * \return EC_METHOD object + */ +const EC_METHOD *EC_GFp_nistp224_method(void); + +/** Returns 64-bit optimized methods for nistp256 + * \return EC_METHOD object + */ +const EC_METHOD *EC_GFp_nistp256_method(void); + +/** Returns 64-bit optimized methods for nistp521 + * \return EC_METHOD object + */ +const EC_METHOD *EC_GFp_nistp521_method(void); +# endif + +# ifndef OPENSSL_NO_EC2M +/********************************************************************/ +/* EC_METHOD for curves over GF(2^m) */ +/********************************************************************/ + +/** Returns the basic GF2m ec method + * \return EC_METHOD object + */ +const EC_METHOD *EC_GF2m_simple_method(void); + +# endif + +/********************************************************************/ +/* EC_GROUP functions */ +/********************************************************************/ + +/** Creates a new EC_GROUP object + * \param meth EC_METHOD to use + * \return newly created EC_GROUP object or NULL in case of an error. + */ +EC_GROUP *EC_GROUP_new(const EC_METHOD *meth); + +/** Frees a EC_GROUP object + * \param group EC_GROUP object to be freed. + */ +void EC_GROUP_free(EC_GROUP *group); + +/** Clears and frees a EC_GROUP object + * \param group EC_GROUP object to be cleared and freed. + */ +void EC_GROUP_clear_free(EC_GROUP *group); + +/** Copies EC_GROUP objects. Note: both EC_GROUPs must use the same EC_METHOD. + * \param dst destination EC_GROUP object + * \param src source EC_GROUP object + * \return 1 on success and 0 if an error occurred. 
+ */ +int EC_GROUP_copy(EC_GROUP *dst, const EC_GROUP *src); + +/** Creates a new EC_GROUP object and copies the copies the content + * form src to the newly created EC_KEY object + * \param src source EC_GROUP object + * \return newly created EC_GROUP object or NULL in case of an error. + */ +EC_GROUP *EC_GROUP_dup(const EC_GROUP *src); + +/** Returns the EC_METHOD of the EC_GROUP object. + * \param group EC_GROUP object + * \return EC_METHOD used in this EC_GROUP object. + */ +const EC_METHOD *EC_GROUP_method_of(const EC_GROUP *group); + +/** Returns the field type of the EC_METHOD. + * \param meth EC_METHOD object + * \return NID of the underlying field type OID. + */ +int EC_METHOD_get_field_type(const EC_METHOD *meth); + +/** Sets the generator and its order/cofactor of a EC_GROUP object. + * \param group EC_GROUP object + * \param generator EC_POINT object with the generator. + * \param order the order of the group generated by the generator. + * \param cofactor the index of the sub-group generated by the generator + * in the group of all points on the elliptic curve. + * \return 1 on success and 0 if an error occurred + */ +int EC_GROUP_set_generator(EC_GROUP *group, const EC_POINT *generator, + const BIGNUM *order, const BIGNUM *cofactor); + +/** Returns the generator of a EC_GROUP object. + * \param group EC_GROUP object + * \return the currently used generator (possibly NULL). + */ +const EC_POINT *EC_GROUP_get0_generator(const EC_GROUP *group); + +/** Returns the montgomery data for order(Generator) + * \param group EC_GROUP object + * \return the currently used montgomery data (possibly NULL). +*/ +BN_MONT_CTX *EC_GROUP_get_mont_data(const EC_GROUP *group); + +/** Gets the order of a EC_GROUP + * \param group EC_GROUP object + * \param order BIGNUM to which the order is copied + * \param ctx unused + * \return 1 on success and 0 if an error occurred + */ +int EC_GROUP_get_order(const EC_GROUP *group, BIGNUM *order, BN_CTX *ctx); + +/** Gets the order of an EC_GROUP + * \param group EC_GROUP object + * \return the group order + */ +const BIGNUM *EC_GROUP_get0_order(const EC_GROUP *group); + +/** Gets the number of bits of the order of an EC_GROUP + * \param group EC_GROUP object + * \return number of bits of group order. + */ +int EC_GROUP_order_bits(const EC_GROUP *group); + +/** Gets the cofactor of a EC_GROUP + * \param group EC_GROUP object + * \param cofactor BIGNUM to which the cofactor is copied + * \param ctx unused + * \return 1 on success and 0 if an error occurred + */ +int EC_GROUP_get_cofactor(const EC_GROUP *group, BIGNUM *cofactor, + BN_CTX *ctx); + +/** Gets the cofactor of an EC_GROUP + * \param group EC_GROUP object + * \return the group cofactor + */ +const BIGNUM *EC_GROUP_get0_cofactor(const EC_GROUP *group); + +/** Sets the name of a EC_GROUP object + * \param group EC_GROUP object + * \param nid NID of the curve name OID + */ +void EC_GROUP_set_curve_name(EC_GROUP *group, int nid); + +/** Returns the curve name of a EC_GROUP object + * \param group EC_GROUP object + * \return NID of the curve name OID or 0 if not set. 
+ */ +int EC_GROUP_get_curve_name(const EC_GROUP *group); + +void EC_GROUP_set_asn1_flag(EC_GROUP *group, int flag); +int EC_GROUP_get_asn1_flag(const EC_GROUP *group); + +void EC_GROUP_set_point_conversion_form(EC_GROUP *group, + point_conversion_form_t form); +point_conversion_form_t EC_GROUP_get_point_conversion_form(const EC_GROUP *); + +unsigned char *EC_GROUP_get0_seed(const EC_GROUP *x); +size_t EC_GROUP_get_seed_len(const EC_GROUP *); +size_t EC_GROUP_set_seed(EC_GROUP *, const unsigned char *, size_t len); + +/** Sets the parameters of a ec curve defined by y^2 = x^3 + a*x + b (for GFp) + * or y^2 + x*y = x^3 + a*x^2 + b (for GF2m) + * \param group EC_GROUP object + * \param p BIGNUM with the prime number (GFp) or the polynomial + * defining the underlying field (GF2m) + * \param a BIGNUM with parameter a of the equation + * \param b BIGNUM with parameter b of the equation + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred + */ +int EC_GROUP_set_curve(EC_GROUP *group, const BIGNUM *p, const BIGNUM *a, + const BIGNUM *b, BN_CTX *ctx); + +/** Gets the parameters of the ec curve defined by y^2 = x^3 + a*x + b (for GFp) + * or y^2 + x*y = x^3 + a*x^2 + b (for GF2m) + * \param group EC_GROUP object + * \param p BIGNUM with the prime number (GFp) or the polynomial + * defining the underlying field (GF2m) + * \param a BIGNUM for parameter a of the equation + * \param b BIGNUM for parameter b of the equation + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred + */ +int EC_GROUP_get_curve(const EC_GROUP *group, BIGNUM *p, BIGNUM *a, BIGNUM *b, + BN_CTX *ctx); + +/** Sets the parameters of an ec curve. Synonym for EC_GROUP_set_curve + * \param group EC_GROUP object + * \param p BIGNUM with the prime number (GFp) or the polynomial + * defining the underlying field (GF2m) + * \param a BIGNUM with parameter a of the equation + * \param b BIGNUM with parameter b of the equation + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred + */ +DEPRECATEDIN_1_2_0(int EC_GROUP_set_curve_GFp(EC_GROUP *group, const BIGNUM *p, + const BIGNUM *a, const BIGNUM *b, + BN_CTX *ctx)) + +/** Gets the parameters of an ec curve. Synonym for EC_GROUP_get_curve + * \param group EC_GROUP object + * \param p BIGNUM with the prime number (GFp) or the polynomial + * defining the underlying field (GF2m) + * \param a BIGNUM for parameter a of the equation + * \param b BIGNUM for parameter b of the equation + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred + */ +DEPRECATEDIN_1_2_0(int EC_GROUP_get_curve_GFp(const EC_GROUP *group, BIGNUM *p, + BIGNUM *a, BIGNUM *b, + BN_CTX *ctx)) + +# ifndef OPENSSL_NO_EC2M +/** Sets the parameter of an ec curve. Synonym for EC_GROUP_set_curve + * \param group EC_GROUP object + * \param p BIGNUM with the prime number (GFp) or the polynomial + * defining the underlying field (GF2m) + * \param a BIGNUM with parameter a of the equation + * \param b BIGNUM with parameter b of the equation + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred + */ +DEPRECATEDIN_1_2_0(int EC_GROUP_set_curve_GF2m(EC_GROUP *group, const BIGNUM *p, + const BIGNUM *a, const BIGNUM *b, + BN_CTX *ctx)) + +/** Gets the parameters of an ec curve. 
Synonym for EC_GROUP_get_curve + * \param group EC_GROUP object + * \param p BIGNUM with the prime number (GFp) or the polynomial + * defining the underlying field (GF2m) + * \param a BIGNUM for parameter a of the equation + * \param b BIGNUM for parameter b of the equation + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred + */ +DEPRECATEDIN_1_2_0(int EC_GROUP_get_curve_GF2m(const EC_GROUP *group, BIGNUM *p, + BIGNUM *a, BIGNUM *b, + BN_CTX *ctx)) +# endif +/** Returns the number of bits needed to represent a field element + * \param group EC_GROUP object + * \return number of bits needed to represent a field element + */ +int EC_GROUP_get_degree(const EC_GROUP *group); + +/** Checks whether the parameter in the EC_GROUP define a valid ec group + * \param group EC_GROUP object + * \param ctx BN_CTX object (optional) + * \return 1 if group is a valid ec group and 0 otherwise + */ +int EC_GROUP_check(const EC_GROUP *group, BN_CTX *ctx); + +/** Checks whether the discriminant of the elliptic curve is zero or not + * \param group EC_GROUP object + * \param ctx BN_CTX object (optional) + * \return 1 if the discriminant is not zero and 0 otherwise + */ +int EC_GROUP_check_discriminant(const EC_GROUP *group, BN_CTX *ctx); + +/** Compares two EC_GROUP objects + * \param a first EC_GROUP object + * \param b second EC_GROUP object + * \param ctx BN_CTX object (optional) + * \return 0 if the groups are equal, 1 if not, or -1 on error + */ +int EC_GROUP_cmp(const EC_GROUP *a, const EC_GROUP *b, BN_CTX *ctx); + +/* + * EC_GROUP_new_GF*() calls EC_GROUP_new() and EC_GROUP_set_GF*() after + * choosing an appropriate EC_METHOD + */ + +/** Creates a new EC_GROUP object with the specified parameters defined + * over GFp (defined by the equation y^2 = x^3 + a*x + b) + * \param p BIGNUM with the prime number + * \param a BIGNUM with the parameter a of the equation + * \param b BIGNUM with the parameter b of the equation + * \param ctx BN_CTX object (optional) + * \return newly created EC_GROUP object with the specified parameters + */ +EC_GROUP *EC_GROUP_new_curve_GFp(const BIGNUM *p, const BIGNUM *a, + const BIGNUM *b, BN_CTX *ctx); +# ifndef OPENSSL_NO_EC2M +/** Creates a new EC_GROUP object with the specified parameters defined + * over GF2m (defined by the equation y^2 + x*y = x^3 + a*x^2 + b) + * \param p BIGNUM with the polynomial defining the underlying field + * \param a BIGNUM with the parameter a of the equation + * \param b BIGNUM with the parameter b of the equation + * \param ctx BN_CTX object (optional) + * \return newly created EC_GROUP object with the specified parameters + */ +EC_GROUP *EC_GROUP_new_curve_GF2m(const BIGNUM *p, const BIGNUM *a, + const BIGNUM *b, BN_CTX *ctx); +# endif + +/** Creates a EC_GROUP object with a curve specified by a NID + * \param nid NID of the OID of the curve name + * \return newly created EC_GROUP object with specified curve or NULL + * if an error occurred + */ +EC_GROUP *EC_GROUP_new_by_curve_name(int nid); + +/** Creates a new EC_GROUP object from an ECPARAMETERS object + * \param params pointer to the ECPARAMETERS object + * \return newly created EC_GROUP object with specified curve or NULL + * if an error occurred + */ +EC_GROUP *EC_GROUP_new_from_ecparameters(const ECPARAMETERS *params); + +/** Creates an ECPARAMETERS object for the given EC_GROUP object. 
+ * \param group pointer to the EC_GROUP object + * \param params pointer to an existing ECPARAMETERS object or NULL + * \return pointer to the new ECPARAMETERS object or NULL + * if an error occurred. + */ +ECPARAMETERS *EC_GROUP_get_ecparameters(const EC_GROUP *group, + ECPARAMETERS *params); + +/** Creates a new EC_GROUP object from an ECPKPARAMETERS object + * \param params pointer to an existing ECPKPARAMETERS object, or NULL + * \return newly created EC_GROUP object with specified curve, or NULL + * if an error occurred + */ +EC_GROUP *EC_GROUP_new_from_ecpkparameters(const ECPKPARAMETERS *params); + +/** Creates an ECPKPARAMETERS object for the given EC_GROUP object. + * \param group pointer to the EC_GROUP object + * \param params pointer to an existing ECPKPARAMETERS object or NULL + * \return pointer to the new ECPKPARAMETERS object or NULL + * if an error occurred. + */ +ECPKPARAMETERS *EC_GROUP_get_ecpkparameters(const EC_GROUP *group, + ECPKPARAMETERS *params); + +/********************************************************************/ +/* handling of internal curves */ +/********************************************************************/ + +typedef struct { + int nid; + const char *comment; +} EC_builtin_curve; + +/* + * EC_builtin_curves(EC_builtin_curve *r, size_t size) returns number of all + * available curves or zero if a error occurred. In case r is not zero, + * nitems EC_builtin_curve structures are filled with the data of the first + * nitems internal groups + */ +size_t EC_get_builtin_curves(EC_builtin_curve *r, size_t nitems); + +const char *EC_curve_nid2nist(int nid); +int EC_curve_nist2nid(const char *name); + +/********************************************************************/ +/* EC_POINT functions */ +/********************************************************************/ + +/** Creates a new EC_POINT object for the specified EC_GROUP + * \param group EC_GROUP the underlying EC_GROUP object + * \return newly created EC_POINT object or NULL if an error occurred + */ +EC_POINT *EC_POINT_new(const EC_GROUP *group); + +/** Frees a EC_POINT object + * \param point EC_POINT object to be freed + */ +void EC_POINT_free(EC_POINT *point); + +/** Clears and frees a EC_POINT object + * \param point EC_POINT object to be cleared and freed + */ +void EC_POINT_clear_free(EC_POINT *point); + +/** Copies EC_POINT object + * \param dst destination EC_POINT object + * \param src source EC_POINT object + * \return 1 on success and 0 if an error occurred + */ +int EC_POINT_copy(EC_POINT *dst, const EC_POINT *src); + +/** Creates a new EC_POINT object and copies the content of the supplied + * EC_POINT + * \param src source EC_POINT object + * \param group underlying the EC_GROUP object + * \return newly created EC_POINT object or NULL if an error occurred + */ +EC_POINT *EC_POINT_dup(const EC_POINT *src, const EC_GROUP *group); + +/** Returns the EC_METHOD used in EC_POINT object + * \param point EC_POINT object + * \return the EC_METHOD used + */ +const EC_METHOD *EC_POINT_method_of(const EC_POINT *point); + +/** Sets a point to infinity (neutral element) + * \param group underlying EC_GROUP object + * \param point EC_POINT to set to infinity + * \return 1 on success and 0 if an error occurred + */ +int EC_POINT_set_to_infinity(const EC_GROUP *group, EC_POINT *point); + +/** Sets the jacobian projective coordinates of a EC_POINT over GFp + * \param group underlying EC_GROUP object + * \param p EC_POINT object + * \param x BIGNUM with the x-coordinate + * \param y BIGNUM with 
the y-coordinate + * \param z BIGNUM with the z-coordinate + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred + */ +int EC_POINT_set_Jprojective_coordinates_GFp(const EC_GROUP *group, + EC_POINT *p, const BIGNUM *x, + const BIGNUM *y, const BIGNUM *z, + BN_CTX *ctx); + +/** Gets the jacobian projective coordinates of a EC_POINT over GFp + * \param group underlying EC_GROUP object + * \param p EC_POINT object + * \param x BIGNUM for the x-coordinate + * \param y BIGNUM for the y-coordinate + * \param z BIGNUM for the z-coordinate + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred + */ +int EC_POINT_get_Jprojective_coordinates_GFp(const EC_GROUP *group, + const EC_POINT *p, BIGNUM *x, + BIGNUM *y, BIGNUM *z, + BN_CTX *ctx); + +/** Sets the affine coordinates of an EC_POINT + * \param group underlying EC_GROUP object + * \param p EC_POINT object + * \param x BIGNUM with the x-coordinate + * \param y BIGNUM with the y-coordinate + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred + */ +int EC_POINT_set_affine_coordinates(const EC_GROUP *group, EC_POINT *p, + const BIGNUM *x, const BIGNUM *y, + BN_CTX *ctx); + +/** Gets the affine coordinates of an EC_POINT. + * \param group underlying EC_GROUP object + * \param p EC_POINT object + * \param x BIGNUM for the x-coordinate + * \param y BIGNUM for the y-coordinate + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred + */ +int EC_POINT_get_affine_coordinates(const EC_GROUP *group, const EC_POINT *p, + BIGNUM *x, BIGNUM *y, BN_CTX *ctx); + +/** Sets the affine coordinates of an EC_POINT. A synonym of + * EC_POINT_set_affine_coordinates + * \param group underlying EC_GROUP object + * \param p EC_POINT object + * \param x BIGNUM with the x-coordinate + * \param y BIGNUM with the y-coordinate + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred + */ +DEPRECATEDIN_1_2_0(int EC_POINT_set_affine_coordinates_GFp(const EC_GROUP *group, + EC_POINT *p, + const BIGNUM *x, + const BIGNUM *y, + BN_CTX *ctx)) + +/** Gets the affine coordinates of an EC_POINT. A synonym of + * EC_POINT_get_affine_coordinates + * \param group underlying EC_GROUP object + * \param p EC_POINT object + * \param x BIGNUM for the x-coordinate + * \param y BIGNUM for the y-coordinate + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred + */ +DEPRECATEDIN_1_2_0(int EC_POINT_get_affine_coordinates_GFp(const EC_GROUP *group, + const EC_POINT *p, + BIGNUM *x, + BIGNUM *y, + BN_CTX *ctx)) + +/** Sets the x9.62 compressed coordinates of a EC_POINT + * \param group underlying EC_GROUP object + * \param p EC_POINT object + * \param x BIGNUM with x-coordinate + * \param y_bit integer with the y-Bit (either 0 or 1) + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred + */ +int EC_POINT_set_compressed_coordinates(const EC_GROUP *group, EC_POINT *p, + const BIGNUM *x, int y_bit, + BN_CTX *ctx); + +/** Sets the x9.62 compressed coordinates of a EC_POINT. 
A synonym of + * EC_POINT_set_compressed_coordinates + * \param group underlying EC_GROUP object + * \param p EC_POINT object + * \param x BIGNUM with x-coordinate + * \param y_bit integer with the y-Bit (either 0 or 1) + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred + */ +DEPRECATEDIN_1_2_0(int EC_POINT_set_compressed_coordinates_GFp(const EC_GROUP *group, + EC_POINT *p, + const BIGNUM *x, + int y_bit, + BN_CTX *ctx)) +# ifndef OPENSSL_NO_EC2M +/** Sets the affine coordinates of an EC_POINT. A synonym of + * EC_POINT_set_affine_coordinates + * \param group underlying EC_GROUP object + * \param p EC_POINT object + * \param x BIGNUM with the x-coordinate + * \param y BIGNUM with the y-coordinate + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred + */ +DEPRECATEDIN_1_2_0(int EC_POINT_set_affine_coordinates_GF2m(const EC_GROUP *group, + EC_POINT *p, + const BIGNUM *x, + const BIGNUM *y, + BN_CTX *ctx)) + +/** Gets the affine coordinates of an EC_POINT. A synonym of + * EC_POINT_get_affine_coordinates + * \param group underlying EC_GROUP object + * \param p EC_POINT object + * \param x BIGNUM for the x-coordinate + * \param y BIGNUM for the y-coordinate + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred + */ +DEPRECATEDIN_1_2_0(int EC_POINT_get_affine_coordinates_GF2m(const EC_GROUP *group, + const EC_POINT *p, + BIGNUM *x, + BIGNUM *y, + BN_CTX *ctx)) + +/** Sets the x9.62 compressed coordinates of a EC_POINT. A synonym of + * EC_POINT_set_compressed_coordinates + * \param group underlying EC_GROUP object + * \param p EC_POINT object + * \param x BIGNUM with x-coordinate + * \param y_bit integer with the y-Bit (either 0 or 1) + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred + */ +DEPRECATEDIN_1_2_0(int EC_POINT_set_compressed_coordinates_GF2m(const EC_GROUP *group, + EC_POINT *p, + const BIGNUM *x, + int y_bit, + BN_CTX *ctx)) +# endif +/** Encodes a EC_POINT object to a octet string + * \param group underlying EC_GROUP object + * \param p EC_POINT object + * \param form point conversion form + * \param buf memory buffer for the result. If NULL the function returns + * required buffer size. 
+ * \param len length of the memory buffer + * \param ctx BN_CTX object (optional) + * \return the length of the encoded octet string or 0 if an error occurred + */ +size_t EC_POINT_point2oct(const EC_GROUP *group, const EC_POINT *p, + point_conversion_form_t form, + unsigned char *buf, size_t len, BN_CTX *ctx); + +/** Decodes a EC_POINT from a octet string + * \param group underlying EC_GROUP object + * \param p EC_POINT object + * \param buf memory buffer with the encoded ec point + * \param len length of the encoded ec point + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred + */ +int EC_POINT_oct2point(const EC_GROUP *group, EC_POINT *p, + const unsigned char *buf, size_t len, BN_CTX *ctx); + +/** Encodes an EC_POINT object to an allocated octet string + * \param group underlying EC_GROUP object + * \param point EC_POINT object + * \param form point conversion form + * \param pbuf returns pointer to allocated buffer + * \param ctx BN_CTX object (optional) + * \return the length of the encoded octet string or 0 if an error occurred + */ +size_t EC_POINT_point2buf(const EC_GROUP *group, const EC_POINT *point, + point_conversion_form_t form, + unsigned char **pbuf, BN_CTX *ctx); + +/* other interfaces to point2oct/oct2point: */ +BIGNUM *EC_POINT_point2bn(const EC_GROUP *, const EC_POINT *, + point_conversion_form_t form, BIGNUM *, BN_CTX *); +EC_POINT *EC_POINT_bn2point(const EC_GROUP *, const BIGNUM *, + EC_POINT *, BN_CTX *); +char *EC_POINT_point2hex(const EC_GROUP *, const EC_POINT *, + point_conversion_form_t form, BN_CTX *); +EC_POINT *EC_POINT_hex2point(const EC_GROUP *, const char *, + EC_POINT *, BN_CTX *); + +/********************************************************************/ +/* functions for doing EC_POINT arithmetic */ +/********************************************************************/ + +/** Computes the sum of two EC_POINT + * \param group underlying EC_GROUP object + * \param r EC_POINT object for the result (r = a + b) + * \param a EC_POINT object with the first summand + * \param b EC_POINT object with the second summand + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred + */ +int EC_POINT_add(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, + const EC_POINT *b, BN_CTX *ctx); + +/** Computes the double of a EC_POINT + * \param group underlying EC_GROUP object + * \param r EC_POINT object for the result (r = 2 * a) + * \param a EC_POINT object + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred + */ +int EC_POINT_dbl(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, + BN_CTX *ctx); + +/** Computes the inverse of a EC_POINT + * \param group underlying EC_GROUP object + * \param a EC_POINT object to be inverted (it's used for the result as well) + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred + */ +int EC_POINT_invert(const EC_GROUP *group, EC_POINT *a, BN_CTX *ctx); + +/** Checks whether the point is the neutral element of the group + * \param group the underlying EC_GROUP object + * \param p EC_POINT object + * \return 1 if the point is the neutral element and 0 otherwise + */ +int EC_POINT_is_at_infinity(const EC_GROUP *group, const EC_POINT *p); + +/** Checks whether the point is on the curve + * \param group underlying EC_GROUP object + * \param point EC_POINT object to check + * \param ctx BN_CTX object (optional) + * \return 1 if the point is on the curve, 0 if not, or -1 on error + */ 
+int EC_POINT_is_on_curve(const EC_GROUP *group, const EC_POINT *point, + BN_CTX *ctx); + +/** Compares two EC_POINTs + * \param group underlying EC_GROUP object + * \param a first EC_POINT object + * \param b second EC_POINT object + * \param ctx BN_CTX object (optional) + * \return 1 if the points are not equal, 0 if they are, or -1 on error + */ +int EC_POINT_cmp(const EC_GROUP *group, const EC_POINT *a, const EC_POINT *b, + BN_CTX *ctx); + +int EC_POINT_make_affine(const EC_GROUP *group, EC_POINT *point, BN_CTX *ctx); +int EC_POINTs_make_affine(const EC_GROUP *group, size_t num, + EC_POINT *points[], BN_CTX *ctx); + +/** Computes r = generator * n + sum_{i=0}^{num-1} p[i] * m[i] + * \param group underlying EC_GROUP object + * \param r EC_POINT object for the result + * \param n BIGNUM with the multiplier for the group generator (optional) + * \param num number further summands + * \param p array of size num of EC_POINT objects + * \param m array of size num of BIGNUM objects + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred + */ +int EC_POINTs_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *n, + size_t num, const EC_POINT *p[], const BIGNUM *m[], + BN_CTX *ctx); + +/** Computes r = generator * n + q * m + * \param group underlying EC_GROUP object + * \param r EC_POINT object for the result + * \param n BIGNUM with the multiplier for the group generator (optional) + * \param q EC_POINT object with the first factor of the second summand + * \param m BIGNUM with the second factor of the second summand + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred + */ +int EC_POINT_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *n, + const EC_POINT *q, const BIGNUM *m, BN_CTX *ctx); + +/** Stores multiples of generator for faster point multiplication + * \param group EC_GROUP object + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred + */ +int EC_GROUP_precompute_mult(EC_GROUP *group, BN_CTX *ctx); + +/** Reports whether a precomputation has been done + * \param group EC_GROUP object + * \return 1 if a pre-computation has been done and 0 otherwise + */ +int EC_GROUP_have_precompute_mult(const EC_GROUP *group); + +/********************************************************************/ +/* ASN1 stuff */ +/********************************************************************/ + +DECLARE_ASN1_ITEM(ECPKPARAMETERS) +DECLARE_ASN1_ALLOC_FUNCTIONS(ECPKPARAMETERS) +DECLARE_ASN1_ITEM(ECPARAMETERS) +DECLARE_ASN1_ALLOC_FUNCTIONS(ECPARAMETERS) + +/* + * EC_GROUP_get_basis_type() returns the NID of the basis type used to + * represent the field elements + */ +int EC_GROUP_get_basis_type(const EC_GROUP *); +# ifndef OPENSSL_NO_EC2M +int EC_GROUP_get_trinomial_basis(const EC_GROUP *, unsigned int *k); +int EC_GROUP_get_pentanomial_basis(const EC_GROUP *, unsigned int *k1, + unsigned int *k2, unsigned int *k3); +# endif + +# define OPENSSL_EC_EXPLICIT_CURVE 0x000 +# define OPENSSL_EC_NAMED_CURVE 0x001 + +EC_GROUP *d2i_ECPKParameters(EC_GROUP **, const unsigned char **in, long len); +int i2d_ECPKParameters(const EC_GROUP *, unsigned char **out); + +# define d2i_ECPKParameters_bio(bp,x) \ + ASN1_d2i_bio_of(EC_GROUP, NULL, d2i_ECPKParameters, bp, x) +# define i2d_ECPKParameters_bio(bp,x) \ + ASN1_i2d_bio_of_const(EC_GROUP, i2d_ECPKParameters, bp, x) +# define d2i_ECPKParameters_fp(fp,x) \ + (EC_GROUP *)ASN1_d2i_fp(NULL, (d2i_of_void *)d2i_ECPKParameters, (fp), \ + (void **)(x)) +# define 
i2d_ECPKParameters_fp(fp,x) \ + ASN1_i2d_fp((i2d_of_void *)i2d_ECPKParameters, (fp), (void *)(x)) + +int ECPKParameters_print(BIO *bp, const EC_GROUP *x, int off); +# ifndef OPENSSL_NO_STDIO +int ECPKParameters_print_fp(FILE *fp, const EC_GROUP *x, int off); +# endif + +/********************************************************************/ +/* EC_KEY functions */ +/********************************************************************/ + +/* some values for the encoding_flag */ +# define EC_PKEY_NO_PARAMETERS 0x001 +# define EC_PKEY_NO_PUBKEY 0x002 + +/* some values for the flags field */ +# define EC_FLAG_NON_FIPS_ALLOW 0x1 +# define EC_FLAG_FIPS_CHECKED 0x2 +# define EC_FLAG_COFACTOR_ECDH 0x1000 + +/** Creates a new EC_KEY object. + * \return EC_KEY object or NULL if an error occurred. + */ +EC_KEY *EC_KEY_new(void); + +int EC_KEY_get_flags(const EC_KEY *key); + +void EC_KEY_set_flags(EC_KEY *key, int flags); + +void EC_KEY_clear_flags(EC_KEY *key, int flags); + +int EC_KEY_decoded_from_explicit_params(const EC_KEY *key); + +/** Creates a new EC_KEY object using a named curve as underlying + * EC_GROUP object. + * \param nid NID of the named curve. + * \return EC_KEY object or NULL if an error occurred. + */ +EC_KEY *EC_KEY_new_by_curve_name(int nid); + +/** Frees a EC_KEY object. + * \param key EC_KEY object to be freed. + */ +void EC_KEY_free(EC_KEY *key); + +/** Copies a EC_KEY object. + * \param dst destination EC_KEY object + * \param src src EC_KEY object + * \return dst or NULL if an error occurred. + */ +EC_KEY *EC_KEY_copy(EC_KEY *dst, const EC_KEY *src); + +/** Creates a new EC_KEY object and copies the content from src to it. + * \param src the source EC_KEY object + * \return newly created EC_KEY object or NULL if an error occurred. + */ +EC_KEY *EC_KEY_dup(const EC_KEY *src); + +/** Increases the internal reference count of a EC_KEY object. + * \param key EC_KEY object + * \return 1 on success and 0 if an error occurred. + */ +int EC_KEY_up_ref(EC_KEY *key); + +/** Returns the ENGINE object of a EC_KEY object + * \param eckey EC_KEY object + * \return the ENGINE object (possibly NULL). + */ +ENGINE *EC_KEY_get0_engine(const EC_KEY *eckey); + +/** Returns the EC_GROUP object of a EC_KEY object + * \param key EC_KEY object + * \return the EC_GROUP object (possibly NULL). + */ +const EC_GROUP *EC_KEY_get0_group(const EC_KEY *key); + +/** Sets the EC_GROUP of a EC_KEY object. + * \param key EC_KEY object + * \param group EC_GROUP to use in the EC_KEY object (note: the EC_KEY + * object will use an own copy of the EC_GROUP). + * \return 1 on success and 0 if an error occurred. + */ +int EC_KEY_set_group(EC_KEY *key, const EC_GROUP *group); + +/** Returns the private key of a EC_KEY object. + * \param key EC_KEY object + * \return a BIGNUM with the private key (possibly NULL). + */ +const BIGNUM *EC_KEY_get0_private_key(const EC_KEY *key); + +/** Sets the private key of a EC_KEY object. + * \param key EC_KEY object + * \param prv BIGNUM with the private key (note: the EC_KEY object + * will use an own copy of the BIGNUM). + * \return 1 on success and 0 if an error occurred. + */ +int EC_KEY_set_private_key(EC_KEY *key, const BIGNUM *prv); + +/** Returns the public key of a EC_KEY object. + * \param key the EC_KEY object + * \return a EC_POINT object with the public key (possibly NULL) + */ +const EC_POINT *EC_KEY_get0_public_key(const EC_KEY *key); + +/** Sets the public key of a EC_KEY object. 
+ * \param key EC_KEY object + * \param pub EC_POINT object with the public key (note: the EC_KEY object + * will use an own copy of the EC_POINT object). + * \return 1 on success and 0 if an error occurred. + */ +int EC_KEY_set_public_key(EC_KEY *key, const EC_POINT *pub); + +unsigned EC_KEY_get_enc_flags(const EC_KEY *key); +void EC_KEY_set_enc_flags(EC_KEY *eckey, unsigned int flags); +point_conversion_form_t EC_KEY_get_conv_form(const EC_KEY *key); +void EC_KEY_set_conv_form(EC_KEY *eckey, point_conversion_form_t cform); + +#define EC_KEY_get_ex_new_index(l, p, newf, dupf, freef) \ + CRYPTO_get_ex_new_index(CRYPTO_EX_INDEX_EC_KEY, l, p, newf, dupf, freef) +int EC_KEY_set_ex_data(EC_KEY *key, int idx, void *arg); +void *EC_KEY_get_ex_data(const EC_KEY *key, int idx); + +/* wrapper functions for the underlying EC_GROUP object */ +void EC_KEY_set_asn1_flag(EC_KEY *eckey, int asn1_flag); + +/** Creates a table of pre-computed multiples of the generator to + * accelerate further EC_KEY operations. + * \param key EC_KEY object + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred. + */ +int EC_KEY_precompute_mult(EC_KEY *key, BN_CTX *ctx); + +/** Creates a new ec private (and optional a new public) key. + * \param key EC_KEY object + * \return 1 on success and 0 if an error occurred. + */ +int EC_KEY_generate_key(EC_KEY *key); + +/** Verifies that a private and/or public key is valid. + * \param key the EC_KEY object + * \return 1 on success and 0 otherwise. + */ +int EC_KEY_check_key(const EC_KEY *key); + +/** Indicates if an EC_KEY can be used for signing. + * \param eckey the EC_KEY object + * \return 1 if can can sign and 0 otherwise. + */ +int EC_KEY_can_sign(const EC_KEY *eckey); + +/** Sets a public key from affine coordinates performing + * necessary NIST PKV tests. + * \param key the EC_KEY object + * \param x public key x coordinate + * \param y public key y coordinate + * \return 1 on success and 0 otherwise. + */ +int EC_KEY_set_public_key_affine_coordinates(EC_KEY *key, BIGNUM *x, + BIGNUM *y); + +/** Encodes an EC_KEY public key to an allocated octet string + * \param key key to encode + * \param form point conversion form + * \param pbuf returns pointer to allocated buffer + * \param ctx BN_CTX object (optional) + * \return the length of the encoded octet string or 0 if an error occurred + */ +size_t EC_KEY_key2buf(const EC_KEY *key, point_conversion_form_t form, + unsigned char **pbuf, BN_CTX *ctx); + +/** Decodes a EC_KEY public key from a octet string + * \param key key to decode + * \param buf memory buffer with the encoded ec point + * \param len length of the encoded ec point + * \param ctx BN_CTX object (optional) + * \return 1 on success and 0 if an error occurred + */ + +int EC_KEY_oct2key(EC_KEY *key, const unsigned char *buf, size_t len, + BN_CTX *ctx); + +/** Decodes an EC_KEY private key from an octet string + * \param key key to decode + * \param buf memory buffer with the encoded private key + * \param len length of the encoded key + * \return 1 on success and 0 if an error occurred + */ + +int EC_KEY_oct2priv(EC_KEY *key, const unsigned char *buf, size_t len); + +/** Encodes a EC_KEY private key to an octet string + * \param key key to encode + * \param buf memory buffer for the result. If NULL the function returns + * required buffer size. 
+ * \param len length of the memory buffer + * \return the length of the encoded octet string or 0 if an error occurred + */ + +size_t EC_KEY_priv2oct(const EC_KEY *key, unsigned char *buf, size_t len); + +/** Encodes an EC_KEY private key to an allocated octet string + * \param eckey key to encode + * \param pbuf returns pointer to allocated buffer + * \return the length of the encoded octet string or 0 if an error occurred + */ +size_t EC_KEY_priv2buf(const EC_KEY *eckey, unsigned char **pbuf); + +/********************************************************************/ +/* de- and encoding functions for SEC1 ECPrivateKey */ +/********************************************************************/ + +/** Decodes a private key from a memory buffer. + * \param key a pointer to a EC_KEY object which should be used (or NULL) + * \param in pointer to memory with the DER encoded private key + * \param len length of the DER encoded private key + * \return the decoded private key or NULL if an error occurred. + */ +EC_KEY *d2i_ECPrivateKey(EC_KEY **key, const unsigned char **in, long len); + +/** Encodes a private key object and stores the result in a buffer. + * \param key the EC_KEY object to encode + * \param out the buffer for the result (if NULL the function returns number + * of bytes needed). + * \return 1 on success and 0 if an error occurred. + */ +int i2d_ECPrivateKey(EC_KEY *key, unsigned char **out); + +/********************************************************************/ +/* de- and encoding functions for EC parameters */ +/********************************************************************/ + +/** Decodes ec parameter from a memory buffer. + * \param key a pointer to a EC_KEY object which should be used (or NULL) + * \param in pointer to memory with the DER encoded ec parameters + * \param len length of the DER encoded ec parameters + * \return a EC_KEY object with the decoded parameters or NULL if an error + * occurred. + */ +EC_KEY *d2i_ECParameters(EC_KEY **key, const unsigned char **in, long len); + +/** Encodes ec parameter and stores the result in a buffer. + * \param key the EC_KEY object with ec parameters to encode + * \param out the buffer for the result (if NULL the function returns number + * of bytes needed). + * \return 1 on success and 0 if an error occurred. + */ +int i2d_ECParameters(EC_KEY *key, unsigned char **out); + +/********************************************************************/ +/* de- and encoding functions for EC public key */ +/* (octet string, not DER -- hence 'o2i' and 'i2o') */ +/********************************************************************/ + +/** Decodes a ec public key from a octet string. + * \param key a pointer to a EC_KEY object which should be used + * \param in memory buffer with the encoded public key + * \param len length of the encoded public key + * \return EC_KEY object with decoded public key or NULL if an error + * occurred. + */ +EC_KEY *o2i_ECPublicKey(EC_KEY **key, const unsigned char **in, long len); + +/** Encodes a ec public key in an octet string. + * \param key the EC_KEY object with the public key + * \param out the buffer for the result (if NULL the function returns number + * of bytes needed). + * \return 1 on success and 0 if an error occurred + */ +int i2o_ECPublicKey(const EC_KEY *key, unsigned char **out); + +/** Prints out the ec parameters on human readable form. 
+ * \param bp BIO object to which the information is printed + * \param key EC_KEY object + * \return 1 on success and 0 if an error occurred + */ +int ECParameters_print(BIO *bp, const EC_KEY *key); + +/** Prints out the contents of a EC_KEY object + * \param bp BIO object to which the information is printed + * \param key EC_KEY object + * \param off line offset + * \return 1 on success and 0 if an error occurred + */ +int EC_KEY_print(BIO *bp, const EC_KEY *key, int off); + +# ifndef OPENSSL_NO_STDIO +/** Prints out the ec parameters on human readable form. + * \param fp file descriptor to which the information is printed + * \param key EC_KEY object + * \return 1 on success and 0 if an error occurred + */ +int ECParameters_print_fp(FILE *fp, const EC_KEY *key); + +/** Prints out the contents of a EC_KEY object + * \param fp file descriptor to which the information is printed + * \param key EC_KEY object + * \param off line offset + * \return 1 on success and 0 if an error occurred + */ +int EC_KEY_print_fp(FILE *fp, const EC_KEY *key, int off); + +# endif + +const EC_KEY_METHOD *EC_KEY_OpenSSL(void); +const EC_KEY_METHOD *EC_KEY_get_default_method(void); +void EC_KEY_set_default_method(const EC_KEY_METHOD *meth); +const EC_KEY_METHOD *EC_KEY_get_method(const EC_KEY *key); +int EC_KEY_set_method(EC_KEY *key, const EC_KEY_METHOD *meth); +EC_KEY *EC_KEY_new_method(ENGINE *engine); + +/** The old name for ecdh_KDF_X9_63 + * The ECDH KDF specification has been mistakingly attributed to ANSI X9.62, + * it is actually specified in ANSI X9.63. + * This identifier is retained for backwards compatibility + */ +int ECDH_KDF_X9_62(unsigned char *out, size_t outlen, + const unsigned char *Z, size_t Zlen, + const unsigned char *sinfo, size_t sinfolen, + const EVP_MD *md); + +int ECDH_compute_key(void *out, size_t outlen, const EC_POINT *pub_key, + const EC_KEY *ecdh, + void *(*KDF) (const void *in, size_t inlen, + void *out, size_t *outlen)); + +typedef struct ECDSA_SIG_st ECDSA_SIG; + +/** Allocates and initialize a ECDSA_SIG structure + * \return pointer to a ECDSA_SIG structure or NULL if an error occurred + */ +ECDSA_SIG *ECDSA_SIG_new(void); + +/** frees a ECDSA_SIG structure + * \param sig pointer to the ECDSA_SIG structure + */ +void ECDSA_SIG_free(ECDSA_SIG *sig); + +/** DER encode content of ECDSA_SIG object (note: this function modifies *pp + * (*pp += length of the DER encoded signature)). + * \param sig pointer to the ECDSA_SIG object + * \param pp pointer to a unsigned char pointer for the output or NULL + * \return the length of the DER encoded ECDSA_SIG object or a negative value + * on error + */ +int i2d_ECDSA_SIG(const ECDSA_SIG *sig, unsigned char **pp); + +/** Decodes a DER encoded ECDSA signature (note: this function changes *pp + * (*pp += len)). 
+ * \param sig pointer to ECDSA_SIG pointer (may be NULL) + * \param pp memory buffer with the DER encoded signature + * \param len length of the buffer + * \return pointer to the decoded ECDSA_SIG structure (or NULL) + */ +ECDSA_SIG *d2i_ECDSA_SIG(ECDSA_SIG **sig, const unsigned char **pp, long len); + +/** Accessor for r and s fields of ECDSA_SIG + * \param sig pointer to ECDSA_SIG structure + * \param pr pointer to BIGNUM pointer for r (may be NULL) + * \param ps pointer to BIGNUM pointer for s (may be NULL) + */ +void ECDSA_SIG_get0(const ECDSA_SIG *sig, const BIGNUM **pr, const BIGNUM **ps); + +/** Accessor for r field of ECDSA_SIG + * \param sig pointer to ECDSA_SIG structure + */ +const BIGNUM *ECDSA_SIG_get0_r(const ECDSA_SIG *sig); + +/** Accessor for s field of ECDSA_SIG + * \param sig pointer to ECDSA_SIG structure + */ +const BIGNUM *ECDSA_SIG_get0_s(const ECDSA_SIG *sig); + +/** Setter for r and s fields of ECDSA_SIG + * \param sig pointer to ECDSA_SIG structure + * \param r pointer to BIGNUM for r (may be NULL) + * \param s pointer to BIGNUM for s (may be NULL) + */ +int ECDSA_SIG_set0(ECDSA_SIG *sig, BIGNUM *r, BIGNUM *s); + +/** Computes the ECDSA signature of the given hash value using + * the supplied private key and returns the created signature. + * \param dgst pointer to the hash value + * \param dgst_len length of the hash value + * \param eckey EC_KEY object containing a private EC key + * \return pointer to a ECDSA_SIG structure or NULL if an error occurred + */ +ECDSA_SIG *ECDSA_do_sign(const unsigned char *dgst, int dgst_len, + EC_KEY *eckey); + +/** Computes ECDSA signature of a given hash value using the supplied + * private key (note: sig must point to ECDSA_size(eckey) bytes of memory). + * \param dgst pointer to the hash value to sign + * \param dgstlen length of the hash value + * \param kinv BIGNUM with a pre-computed inverse k (optional) + * \param rp BIGNUM with a pre-computed rp value (optional), + * see ECDSA_sign_setup + * \param eckey EC_KEY object containing a private EC key + * \return pointer to a ECDSA_SIG structure or NULL if an error occurred + */ +ECDSA_SIG *ECDSA_do_sign_ex(const unsigned char *dgst, int dgstlen, + const BIGNUM *kinv, const BIGNUM *rp, + EC_KEY *eckey); + +/** Verifies that the supplied signature is a valid ECDSA + * signature of the supplied hash value using the supplied public key. + * \param dgst pointer to the hash value + * \param dgst_len length of the hash value + * \param sig ECDSA_SIG structure + * \param eckey EC_KEY object containing a public EC key + * \return 1 if the signature is valid, 0 if the signature is invalid + * and -1 on error + */ +int ECDSA_do_verify(const unsigned char *dgst, int dgst_len, + const ECDSA_SIG *sig, EC_KEY *eckey); + +/** Precompute parts of the signing operation + * \param eckey EC_KEY object containing a private EC key + * \param ctx BN_CTX object (optional) + * \param kinv BIGNUM pointer for the inverse of k + * \param rp BIGNUM pointer for x coordinate of k * generator + * \return 1 on success and 0 otherwise + */ +int ECDSA_sign_setup(EC_KEY *eckey, BN_CTX *ctx, BIGNUM **kinv, BIGNUM **rp); + +/** Computes ECDSA signature of a given hash value using the supplied + * private key (note: sig must point to ECDSA_size(eckey) bytes of memory). 
+ * \param type this parameter is ignored + * \param dgst pointer to the hash value to sign + * \param dgstlen length of the hash value + * \param sig memory for the DER encoded created signature + * \param siglen pointer to the length of the returned signature + * \param eckey EC_KEY object containing a private EC key + * \return 1 on success and 0 otherwise + */ +int ECDSA_sign(int type, const unsigned char *dgst, int dgstlen, + unsigned char *sig, unsigned int *siglen, EC_KEY *eckey); + +/** Computes ECDSA signature of a given hash value using the supplied + * private key (note: sig must point to ECDSA_size(eckey) bytes of memory). + * \param type this parameter is ignored + * \param dgst pointer to the hash value to sign + * \param dgstlen length of the hash value + * \param sig buffer to hold the DER encoded signature + * \param siglen pointer to the length of the returned signature + * \param kinv BIGNUM with a pre-computed inverse k (optional) + * \param rp BIGNUM with a pre-computed rp value (optional), + * see ECDSA_sign_setup + * \param eckey EC_KEY object containing a private EC key + * \return 1 on success and 0 otherwise + */ +int ECDSA_sign_ex(int type, const unsigned char *dgst, int dgstlen, + unsigned char *sig, unsigned int *siglen, + const BIGNUM *kinv, const BIGNUM *rp, EC_KEY *eckey); + +/** Verifies that the given signature is valid ECDSA signature + * of the supplied hash value using the specified public key. + * \param type this parameter is ignored + * \param dgst pointer to the hash value + * \param dgstlen length of the hash value + * \param sig pointer to the DER encoded signature + * \param siglen length of the DER encoded signature + * \param eckey EC_KEY object containing a public EC key + * \return 1 if the signature is valid, 0 if the signature is invalid + * and -1 on error + */ +int ECDSA_verify(int type, const unsigned char *dgst, int dgstlen, + const unsigned char *sig, int siglen, EC_KEY *eckey); + +/** Returns the maximum length of the DER encoded signature + * \param eckey EC_KEY object + * \return numbers of bytes required for the DER encoded signature + */ +int ECDSA_size(const EC_KEY *eckey); + +/********************************************************************/ +/* EC_KEY_METHOD constructors, destructors, writers and accessors */ +/********************************************************************/ + +EC_KEY_METHOD *EC_KEY_METHOD_new(const EC_KEY_METHOD *meth); +void EC_KEY_METHOD_free(EC_KEY_METHOD *meth); +void EC_KEY_METHOD_set_init(EC_KEY_METHOD *meth, + int (*init)(EC_KEY *key), + void (*finish)(EC_KEY *key), + int (*copy)(EC_KEY *dest, const EC_KEY *src), + int (*set_group)(EC_KEY *key, const EC_GROUP *grp), + int (*set_private)(EC_KEY *key, + const BIGNUM *priv_key), + int (*set_public)(EC_KEY *key, + const EC_POINT *pub_key)); + +void EC_KEY_METHOD_set_keygen(EC_KEY_METHOD *meth, + int (*keygen)(EC_KEY *key)); + +void EC_KEY_METHOD_set_compute_key(EC_KEY_METHOD *meth, + int (*ckey)(unsigned char **psec, + size_t *pseclen, + const EC_POINT *pub_key, + const EC_KEY *ecdh)); + +void EC_KEY_METHOD_set_sign(EC_KEY_METHOD *meth, + int (*sign)(int type, const unsigned char *dgst, + int dlen, unsigned char *sig, + unsigned int *siglen, + const BIGNUM *kinv, const BIGNUM *r, + EC_KEY *eckey), + int (*sign_setup)(EC_KEY *eckey, BN_CTX *ctx_in, + BIGNUM **kinvp, BIGNUM **rp), + ECDSA_SIG *(*sign_sig)(const unsigned char *dgst, + int dgst_len, + const BIGNUM *in_kinv, + const BIGNUM *in_r, + EC_KEY *eckey)); + +void 
EC_KEY_METHOD_set_verify(EC_KEY_METHOD *meth, + int (*verify)(int type, const unsigned + char *dgst, int dgst_len, + const unsigned char *sigbuf, + int sig_len, EC_KEY *eckey), + int (*verify_sig)(const unsigned char *dgst, + int dgst_len, + const ECDSA_SIG *sig, + EC_KEY *eckey)); + +void EC_KEY_METHOD_get_init(const EC_KEY_METHOD *meth, + int (**pinit)(EC_KEY *key), + void (**pfinish)(EC_KEY *key), + int (**pcopy)(EC_KEY *dest, const EC_KEY *src), + int (**pset_group)(EC_KEY *key, + const EC_GROUP *grp), + int (**pset_private)(EC_KEY *key, + const BIGNUM *priv_key), + int (**pset_public)(EC_KEY *key, + const EC_POINT *pub_key)); + +void EC_KEY_METHOD_get_keygen(const EC_KEY_METHOD *meth, + int (**pkeygen)(EC_KEY *key)); + +void EC_KEY_METHOD_get_compute_key(const EC_KEY_METHOD *meth, + int (**pck)(unsigned char **psec, + size_t *pseclen, + const EC_POINT *pub_key, + const EC_KEY *ecdh)); + +void EC_KEY_METHOD_get_sign(const EC_KEY_METHOD *meth, + int (**psign)(int type, const unsigned char *dgst, + int dlen, unsigned char *sig, + unsigned int *siglen, + const BIGNUM *kinv, const BIGNUM *r, + EC_KEY *eckey), + int (**psign_setup)(EC_KEY *eckey, BN_CTX *ctx_in, + BIGNUM **kinvp, BIGNUM **rp), + ECDSA_SIG *(**psign_sig)(const unsigned char *dgst, + int dgst_len, + const BIGNUM *in_kinv, + const BIGNUM *in_r, + EC_KEY *eckey)); + +void EC_KEY_METHOD_get_verify(const EC_KEY_METHOD *meth, + int (**pverify)(int type, const unsigned + char *dgst, int dgst_len, + const unsigned char *sigbuf, + int sig_len, EC_KEY *eckey), + int (**pverify_sig)(const unsigned char *dgst, + int dgst_len, + const ECDSA_SIG *sig, + EC_KEY *eckey)); + +# define ECParameters_dup(x) ASN1_dup_of(EC_KEY,i2d_ECParameters,d2i_ECParameters,x) + +# ifndef __cplusplus +# if defined(__SUNPRO_C) +# if __SUNPRO_C >= 0x520 +# pragma error_messages (default,E_ARRAY_OF_INCOMPLETE_NONAME,E_ARRAY_OF_INCOMPLETE) +# endif +# endif +# endif + +# define EVP_PKEY_CTX_set_ec_paramgen_curve_nid(ctx, nid) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_EC, \ + EVP_PKEY_OP_PARAMGEN|EVP_PKEY_OP_KEYGEN, \ + EVP_PKEY_CTRL_EC_PARAMGEN_CURVE_NID, nid, NULL) + +# define EVP_PKEY_CTX_set_ec_param_enc(ctx, flag) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_EC, \ + EVP_PKEY_OP_PARAMGEN|EVP_PKEY_OP_KEYGEN, \ + EVP_PKEY_CTRL_EC_PARAM_ENC, flag, NULL) + +# define EVP_PKEY_CTX_set_ecdh_cofactor_mode(ctx, flag) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_EC, \ + EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_EC_ECDH_COFACTOR, flag, NULL) + +# define EVP_PKEY_CTX_get_ecdh_cofactor_mode(ctx) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_EC, \ + EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_EC_ECDH_COFACTOR, -2, NULL) + +# define EVP_PKEY_CTX_set_ecdh_kdf_type(ctx, kdf) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_EC, \ + EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_EC_KDF_TYPE, kdf, NULL) + +# define EVP_PKEY_CTX_get_ecdh_kdf_type(ctx) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_EC, \ + EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_EC_KDF_TYPE, -2, NULL) + +# define EVP_PKEY_CTX_set_ecdh_kdf_md(ctx, md) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_EC, \ + EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_EC_KDF_MD, 0, (void *)(md)) + +# define EVP_PKEY_CTX_get_ecdh_kdf_md(ctx, pmd) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_EC, \ + EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_GET_EC_KDF_MD, 0, (void *)(pmd)) + +# define EVP_PKEY_CTX_set_ecdh_kdf_outlen(ctx, len) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_EC, \ + EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_EC_KDF_OUTLEN, len, NULL) + +# define EVP_PKEY_CTX_get_ecdh_kdf_outlen(ctx, plen) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_EC, \ + 
EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_GET_EC_KDF_OUTLEN, 0, \ + (void *)(plen)) + +# define EVP_PKEY_CTX_set0_ecdh_kdf_ukm(ctx, p, plen) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_EC, \ + EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_EC_KDF_UKM, plen, (void *)(p)) + +# define EVP_PKEY_CTX_get0_ecdh_kdf_ukm(ctx, p) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_EC, \ + EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_GET_EC_KDF_UKM, 0, (void *)(p)) + +/* SM2 will skip the operation check so no need to pass operation here */ +# define EVP_PKEY_CTX_set1_id(ctx, id, id_len) \ + EVP_PKEY_CTX_ctrl(ctx, -1, -1, \ + EVP_PKEY_CTRL_SET1_ID, (int)id_len, (void*)(id)) + +# define EVP_PKEY_CTX_get1_id(ctx, id) \ + EVP_PKEY_CTX_ctrl(ctx, -1, -1, \ + EVP_PKEY_CTRL_GET1_ID, 0, (void*)(id)) + +# define EVP_PKEY_CTX_get1_id_len(ctx, id_len) \ + EVP_PKEY_CTX_ctrl(ctx, -1, -1, \ + EVP_PKEY_CTRL_GET1_ID_LEN, 0, (void*)(id_len)) + +# define EVP_PKEY_CTRL_EC_PARAMGEN_CURVE_NID (EVP_PKEY_ALG_CTRL + 1) +# define EVP_PKEY_CTRL_EC_PARAM_ENC (EVP_PKEY_ALG_CTRL + 2) +# define EVP_PKEY_CTRL_EC_ECDH_COFACTOR (EVP_PKEY_ALG_CTRL + 3) +# define EVP_PKEY_CTRL_EC_KDF_TYPE (EVP_PKEY_ALG_CTRL + 4) +# define EVP_PKEY_CTRL_EC_KDF_MD (EVP_PKEY_ALG_CTRL + 5) +# define EVP_PKEY_CTRL_GET_EC_KDF_MD (EVP_PKEY_ALG_CTRL + 6) +# define EVP_PKEY_CTRL_EC_KDF_OUTLEN (EVP_PKEY_ALG_CTRL + 7) +# define EVP_PKEY_CTRL_GET_EC_KDF_OUTLEN (EVP_PKEY_ALG_CTRL + 8) +# define EVP_PKEY_CTRL_EC_KDF_UKM (EVP_PKEY_ALG_CTRL + 9) +# define EVP_PKEY_CTRL_GET_EC_KDF_UKM (EVP_PKEY_ALG_CTRL + 10) +# define EVP_PKEY_CTRL_SET1_ID (EVP_PKEY_ALG_CTRL + 11) +# define EVP_PKEY_CTRL_GET1_ID (EVP_PKEY_ALG_CTRL + 12) +# define EVP_PKEY_CTRL_GET1_ID_LEN (EVP_PKEY_ALG_CTRL + 13) +/* KDF types */ +# define EVP_PKEY_ECDH_KDF_NONE 1 +# define EVP_PKEY_ECDH_KDF_X9_63 2 +/** The old name for EVP_PKEY_ECDH_KDF_X9_63 + * The ECDH KDF specification has been mistakingly attributed to ANSI X9.62, + * it is actually specified in ANSI X9.63. + * This identifier is retained for backwards compatibility + */ +# define EVP_PKEY_ECDH_KDF_X9_62 EVP_PKEY_ECDH_KDF_X9_63 + + +# ifdef __cplusplus +} +# endif +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/ecdh.h b/third_party/openssl/uos/sw_64/include/openssl/ecdh.h new file mode 100644 index 00000000..681f3d5e --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/ecdh.h @@ -0,0 +1,10 @@ +/* + * Copyright 2002-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#include diff --git a/third_party/openssl/uos/sw_64/include/openssl/ecdsa.h b/third_party/openssl/uos/sw_64/include/openssl/ecdsa.h new file mode 100644 index 00000000..681f3d5e --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/ecdsa.h @@ -0,0 +1,10 @@ +/* + * Copyright 2002-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#include diff --git a/third_party/openssl/uos/sw_64/include/openssl/ecerr.h b/third_party/openssl/uos/sw_64/include/openssl/ecerr.h new file mode 100644 index 00000000..51738113 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/ecerr.h @@ -0,0 +1,276 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_ECERR_H +# define HEADER_ECERR_H + +# ifndef HEADER_SYMHACKS_H +# include +# endif + +# include + +# ifndef OPENSSL_NO_EC + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_EC_strings(void); + +/* + * EC function codes. + */ +# define EC_F_BN_TO_FELEM 224 +# define EC_F_D2I_ECPARAMETERS 144 +# define EC_F_D2I_ECPKPARAMETERS 145 +# define EC_F_D2I_ECPRIVATEKEY 146 +# define EC_F_DO_EC_KEY_PRINT 221 +# define EC_F_ECDH_CMS_DECRYPT 238 +# define EC_F_ECDH_CMS_SET_SHARED_INFO 239 +# define EC_F_ECDH_COMPUTE_KEY 246 +# define EC_F_ECDH_SIMPLE_COMPUTE_KEY 257 +# define EC_F_ECDSA_DO_SIGN_EX 251 +# define EC_F_ECDSA_DO_VERIFY 252 +# define EC_F_ECDSA_SIGN_EX 254 +# define EC_F_ECDSA_SIGN_SETUP 248 +# define EC_F_ECDSA_SIG_NEW 265 +# define EC_F_ECDSA_VERIFY 253 +# define EC_F_ECD_ITEM_VERIFY 270 +# define EC_F_ECKEY_PARAM2TYPE 223 +# define EC_F_ECKEY_PARAM_DECODE 212 +# define EC_F_ECKEY_PRIV_DECODE 213 +# define EC_F_ECKEY_PRIV_ENCODE 214 +# define EC_F_ECKEY_PUB_DECODE 215 +# define EC_F_ECKEY_PUB_ENCODE 216 +# define EC_F_ECKEY_TYPE2PARAM 220 +# define EC_F_ECPARAMETERS_PRINT 147 +# define EC_F_ECPARAMETERS_PRINT_FP 148 +# define EC_F_ECPKPARAMETERS_PRINT 149 +# define EC_F_ECPKPARAMETERS_PRINT_FP 150 +# define EC_F_ECP_NISTZ256_GET_AFFINE 240 +# define EC_F_ECP_NISTZ256_INV_MOD_ORD 275 +# define EC_F_ECP_NISTZ256_MULT_PRECOMPUTE 243 +# define EC_F_ECP_NISTZ256_POINTS_MUL 241 +# define EC_F_ECP_NISTZ256_PRE_COMP_NEW 244 +# define EC_F_ECP_NISTZ256_WINDOWED_MUL 242 +# define EC_F_ECX_KEY_OP 266 +# define EC_F_ECX_PRIV_ENCODE 267 +# define EC_F_ECX_PUB_ENCODE 268 +# define EC_F_EC_ASN1_GROUP2CURVE 153 +# define EC_F_EC_ASN1_GROUP2FIELDID 154 +# define EC_F_EC_GF2M_MONTGOMERY_POINT_MULTIPLY 208 +# define EC_F_EC_GF2M_SIMPLE_FIELD_INV 296 +# define EC_F_EC_GF2M_SIMPLE_GROUP_CHECK_DISCRIMINANT 159 +# define EC_F_EC_GF2M_SIMPLE_GROUP_SET_CURVE 195 +# define EC_F_EC_GF2M_SIMPLE_LADDER_POST 285 +# define EC_F_EC_GF2M_SIMPLE_LADDER_PRE 288 +# define EC_F_EC_GF2M_SIMPLE_OCT2POINT 160 +# define EC_F_EC_GF2M_SIMPLE_POINT2OCT 161 +# define EC_F_EC_GF2M_SIMPLE_POINTS_MUL 289 +# define EC_F_EC_GF2M_SIMPLE_POINT_GET_AFFINE_COORDINATES 162 +# define EC_F_EC_GF2M_SIMPLE_POINT_SET_AFFINE_COORDINATES 163 +# define EC_F_EC_GF2M_SIMPLE_SET_COMPRESSED_COORDINATES 164 +# define EC_F_EC_GFP_MONT_FIELD_DECODE 133 +# define EC_F_EC_GFP_MONT_FIELD_ENCODE 134 +# define EC_F_EC_GFP_MONT_FIELD_INV 297 +# define EC_F_EC_GFP_MONT_FIELD_MUL 131 +# define EC_F_EC_GFP_MONT_FIELD_SET_TO_ONE 209 +# define EC_F_EC_GFP_MONT_FIELD_SQR 132 +# define EC_F_EC_GFP_MONT_GROUP_SET_CURVE 189 +# define EC_F_EC_GFP_NISTP224_GROUP_SET_CURVE 225 +# define EC_F_EC_GFP_NISTP224_POINTS_MUL 228 +# define 
EC_F_EC_GFP_NISTP224_POINT_GET_AFFINE_COORDINATES 226 +# define EC_F_EC_GFP_NISTP256_GROUP_SET_CURVE 230 +# define EC_F_EC_GFP_NISTP256_POINTS_MUL 231 +# define EC_F_EC_GFP_NISTP256_POINT_GET_AFFINE_COORDINATES 232 +# define EC_F_EC_GFP_NISTP521_GROUP_SET_CURVE 233 +# define EC_F_EC_GFP_NISTP521_POINTS_MUL 234 +# define EC_F_EC_GFP_NISTP521_POINT_GET_AFFINE_COORDINATES 235 +# define EC_F_EC_GFP_NIST_FIELD_MUL 200 +# define EC_F_EC_GFP_NIST_FIELD_SQR 201 +# define EC_F_EC_GFP_NIST_GROUP_SET_CURVE 202 +# define EC_F_EC_GFP_SIMPLE_BLIND_COORDINATES 287 +# define EC_F_EC_GFP_SIMPLE_FIELD_INV 298 +# define EC_F_EC_GFP_SIMPLE_GROUP_CHECK_DISCRIMINANT 165 +# define EC_F_EC_GFP_SIMPLE_GROUP_SET_CURVE 166 +# define EC_F_EC_GFP_SIMPLE_MAKE_AFFINE 102 +# define EC_F_EC_GFP_SIMPLE_OCT2POINT 103 +# define EC_F_EC_GFP_SIMPLE_POINT2OCT 104 +# define EC_F_EC_GFP_SIMPLE_POINTS_MAKE_AFFINE 137 +# define EC_F_EC_GFP_SIMPLE_POINT_GET_AFFINE_COORDINATES 167 +# define EC_F_EC_GFP_SIMPLE_POINT_SET_AFFINE_COORDINATES 168 +# define EC_F_EC_GFP_SIMPLE_SET_COMPRESSED_COORDINATES 169 +# define EC_F_EC_GROUP_CHECK 170 +# define EC_F_EC_GROUP_CHECK_DISCRIMINANT 171 +# define EC_F_EC_GROUP_COPY 106 +# define EC_F_EC_GROUP_GET_CURVE 291 +# define EC_F_EC_GROUP_GET_CURVE_GF2M 172 +# define EC_F_EC_GROUP_GET_CURVE_GFP 130 +# define EC_F_EC_GROUP_GET_DEGREE 173 +# define EC_F_EC_GROUP_GET_ECPARAMETERS 261 +# define EC_F_EC_GROUP_GET_ECPKPARAMETERS 262 +# define EC_F_EC_GROUP_GET_PENTANOMIAL_BASIS 193 +# define EC_F_EC_GROUP_GET_TRINOMIAL_BASIS 194 +# define EC_F_EC_GROUP_NEW 108 +# define EC_F_EC_GROUP_NEW_BY_CURVE_NAME 174 +# define EC_F_EC_GROUP_NEW_FROM_DATA 175 +# define EC_F_EC_GROUP_NEW_FROM_ECPARAMETERS 263 +# define EC_F_EC_GROUP_NEW_FROM_ECPKPARAMETERS 264 +# define EC_F_EC_GROUP_SET_CURVE 292 +# define EC_F_EC_GROUP_SET_CURVE_GF2M 176 +# define EC_F_EC_GROUP_SET_CURVE_GFP 109 +# define EC_F_EC_GROUP_SET_GENERATOR 111 +# define EC_F_EC_GROUP_SET_SEED 286 +# define EC_F_EC_KEY_CHECK_KEY 177 +# define EC_F_EC_KEY_COPY 178 +# define EC_F_EC_KEY_GENERATE_KEY 179 +# define EC_F_EC_KEY_NEW 182 +# define EC_F_EC_KEY_NEW_METHOD 245 +# define EC_F_EC_KEY_OCT2PRIV 255 +# define EC_F_EC_KEY_PRINT 180 +# define EC_F_EC_KEY_PRINT_FP 181 +# define EC_F_EC_KEY_PRIV2BUF 279 +# define EC_F_EC_KEY_PRIV2OCT 256 +# define EC_F_EC_KEY_SET_PUBLIC_KEY_AFFINE_COORDINATES 229 +# define EC_F_EC_KEY_SIMPLE_CHECK_KEY 258 +# define EC_F_EC_KEY_SIMPLE_OCT2PRIV 259 +# define EC_F_EC_KEY_SIMPLE_PRIV2OCT 260 +# define EC_F_EC_PKEY_CHECK 273 +# define EC_F_EC_PKEY_PARAM_CHECK 274 +# define EC_F_EC_POINTS_MAKE_AFFINE 136 +# define EC_F_EC_POINTS_MUL 290 +# define EC_F_EC_POINT_ADD 112 +# define EC_F_EC_POINT_BN2POINT 280 +# define EC_F_EC_POINT_CMP 113 +# define EC_F_EC_POINT_COPY 114 +# define EC_F_EC_POINT_DBL 115 +# define EC_F_EC_POINT_GET_AFFINE_COORDINATES 293 +# define EC_F_EC_POINT_GET_AFFINE_COORDINATES_GF2M 183 +# define EC_F_EC_POINT_GET_AFFINE_COORDINATES_GFP 116 +# define EC_F_EC_POINT_GET_JPROJECTIVE_COORDINATES_GFP 117 +# define EC_F_EC_POINT_INVERT 210 +# define EC_F_EC_POINT_IS_AT_INFINITY 118 +# define EC_F_EC_POINT_IS_ON_CURVE 119 +# define EC_F_EC_POINT_MAKE_AFFINE 120 +# define EC_F_EC_POINT_NEW 121 +# define EC_F_EC_POINT_OCT2POINT 122 +# define EC_F_EC_POINT_POINT2BUF 281 +# define EC_F_EC_POINT_POINT2OCT 123 +# define EC_F_EC_POINT_SET_AFFINE_COORDINATES 294 +# define EC_F_EC_POINT_SET_AFFINE_COORDINATES_GF2M 185 +# define EC_F_EC_POINT_SET_AFFINE_COORDINATES_GFP 124 +# define EC_F_EC_POINT_SET_COMPRESSED_COORDINATES 295 +# 
define EC_F_EC_POINT_SET_COMPRESSED_COORDINATES_GF2M 186 +# define EC_F_EC_POINT_SET_COMPRESSED_COORDINATES_GFP 125 +# define EC_F_EC_POINT_SET_JPROJECTIVE_COORDINATES_GFP 126 +# define EC_F_EC_POINT_SET_TO_INFINITY 127 +# define EC_F_EC_PRE_COMP_NEW 196 +# define EC_F_EC_SCALAR_MUL_LADDER 284 +# define EC_F_EC_WNAF_MUL 187 +# define EC_F_EC_WNAF_PRECOMPUTE_MULT 188 +# define EC_F_I2D_ECPARAMETERS 190 +# define EC_F_I2D_ECPKPARAMETERS 191 +# define EC_F_I2D_ECPRIVATEKEY 192 +# define EC_F_I2O_ECPUBLICKEY 151 +# define EC_F_NISTP224_PRE_COMP_NEW 227 +# define EC_F_NISTP256_PRE_COMP_NEW 236 +# define EC_F_NISTP521_PRE_COMP_NEW 237 +# define EC_F_O2I_ECPUBLICKEY 152 +# define EC_F_OLD_EC_PRIV_DECODE 222 +# define EC_F_OSSL_ECDH_COMPUTE_KEY 247 +# define EC_F_OSSL_ECDSA_SIGN_SIG 249 +# define EC_F_OSSL_ECDSA_VERIFY_SIG 250 +# define EC_F_PKEY_ECD_CTRL 271 +# define EC_F_PKEY_ECD_DIGESTSIGN 272 +# define EC_F_PKEY_ECD_DIGESTSIGN25519 276 +# define EC_F_PKEY_ECD_DIGESTSIGN448 277 +# define EC_F_PKEY_ECX_DERIVE 269 +# define EC_F_PKEY_EC_CTRL 197 +# define EC_F_PKEY_EC_CTRL_STR 198 +# define EC_F_PKEY_EC_DERIVE 217 +# define EC_F_PKEY_EC_INIT 282 +# define EC_F_PKEY_EC_KDF_DERIVE 283 +# define EC_F_PKEY_EC_KEYGEN 199 +# define EC_F_PKEY_EC_PARAMGEN 219 +# define EC_F_PKEY_EC_SIGN 218 +# define EC_F_VALIDATE_ECX_DERIVE 278 + +/* + * EC reason codes. + */ +# define EC_R_ASN1_ERROR 115 +# define EC_R_BAD_SIGNATURE 156 +# define EC_R_BIGNUM_OUT_OF_RANGE 144 +# define EC_R_BUFFER_TOO_SMALL 100 +# define EC_R_CANNOT_INVERT 165 +# define EC_R_COORDINATES_OUT_OF_RANGE 146 +# define EC_R_CURVE_DOES_NOT_SUPPORT_ECDH 160 +# define EC_R_CURVE_DOES_NOT_SUPPORT_SIGNING 159 +# define EC_R_D2I_ECPKPARAMETERS_FAILURE 117 +# define EC_R_DECODE_ERROR 142 +# define EC_R_DISCRIMINANT_IS_ZERO 118 +# define EC_R_EC_GROUP_NEW_BY_NAME_FAILURE 119 +# define EC_R_FIELD_TOO_LARGE 143 +# define EC_R_GF2M_NOT_SUPPORTED 147 +# define EC_R_GROUP2PKPARAMETERS_FAILURE 120 +# define EC_R_I2D_ECPKPARAMETERS_FAILURE 121 +# define EC_R_INCOMPATIBLE_OBJECTS 101 +# define EC_R_INVALID_ARGUMENT 112 +# define EC_R_INVALID_COMPRESSED_POINT 110 +# define EC_R_INVALID_COMPRESSION_BIT 109 +# define EC_R_INVALID_CURVE 141 +# define EC_R_INVALID_DIGEST 151 +# define EC_R_INVALID_DIGEST_TYPE 138 +# define EC_R_INVALID_ENCODING 102 +# define EC_R_INVALID_FIELD 103 +# define EC_R_INVALID_FORM 104 +# define EC_R_INVALID_GROUP_ORDER 122 +# define EC_R_INVALID_KEY 116 +# define EC_R_INVALID_OUTPUT_LENGTH 161 +# define EC_R_INVALID_PEER_KEY 133 +# define EC_R_INVALID_PENTANOMIAL_BASIS 132 +# define EC_R_INVALID_PRIVATE_KEY 123 +# define EC_R_INVALID_TRINOMIAL_BASIS 137 +# define EC_R_KDF_PARAMETER_ERROR 148 +# define EC_R_KEYS_NOT_SET 140 +# define EC_R_LADDER_POST_FAILURE 136 +# define EC_R_LADDER_PRE_FAILURE 153 +# define EC_R_LADDER_STEP_FAILURE 162 +# define EC_R_MISSING_OID 167 +# define EC_R_MISSING_PARAMETERS 124 +# define EC_R_MISSING_PRIVATE_KEY 125 +# define EC_R_NEED_NEW_SETUP_VALUES 157 +# define EC_R_NOT_A_NIST_PRIME 135 +# define EC_R_NOT_IMPLEMENTED 126 +# define EC_R_NOT_INITIALIZED 111 +# define EC_R_NO_PARAMETERS_SET 139 +# define EC_R_NO_PRIVATE_VALUE 154 +# define EC_R_OPERATION_NOT_SUPPORTED 152 +# define EC_R_PASSED_NULL_PARAMETER 134 +# define EC_R_PEER_KEY_ERROR 149 +# define EC_R_PKPARAMETERS2GROUP_FAILURE 127 +# define EC_R_POINT_ARITHMETIC_FAILURE 155 +# define EC_R_POINT_AT_INFINITY 106 +# define EC_R_POINT_COORDINATES_BLIND_FAILURE 163 +# define EC_R_POINT_IS_NOT_ON_CURVE 107 +# define EC_R_RANDOM_NUMBER_GENERATION_FAILED 
158 +# define EC_R_SHARED_INFO_ERROR 150 +# define EC_R_SLOT_FULL 108 +# define EC_R_UNDEFINED_GENERATOR 113 +# define EC_R_UNDEFINED_ORDER 128 +# define EC_R_UNKNOWN_COFACTOR 164 +# define EC_R_UNKNOWN_GROUP 129 +# define EC_R_UNKNOWN_ORDER 114 +# define EC_R_UNSUPPORTED_FIELD 131 +# define EC_R_WRONG_CURVE_PARAMETERS 145 +# define EC_R_WRONG_ORDER 130 + +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/engine.h b/third_party/openssl/uos/sw_64/include/openssl/engine.h new file mode 100644 index 00000000..d707eaeb --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/engine.h @@ -0,0 +1,752 @@ +/* + * Copyright 2000-2022 The OpenSSL Project Authors. All Rights Reserved. + * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_ENGINE_H +# define HEADER_ENGINE_H + +# include + +# ifndef OPENSSL_NO_ENGINE +# if OPENSSL_API_COMPAT < 0x10100000L +# include +# include +# include +# include +# include +# include +# include +# include +# endif +# include +# include +# include +# include +# ifdef __cplusplus +extern "C" { +# endif + +/* + * These flags are used to control combinations of algorithm (methods) by + * bitwise "OR"ing. + */ +# define ENGINE_METHOD_RSA (unsigned int)0x0001 +# define ENGINE_METHOD_DSA (unsigned int)0x0002 +# define ENGINE_METHOD_DH (unsigned int)0x0004 +# define ENGINE_METHOD_RAND (unsigned int)0x0008 +# define ENGINE_METHOD_CIPHERS (unsigned int)0x0040 +# define ENGINE_METHOD_DIGESTS (unsigned int)0x0080 +# define ENGINE_METHOD_PKEY_METHS (unsigned int)0x0200 +# define ENGINE_METHOD_PKEY_ASN1_METHS (unsigned int)0x0400 +# define ENGINE_METHOD_EC (unsigned int)0x0800 +/* Obvious all-or-nothing cases. */ +# define ENGINE_METHOD_ALL (unsigned int)0xFFFF +# define ENGINE_METHOD_NONE (unsigned int)0x0000 + +/* + * This(ese) flag(s) controls behaviour of the ENGINE_TABLE mechanism used + * internally to control registration of ENGINE implementations, and can be + * set by ENGINE_set_table_flags(). The "NOINIT" flag prevents attempts to + * initialise registered ENGINEs if they are not already initialised. + */ +# define ENGINE_TABLE_FLAG_NOINIT (unsigned int)0x0001 + +/* ENGINE flags that can be set by ENGINE_set_flags(). */ +/* Not used */ +/* #define ENGINE_FLAGS_MALLOCED 0x0001 */ + +/* + * This flag is for ENGINEs that wish to handle the various 'CMD'-related + * control commands on their own. Without this flag, ENGINE_ctrl() handles + * these control commands on behalf of the ENGINE using their "cmd_defns" + * data. + */ +# define ENGINE_FLAGS_MANUAL_CMD_CTRL (int)0x0002 + +/* + * This flag is for ENGINEs who return new duplicate structures when found + * via "ENGINE_by_id()". When an ENGINE must store state (eg. if + * ENGINE_ctrl() commands are called in sequence as part of some stateful + * process like key-generation setup and execution), it can set this flag - + * then each attempt to obtain the ENGINE will result in it being copied into + * a new structure. Normally, ENGINEs don't declare this flag so + * ENGINE_by_id() just increments the existing ENGINE's structural reference + * count. 
+ */ +# define ENGINE_FLAGS_BY_ID_COPY (int)0x0004 + +/* + * This flag if for an ENGINE that does not want its methods registered as + * part of ENGINE_register_all_complete() for example if the methods are not + * usable as default methods. + */ + +# define ENGINE_FLAGS_NO_REGISTER_ALL (int)0x0008 + +/* + * ENGINEs can support their own command types, and these flags are used in + * ENGINE_CTRL_GET_CMD_FLAGS to indicate to the caller what kind of input + * each command expects. Currently only numeric and string input is + * supported. If a control command supports none of the _NUMERIC, _STRING, or + * _NO_INPUT options, then it is regarded as an "internal" control command - + * and not for use in config setting situations. As such, they're not + * available to the ENGINE_ctrl_cmd_string() function, only raw ENGINE_ctrl() + * access. Changes to this list of 'command types' should be reflected + * carefully in ENGINE_cmd_is_executable() and ENGINE_ctrl_cmd_string(). + */ + +/* accepts a 'long' input value (3rd parameter to ENGINE_ctrl) */ +# define ENGINE_CMD_FLAG_NUMERIC (unsigned int)0x0001 +/* + * accepts string input (cast from 'void*' to 'const char *', 4th parameter + * to ENGINE_ctrl) + */ +# define ENGINE_CMD_FLAG_STRING (unsigned int)0x0002 +/* + * Indicates that the control command takes *no* input. Ie. the control + * command is unparameterised. + */ +# define ENGINE_CMD_FLAG_NO_INPUT (unsigned int)0x0004 +/* + * Indicates that the control command is internal. This control command won't + * be shown in any output, and is only usable through the ENGINE_ctrl_cmd() + * function. + */ +# define ENGINE_CMD_FLAG_INTERNAL (unsigned int)0x0008 + +/* + * NB: These 3 control commands are deprecated and should not be used. + * ENGINEs relying on these commands should compile conditional support for + * compatibility (eg. if these symbols are defined) but should also migrate + * the same functionality to their own ENGINE-specific control functions that + * can be "discovered" by calling applications. The fact these control + * commands wouldn't be "executable" (ie. usable by text-based config) + * doesn't change the fact that application code can find and use them + * without requiring per-ENGINE hacking. + */ + +/* + * These flags are used to tell the ctrl function what should be done. All + * command numbers are shared between all engines, even if some don't make + * sense to some engines. In such a case, they do nothing but return the + * error ENGINE_R_CTRL_COMMAND_NOT_IMPLEMENTED. + */ +# define ENGINE_CTRL_SET_LOGSTREAM 1 +# define ENGINE_CTRL_SET_PASSWORD_CALLBACK 2 +# define ENGINE_CTRL_HUP 3/* Close and reinitialise + * any handles/connections + * etc. */ +# define ENGINE_CTRL_SET_USER_INTERFACE 4/* Alternative to callback */ +# define ENGINE_CTRL_SET_CALLBACK_DATA 5/* User-specific data, used + * when calling the password + * callback and the user + * interface */ +# define ENGINE_CTRL_LOAD_CONFIGURATION 6/* Load a configuration, + * given a string that + * represents a file name + * or so */ +# define ENGINE_CTRL_LOAD_SECTION 7/* Load data from a given + * section in the already + * loaded configuration */ + +/* + * These control commands allow an application to deal with an arbitrary + * engine in a dynamic way. Warn: Negative return values indicate errors FOR + * THESE COMMANDS because zero is used to indicate 'end-of-list'. Other + * commands, including ENGINE-specific command types, return zero for an + * error. 
An ENGINE can choose to implement these ctrl functions, and can + * internally manage things however it chooses - it does so by setting the + * ENGINE_FLAGS_MANUAL_CMD_CTRL flag (using ENGINE_set_flags()). Otherwise + * the ENGINE_ctrl() code handles this on the ENGINE's behalf using the + * cmd_defns data (set using ENGINE_set_cmd_defns()). This means an ENGINE's + * ctrl() handler need only implement its own commands - the above "meta" + * commands will be taken care of. + */ + +/* + * Returns non-zero if the supplied ENGINE has a ctrl() handler. If "not", + * then all the remaining control commands will return failure, so it is + * worth checking this first if the caller is trying to "discover" the + * engine's capabilities and doesn't want errors generated unnecessarily. + */ +# define ENGINE_CTRL_HAS_CTRL_FUNCTION 10 +/* + * Returns a positive command number for the first command supported by the + * engine. Returns zero if no ctrl commands are supported. + */ +# define ENGINE_CTRL_GET_FIRST_CMD_TYPE 11 +/* + * The 'long' argument specifies a command implemented by the engine, and the + * return value is the next command supported, or zero if there are no more. + */ +# define ENGINE_CTRL_GET_NEXT_CMD_TYPE 12 +/* + * The 'void*' argument is a command name (cast from 'const char *'), and the + * return value is the command that corresponds to it. + */ +# define ENGINE_CTRL_GET_CMD_FROM_NAME 13 +/* + * The next two allow a command to be converted into its corresponding string + * form. In each case, the 'long' argument supplies the command. In the + * NAME_LEN case, the return value is the length of the command name (not + * counting a trailing EOL). In the NAME case, the 'void*' argument must be a + * string buffer large enough, and it will be populated with the name of the + * command (WITH a trailing EOL). + */ +# define ENGINE_CTRL_GET_NAME_LEN_FROM_CMD 14 +# define ENGINE_CTRL_GET_NAME_FROM_CMD 15 +/* The next two are similar but give a "short description" of a command. */ +# define ENGINE_CTRL_GET_DESC_LEN_FROM_CMD 16 +# define ENGINE_CTRL_GET_DESC_FROM_CMD 17 +/* + * With this command, the return value is the OR'd combination of + * ENGINE_CMD_FLAG_*** values that indicate what kind of input a given + * engine-specific ctrl command expects. + */ +# define ENGINE_CTRL_GET_CMD_FLAGS 18 + +/* + * ENGINE implementations should start the numbering of their own control + * commands from this value. (ie. ENGINE_CMD_BASE, ENGINE_CMD_BASE + 1, etc). + */ +# define ENGINE_CMD_BASE 200 + +/* + * NB: These 2 nCipher "chil" control commands are deprecated, and their + * functionality is now available through ENGINE-specific control commands + * (exposed through the above-mentioned 'CMD'-handling). Code using these 2 + * commands should be migrated to the more general command handling before + * these are removed. + */ + +/* Flags specific to the nCipher "chil" engine */ +# define ENGINE_CTRL_CHIL_SET_FORKCHECK 100 + /* + * Depending on the value of the (long)i argument, this sets or + * unsets the SimpleForkCheck flag in the CHIL API to enable or + * disable checking and workarounds for applications that fork(). + */ +# define ENGINE_CTRL_CHIL_NO_LOCKING 101 + /* + * This prevents the initialisation function from providing mutex + * callbacks to the nCipher library. 
+ */ + +/* + * If an ENGINE supports its own specific control commands and wishes the + * framework to handle the above 'ENGINE_CMD_***'-manipulation commands on + * its behalf, it should supply a null-terminated array of ENGINE_CMD_DEFN + * entries to ENGINE_set_cmd_defns(). It should also implement a ctrl() + * handler that supports the stated commands (ie. the "cmd_num" entries as + * described by the array). NB: The array must be ordered in increasing order + * of cmd_num. "null-terminated" means that the last ENGINE_CMD_DEFN element + * has cmd_num set to zero and/or cmd_name set to NULL. + */ +typedef struct ENGINE_CMD_DEFN_st { + unsigned int cmd_num; /* The command number */ + const char *cmd_name; /* The command name itself */ + const char *cmd_desc; /* A short description of the command */ + unsigned int cmd_flags; /* The input the command expects */ +} ENGINE_CMD_DEFN; + +/* Generic function pointer */ +typedef int (*ENGINE_GEN_FUNC_PTR) (void); +/* Generic function pointer taking no arguments */ +typedef int (*ENGINE_GEN_INT_FUNC_PTR) (ENGINE *); +/* Specific control function pointer */ +typedef int (*ENGINE_CTRL_FUNC_PTR) (ENGINE *, int, long, void *, + void (*f) (void)); +/* Generic load_key function pointer */ +typedef EVP_PKEY *(*ENGINE_LOAD_KEY_PTR)(ENGINE *, const char *, + UI_METHOD *ui_method, + void *callback_data); +typedef int (*ENGINE_SSL_CLIENT_CERT_PTR) (ENGINE *, SSL *ssl, + STACK_OF(X509_NAME) *ca_dn, + X509 **pcert, EVP_PKEY **pkey, + STACK_OF(X509) **pother, + UI_METHOD *ui_method, + void *callback_data); +/*- + * These callback types are for an ENGINE's handler for cipher and digest logic. + * These handlers have these prototypes; + * int foo(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid); + * int foo(ENGINE *e, const EVP_MD **digest, const int **nids, int nid); + * Looking at how to implement these handlers in the case of cipher support, if + * the framework wants the EVP_CIPHER for 'nid', it will call; + * foo(e, &p_evp_cipher, NULL, nid); (return zero for failure) + * If the framework wants a list of supported 'nid's, it will call; + * foo(e, NULL, &p_nids, 0); (returns number of 'nids' or -1 for error) + */ +/* + * Returns to a pointer to the array of supported cipher 'nid's. If the + * second parameter is non-NULL it is set to the size of the returned array. + */ +typedef int (*ENGINE_CIPHERS_PTR) (ENGINE *, const EVP_CIPHER **, + const int **, int); +typedef int (*ENGINE_DIGESTS_PTR) (ENGINE *, const EVP_MD **, const int **, + int); +typedef int (*ENGINE_PKEY_METHS_PTR) (ENGINE *, EVP_PKEY_METHOD **, + const int **, int); +typedef int (*ENGINE_PKEY_ASN1_METHS_PTR) (ENGINE *, EVP_PKEY_ASN1_METHOD **, + const int **, int); +/* + * STRUCTURE functions ... all of these functions deal with pointers to + * ENGINE structures where the pointers have a "structural reference". This + * means that their reference is to allowed access to the structure but it + * does not imply that the structure is functional. To simply increment or + * decrement the structural reference count, use ENGINE_by_id and + * ENGINE_free. NB: This is not required when iterating using ENGINE_get_next + * as it will automatically decrement the structural reference count of the + * "current" ENGINE and increment the structural reference count of the + * ENGINE it returns (unless it is NULL). + */ + +/* Get the first/last "ENGINE" type available. 
*/ +ENGINE *ENGINE_get_first(void); +ENGINE *ENGINE_get_last(void); +/* Iterate to the next/previous "ENGINE" type (NULL = end of the list). */ +ENGINE *ENGINE_get_next(ENGINE *e); +ENGINE *ENGINE_get_prev(ENGINE *e); +/* Add another "ENGINE" type into the array. */ +int ENGINE_add(ENGINE *e); +/* Remove an existing "ENGINE" type from the array. */ +int ENGINE_remove(ENGINE *e); +/* Retrieve an engine from the list by its unique "id" value. */ +ENGINE *ENGINE_by_id(const char *id); + +#if OPENSSL_API_COMPAT < 0x10100000L +# define ENGINE_load_openssl() \ + OPENSSL_init_crypto(OPENSSL_INIT_ENGINE_OPENSSL, NULL) +# define ENGINE_load_dynamic() \ + OPENSSL_init_crypto(OPENSSL_INIT_ENGINE_DYNAMIC, NULL) +# ifndef OPENSSL_NO_STATIC_ENGINE +# define ENGINE_load_padlock() \ + OPENSSL_init_crypto(OPENSSL_INIT_ENGINE_PADLOCK, NULL) +# define ENGINE_load_capi() \ + OPENSSL_init_crypto(OPENSSL_INIT_ENGINE_CAPI, NULL) +# define ENGINE_load_afalg() \ + OPENSSL_init_crypto(OPENSSL_INIT_ENGINE_AFALG, NULL) +# endif +# define ENGINE_load_cryptodev() \ + OPENSSL_init_crypto(OPENSSL_INIT_ENGINE_CRYPTODEV, NULL) +# define ENGINE_load_rdrand() \ + OPENSSL_init_crypto(OPENSSL_INIT_ENGINE_RDRAND, NULL) +#endif +void ENGINE_load_builtin_engines(void); + +/* + * Get and set global flags (ENGINE_TABLE_FLAG_***) for the implementation + * "registry" handling. + */ +unsigned int ENGINE_get_table_flags(void); +void ENGINE_set_table_flags(unsigned int flags); + +/*- Manage registration of ENGINEs per "table". For each type, there are 3 + * functions; + * ENGINE_register_***(e) - registers the implementation from 'e' (if it has one) + * ENGINE_unregister_***(e) - unregister the implementation from 'e' + * ENGINE_register_all_***() - call ENGINE_register_***() for each 'e' in the list + * Cleanup is automatically registered from each table when required. + */ + +int ENGINE_register_RSA(ENGINE *e); +void ENGINE_unregister_RSA(ENGINE *e); +void ENGINE_register_all_RSA(void); + +int ENGINE_register_DSA(ENGINE *e); +void ENGINE_unregister_DSA(ENGINE *e); +void ENGINE_register_all_DSA(void); + +int ENGINE_register_EC(ENGINE *e); +void ENGINE_unregister_EC(ENGINE *e); +void ENGINE_register_all_EC(void); + +int ENGINE_register_DH(ENGINE *e); +void ENGINE_unregister_DH(ENGINE *e); +void ENGINE_register_all_DH(void); + +int ENGINE_register_RAND(ENGINE *e); +void ENGINE_unregister_RAND(ENGINE *e); +void ENGINE_register_all_RAND(void); + +int ENGINE_register_ciphers(ENGINE *e); +void ENGINE_unregister_ciphers(ENGINE *e); +void ENGINE_register_all_ciphers(void); + +int ENGINE_register_digests(ENGINE *e); +void ENGINE_unregister_digests(ENGINE *e); +void ENGINE_register_all_digests(void); + +int ENGINE_register_pkey_meths(ENGINE *e); +void ENGINE_unregister_pkey_meths(ENGINE *e); +void ENGINE_register_all_pkey_meths(void); + +int ENGINE_register_pkey_asn1_meths(ENGINE *e); +void ENGINE_unregister_pkey_asn1_meths(ENGINE *e); +void ENGINE_register_all_pkey_asn1_meths(void); + +/* + * These functions register all support from the above categories. Note, use + * of these functions can result in static linkage of code your application + * may not need. If you only need a subset of functionality, consider using + * more selective initialisation. + */ +int ENGINE_register_complete(ENGINE *e); +int ENGINE_register_all_complete(void); + +/* + * Send parameterised control commands to the engine. The possibilities to + * send down an integer, a pointer to data or a function pointer are + * provided. 
Any of the parameters may or may not be NULL, depending on the + * command number. In actuality, this function only requires a structural + * (rather than functional) reference to an engine, but many control commands + * may require the engine be functional. The caller should be aware of trying + * commands that require an operational ENGINE, and only use functional + * references in such situations. + */ +int ENGINE_ctrl(ENGINE *e, int cmd, long i, void *p, void (*f) (void)); + +/* + * This function tests if an ENGINE-specific command is usable as a + * "setting". Eg. in an application's config file that gets processed through + * ENGINE_ctrl_cmd_string(). If this returns zero, it is not available to + * ENGINE_ctrl_cmd_string(), only ENGINE_ctrl(). + */ +int ENGINE_cmd_is_executable(ENGINE *e, int cmd); + +/* + * This function works like ENGINE_ctrl() with the exception of taking a + * command name instead of a command number, and can handle optional + * commands. See the comment on ENGINE_ctrl_cmd_string() for an explanation + * on how to use the cmd_name and cmd_optional. + */ +int ENGINE_ctrl_cmd(ENGINE *e, const char *cmd_name, + long i, void *p, void (*f) (void), int cmd_optional); + +/* + * This function passes a command-name and argument to an ENGINE. The + * cmd_name is converted to a command number and the control command is + * called using 'arg' as an argument (unless the ENGINE doesn't support such + * a command, in which case no control command is called). The command is + * checked for input flags, and if necessary the argument will be converted + * to a numeric value. If cmd_optional is non-zero, then if the ENGINE + * doesn't support the given cmd_name the return value will be success + * anyway. This function is intended for applications to use so that users + * (or config files) can supply engine-specific config data to the ENGINE at + * run-time to control behaviour of specific engines. As such, it shouldn't + * be used for calling ENGINE_ctrl() functions that return data, deal with + * binary data, or that are otherwise supposed to be used directly through + * ENGINE_ctrl() in application code. Any "return" data from an ENGINE_ctrl() + * operation in this function will be lost - the return value is interpreted + * as failure if the return value is zero, success otherwise, and this + * function returns a boolean value as a result. In other words, vendors of + * 'ENGINE'-enabled devices should write ENGINE implementations with + * parameterisations that work in this scheme, so that compliant ENGINE-based + * applications can work consistently with the same configuration for the + * same ENGINE-enabled devices, across applications. + */ +int ENGINE_ctrl_cmd_string(ENGINE *e, const char *cmd_name, const char *arg, + int cmd_optional); + +/* + * These functions are useful for manufacturing new ENGINE structures. They + * don't address reference counting at all - one uses them to populate an + * ENGINE structure with personalised implementations of things prior to + * using it directly or adding it to the builtin ENGINE list in OpenSSL. + * These are also here so that the ENGINE structure doesn't have to be + * exposed and break binary compatibility! 
+ */ +ENGINE *ENGINE_new(void); +int ENGINE_free(ENGINE *e); +int ENGINE_up_ref(ENGINE *e); +int ENGINE_set_id(ENGINE *e, const char *id); +int ENGINE_set_name(ENGINE *e, const char *name); +int ENGINE_set_RSA(ENGINE *e, const RSA_METHOD *rsa_meth); +int ENGINE_set_DSA(ENGINE *e, const DSA_METHOD *dsa_meth); +int ENGINE_set_EC(ENGINE *e, const EC_KEY_METHOD *ecdsa_meth); +int ENGINE_set_DH(ENGINE *e, const DH_METHOD *dh_meth); +int ENGINE_set_RAND(ENGINE *e, const RAND_METHOD *rand_meth); +int ENGINE_set_destroy_function(ENGINE *e, ENGINE_GEN_INT_FUNC_PTR destroy_f); +int ENGINE_set_init_function(ENGINE *e, ENGINE_GEN_INT_FUNC_PTR init_f); +int ENGINE_set_finish_function(ENGINE *e, ENGINE_GEN_INT_FUNC_PTR finish_f); +int ENGINE_set_ctrl_function(ENGINE *e, ENGINE_CTRL_FUNC_PTR ctrl_f); +int ENGINE_set_load_privkey_function(ENGINE *e, + ENGINE_LOAD_KEY_PTR loadpriv_f); +int ENGINE_set_load_pubkey_function(ENGINE *e, ENGINE_LOAD_KEY_PTR loadpub_f); +int ENGINE_set_load_ssl_client_cert_function(ENGINE *e, + ENGINE_SSL_CLIENT_CERT_PTR + loadssl_f); +int ENGINE_set_ciphers(ENGINE *e, ENGINE_CIPHERS_PTR f); +int ENGINE_set_digests(ENGINE *e, ENGINE_DIGESTS_PTR f); +int ENGINE_set_pkey_meths(ENGINE *e, ENGINE_PKEY_METHS_PTR f); +int ENGINE_set_pkey_asn1_meths(ENGINE *e, ENGINE_PKEY_ASN1_METHS_PTR f); +int ENGINE_set_flags(ENGINE *e, int flags); +int ENGINE_set_cmd_defns(ENGINE *e, const ENGINE_CMD_DEFN *defns); +/* These functions allow control over any per-structure ENGINE data. */ +#define ENGINE_get_ex_new_index(l, p, newf, dupf, freef) \ + CRYPTO_get_ex_new_index(CRYPTO_EX_INDEX_ENGINE, l, p, newf, dupf, freef) +int ENGINE_set_ex_data(ENGINE *e, int idx, void *arg); +void *ENGINE_get_ex_data(const ENGINE *e, int idx); + +#if OPENSSL_API_COMPAT < 0x10100000L +/* + * This function previously cleaned up anything that needs it. Auto-deinit will + * now take care of it so it is no longer required to call this function. + */ +# define ENGINE_cleanup() while(0) continue +#endif + +/* + * These return values from within the ENGINE structure. These can be useful + * with functional references as well as structural references - it depends + * which you obtained. Using the result for functional purposes if you only + * obtained a structural reference may be problematic! 
+ */ +const char *ENGINE_get_id(const ENGINE *e); +const char *ENGINE_get_name(const ENGINE *e); +const RSA_METHOD *ENGINE_get_RSA(const ENGINE *e); +const DSA_METHOD *ENGINE_get_DSA(const ENGINE *e); +const EC_KEY_METHOD *ENGINE_get_EC(const ENGINE *e); +const DH_METHOD *ENGINE_get_DH(const ENGINE *e); +const RAND_METHOD *ENGINE_get_RAND(const ENGINE *e); +ENGINE_GEN_INT_FUNC_PTR ENGINE_get_destroy_function(const ENGINE *e); +ENGINE_GEN_INT_FUNC_PTR ENGINE_get_init_function(const ENGINE *e); +ENGINE_GEN_INT_FUNC_PTR ENGINE_get_finish_function(const ENGINE *e); +ENGINE_CTRL_FUNC_PTR ENGINE_get_ctrl_function(const ENGINE *e); +ENGINE_LOAD_KEY_PTR ENGINE_get_load_privkey_function(const ENGINE *e); +ENGINE_LOAD_KEY_PTR ENGINE_get_load_pubkey_function(const ENGINE *e); +ENGINE_SSL_CLIENT_CERT_PTR ENGINE_get_ssl_client_cert_function(const ENGINE + *e); +ENGINE_CIPHERS_PTR ENGINE_get_ciphers(const ENGINE *e); +ENGINE_DIGESTS_PTR ENGINE_get_digests(const ENGINE *e); +ENGINE_PKEY_METHS_PTR ENGINE_get_pkey_meths(const ENGINE *e); +ENGINE_PKEY_ASN1_METHS_PTR ENGINE_get_pkey_asn1_meths(const ENGINE *e); +const EVP_CIPHER *ENGINE_get_cipher(ENGINE *e, int nid); +const EVP_MD *ENGINE_get_digest(ENGINE *e, int nid); +const EVP_PKEY_METHOD *ENGINE_get_pkey_meth(ENGINE *e, int nid); +const EVP_PKEY_ASN1_METHOD *ENGINE_get_pkey_asn1_meth(ENGINE *e, int nid); +const EVP_PKEY_ASN1_METHOD *ENGINE_get_pkey_asn1_meth_str(ENGINE *e, + const char *str, + int len); +const EVP_PKEY_ASN1_METHOD *ENGINE_pkey_asn1_find_str(ENGINE **pe, + const char *str, + int len); +const ENGINE_CMD_DEFN *ENGINE_get_cmd_defns(const ENGINE *e); +int ENGINE_get_flags(const ENGINE *e); + +/* + * FUNCTIONAL functions. These functions deal with ENGINE structures that + * have (or will) be initialised for use. Broadly speaking, the structural + * functions are useful for iterating the list of available engine types, + * creating new engine types, and other "list" operations. These functions + * actually deal with ENGINEs that are to be used. As such these functions + * can fail (if applicable) when particular engines are unavailable - eg. if + * a hardware accelerator is not attached or not functioning correctly. Each + * ENGINE has 2 reference counts; structural and functional. Every time a + * functional reference is obtained or released, a corresponding structural + * reference is automatically obtained or released too. + */ + +/* + * Initialise a engine type for use (or up its reference count if it's + * already in use). This will fail if the engine is not currently operational + * and cannot initialise. + */ +int ENGINE_init(ENGINE *e); +/* + * Free a functional reference to a engine type. This does not require a + * corresponding call to ENGINE_free as it also releases a structural + * reference. + */ +int ENGINE_finish(ENGINE *e); + +/* + * The following functions handle keys that are stored in some secondary + * location, handled by the engine. The storage may be on a card or + * whatever. + */ +EVP_PKEY *ENGINE_load_private_key(ENGINE *e, const char *key_id, + UI_METHOD *ui_method, void *callback_data); +EVP_PKEY *ENGINE_load_public_key(ENGINE *e, const char *key_id, + UI_METHOD *ui_method, void *callback_data); +int ENGINE_load_ssl_client_cert(ENGINE *e, SSL *s, + STACK_OF(X509_NAME) *ca_dn, X509 **pcert, + EVP_PKEY **ppkey, STACK_OF(X509) **pother, + UI_METHOD *ui_method, void *callback_data); + +/* + * This returns a pointer for the current ENGINE structure that is (by + * default) performing any RSA operations. 
The value returned is an + * incremented reference, so it should be free'd (ENGINE_finish) before it is + * discarded. + */ +ENGINE *ENGINE_get_default_RSA(void); +/* Same for the other "methods" */ +ENGINE *ENGINE_get_default_DSA(void); +ENGINE *ENGINE_get_default_EC(void); +ENGINE *ENGINE_get_default_DH(void); +ENGINE *ENGINE_get_default_RAND(void); +/* + * These functions can be used to get a functional reference to perform + * ciphering or digesting corresponding to "nid". + */ +ENGINE *ENGINE_get_cipher_engine(int nid); +ENGINE *ENGINE_get_digest_engine(int nid); +ENGINE *ENGINE_get_pkey_meth_engine(int nid); +ENGINE *ENGINE_get_pkey_asn1_meth_engine(int nid); + +/* + * This sets a new default ENGINE structure for performing RSA operations. If + * the result is non-zero (success) then the ENGINE structure will have had + * its reference count up'd so the caller should still free their own + * reference 'e'. + */ +int ENGINE_set_default_RSA(ENGINE *e); +int ENGINE_set_default_string(ENGINE *e, const char *def_list); +/* Same for the other "methods" */ +int ENGINE_set_default_DSA(ENGINE *e); +int ENGINE_set_default_EC(ENGINE *e); +int ENGINE_set_default_DH(ENGINE *e); +int ENGINE_set_default_RAND(ENGINE *e); +int ENGINE_set_default_ciphers(ENGINE *e); +int ENGINE_set_default_digests(ENGINE *e); +int ENGINE_set_default_pkey_meths(ENGINE *e); +int ENGINE_set_default_pkey_asn1_meths(ENGINE *e); + +/* + * The combination "set" - the flags are bitwise "OR"d from the + * ENGINE_METHOD_*** defines above. As with the "ENGINE_register_complete()" + * function, this function can result in unnecessary static linkage. If your + * application requires only specific functionality, consider using more + * selective functions. + */ +int ENGINE_set_default(ENGINE *e, unsigned int flags); + +void ENGINE_add_conf_module(void); + +/* Deprecated functions ... */ +/* int ENGINE_clear_defaults(void); */ + +/**************************/ +/* DYNAMIC ENGINE SUPPORT */ +/**************************/ + +/* Binary/behaviour compatibility levels */ +# define OSSL_DYNAMIC_VERSION (unsigned long)0x00030000 +/* + * Binary versions older than this are too old for us (whether we're a loader + * or a loadee) + */ +# define OSSL_DYNAMIC_OLDEST (unsigned long)0x00030000 + +/* + * When compiling an ENGINE entirely as an external shared library, loadable + * by the "dynamic" ENGINE, these types are needed. The 'dynamic_fns' + * structure type provides the calling application's (or library's) error + * functionality and memory management function pointers to the loaded + * library. These should be used/set in the loaded library code so that the + * loading application's 'state' will be used/changed in all operations. The + * 'static_state' pointer allows the loaded library to know if it shares the + * same static data as the calling application (or library), and thus whether + * these callbacks need to be set or not. + */ +typedef void *(*dyn_MEM_malloc_fn) (size_t, const char *, int); +typedef void *(*dyn_MEM_realloc_fn) (void *, size_t, const char *, int); +typedef void (*dyn_MEM_free_fn) (void *, const char *, int); +typedef struct st_dynamic_MEM_fns { + dyn_MEM_malloc_fn malloc_fn; + dyn_MEM_realloc_fn realloc_fn; + dyn_MEM_free_fn free_fn; +} dynamic_MEM_fns; +/* + * FIXME: Perhaps the memory and locking code (crypto.h) should declare and + * use these types so we (and any other dependent code) can simplify a bit?? 
+ */ +/* The top-level structure */ +typedef struct st_dynamic_fns { + void *static_state; + dynamic_MEM_fns mem_fns; +} dynamic_fns; + +/* + * The version checking function should be of this prototype. NB: The + * ossl_version value passed in is the OSSL_DYNAMIC_VERSION of the loading + * code. If this function returns zero, it indicates a (potential) version + * incompatibility and the loaded library doesn't believe it can proceed. + * Otherwise, the returned value is the (latest) version supported by the + * loading library. The loader may still decide that the loaded code's + * version is unsatisfactory and could veto the load. The function is + * expected to be implemented with the symbol name "v_check", and a default + * implementation can be fully instantiated with + * IMPLEMENT_DYNAMIC_CHECK_FN(). + */ +typedef unsigned long (*dynamic_v_check_fn) (unsigned long ossl_version); +# define IMPLEMENT_DYNAMIC_CHECK_FN() \ + OPENSSL_EXPORT unsigned long v_check(unsigned long v); \ + OPENSSL_EXPORT unsigned long v_check(unsigned long v) { \ + if (v >= OSSL_DYNAMIC_OLDEST) return OSSL_DYNAMIC_VERSION; \ + return 0; } + +/* + * This function is passed the ENGINE structure to initialise with its own + * function and command settings. It should not adjust the structural or + * functional reference counts. If this function returns zero, (a) the load + * will be aborted, (b) the previous ENGINE state will be memcpy'd back onto + * the structure, and (c) the shared library will be unloaded. So + * implementations should do their own internal cleanup in failure + * circumstances otherwise they could leak. The 'id' parameter, if non-NULL, + * represents the ENGINE id that the loader is looking for. If this is NULL, + * the shared library can choose to return failure or to initialise a + * 'default' ENGINE. If non-NULL, the shared library must initialise only an + * ENGINE matching the passed 'id'. The function is expected to be + * implemented with the symbol name "bind_engine". A standard implementation + * can be instantiated with IMPLEMENT_DYNAMIC_BIND_FN(fn) where the parameter + * 'fn' is a callback function that populates the ENGINE structure and + * returns an int value (zero for failure). 'fn' should have prototype; + * [static] int fn(ENGINE *e, const char *id); + */ +typedef int (*dynamic_bind_engine) (ENGINE *e, const char *id, + const dynamic_fns *fns); +# define IMPLEMENT_DYNAMIC_BIND_FN(fn) \ + OPENSSL_EXPORT \ + int bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns); \ + OPENSSL_EXPORT \ + int bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns) { \ + if (ENGINE_get_static_state() == fns->static_state) goto skip_cbs; \ + CRYPTO_set_mem_functions(fns->mem_fns.malloc_fn, \ + fns->mem_fns.realloc_fn, \ + fns->mem_fns.free_fn); \ + OPENSSL_init_crypto(OPENSSL_INIT_NO_ATEXIT, NULL); \ + skip_cbs: \ + if (!fn(e, id)) return 0; \ + return 1; } + +/* + * If the loading application (or library) and the loaded ENGINE library + * share the same static data (eg. they're both dynamically linked to the + * same libcrypto.so) we need a way to avoid trying to set system callbacks - + * this would fail, and for the same reason that it's unnecessary to try. If + * the loaded ENGINE has (or gets from through the loader) its own copy of + * the libcrypto static data, we will need to set the callbacks. 
The easiest + * way to detect this is to have a function that returns a pointer to some + * static data and let the loading application and loaded ENGINE compare + * their respective values. + */ +void *ENGINE_get_static_state(void); + +# if defined(__OpenBSD__) || defined(__FreeBSD__) || defined(__DragonFly__) +DEPRECATEDIN_1_1_0(void ENGINE_setup_bsd_cryptodev(void)) +# endif + + +# ifdef __cplusplus +} +# endif +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/engineerr.h b/third_party/openssl/uos/sw_64/include/openssl/engineerr.h new file mode 100644 index 00000000..05e84bd2 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/engineerr.h @@ -0,0 +1,111 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_ENGINEERR_H +# define HEADER_ENGINEERR_H + +# ifndef HEADER_SYMHACKS_H +# include +# endif + +# include + +# ifndef OPENSSL_NO_ENGINE + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_ENGINE_strings(void); + +/* + * ENGINE function codes. + */ +# define ENGINE_F_DIGEST_UPDATE 198 +# define ENGINE_F_DYNAMIC_CTRL 180 +# define ENGINE_F_DYNAMIC_GET_DATA_CTX 181 +# define ENGINE_F_DYNAMIC_LOAD 182 +# define ENGINE_F_DYNAMIC_SET_DATA_CTX 183 +# define ENGINE_F_ENGINE_ADD 105 +# define ENGINE_F_ENGINE_BY_ID 106 +# define ENGINE_F_ENGINE_CMD_IS_EXECUTABLE 170 +# define ENGINE_F_ENGINE_CTRL 142 +# define ENGINE_F_ENGINE_CTRL_CMD 178 +# define ENGINE_F_ENGINE_CTRL_CMD_STRING 171 +# define ENGINE_F_ENGINE_FINISH 107 +# define ENGINE_F_ENGINE_GET_CIPHER 185 +# define ENGINE_F_ENGINE_GET_DIGEST 186 +# define ENGINE_F_ENGINE_GET_FIRST 195 +# define ENGINE_F_ENGINE_GET_LAST 196 +# define ENGINE_F_ENGINE_GET_NEXT 115 +# define ENGINE_F_ENGINE_GET_PKEY_ASN1_METH 193 +# define ENGINE_F_ENGINE_GET_PKEY_METH 192 +# define ENGINE_F_ENGINE_GET_PREV 116 +# define ENGINE_F_ENGINE_INIT 119 +# define ENGINE_F_ENGINE_LIST_ADD 120 +# define ENGINE_F_ENGINE_LIST_REMOVE 121 +# define ENGINE_F_ENGINE_LOAD_PRIVATE_KEY 150 +# define ENGINE_F_ENGINE_LOAD_PUBLIC_KEY 151 +# define ENGINE_F_ENGINE_LOAD_SSL_CLIENT_CERT 194 +# define ENGINE_F_ENGINE_NEW 122 +# define ENGINE_F_ENGINE_PKEY_ASN1_FIND_STR 197 +# define ENGINE_F_ENGINE_REMOVE 123 +# define ENGINE_F_ENGINE_SET_DEFAULT_STRING 189 +# define ENGINE_F_ENGINE_SET_ID 129 +# define ENGINE_F_ENGINE_SET_NAME 130 +# define ENGINE_F_ENGINE_TABLE_REGISTER 184 +# define ENGINE_F_ENGINE_UNLOCKED_FINISH 191 +# define ENGINE_F_ENGINE_UP_REF 190 +# define ENGINE_F_INT_CLEANUP_ITEM 199 +# define ENGINE_F_INT_CTRL_HELPER 172 +# define ENGINE_F_INT_ENGINE_CONFIGURE 188 +# define ENGINE_F_INT_ENGINE_MODULE_INIT 187 +# define ENGINE_F_OSSL_HMAC_INIT 200 + +/* + * ENGINE reason codes. 
+ */ +# define ENGINE_R_ALREADY_LOADED 100 +# define ENGINE_R_ARGUMENT_IS_NOT_A_NUMBER 133 +# define ENGINE_R_CMD_NOT_EXECUTABLE 134 +# define ENGINE_R_COMMAND_TAKES_INPUT 135 +# define ENGINE_R_COMMAND_TAKES_NO_INPUT 136 +# define ENGINE_R_CONFLICTING_ENGINE_ID 103 +# define ENGINE_R_CTRL_COMMAND_NOT_IMPLEMENTED 119 +# define ENGINE_R_DSO_FAILURE 104 +# define ENGINE_R_DSO_NOT_FOUND 132 +# define ENGINE_R_ENGINES_SECTION_ERROR 148 +# define ENGINE_R_ENGINE_CONFIGURATION_ERROR 102 +# define ENGINE_R_ENGINE_IS_NOT_IN_LIST 105 +# define ENGINE_R_ENGINE_SECTION_ERROR 149 +# define ENGINE_R_FAILED_LOADING_PRIVATE_KEY 128 +# define ENGINE_R_FAILED_LOADING_PUBLIC_KEY 129 +# define ENGINE_R_FINISH_FAILED 106 +# define ENGINE_R_ID_OR_NAME_MISSING 108 +# define ENGINE_R_INIT_FAILED 109 +# define ENGINE_R_INTERNAL_LIST_ERROR 110 +# define ENGINE_R_INVALID_ARGUMENT 143 +# define ENGINE_R_INVALID_CMD_NAME 137 +# define ENGINE_R_INVALID_CMD_NUMBER 138 +# define ENGINE_R_INVALID_INIT_VALUE 151 +# define ENGINE_R_INVALID_STRING 150 +# define ENGINE_R_NOT_INITIALISED 117 +# define ENGINE_R_NOT_LOADED 112 +# define ENGINE_R_NO_CONTROL_FUNCTION 120 +# define ENGINE_R_NO_INDEX 144 +# define ENGINE_R_NO_LOAD_FUNCTION 125 +# define ENGINE_R_NO_REFERENCE 130 +# define ENGINE_R_NO_SUCH_ENGINE 116 +# define ENGINE_R_UNIMPLEMENTED_CIPHER 146 +# define ENGINE_R_UNIMPLEMENTED_DIGEST 147 +# define ENGINE_R_UNIMPLEMENTED_PUBLIC_KEY_METHOD 101 +# define ENGINE_R_VERSION_INCOMPATIBILITY 145 + +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/err.h b/third_party/openssl/uos/sw_64/include/openssl/err.h new file mode 100644 index 00000000..b49f8812 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/err.h @@ -0,0 +1,274 @@ +/* + * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_ERR_H +# define HEADER_ERR_H + +# include + +# ifndef OPENSSL_NO_STDIO +# include +# include +# endif + +# include +# include +# include + +#ifdef __cplusplus +extern "C" { +#endif + +# ifndef OPENSSL_NO_ERR +# define ERR_PUT_error(a,b,c,d,e) ERR_put_error(a,b,c,d,e) +# else +# define ERR_PUT_error(a,b,c,d,e) ERR_put_error(a,b,c,NULL,0) +# endif + +# include + +# define ERR_TXT_MALLOCED 0x01 +# define ERR_TXT_STRING 0x02 + +# define ERR_FLAG_MARK 0x01 +# define ERR_FLAG_CLEAR 0x02 + +# define ERR_NUM_ERRORS 16 +typedef struct err_state_st { + int err_flags[ERR_NUM_ERRORS]; + unsigned long err_buffer[ERR_NUM_ERRORS]; + char *err_data[ERR_NUM_ERRORS]; + int err_data_flags[ERR_NUM_ERRORS]; + const char *err_file[ERR_NUM_ERRORS]; + int err_line[ERR_NUM_ERRORS]; + int top, bottom; +} ERR_STATE; + +/* library */ +# define ERR_LIB_NONE 1 +# define ERR_LIB_SYS 2 +# define ERR_LIB_BN 3 +# define ERR_LIB_RSA 4 +# define ERR_LIB_DH 5 +# define ERR_LIB_EVP 6 +# define ERR_LIB_BUF 7 +# define ERR_LIB_OBJ 8 +# define ERR_LIB_PEM 9 +# define ERR_LIB_DSA 10 +# define ERR_LIB_X509 11 +/* #define ERR_LIB_METH 12 */ +# define ERR_LIB_ASN1 13 +# define ERR_LIB_CONF 14 +# define ERR_LIB_CRYPTO 15 +# define ERR_LIB_EC 16 +# define ERR_LIB_SSL 20 +/* #define ERR_LIB_SSL23 21 */ +/* #define ERR_LIB_SSL2 22 */ +/* #define ERR_LIB_SSL3 23 */ +/* #define ERR_LIB_RSAREF 30 */ +/* #define ERR_LIB_PROXY 31 */ +# define ERR_LIB_BIO 32 +# define ERR_LIB_PKCS7 33 +# define ERR_LIB_X509V3 34 +# define ERR_LIB_PKCS12 35 +# define ERR_LIB_RAND 36 +# define ERR_LIB_DSO 37 +# define ERR_LIB_ENGINE 38 +# define ERR_LIB_OCSP 39 +# define ERR_LIB_UI 40 +# define ERR_LIB_COMP 41 +# define ERR_LIB_ECDSA 42 +# define ERR_LIB_ECDH 43 +# define ERR_LIB_OSSL_STORE 44 +# define ERR_LIB_FIPS 45 +# define ERR_LIB_CMS 46 +# define ERR_LIB_TS 47 +# define ERR_LIB_HMAC 48 +/* # define ERR_LIB_JPAKE 49 */ +# define ERR_LIB_CT 50 +# define ERR_LIB_ASYNC 51 +# define ERR_LIB_KDF 52 +# define ERR_LIB_SM2 53 + +# define ERR_LIB_USER 128 + +# define SYSerr(f,r) ERR_PUT_error(ERR_LIB_SYS,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define BNerr(f,r) ERR_PUT_error(ERR_LIB_BN,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define RSAerr(f,r) ERR_PUT_error(ERR_LIB_RSA,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define DHerr(f,r) ERR_PUT_error(ERR_LIB_DH,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define EVPerr(f,r) ERR_PUT_error(ERR_LIB_EVP,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define BUFerr(f,r) ERR_PUT_error(ERR_LIB_BUF,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define OBJerr(f,r) ERR_PUT_error(ERR_LIB_OBJ,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define PEMerr(f,r) ERR_PUT_error(ERR_LIB_PEM,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define DSAerr(f,r) ERR_PUT_error(ERR_LIB_DSA,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define X509err(f,r) ERR_PUT_error(ERR_LIB_X509,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define ASN1err(f,r) ERR_PUT_error(ERR_LIB_ASN1,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define CONFerr(f,r) ERR_PUT_error(ERR_LIB_CONF,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define CRYPTOerr(f,r) ERR_PUT_error(ERR_LIB_CRYPTO,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define ECerr(f,r) ERR_PUT_error(ERR_LIB_EC,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define SSLerr(f,r) ERR_PUT_error(ERR_LIB_SSL,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define BIOerr(f,r) ERR_PUT_error(ERR_LIB_BIO,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define PKCS7err(f,r) 
ERR_PUT_error(ERR_LIB_PKCS7,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define X509V3err(f,r) ERR_PUT_error(ERR_LIB_X509V3,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define PKCS12err(f,r) ERR_PUT_error(ERR_LIB_PKCS12,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define RANDerr(f,r) ERR_PUT_error(ERR_LIB_RAND,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define DSOerr(f,r) ERR_PUT_error(ERR_LIB_DSO,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define ENGINEerr(f,r) ERR_PUT_error(ERR_LIB_ENGINE,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define OCSPerr(f,r) ERR_PUT_error(ERR_LIB_OCSP,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define UIerr(f,r) ERR_PUT_error(ERR_LIB_UI,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define COMPerr(f,r) ERR_PUT_error(ERR_LIB_COMP,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define ECDSAerr(f,r) ERR_PUT_error(ERR_LIB_ECDSA,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define ECDHerr(f,r) ERR_PUT_error(ERR_LIB_ECDH,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define OSSL_STOREerr(f,r) ERR_PUT_error(ERR_LIB_OSSL_STORE,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define FIPSerr(f,r) ERR_PUT_error(ERR_LIB_FIPS,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define CMSerr(f,r) ERR_PUT_error(ERR_LIB_CMS,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define TSerr(f,r) ERR_PUT_error(ERR_LIB_TS,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define HMACerr(f,r) ERR_PUT_error(ERR_LIB_HMAC,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define CTerr(f,r) ERR_PUT_error(ERR_LIB_CT,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define ASYNCerr(f,r) ERR_PUT_error(ERR_LIB_ASYNC,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define KDFerr(f,r) ERR_PUT_error(ERR_LIB_KDF,(f),(r),OPENSSL_FILE,OPENSSL_LINE) +# define SM2err(f,r) ERR_PUT_error(ERR_LIB_SM2,(f),(r),OPENSSL_FILE,OPENSSL_LINE) + +# define ERR_PACK(l,f,r) ( \ + (((unsigned int)(l) & 0x0FF) << 24L) | \ + (((unsigned int)(f) & 0xFFF) << 12L) | \ + (((unsigned int)(r) & 0xFFF) ) ) +# define ERR_GET_LIB(l) (int)(((l) >> 24L) & 0x0FFL) +# define ERR_GET_FUNC(l) (int)(((l) >> 12L) & 0xFFFL) +# define ERR_GET_REASON(l) (int)( (l) & 0xFFFL) +# define ERR_FATAL_ERROR(l) (int)( (l) & ERR_R_FATAL) + +/* OS functions */ +# define SYS_F_FOPEN 1 +# define SYS_F_CONNECT 2 +# define SYS_F_GETSERVBYNAME 3 +# define SYS_F_SOCKET 4 +# define SYS_F_IOCTLSOCKET 5 +# define SYS_F_BIND 6 +# define SYS_F_LISTEN 7 +# define SYS_F_ACCEPT 8 +# define SYS_F_WSASTARTUP 9/* Winsock stuff */ +# define SYS_F_OPENDIR 10 +# define SYS_F_FREAD 11 +# define SYS_F_GETADDRINFO 12 +# define SYS_F_GETNAMEINFO 13 +# define SYS_F_SETSOCKOPT 14 +# define SYS_F_GETSOCKOPT 15 +# define SYS_F_GETSOCKNAME 16 +# define SYS_F_GETHOSTBYNAME 17 +# define SYS_F_FFLUSH 18 +# define SYS_F_OPEN 19 +# define SYS_F_CLOSE 20 +# define SYS_F_IOCTL 21 +# define SYS_F_STAT 22 +# define SYS_F_FCNTL 23 +# define SYS_F_FSTAT 24 + +/* reasons */ +# define ERR_R_SYS_LIB ERR_LIB_SYS/* 2 */ +# define ERR_R_BN_LIB ERR_LIB_BN/* 3 */ +# define ERR_R_RSA_LIB ERR_LIB_RSA/* 4 */ +# define ERR_R_DH_LIB ERR_LIB_DH/* 5 */ +# define ERR_R_EVP_LIB ERR_LIB_EVP/* 6 */ +# define ERR_R_BUF_LIB ERR_LIB_BUF/* 7 */ +# define ERR_R_OBJ_LIB ERR_LIB_OBJ/* 8 */ +# define ERR_R_PEM_LIB ERR_LIB_PEM/* 9 */ +# define ERR_R_DSA_LIB ERR_LIB_DSA/* 10 */ +# define ERR_R_X509_LIB ERR_LIB_X509/* 11 */ +# define ERR_R_ASN1_LIB ERR_LIB_ASN1/* 13 */ +# define ERR_R_EC_LIB ERR_LIB_EC/* 16 */ +# define ERR_R_BIO_LIB ERR_LIB_BIO/* 32 */ +# define ERR_R_PKCS7_LIB ERR_LIB_PKCS7/* 33 */ +# define ERR_R_X509V3_LIB ERR_LIB_X509V3/* 34 */ +# define ERR_R_ENGINE_LIB ERR_LIB_ENGINE/* 38 */ +# define ERR_R_UI_LIB ERR_LIB_UI/* 40 */ +# define ERR_R_ECDSA_LIB ERR_LIB_ECDSA/* 
42 */ +# define ERR_R_OSSL_STORE_LIB ERR_LIB_OSSL_STORE/* 44 */ + +# define ERR_R_NESTED_ASN1_ERROR 58 +# define ERR_R_MISSING_ASN1_EOS 63 + +/* fatal error */ +# define ERR_R_FATAL 64 +# define ERR_R_MALLOC_FAILURE (1|ERR_R_FATAL) +# define ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED (2|ERR_R_FATAL) +# define ERR_R_PASSED_NULL_PARAMETER (3|ERR_R_FATAL) +# define ERR_R_INTERNAL_ERROR (4|ERR_R_FATAL) +# define ERR_R_DISABLED (5|ERR_R_FATAL) +# define ERR_R_INIT_FAIL (6|ERR_R_FATAL) +# define ERR_R_PASSED_INVALID_ARGUMENT (7) +# define ERR_R_OPERATION_FAIL (8|ERR_R_FATAL) + +/* + * 99 is the maximum possible ERR_R_... code, higher values are reserved for + * the individual libraries + */ + +typedef struct ERR_string_data_st { + unsigned long error; + const char *string; +} ERR_STRING_DATA; + +DEFINE_LHASH_OF(ERR_STRING_DATA); + +void ERR_put_error(int lib, int func, int reason, const char *file, int line); +void ERR_set_error_data(char *data, int flags); + +unsigned long ERR_get_error(void); +unsigned long ERR_get_error_line(const char **file, int *line); +unsigned long ERR_get_error_line_data(const char **file, int *line, + const char **data, int *flags); +unsigned long ERR_peek_error(void); +unsigned long ERR_peek_error_line(const char **file, int *line); +unsigned long ERR_peek_error_line_data(const char **file, int *line, + const char **data, int *flags); +unsigned long ERR_peek_last_error(void); +unsigned long ERR_peek_last_error_line(const char **file, int *line); +unsigned long ERR_peek_last_error_line_data(const char **file, int *line, + const char **data, int *flags); +void ERR_clear_error(void); +char *ERR_error_string(unsigned long e, char *buf); +void ERR_error_string_n(unsigned long e, char *buf, size_t len); +const char *ERR_lib_error_string(unsigned long e); +const char *ERR_func_error_string(unsigned long e); +const char *ERR_reason_error_string(unsigned long e); +void ERR_print_errors_cb(int (*cb) (const char *str, size_t len, void *u), + void *u); +# ifndef OPENSSL_NO_STDIO +void ERR_print_errors_fp(FILE *fp); +# endif +void ERR_print_errors(BIO *bp); +void ERR_add_error_data(int num, ...); +void ERR_add_error_vdata(int num, va_list args); +int ERR_load_strings(int lib, ERR_STRING_DATA *str); +int ERR_load_strings_const(const ERR_STRING_DATA *str); +int ERR_unload_strings(int lib, ERR_STRING_DATA *str); +int ERR_load_ERR_strings(void); + +#if OPENSSL_API_COMPAT < 0x10100000L +# define ERR_load_crypto_strings() \ + OPENSSL_init_crypto(OPENSSL_INIT_LOAD_CRYPTO_STRINGS, NULL) +# define ERR_free_strings() while(0) continue +#endif + +DEPRECATEDIN_1_1_0(void ERR_remove_thread_state(void *)) +DEPRECATEDIN_1_0_0(void ERR_remove_state(unsigned long pid)) +ERR_STATE *ERR_get_state(void); + +int ERR_get_next_error_library(void); + +int ERR_set_mark(void); +int ERR_pop_to_mark(void); +int ERR_clear_last_mark(void); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/evp.h b/third_party/openssl/uos/sw_64/include/openssl/evp.h new file mode 100644 index 00000000..a411f3f2 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/evp.h @@ -0,0 +1,1666 @@ +/* + * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_ENVELOPE_H +# define HEADER_ENVELOPE_H + +# include +# include +# include +# include +# include + +# define EVP_MAX_MD_SIZE 64/* longest known is SHA512 */ +# define EVP_MAX_KEY_LENGTH 64 +# define EVP_MAX_IV_LENGTH 16 +# define EVP_MAX_BLOCK_LENGTH 32 + +# define PKCS5_SALT_LEN 8 +/* Default PKCS#5 iteration count */ +# define PKCS5_DEFAULT_ITER 2048 + +# include + +# define EVP_PK_RSA 0x0001 +# define EVP_PK_DSA 0x0002 +# define EVP_PK_DH 0x0004 +# define EVP_PK_EC 0x0008 +# define EVP_PKT_SIGN 0x0010 +# define EVP_PKT_ENC 0x0020 +# define EVP_PKT_EXCH 0x0040 +# define EVP_PKS_RSA 0x0100 +# define EVP_PKS_DSA 0x0200 +# define EVP_PKS_EC 0x0400 + +# define EVP_PKEY_NONE NID_undef +# define EVP_PKEY_RSA NID_rsaEncryption +# define EVP_PKEY_RSA2 NID_rsa +# define EVP_PKEY_RSA_PSS NID_rsassaPss +# define EVP_PKEY_DSA NID_dsa +# define EVP_PKEY_DSA1 NID_dsa_2 +# define EVP_PKEY_DSA2 NID_dsaWithSHA +# define EVP_PKEY_DSA3 NID_dsaWithSHA1 +# define EVP_PKEY_DSA4 NID_dsaWithSHA1_2 +# define EVP_PKEY_DH NID_dhKeyAgreement +# define EVP_PKEY_DHX NID_dhpublicnumber +# define EVP_PKEY_EC NID_X9_62_id_ecPublicKey +# define EVP_PKEY_SM2 NID_sm2 +# define EVP_PKEY_HMAC NID_hmac +# define EVP_PKEY_CMAC NID_cmac +# define EVP_PKEY_SCRYPT NID_id_scrypt +# define EVP_PKEY_TLS1_PRF NID_tls1_prf +# define EVP_PKEY_HKDF NID_hkdf +# define EVP_PKEY_POLY1305 NID_poly1305 +# define EVP_PKEY_SIPHASH NID_siphash +# define EVP_PKEY_X25519 NID_X25519 +# define EVP_PKEY_ED25519 NID_ED25519 +# define EVP_PKEY_X448 NID_X448 +# define EVP_PKEY_ED448 NID_ED448 + +#ifdef __cplusplus +extern "C" { +#endif + +# define EVP_PKEY_MO_SIGN 0x0001 +# define EVP_PKEY_MO_VERIFY 0x0002 +# define EVP_PKEY_MO_ENCRYPT 0x0004 +# define EVP_PKEY_MO_DECRYPT 0x0008 + +# ifndef EVP_MD +EVP_MD *EVP_MD_meth_new(int md_type, int pkey_type); +EVP_MD *EVP_MD_meth_dup(const EVP_MD *md); +void EVP_MD_meth_free(EVP_MD *md); + +int EVP_MD_meth_set_input_blocksize(EVP_MD *md, int blocksize); +int EVP_MD_meth_set_result_size(EVP_MD *md, int resultsize); +int EVP_MD_meth_set_app_datasize(EVP_MD *md, int datasize); +int EVP_MD_meth_set_flags(EVP_MD *md, unsigned long flags); +int EVP_MD_meth_set_init(EVP_MD *md, int (*init)(EVP_MD_CTX *ctx)); +int EVP_MD_meth_set_update(EVP_MD *md, int (*update)(EVP_MD_CTX *ctx, + const void *data, + size_t count)); +int EVP_MD_meth_set_final(EVP_MD *md, int (*final)(EVP_MD_CTX *ctx, + unsigned char *md)); +int EVP_MD_meth_set_copy(EVP_MD *md, int (*copy)(EVP_MD_CTX *to, + const EVP_MD_CTX *from)); +int EVP_MD_meth_set_cleanup(EVP_MD *md, int (*cleanup)(EVP_MD_CTX *ctx)); +int EVP_MD_meth_set_ctrl(EVP_MD *md, int (*ctrl)(EVP_MD_CTX *ctx, int cmd, + int p1, void *p2)); + +int EVP_MD_meth_get_input_blocksize(const EVP_MD *md); +int EVP_MD_meth_get_result_size(const EVP_MD *md); +int EVP_MD_meth_get_app_datasize(const EVP_MD *md); +unsigned long EVP_MD_meth_get_flags(const EVP_MD *md); +int (*EVP_MD_meth_get_init(const EVP_MD *md))(EVP_MD_CTX *ctx); +int (*EVP_MD_meth_get_update(const EVP_MD *md))(EVP_MD_CTX *ctx, + const void *data, + size_t count); +int (*EVP_MD_meth_get_final(const EVP_MD *md))(EVP_MD_CTX *ctx, + unsigned char *md); +int (*EVP_MD_meth_get_copy(const EVP_MD *md))(EVP_MD_CTX *to, + const EVP_MD_CTX *from); +int (*EVP_MD_meth_get_cleanup(const EVP_MD *md))(EVP_MD_CTX *ctx); +int (*EVP_MD_meth_get_ctrl(const EVP_MD *md))(EVP_MD_CTX *ctx, int cmd, + int p1, 
void *p2); + +/* digest can only handle a single block */ +# define EVP_MD_FLAG_ONESHOT 0x0001 + +/* digest is extensible-output function, XOF */ +# define EVP_MD_FLAG_XOF 0x0002 + +/* DigestAlgorithmIdentifier flags... */ + +# define EVP_MD_FLAG_DIGALGID_MASK 0x0018 + +/* NULL or absent parameter accepted. Use NULL */ + +# define EVP_MD_FLAG_DIGALGID_NULL 0x0000 + +/* NULL or absent parameter accepted. Use NULL for PKCS#1 otherwise absent */ + +# define EVP_MD_FLAG_DIGALGID_ABSENT 0x0008 + +/* Custom handling via ctrl */ + +# define EVP_MD_FLAG_DIGALGID_CUSTOM 0x0018 + +/* Note if suitable for use in FIPS mode */ +# define EVP_MD_FLAG_FIPS 0x0400 + +/* Digest ctrls */ + +# define EVP_MD_CTRL_DIGALGID 0x1 +# define EVP_MD_CTRL_MICALG 0x2 +# define EVP_MD_CTRL_XOF_LEN 0x3 + +/* Minimum Algorithm specific ctrl value */ + +# define EVP_MD_CTRL_ALG_CTRL 0x1000 + +# endif /* !EVP_MD */ + +/* values for EVP_MD_CTX flags */ + +# define EVP_MD_CTX_FLAG_ONESHOT 0x0001/* digest update will be + * called once only */ +# define EVP_MD_CTX_FLAG_CLEANED 0x0002/* context has already been + * cleaned */ +# define EVP_MD_CTX_FLAG_REUSE 0x0004/* Don't free up ctx->md_data + * in EVP_MD_CTX_reset */ +/* + * FIPS and pad options are ignored in 1.0.0, definitions are here so we + * don't accidentally reuse the values for other purposes. + */ + +# define EVP_MD_CTX_FLAG_NON_FIPS_ALLOW 0x0008/* Allow use of non FIPS + * digest in FIPS mode */ + +/* + * The following PAD options are also currently ignored in 1.0.0, digest + * parameters are handled through EVP_DigestSign*() and EVP_DigestVerify*() + * instead. + */ +# define EVP_MD_CTX_FLAG_PAD_MASK 0xF0/* RSA mode to use */ +# define EVP_MD_CTX_FLAG_PAD_PKCS1 0x00/* PKCS#1 v1.5 mode */ +# define EVP_MD_CTX_FLAG_PAD_X931 0x10/* X9.31 mode */ +# define EVP_MD_CTX_FLAG_PAD_PSS 0x20/* PSS mode */ + +# define EVP_MD_CTX_FLAG_NO_INIT 0x0100/* Don't initialize md_data */ +/* + * Some functions such as EVP_DigestSign only finalise copies of internal + * contexts so additional data can be included after the finalisation call. + * This is inefficient if this functionality is not required: it is disabled + * if the following flag is set. 
+ */ +# define EVP_MD_CTX_FLAG_FINALISE 0x0200 +/* NOTE: 0x0400 is reserved for internal usage */ + +EVP_CIPHER *EVP_CIPHER_meth_new(int cipher_type, int block_size, int key_len); +EVP_CIPHER *EVP_CIPHER_meth_dup(const EVP_CIPHER *cipher); +void EVP_CIPHER_meth_free(EVP_CIPHER *cipher); + +int EVP_CIPHER_meth_set_iv_length(EVP_CIPHER *cipher, int iv_len); +int EVP_CIPHER_meth_set_flags(EVP_CIPHER *cipher, unsigned long flags); +int EVP_CIPHER_meth_set_impl_ctx_size(EVP_CIPHER *cipher, int ctx_size); +int EVP_CIPHER_meth_set_init(EVP_CIPHER *cipher, + int (*init) (EVP_CIPHER_CTX *ctx, + const unsigned char *key, + const unsigned char *iv, + int enc)); +int EVP_CIPHER_meth_set_do_cipher(EVP_CIPHER *cipher, + int (*do_cipher) (EVP_CIPHER_CTX *ctx, + unsigned char *out, + const unsigned char *in, + size_t inl)); +int EVP_CIPHER_meth_set_cleanup(EVP_CIPHER *cipher, + int (*cleanup) (EVP_CIPHER_CTX *)); +int EVP_CIPHER_meth_set_set_asn1_params(EVP_CIPHER *cipher, + int (*set_asn1_parameters) (EVP_CIPHER_CTX *, + ASN1_TYPE *)); +int EVP_CIPHER_meth_set_get_asn1_params(EVP_CIPHER *cipher, + int (*get_asn1_parameters) (EVP_CIPHER_CTX *, + ASN1_TYPE *)); +int EVP_CIPHER_meth_set_ctrl(EVP_CIPHER *cipher, + int (*ctrl) (EVP_CIPHER_CTX *, int type, + int arg, void *ptr)); + +int (*EVP_CIPHER_meth_get_init(const EVP_CIPHER *cipher))(EVP_CIPHER_CTX *ctx, + const unsigned char *key, + const unsigned char *iv, + int enc); +int (*EVP_CIPHER_meth_get_do_cipher(const EVP_CIPHER *cipher))(EVP_CIPHER_CTX *ctx, + unsigned char *out, + const unsigned char *in, + size_t inl); +int (*EVP_CIPHER_meth_get_cleanup(const EVP_CIPHER *cipher))(EVP_CIPHER_CTX *); +int (*EVP_CIPHER_meth_get_set_asn1_params(const EVP_CIPHER *cipher))(EVP_CIPHER_CTX *, + ASN1_TYPE *); +int (*EVP_CIPHER_meth_get_get_asn1_params(const EVP_CIPHER *cipher))(EVP_CIPHER_CTX *, + ASN1_TYPE *); +int (*EVP_CIPHER_meth_get_ctrl(const EVP_CIPHER *cipher))(EVP_CIPHER_CTX *, + int type, int arg, + void *ptr); + +/* Values for cipher flags */ + +/* Modes for ciphers */ + +# define EVP_CIPH_STREAM_CIPHER 0x0 +# define EVP_CIPH_ECB_MODE 0x1 +# define EVP_CIPH_CBC_MODE 0x2 +# define EVP_CIPH_CFB_MODE 0x3 +# define EVP_CIPH_OFB_MODE 0x4 +# define EVP_CIPH_CTR_MODE 0x5 +# define EVP_CIPH_GCM_MODE 0x6 +# define EVP_CIPH_CCM_MODE 0x7 +# define EVP_CIPH_XTS_MODE 0x10001 +# define EVP_CIPH_WRAP_MODE 0x10002 +# define EVP_CIPH_OCB_MODE 0x10003 +# define EVP_CIPH_MODE 0xF0007 +/* Set if variable length cipher */ +# define EVP_CIPH_VARIABLE_LENGTH 0x8 +/* Set if the iv handling should be done by the cipher itself */ +# define EVP_CIPH_CUSTOM_IV 0x10 +/* Set if the cipher's init() function should be called if key is NULL */ +# define EVP_CIPH_ALWAYS_CALL_INIT 0x20 +/* Call ctrl() to init cipher parameters */ +# define EVP_CIPH_CTRL_INIT 0x40 +/* Don't use standard key length function */ +# define EVP_CIPH_CUSTOM_KEY_LENGTH 0x80 +/* Don't use standard block padding */ +# define EVP_CIPH_NO_PADDING 0x100 +/* cipher handles random key generation */ +# define EVP_CIPH_RAND_KEY 0x200 +/* cipher has its own additional copying logic */ +# define EVP_CIPH_CUSTOM_COPY 0x400 +/* Don't use standard iv length function */ +# define EVP_CIPH_CUSTOM_IV_LENGTH 0x800 +/* Allow use default ASN1 get/set iv */ +# define EVP_CIPH_FLAG_DEFAULT_ASN1 0x1000 +/* Buffer length in bits not bytes: CFB1 mode only */ +# define EVP_CIPH_FLAG_LENGTH_BITS 0x2000 +/* Note if suitable for use in FIPS mode */ +# define EVP_CIPH_FLAG_FIPS 0x4000 +/* Allow non FIPS cipher in FIPS mode */ +# define 
EVP_CIPH_FLAG_NON_FIPS_ALLOW 0x8000 +/* + * Cipher handles any and all padding logic as well as finalisation. + */ +# define EVP_CIPH_FLAG_CUSTOM_CIPHER 0x100000 +# define EVP_CIPH_FLAG_AEAD_CIPHER 0x200000 +# define EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK 0x400000 +/* Cipher can handle pipeline operations */ +# define EVP_CIPH_FLAG_PIPELINE 0X800000 + +/* + * Cipher context flag to indicate we can handle wrap mode: if allowed in + * older applications it could overflow buffers. + */ + +# define EVP_CIPHER_CTX_FLAG_WRAP_ALLOW 0x1 + +/* ctrl() values */ + +# define EVP_CTRL_INIT 0x0 +# define EVP_CTRL_SET_KEY_LENGTH 0x1 +# define EVP_CTRL_GET_RC2_KEY_BITS 0x2 +# define EVP_CTRL_SET_RC2_KEY_BITS 0x3 +# define EVP_CTRL_GET_RC5_ROUNDS 0x4 +# define EVP_CTRL_SET_RC5_ROUNDS 0x5 +# define EVP_CTRL_RAND_KEY 0x6 +# define EVP_CTRL_PBE_PRF_NID 0x7 +# define EVP_CTRL_COPY 0x8 +# define EVP_CTRL_AEAD_SET_IVLEN 0x9 +# define EVP_CTRL_AEAD_GET_TAG 0x10 +# define EVP_CTRL_AEAD_SET_TAG 0x11 +# define EVP_CTRL_AEAD_SET_IV_FIXED 0x12 +# define EVP_CTRL_GCM_SET_IVLEN EVP_CTRL_AEAD_SET_IVLEN +# define EVP_CTRL_GCM_GET_TAG EVP_CTRL_AEAD_GET_TAG +# define EVP_CTRL_GCM_SET_TAG EVP_CTRL_AEAD_SET_TAG +# define EVP_CTRL_GCM_SET_IV_FIXED EVP_CTRL_AEAD_SET_IV_FIXED +# define EVP_CTRL_GCM_IV_GEN 0x13 +# define EVP_CTRL_CCM_SET_IVLEN EVP_CTRL_AEAD_SET_IVLEN +# define EVP_CTRL_CCM_GET_TAG EVP_CTRL_AEAD_GET_TAG +# define EVP_CTRL_CCM_SET_TAG EVP_CTRL_AEAD_SET_TAG +# define EVP_CTRL_CCM_SET_IV_FIXED EVP_CTRL_AEAD_SET_IV_FIXED +# define EVP_CTRL_CCM_SET_L 0x14 +# define EVP_CTRL_CCM_SET_MSGLEN 0x15 +/* + * AEAD cipher deduces payload length and returns number of bytes required to + * store MAC and eventual padding. Subsequent call to EVP_Cipher even + * appends/verifies MAC. + */ +# define EVP_CTRL_AEAD_TLS1_AAD 0x16 +/* Used by composite AEAD ciphers, no-op in GCM, CCM... 
*/ +# define EVP_CTRL_AEAD_SET_MAC_KEY 0x17 +/* Set the GCM invocation field, decrypt only */ +# define EVP_CTRL_GCM_SET_IV_INV 0x18 + +# define EVP_CTRL_TLS1_1_MULTIBLOCK_AAD 0x19 +# define EVP_CTRL_TLS1_1_MULTIBLOCK_ENCRYPT 0x1a +# define EVP_CTRL_TLS1_1_MULTIBLOCK_DECRYPT 0x1b +# define EVP_CTRL_TLS1_1_MULTIBLOCK_MAX_BUFSIZE 0x1c + +# define EVP_CTRL_SSL3_MASTER_SECRET 0x1d + +/* EVP_CTRL_SET_SBOX takes the char * specifying S-boxes */ +# define EVP_CTRL_SET_SBOX 0x1e +/* + * EVP_CTRL_SBOX_USED takes a 'size_t' and 'char *', pointing at a + * pre-allocated buffer with specified size + */ +# define EVP_CTRL_SBOX_USED 0x1f +/* EVP_CTRL_KEY_MESH takes 'size_t' number of bytes to mesh the key after, + * 0 switches meshing off + */ +# define EVP_CTRL_KEY_MESH 0x20 +/* EVP_CTRL_BLOCK_PADDING_MODE takes the padding mode */ +# define EVP_CTRL_BLOCK_PADDING_MODE 0x21 + +/* Set the output buffers to use for a pipelined operation */ +# define EVP_CTRL_SET_PIPELINE_OUTPUT_BUFS 0x22 +/* Set the input buffers to use for a pipelined operation */ +# define EVP_CTRL_SET_PIPELINE_INPUT_BUFS 0x23 +/* Set the input buffer lengths to use for a pipelined operation */ +# define EVP_CTRL_SET_PIPELINE_INPUT_LENS 0x24 + +# define EVP_CTRL_GET_IVLEN 0x25 + +/* Padding modes */ +#define EVP_PADDING_PKCS7 1 +#define EVP_PADDING_ISO7816_4 2 +#define EVP_PADDING_ANSI923 3 +#define EVP_PADDING_ISO10126 4 +#define EVP_PADDING_ZERO 5 + +/* RFC 5246 defines additional data to be 13 bytes in length */ +# define EVP_AEAD_TLS1_AAD_LEN 13 + +typedef struct { + unsigned char *out; + const unsigned char *inp; + size_t len; + unsigned int interleave; +} EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM; + +/* GCM TLS constants */ +/* Length of fixed part of IV derived from PRF */ +# define EVP_GCM_TLS_FIXED_IV_LEN 4 +/* Length of explicit part of IV part of TLS records */ +# define EVP_GCM_TLS_EXPLICIT_IV_LEN 8 +/* Length of tag for TLS */ +# define EVP_GCM_TLS_TAG_LEN 16 + +/* CCM TLS constants */ +/* Length of fixed part of IV derived from PRF */ +# define EVP_CCM_TLS_FIXED_IV_LEN 4 +/* Length of explicit part of IV part of TLS records */ +# define EVP_CCM_TLS_EXPLICIT_IV_LEN 8 +/* Total length of CCM IV length for TLS */ +# define EVP_CCM_TLS_IV_LEN 12 +/* Length of tag for TLS */ +# define EVP_CCM_TLS_TAG_LEN 16 +/* Length of CCM8 tag for TLS */ +# define EVP_CCM8_TLS_TAG_LEN 8 + +/* Length of tag for TLS */ +# define EVP_CHACHAPOLY_TLS_TAG_LEN 16 + +typedef struct evp_cipher_info_st { + const EVP_CIPHER *cipher; + unsigned char iv[EVP_MAX_IV_LENGTH]; +} EVP_CIPHER_INFO; + + +/* Password based encryption function */ +typedef int (EVP_PBE_KEYGEN) (EVP_CIPHER_CTX *ctx, const char *pass, + int passlen, ASN1_TYPE *param, + const EVP_CIPHER *cipher, const EVP_MD *md, + int en_de); + +# ifndef OPENSSL_NO_RSA +# define EVP_PKEY_assign_RSA(pkey,rsa) EVP_PKEY_assign((pkey),EVP_PKEY_RSA,\ + (char *)(rsa)) +# endif + +# ifndef OPENSSL_NO_DSA +# define EVP_PKEY_assign_DSA(pkey,dsa) EVP_PKEY_assign((pkey),EVP_PKEY_DSA,\ + (char *)(dsa)) +# endif + +# ifndef OPENSSL_NO_DH +# define EVP_PKEY_assign_DH(pkey,dh) EVP_PKEY_assign((pkey),EVP_PKEY_DH,\ + (char *)(dh)) +# endif + +# ifndef OPENSSL_NO_EC +# define EVP_PKEY_assign_EC_KEY(pkey,eckey) EVP_PKEY_assign((pkey),EVP_PKEY_EC,\ + (char *)(eckey)) +# endif +# ifndef OPENSSL_NO_SIPHASH +# define EVP_PKEY_assign_SIPHASH(pkey,shkey) EVP_PKEY_assign((pkey),EVP_PKEY_SIPHASH,\ + (char *)(shkey)) +# endif + +# ifndef OPENSSL_NO_POLY1305 +# define EVP_PKEY_assign_POLY1305(pkey,polykey) 
EVP_PKEY_assign((pkey),EVP_PKEY_POLY1305,\ + (char *)(polykey)) +# endif + +/* Add some extra combinations */ +# define EVP_get_digestbynid(a) EVP_get_digestbyname(OBJ_nid2sn(a)) +# define EVP_get_digestbyobj(a) EVP_get_digestbynid(OBJ_obj2nid(a)) +# define EVP_get_cipherbynid(a) EVP_get_cipherbyname(OBJ_nid2sn(a)) +# define EVP_get_cipherbyobj(a) EVP_get_cipherbynid(OBJ_obj2nid(a)) + +int EVP_MD_type(const EVP_MD *md); +# define EVP_MD_nid(e) EVP_MD_type(e) +# define EVP_MD_name(e) OBJ_nid2sn(EVP_MD_nid(e)) +int EVP_MD_pkey_type(const EVP_MD *md); +int EVP_MD_size(const EVP_MD *md); +int EVP_MD_block_size(const EVP_MD *md); +unsigned long EVP_MD_flags(const EVP_MD *md); + +const EVP_MD *EVP_MD_CTX_md(const EVP_MD_CTX *ctx); +int (*EVP_MD_CTX_update_fn(EVP_MD_CTX *ctx))(EVP_MD_CTX *ctx, + const void *data, size_t count); +void EVP_MD_CTX_set_update_fn(EVP_MD_CTX *ctx, + int (*update) (EVP_MD_CTX *ctx, + const void *data, size_t count)); +# define EVP_MD_CTX_size(e) EVP_MD_size(EVP_MD_CTX_md(e)) +# define EVP_MD_CTX_block_size(e) EVP_MD_block_size(EVP_MD_CTX_md(e)) +# define EVP_MD_CTX_type(e) EVP_MD_type(EVP_MD_CTX_md(e)) +EVP_PKEY_CTX *EVP_MD_CTX_pkey_ctx(const EVP_MD_CTX *ctx); +void EVP_MD_CTX_set_pkey_ctx(EVP_MD_CTX *ctx, EVP_PKEY_CTX *pctx); +void *EVP_MD_CTX_md_data(const EVP_MD_CTX *ctx); + +int EVP_CIPHER_nid(const EVP_CIPHER *cipher); +# define EVP_CIPHER_name(e) OBJ_nid2sn(EVP_CIPHER_nid(e)) +int EVP_CIPHER_block_size(const EVP_CIPHER *cipher); +int EVP_CIPHER_impl_ctx_size(const EVP_CIPHER *cipher); +int EVP_CIPHER_key_length(const EVP_CIPHER *cipher); +int EVP_CIPHER_iv_length(const EVP_CIPHER *cipher); +unsigned long EVP_CIPHER_flags(const EVP_CIPHER *cipher); +# define EVP_CIPHER_mode(e) (EVP_CIPHER_flags(e) & EVP_CIPH_MODE) + +const EVP_CIPHER *EVP_CIPHER_CTX_cipher(const EVP_CIPHER_CTX *ctx); +int EVP_CIPHER_CTX_encrypting(const EVP_CIPHER_CTX *ctx); +int EVP_CIPHER_CTX_nid(const EVP_CIPHER_CTX *ctx); +int EVP_CIPHER_CTX_block_size(const EVP_CIPHER_CTX *ctx); +int EVP_CIPHER_CTX_key_length(const EVP_CIPHER_CTX *ctx); +int EVP_CIPHER_CTX_iv_length(const EVP_CIPHER_CTX *ctx); +const unsigned char *EVP_CIPHER_CTX_iv(const EVP_CIPHER_CTX *ctx); +const unsigned char *EVP_CIPHER_CTX_original_iv(const EVP_CIPHER_CTX *ctx); +unsigned char *EVP_CIPHER_CTX_iv_noconst(EVP_CIPHER_CTX *ctx); +unsigned char *EVP_CIPHER_CTX_buf_noconst(EVP_CIPHER_CTX *ctx); +int EVP_CIPHER_CTX_num(const EVP_CIPHER_CTX *ctx); +void EVP_CIPHER_CTX_set_num(EVP_CIPHER_CTX *ctx, int num); +int EVP_CIPHER_CTX_copy(EVP_CIPHER_CTX *out, const EVP_CIPHER_CTX *in); +void *EVP_CIPHER_CTX_get_app_data(const EVP_CIPHER_CTX *ctx); +void EVP_CIPHER_CTX_set_app_data(EVP_CIPHER_CTX *ctx, void *data); +void *EVP_CIPHER_CTX_get_cipher_data(const EVP_CIPHER_CTX *ctx); +void *EVP_CIPHER_CTX_set_cipher_data(EVP_CIPHER_CTX *ctx, void *cipher_data); +# define EVP_CIPHER_CTX_type(c) EVP_CIPHER_type(EVP_CIPHER_CTX_cipher(c)) +# if OPENSSL_API_COMPAT < 0x10100000L +# define EVP_CIPHER_CTX_flags(c) EVP_CIPHER_flags(EVP_CIPHER_CTX_cipher(c)) +# endif +# define EVP_CIPHER_CTX_mode(c) EVP_CIPHER_mode(EVP_CIPHER_CTX_cipher(c)) + +# define EVP_ENCODE_LENGTH(l) ((((l)+2)/3*4)+((l)/48+1)*2+80) +# define EVP_DECODE_LENGTH(l) (((l)+3)/4*3+80) + +# define EVP_SignInit_ex(a,b,c) EVP_DigestInit_ex(a,b,c) +# define EVP_SignInit(a,b) EVP_DigestInit(a,b) +# define EVP_SignUpdate(a,b,c) EVP_DigestUpdate(a,b,c) +# define EVP_VerifyInit_ex(a,b,c) EVP_DigestInit_ex(a,b,c) +# define EVP_VerifyInit(a,b) EVP_DigestInit(a,b) +# define 
EVP_VerifyUpdate(a,b,c) EVP_DigestUpdate(a,b,c) +# define EVP_OpenUpdate(a,b,c,d,e) EVP_DecryptUpdate(a,b,c,d,e) +# define EVP_SealUpdate(a,b,c,d,e) EVP_EncryptUpdate(a,b,c,d,e) +# define EVP_DigestSignUpdate(a,b,c) EVP_DigestUpdate(a,b,c) +# define EVP_DigestVerifyUpdate(a,b,c) EVP_DigestUpdate(a,b,c) + +# ifdef CONST_STRICT +void BIO_set_md(BIO *, const EVP_MD *md); +# else +# define BIO_set_md(b,md) BIO_ctrl(b,BIO_C_SET_MD,0,(char *)(md)) +# endif +# define BIO_get_md(b,mdp) BIO_ctrl(b,BIO_C_GET_MD,0,(char *)(mdp)) +# define BIO_get_md_ctx(b,mdcp) BIO_ctrl(b,BIO_C_GET_MD_CTX,0, \ + (char *)(mdcp)) +# define BIO_set_md_ctx(b,mdcp) BIO_ctrl(b,BIO_C_SET_MD_CTX,0, \ + (char *)(mdcp)) +# define BIO_get_cipher_status(b) BIO_ctrl(b,BIO_C_GET_CIPHER_STATUS,0,NULL) +# define BIO_get_cipher_ctx(b,c_pp) BIO_ctrl(b,BIO_C_GET_CIPHER_CTX,0, \ + (char *)(c_pp)) + +/*__owur*/ int EVP_Cipher(EVP_CIPHER_CTX *c, + unsigned char *out, + const unsigned char *in, unsigned int inl); + +# define EVP_add_cipher_alias(n,alias) \ + OBJ_NAME_add((alias),OBJ_NAME_TYPE_CIPHER_METH|OBJ_NAME_ALIAS,(n)) +# define EVP_add_digest_alias(n,alias) \ + OBJ_NAME_add((alias),OBJ_NAME_TYPE_MD_METH|OBJ_NAME_ALIAS,(n)) +# define EVP_delete_cipher_alias(alias) \ + OBJ_NAME_remove(alias,OBJ_NAME_TYPE_CIPHER_METH|OBJ_NAME_ALIAS); +# define EVP_delete_digest_alias(alias) \ + OBJ_NAME_remove(alias,OBJ_NAME_TYPE_MD_METH|OBJ_NAME_ALIAS); + +int EVP_MD_CTX_ctrl(EVP_MD_CTX *ctx, int cmd, int p1, void *p2); +EVP_MD_CTX *EVP_MD_CTX_new(void); +int EVP_MD_CTX_reset(EVP_MD_CTX *ctx); +void EVP_MD_CTX_free(EVP_MD_CTX *ctx); +# define EVP_MD_CTX_create() EVP_MD_CTX_new() +# define EVP_MD_CTX_init(ctx) EVP_MD_CTX_reset((ctx)) +# define EVP_MD_CTX_destroy(ctx) EVP_MD_CTX_free((ctx)) +__owur int EVP_MD_CTX_copy_ex(EVP_MD_CTX *out, const EVP_MD_CTX *in); +void EVP_MD_CTX_set_flags(EVP_MD_CTX *ctx, int flags); +void EVP_MD_CTX_clear_flags(EVP_MD_CTX *ctx, int flags); +int EVP_MD_CTX_test_flags(const EVP_MD_CTX *ctx, int flags); +__owur int EVP_DigestInit_ex(EVP_MD_CTX *ctx, const EVP_MD *type, + ENGINE *impl); +__owur int EVP_DigestUpdate(EVP_MD_CTX *ctx, const void *d, + size_t cnt); +__owur int EVP_DigestFinal_ex(EVP_MD_CTX *ctx, unsigned char *md, + unsigned int *s); +__owur int EVP_Digest(const void *data, size_t count, + unsigned char *md, unsigned int *size, + const EVP_MD *type, ENGINE *impl); + +__owur int EVP_MD_CTX_copy(EVP_MD_CTX *out, const EVP_MD_CTX *in); +__owur int EVP_DigestInit(EVP_MD_CTX *ctx, const EVP_MD *type); +__owur int EVP_DigestFinal(EVP_MD_CTX *ctx, unsigned char *md, + unsigned int *s); +__owur int EVP_DigestFinalXOF(EVP_MD_CTX *ctx, unsigned char *md, + size_t len); + +int EVP_read_pw_string(char *buf, int length, const char *prompt, int verify); +int EVP_read_pw_string_min(char *buf, int minlen, int maxlen, + const char *prompt, int verify); +void EVP_set_pw_prompt(const char *prompt); +char *EVP_get_pw_prompt(void); + +__owur int EVP_BytesToKey(const EVP_CIPHER *type, const EVP_MD *md, + const unsigned char *salt, + const unsigned char *data, int datal, int count, + unsigned char *key, unsigned char *iv); + +void EVP_CIPHER_CTX_set_flags(EVP_CIPHER_CTX *ctx, int flags); +void EVP_CIPHER_CTX_clear_flags(EVP_CIPHER_CTX *ctx, int flags); +int EVP_CIPHER_CTX_test_flags(const EVP_CIPHER_CTX *ctx, int flags); + +__owur int EVP_EncryptInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, + const unsigned char *key, const unsigned char *iv); +/*__owur*/ int EVP_EncryptInit_ex(EVP_CIPHER_CTX *ctx, + const EVP_CIPHER *cipher, 
ENGINE *impl, + const unsigned char *key, + const unsigned char *iv); +/*__owur*/ int EVP_EncryptUpdate(EVP_CIPHER_CTX *ctx, unsigned char *out, + int *outl, const unsigned char *in, int inl); +/*__owur*/ int EVP_EncryptFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *out, + int *outl); +/*__owur*/ int EVP_EncryptFinal(EVP_CIPHER_CTX *ctx, unsigned char *out, + int *outl); + +__owur int EVP_DecryptInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, + const unsigned char *key, const unsigned char *iv); +/*__owur*/ int EVP_DecryptInit_ex(EVP_CIPHER_CTX *ctx, + const EVP_CIPHER *cipher, ENGINE *impl, + const unsigned char *key, + const unsigned char *iv); +/*__owur*/ int EVP_DecryptUpdate(EVP_CIPHER_CTX *ctx, unsigned char *out, + int *outl, const unsigned char *in, int inl); +__owur int EVP_DecryptFinal(EVP_CIPHER_CTX *ctx, unsigned char *outm, + int *outl); +/*__owur*/ int EVP_DecryptFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *outm, + int *outl); + +__owur int EVP_CipherInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, + const unsigned char *key, const unsigned char *iv, + int enc); +/*__owur*/ int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, + const EVP_CIPHER *cipher, ENGINE *impl, + const unsigned char *key, + const unsigned char *iv, int enc); +__owur int EVP_CipherUpdate(EVP_CIPHER_CTX *ctx, unsigned char *out, + int *outl, const unsigned char *in, int inl); +__owur int EVP_CipherFinal(EVP_CIPHER_CTX *ctx, unsigned char *outm, + int *outl); +__owur int EVP_CipherFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *outm, + int *outl); + +__owur int EVP_SignFinal(EVP_MD_CTX *ctx, unsigned char *md, unsigned int *s, + EVP_PKEY *pkey); + +__owur int EVP_DigestSign(EVP_MD_CTX *ctx, unsigned char *sigret, + size_t *siglen, const unsigned char *tbs, + size_t tbslen); + +__owur int EVP_VerifyFinal(EVP_MD_CTX *ctx, const unsigned char *sigbuf, + unsigned int siglen, EVP_PKEY *pkey); + +__owur int EVP_DigestVerify(EVP_MD_CTX *ctx, const unsigned char *sigret, + size_t siglen, const unsigned char *tbs, + size_t tbslen); + +/*__owur*/ int EVP_DigestSignInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, + const EVP_MD *type, ENGINE *e, + EVP_PKEY *pkey); +__owur int EVP_DigestSignFinal(EVP_MD_CTX *ctx, unsigned char *sigret, + size_t *siglen); + +__owur int EVP_DigestVerifyInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, + const EVP_MD *type, ENGINE *e, + EVP_PKEY *pkey); +__owur int EVP_DigestVerifyFinal(EVP_MD_CTX *ctx, const unsigned char *sig, + size_t siglen); + +# ifndef OPENSSL_NO_RSA +__owur int EVP_OpenInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *type, + const unsigned char *ek, int ekl, + const unsigned char *iv, EVP_PKEY *priv); +__owur int EVP_OpenFinal(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl); + +__owur int EVP_SealInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *type, + unsigned char **ek, int *ekl, unsigned char *iv, + EVP_PKEY **pubk, int npubk); +__owur int EVP_SealFinal(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl); +# endif + +EVP_ENCODE_CTX *EVP_ENCODE_CTX_new(void); +void EVP_ENCODE_CTX_free(EVP_ENCODE_CTX *ctx); +int EVP_ENCODE_CTX_copy(EVP_ENCODE_CTX *dctx, EVP_ENCODE_CTX *sctx); +int EVP_ENCODE_CTX_num(EVP_ENCODE_CTX *ctx); +void EVP_EncodeInit(EVP_ENCODE_CTX *ctx); +int EVP_EncodeUpdate(EVP_ENCODE_CTX *ctx, unsigned char *out, int *outl, + const unsigned char *in, int inl); +void EVP_EncodeFinal(EVP_ENCODE_CTX *ctx, unsigned char *out, int *outl); +int EVP_EncodeBlock(unsigned char *t, const unsigned char *f, int n); + +void EVP_DecodeInit(EVP_ENCODE_CTX *ctx); +int 
EVP_DecodeUpdate(EVP_ENCODE_CTX *ctx, unsigned char *out, int *outl, + const unsigned char *in, int inl); +int EVP_DecodeFinal(EVP_ENCODE_CTX *ctx, unsigned + char *out, int *outl); +int EVP_DecodeBlock(unsigned char *t, const unsigned char *f, int n); + +# if OPENSSL_API_COMPAT < 0x10100000L +# define EVP_CIPHER_CTX_init(c) EVP_CIPHER_CTX_reset(c) +# define EVP_CIPHER_CTX_cleanup(c) EVP_CIPHER_CTX_reset(c) +# endif +EVP_CIPHER_CTX *EVP_CIPHER_CTX_new(void); +int EVP_CIPHER_CTX_reset(EVP_CIPHER_CTX *c); +void EVP_CIPHER_CTX_free(EVP_CIPHER_CTX *c); +int EVP_CIPHER_CTX_set_key_length(EVP_CIPHER_CTX *x, int keylen); +int EVP_CIPHER_CTX_set_padding(EVP_CIPHER_CTX *c, int pad); +int EVP_CIPHER_CTX_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void *ptr); +int EVP_CIPHER_CTX_rand_key(EVP_CIPHER_CTX *ctx, unsigned char *key); + +const BIO_METHOD *BIO_f_md(void); +const BIO_METHOD *BIO_f_base64(void); +const BIO_METHOD *BIO_f_cipher(void); +const BIO_METHOD *BIO_f_reliable(void); +__owur int BIO_set_cipher(BIO *b, const EVP_CIPHER *c, const unsigned char *k, + const unsigned char *i, int enc); + +const EVP_MD *EVP_md_null(void); +# ifndef OPENSSL_NO_MD2 +const EVP_MD *EVP_md2(void); +# endif +# ifndef OPENSSL_NO_MD4 +const EVP_MD *EVP_md4(void); +# endif +# ifndef OPENSSL_NO_MD5 +const EVP_MD *EVP_md5(void); +const EVP_MD *EVP_md5_sha1(void); +# endif +# ifndef OPENSSL_NO_BLAKE2 +const EVP_MD *EVP_blake2b512(void); +const EVP_MD *EVP_blake2s256(void); +# endif +const EVP_MD *EVP_sha1(void); +const EVP_MD *EVP_sha224(void); +const EVP_MD *EVP_sha256(void); +const EVP_MD *EVP_sha384(void); +const EVP_MD *EVP_sha512(void); +const EVP_MD *EVP_sha512_224(void); +const EVP_MD *EVP_sha512_256(void); +const EVP_MD *EVP_sha3_224(void); +const EVP_MD *EVP_sha3_256(void); +const EVP_MD *EVP_sha3_384(void); +const EVP_MD *EVP_sha3_512(void); +const EVP_MD *EVP_shake128(void); +const EVP_MD *EVP_shake256(void); +# ifndef OPENSSL_NO_MDC2 +const EVP_MD *EVP_mdc2(void); +# endif +# ifndef OPENSSL_NO_RMD160 +const EVP_MD *EVP_ripemd160(void); +# endif +# ifndef OPENSSL_NO_WHIRLPOOL +const EVP_MD *EVP_whirlpool(void); +# endif +# ifndef OPENSSL_NO_SM3 +const EVP_MD *EVP_sm3(void); +# endif +const EVP_CIPHER *EVP_enc_null(void); /* does nothing :-) */ +# ifndef OPENSSL_NO_DES +const EVP_CIPHER *EVP_des_ecb(void); +const EVP_CIPHER *EVP_des_ede(void); +const EVP_CIPHER *EVP_des_ede3(void); +const EVP_CIPHER *EVP_des_ede_ecb(void); +const EVP_CIPHER *EVP_des_ede3_ecb(void); +const EVP_CIPHER *EVP_des_cfb64(void); +# define EVP_des_cfb EVP_des_cfb64 +const EVP_CIPHER *EVP_des_cfb1(void); +const EVP_CIPHER *EVP_des_cfb8(void); +const EVP_CIPHER *EVP_des_ede_cfb64(void); +# define EVP_des_ede_cfb EVP_des_ede_cfb64 +const EVP_CIPHER *EVP_des_ede3_cfb64(void); +# define EVP_des_ede3_cfb EVP_des_ede3_cfb64 +const EVP_CIPHER *EVP_des_ede3_cfb1(void); +const EVP_CIPHER *EVP_des_ede3_cfb8(void); +const EVP_CIPHER *EVP_des_ofb(void); +const EVP_CIPHER *EVP_des_ede_ofb(void); +const EVP_CIPHER *EVP_des_ede3_ofb(void); +const EVP_CIPHER *EVP_des_cbc(void); +const EVP_CIPHER *EVP_des_ede_cbc(void); +const EVP_CIPHER *EVP_des_ede3_cbc(void); +const EVP_CIPHER *EVP_desx_cbc(void); +const EVP_CIPHER *EVP_des_ede3_wrap(void); +/* + * This should now be supported through the dev_crypto ENGINE. But also, why + * are rc4 and md5 declarations made here inside a "NO_DES" precompiler + * branch? 
+ */ +# endif +# ifndef OPENSSL_NO_RC4 +const EVP_CIPHER *EVP_rc4(void); +const EVP_CIPHER *EVP_rc4_40(void); +# ifndef OPENSSL_NO_MD5 +const EVP_CIPHER *EVP_rc4_hmac_md5(void); +# endif +# endif +# ifndef OPENSSL_NO_IDEA +const EVP_CIPHER *EVP_idea_ecb(void); +const EVP_CIPHER *EVP_idea_cfb64(void); +# define EVP_idea_cfb EVP_idea_cfb64 +const EVP_CIPHER *EVP_idea_ofb(void); +const EVP_CIPHER *EVP_idea_cbc(void); +# endif +# ifndef OPENSSL_NO_RC2 +const EVP_CIPHER *EVP_rc2_ecb(void); +const EVP_CIPHER *EVP_rc2_cbc(void); +const EVP_CIPHER *EVP_rc2_40_cbc(void); +const EVP_CIPHER *EVP_rc2_64_cbc(void); +const EVP_CIPHER *EVP_rc2_cfb64(void); +# define EVP_rc2_cfb EVP_rc2_cfb64 +const EVP_CIPHER *EVP_rc2_ofb(void); +# endif +# ifndef OPENSSL_NO_BF +const EVP_CIPHER *EVP_bf_ecb(void); +const EVP_CIPHER *EVP_bf_cbc(void); +const EVP_CIPHER *EVP_bf_cfb64(void); +# define EVP_bf_cfb EVP_bf_cfb64 +const EVP_CIPHER *EVP_bf_ofb(void); +# endif +# ifndef OPENSSL_NO_CAST +const EVP_CIPHER *EVP_cast5_ecb(void); +const EVP_CIPHER *EVP_cast5_cbc(void); +const EVP_CIPHER *EVP_cast5_cfb64(void); +# define EVP_cast5_cfb EVP_cast5_cfb64 +const EVP_CIPHER *EVP_cast5_ofb(void); +# endif +# ifndef OPENSSL_NO_RC5 +const EVP_CIPHER *EVP_rc5_32_12_16_cbc(void); +const EVP_CIPHER *EVP_rc5_32_12_16_ecb(void); +const EVP_CIPHER *EVP_rc5_32_12_16_cfb64(void); +# define EVP_rc5_32_12_16_cfb EVP_rc5_32_12_16_cfb64 +const EVP_CIPHER *EVP_rc5_32_12_16_ofb(void); +# endif +const EVP_CIPHER *EVP_aes_128_ecb(void); +const EVP_CIPHER *EVP_aes_128_cbc(void); +const EVP_CIPHER *EVP_aes_128_cfb1(void); +const EVP_CIPHER *EVP_aes_128_cfb8(void); +const EVP_CIPHER *EVP_aes_128_cfb128(void); +# define EVP_aes_128_cfb EVP_aes_128_cfb128 +const EVP_CIPHER *EVP_aes_128_ofb(void); +const EVP_CIPHER *EVP_aes_128_ctr(void); +const EVP_CIPHER *EVP_aes_128_ccm(void); +const EVP_CIPHER *EVP_aes_128_gcm(void); +const EVP_CIPHER *EVP_aes_128_xts(void); +const EVP_CIPHER *EVP_aes_128_wrap(void); +const EVP_CIPHER *EVP_aes_128_wrap_pad(void); +# ifndef OPENSSL_NO_OCB +const EVP_CIPHER *EVP_aes_128_ocb(void); +# endif +const EVP_CIPHER *EVP_aes_192_ecb(void); +const EVP_CIPHER *EVP_aes_192_cbc(void); +const EVP_CIPHER *EVP_aes_192_cfb1(void); +const EVP_CIPHER *EVP_aes_192_cfb8(void); +const EVP_CIPHER *EVP_aes_192_cfb128(void); +# define EVP_aes_192_cfb EVP_aes_192_cfb128 +const EVP_CIPHER *EVP_aes_192_ofb(void); +const EVP_CIPHER *EVP_aes_192_ctr(void); +const EVP_CIPHER *EVP_aes_192_ccm(void); +const EVP_CIPHER *EVP_aes_192_gcm(void); +const EVP_CIPHER *EVP_aes_192_wrap(void); +const EVP_CIPHER *EVP_aes_192_wrap_pad(void); +# ifndef OPENSSL_NO_OCB +const EVP_CIPHER *EVP_aes_192_ocb(void); +# endif +const EVP_CIPHER *EVP_aes_256_ecb(void); +const EVP_CIPHER *EVP_aes_256_cbc(void); +const EVP_CIPHER *EVP_aes_256_cfb1(void); +const EVP_CIPHER *EVP_aes_256_cfb8(void); +const EVP_CIPHER *EVP_aes_256_cfb128(void); +# define EVP_aes_256_cfb EVP_aes_256_cfb128 +const EVP_CIPHER *EVP_aes_256_ofb(void); +const EVP_CIPHER *EVP_aes_256_ctr(void); +const EVP_CIPHER *EVP_aes_256_ccm(void); +const EVP_CIPHER *EVP_aes_256_gcm(void); +const EVP_CIPHER *EVP_aes_256_xts(void); +const EVP_CIPHER *EVP_aes_256_wrap(void); +const EVP_CIPHER *EVP_aes_256_wrap_pad(void); +# ifndef OPENSSL_NO_OCB +const EVP_CIPHER *EVP_aes_256_ocb(void); +# endif +const EVP_CIPHER *EVP_aes_128_cbc_hmac_sha1(void); +const EVP_CIPHER *EVP_aes_256_cbc_hmac_sha1(void); +const EVP_CIPHER *EVP_aes_128_cbc_hmac_sha256(void); +const EVP_CIPHER *EVP_aes_256_cbc_hmac_sha256(void); +# ifndef 
OPENSSL_NO_ARIA +const EVP_CIPHER *EVP_aria_128_ecb(void); +const EVP_CIPHER *EVP_aria_128_cbc(void); +const EVP_CIPHER *EVP_aria_128_cfb1(void); +const EVP_CIPHER *EVP_aria_128_cfb8(void); +const EVP_CIPHER *EVP_aria_128_cfb128(void); +# define EVP_aria_128_cfb EVP_aria_128_cfb128 +const EVP_CIPHER *EVP_aria_128_ctr(void); +const EVP_CIPHER *EVP_aria_128_ofb(void); +const EVP_CIPHER *EVP_aria_128_gcm(void); +const EVP_CIPHER *EVP_aria_128_ccm(void); +const EVP_CIPHER *EVP_aria_192_ecb(void); +const EVP_CIPHER *EVP_aria_192_cbc(void); +const EVP_CIPHER *EVP_aria_192_cfb1(void); +const EVP_CIPHER *EVP_aria_192_cfb8(void); +const EVP_CIPHER *EVP_aria_192_cfb128(void); +# define EVP_aria_192_cfb EVP_aria_192_cfb128 +const EVP_CIPHER *EVP_aria_192_ctr(void); +const EVP_CIPHER *EVP_aria_192_ofb(void); +const EVP_CIPHER *EVP_aria_192_gcm(void); +const EVP_CIPHER *EVP_aria_192_ccm(void); +const EVP_CIPHER *EVP_aria_256_ecb(void); +const EVP_CIPHER *EVP_aria_256_cbc(void); +const EVP_CIPHER *EVP_aria_256_cfb1(void); +const EVP_CIPHER *EVP_aria_256_cfb8(void); +const EVP_CIPHER *EVP_aria_256_cfb128(void); +# define EVP_aria_256_cfb EVP_aria_256_cfb128 +const EVP_CIPHER *EVP_aria_256_ctr(void); +const EVP_CIPHER *EVP_aria_256_ofb(void); +const EVP_CIPHER *EVP_aria_256_gcm(void); +const EVP_CIPHER *EVP_aria_256_ccm(void); +# endif +# ifndef OPENSSL_NO_CAMELLIA +const EVP_CIPHER *EVP_camellia_128_ecb(void); +const EVP_CIPHER *EVP_camellia_128_cbc(void); +const EVP_CIPHER *EVP_camellia_128_cfb1(void); +const EVP_CIPHER *EVP_camellia_128_cfb8(void); +const EVP_CIPHER *EVP_camellia_128_cfb128(void); +# define EVP_camellia_128_cfb EVP_camellia_128_cfb128 +const EVP_CIPHER *EVP_camellia_128_ofb(void); +const EVP_CIPHER *EVP_camellia_128_ctr(void); +const EVP_CIPHER *EVP_camellia_192_ecb(void); +const EVP_CIPHER *EVP_camellia_192_cbc(void); +const EVP_CIPHER *EVP_camellia_192_cfb1(void); +const EVP_CIPHER *EVP_camellia_192_cfb8(void); +const EVP_CIPHER *EVP_camellia_192_cfb128(void); +# define EVP_camellia_192_cfb EVP_camellia_192_cfb128 +const EVP_CIPHER *EVP_camellia_192_ofb(void); +const EVP_CIPHER *EVP_camellia_192_ctr(void); +const EVP_CIPHER *EVP_camellia_256_ecb(void); +const EVP_CIPHER *EVP_camellia_256_cbc(void); +const EVP_CIPHER *EVP_camellia_256_cfb1(void); +const EVP_CIPHER *EVP_camellia_256_cfb8(void); +const EVP_CIPHER *EVP_camellia_256_cfb128(void); +# define EVP_camellia_256_cfb EVP_camellia_256_cfb128 +const EVP_CIPHER *EVP_camellia_256_ofb(void); +const EVP_CIPHER *EVP_camellia_256_ctr(void); +# endif +# ifndef OPENSSL_NO_CHACHA +const EVP_CIPHER *EVP_chacha20(void); +# ifndef OPENSSL_NO_POLY1305 +const EVP_CIPHER *EVP_chacha20_poly1305(void); +# endif +# endif + +# ifndef OPENSSL_NO_SEED +const EVP_CIPHER *EVP_seed_ecb(void); +const EVP_CIPHER *EVP_seed_cbc(void); +const EVP_CIPHER *EVP_seed_cfb128(void); +# define EVP_seed_cfb EVP_seed_cfb128 +const EVP_CIPHER *EVP_seed_ofb(void); +# endif + +# ifndef OPENSSL_NO_SM4 +const EVP_CIPHER *EVP_sm4_ecb(void); +const EVP_CIPHER *EVP_sm4_cbc(void); +const EVP_CIPHER *EVP_sm4_cfb128(void); +# define EVP_sm4_cfb EVP_sm4_cfb128 +const EVP_CIPHER *EVP_sm4_ofb(void); +const EVP_CIPHER *EVP_sm4_ctr(void); +# endif + +# if OPENSSL_API_COMPAT < 0x10100000L +# define OPENSSL_add_all_algorithms_conf() \ + OPENSSL_init_crypto(OPENSSL_INIT_ADD_ALL_CIPHERS \ + | OPENSSL_INIT_ADD_ALL_DIGESTS \ + | OPENSSL_INIT_LOAD_CONFIG, NULL) +# define OPENSSL_add_all_algorithms_noconf() \ + OPENSSL_init_crypto(OPENSSL_INIT_ADD_ALL_CIPHERS \ + | 
OPENSSL_INIT_ADD_ALL_DIGESTS, NULL) + +# ifdef OPENSSL_LOAD_CONF +# define OpenSSL_add_all_algorithms() OPENSSL_add_all_algorithms_conf() +# else +# define OpenSSL_add_all_algorithms() OPENSSL_add_all_algorithms_noconf() +# endif + +# define OpenSSL_add_all_ciphers() \ + OPENSSL_init_crypto(OPENSSL_INIT_ADD_ALL_CIPHERS, NULL) +# define OpenSSL_add_all_digests() \ + OPENSSL_init_crypto(OPENSSL_INIT_ADD_ALL_DIGESTS, NULL) + +# define EVP_cleanup() while(0) continue +# endif + +int EVP_add_cipher(const EVP_CIPHER *cipher); +int EVP_add_digest(const EVP_MD *digest); + +const EVP_CIPHER *EVP_get_cipherbyname(const char *name); +const EVP_MD *EVP_get_digestbyname(const char *name); + +void EVP_CIPHER_do_all(void (*fn) (const EVP_CIPHER *ciph, + const char *from, const char *to, void *x), + void *arg); +void EVP_CIPHER_do_all_sorted(void (*fn) + (const EVP_CIPHER *ciph, const char *from, + const char *to, void *x), void *arg); + +void EVP_MD_do_all(void (*fn) (const EVP_MD *ciph, + const char *from, const char *to, void *x), + void *arg); +void EVP_MD_do_all_sorted(void (*fn) + (const EVP_MD *ciph, const char *from, + const char *to, void *x), void *arg); + +int EVP_PKEY_decrypt_old(unsigned char *dec_key, + const unsigned char *enc_key, int enc_key_len, + EVP_PKEY *private_key); +int EVP_PKEY_encrypt_old(unsigned char *enc_key, + const unsigned char *key, int key_len, + EVP_PKEY *pub_key); +int EVP_PKEY_type(int type); +int EVP_PKEY_id(const EVP_PKEY *pkey); +int EVP_PKEY_base_id(const EVP_PKEY *pkey); +int EVP_PKEY_bits(const EVP_PKEY *pkey); +int EVP_PKEY_security_bits(const EVP_PKEY *pkey); +int EVP_PKEY_size(const EVP_PKEY *pkey); +int EVP_PKEY_set_type(EVP_PKEY *pkey, int type); +int EVP_PKEY_set_type_str(EVP_PKEY *pkey, const char *str, int len); +int EVP_PKEY_set_alias_type(EVP_PKEY *pkey, int type); +# ifndef OPENSSL_NO_ENGINE +int EVP_PKEY_set1_engine(EVP_PKEY *pkey, ENGINE *e); +ENGINE *EVP_PKEY_get0_engine(const EVP_PKEY *pkey); +# endif +int EVP_PKEY_assign(EVP_PKEY *pkey, int type, void *key); +void *EVP_PKEY_get0(const EVP_PKEY *pkey); +const unsigned char *EVP_PKEY_get0_hmac(const EVP_PKEY *pkey, size_t *len); +# ifndef OPENSSL_NO_POLY1305 +const unsigned char *EVP_PKEY_get0_poly1305(const EVP_PKEY *pkey, size_t *len); +# endif +# ifndef OPENSSL_NO_SIPHASH +const unsigned char *EVP_PKEY_get0_siphash(const EVP_PKEY *pkey, size_t *len); +# endif + +# ifndef OPENSSL_NO_RSA +struct rsa_st; +int EVP_PKEY_set1_RSA(EVP_PKEY *pkey, struct rsa_st *key); +struct rsa_st *EVP_PKEY_get0_RSA(EVP_PKEY *pkey); +struct rsa_st *EVP_PKEY_get1_RSA(EVP_PKEY *pkey); +# endif +# ifndef OPENSSL_NO_DSA +struct dsa_st; +int EVP_PKEY_set1_DSA(EVP_PKEY *pkey, struct dsa_st *key); +struct dsa_st *EVP_PKEY_get0_DSA(EVP_PKEY *pkey); +struct dsa_st *EVP_PKEY_get1_DSA(EVP_PKEY *pkey); +# endif +# ifndef OPENSSL_NO_DH +struct dh_st; +int EVP_PKEY_set1_DH(EVP_PKEY *pkey, struct dh_st *key); +struct dh_st *EVP_PKEY_get0_DH(EVP_PKEY *pkey); +struct dh_st *EVP_PKEY_get1_DH(EVP_PKEY *pkey); +# endif +# ifndef OPENSSL_NO_EC +struct ec_key_st; +int EVP_PKEY_set1_EC_KEY(EVP_PKEY *pkey, struct ec_key_st *key); +struct ec_key_st *EVP_PKEY_get0_EC_KEY(EVP_PKEY *pkey); +struct ec_key_st *EVP_PKEY_get1_EC_KEY(EVP_PKEY *pkey); +# endif + +EVP_PKEY *EVP_PKEY_new(void); +int EVP_PKEY_up_ref(EVP_PKEY *pkey); +void EVP_PKEY_free(EVP_PKEY *pkey); + +EVP_PKEY *d2i_PublicKey(int type, EVP_PKEY **a, const unsigned char **pp, + long length); +int i2d_PublicKey(EVP_PKEY *a, unsigned char **pp); + +EVP_PKEY *d2i_PrivateKey(int type, 
EVP_PKEY **a, const unsigned char **pp, + long length); +EVP_PKEY *d2i_AutoPrivateKey(EVP_PKEY **a, const unsigned char **pp, + long length); +int i2d_PrivateKey(EVP_PKEY *a, unsigned char **pp); + +int EVP_PKEY_copy_parameters(EVP_PKEY *to, const EVP_PKEY *from); +int EVP_PKEY_missing_parameters(const EVP_PKEY *pkey); +int EVP_PKEY_save_parameters(EVP_PKEY *pkey, int mode); +int EVP_PKEY_cmp_parameters(const EVP_PKEY *a, const EVP_PKEY *b); + +int EVP_PKEY_cmp(const EVP_PKEY *a, const EVP_PKEY *b); + +int EVP_PKEY_print_public(BIO *out, const EVP_PKEY *pkey, + int indent, ASN1_PCTX *pctx); +int EVP_PKEY_print_private(BIO *out, const EVP_PKEY *pkey, + int indent, ASN1_PCTX *pctx); +int EVP_PKEY_print_params(BIO *out, const EVP_PKEY *pkey, + int indent, ASN1_PCTX *pctx); + +int EVP_PKEY_get_default_digest_nid(EVP_PKEY *pkey, int *pnid); + +int EVP_PKEY_set1_tls_encodedpoint(EVP_PKEY *pkey, + const unsigned char *pt, size_t ptlen); +size_t EVP_PKEY_get1_tls_encodedpoint(EVP_PKEY *pkey, unsigned char **ppt); + +int EVP_CIPHER_type(const EVP_CIPHER *ctx); + +/* calls methods */ +int EVP_CIPHER_param_to_asn1(EVP_CIPHER_CTX *c, ASN1_TYPE *type); +int EVP_CIPHER_asn1_to_param(EVP_CIPHER_CTX *c, ASN1_TYPE *type); + +/* These are used by EVP_CIPHER methods */ +int EVP_CIPHER_set_asn1_iv(EVP_CIPHER_CTX *c, ASN1_TYPE *type); +int EVP_CIPHER_get_asn1_iv(EVP_CIPHER_CTX *c, ASN1_TYPE *type); + +/* PKCS5 password based encryption */ +int PKCS5_PBE_keyivgen(EVP_CIPHER_CTX *ctx, const char *pass, int passlen, + ASN1_TYPE *param, const EVP_CIPHER *cipher, + const EVP_MD *md, int en_de); +int PKCS5_PBKDF2_HMAC_SHA1(const char *pass, int passlen, + const unsigned char *salt, int saltlen, int iter, + int keylen, unsigned char *out); +int PKCS5_PBKDF2_HMAC(const char *pass, int passlen, + const unsigned char *salt, int saltlen, int iter, + const EVP_MD *digest, int keylen, unsigned char *out); +int PKCS5_v2_PBE_keyivgen(EVP_CIPHER_CTX *ctx, const char *pass, int passlen, + ASN1_TYPE *param, const EVP_CIPHER *cipher, + const EVP_MD *md, int en_de); + +#ifndef OPENSSL_NO_SCRYPT +int EVP_PBE_scrypt(const char *pass, size_t passlen, + const unsigned char *salt, size_t saltlen, + uint64_t N, uint64_t r, uint64_t p, uint64_t maxmem, + unsigned char *key, size_t keylen); + +int PKCS5_v2_scrypt_keyivgen(EVP_CIPHER_CTX *ctx, const char *pass, + int passlen, ASN1_TYPE *param, + const EVP_CIPHER *c, const EVP_MD *md, int en_de); +#endif + +void PKCS5_PBE_add(void); + +int EVP_PBE_CipherInit(ASN1_OBJECT *pbe_obj, const char *pass, int passlen, + ASN1_TYPE *param, EVP_CIPHER_CTX *ctx, int en_de); + +/* PBE type */ + +/* Can appear as the outermost AlgorithmIdentifier */ +# define EVP_PBE_TYPE_OUTER 0x0 +/* Is an PRF type OID */ +# define EVP_PBE_TYPE_PRF 0x1 +/* Is a PKCS#5 v2.0 KDF */ +# define EVP_PBE_TYPE_KDF 0x2 + +int EVP_PBE_alg_add_type(int pbe_type, int pbe_nid, int cipher_nid, + int md_nid, EVP_PBE_KEYGEN *keygen); +int EVP_PBE_alg_add(int nid, const EVP_CIPHER *cipher, const EVP_MD *md, + EVP_PBE_KEYGEN *keygen); +int EVP_PBE_find(int type, int pbe_nid, int *pcnid, int *pmnid, + EVP_PBE_KEYGEN **pkeygen); +void EVP_PBE_cleanup(void); +int EVP_PBE_get(int *ptype, int *ppbe_nid, size_t num); + +# define ASN1_PKEY_ALIAS 0x1 +# define ASN1_PKEY_DYNAMIC 0x2 +# define ASN1_PKEY_SIGPARAM_NULL 0x4 + +# define ASN1_PKEY_CTRL_PKCS7_SIGN 0x1 +# define ASN1_PKEY_CTRL_PKCS7_ENCRYPT 0x2 +# define ASN1_PKEY_CTRL_DEFAULT_MD_NID 0x3 +# define ASN1_PKEY_CTRL_CMS_SIGN 0x5 +# define ASN1_PKEY_CTRL_CMS_ENVELOPE 0x7 +# define 
ASN1_PKEY_CTRL_CMS_RI_TYPE 0x8 + +# define ASN1_PKEY_CTRL_SET1_TLS_ENCPT 0x9 +# define ASN1_PKEY_CTRL_GET1_TLS_ENCPT 0xa + +int EVP_PKEY_asn1_get_count(void); +const EVP_PKEY_ASN1_METHOD *EVP_PKEY_asn1_get0(int idx); +const EVP_PKEY_ASN1_METHOD *EVP_PKEY_asn1_find(ENGINE **pe, int type); +const EVP_PKEY_ASN1_METHOD *EVP_PKEY_asn1_find_str(ENGINE **pe, + const char *str, int len); +int EVP_PKEY_asn1_add0(const EVP_PKEY_ASN1_METHOD *ameth); +int EVP_PKEY_asn1_add_alias(int to, int from); +int EVP_PKEY_asn1_get0_info(int *ppkey_id, int *pkey_base_id, + int *ppkey_flags, const char **pinfo, + const char **ppem_str, + const EVP_PKEY_ASN1_METHOD *ameth); + +const EVP_PKEY_ASN1_METHOD *EVP_PKEY_get0_asn1(const EVP_PKEY *pkey); +EVP_PKEY_ASN1_METHOD *EVP_PKEY_asn1_new(int id, int flags, + const char *pem_str, + const char *info); +void EVP_PKEY_asn1_copy(EVP_PKEY_ASN1_METHOD *dst, + const EVP_PKEY_ASN1_METHOD *src); +void EVP_PKEY_asn1_free(EVP_PKEY_ASN1_METHOD *ameth); +void EVP_PKEY_asn1_set_public(EVP_PKEY_ASN1_METHOD *ameth, + int (*pub_decode) (EVP_PKEY *pk, + X509_PUBKEY *pub), + int (*pub_encode) (X509_PUBKEY *pub, + const EVP_PKEY *pk), + int (*pub_cmp) (const EVP_PKEY *a, + const EVP_PKEY *b), + int (*pub_print) (BIO *out, + const EVP_PKEY *pkey, + int indent, ASN1_PCTX *pctx), + int (*pkey_size) (const EVP_PKEY *pk), + int (*pkey_bits) (const EVP_PKEY *pk)); +void EVP_PKEY_asn1_set_private(EVP_PKEY_ASN1_METHOD *ameth, + int (*priv_decode) (EVP_PKEY *pk, + const PKCS8_PRIV_KEY_INFO + *p8inf), + int (*priv_encode) (PKCS8_PRIV_KEY_INFO *p8, + const EVP_PKEY *pk), + int (*priv_print) (BIO *out, + const EVP_PKEY *pkey, + int indent, + ASN1_PCTX *pctx)); +void EVP_PKEY_asn1_set_param(EVP_PKEY_ASN1_METHOD *ameth, + int (*param_decode) (EVP_PKEY *pkey, + const unsigned char **pder, + int derlen), + int (*param_encode) (const EVP_PKEY *pkey, + unsigned char **pder), + int (*param_missing) (const EVP_PKEY *pk), + int (*param_copy) (EVP_PKEY *to, + const EVP_PKEY *from), + int (*param_cmp) (const EVP_PKEY *a, + const EVP_PKEY *b), + int (*param_print) (BIO *out, + const EVP_PKEY *pkey, + int indent, + ASN1_PCTX *pctx)); + +void EVP_PKEY_asn1_set_free(EVP_PKEY_ASN1_METHOD *ameth, + void (*pkey_free) (EVP_PKEY *pkey)); +void EVP_PKEY_asn1_set_ctrl(EVP_PKEY_ASN1_METHOD *ameth, + int (*pkey_ctrl) (EVP_PKEY *pkey, int op, + long arg1, void *arg2)); +void EVP_PKEY_asn1_set_item(EVP_PKEY_ASN1_METHOD *ameth, + int (*item_verify) (EVP_MD_CTX *ctx, + const ASN1_ITEM *it, + void *asn, + X509_ALGOR *a, + ASN1_BIT_STRING *sig, + EVP_PKEY *pkey), + int (*item_sign) (EVP_MD_CTX *ctx, + const ASN1_ITEM *it, + void *asn, + X509_ALGOR *alg1, + X509_ALGOR *alg2, + ASN1_BIT_STRING *sig)); + +void EVP_PKEY_asn1_set_siginf(EVP_PKEY_ASN1_METHOD *ameth, + int (*siginf_set) (X509_SIG_INFO *siginf, + const X509_ALGOR *alg, + const ASN1_STRING *sig)); + +void EVP_PKEY_asn1_set_check(EVP_PKEY_ASN1_METHOD *ameth, + int (*pkey_check) (const EVP_PKEY *pk)); + +void EVP_PKEY_asn1_set_public_check(EVP_PKEY_ASN1_METHOD *ameth, + int (*pkey_pub_check) (const EVP_PKEY *pk)); + +void EVP_PKEY_asn1_set_param_check(EVP_PKEY_ASN1_METHOD *ameth, + int (*pkey_param_check) (const EVP_PKEY *pk)); + +void EVP_PKEY_asn1_set_set_priv_key(EVP_PKEY_ASN1_METHOD *ameth, + int (*set_priv_key) (EVP_PKEY *pk, + const unsigned char + *priv, + size_t len)); +void EVP_PKEY_asn1_set_set_pub_key(EVP_PKEY_ASN1_METHOD *ameth, + int (*set_pub_key) (EVP_PKEY *pk, + const unsigned char *pub, + size_t len)); +void 
EVP_PKEY_asn1_set_get_priv_key(EVP_PKEY_ASN1_METHOD *ameth, + int (*get_priv_key) (const EVP_PKEY *pk, + unsigned char *priv, + size_t *len)); +void EVP_PKEY_asn1_set_get_pub_key(EVP_PKEY_ASN1_METHOD *ameth, + int (*get_pub_key) (const EVP_PKEY *pk, + unsigned char *pub, + size_t *len)); + +void EVP_PKEY_asn1_set_security_bits(EVP_PKEY_ASN1_METHOD *ameth, + int (*pkey_security_bits) (const EVP_PKEY + *pk)); + +# define EVP_PKEY_OP_UNDEFINED 0 +# define EVP_PKEY_OP_PARAMGEN (1<<1) +# define EVP_PKEY_OP_KEYGEN (1<<2) +# define EVP_PKEY_OP_SIGN (1<<3) +# define EVP_PKEY_OP_VERIFY (1<<4) +# define EVP_PKEY_OP_VERIFYRECOVER (1<<5) +# define EVP_PKEY_OP_SIGNCTX (1<<6) +# define EVP_PKEY_OP_VERIFYCTX (1<<7) +# define EVP_PKEY_OP_ENCRYPT (1<<8) +# define EVP_PKEY_OP_DECRYPT (1<<9) +# define EVP_PKEY_OP_DERIVE (1<<10) + +# define EVP_PKEY_OP_TYPE_SIG \ + (EVP_PKEY_OP_SIGN | EVP_PKEY_OP_VERIFY | EVP_PKEY_OP_VERIFYRECOVER \ + | EVP_PKEY_OP_SIGNCTX | EVP_PKEY_OP_VERIFYCTX) + +# define EVP_PKEY_OP_TYPE_CRYPT \ + (EVP_PKEY_OP_ENCRYPT | EVP_PKEY_OP_DECRYPT) + +# define EVP_PKEY_OP_TYPE_NOGEN \ + (EVP_PKEY_OP_TYPE_SIG | EVP_PKEY_OP_TYPE_CRYPT | EVP_PKEY_OP_DERIVE) + +# define EVP_PKEY_OP_TYPE_GEN \ + (EVP_PKEY_OP_PARAMGEN | EVP_PKEY_OP_KEYGEN) + +# define EVP_PKEY_CTX_set_signature_md(ctx, md) \ + EVP_PKEY_CTX_ctrl(ctx, -1, EVP_PKEY_OP_TYPE_SIG, \ + EVP_PKEY_CTRL_MD, 0, (void *)(md)) + +# define EVP_PKEY_CTX_get_signature_md(ctx, pmd) \ + EVP_PKEY_CTX_ctrl(ctx, -1, EVP_PKEY_OP_TYPE_SIG, \ + EVP_PKEY_CTRL_GET_MD, 0, (void *)(pmd)) + +# define EVP_PKEY_CTX_set_mac_key(ctx, key, len) \ + EVP_PKEY_CTX_ctrl(ctx, -1, EVP_PKEY_OP_KEYGEN, \ + EVP_PKEY_CTRL_SET_MAC_KEY, len, (void *)(key)) + +# define EVP_PKEY_CTRL_MD 1 +# define EVP_PKEY_CTRL_PEER_KEY 2 + +# define EVP_PKEY_CTRL_PKCS7_ENCRYPT 3 +# define EVP_PKEY_CTRL_PKCS7_DECRYPT 4 + +# define EVP_PKEY_CTRL_PKCS7_SIGN 5 + +# define EVP_PKEY_CTRL_SET_MAC_KEY 6 + +# define EVP_PKEY_CTRL_DIGESTINIT 7 + +/* Used by GOST key encryption in TLS */ +# define EVP_PKEY_CTRL_SET_IV 8 + +# define EVP_PKEY_CTRL_CMS_ENCRYPT 9 +# define EVP_PKEY_CTRL_CMS_DECRYPT 10 +# define EVP_PKEY_CTRL_CMS_SIGN 11 + +# define EVP_PKEY_CTRL_CIPHER 12 + +# define EVP_PKEY_CTRL_GET_MD 13 + +# define EVP_PKEY_CTRL_SET_DIGEST_SIZE 14 + +# define EVP_PKEY_ALG_CTRL 0x1000 + +# define EVP_PKEY_FLAG_AUTOARGLEN 2 +/* + * Method handles all operations: don't assume any digest related defaults. 
+ */ +# define EVP_PKEY_FLAG_SIGCTX_CUSTOM 4 + +const EVP_PKEY_METHOD *EVP_PKEY_meth_find(int type); +EVP_PKEY_METHOD *EVP_PKEY_meth_new(int id, int flags); +void EVP_PKEY_meth_get0_info(int *ppkey_id, int *pflags, + const EVP_PKEY_METHOD *meth); +void EVP_PKEY_meth_copy(EVP_PKEY_METHOD *dst, const EVP_PKEY_METHOD *src); +void EVP_PKEY_meth_free(EVP_PKEY_METHOD *pmeth); +int EVP_PKEY_meth_add0(const EVP_PKEY_METHOD *pmeth); +int EVP_PKEY_meth_remove(const EVP_PKEY_METHOD *pmeth); +size_t EVP_PKEY_meth_get_count(void); +const EVP_PKEY_METHOD *EVP_PKEY_meth_get0(size_t idx); + +EVP_PKEY_CTX *EVP_PKEY_CTX_new(EVP_PKEY *pkey, ENGINE *e); +EVP_PKEY_CTX *EVP_PKEY_CTX_new_id(int id, ENGINE *e); +EVP_PKEY_CTX *EVP_PKEY_CTX_dup(EVP_PKEY_CTX *ctx); +void EVP_PKEY_CTX_free(EVP_PKEY_CTX *ctx); + +int EVP_PKEY_CTX_ctrl(EVP_PKEY_CTX *ctx, int keytype, int optype, + int cmd, int p1, void *p2); +int EVP_PKEY_CTX_ctrl_str(EVP_PKEY_CTX *ctx, const char *type, + const char *value); +int EVP_PKEY_CTX_ctrl_uint64(EVP_PKEY_CTX *ctx, int keytype, int optype, + int cmd, uint64_t value); + +int EVP_PKEY_CTX_str2ctrl(EVP_PKEY_CTX *ctx, int cmd, const char *str); +int EVP_PKEY_CTX_hex2ctrl(EVP_PKEY_CTX *ctx, int cmd, const char *hex); + +int EVP_PKEY_CTX_md(EVP_PKEY_CTX *ctx, int optype, int cmd, const char *md); + +int EVP_PKEY_CTX_get_operation(EVP_PKEY_CTX *ctx); +void EVP_PKEY_CTX_set0_keygen_info(EVP_PKEY_CTX *ctx, int *dat, int datlen); + +EVP_PKEY *EVP_PKEY_new_mac_key(int type, ENGINE *e, + const unsigned char *key, int keylen); +EVP_PKEY *EVP_PKEY_new_raw_private_key(int type, ENGINE *e, + const unsigned char *priv, + size_t len); +EVP_PKEY *EVP_PKEY_new_raw_public_key(int type, ENGINE *e, + const unsigned char *pub, + size_t len); +int EVP_PKEY_get_raw_private_key(const EVP_PKEY *pkey, unsigned char *priv, + size_t *len); +int EVP_PKEY_get_raw_public_key(const EVP_PKEY *pkey, unsigned char *pub, + size_t *len); + +EVP_PKEY *EVP_PKEY_new_CMAC_key(ENGINE *e, const unsigned char *priv, + size_t len, const EVP_CIPHER *cipher); + +void EVP_PKEY_CTX_set_data(EVP_PKEY_CTX *ctx, void *data); +void *EVP_PKEY_CTX_get_data(EVP_PKEY_CTX *ctx); +EVP_PKEY *EVP_PKEY_CTX_get0_pkey(EVP_PKEY_CTX *ctx); + +EVP_PKEY *EVP_PKEY_CTX_get0_peerkey(EVP_PKEY_CTX *ctx); + +void EVP_PKEY_CTX_set_app_data(EVP_PKEY_CTX *ctx, void *data); +void *EVP_PKEY_CTX_get_app_data(EVP_PKEY_CTX *ctx); + +int EVP_PKEY_sign_init(EVP_PKEY_CTX *ctx); +int EVP_PKEY_sign(EVP_PKEY_CTX *ctx, + unsigned char *sig, size_t *siglen, + const unsigned char *tbs, size_t tbslen); +int EVP_PKEY_verify_init(EVP_PKEY_CTX *ctx); +int EVP_PKEY_verify(EVP_PKEY_CTX *ctx, + const unsigned char *sig, size_t siglen, + const unsigned char *tbs, size_t tbslen); +int EVP_PKEY_verify_recover_init(EVP_PKEY_CTX *ctx); +int EVP_PKEY_verify_recover(EVP_PKEY_CTX *ctx, + unsigned char *rout, size_t *routlen, + const unsigned char *sig, size_t siglen); +int EVP_PKEY_encrypt_init(EVP_PKEY_CTX *ctx); +int EVP_PKEY_encrypt(EVP_PKEY_CTX *ctx, + unsigned char *out, size_t *outlen, + const unsigned char *in, size_t inlen); +int EVP_PKEY_decrypt_init(EVP_PKEY_CTX *ctx); +int EVP_PKEY_decrypt(EVP_PKEY_CTX *ctx, + unsigned char *out, size_t *outlen, + const unsigned char *in, size_t inlen); + +int EVP_PKEY_derive_init(EVP_PKEY_CTX *ctx); +int EVP_PKEY_derive_set_peer(EVP_PKEY_CTX *ctx, EVP_PKEY *peer); +int EVP_PKEY_derive(EVP_PKEY_CTX *ctx, unsigned char *key, size_t *keylen); + +typedef int EVP_PKEY_gen_cb(EVP_PKEY_CTX *ctx); + +int EVP_PKEY_paramgen_init(EVP_PKEY_CTX *ctx); +int 
EVP_PKEY_paramgen(EVP_PKEY_CTX *ctx, EVP_PKEY **ppkey); +int EVP_PKEY_keygen_init(EVP_PKEY_CTX *ctx); +int EVP_PKEY_keygen(EVP_PKEY_CTX *ctx, EVP_PKEY **ppkey); +int EVP_PKEY_check(EVP_PKEY_CTX *ctx); +int EVP_PKEY_public_check(EVP_PKEY_CTX *ctx); +int EVP_PKEY_param_check(EVP_PKEY_CTX *ctx); + +void EVP_PKEY_CTX_set_cb(EVP_PKEY_CTX *ctx, EVP_PKEY_gen_cb *cb); +EVP_PKEY_gen_cb *EVP_PKEY_CTX_get_cb(EVP_PKEY_CTX *ctx); + +int EVP_PKEY_CTX_get_keygen_info(EVP_PKEY_CTX *ctx, int idx); + +void EVP_PKEY_meth_set_init(EVP_PKEY_METHOD *pmeth, + int (*init) (EVP_PKEY_CTX *ctx)); + +void EVP_PKEY_meth_set_copy(EVP_PKEY_METHOD *pmeth, + int (*copy) (EVP_PKEY_CTX *dst, + EVP_PKEY_CTX *src)); + +void EVP_PKEY_meth_set_cleanup(EVP_PKEY_METHOD *pmeth, + void (*cleanup) (EVP_PKEY_CTX *ctx)); + +void EVP_PKEY_meth_set_paramgen(EVP_PKEY_METHOD *pmeth, + int (*paramgen_init) (EVP_PKEY_CTX *ctx), + int (*paramgen) (EVP_PKEY_CTX *ctx, + EVP_PKEY *pkey)); + +void EVP_PKEY_meth_set_keygen(EVP_PKEY_METHOD *pmeth, + int (*keygen_init) (EVP_PKEY_CTX *ctx), + int (*keygen) (EVP_PKEY_CTX *ctx, + EVP_PKEY *pkey)); + +void EVP_PKEY_meth_set_sign(EVP_PKEY_METHOD *pmeth, + int (*sign_init) (EVP_PKEY_CTX *ctx), + int (*sign) (EVP_PKEY_CTX *ctx, + unsigned char *sig, size_t *siglen, + const unsigned char *tbs, + size_t tbslen)); + +void EVP_PKEY_meth_set_verify(EVP_PKEY_METHOD *pmeth, + int (*verify_init) (EVP_PKEY_CTX *ctx), + int (*verify) (EVP_PKEY_CTX *ctx, + const unsigned char *sig, + size_t siglen, + const unsigned char *tbs, + size_t tbslen)); + +void EVP_PKEY_meth_set_verify_recover(EVP_PKEY_METHOD *pmeth, + int (*verify_recover_init) (EVP_PKEY_CTX + *ctx), + int (*verify_recover) (EVP_PKEY_CTX + *ctx, + unsigned char + *sig, + size_t *siglen, + const unsigned + char *tbs, + size_t tbslen)); + +void EVP_PKEY_meth_set_signctx(EVP_PKEY_METHOD *pmeth, + int (*signctx_init) (EVP_PKEY_CTX *ctx, + EVP_MD_CTX *mctx), + int (*signctx) (EVP_PKEY_CTX *ctx, + unsigned char *sig, + size_t *siglen, + EVP_MD_CTX *mctx)); + +void EVP_PKEY_meth_set_verifyctx(EVP_PKEY_METHOD *pmeth, + int (*verifyctx_init) (EVP_PKEY_CTX *ctx, + EVP_MD_CTX *mctx), + int (*verifyctx) (EVP_PKEY_CTX *ctx, + const unsigned char *sig, + int siglen, + EVP_MD_CTX *mctx)); + +void EVP_PKEY_meth_set_encrypt(EVP_PKEY_METHOD *pmeth, + int (*encrypt_init) (EVP_PKEY_CTX *ctx), + int (*encryptfn) (EVP_PKEY_CTX *ctx, + unsigned char *out, + size_t *outlen, + const unsigned char *in, + size_t inlen)); + +void EVP_PKEY_meth_set_decrypt(EVP_PKEY_METHOD *pmeth, + int (*decrypt_init) (EVP_PKEY_CTX *ctx), + int (*decrypt) (EVP_PKEY_CTX *ctx, + unsigned char *out, + size_t *outlen, + const unsigned char *in, + size_t inlen)); + +void EVP_PKEY_meth_set_derive(EVP_PKEY_METHOD *pmeth, + int (*derive_init) (EVP_PKEY_CTX *ctx), + int (*derive) (EVP_PKEY_CTX *ctx, + unsigned char *key, + size_t *keylen)); + +void EVP_PKEY_meth_set_ctrl(EVP_PKEY_METHOD *pmeth, + int (*ctrl) (EVP_PKEY_CTX *ctx, int type, int p1, + void *p2), + int (*ctrl_str) (EVP_PKEY_CTX *ctx, + const char *type, + const char *value)); + +void EVP_PKEY_meth_set_digestsign(EVP_PKEY_METHOD *pmeth, + int (*digestsign) (EVP_MD_CTX *ctx, + unsigned char *sig, + size_t *siglen, + const unsigned char *tbs, + size_t tbslen)); + +void EVP_PKEY_meth_set_digestverify(EVP_PKEY_METHOD *pmeth, + int (*digestverify) (EVP_MD_CTX *ctx, + const unsigned char *sig, + size_t siglen, + const unsigned char *tbs, + size_t tbslen)); + +void EVP_PKEY_meth_set_check(EVP_PKEY_METHOD *pmeth, + int (*check) (EVP_PKEY *pkey)); + +void 
EVP_PKEY_meth_set_public_check(EVP_PKEY_METHOD *pmeth, + int (*check) (EVP_PKEY *pkey)); + +void EVP_PKEY_meth_set_param_check(EVP_PKEY_METHOD *pmeth, + int (*check) (EVP_PKEY *pkey)); + +void EVP_PKEY_meth_set_digest_custom(EVP_PKEY_METHOD *pmeth, + int (*digest_custom) (EVP_PKEY_CTX *ctx, + EVP_MD_CTX *mctx)); + +void EVP_PKEY_meth_get_init(const EVP_PKEY_METHOD *pmeth, + int (**pinit) (EVP_PKEY_CTX *ctx)); + +void EVP_PKEY_meth_get_copy(const EVP_PKEY_METHOD *pmeth, + int (**pcopy) (EVP_PKEY_CTX *dst, + EVP_PKEY_CTX *src)); + +void EVP_PKEY_meth_get_cleanup(const EVP_PKEY_METHOD *pmeth, + void (**pcleanup) (EVP_PKEY_CTX *ctx)); + +void EVP_PKEY_meth_get_paramgen(const EVP_PKEY_METHOD *pmeth, + int (**pparamgen_init) (EVP_PKEY_CTX *ctx), + int (**pparamgen) (EVP_PKEY_CTX *ctx, + EVP_PKEY *pkey)); + +void EVP_PKEY_meth_get_keygen(const EVP_PKEY_METHOD *pmeth, + int (**pkeygen_init) (EVP_PKEY_CTX *ctx), + int (**pkeygen) (EVP_PKEY_CTX *ctx, + EVP_PKEY *pkey)); + +void EVP_PKEY_meth_get_sign(const EVP_PKEY_METHOD *pmeth, + int (**psign_init) (EVP_PKEY_CTX *ctx), + int (**psign) (EVP_PKEY_CTX *ctx, + unsigned char *sig, size_t *siglen, + const unsigned char *tbs, + size_t tbslen)); + +void EVP_PKEY_meth_get_verify(const EVP_PKEY_METHOD *pmeth, + int (**pverify_init) (EVP_PKEY_CTX *ctx), + int (**pverify) (EVP_PKEY_CTX *ctx, + const unsigned char *sig, + size_t siglen, + const unsigned char *tbs, + size_t tbslen)); + +void EVP_PKEY_meth_get_verify_recover(const EVP_PKEY_METHOD *pmeth, + int (**pverify_recover_init) (EVP_PKEY_CTX + *ctx), + int (**pverify_recover) (EVP_PKEY_CTX + *ctx, + unsigned char + *sig, + size_t *siglen, + const unsigned + char *tbs, + size_t tbslen)); + +void EVP_PKEY_meth_get_signctx(const EVP_PKEY_METHOD *pmeth, + int (**psignctx_init) (EVP_PKEY_CTX *ctx, + EVP_MD_CTX *mctx), + int (**psignctx) (EVP_PKEY_CTX *ctx, + unsigned char *sig, + size_t *siglen, + EVP_MD_CTX *mctx)); + +void EVP_PKEY_meth_get_verifyctx(const EVP_PKEY_METHOD *pmeth, + int (**pverifyctx_init) (EVP_PKEY_CTX *ctx, + EVP_MD_CTX *mctx), + int (**pverifyctx) (EVP_PKEY_CTX *ctx, + const unsigned char *sig, + int siglen, + EVP_MD_CTX *mctx)); + +void EVP_PKEY_meth_get_encrypt(const EVP_PKEY_METHOD *pmeth, + int (**pencrypt_init) (EVP_PKEY_CTX *ctx), + int (**pencryptfn) (EVP_PKEY_CTX *ctx, + unsigned char *out, + size_t *outlen, + const unsigned char *in, + size_t inlen)); + +void EVP_PKEY_meth_get_decrypt(const EVP_PKEY_METHOD *pmeth, + int (**pdecrypt_init) (EVP_PKEY_CTX *ctx), + int (**pdecrypt) (EVP_PKEY_CTX *ctx, + unsigned char *out, + size_t *outlen, + const unsigned char *in, + size_t inlen)); + +void EVP_PKEY_meth_get_derive(const EVP_PKEY_METHOD *pmeth, + int (**pderive_init) (EVP_PKEY_CTX *ctx), + int (**pderive) (EVP_PKEY_CTX *ctx, + unsigned char *key, + size_t *keylen)); + +void EVP_PKEY_meth_get_ctrl(const EVP_PKEY_METHOD *pmeth, + int (**pctrl) (EVP_PKEY_CTX *ctx, int type, int p1, + void *p2), + int (**pctrl_str) (EVP_PKEY_CTX *ctx, + const char *type, + const char *value)); + +void EVP_PKEY_meth_get_digestsign(EVP_PKEY_METHOD *pmeth, + int (**digestsign) (EVP_MD_CTX *ctx, + unsigned char *sig, + size_t *siglen, + const unsigned char *tbs, + size_t tbslen)); + +void EVP_PKEY_meth_get_digestverify(EVP_PKEY_METHOD *pmeth, + int (**digestverify) (EVP_MD_CTX *ctx, + const unsigned char *sig, + size_t siglen, + const unsigned char *tbs, + size_t tbslen)); + +void EVP_PKEY_meth_get_check(const EVP_PKEY_METHOD *pmeth, + int (**pcheck) (EVP_PKEY *pkey)); + +void 
EVP_PKEY_meth_get_public_check(const EVP_PKEY_METHOD *pmeth, + int (**pcheck) (EVP_PKEY *pkey)); + +void EVP_PKEY_meth_get_param_check(const EVP_PKEY_METHOD *pmeth, + int (**pcheck) (EVP_PKEY *pkey)); + +void EVP_PKEY_meth_get_digest_custom(EVP_PKEY_METHOD *pmeth, + int (**pdigest_custom) (EVP_PKEY_CTX *ctx, + EVP_MD_CTX *mctx)); +void EVP_add_alg_module(void); + + +# ifdef __cplusplus +} +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/evperr.h b/third_party/openssl/uos/sw_64/include/openssl/evperr.h new file mode 100644 index 00000000..b4ea90ae --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/evperr.h @@ -0,0 +1,204 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2021 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_EVPERR_H +# define HEADER_EVPERR_H + +# include + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_EVP_strings(void); + +/* + * EVP function codes. + */ +# define EVP_F_AESNI_INIT_KEY 165 +# define EVP_F_AESNI_XTS_INIT_KEY 207 +# define EVP_F_AES_GCM_CTRL 196 +# define EVP_F_AES_INIT_KEY 133 +# define EVP_F_AES_OCB_CIPHER 169 +# define EVP_F_AES_T4_INIT_KEY 178 +# define EVP_F_AES_T4_XTS_INIT_KEY 208 +# define EVP_F_AES_WRAP_CIPHER 170 +# define EVP_F_AES_XTS_INIT_KEY 209 +# define EVP_F_ALG_MODULE_INIT 177 +# define EVP_F_ARIA_CCM_INIT_KEY 175 +# define EVP_F_ARIA_GCM_CTRL 197 +# define EVP_F_ARIA_GCM_INIT_KEY 176 +# define EVP_F_ARIA_INIT_KEY 185 +# define EVP_F_B64_NEW 198 +# define EVP_F_CAMELLIA_INIT_KEY 159 +# define EVP_F_CHACHA20_POLY1305_CTRL 182 +# define EVP_F_CMLL_T4_INIT_KEY 179 +# define EVP_F_DES_EDE3_WRAP_CIPHER 171 +# define EVP_F_DO_SIGVER_INIT 161 +# define EVP_F_ENC_NEW 199 +# define EVP_F_EVP_CIPHERINIT_EX 123 +# define EVP_F_EVP_CIPHER_ASN1_TO_PARAM 204 +# define EVP_F_EVP_CIPHER_CTX_COPY 163 +# define EVP_F_EVP_CIPHER_CTX_CTRL 124 +# define EVP_F_EVP_CIPHER_CTX_SET_KEY_LENGTH 122 +# define EVP_F_EVP_CIPHER_PARAM_TO_ASN1 205 +# define EVP_F_EVP_DECRYPTFINAL_EX 101 +# define EVP_F_EVP_DECRYPTUPDATE 166 +# define EVP_F_EVP_DIGESTFINALXOF 174 +# define EVP_F_EVP_DIGESTINIT_EX 128 +# define EVP_F_EVP_ENCRYPTDECRYPTUPDATE 219 +# define EVP_F_EVP_ENCRYPTFINAL_EX 127 +# define EVP_F_EVP_ENCRYPTUPDATE 167 +# define EVP_F_EVP_MD_CTX_COPY_EX 110 +# define EVP_F_EVP_MD_SIZE 162 +# define EVP_F_EVP_OPENINIT 102 +# define EVP_F_EVP_PBE_ALG_ADD 115 +# define EVP_F_EVP_PBE_ALG_ADD_TYPE 160 +# define EVP_F_EVP_PBE_CIPHERINIT 116 +# define EVP_F_EVP_PBE_SCRYPT 181 +# define EVP_F_EVP_PKCS82PKEY 111 +# define EVP_F_EVP_PKEY2PKCS8 113 +# define EVP_F_EVP_PKEY_ASN1_ADD0 188 +# define EVP_F_EVP_PKEY_CHECK 186 +# define EVP_F_EVP_PKEY_COPY_PARAMETERS 103 +# define EVP_F_EVP_PKEY_CTX_CTRL 137 +# define EVP_F_EVP_PKEY_CTX_CTRL_STR 150 +# define EVP_F_EVP_PKEY_CTX_DUP 156 +# define EVP_F_EVP_PKEY_CTX_MD 168 +# define EVP_F_EVP_PKEY_DECRYPT 104 +# define EVP_F_EVP_PKEY_DECRYPT_INIT 138 +# define EVP_F_EVP_PKEY_DECRYPT_OLD 151 +# define EVP_F_EVP_PKEY_DERIVE 153 +# define EVP_F_EVP_PKEY_DERIVE_INIT 154 +# define EVP_F_EVP_PKEY_DERIVE_SET_PEER 155 +# define EVP_F_EVP_PKEY_ENCRYPT 105 +# define EVP_F_EVP_PKEY_ENCRYPT_INIT 139 +# define EVP_F_EVP_PKEY_ENCRYPT_OLD 152 +# define EVP_F_EVP_PKEY_GET0_DH 119 +# define EVP_F_EVP_PKEY_GET0_DSA 
120 +# define EVP_F_EVP_PKEY_GET0_EC_KEY 131 +# define EVP_F_EVP_PKEY_GET0_HMAC 183 +# define EVP_F_EVP_PKEY_GET0_POLY1305 184 +# define EVP_F_EVP_PKEY_GET0_RSA 121 +# define EVP_F_EVP_PKEY_GET0_SIPHASH 172 +# define EVP_F_EVP_PKEY_GET_RAW_PRIVATE_KEY 202 +# define EVP_F_EVP_PKEY_GET_RAW_PUBLIC_KEY 203 +# define EVP_F_EVP_PKEY_KEYGEN 146 +# define EVP_F_EVP_PKEY_KEYGEN_INIT 147 +# define EVP_F_EVP_PKEY_METH_ADD0 194 +# define EVP_F_EVP_PKEY_METH_NEW 195 +# define EVP_F_EVP_PKEY_NEW 106 +# define EVP_F_EVP_PKEY_NEW_CMAC_KEY 193 +# define EVP_F_EVP_PKEY_NEW_RAW_PRIVATE_KEY 191 +# define EVP_F_EVP_PKEY_NEW_RAW_PUBLIC_KEY 192 +# define EVP_F_EVP_PKEY_PARAMGEN 148 +# define EVP_F_EVP_PKEY_PARAMGEN_INIT 149 +# define EVP_F_EVP_PKEY_PARAM_CHECK 189 +# define EVP_F_EVP_PKEY_PUBLIC_CHECK 190 +# define EVP_F_EVP_PKEY_SET1_ENGINE 187 +# define EVP_F_EVP_PKEY_SET_ALIAS_TYPE 206 +# define EVP_F_EVP_PKEY_SIGN 140 +# define EVP_F_EVP_PKEY_SIGN_INIT 141 +# define EVP_F_EVP_PKEY_VERIFY 142 +# define EVP_F_EVP_PKEY_VERIFY_INIT 143 +# define EVP_F_EVP_PKEY_VERIFY_RECOVER 144 +# define EVP_F_EVP_PKEY_VERIFY_RECOVER_INIT 145 +# define EVP_F_EVP_SIGNFINAL 107 +# define EVP_F_EVP_VERIFYFINAL 108 +# define EVP_F_INT_CTX_NEW 157 +# define EVP_F_OK_NEW 200 +# define EVP_F_PKCS5_PBE_KEYIVGEN 117 +# define EVP_F_PKCS5_V2_PBE_KEYIVGEN 118 +# define EVP_F_PKCS5_V2_PBKDF2_KEYIVGEN 164 +# define EVP_F_PKCS5_V2_SCRYPT_KEYIVGEN 180 +# define EVP_F_PKEY_SET_TYPE 158 +# define EVP_F_RC2_MAGIC_TO_METH 109 +# define EVP_F_RC5_CTRL 125 +# define EVP_F_R_32_12_16_INIT_KEY 242 +# define EVP_F_S390X_AES_GCM_CTRL 201 +# define EVP_F_UPDATE 173 + +/* + * EVP reason codes. + */ +# define EVP_R_AES_KEY_SETUP_FAILED 143 +# define EVP_R_ARIA_KEY_SETUP_FAILED 176 +# define EVP_R_BAD_DECRYPT 100 +# define EVP_R_BAD_KEY_LENGTH 195 +# define EVP_R_BUFFER_TOO_SMALL 155 +# define EVP_R_CAMELLIA_KEY_SETUP_FAILED 157 +# define EVP_R_CIPHER_PARAMETER_ERROR 122 +# define EVP_R_COMMAND_NOT_SUPPORTED 147 +# define EVP_R_COPY_ERROR 173 +# define EVP_R_CTRL_NOT_IMPLEMENTED 132 +# define EVP_R_CTRL_OPERATION_NOT_IMPLEMENTED 133 +# define EVP_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH 138 +# define EVP_R_DECODE_ERROR 114 +# define EVP_R_DIFFERENT_KEY_TYPES 101 +# define EVP_R_DIFFERENT_PARAMETERS 153 +# define EVP_R_ERROR_LOADING_SECTION 165 +# define EVP_R_ERROR_SETTING_FIPS_MODE 166 +# define EVP_R_EXPECTING_AN_HMAC_KEY 174 +# define EVP_R_EXPECTING_AN_RSA_KEY 127 +# define EVP_R_EXPECTING_A_DH_KEY 128 +# define EVP_R_EXPECTING_A_DSA_KEY 129 +# define EVP_R_EXPECTING_A_EC_KEY 142 +# define EVP_R_EXPECTING_A_POLY1305_KEY 164 +# define EVP_R_EXPECTING_A_SIPHASH_KEY 175 +# define EVP_R_FIPS_MODE_NOT_SUPPORTED 167 +# define EVP_R_GET_RAW_KEY_FAILED 182 +# define EVP_R_ILLEGAL_SCRYPT_PARAMETERS 171 +# define EVP_R_INITIALIZATION_ERROR 134 +# define EVP_R_INPUT_NOT_INITIALIZED 111 +# define EVP_R_INVALID_DIGEST 152 +# define EVP_R_INVALID_FIPS_MODE 168 +# define EVP_R_INVALID_IV_LENGTH 194 +# define EVP_R_INVALID_KEY 163 +# define EVP_R_INVALID_KEY_LENGTH 130 +# define EVP_R_INVALID_OPERATION 148 +# define EVP_R_KEYGEN_FAILURE 120 +# define EVP_R_KEY_SETUP_FAILED 180 +# define EVP_R_MEMORY_LIMIT_EXCEEDED 172 +# define EVP_R_MESSAGE_DIGEST_IS_NULL 159 +# define EVP_R_METHOD_NOT_SUPPORTED 144 +# define EVP_R_MISSING_PARAMETERS 103 +# define EVP_R_NOT_XOF_OR_INVALID_LENGTH 178 +# define EVP_R_NO_CIPHER_SET 131 +# define EVP_R_NO_DEFAULT_DIGEST 158 +# define EVP_R_NO_DIGEST_SET 139 +# define EVP_R_NO_KEY_SET 154 +# define EVP_R_NO_OPERATION_SET 149 +# define 
EVP_R_ONLY_ONESHOT_SUPPORTED 177 +# define EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE 150 +# define EVP_R_OPERATON_NOT_INITIALIZED 151 +# define EVP_R_OUTPUT_WOULD_OVERFLOW 184 +# define EVP_R_PARTIALLY_OVERLAPPING 162 +# define EVP_R_PBKDF2_ERROR 181 +# define EVP_R_PKEY_APPLICATION_ASN1_METHOD_ALREADY_REGISTERED 179 +# define EVP_R_PRIVATE_KEY_DECODE_ERROR 145 +# define EVP_R_PRIVATE_KEY_ENCODE_ERROR 146 +# define EVP_R_PUBLIC_KEY_NOT_RSA 106 +# define EVP_R_UNKNOWN_CIPHER 160 +# define EVP_R_UNKNOWN_DIGEST 161 +# define EVP_R_UNKNOWN_OPTION 169 +# define EVP_R_UNKNOWN_PBE_ALGORITHM 121 +# define EVP_R_UNSUPPORTED_ALGORITHM 156 +# define EVP_R_UNSUPPORTED_CIPHER 107 +# define EVP_R_UNSUPPORTED_KEYLENGTH 123 +# define EVP_R_UNSUPPORTED_KEY_DERIVATION_FUNCTION 124 +# define EVP_R_UNSUPPORTED_KEY_SIZE 108 +# define EVP_R_UNSUPPORTED_NUMBER_OF_ROUNDS 135 +# define EVP_R_UNSUPPORTED_PRF 125 +# define EVP_R_UNSUPPORTED_PRIVATE_KEY_ALGORITHM 118 +# define EVP_R_UNSUPPORTED_SALT_TYPE 126 +# define EVP_R_WRAP_MODE_NOT_ALLOWED 170 +# define EVP_R_WRONG_FINAL_BLOCK_LENGTH 109 +# define EVP_R_XTS_DUPLICATED_KEYS 183 + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/hmac.h b/third_party/openssl/uos/sw_64/include/openssl/hmac.h new file mode 100644 index 00000000..458efc1d --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/hmac.h @@ -0,0 +1,51 @@ +/* + * Copyright 1995-2018 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_HMAC_H +# define HEADER_HMAC_H + +# include + +# include + +# if OPENSSL_API_COMPAT < 0x10200000L +# define HMAC_MAX_MD_CBLOCK 128 /* Deprecated */ +# endif + +#ifdef __cplusplus +extern "C" { +#endif + +size_t HMAC_size(const HMAC_CTX *e); +HMAC_CTX *HMAC_CTX_new(void); +int HMAC_CTX_reset(HMAC_CTX *ctx); +void HMAC_CTX_free(HMAC_CTX *ctx); + +DEPRECATEDIN_1_1_0(__owur int HMAC_Init(HMAC_CTX *ctx, const void *key, int len, + const EVP_MD *md)) + +/*__owur*/ int HMAC_Init_ex(HMAC_CTX *ctx, const void *key, int len, + const EVP_MD *md, ENGINE *impl); +/*__owur*/ int HMAC_Update(HMAC_CTX *ctx, const unsigned char *data, + size_t len); +/*__owur*/ int HMAC_Final(HMAC_CTX *ctx, unsigned char *md, + unsigned int *len); +unsigned char *HMAC(const EVP_MD *evp_md, const void *key, int key_len, + const unsigned char *d, size_t n, unsigned char *md, + unsigned int *md_len); +__owur int HMAC_CTX_copy(HMAC_CTX *dctx, HMAC_CTX *sctx); + +void HMAC_CTX_set_flags(HMAC_CTX *ctx, unsigned long flags); +const EVP_MD *HMAC_CTX_get_md(const HMAC_CTX *ctx); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/idea.h b/third_party/openssl/uos/sw_64/include/openssl/idea.h new file mode 100644 index 00000000..4334f3ea --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/idea.h @@ -0,0 +1,64 @@ +/* + * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_IDEA_H +# define HEADER_IDEA_H + +# include + +# ifndef OPENSSL_NO_IDEA +# ifdef __cplusplus +extern "C" { +# endif + +typedef unsigned int IDEA_INT; + +# define IDEA_ENCRYPT 1 +# define IDEA_DECRYPT 0 + +# define IDEA_BLOCK 8 +# define IDEA_KEY_LENGTH 16 + +typedef struct idea_key_st { + IDEA_INT data[9][6]; +} IDEA_KEY_SCHEDULE; + +const char *IDEA_options(void); +void IDEA_ecb_encrypt(const unsigned char *in, unsigned char *out, + IDEA_KEY_SCHEDULE *ks); +void IDEA_set_encrypt_key(const unsigned char *key, IDEA_KEY_SCHEDULE *ks); +void IDEA_set_decrypt_key(IDEA_KEY_SCHEDULE *ek, IDEA_KEY_SCHEDULE *dk); +void IDEA_cbc_encrypt(const unsigned char *in, unsigned char *out, + long length, IDEA_KEY_SCHEDULE *ks, unsigned char *iv, + int enc); +void IDEA_cfb64_encrypt(const unsigned char *in, unsigned char *out, + long length, IDEA_KEY_SCHEDULE *ks, unsigned char *iv, + int *num, int enc); +void IDEA_ofb64_encrypt(const unsigned char *in, unsigned char *out, + long length, IDEA_KEY_SCHEDULE *ks, unsigned char *iv, + int *num); +void IDEA_encrypt(unsigned long *in, IDEA_KEY_SCHEDULE *ks); + +# if OPENSSL_API_COMPAT < 0x10100000L +# define idea_options IDEA_options +# define idea_ecb_encrypt IDEA_ecb_encrypt +# define idea_set_encrypt_key IDEA_set_encrypt_key +# define idea_set_decrypt_key IDEA_set_decrypt_key +# define idea_cbc_encrypt IDEA_cbc_encrypt +# define idea_cfb64_encrypt IDEA_cfb64_encrypt +# define idea_ofb64_encrypt IDEA_ofb64_encrypt +# define idea_encrypt IDEA_encrypt +# endif + +# ifdef __cplusplus +} +# endif +# endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/kdf.h b/third_party/openssl/uos/sw_64/include/openssl/kdf.h new file mode 100644 index 00000000..5abd4c37 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/kdf.h @@ -0,0 +1,97 @@ +/* + * Copyright 2016-2018 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_KDF_H +# define HEADER_KDF_H + +# include +#ifdef __cplusplus +extern "C" { +#endif + +# define EVP_PKEY_CTRL_TLS_MD (EVP_PKEY_ALG_CTRL) +# define EVP_PKEY_CTRL_TLS_SECRET (EVP_PKEY_ALG_CTRL + 1) +# define EVP_PKEY_CTRL_TLS_SEED (EVP_PKEY_ALG_CTRL + 2) +# define EVP_PKEY_CTRL_HKDF_MD (EVP_PKEY_ALG_CTRL + 3) +# define EVP_PKEY_CTRL_HKDF_SALT (EVP_PKEY_ALG_CTRL + 4) +# define EVP_PKEY_CTRL_HKDF_KEY (EVP_PKEY_ALG_CTRL + 5) +# define EVP_PKEY_CTRL_HKDF_INFO (EVP_PKEY_ALG_CTRL + 6) +# define EVP_PKEY_CTRL_HKDF_MODE (EVP_PKEY_ALG_CTRL + 7) +# define EVP_PKEY_CTRL_PASS (EVP_PKEY_ALG_CTRL + 8) +# define EVP_PKEY_CTRL_SCRYPT_SALT (EVP_PKEY_ALG_CTRL + 9) +# define EVP_PKEY_CTRL_SCRYPT_N (EVP_PKEY_ALG_CTRL + 10) +# define EVP_PKEY_CTRL_SCRYPT_R (EVP_PKEY_ALG_CTRL + 11) +# define EVP_PKEY_CTRL_SCRYPT_P (EVP_PKEY_ALG_CTRL + 12) +# define EVP_PKEY_CTRL_SCRYPT_MAXMEM_BYTES (EVP_PKEY_ALG_CTRL + 13) + +# define EVP_PKEY_HKDEF_MODE_EXTRACT_AND_EXPAND 0 +# define EVP_PKEY_HKDEF_MODE_EXTRACT_ONLY 1 +# define EVP_PKEY_HKDEF_MODE_EXPAND_ONLY 2 + +# define EVP_PKEY_CTX_set_tls1_prf_md(pctx, md) \ + EVP_PKEY_CTX_ctrl(pctx, -1, EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_TLS_MD, 0, (void *)(md)) + +# define EVP_PKEY_CTX_set1_tls1_prf_secret(pctx, sec, seclen) \ + EVP_PKEY_CTX_ctrl(pctx, -1, EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_TLS_SECRET, seclen, (void *)(sec)) + +# define EVP_PKEY_CTX_add1_tls1_prf_seed(pctx, seed, seedlen) \ + EVP_PKEY_CTX_ctrl(pctx, -1, EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_TLS_SEED, seedlen, (void *)(seed)) + +# define EVP_PKEY_CTX_set_hkdf_md(pctx, md) \ + EVP_PKEY_CTX_ctrl(pctx, -1, EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_HKDF_MD, 0, (void *)(md)) + +# define EVP_PKEY_CTX_set1_hkdf_salt(pctx, salt, saltlen) \ + EVP_PKEY_CTX_ctrl(pctx, -1, EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_HKDF_SALT, saltlen, (void *)(salt)) + +# define EVP_PKEY_CTX_set1_hkdf_key(pctx, key, keylen) \ + EVP_PKEY_CTX_ctrl(pctx, -1, EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_HKDF_KEY, keylen, (void *)(key)) + +# define EVP_PKEY_CTX_add1_hkdf_info(pctx, info, infolen) \ + EVP_PKEY_CTX_ctrl(pctx, -1, EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_HKDF_INFO, infolen, (void *)(info)) + +# define EVP_PKEY_CTX_hkdf_mode(pctx, mode) \ + EVP_PKEY_CTX_ctrl(pctx, -1, EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_HKDF_MODE, mode, NULL) + +# define EVP_PKEY_CTX_set1_pbe_pass(pctx, pass, passlen) \ + EVP_PKEY_CTX_ctrl(pctx, -1, EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_PASS, passlen, (void *)(pass)) + +# define EVP_PKEY_CTX_set1_scrypt_salt(pctx, salt, saltlen) \ + EVP_PKEY_CTX_ctrl(pctx, -1, EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_SCRYPT_SALT, saltlen, (void *)(salt)) + +# define EVP_PKEY_CTX_set_scrypt_N(pctx, n) \ + EVP_PKEY_CTX_ctrl_uint64(pctx, -1, EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_SCRYPT_N, n) + +# define EVP_PKEY_CTX_set_scrypt_r(pctx, r) \ + EVP_PKEY_CTX_ctrl_uint64(pctx, -1, EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_SCRYPT_R, r) + +# define EVP_PKEY_CTX_set_scrypt_p(pctx, p) \ + EVP_PKEY_CTX_ctrl_uint64(pctx, -1, EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_SCRYPT_P, p) + +# define EVP_PKEY_CTX_set_scrypt_maxmem_bytes(pctx, maxmem_bytes) \ + EVP_PKEY_CTX_ctrl_uint64(pctx, -1, EVP_PKEY_OP_DERIVE, \ + EVP_PKEY_CTRL_SCRYPT_MAXMEM_BYTES, maxmem_bytes) + + +# ifdef __cplusplus +} +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/kdferr.h b/third_party/openssl/uos/sw_64/include/openssl/kdferr.h new file 
mode 100644 index 00000000..3f51bd02 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/kdferr.h @@ -0,0 +1,55 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_KDFERR_H +# define HEADER_KDFERR_H + +# ifndef HEADER_SYMHACKS_H +# include +# endif + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_KDF_strings(void); + +/* + * KDF function codes. + */ +# define KDF_F_PKEY_HKDF_CTRL_STR 103 +# define KDF_F_PKEY_HKDF_DERIVE 102 +# define KDF_F_PKEY_HKDF_INIT 108 +# define KDF_F_PKEY_SCRYPT_CTRL_STR 104 +# define KDF_F_PKEY_SCRYPT_CTRL_UINT64 105 +# define KDF_F_PKEY_SCRYPT_DERIVE 109 +# define KDF_F_PKEY_SCRYPT_INIT 106 +# define KDF_F_PKEY_SCRYPT_SET_MEMBUF 107 +# define KDF_F_PKEY_TLS1_PRF_CTRL_STR 100 +# define KDF_F_PKEY_TLS1_PRF_DERIVE 101 +# define KDF_F_PKEY_TLS1_PRF_INIT 110 +# define KDF_F_TLS1_PRF_ALG 111 + +/* + * KDF reason codes. + */ +# define KDF_R_INVALID_DIGEST 100 +# define KDF_R_MISSING_ITERATION_COUNT 109 +# define KDF_R_MISSING_KEY 104 +# define KDF_R_MISSING_MESSAGE_DIGEST 105 +# define KDF_R_MISSING_PARAMETER 101 +# define KDF_R_MISSING_PASS 110 +# define KDF_R_MISSING_SALT 111 +# define KDF_R_MISSING_SECRET 107 +# define KDF_R_MISSING_SEED 106 +# define KDF_R_UNKNOWN_PARAMETER_TYPE 103 +# define KDF_R_VALUE_ERROR 108 +# define KDF_R_VALUE_MISSING 102 + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/lhash.h b/third_party/openssl/uos/sw_64/include/openssl/lhash.h new file mode 100644 index 00000000..2e42d727 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/lhash.h @@ -0,0 +1,241 @@ +/* + * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +/* + * Header for dynamic hash table routines Author - Eric Young + */ + +#ifndef HEADER_LHASH_H +# define HEADER_LHASH_H + +# include +# include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct lhash_node_st OPENSSL_LH_NODE; +typedef int (*OPENSSL_LH_COMPFUNC) (const void *, const void *); +typedef unsigned long (*OPENSSL_LH_HASHFUNC) (const void *); +typedef void (*OPENSSL_LH_DOALL_FUNC) (void *); +typedef void (*OPENSSL_LH_DOALL_FUNCARG) (void *, void *); +typedef struct lhash_st OPENSSL_LHASH; + +/* + * Macros for declaring and implementing type-safe wrappers for LHASH + * callbacks. This way, callbacks can be provided to LHASH structures without + * function pointer casting and the macro-defined callbacks provide + * per-variable casting before deferring to the underlying type-specific + * callbacks. NB: It is possible to place a "static" in front of both the + * DECLARE and IMPLEMENT macros if the functions are strictly internal. 
+ */ + +/* First: "hash" functions */ +# define DECLARE_LHASH_HASH_FN(name, o_type) \ + unsigned long name##_LHASH_HASH(const void *); +# define IMPLEMENT_LHASH_HASH_FN(name, o_type) \ + unsigned long name##_LHASH_HASH(const void *arg) { \ + const o_type *a = arg; \ + return name##_hash(a); } +# define LHASH_HASH_FN(name) name##_LHASH_HASH + +/* Second: "compare" functions */ +# define DECLARE_LHASH_COMP_FN(name, o_type) \ + int name##_LHASH_COMP(const void *, const void *); +# define IMPLEMENT_LHASH_COMP_FN(name, o_type) \ + int name##_LHASH_COMP(const void *arg1, const void *arg2) { \ + const o_type *a = arg1; \ + const o_type *b = arg2; \ + return name##_cmp(a,b); } +# define LHASH_COMP_FN(name) name##_LHASH_COMP + +/* Fourth: "doall_arg" functions */ +# define DECLARE_LHASH_DOALL_ARG_FN(name, o_type, a_type) \ + void name##_LHASH_DOALL_ARG(void *, void *); +# define IMPLEMENT_LHASH_DOALL_ARG_FN(name, o_type, a_type) \ + void name##_LHASH_DOALL_ARG(void *arg1, void *arg2) { \ + o_type *a = arg1; \ + a_type *b = arg2; \ + name##_doall_arg(a, b); } +# define LHASH_DOALL_ARG_FN(name) name##_LHASH_DOALL_ARG + + +# define LH_LOAD_MULT 256 + +int OPENSSL_LH_error(OPENSSL_LHASH *lh); +OPENSSL_LHASH *OPENSSL_LH_new(OPENSSL_LH_HASHFUNC h, OPENSSL_LH_COMPFUNC c); +void OPENSSL_LH_free(OPENSSL_LHASH *lh); +void *OPENSSL_LH_insert(OPENSSL_LHASH *lh, void *data); +void *OPENSSL_LH_delete(OPENSSL_LHASH *lh, const void *data); +void *OPENSSL_LH_retrieve(OPENSSL_LHASH *lh, const void *data); +void OPENSSL_LH_doall(OPENSSL_LHASH *lh, OPENSSL_LH_DOALL_FUNC func); +void OPENSSL_LH_doall_arg(OPENSSL_LHASH *lh, OPENSSL_LH_DOALL_FUNCARG func, void *arg); +unsigned long OPENSSL_LH_strhash(const char *c); +unsigned long OPENSSL_LH_num_items(const OPENSSL_LHASH *lh); +unsigned long OPENSSL_LH_get_down_load(const OPENSSL_LHASH *lh); +void OPENSSL_LH_set_down_load(OPENSSL_LHASH *lh, unsigned long down_load); + +# ifndef OPENSSL_NO_STDIO +void OPENSSL_LH_stats(const OPENSSL_LHASH *lh, FILE *fp); +void OPENSSL_LH_node_stats(const OPENSSL_LHASH *lh, FILE *fp); +void OPENSSL_LH_node_usage_stats(const OPENSSL_LHASH *lh, FILE *fp); +# endif +void OPENSSL_LH_stats_bio(const OPENSSL_LHASH *lh, BIO *out); +void OPENSSL_LH_node_stats_bio(const OPENSSL_LHASH *lh, BIO *out); +void OPENSSL_LH_node_usage_stats_bio(const OPENSSL_LHASH *lh, BIO *out); + +# if OPENSSL_API_COMPAT < 0x10100000L +# define _LHASH OPENSSL_LHASH +# define LHASH_NODE OPENSSL_LH_NODE +# define lh_error OPENSSL_LH_error +# define lh_new OPENSSL_LH_new +# define lh_free OPENSSL_LH_free +# define lh_insert OPENSSL_LH_insert +# define lh_delete OPENSSL_LH_delete +# define lh_retrieve OPENSSL_LH_retrieve +# define lh_doall OPENSSL_LH_doall +# define lh_doall_arg OPENSSL_LH_doall_arg +# define lh_strhash OPENSSL_LH_strhash +# define lh_num_items OPENSSL_LH_num_items +# ifndef OPENSSL_NO_STDIO +# define lh_stats OPENSSL_LH_stats +# define lh_node_stats OPENSSL_LH_node_stats +# define lh_node_usage_stats OPENSSL_LH_node_usage_stats +# endif +# define lh_stats_bio OPENSSL_LH_stats_bio +# define lh_node_stats_bio OPENSSL_LH_node_stats_bio +# define lh_node_usage_stats_bio OPENSSL_LH_node_usage_stats_bio +# endif + +/* Type checking... 
*/ + +# define LHASH_OF(type) struct lhash_st_##type + +# define DEFINE_LHASH_OF(type) \ + LHASH_OF(type) { union lh_##type##_dummy { void* d1; unsigned long d2; int d3; } dummy; }; \ + static ossl_unused ossl_inline LHASH_OF(type) *lh_##type##_new(unsigned long (*hfn)(const type *), \ + int (*cfn)(const type *, const type *)) \ + { \ + return (LHASH_OF(type) *) \ + OPENSSL_LH_new((OPENSSL_LH_HASHFUNC)hfn, (OPENSSL_LH_COMPFUNC)cfn); \ + } \ + static ossl_unused ossl_inline void lh_##type##_free(LHASH_OF(type) *lh) \ + { \ + OPENSSL_LH_free((OPENSSL_LHASH *)lh); \ + } \ + static ossl_unused ossl_inline type *lh_##type##_insert(LHASH_OF(type) *lh, type *d) \ + { \ + return (type *)OPENSSL_LH_insert((OPENSSL_LHASH *)lh, d); \ + } \ + static ossl_unused ossl_inline type *lh_##type##_delete(LHASH_OF(type) *lh, const type *d) \ + { \ + return (type *)OPENSSL_LH_delete((OPENSSL_LHASH *)lh, d); \ + } \ + static ossl_unused ossl_inline type *lh_##type##_retrieve(LHASH_OF(type) *lh, const type *d) \ + { \ + return (type *)OPENSSL_LH_retrieve((OPENSSL_LHASH *)lh, d); \ + } \ + static ossl_unused ossl_inline int lh_##type##_error(LHASH_OF(type) *lh) \ + { \ + return OPENSSL_LH_error((OPENSSL_LHASH *)lh); \ + } \ + static ossl_unused ossl_inline unsigned long lh_##type##_num_items(LHASH_OF(type) *lh) \ + { \ + return OPENSSL_LH_num_items((OPENSSL_LHASH *)lh); \ + } \ + static ossl_unused ossl_inline void lh_##type##_node_stats_bio(const LHASH_OF(type) *lh, BIO *out) \ + { \ + OPENSSL_LH_node_stats_bio((const OPENSSL_LHASH *)lh, out); \ + } \ + static ossl_unused ossl_inline void lh_##type##_node_usage_stats_bio(const LHASH_OF(type) *lh, BIO *out) \ + { \ + OPENSSL_LH_node_usage_stats_bio((const OPENSSL_LHASH *)lh, out); \ + } \ + static ossl_unused ossl_inline void lh_##type##_stats_bio(const LHASH_OF(type) *lh, BIO *out) \ + { \ + OPENSSL_LH_stats_bio((const OPENSSL_LHASH *)lh, out); \ + } \ + static ossl_unused ossl_inline unsigned long lh_##type##_get_down_load(LHASH_OF(type) *lh) \ + { \ + return OPENSSL_LH_get_down_load((OPENSSL_LHASH *)lh); \ + } \ + static ossl_unused ossl_inline void lh_##type##_set_down_load(LHASH_OF(type) *lh, unsigned long dl) \ + { \ + OPENSSL_LH_set_down_load((OPENSSL_LHASH *)lh, dl); \ + } \ + static ossl_unused ossl_inline void lh_##type##_doall(LHASH_OF(type) *lh, \ + void (*doall)(type *)) \ + { \ + OPENSSL_LH_doall((OPENSSL_LHASH *)lh, (OPENSSL_LH_DOALL_FUNC)doall); \ + } \ + LHASH_OF(type) + +#define IMPLEMENT_LHASH_DOALL_ARG_CONST(type, argtype) \ + int_implement_lhash_doall(type, argtype, const type) + +#define IMPLEMENT_LHASH_DOALL_ARG(type, argtype) \ + int_implement_lhash_doall(type, argtype, type) + +#define int_implement_lhash_doall(type, argtype, cbargtype) \ + static ossl_unused ossl_inline void \ + lh_##type##_doall_##argtype(LHASH_OF(type) *lh, \ + void (*fn)(cbargtype *, argtype *), \ + argtype *arg) \ + { \ + OPENSSL_LH_doall_arg((OPENSSL_LHASH *)lh, (OPENSSL_LH_DOALL_FUNCARG)fn, (void *)arg); \ + } \ + LHASH_OF(type) + +DEFINE_LHASH_OF(OPENSSL_STRING); +# ifdef _MSC_VER +/* + * push and pop this warning: + * warning C4090: 'function': different 'const' qualifiers + */ +# pragma warning (push) +# pragma warning (disable: 4090) +# endif + +DEFINE_LHASH_OF(OPENSSL_CSTRING); + +# ifdef _MSC_VER +# pragma warning (pop) +# endif + +/* + * If called without higher optimization (min. -xO3) the Oracle Developer + * Studio compiler generates code for the defined (static inline) functions + * above. 
+ * This would later lead to the linker complaining about missing symbols when + * this header file is included but the resulting object is not linked against + * the Crypto library (openssl#6912). + */ +# ifdef __SUNPRO_C +# pragma weak OPENSSL_LH_new +# pragma weak OPENSSL_LH_free +# pragma weak OPENSSL_LH_insert +# pragma weak OPENSSL_LH_delete +# pragma weak OPENSSL_LH_retrieve +# pragma weak OPENSSL_LH_error +# pragma weak OPENSSL_LH_num_items +# pragma weak OPENSSL_LH_node_stats_bio +# pragma weak OPENSSL_LH_node_usage_stats_bio +# pragma weak OPENSSL_LH_stats_bio +# pragma weak OPENSSL_LH_get_down_load +# pragma weak OPENSSL_LH_set_down_load +# pragma weak OPENSSL_LH_doall +# pragma weak OPENSSL_LH_doall_arg +# endif /* __SUNPRO_C */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/md2.h b/third_party/openssl/uos/sw_64/include/openssl/md2.h new file mode 100644 index 00000000..7faf8e3d --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/md2.h @@ -0,0 +1,44 @@ +/* + * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_MD2_H +# define HEADER_MD2_H + +# include + +# ifndef OPENSSL_NO_MD2 +# include +# ifdef __cplusplus +extern "C" { +# endif + +typedef unsigned char MD2_INT; + +# define MD2_DIGEST_LENGTH 16 +# define MD2_BLOCK 16 + +typedef struct MD2state_st { + unsigned int num; + unsigned char data[MD2_BLOCK]; + MD2_INT cksm[MD2_BLOCK]; + MD2_INT state[MD2_BLOCK]; +} MD2_CTX; + +const char *MD2_options(void); +int MD2_Init(MD2_CTX *c); +int MD2_Update(MD2_CTX *c, const unsigned char *data, size_t len); +int MD2_Final(unsigned char *md, MD2_CTX *c); +unsigned char *MD2(const unsigned char *d, size_t n, unsigned char *md); + +# ifdef __cplusplus +} +# endif +# endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/md4.h b/third_party/openssl/uos/sw_64/include/openssl/md4.h new file mode 100644 index 00000000..940e29db --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/md4.h @@ -0,0 +1,51 @@ +/* + * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_MD4_H +# define HEADER_MD4_H + +# include + +# ifndef OPENSSL_NO_MD4 +# include +# include +# ifdef __cplusplus +extern "C" { +# endif + +/*- + * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + * ! MD4_LONG has to be at least 32 bits wide. ! + * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
+ */ +# define MD4_LONG unsigned int + +# define MD4_CBLOCK 64 +# define MD4_LBLOCK (MD4_CBLOCK/4) +# define MD4_DIGEST_LENGTH 16 + +typedef struct MD4state_st { + MD4_LONG A, B, C, D; + MD4_LONG Nl, Nh; + MD4_LONG data[MD4_LBLOCK]; + unsigned int num; +} MD4_CTX; + +int MD4_Init(MD4_CTX *c); +int MD4_Update(MD4_CTX *c, const void *data, size_t len); +int MD4_Final(unsigned char *md, MD4_CTX *c); +unsigned char *MD4(const unsigned char *d, size_t n, unsigned char *md); +void MD4_Transform(MD4_CTX *c, const unsigned char *b); + +# ifdef __cplusplus +} +# endif +# endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/md5.h b/third_party/openssl/uos/sw_64/include/openssl/md5.h new file mode 100644 index 00000000..2deb7721 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/md5.h @@ -0,0 +1,50 @@ +/* + * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_MD5_H +# define HEADER_MD5_H + +# include + +# ifndef OPENSSL_NO_MD5 +# include +# include +# ifdef __cplusplus +extern "C" { +# endif + +/* + * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + * ! MD5_LONG has to be at least 32 bits wide. ! + * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + */ +# define MD5_LONG unsigned int + +# define MD5_CBLOCK 64 +# define MD5_LBLOCK (MD5_CBLOCK/4) +# define MD5_DIGEST_LENGTH 16 + +typedef struct MD5state_st { + MD5_LONG A, B, C, D; + MD5_LONG Nl, Nh; + MD5_LONG data[MD5_LBLOCK]; + unsigned int num; +} MD5_CTX; + +int MD5_Init(MD5_CTX *c); +int MD5_Update(MD5_CTX *c, const void *data, size_t len); +int MD5_Final(unsigned char *md, MD5_CTX *c); +unsigned char *MD5(const unsigned char *d, size_t n, unsigned char *md); +void MD5_Transform(MD5_CTX *c, const unsigned char *b); +# ifdef __cplusplus +} +# endif +# endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/mdc2.h b/third_party/openssl/uos/sw_64/include/openssl/mdc2.h new file mode 100644 index 00000000..aabd2bfa --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/mdc2.h @@ -0,0 +1,42 @@ +/* + * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_MDC2_H +# define HEADER_MDC2_H + +# include + +#ifndef OPENSSL_NO_MDC2 +# include +# include +# ifdef __cplusplus +extern "C" { +# endif + +# define MDC2_BLOCK 8 +# define MDC2_DIGEST_LENGTH 16 + +typedef struct mdc2_ctx_st { + unsigned int num; + unsigned char data[MDC2_BLOCK]; + DES_cblock h, hh; + int pad_type; /* either 1 or 2, default 1 */ +} MDC2_CTX; + +int MDC2_Init(MDC2_CTX *c); +int MDC2_Update(MDC2_CTX *c, const unsigned char *data, size_t len); +int MDC2_Final(unsigned char *md, MDC2_CTX *c); +unsigned char *MDC2(const unsigned char *d, size_t n, unsigned char *md); + +# ifdef __cplusplus +} +# endif +# endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/modes.h b/third_party/openssl/uos/sw_64/include/openssl/modes.h new file mode 100644 index 00000000..d544f98d --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/modes.h @@ -0,0 +1,208 @@ +/* + * Copyright 2008-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_MODES_H +# define HEADER_MODES_H + +# include + +# ifdef __cplusplus +extern "C" { +# endif +typedef void (*block128_f) (const unsigned char in[16], + unsigned char out[16], const void *key); + +typedef void (*cbc128_f) (const unsigned char *in, unsigned char *out, + size_t len, const void *key, + unsigned char ivec[16], int enc); + +typedef void (*ctr128_f) (const unsigned char *in, unsigned char *out, + size_t blocks, const void *key, + const unsigned char ivec[16]); + +typedef void (*ccm128_f) (const unsigned char *in, unsigned char *out, + size_t blocks, const void *key, + const unsigned char ivec[16], + unsigned char cmac[16]); + +void CRYPTO_cbc128_encrypt(const unsigned char *in, unsigned char *out, + size_t len, const void *key, + unsigned char ivec[16], block128_f block); +void CRYPTO_cbc128_decrypt(const unsigned char *in, unsigned char *out, + size_t len, const void *key, + unsigned char ivec[16], block128_f block); + +void CRYPTO_ctr128_encrypt(const unsigned char *in, unsigned char *out, + size_t len, const void *key, + unsigned char ivec[16], + unsigned char ecount_buf[16], unsigned int *num, + block128_f block); + +void CRYPTO_ctr128_encrypt_ctr32(const unsigned char *in, unsigned char *out, + size_t len, const void *key, + unsigned char ivec[16], + unsigned char ecount_buf[16], + unsigned int *num, ctr128_f ctr); + +void CRYPTO_ofb128_encrypt(const unsigned char *in, unsigned char *out, + size_t len, const void *key, + unsigned char ivec[16], int *num, + block128_f block); + +void CRYPTO_cfb128_encrypt(const unsigned char *in, unsigned char *out, + size_t len, const void *key, + unsigned char ivec[16], int *num, + int enc, block128_f block); +void CRYPTO_cfb128_8_encrypt(const unsigned char *in, unsigned char *out, + size_t length, const void *key, + unsigned char ivec[16], int *num, + int enc, block128_f block); +void CRYPTO_cfb128_1_encrypt(const unsigned char *in, unsigned char *out, + size_t bits, const void *key, + unsigned char ivec[16], int *num, + int enc, block128_f block); + +size_t CRYPTO_cts128_encrypt_block(const unsigned char *in, + unsigned char *out, size_t len, + const void 
*key, unsigned char ivec[16], + block128_f block); +size_t CRYPTO_cts128_encrypt(const unsigned char *in, unsigned char *out, + size_t len, const void *key, + unsigned char ivec[16], cbc128_f cbc); +size_t CRYPTO_cts128_decrypt_block(const unsigned char *in, + unsigned char *out, size_t len, + const void *key, unsigned char ivec[16], + block128_f block); +size_t CRYPTO_cts128_decrypt(const unsigned char *in, unsigned char *out, + size_t len, const void *key, + unsigned char ivec[16], cbc128_f cbc); + +size_t CRYPTO_nistcts128_encrypt_block(const unsigned char *in, + unsigned char *out, size_t len, + const void *key, + unsigned char ivec[16], + block128_f block); +size_t CRYPTO_nistcts128_encrypt(const unsigned char *in, unsigned char *out, + size_t len, const void *key, + unsigned char ivec[16], cbc128_f cbc); +size_t CRYPTO_nistcts128_decrypt_block(const unsigned char *in, + unsigned char *out, size_t len, + const void *key, + unsigned char ivec[16], + block128_f block); +size_t CRYPTO_nistcts128_decrypt(const unsigned char *in, unsigned char *out, + size_t len, const void *key, + unsigned char ivec[16], cbc128_f cbc); + +typedef struct gcm128_context GCM128_CONTEXT; + +GCM128_CONTEXT *CRYPTO_gcm128_new(void *key, block128_f block); +void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, void *key, block128_f block); +void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const unsigned char *iv, + size_t len); +int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const unsigned char *aad, + size_t len); +int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, + const unsigned char *in, unsigned char *out, + size_t len); +int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, + const unsigned char *in, unsigned char *out, + size_t len); +int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, + const unsigned char *in, unsigned char *out, + size_t len, ctr128_f stream); +int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, + const unsigned char *in, unsigned char *out, + size_t len, ctr128_f stream); +int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const unsigned char *tag, + size_t len); +void CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, unsigned char *tag, size_t len); +void CRYPTO_gcm128_release(GCM128_CONTEXT *ctx); + +typedef struct ccm128_context CCM128_CONTEXT; + +void CRYPTO_ccm128_init(CCM128_CONTEXT *ctx, + unsigned int M, unsigned int L, void *key, + block128_f block); +int CRYPTO_ccm128_setiv(CCM128_CONTEXT *ctx, const unsigned char *nonce, + size_t nlen, size_t mlen); +void CRYPTO_ccm128_aad(CCM128_CONTEXT *ctx, const unsigned char *aad, + size_t alen); +int CRYPTO_ccm128_encrypt(CCM128_CONTEXT *ctx, const unsigned char *inp, + unsigned char *out, size_t len); +int CRYPTO_ccm128_decrypt(CCM128_CONTEXT *ctx, const unsigned char *inp, + unsigned char *out, size_t len); +int CRYPTO_ccm128_encrypt_ccm64(CCM128_CONTEXT *ctx, const unsigned char *inp, + unsigned char *out, size_t len, + ccm128_f stream); +int CRYPTO_ccm128_decrypt_ccm64(CCM128_CONTEXT *ctx, const unsigned char *inp, + unsigned char *out, size_t len, + ccm128_f stream); +size_t CRYPTO_ccm128_tag(CCM128_CONTEXT *ctx, unsigned char *tag, size_t len); + +typedef struct xts128_context XTS128_CONTEXT; + +int CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx, + const unsigned char iv[16], + const unsigned char *inp, unsigned char *out, + size_t len, int enc); + +size_t CRYPTO_128_wrap(void *key, const unsigned char *iv, + unsigned char *out, + const unsigned char *in, size_t inlen, + block128_f block); + +size_t CRYPTO_128_unwrap(void *key, const unsigned char *iv, + unsigned 
char *out, + const unsigned char *in, size_t inlen, + block128_f block); +size_t CRYPTO_128_wrap_pad(void *key, const unsigned char *icv, + unsigned char *out, const unsigned char *in, + size_t inlen, block128_f block); +size_t CRYPTO_128_unwrap_pad(void *key, const unsigned char *icv, + unsigned char *out, const unsigned char *in, + size_t inlen, block128_f block); + +# ifndef OPENSSL_NO_OCB +typedef struct ocb128_context OCB128_CONTEXT; + +typedef void (*ocb128_f) (const unsigned char *in, unsigned char *out, + size_t blocks, const void *key, + size_t start_block_num, + unsigned char offset_i[16], + const unsigned char L_[][16], + unsigned char checksum[16]); + +OCB128_CONTEXT *CRYPTO_ocb128_new(void *keyenc, void *keydec, + block128_f encrypt, block128_f decrypt, + ocb128_f stream); +int CRYPTO_ocb128_init(OCB128_CONTEXT *ctx, void *keyenc, void *keydec, + block128_f encrypt, block128_f decrypt, + ocb128_f stream); +int CRYPTO_ocb128_copy_ctx(OCB128_CONTEXT *dest, OCB128_CONTEXT *src, + void *keyenc, void *keydec); +int CRYPTO_ocb128_setiv(OCB128_CONTEXT *ctx, const unsigned char *iv, + size_t len, size_t taglen); +int CRYPTO_ocb128_aad(OCB128_CONTEXT *ctx, const unsigned char *aad, + size_t len); +int CRYPTO_ocb128_encrypt(OCB128_CONTEXT *ctx, const unsigned char *in, + unsigned char *out, size_t len); +int CRYPTO_ocb128_decrypt(OCB128_CONTEXT *ctx, const unsigned char *in, + unsigned char *out, size_t len); +int CRYPTO_ocb128_finish(OCB128_CONTEXT *ctx, const unsigned char *tag, + size_t len); +int CRYPTO_ocb128_tag(OCB128_CONTEXT *ctx, unsigned char *tag, size_t len); +void CRYPTO_ocb128_cleanup(OCB128_CONTEXT *ctx); +# endif /* OPENSSL_NO_OCB */ + +# ifdef __cplusplus +} +# endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/obj_mac.h b/third_party/openssl/uos/sw_64/include/openssl/obj_mac.h new file mode 100644 index 00000000..53516a06 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/obj_mac.h @@ -0,0 +1,5198 @@ +/* + * WARNING: do not edit! + * Generated by crypto/objects/objects.pl + * + * Copyright 2000-2022 The OpenSSL Project Authors. All Rights Reserved. + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#define SN_undef "UNDEF" +#define LN_undef "undefined" +#define NID_undef 0 +#define OBJ_undef 0L + +#define SN_itu_t "ITU-T" +#define LN_itu_t "itu-t" +#define NID_itu_t 645 +#define OBJ_itu_t 0L + +#define NID_ccitt 404 +#define OBJ_ccitt OBJ_itu_t + +#define SN_iso "ISO" +#define LN_iso "iso" +#define NID_iso 181 +#define OBJ_iso 1L + +#define SN_joint_iso_itu_t "JOINT-ISO-ITU-T" +#define LN_joint_iso_itu_t "joint-iso-itu-t" +#define NID_joint_iso_itu_t 646 +#define OBJ_joint_iso_itu_t 2L + +#define NID_joint_iso_ccitt 393 +#define OBJ_joint_iso_ccitt OBJ_joint_iso_itu_t + +#define SN_member_body "member-body" +#define LN_member_body "ISO Member Body" +#define NID_member_body 182 +#define OBJ_member_body OBJ_iso,2L + +#define SN_identified_organization "identified-organization" +#define NID_identified_organization 676 +#define OBJ_identified_organization OBJ_iso,3L + +#define SN_hmac_md5 "HMAC-MD5" +#define LN_hmac_md5 "hmac-md5" +#define NID_hmac_md5 780 +#define OBJ_hmac_md5 OBJ_identified_organization,6L,1L,5L,5L,8L,1L,1L + +#define SN_hmac_sha1 "HMAC-SHA1" +#define LN_hmac_sha1 "hmac-sha1" +#define NID_hmac_sha1 781 +#define OBJ_hmac_sha1 OBJ_identified_organization,6L,1L,5L,5L,8L,1L,2L + +#define SN_x509ExtAdmission "x509ExtAdmission" +#define LN_x509ExtAdmission "Professional Information or basis for Admission" +#define NID_x509ExtAdmission 1093 +#define OBJ_x509ExtAdmission OBJ_identified_organization,36L,8L,3L,3L + +#define SN_certicom_arc "certicom-arc" +#define NID_certicom_arc 677 +#define OBJ_certicom_arc OBJ_identified_organization,132L + +#define SN_ieee "ieee" +#define NID_ieee 1170 +#define OBJ_ieee OBJ_identified_organization,111L + +#define SN_ieee_siswg "ieee-siswg" +#define LN_ieee_siswg "IEEE Security in Storage Working Group" +#define NID_ieee_siswg 1171 +#define OBJ_ieee_siswg OBJ_ieee,2L,1619L + +#define SN_international_organizations "international-organizations" +#define LN_international_organizations "International Organizations" +#define NID_international_organizations 647 +#define OBJ_international_organizations OBJ_joint_iso_itu_t,23L + +#define SN_wap "wap" +#define NID_wap 678 +#define OBJ_wap OBJ_international_organizations,43L + +#define SN_wap_wsg "wap-wsg" +#define NID_wap_wsg 679 +#define OBJ_wap_wsg OBJ_wap,1L + +#define SN_selected_attribute_types "selected-attribute-types" +#define LN_selected_attribute_types "Selected Attribute Types" +#define NID_selected_attribute_types 394 +#define OBJ_selected_attribute_types OBJ_joint_iso_itu_t,5L,1L,5L + +#define SN_clearance "clearance" +#define NID_clearance 395 +#define OBJ_clearance OBJ_selected_attribute_types,55L + +#define SN_ISO_US "ISO-US" +#define LN_ISO_US "ISO US Member Body" +#define NID_ISO_US 183 +#define OBJ_ISO_US OBJ_member_body,840L + +#define SN_X9_57 "X9-57" +#define LN_X9_57 "X9.57" +#define NID_X9_57 184 +#define OBJ_X9_57 OBJ_ISO_US,10040L + +#define SN_X9cm "X9cm" +#define LN_X9cm "X9.57 CM ?" 
+#define NID_X9cm 185 +#define OBJ_X9cm OBJ_X9_57,4L + +#define SN_ISO_CN "ISO-CN" +#define LN_ISO_CN "ISO CN Member Body" +#define NID_ISO_CN 1140 +#define OBJ_ISO_CN OBJ_member_body,156L + +#define SN_oscca "oscca" +#define NID_oscca 1141 +#define OBJ_oscca OBJ_ISO_CN,10197L + +#define SN_sm_scheme "sm-scheme" +#define NID_sm_scheme 1142 +#define OBJ_sm_scheme OBJ_oscca,1L + +#define SN_dsa "DSA" +#define LN_dsa "dsaEncryption" +#define NID_dsa 116 +#define OBJ_dsa OBJ_X9cm,1L + +#define SN_dsaWithSHA1 "DSA-SHA1" +#define LN_dsaWithSHA1 "dsaWithSHA1" +#define NID_dsaWithSHA1 113 +#define OBJ_dsaWithSHA1 OBJ_X9cm,3L + +#define SN_ansi_X9_62 "ansi-X9-62" +#define LN_ansi_X9_62 "ANSI X9.62" +#define NID_ansi_X9_62 405 +#define OBJ_ansi_X9_62 OBJ_ISO_US,10045L + +#define OBJ_X9_62_id_fieldType OBJ_ansi_X9_62,1L + +#define SN_X9_62_prime_field "prime-field" +#define NID_X9_62_prime_field 406 +#define OBJ_X9_62_prime_field OBJ_X9_62_id_fieldType,1L + +#define SN_X9_62_characteristic_two_field "characteristic-two-field" +#define NID_X9_62_characteristic_two_field 407 +#define OBJ_X9_62_characteristic_two_field OBJ_X9_62_id_fieldType,2L + +#define SN_X9_62_id_characteristic_two_basis "id-characteristic-two-basis" +#define NID_X9_62_id_characteristic_two_basis 680 +#define OBJ_X9_62_id_characteristic_two_basis OBJ_X9_62_characteristic_two_field,3L + +#define SN_X9_62_onBasis "onBasis" +#define NID_X9_62_onBasis 681 +#define OBJ_X9_62_onBasis OBJ_X9_62_id_characteristic_two_basis,1L + +#define SN_X9_62_tpBasis "tpBasis" +#define NID_X9_62_tpBasis 682 +#define OBJ_X9_62_tpBasis OBJ_X9_62_id_characteristic_two_basis,2L + +#define SN_X9_62_ppBasis "ppBasis" +#define NID_X9_62_ppBasis 683 +#define OBJ_X9_62_ppBasis OBJ_X9_62_id_characteristic_two_basis,3L + +#define OBJ_X9_62_id_publicKeyType OBJ_ansi_X9_62,2L + +#define SN_X9_62_id_ecPublicKey "id-ecPublicKey" +#define NID_X9_62_id_ecPublicKey 408 +#define OBJ_X9_62_id_ecPublicKey OBJ_X9_62_id_publicKeyType,1L + +#define OBJ_X9_62_ellipticCurve OBJ_ansi_X9_62,3L + +#define OBJ_X9_62_c_TwoCurve OBJ_X9_62_ellipticCurve,0L + +#define SN_X9_62_c2pnb163v1 "c2pnb163v1" +#define NID_X9_62_c2pnb163v1 684 +#define OBJ_X9_62_c2pnb163v1 OBJ_X9_62_c_TwoCurve,1L + +#define SN_X9_62_c2pnb163v2 "c2pnb163v2" +#define NID_X9_62_c2pnb163v2 685 +#define OBJ_X9_62_c2pnb163v2 OBJ_X9_62_c_TwoCurve,2L + +#define SN_X9_62_c2pnb163v3 "c2pnb163v3" +#define NID_X9_62_c2pnb163v3 686 +#define OBJ_X9_62_c2pnb163v3 OBJ_X9_62_c_TwoCurve,3L + +#define SN_X9_62_c2pnb176v1 "c2pnb176v1" +#define NID_X9_62_c2pnb176v1 687 +#define OBJ_X9_62_c2pnb176v1 OBJ_X9_62_c_TwoCurve,4L + +#define SN_X9_62_c2tnb191v1 "c2tnb191v1" +#define NID_X9_62_c2tnb191v1 688 +#define OBJ_X9_62_c2tnb191v1 OBJ_X9_62_c_TwoCurve,5L + +#define SN_X9_62_c2tnb191v2 "c2tnb191v2" +#define NID_X9_62_c2tnb191v2 689 +#define OBJ_X9_62_c2tnb191v2 OBJ_X9_62_c_TwoCurve,6L + +#define SN_X9_62_c2tnb191v3 "c2tnb191v3" +#define NID_X9_62_c2tnb191v3 690 +#define OBJ_X9_62_c2tnb191v3 OBJ_X9_62_c_TwoCurve,7L + +#define SN_X9_62_c2onb191v4 "c2onb191v4" +#define NID_X9_62_c2onb191v4 691 +#define OBJ_X9_62_c2onb191v4 OBJ_X9_62_c_TwoCurve,8L + +#define SN_X9_62_c2onb191v5 "c2onb191v5" +#define NID_X9_62_c2onb191v5 692 +#define OBJ_X9_62_c2onb191v5 OBJ_X9_62_c_TwoCurve,9L + +#define SN_X9_62_c2pnb208w1 "c2pnb208w1" +#define NID_X9_62_c2pnb208w1 693 +#define OBJ_X9_62_c2pnb208w1 OBJ_X9_62_c_TwoCurve,10L + +#define SN_X9_62_c2tnb239v1 "c2tnb239v1" +#define NID_X9_62_c2tnb239v1 694 +#define OBJ_X9_62_c2tnb239v1 OBJ_X9_62_c_TwoCurve,11L + 
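A minimal usage sketch, not part of the vendored obj_mac.h: the SN_/LN_/NID_/OBJ_ groups defined above are the generated name tables consumed through the lookup helpers in <openssl/objects.h>. The snippet assumes the bundled sw_64 libcrypto.a from this change is linked and that objects.h ships in the same vendored include tree.

/* Not part of the diff: short name <-> NID <-> dotted-OID lookups. */
#include <stdio.h>
#include <openssl/objects.h>

int main(void)
{
    /* "id-ecPublicKey" -> NID_X9_62_id_ecPublicKey (408). */
    int nid = OBJ_sn2nid(SN_X9_62_id_ecPublicKey);
    char oid[64];

    /* NID -> ASN1_OBJECT -> numeric form, e.g. 1.2.840.10045.2.1. */
    OBJ_obj2txt(oid, sizeof(oid), OBJ_nid2obj(nid), 1);
    printf("%s = NID %d = %s\n", OBJ_nid2sn(nid), nid, oid);
    return 0;
}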
+#define SN_X9_62_c2tnb239v2 "c2tnb239v2" +#define NID_X9_62_c2tnb239v2 695 +#define OBJ_X9_62_c2tnb239v2 OBJ_X9_62_c_TwoCurve,12L + +#define SN_X9_62_c2tnb239v3 "c2tnb239v3" +#define NID_X9_62_c2tnb239v3 696 +#define OBJ_X9_62_c2tnb239v3 OBJ_X9_62_c_TwoCurve,13L + +#define SN_X9_62_c2onb239v4 "c2onb239v4" +#define NID_X9_62_c2onb239v4 697 +#define OBJ_X9_62_c2onb239v4 OBJ_X9_62_c_TwoCurve,14L + +#define SN_X9_62_c2onb239v5 "c2onb239v5" +#define NID_X9_62_c2onb239v5 698 +#define OBJ_X9_62_c2onb239v5 OBJ_X9_62_c_TwoCurve,15L + +#define SN_X9_62_c2pnb272w1 "c2pnb272w1" +#define NID_X9_62_c2pnb272w1 699 +#define OBJ_X9_62_c2pnb272w1 OBJ_X9_62_c_TwoCurve,16L + +#define SN_X9_62_c2pnb304w1 "c2pnb304w1" +#define NID_X9_62_c2pnb304w1 700 +#define OBJ_X9_62_c2pnb304w1 OBJ_X9_62_c_TwoCurve,17L + +#define SN_X9_62_c2tnb359v1 "c2tnb359v1" +#define NID_X9_62_c2tnb359v1 701 +#define OBJ_X9_62_c2tnb359v1 OBJ_X9_62_c_TwoCurve,18L + +#define SN_X9_62_c2pnb368w1 "c2pnb368w1" +#define NID_X9_62_c2pnb368w1 702 +#define OBJ_X9_62_c2pnb368w1 OBJ_X9_62_c_TwoCurve,19L + +#define SN_X9_62_c2tnb431r1 "c2tnb431r1" +#define NID_X9_62_c2tnb431r1 703 +#define OBJ_X9_62_c2tnb431r1 OBJ_X9_62_c_TwoCurve,20L + +#define OBJ_X9_62_primeCurve OBJ_X9_62_ellipticCurve,1L + +#define SN_X9_62_prime192v1 "prime192v1" +#define NID_X9_62_prime192v1 409 +#define OBJ_X9_62_prime192v1 OBJ_X9_62_primeCurve,1L + +#define SN_X9_62_prime192v2 "prime192v2" +#define NID_X9_62_prime192v2 410 +#define OBJ_X9_62_prime192v2 OBJ_X9_62_primeCurve,2L + +#define SN_X9_62_prime192v3 "prime192v3" +#define NID_X9_62_prime192v3 411 +#define OBJ_X9_62_prime192v3 OBJ_X9_62_primeCurve,3L + +#define SN_X9_62_prime239v1 "prime239v1" +#define NID_X9_62_prime239v1 412 +#define OBJ_X9_62_prime239v1 OBJ_X9_62_primeCurve,4L + +#define SN_X9_62_prime239v2 "prime239v2" +#define NID_X9_62_prime239v2 413 +#define OBJ_X9_62_prime239v2 OBJ_X9_62_primeCurve,5L + +#define SN_X9_62_prime239v3 "prime239v3" +#define NID_X9_62_prime239v3 414 +#define OBJ_X9_62_prime239v3 OBJ_X9_62_primeCurve,6L + +#define SN_X9_62_prime256v1 "prime256v1" +#define NID_X9_62_prime256v1 415 +#define OBJ_X9_62_prime256v1 OBJ_X9_62_primeCurve,7L + +#define OBJ_X9_62_id_ecSigType OBJ_ansi_X9_62,4L + +#define SN_ecdsa_with_SHA1 "ecdsa-with-SHA1" +#define NID_ecdsa_with_SHA1 416 +#define OBJ_ecdsa_with_SHA1 OBJ_X9_62_id_ecSigType,1L + +#define SN_ecdsa_with_Recommended "ecdsa-with-Recommended" +#define NID_ecdsa_with_Recommended 791 +#define OBJ_ecdsa_with_Recommended OBJ_X9_62_id_ecSigType,2L + +#define SN_ecdsa_with_Specified "ecdsa-with-Specified" +#define NID_ecdsa_with_Specified 792 +#define OBJ_ecdsa_with_Specified OBJ_X9_62_id_ecSigType,3L + +#define SN_ecdsa_with_SHA224 "ecdsa-with-SHA224" +#define NID_ecdsa_with_SHA224 793 +#define OBJ_ecdsa_with_SHA224 OBJ_ecdsa_with_Specified,1L + +#define SN_ecdsa_with_SHA256 "ecdsa-with-SHA256" +#define NID_ecdsa_with_SHA256 794 +#define OBJ_ecdsa_with_SHA256 OBJ_ecdsa_with_Specified,2L + +#define SN_ecdsa_with_SHA384 "ecdsa-with-SHA384" +#define NID_ecdsa_with_SHA384 795 +#define OBJ_ecdsa_with_SHA384 OBJ_ecdsa_with_Specified,3L + +#define SN_ecdsa_with_SHA512 "ecdsa-with-SHA512" +#define NID_ecdsa_with_SHA512 796 +#define OBJ_ecdsa_with_SHA512 OBJ_ecdsa_with_Specified,4L + +#define OBJ_secg_ellipticCurve OBJ_certicom_arc,0L + +#define SN_secp112r1 "secp112r1" +#define NID_secp112r1 704 +#define OBJ_secp112r1 OBJ_secg_ellipticCurve,6L + +#define SN_secp112r2 "secp112r2" +#define NID_secp112r2 705 +#define OBJ_secp112r2 OBJ_secg_ellipticCurve,7L + 
+#define SN_secp128r1 "secp128r1" +#define NID_secp128r1 706 +#define OBJ_secp128r1 OBJ_secg_ellipticCurve,28L + +#define SN_secp128r2 "secp128r2" +#define NID_secp128r2 707 +#define OBJ_secp128r2 OBJ_secg_ellipticCurve,29L + +#define SN_secp160k1 "secp160k1" +#define NID_secp160k1 708 +#define OBJ_secp160k1 OBJ_secg_ellipticCurve,9L + +#define SN_secp160r1 "secp160r1" +#define NID_secp160r1 709 +#define OBJ_secp160r1 OBJ_secg_ellipticCurve,8L + +#define SN_secp160r2 "secp160r2" +#define NID_secp160r2 710 +#define OBJ_secp160r2 OBJ_secg_ellipticCurve,30L + +#define SN_secp192k1 "secp192k1" +#define NID_secp192k1 711 +#define OBJ_secp192k1 OBJ_secg_ellipticCurve,31L + +#define SN_secp224k1 "secp224k1" +#define NID_secp224k1 712 +#define OBJ_secp224k1 OBJ_secg_ellipticCurve,32L + +#define SN_secp224r1 "secp224r1" +#define NID_secp224r1 713 +#define OBJ_secp224r1 OBJ_secg_ellipticCurve,33L + +#define SN_secp256k1 "secp256k1" +#define NID_secp256k1 714 +#define OBJ_secp256k1 OBJ_secg_ellipticCurve,10L + +#define SN_secp384r1 "secp384r1" +#define NID_secp384r1 715 +#define OBJ_secp384r1 OBJ_secg_ellipticCurve,34L + +#define SN_secp521r1 "secp521r1" +#define NID_secp521r1 716 +#define OBJ_secp521r1 OBJ_secg_ellipticCurve,35L + +#define SN_sect113r1 "sect113r1" +#define NID_sect113r1 717 +#define OBJ_sect113r1 OBJ_secg_ellipticCurve,4L + +#define SN_sect113r2 "sect113r2" +#define NID_sect113r2 718 +#define OBJ_sect113r2 OBJ_secg_ellipticCurve,5L + +#define SN_sect131r1 "sect131r1" +#define NID_sect131r1 719 +#define OBJ_sect131r1 OBJ_secg_ellipticCurve,22L + +#define SN_sect131r2 "sect131r2" +#define NID_sect131r2 720 +#define OBJ_sect131r2 OBJ_secg_ellipticCurve,23L + +#define SN_sect163k1 "sect163k1" +#define NID_sect163k1 721 +#define OBJ_sect163k1 OBJ_secg_ellipticCurve,1L + +#define SN_sect163r1 "sect163r1" +#define NID_sect163r1 722 +#define OBJ_sect163r1 OBJ_secg_ellipticCurve,2L + +#define SN_sect163r2 "sect163r2" +#define NID_sect163r2 723 +#define OBJ_sect163r2 OBJ_secg_ellipticCurve,15L + +#define SN_sect193r1 "sect193r1" +#define NID_sect193r1 724 +#define OBJ_sect193r1 OBJ_secg_ellipticCurve,24L + +#define SN_sect193r2 "sect193r2" +#define NID_sect193r2 725 +#define OBJ_sect193r2 OBJ_secg_ellipticCurve,25L + +#define SN_sect233k1 "sect233k1" +#define NID_sect233k1 726 +#define OBJ_sect233k1 OBJ_secg_ellipticCurve,26L + +#define SN_sect233r1 "sect233r1" +#define NID_sect233r1 727 +#define OBJ_sect233r1 OBJ_secg_ellipticCurve,27L + +#define SN_sect239k1 "sect239k1" +#define NID_sect239k1 728 +#define OBJ_sect239k1 OBJ_secg_ellipticCurve,3L + +#define SN_sect283k1 "sect283k1" +#define NID_sect283k1 729 +#define OBJ_sect283k1 OBJ_secg_ellipticCurve,16L + +#define SN_sect283r1 "sect283r1" +#define NID_sect283r1 730 +#define OBJ_sect283r1 OBJ_secg_ellipticCurve,17L + +#define SN_sect409k1 "sect409k1" +#define NID_sect409k1 731 +#define OBJ_sect409k1 OBJ_secg_ellipticCurve,36L + +#define SN_sect409r1 "sect409r1" +#define NID_sect409r1 732 +#define OBJ_sect409r1 OBJ_secg_ellipticCurve,37L + +#define SN_sect571k1 "sect571k1" +#define NID_sect571k1 733 +#define OBJ_sect571k1 OBJ_secg_ellipticCurve,38L + +#define SN_sect571r1 "sect571r1" +#define NID_sect571r1 734 +#define OBJ_sect571r1 OBJ_secg_ellipticCurve,39L + +#define OBJ_wap_wsg_idm_ecid OBJ_wap_wsg,4L + +#define SN_wap_wsg_idm_ecid_wtls1 "wap-wsg-idm-ecid-wtls1" +#define NID_wap_wsg_idm_ecid_wtls1 735 +#define OBJ_wap_wsg_idm_ecid_wtls1 OBJ_wap_wsg_idm_ecid,1L + +#define SN_wap_wsg_idm_ecid_wtls3 "wap-wsg-idm-ecid-wtls3" +#define 
NID_wap_wsg_idm_ecid_wtls3 736 +#define OBJ_wap_wsg_idm_ecid_wtls3 OBJ_wap_wsg_idm_ecid,3L + +#define SN_wap_wsg_idm_ecid_wtls4 "wap-wsg-idm-ecid-wtls4" +#define NID_wap_wsg_idm_ecid_wtls4 737 +#define OBJ_wap_wsg_idm_ecid_wtls4 OBJ_wap_wsg_idm_ecid,4L + +#define SN_wap_wsg_idm_ecid_wtls5 "wap-wsg-idm-ecid-wtls5" +#define NID_wap_wsg_idm_ecid_wtls5 738 +#define OBJ_wap_wsg_idm_ecid_wtls5 OBJ_wap_wsg_idm_ecid,5L + +#define SN_wap_wsg_idm_ecid_wtls6 "wap-wsg-idm-ecid-wtls6" +#define NID_wap_wsg_idm_ecid_wtls6 739 +#define OBJ_wap_wsg_idm_ecid_wtls6 OBJ_wap_wsg_idm_ecid,6L + +#define SN_wap_wsg_idm_ecid_wtls7 "wap-wsg-idm-ecid-wtls7" +#define NID_wap_wsg_idm_ecid_wtls7 740 +#define OBJ_wap_wsg_idm_ecid_wtls7 OBJ_wap_wsg_idm_ecid,7L + +#define SN_wap_wsg_idm_ecid_wtls8 "wap-wsg-idm-ecid-wtls8" +#define NID_wap_wsg_idm_ecid_wtls8 741 +#define OBJ_wap_wsg_idm_ecid_wtls8 OBJ_wap_wsg_idm_ecid,8L + +#define SN_wap_wsg_idm_ecid_wtls9 "wap-wsg-idm-ecid-wtls9" +#define NID_wap_wsg_idm_ecid_wtls9 742 +#define OBJ_wap_wsg_idm_ecid_wtls9 OBJ_wap_wsg_idm_ecid,9L + +#define SN_wap_wsg_idm_ecid_wtls10 "wap-wsg-idm-ecid-wtls10" +#define NID_wap_wsg_idm_ecid_wtls10 743 +#define OBJ_wap_wsg_idm_ecid_wtls10 OBJ_wap_wsg_idm_ecid,10L + +#define SN_wap_wsg_idm_ecid_wtls11 "wap-wsg-idm-ecid-wtls11" +#define NID_wap_wsg_idm_ecid_wtls11 744 +#define OBJ_wap_wsg_idm_ecid_wtls11 OBJ_wap_wsg_idm_ecid,11L + +#define SN_wap_wsg_idm_ecid_wtls12 "wap-wsg-idm-ecid-wtls12" +#define NID_wap_wsg_idm_ecid_wtls12 745 +#define OBJ_wap_wsg_idm_ecid_wtls12 OBJ_wap_wsg_idm_ecid,12L + +#define SN_cast5_cbc "CAST5-CBC" +#define LN_cast5_cbc "cast5-cbc" +#define NID_cast5_cbc 108 +#define OBJ_cast5_cbc OBJ_ISO_US,113533L,7L,66L,10L + +#define SN_cast5_ecb "CAST5-ECB" +#define LN_cast5_ecb "cast5-ecb" +#define NID_cast5_ecb 109 + +#define SN_cast5_cfb64 "CAST5-CFB" +#define LN_cast5_cfb64 "cast5-cfb" +#define NID_cast5_cfb64 110 + +#define SN_cast5_ofb64 "CAST5-OFB" +#define LN_cast5_ofb64 "cast5-ofb" +#define NID_cast5_ofb64 111 + +#define LN_pbeWithMD5AndCast5_CBC "pbeWithMD5AndCast5CBC" +#define NID_pbeWithMD5AndCast5_CBC 112 +#define OBJ_pbeWithMD5AndCast5_CBC OBJ_ISO_US,113533L,7L,66L,12L + +#define SN_id_PasswordBasedMAC "id-PasswordBasedMAC" +#define LN_id_PasswordBasedMAC "password based MAC" +#define NID_id_PasswordBasedMAC 782 +#define OBJ_id_PasswordBasedMAC OBJ_ISO_US,113533L,7L,66L,13L + +#define SN_id_DHBasedMac "id-DHBasedMac" +#define LN_id_DHBasedMac "Diffie-Hellman based MAC" +#define NID_id_DHBasedMac 783 +#define OBJ_id_DHBasedMac OBJ_ISO_US,113533L,7L,66L,30L + +#define SN_rsadsi "rsadsi" +#define LN_rsadsi "RSA Data Security, Inc." +#define NID_rsadsi 1 +#define OBJ_rsadsi OBJ_ISO_US,113549L + +#define SN_pkcs "pkcs" +#define LN_pkcs "RSA Data Security, Inc. 
PKCS" +#define NID_pkcs 2 +#define OBJ_pkcs OBJ_rsadsi,1L + +#define SN_pkcs1 "pkcs1" +#define NID_pkcs1 186 +#define OBJ_pkcs1 OBJ_pkcs,1L + +#define LN_rsaEncryption "rsaEncryption" +#define NID_rsaEncryption 6 +#define OBJ_rsaEncryption OBJ_pkcs1,1L + +#define SN_md2WithRSAEncryption "RSA-MD2" +#define LN_md2WithRSAEncryption "md2WithRSAEncryption" +#define NID_md2WithRSAEncryption 7 +#define OBJ_md2WithRSAEncryption OBJ_pkcs1,2L + +#define SN_md4WithRSAEncryption "RSA-MD4" +#define LN_md4WithRSAEncryption "md4WithRSAEncryption" +#define NID_md4WithRSAEncryption 396 +#define OBJ_md4WithRSAEncryption OBJ_pkcs1,3L + +#define SN_md5WithRSAEncryption "RSA-MD5" +#define LN_md5WithRSAEncryption "md5WithRSAEncryption" +#define NID_md5WithRSAEncryption 8 +#define OBJ_md5WithRSAEncryption OBJ_pkcs1,4L + +#define SN_sha1WithRSAEncryption "RSA-SHA1" +#define LN_sha1WithRSAEncryption "sha1WithRSAEncryption" +#define NID_sha1WithRSAEncryption 65 +#define OBJ_sha1WithRSAEncryption OBJ_pkcs1,5L + +#define SN_rsaesOaep "RSAES-OAEP" +#define LN_rsaesOaep "rsaesOaep" +#define NID_rsaesOaep 919 +#define OBJ_rsaesOaep OBJ_pkcs1,7L + +#define SN_mgf1 "MGF1" +#define LN_mgf1 "mgf1" +#define NID_mgf1 911 +#define OBJ_mgf1 OBJ_pkcs1,8L + +#define SN_pSpecified "PSPECIFIED" +#define LN_pSpecified "pSpecified" +#define NID_pSpecified 935 +#define OBJ_pSpecified OBJ_pkcs1,9L + +#define SN_rsassaPss "RSASSA-PSS" +#define LN_rsassaPss "rsassaPss" +#define NID_rsassaPss 912 +#define OBJ_rsassaPss OBJ_pkcs1,10L + +#define SN_sha256WithRSAEncryption "RSA-SHA256" +#define LN_sha256WithRSAEncryption "sha256WithRSAEncryption" +#define NID_sha256WithRSAEncryption 668 +#define OBJ_sha256WithRSAEncryption OBJ_pkcs1,11L + +#define SN_sha384WithRSAEncryption "RSA-SHA384" +#define LN_sha384WithRSAEncryption "sha384WithRSAEncryption" +#define NID_sha384WithRSAEncryption 669 +#define OBJ_sha384WithRSAEncryption OBJ_pkcs1,12L + +#define SN_sha512WithRSAEncryption "RSA-SHA512" +#define LN_sha512WithRSAEncryption "sha512WithRSAEncryption" +#define NID_sha512WithRSAEncryption 670 +#define OBJ_sha512WithRSAEncryption OBJ_pkcs1,13L + +#define SN_sha224WithRSAEncryption "RSA-SHA224" +#define LN_sha224WithRSAEncryption "sha224WithRSAEncryption" +#define NID_sha224WithRSAEncryption 671 +#define OBJ_sha224WithRSAEncryption OBJ_pkcs1,14L + +#define SN_sha512_224WithRSAEncryption "RSA-SHA512/224" +#define LN_sha512_224WithRSAEncryption "sha512-224WithRSAEncryption" +#define NID_sha512_224WithRSAEncryption 1145 +#define OBJ_sha512_224WithRSAEncryption OBJ_pkcs1,15L + +#define SN_sha512_256WithRSAEncryption "RSA-SHA512/256" +#define LN_sha512_256WithRSAEncryption "sha512-256WithRSAEncryption" +#define NID_sha512_256WithRSAEncryption 1146 +#define OBJ_sha512_256WithRSAEncryption OBJ_pkcs1,16L + +#define SN_pkcs3 "pkcs3" +#define NID_pkcs3 27 +#define OBJ_pkcs3 OBJ_pkcs,3L + +#define LN_dhKeyAgreement "dhKeyAgreement" +#define NID_dhKeyAgreement 28 +#define OBJ_dhKeyAgreement OBJ_pkcs3,1L + +#define SN_pkcs5 "pkcs5" +#define NID_pkcs5 187 +#define OBJ_pkcs5 OBJ_pkcs,5L + +#define SN_pbeWithMD2AndDES_CBC "PBE-MD2-DES" +#define LN_pbeWithMD2AndDES_CBC "pbeWithMD2AndDES-CBC" +#define NID_pbeWithMD2AndDES_CBC 9 +#define OBJ_pbeWithMD2AndDES_CBC OBJ_pkcs5,1L + +#define SN_pbeWithMD5AndDES_CBC "PBE-MD5-DES" +#define LN_pbeWithMD5AndDES_CBC "pbeWithMD5AndDES-CBC" +#define NID_pbeWithMD5AndDES_CBC 10 +#define OBJ_pbeWithMD5AndDES_CBC OBJ_pkcs5,3L + +#define SN_pbeWithMD2AndRC2_CBC "PBE-MD2-RC2-64" +#define LN_pbeWithMD2AndRC2_CBC "pbeWithMD2AndRC2-CBC" 
+#define NID_pbeWithMD2AndRC2_CBC 168 +#define OBJ_pbeWithMD2AndRC2_CBC OBJ_pkcs5,4L + +#define SN_pbeWithMD5AndRC2_CBC "PBE-MD5-RC2-64" +#define LN_pbeWithMD5AndRC2_CBC "pbeWithMD5AndRC2-CBC" +#define NID_pbeWithMD5AndRC2_CBC 169 +#define OBJ_pbeWithMD5AndRC2_CBC OBJ_pkcs5,6L + +#define SN_pbeWithSHA1AndDES_CBC "PBE-SHA1-DES" +#define LN_pbeWithSHA1AndDES_CBC "pbeWithSHA1AndDES-CBC" +#define NID_pbeWithSHA1AndDES_CBC 170 +#define OBJ_pbeWithSHA1AndDES_CBC OBJ_pkcs5,10L + +#define SN_pbeWithSHA1AndRC2_CBC "PBE-SHA1-RC2-64" +#define LN_pbeWithSHA1AndRC2_CBC "pbeWithSHA1AndRC2-CBC" +#define NID_pbeWithSHA1AndRC2_CBC 68 +#define OBJ_pbeWithSHA1AndRC2_CBC OBJ_pkcs5,11L + +#define LN_id_pbkdf2 "PBKDF2" +#define NID_id_pbkdf2 69 +#define OBJ_id_pbkdf2 OBJ_pkcs5,12L + +#define LN_pbes2 "PBES2" +#define NID_pbes2 161 +#define OBJ_pbes2 OBJ_pkcs5,13L + +#define LN_pbmac1 "PBMAC1" +#define NID_pbmac1 162 +#define OBJ_pbmac1 OBJ_pkcs5,14L + +#define SN_pkcs7 "pkcs7" +#define NID_pkcs7 20 +#define OBJ_pkcs7 OBJ_pkcs,7L + +#define LN_pkcs7_data "pkcs7-data" +#define NID_pkcs7_data 21 +#define OBJ_pkcs7_data OBJ_pkcs7,1L + +#define LN_pkcs7_signed "pkcs7-signedData" +#define NID_pkcs7_signed 22 +#define OBJ_pkcs7_signed OBJ_pkcs7,2L + +#define LN_pkcs7_enveloped "pkcs7-envelopedData" +#define NID_pkcs7_enveloped 23 +#define OBJ_pkcs7_enveloped OBJ_pkcs7,3L + +#define LN_pkcs7_signedAndEnveloped "pkcs7-signedAndEnvelopedData" +#define NID_pkcs7_signedAndEnveloped 24 +#define OBJ_pkcs7_signedAndEnveloped OBJ_pkcs7,4L + +#define LN_pkcs7_digest "pkcs7-digestData" +#define NID_pkcs7_digest 25 +#define OBJ_pkcs7_digest OBJ_pkcs7,5L + +#define LN_pkcs7_encrypted "pkcs7-encryptedData" +#define NID_pkcs7_encrypted 26 +#define OBJ_pkcs7_encrypted OBJ_pkcs7,6L + +#define SN_pkcs9 "pkcs9" +#define NID_pkcs9 47 +#define OBJ_pkcs9 OBJ_pkcs,9L + +#define LN_pkcs9_emailAddress "emailAddress" +#define NID_pkcs9_emailAddress 48 +#define OBJ_pkcs9_emailAddress OBJ_pkcs9,1L + +#define LN_pkcs9_unstructuredName "unstructuredName" +#define NID_pkcs9_unstructuredName 49 +#define OBJ_pkcs9_unstructuredName OBJ_pkcs9,2L + +#define LN_pkcs9_contentType "contentType" +#define NID_pkcs9_contentType 50 +#define OBJ_pkcs9_contentType OBJ_pkcs9,3L + +#define LN_pkcs9_messageDigest "messageDigest" +#define NID_pkcs9_messageDigest 51 +#define OBJ_pkcs9_messageDigest OBJ_pkcs9,4L + +#define LN_pkcs9_signingTime "signingTime" +#define NID_pkcs9_signingTime 52 +#define OBJ_pkcs9_signingTime OBJ_pkcs9,5L + +#define LN_pkcs9_countersignature "countersignature" +#define NID_pkcs9_countersignature 53 +#define OBJ_pkcs9_countersignature OBJ_pkcs9,6L + +#define LN_pkcs9_challengePassword "challengePassword" +#define NID_pkcs9_challengePassword 54 +#define OBJ_pkcs9_challengePassword OBJ_pkcs9,7L + +#define LN_pkcs9_unstructuredAddress "unstructuredAddress" +#define NID_pkcs9_unstructuredAddress 55 +#define OBJ_pkcs9_unstructuredAddress OBJ_pkcs9,8L + +#define LN_pkcs9_extCertAttributes "extendedCertificateAttributes" +#define NID_pkcs9_extCertAttributes 56 +#define OBJ_pkcs9_extCertAttributes OBJ_pkcs9,9L + +#define SN_ext_req "extReq" +#define LN_ext_req "Extension Request" +#define NID_ext_req 172 +#define OBJ_ext_req OBJ_pkcs9,14L + +#define SN_SMIMECapabilities "SMIME-CAPS" +#define LN_SMIMECapabilities "S/MIME Capabilities" +#define NID_SMIMECapabilities 167 +#define OBJ_SMIMECapabilities OBJ_pkcs9,15L + +#define SN_SMIME "SMIME" +#define LN_SMIME "S/MIME" +#define NID_SMIME 188 +#define OBJ_SMIME OBJ_pkcs9,16L + +#define SN_id_smime_mod 
"id-smime-mod" +#define NID_id_smime_mod 189 +#define OBJ_id_smime_mod OBJ_SMIME,0L + +#define SN_id_smime_ct "id-smime-ct" +#define NID_id_smime_ct 190 +#define OBJ_id_smime_ct OBJ_SMIME,1L + +#define SN_id_smime_aa "id-smime-aa" +#define NID_id_smime_aa 191 +#define OBJ_id_smime_aa OBJ_SMIME,2L + +#define SN_id_smime_alg "id-smime-alg" +#define NID_id_smime_alg 192 +#define OBJ_id_smime_alg OBJ_SMIME,3L + +#define SN_id_smime_cd "id-smime-cd" +#define NID_id_smime_cd 193 +#define OBJ_id_smime_cd OBJ_SMIME,4L + +#define SN_id_smime_spq "id-smime-spq" +#define NID_id_smime_spq 194 +#define OBJ_id_smime_spq OBJ_SMIME,5L + +#define SN_id_smime_cti "id-smime-cti" +#define NID_id_smime_cti 195 +#define OBJ_id_smime_cti OBJ_SMIME,6L + +#define SN_id_smime_mod_cms "id-smime-mod-cms" +#define NID_id_smime_mod_cms 196 +#define OBJ_id_smime_mod_cms OBJ_id_smime_mod,1L + +#define SN_id_smime_mod_ess "id-smime-mod-ess" +#define NID_id_smime_mod_ess 197 +#define OBJ_id_smime_mod_ess OBJ_id_smime_mod,2L + +#define SN_id_smime_mod_oid "id-smime-mod-oid" +#define NID_id_smime_mod_oid 198 +#define OBJ_id_smime_mod_oid OBJ_id_smime_mod,3L + +#define SN_id_smime_mod_msg_v3 "id-smime-mod-msg-v3" +#define NID_id_smime_mod_msg_v3 199 +#define OBJ_id_smime_mod_msg_v3 OBJ_id_smime_mod,4L + +#define SN_id_smime_mod_ets_eSignature_88 "id-smime-mod-ets-eSignature-88" +#define NID_id_smime_mod_ets_eSignature_88 200 +#define OBJ_id_smime_mod_ets_eSignature_88 OBJ_id_smime_mod,5L + +#define SN_id_smime_mod_ets_eSignature_97 "id-smime-mod-ets-eSignature-97" +#define NID_id_smime_mod_ets_eSignature_97 201 +#define OBJ_id_smime_mod_ets_eSignature_97 OBJ_id_smime_mod,6L + +#define SN_id_smime_mod_ets_eSigPolicy_88 "id-smime-mod-ets-eSigPolicy-88" +#define NID_id_smime_mod_ets_eSigPolicy_88 202 +#define OBJ_id_smime_mod_ets_eSigPolicy_88 OBJ_id_smime_mod,7L + +#define SN_id_smime_mod_ets_eSigPolicy_97 "id-smime-mod-ets-eSigPolicy-97" +#define NID_id_smime_mod_ets_eSigPolicy_97 203 +#define OBJ_id_smime_mod_ets_eSigPolicy_97 OBJ_id_smime_mod,8L + +#define SN_id_smime_ct_receipt "id-smime-ct-receipt" +#define NID_id_smime_ct_receipt 204 +#define OBJ_id_smime_ct_receipt OBJ_id_smime_ct,1L + +#define SN_id_smime_ct_authData "id-smime-ct-authData" +#define NID_id_smime_ct_authData 205 +#define OBJ_id_smime_ct_authData OBJ_id_smime_ct,2L + +#define SN_id_smime_ct_publishCert "id-smime-ct-publishCert" +#define NID_id_smime_ct_publishCert 206 +#define OBJ_id_smime_ct_publishCert OBJ_id_smime_ct,3L + +#define SN_id_smime_ct_TSTInfo "id-smime-ct-TSTInfo" +#define NID_id_smime_ct_TSTInfo 207 +#define OBJ_id_smime_ct_TSTInfo OBJ_id_smime_ct,4L + +#define SN_id_smime_ct_TDTInfo "id-smime-ct-TDTInfo" +#define NID_id_smime_ct_TDTInfo 208 +#define OBJ_id_smime_ct_TDTInfo OBJ_id_smime_ct,5L + +#define SN_id_smime_ct_contentInfo "id-smime-ct-contentInfo" +#define NID_id_smime_ct_contentInfo 209 +#define OBJ_id_smime_ct_contentInfo OBJ_id_smime_ct,6L + +#define SN_id_smime_ct_DVCSRequestData "id-smime-ct-DVCSRequestData" +#define NID_id_smime_ct_DVCSRequestData 210 +#define OBJ_id_smime_ct_DVCSRequestData OBJ_id_smime_ct,7L + +#define SN_id_smime_ct_DVCSResponseData "id-smime-ct-DVCSResponseData" +#define NID_id_smime_ct_DVCSResponseData 211 +#define OBJ_id_smime_ct_DVCSResponseData OBJ_id_smime_ct,8L + +#define SN_id_smime_ct_compressedData "id-smime-ct-compressedData" +#define NID_id_smime_ct_compressedData 786 +#define OBJ_id_smime_ct_compressedData OBJ_id_smime_ct,9L + +#define SN_id_smime_ct_contentCollection 
"id-smime-ct-contentCollection" +#define NID_id_smime_ct_contentCollection 1058 +#define OBJ_id_smime_ct_contentCollection OBJ_id_smime_ct,19L + +#define SN_id_smime_ct_authEnvelopedData "id-smime-ct-authEnvelopedData" +#define NID_id_smime_ct_authEnvelopedData 1059 +#define OBJ_id_smime_ct_authEnvelopedData OBJ_id_smime_ct,23L + +#define SN_id_ct_asciiTextWithCRLF "id-ct-asciiTextWithCRLF" +#define NID_id_ct_asciiTextWithCRLF 787 +#define OBJ_id_ct_asciiTextWithCRLF OBJ_id_smime_ct,27L + +#define SN_id_ct_xml "id-ct-xml" +#define NID_id_ct_xml 1060 +#define OBJ_id_ct_xml OBJ_id_smime_ct,28L + +#define SN_id_smime_aa_receiptRequest "id-smime-aa-receiptRequest" +#define NID_id_smime_aa_receiptRequest 212 +#define OBJ_id_smime_aa_receiptRequest OBJ_id_smime_aa,1L + +#define SN_id_smime_aa_securityLabel "id-smime-aa-securityLabel" +#define NID_id_smime_aa_securityLabel 213 +#define OBJ_id_smime_aa_securityLabel OBJ_id_smime_aa,2L + +#define SN_id_smime_aa_mlExpandHistory "id-smime-aa-mlExpandHistory" +#define NID_id_smime_aa_mlExpandHistory 214 +#define OBJ_id_smime_aa_mlExpandHistory OBJ_id_smime_aa,3L + +#define SN_id_smime_aa_contentHint "id-smime-aa-contentHint" +#define NID_id_smime_aa_contentHint 215 +#define OBJ_id_smime_aa_contentHint OBJ_id_smime_aa,4L + +#define SN_id_smime_aa_msgSigDigest "id-smime-aa-msgSigDigest" +#define NID_id_smime_aa_msgSigDigest 216 +#define OBJ_id_smime_aa_msgSigDigest OBJ_id_smime_aa,5L + +#define SN_id_smime_aa_encapContentType "id-smime-aa-encapContentType" +#define NID_id_smime_aa_encapContentType 217 +#define OBJ_id_smime_aa_encapContentType OBJ_id_smime_aa,6L + +#define SN_id_smime_aa_contentIdentifier "id-smime-aa-contentIdentifier" +#define NID_id_smime_aa_contentIdentifier 218 +#define OBJ_id_smime_aa_contentIdentifier OBJ_id_smime_aa,7L + +#define SN_id_smime_aa_macValue "id-smime-aa-macValue" +#define NID_id_smime_aa_macValue 219 +#define OBJ_id_smime_aa_macValue OBJ_id_smime_aa,8L + +#define SN_id_smime_aa_equivalentLabels "id-smime-aa-equivalentLabels" +#define NID_id_smime_aa_equivalentLabels 220 +#define OBJ_id_smime_aa_equivalentLabels OBJ_id_smime_aa,9L + +#define SN_id_smime_aa_contentReference "id-smime-aa-contentReference" +#define NID_id_smime_aa_contentReference 221 +#define OBJ_id_smime_aa_contentReference OBJ_id_smime_aa,10L + +#define SN_id_smime_aa_encrypKeyPref "id-smime-aa-encrypKeyPref" +#define NID_id_smime_aa_encrypKeyPref 222 +#define OBJ_id_smime_aa_encrypKeyPref OBJ_id_smime_aa,11L + +#define SN_id_smime_aa_signingCertificate "id-smime-aa-signingCertificate" +#define NID_id_smime_aa_signingCertificate 223 +#define OBJ_id_smime_aa_signingCertificate OBJ_id_smime_aa,12L + +#define SN_id_smime_aa_smimeEncryptCerts "id-smime-aa-smimeEncryptCerts" +#define NID_id_smime_aa_smimeEncryptCerts 224 +#define OBJ_id_smime_aa_smimeEncryptCerts OBJ_id_smime_aa,13L + +#define SN_id_smime_aa_timeStampToken "id-smime-aa-timeStampToken" +#define NID_id_smime_aa_timeStampToken 225 +#define OBJ_id_smime_aa_timeStampToken OBJ_id_smime_aa,14L + +#define SN_id_smime_aa_ets_sigPolicyId "id-smime-aa-ets-sigPolicyId" +#define NID_id_smime_aa_ets_sigPolicyId 226 +#define OBJ_id_smime_aa_ets_sigPolicyId OBJ_id_smime_aa,15L + +#define SN_id_smime_aa_ets_commitmentType "id-smime-aa-ets-commitmentType" +#define NID_id_smime_aa_ets_commitmentType 227 +#define OBJ_id_smime_aa_ets_commitmentType OBJ_id_smime_aa,16L + +#define SN_id_smime_aa_ets_signerLocation "id-smime-aa-ets-signerLocation" +#define NID_id_smime_aa_ets_signerLocation 228 +#define 
OBJ_id_smime_aa_ets_signerLocation OBJ_id_smime_aa,17L + +#define SN_id_smime_aa_ets_signerAttr "id-smime-aa-ets-signerAttr" +#define NID_id_smime_aa_ets_signerAttr 229 +#define OBJ_id_smime_aa_ets_signerAttr OBJ_id_smime_aa,18L + +#define SN_id_smime_aa_ets_otherSigCert "id-smime-aa-ets-otherSigCert" +#define NID_id_smime_aa_ets_otherSigCert 230 +#define OBJ_id_smime_aa_ets_otherSigCert OBJ_id_smime_aa,19L + +#define SN_id_smime_aa_ets_contentTimestamp "id-smime-aa-ets-contentTimestamp" +#define NID_id_smime_aa_ets_contentTimestamp 231 +#define OBJ_id_smime_aa_ets_contentTimestamp OBJ_id_smime_aa,20L + +#define SN_id_smime_aa_ets_CertificateRefs "id-smime-aa-ets-CertificateRefs" +#define NID_id_smime_aa_ets_CertificateRefs 232 +#define OBJ_id_smime_aa_ets_CertificateRefs OBJ_id_smime_aa,21L + +#define SN_id_smime_aa_ets_RevocationRefs "id-smime-aa-ets-RevocationRefs" +#define NID_id_smime_aa_ets_RevocationRefs 233 +#define OBJ_id_smime_aa_ets_RevocationRefs OBJ_id_smime_aa,22L + +#define SN_id_smime_aa_ets_certValues "id-smime-aa-ets-certValues" +#define NID_id_smime_aa_ets_certValues 234 +#define OBJ_id_smime_aa_ets_certValues OBJ_id_smime_aa,23L + +#define SN_id_smime_aa_ets_revocationValues "id-smime-aa-ets-revocationValues" +#define NID_id_smime_aa_ets_revocationValues 235 +#define OBJ_id_smime_aa_ets_revocationValues OBJ_id_smime_aa,24L + +#define SN_id_smime_aa_ets_escTimeStamp "id-smime-aa-ets-escTimeStamp" +#define NID_id_smime_aa_ets_escTimeStamp 236 +#define OBJ_id_smime_aa_ets_escTimeStamp OBJ_id_smime_aa,25L + +#define SN_id_smime_aa_ets_certCRLTimestamp "id-smime-aa-ets-certCRLTimestamp" +#define NID_id_smime_aa_ets_certCRLTimestamp 237 +#define OBJ_id_smime_aa_ets_certCRLTimestamp OBJ_id_smime_aa,26L + +#define SN_id_smime_aa_ets_archiveTimeStamp "id-smime-aa-ets-archiveTimeStamp" +#define NID_id_smime_aa_ets_archiveTimeStamp 238 +#define OBJ_id_smime_aa_ets_archiveTimeStamp OBJ_id_smime_aa,27L + +#define SN_id_smime_aa_signatureType "id-smime-aa-signatureType" +#define NID_id_smime_aa_signatureType 239 +#define OBJ_id_smime_aa_signatureType OBJ_id_smime_aa,28L + +#define SN_id_smime_aa_dvcs_dvc "id-smime-aa-dvcs-dvc" +#define NID_id_smime_aa_dvcs_dvc 240 +#define OBJ_id_smime_aa_dvcs_dvc OBJ_id_smime_aa,29L + +#define SN_id_smime_aa_signingCertificateV2 "id-smime-aa-signingCertificateV2" +#define NID_id_smime_aa_signingCertificateV2 1086 +#define OBJ_id_smime_aa_signingCertificateV2 OBJ_id_smime_aa,47L + +#define SN_id_smime_alg_ESDHwith3DES "id-smime-alg-ESDHwith3DES" +#define NID_id_smime_alg_ESDHwith3DES 241 +#define OBJ_id_smime_alg_ESDHwith3DES OBJ_id_smime_alg,1L + +#define SN_id_smime_alg_ESDHwithRC2 "id-smime-alg-ESDHwithRC2" +#define NID_id_smime_alg_ESDHwithRC2 242 +#define OBJ_id_smime_alg_ESDHwithRC2 OBJ_id_smime_alg,2L + +#define SN_id_smime_alg_3DESwrap "id-smime-alg-3DESwrap" +#define NID_id_smime_alg_3DESwrap 243 +#define OBJ_id_smime_alg_3DESwrap OBJ_id_smime_alg,3L + +#define SN_id_smime_alg_RC2wrap "id-smime-alg-RC2wrap" +#define NID_id_smime_alg_RC2wrap 244 +#define OBJ_id_smime_alg_RC2wrap OBJ_id_smime_alg,4L + +#define SN_id_smime_alg_ESDH "id-smime-alg-ESDH" +#define NID_id_smime_alg_ESDH 245 +#define OBJ_id_smime_alg_ESDH OBJ_id_smime_alg,5L + +#define SN_id_smime_alg_CMS3DESwrap "id-smime-alg-CMS3DESwrap" +#define NID_id_smime_alg_CMS3DESwrap 246 +#define OBJ_id_smime_alg_CMS3DESwrap OBJ_id_smime_alg,6L + +#define SN_id_smime_alg_CMSRC2wrap "id-smime-alg-CMSRC2wrap" +#define NID_id_smime_alg_CMSRC2wrap 247 +#define OBJ_id_smime_alg_CMSRC2wrap 
OBJ_id_smime_alg,7L + +#define SN_id_alg_PWRI_KEK "id-alg-PWRI-KEK" +#define NID_id_alg_PWRI_KEK 893 +#define OBJ_id_alg_PWRI_KEK OBJ_id_smime_alg,9L + +#define SN_id_smime_cd_ldap "id-smime-cd-ldap" +#define NID_id_smime_cd_ldap 248 +#define OBJ_id_smime_cd_ldap OBJ_id_smime_cd,1L + +#define SN_id_smime_spq_ets_sqt_uri "id-smime-spq-ets-sqt-uri" +#define NID_id_smime_spq_ets_sqt_uri 249 +#define OBJ_id_smime_spq_ets_sqt_uri OBJ_id_smime_spq,1L + +#define SN_id_smime_spq_ets_sqt_unotice "id-smime-spq-ets-sqt-unotice" +#define NID_id_smime_spq_ets_sqt_unotice 250 +#define OBJ_id_smime_spq_ets_sqt_unotice OBJ_id_smime_spq,2L + +#define SN_id_smime_cti_ets_proofOfOrigin "id-smime-cti-ets-proofOfOrigin" +#define NID_id_smime_cti_ets_proofOfOrigin 251 +#define OBJ_id_smime_cti_ets_proofOfOrigin OBJ_id_smime_cti,1L + +#define SN_id_smime_cti_ets_proofOfReceipt "id-smime-cti-ets-proofOfReceipt" +#define NID_id_smime_cti_ets_proofOfReceipt 252 +#define OBJ_id_smime_cti_ets_proofOfReceipt OBJ_id_smime_cti,2L + +#define SN_id_smime_cti_ets_proofOfDelivery "id-smime-cti-ets-proofOfDelivery" +#define NID_id_smime_cti_ets_proofOfDelivery 253 +#define OBJ_id_smime_cti_ets_proofOfDelivery OBJ_id_smime_cti,3L + +#define SN_id_smime_cti_ets_proofOfSender "id-smime-cti-ets-proofOfSender" +#define NID_id_smime_cti_ets_proofOfSender 254 +#define OBJ_id_smime_cti_ets_proofOfSender OBJ_id_smime_cti,4L + +#define SN_id_smime_cti_ets_proofOfApproval "id-smime-cti-ets-proofOfApproval" +#define NID_id_smime_cti_ets_proofOfApproval 255 +#define OBJ_id_smime_cti_ets_proofOfApproval OBJ_id_smime_cti,5L + +#define SN_id_smime_cti_ets_proofOfCreation "id-smime-cti-ets-proofOfCreation" +#define NID_id_smime_cti_ets_proofOfCreation 256 +#define OBJ_id_smime_cti_ets_proofOfCreation OBJ_id_smime_cti,6L + +#define LN_friendlyName "friendlyName" +#define NID_friendlyName 156 +#define OBJ_friendlyName OBJ_pkcs9,20L + +#define LN_localKeyID "localKeyID" +#define NID_localKeyID 157 +#define OBJ_localKeyID OBJ_pkcs9,21L + +#define SN_ms_csp_name "CSPName" +#define LN_ms_csp_name "Microsoft CSP Name" +#define NID_ms_csp_name 417 +#define OBJ_ms_csp_name 1L,3L,6L,1L,4L,1L,311L,17L,1L + +#define SN_LocalKeySet "LocalKeySet" +#define LN_LocalKeySet "Microsoft Local Key set" +#define NID_LocalKeySet 856 +#define OBJ_LocalKeySet 1L,3L,6L,1L,4L,1L,311L,17L,2L + +#define OBJ_certTypes OBJ_pkcs9,22L + +#define LN_x509Certificate "x509Certificate" +#define NID_x509Certificate 158 +#define OBJ_x509Certificate OBJ_certTypes,1L + +#define LN_sdsiCertificate "sdsiCertificate" +#define NID_sdsiCertificate 159 +#define OBJ_sdsiCertificate OBJ_certTypes,2L + +#define OBJ_crlTypes OBJ_pkcs9,23L + +#define LN_x509Crl "x509Crl" +#define NID_x509Crl 160 +#define OBJ_x509Crl OBJ_crlTypes,1L + +#define OBJ_pkcs12 OBJ_pkcs,12L + +#define OBJ_pkcs12_pbeids OBJ_pkcs12,1L + +#define SN_pbe_WithSHA1And128BitRC4 "PBE-SHA1-RC4-128" +#define LN_pbe_WithSHA1And128BitRC4 "pbeWithSHA1And128BitRC4" +#define NID_pbe_WithSHA1And128BitRC4 144 +#define OBJ_pbe_WithSHA1And128BitRC4 OBJ_pkcs12_pbeids,1L + +#define SN_pbe_WithSHA1And40BitRC4 "PBE-SHA1-RC4-40" +#define LN_pbe_WithSHA1And40BitRC4 "pbeWithSHA1And40BitRC4" +#define NID_pbe_WithSHA1And40BitRC4 145 +#define OBJ_pbe_WithSHA1And40BitRC4 OBJ_pkcs12_pbeids,2L + +#define SN_pbe_WithSHA1And3_Key_TripleDES_CBC "PBE-SHA1-3DES" +#define LN_pbe_WithSHA1And3_Key_TripleDES_CBC "pbeWithSHA1And3-KeyTripleDES-CBC" +#define NID_pbe_WithSHA1And3_Key_TripleDES_CBC 146 +#define OBJ_pbe_WithSHA1And3_Key_TripleDES_CBC 
OBJ_pkcs12_pbeids,3L + +#define SN_pbe_WithSHA1And2_Key_TripleDES_CBC "PBE-SHA1-2DES" +#define LN_pbe_WithSHA1And2_Key_TripleDES_CBC "pbeWithSHA1And2-KeyTripleDES-CBC" +#define NID_pbe_WithSHA1And2_Key_TripleDES_CBC 147 +#define OBJ_pbe_WithSHA1And2_Key_TripleDES_CBC OBJ_pkcs12_pbeids,4L + +#define SN_pbe_WithSHA1And128BitRC2_CBC "PBE-SHA1-RC2-128" +#define LN_pbe_WithSHA1And128BitRC2_CBC "pbeWithSHA1And128BitRC2-CBC" +#define NID_pbe_WithSHA1And128BitRC2_CBC 148 +#define OBJ_pbe_WithSHA1And128BitRC2_CBC OBJ_pkcs12_pbeids,5L + +#define SN_pbe_WithSHA1And40BitRC2_CBC "PBE-SHA1-RC2-40" +#define LN_pbe_WithSHA1And40BitRC2_CBC "pbeWithSHA1And40BitRC2-CBC" +#define NID_pbe_WithSHA1And40BitRC2_CBC 149 +#define OBJ_pbe_WithSHA1And40BitRC2_CBC OBJ_pkcs12_pbeids,6L + +#define OBJ_pkcs12_Version1 OBJ_pkcs12,10L + +#define OBJ_pkcs12_BagIds OBJ_pkcs12_Version1,1L + +#define LN_keyBag "keyBag" +#define NID_keyBag 150 +#define OBJ_keyBag OBJ_pkcs12_BagIds,1L + +#define LN_pkcs8ShroudedKeyBag "pkcs8ShroudedKeyBag" +#define NID_pkcs8ShroudedKeyBag 151 +#define OBJ_pkcs8ShroudedKeyBag OBJ_pkcs12_BagIds,2L + +#define LN_certBag "certBag" +#define NID_certBag 152 +#define OBJ_certBag OBJ_pkcs12_BagIds,3L + +#define LN_crlBag "crlBag" +#define NID_crlBag 153 +#define OBJ_crlBag OBJ_pkcs12_BagIds,4L + +#define LN_secretBag "secretBag" +#define NID_secretBag 154 +#define OBJ_secretBag OBJ_pkcs12_BagIds,5L + +#define LN_safeContentsBag "safeContentsBag" +#define NID_safeContentsBag 155 +#define OBJ_safeContentsBag OBJ_pkcs12_BagIds,6L + +#define SN_md2 "MD2" +#define LN_md2 "md2" +#define NID_md2 3 +#define OBJ_md2 OBJ_rsadsi,2L,2L + +#define SN_md4 "MD4" +#define LN_md4 "md4" +#define NID_md4 257 +#define OBJ_md4 OBJ_rsadsi,2L,4L + +#define SN_md5 "MD5" +#define LN_md5 "md5" +#define NID_md5 4 +#define OBJ_md5 OBJ_rsadsi,2L,5L + +#define SN_md5_sha1 "MD5-SHA1" +#define LN_md5_sha1 "md5-sha1" +#define NID_md5_sha1 114 + +#define LN_hmacWithMD5 "hmacWithMD5" +#define NID_hmacWithMD5 797 +#define OBJ_hmacWithMD5 OBJ_rsadsi,2L,6L + +#define LN_hmacWithSHA1 "hmacWithSHA1" +#define NID_hmacWithSHA1 163 +#define OBJ_hmacWithSHA1 OBJ_rsadsi,2L,7L + +#define SN_sm2 "SM2" +#define LN_sm2 "sm2" +#define NID_sm2 1172 +#define OBJ_sm2 OBJ_sm_scheme,301L + +#define SN_sm3 "SM3" +#define LN_sm3 "sm3" +#define NID_sm3 1143 +#define OBJ_sm3 OBJ_sm_scheme,401L + +#define SN_sm3WithRSAEncryption "RSA-SM3" +#define LN_sm3WithRSAEncryption "sm3WithRSAEncryption" +#define NID_sm3WithRSAEncryption 1144 +#define OBJ_sm3WithRSAEncryption OBJ_sm_scheme,504L + +#define LN_hmacWithSHA224 "hmacWithSHA224" +#define NID_hmacWithSHA224 798 +#define OBJ_hmacWithSHA224 OBJ_rsadsi,2L,8L + +#define LN_hmacWithSHA256 "hmacWithSHA256" +#define NID_hmacWithSHA256 799 +#define OBJ_hmacWithSHA256 OBJ_rsadsi,2L,9L + +#define LN_hmacWithSHA384 "hmacWithSHA384" +#define NID_hmacWithSHA384 800 +#define OBJ_hmacWithSHA384 OBJ_rsadsi,2L,10L + +#define LN_hmacWithSHA512 "hmacWithSHA512" +#define NID_hmacWithSHA512 801 +#define OBJ_hmacWithSHA512 OBJ_rsadsi,2L,11L + +#define LN_hmacWithSHA512_224 "hmacWithSHA512-224" +#define NID_hmacWithSHA512_224 1193 +#define OBJ_hmacWithSHA512_224 OBJ_rsadsi,2L,12L + +#define LN_hmacWithSHA512_256 "hmacWithSHA512-256" +#define NID_hmacWithSHA512_256 1194 +#define OBJ_hmacWithSHA512_256 OBJ_rsadsi,2L,13L + +#define SN_rc2_cbc "RC2-CBC" +#define LN_rc2_cbc "rc2-cbc" +#define NID_rc2_cbc 37 +#define OBJ_rc2_cbc OBJ_rsadsi,3L,2L + +#define SN_rc2_ecb "RC2-ECB" +#define LN_rc2_ecb "rc2-ecb" +#define NID_rc2_ecb 38 + +#define 
SN_rc2_cfb64 "RC2-CFB" +#define LN_rc2_cfb64 "rc2-cfb" +#define NID_rc2_cfb64 39 + +#define SN_rc2_ofb64 "RC2-OFB" +#define LN_rc2_ofb64 "rc2-ofb" +#define NID_rc2_ofb64 40 + +#define SN_rc2_40_cbc "RC2-40-CBC" +#define LN_rc2_40_cbc "rc2-40-cbc" +#define NID_rc2_40_cbc 98 + +#define SN_rc2_64_cbc "RC2-64-CBC" +#define LN_rc2_64_cbc "rc2-64-cbc" +#define NID_rc2_64_cbc 166 + +#define SN_rc4 "RC4" +#define LN_rc4 "rc4" +#define NID_rc4 5 +#define OBJ_rc4 OBJ_rsadsi,3L,4L + +#define SN_rc4_40 "RC4-40" +#define LN_rc4_40 "rc4-40" +#define NID_rc4_40 97 + +#define SN_des_ede3_cbc "DES-EDE3-CBC" +#define LN_des_ede3_cbc "des-ede3-cbc" +#define NID_des_ede3_cbc 44 +#define OBJ_des_ede3_cbc OBJ_rsadsi,3L,7L + +#define SN_rc5_cbc "RC5-CBC" +#define LN_rc5_cbc "rc5-cbc" +#define NID_rc5_cbc 120 +#define OBJ_rc5_cbc OBJ_rsadsi,3L,8L + +#define SN_rc5_ecb "RC5-ECB" +#define LN_rc5_ecb "rc5-ecb" +#define NID_rc5_ecb 121 + +#define SN_rc5_cfb64 "RC5-CFB" +#define LN_rc5_cfb64 "rc5-cfb" +#define NID_rc5_cfb64 122 + +#define SN_rc5_ofb64 "RC5-OFB" +#define LN_rc5_ofb64 "rc5-ofb" +#define NID_rc5_ofb64 123 + +#define SN_ms_ext_req "msExtReq" +#define LN_ms_ext_req "Microsoft Extension Request" +#define NID_ms_ext_req 171 +#define OBJ_ms_ext_req 1L,3L,6L,1L,4L,1L,311L,2L,1L,14L + +#define SN_ms_code_ind "msCodeInd" +#define LN_ms_code_ind "Microsoft Individual Code Signing" +#define NID_ms_code_ind 134 +#define OBJ_ms_code_ind 1L,3L,6L,1L,4L,1L,311L,2L,1L,21L + +#define SN_ms_code_com "msCodeCom" +#define LN_ms_code_com "Microsoft Commercial Code Signing" +#define NID_ms_code_com 135 +#define OBJ_ms_code_com 1L,3L,6L,1L,4L,1L,311L,2L,1L,22L + +#define SN_ms_ctl_sign "msCTLSign" +#define LN_ms_ctl_sign "Microsoft Trust List Signing" +#define NID_ms_ctl_sign 136 +#define OBJ_ms_ctl_sign 1L,3L,6L,1L,4L,1L,311L,10L,3L,1L + +#define SN_ms_sgc "msSGC" +#define LN_ms_sgc "Microsoft Server Gated Crypto" +#define NID_ms_sgc 137 +#define OBJ_ms_sgc 1L,3L,6L,1L,4L,1L,311L,10L,3L,3L + +#define SN_ms_efs "msEFS" +#define LN_ms_efs "Microsoft Encrypted File System" +#define NID_ms_efs 138 +#define OBJ_ms_efs 1L,3L,6L,1L,4L,1L,311L,10L,3L,4L + +#define SN_ms_smartcard_login "msSmartcardLogin" +#define LN_ms_smartcard_login "Microsoft Smartcard Login" +#define NID_ms_smartcard_login 648 +#define OBJ_ms_smartcard_login 1L,3L,6L,1L,4L,1L,311L,20L,2L,2L + +#define SN_ms_upn "msUPN" +#define LN_ms_upn "Microsoft User Principal Name" +#define NID_ms_upn 649 +#define OBJ_ms_upn 1L,3L,6L,1L,4L,1L,311L,20L,2L,3L + +#define SN_idea_cbc "IDEA-CBC" +#define LN_idea_cbc "idea-cbc" +#define NID_idea_cbc 34 +#define OBJ_idea_cbc 1L,3L,6L,1L,4L,1L,188L,7L,1L,1L,2L + +#define SN_idea_ecb "IDEA-ECB" +#define LN_idea_ecb "idea-ecb" +#define NID_idea_ecb 36 + +#define SN_idea_cfb64 "IDEA-CFB" +#define LN_idea_cfb64 "idea-cfb" +#define NID_idea_cfb64 35 + +#define SN_idea_ofb64 "IDEA-OFB" +#define LN_idea_ofb64 "idea-ofb" +#define NID_idea_ofb64 46 + +#define SN_bf_cbc "BF-CBC" +#define LN_bf_cbc "bf-cbc" +#define NID_bf_cbc 91 +#define OBJ_bf_cbc 1L,3L,6L,1L,4L,1L,3029L,1L,2L + +#define SN_bf_ecb "BF-ECB" +#define LN_bf_ecb "bf-ecb" +#define NID_bf_ecb 92 + +#define SN_bf_cfb64 "BF-CFB" +#define LN_bf_cfb64 "bf-cfb" +#define NID_bf_cfb64 93 + +#define SN_bf_ofb64 "BF-OFB" +#define LN_bf_ofb64 "bf-ofb" +#define NID_bf_ofb64 94 + +#define SN_id_pkix "PKIX" +#define NID_id_pkix 127 +#define OBJ_id_pkix 1L,3L,6L,1L,5L,5L,7L + +#define SN_id_pkix_mod "id-pkix-mod" +#define NID_id_pkix_mod 258 +#define OBJ_id_pkix_mod OBJ_id_pkix,0L + +#define 
SN_id_pe "id-pe" +#define NID_id_pe 175 +#define OBJ_id_pe OBJ_id_pkix,1L + +#define SN_id_qt "id-qt" +#define NID_id_qt 259 +#define OBJ_id_qt OBJ_id_pkix,2L + +#define SN_id_kp "id-kp" +#define NID_id_kp 128 +#define OBJ_id_kp OBJ_id_pkix,3L + +#define SN_id_it "id-it" +#define NID_id_it 260 +#define OBJ_id_it OBJ_id_pkix,4L + +#define SN_id_pkip "id-pkip" +#define NID_id_pkip 261 +#define OBJ_id_pkip OBJ_id_pkix,5L + +#define SN_id_alg "id-alg" +#define NID_id_alg 262 +#define OBJ_id_alg OBJ_id_pkix,6L + +#define SN_id_cmc "id-cmc" +#define NID_id_cmc 263 +#define OBJ_id_cmc OBJ_id_pkix,7L + +#define SN_id_on "id-on" +#define NID_id_on 264 +#define OBJ_id_on OBJ_id_pkix,8L + +#define SN_id_pda "id-pda" +#define NID_id_pda 265 +#define OBJ_id_pda OBJ_id_pkix,9L + +#define SN_id_aca "id-aca" +#define NID_id_aca 266 +#define OBJ_id_aca OBJ_id_pkix,10L + +#define SN_id_qcs "id-qcs" +#define NID_id_qcs 267 +#define OBJ_id_qcs OBJ_id_pkix,11L + +#define SN_id_cct "id-cct" +#define NID_id_cct 268 +#define OBJ_id_cct OBJ_id_pkix,12L + +#define SN_id_ppl "id-ppl" +#define NID_id_ppl 662 +#define OBJ_id_ppl OBJ_id_pkix,21L + +#define SN_id_ad "id-ad" +#define NID_id_ad 176 +#define OBJ_id_ad OBJ_id_pkix,48L + +#define SN_id_pkix1_explicit_88 "id-pkix1-explicit-88" +#define NID_id_pkix1_explicit_88 269 +#define OBJ_id_pkix1_explicit_88 OBJ_id_pkix_mod,1L + +#define SN_id_pkix1_implicit_88 "id-pkix1-implicit-88" +#define NID_id_pkix1_implicit_88 270 +#define OBJ_id_pkix1_implicit_88 OBJ_id_pkix_mod,2L + +#define SN_id_pkix1_explicit_93 "id-pkix1-explicit-93" +#define NID_id_pkix1_explicit_93 271 +#define OBJ_id_pkix1_explicit_93 OBJ_id_pkix_mod,3L + +#define SN_id_pkix1_implicit_93 "id-pkix1-implicit-93" +#define NID_id_pkix1_implicit_93 272 +#define OBJ_id_pkix1_implicit_93 OBJ_id_pkix_mod,4L + +#define SN_id_mod_crmf "id-mod-crmf" +#define NID_id_mod_crmf 273 +#define OBJ_id_mod_crmf OBJ_id_pkix_mod,5L + +#define SN_id_mod_cmc "id-mod-cmc" +#define NID_id_mod_cmc 274 +#define OBJ_id_mod_cmc OBJ_id_pkix_mod,6L + +#define SN_id_mod_kea_profile_88 "id-mod-kea-profile-88" +#define NID_id_mod_kea_profile_88 275 +#define OBJ_id_mod_kea_profile_88 OBJ_id_pkix_mod,7L + +#define SN_id_mod_kea_profile_93 "id-mod-kea-profile-93" +#define NID_id_mod_kea_profile_93 276 +#define OBJ_id_mod_kea_profile_93 OBJ_id_pkix_mod,8L + +#define SN_id_mod_cmp "id-mod-cmp" +#define NID_id_mod_cmp 277 +#define OBJ_id_mod_cmp OBJ_id_pkix_mod,9L + +#define SN_id_mod_qualified_cert_88 "id-mod-qualified-cert-88" +#define NID_id_mod_qualified_cert_88 278 +#define OBJ_id_mod_qualified_cert_88 OBJ_id_pkix_mod,10L + +#define SN_id_mod_qualified_cert_93 "id-mod-qualified-cert-93" +#define NID_id_mod_qualified_cert_93 279 +#define OBJ_id_mod_qualified_cert_93 OBJ_id_pkix_mod,11L + +#define SN_id_mod_attribute_cert "id-mod-attribute-cert" +#define NID_id_mod_attribute_cert 280 +#define OBJ_id_mod_attribute_cert OBJ_id_pkix_mod,12L + +#define SN_id_mod_timestamp_protocol "id-mod-timestamp-protocol" +#define NID_id_mod_timestamp_protocol 281 +#define OBJ_id_mod_timestamp_protocol OBJ_id_pkix_mod,13L + +#define SN_id_mod_ocsp "id-mod-ocsp" +#define NID_id_mod_ocsp 282 +#define OBJ_id_mod_ocsp OBJ_id_pkix_mod,14L + +#define SN_id_mod_dvcs "id-mod-dvcs" +#define NID_id_mod_dvcs 283 +#define OBJ_id_mod_dvcs OBJ_id_pkix_mod,15L + +#define SN_id_mod_cmp2000 "id-mod-cmp2000" +#define NID_id_mod_cmp2000 284 +#define OBJ_id_mod_cmp2000 OBJ_id_pkix_mod,16L + +#define SN_info_access "authorityInfoAccess" +#define LN_info_access "Authority 
Information Access" +#define NID_info_access 177 +#define OBJ_info_access OBJ_id_pe,1L + +#define SN_biometricInfo "biometricInfo" +#define LN_biometricInfo "Biometric Info" +#define NID_biometricInfo 285 +#define OBJ_biometricInfo OBJ_id_pe,2L + +#define SN_qcStatements "qcStatements" +#define NID_qcStatements 286 +#define OBJ_qcStatements OBJ_id_pe,3L + +#define SN_ac_auditEntity "ac-auditEntity" +#define NID_ac_auditEntity 287 +#define OBJ_ac_auditEntity OBJ_id_pe,4L + +#define SN_ac_targeting "ac-targeting" +#define NID_ac_targeting 288 +#define OBJ_ac_targeting OBJ_id_pe,5L + +#define SN_aaControls "aaControls" +#define NID_aaControls 289 +#define OBJ_aaControls OBJ_id_pe,6L + +#define SN_sbgp_ipAddrBlock "sbgp-ipAddrBlock" +#define NID_sbgp_ipAddrBlock 290 +#define OBJ_sbgp_ipAddrBlock OBJ_id_pe,7L + +#define SN_sbgp_autonomousSysNum "sbgp-autonomousSysNum" +#define NID_sbgp_autonomousSysNum 291 +#define OBJ_sbgp_autonomousSysNum OBJ_id_pe,8L + +#define SN_sbgp_routerIdentifier "sbgp-routerIdentifier" +#define NID_sbgp_routerIdentifier 292 +#define OBJ_sbgp_routerIdentifier OBJ_id_pe,9L + +#define SN_ac_proxying "ac-proxying" +#define NID_ac_proxying 397 +#define OBJ_ac_proxying OBJ_id_pe,10L + +#define SN_sinfo_access "subjectInfoAccess" +#define LN_sinfo_access "Subject Information Access" +#define NID_sinfo_access 398 +#define OBJ_sinfo_access OBJ_id_pe,11L + +#define SN_proxyCertInfo "proxyCertInfo" +#define LN_proxyCertInfo "Proxy Certificate Information" +#define NID_proxyCertInfo 663 +#define OBJ_proxyCertInfo OBJ_id_pe,14L + +#define SN_tlsfeature "tlsfeature" +#define LN_tlsfeature "TLS Feature" +#define NID_tlsfeature 1020 +#define OBJ_tlsfeature OBJ_id_pe,24L + +#define SN_id_qt_cps "id-qt-cps" +#define LN_id_qt_cps "Policy Qualifier CPS" +#define NID_id_qt_cps 164 +#define OBJ_id_qt_cps OBJ_id_qt,1L + +#define SN_id_qt_unotice "id-qt-unotice" +#define LN_id_qt_unotice "Policy Qualifier User Notice" +#define NID_id_qt_unotice 165 +#define OBJ_id_qt_unotice OBJ_id_qt,2L + +#define SN_textNotice "textNotice" +#define NID_textNotice 293 +#define OBJ_textNotice OBJ_id_qt,3L + +#define SN_server_auth "serverAuth" +#define LN_server_auth "TLS Web Server Authentication" +#define NID_server_auth 129 +#define OBJ_server_auth OBJ_id_kp,1L + +#define SN_client_auth "clientAuth" +#define LN_client_auth "TLS Web Client Authentication" +#define NID_client_auth 130 +#define OBJ_client_auth OBJ_id_kp,2L + +#define SN_code_sign "codeSigning" +#define LN_code_sign "Code Signing" +#define NID_code_sign 131 +#define OBJ_code_sign OBJ_id_kp,3L + +#define SN_email_protect "emailProtection" +#define LN_email_protect "E-mail Protection" +#define NID_email_protect 132 +#define OBJ_email_protect OBJ_id_kp,4L + +#define SN_ipsecEndSystem "ipsecEndSystem" +#define LN_ipsecEndSystem "IPSec End System" +#define NID_ipsecEndSystem 294 +#define OBJ_ipsecEndSystem OBJ_id_kp,5L + +#define SN_ipsecTunnel "ipsecTunnel" +#define LN_ipsecTunnel "IPSec Tunnel" +#define NID_ipsecTunnel 295 +#define OBJ_ipsecTunnel OBJ_id_kp,6L + +#define SN_ipsecUser "ipsecUser" +#define LN_ipsecUser "IPSec User" +#define NID_ipsecUser 296 +#define OBJ_ipsecUser OBJ_id_kp,7L + +#define SN_time_stamp "timeStamping" +#define LN_time_stamp "Time Stamping" +#define NID_time_stamp 133 +#define OBJ_time_stamp OBJ_id_kp,8L + +#define SN_OCSP_sign "OCSPSigning" +#define LN_OCSP_sign "OCSP Signing" +#define NID_OCSP_sign 180 +#define OBJ_OCSP_sign OBJ_id_kp,9L + +#define SN_dvcs "DVCS" +#define LN_dvcs "dvcs" +#define NID_dvcs 297 
+#define OBJ_dvcs OBJ_id_kp,10L + +#define SN_ipsec_IKE "ipsecIKE" +#define LN_ipsec_IKE "ipsec Internet Key Exchange" +#define NID_ipsec_IKE 1022 +#define OBJ_ipsec_IKE OBJ_id_kp,17L + +#define SN_capwapAC "capwapAC" +#define LN_capwapAC "Ctrl/provision WAP Access" +#define NID_capwapAC 1023 +#define OBJ_capwapAC OBJ_id_kp,18L + +#define SN_capwapWTP "capwapWTP" +#define LN_capwapWTP "Ctrl/Provision WAP Termination" +#define NID_capwapWTP 1024 +#define OBJ_capwapWTP OBJ_id_kp,19L + +#define SN_sshClient "secureShellClient" +#define LN_sshClient "SSH Client" +#define NID_sshClient 1025 +#define OBJ_sshClient OBJ_id_kp,21L + +#define SN_sshServer "secureShellServer" +#define LN_sshServer "SSH Server" +#define NID_sshServer 1026 +#define OBJ_sshServer OBJ_id_kp,22L + +#define SN_sendRouter "sendRouter" +#define LN_sendRouter "Send Router" +#define NID_sendRouter 1027 +#define OBJ_sendRouter OBJ_id_kp,23L + +#define SN_sendProxiedRouter "sendProxiedRouter" +#define LN_sendProxiedRouter "Send Proxied Router" +#define NID_sendProxiedRouter 1028 +#define OBJ_sendProxiedRouter OBJ_id_kp,24L + +#define SN_sendOwner "sendOwner" +#define LN_sendOwner "Send Owner" +#define NID_sendOwner 1029 +#define OBJ_sendOwner OBJ_id_kp,25L + +#define SN_sendProxiedOwner "sendProxiedOwner" +#define LN_sendProxiedOwner "Send Proxied Owner" +#define NID_sendProxiedOwner 1030 +#define OBJ_sendProxiedOwner OBJ_id_kp,26L + +#define SN_cmcCA "cmcCA" +#define LN_cmcCA "CMC Certificate Authority" +#define NID_cmcCA 1131 +#define OBJ_cmcCA OBJ_id_kp,27L + +#define SN_cmcRA "cmcRA" +#define LN_cmcRA "CMC Registration Authority" +#define NID_cmcRA 1132 +#define OBJ_cmcRA OBJ_id_kp,28L + +#define SN_id_it_caProtEncCert "id-it-caProtEncCert" +#define NID_id_it_caProtEncCert 298 +#define OBJ_id_it_caProtEncCert OBJ_id_it,1L + +#define SN_id_it_signKeyPairTypes "id-it-signKeyPairTypes" +#define NID_id_it_signKeyPairTypes 299 +#define OBJ_id_it_signKeyPairTypes OBJ_id_it,2L + +#define SN_id_it_encKeyPairTypes "id-it-encKeyPairTypes" +#define NID_id_it_encKeyPairTypes 300 +#define OBJ_id_it_encKeyPairTypes OBJ_id_it,3L + +#define SN_id_it_preferredSymmAlg "id-it-preferredSymmAlg" +#define NID_id_it_preferredSymmAlg 301 +#define OBJ_id_it_preferredSymmAlg OBJ_id_it,4L + +#define SN_id_it_caKeyUpdateInfo "id-it-caKeyUpdateInfo" +#define NID_id_it_caKeyUpdateInfo 302 +#define OBJ_id_it_caKeyUpdateInfo OBJ_id_it,5L + +#define SN_id_it_currentCRL "id-it-currentCRL" +#define NID_id_it_currentCRL 303 +#define OBJ_id_it_currentCRL OBJ_id_it,6L + +#define SN_id_it_unsupportedOIDs "id-it-unsupportedOIDs" +#define NID_id_it_unsupportedOIDs 304 +#define OBJ_id_it_unsupportedOIDs OBJ_id_it,7L + +#define SN_id_it_subscriptionRequest "id-it-subscriptionRequest" +#define NID_id_it_subscriptionRequest 305 +#define OBJ_id_it_subscriptionRequest OBJ_id_it,8L + +#define SN_id_it_subscriptionResponse "id-it-subscriptionResponse" +#define NID_id_it_subscriptionResponse 306 +#define OBJ_id_it_subscriptionResponse OBJ_id_it,9L + +#define SN_id_it_keyPairParamReq "id-it-keyPairParamReq" +#define NID_id_it_keyPairParamReq 307 +#define OBJ_id_it_keyPairParamReq OBJ_id_it,10L + +#define SN_id_it_keyPairParamRep "id-it-keyPairParamRep" +#define NID_id_it_keyPairParamRep 308 +#define OBJ_id_it_keyPairParamRep OBJ_id_it,11L + +#define SN_id_it_revPassphrase "id-it-revPassphrase" +#define NID_id_it_revPassphrase 309 +#define OBJ_id_it_revPassphrase OBJ_id_it,12L + +#define SN_id_it_implicitConfirm "id-it-implicitConfirm" +#define NID_id_it_implicitConfirm 310 
+#define OBJ_id_it_implicitConfirm OBJ_id_it,13L + +#define SN_id_it_confirmWaitTime "id-it-confirmWaitTime" +#define NID_id_it_confirmWaitTime 311 +#define OBJ_id_it_confirmWaitTime OBJ_id_it,14L + +#define SN_id_it_origPKIMessage "id-it-origPKIMessage" +#define NID_id_it_origPKIMessage 312 +#define OBJ_id_it_origPKIMessage OBJ_id_it,15L + +#define SN_id_it_suppLangTags "id-it-suppLangTags" +#define NID_id_it_suppLangTags 784 +#define OBJ_id_it_suppLangTags OBJ_id_it,16L + +#define SN_id_regCtrl "id-regCtrl" +#define NID_id_regCtrl 313 +#define OBJ_id_regCtrl OBJ_id_pkip,1L + +#define SN_id_regInfo "id-regInfo" +#define NID_id_regInfo 314 +#define OBJ_id_regInfo OBJ_id_pkip,2L + +#define SN_id_regCtrl_regToken "id-regCtrl-regToken" +#define NID_id_regCtrl_regToken 315 +#define OBJ_id_regCtrl_regToken OBJ_id_regCtrl,1L + +#define SN_id_regCtrl_authenticator "id-regCtrl-authenticator" +#define NID_id_regCtrl_authenticator 316 +#define OBJ_id_regCtrl_authenticator OBJ_id_regCtrl,2L + +#define SN_id_regCtrl_pkiPublicationInfo "id-regCtrl-pkiPublicationInfo" +#define NID_id_regCtrl_pkiPublicationInfo 317 +#define OBJ_id_regCtrl_pkiPublicationInfo OBJ_id_regCtrl,3L + +#define SN_id_regCtrl_pkiArchiveOptions "id-regCtrl-pkiArchiveOptions" +#define NID_id_regCtrl_pkiArchiveOptions 318 +#define OBJ_id_regCtrl_pkiArchiveOptions OBJ_id_regCtrl,4L + +#define SN_id_regCtrl_oldCertID "id-regCtrl-oldCertID" +#define NID_id_regCtrl_oldCertID 319 +#define OBJ_id_regCtrl_oldCertID OBJ_id_regCtrl,5L + +#define SN_id_regCtrl_protocolEncrKey "id-regCtrl-protocolEncrKey" +#define NID_id_regCtrl_protocolEncrKey 320 +#define OBJ_id_regCtrl_protocolEncrKey OBJ_id_regCtrl,6L + +#define SN_id_regInfo_utf8Pairs "id-regInfo-utf8Pairs" +#define NID_id_regInfo_utf8Pairs 321 +#define OBJ_id_regInfo_utf8Pairs OBJ_id_regInfo,1L + +#define SN_id_regInfo_certReq "id-regInfo-certReq" +#define NID_id_regInfo_certReq 322 +#define OBJ_id_regInfo_certReq OBJ_id_regInfo,2L + +#define SN_id_alg_des40 "id-alg-des40" +#define NID_id_alg_des40 323 +#define OBJ_id_alg_des40 OBJ_id_alg,1L + +#define SN_id_alg_noSignature "id-alg-noSignature" +#define NID_id_alg_noSignature 324 +#define OBJ_id_alg_noSignature OBJ_id_alg,2L + +#define SN_id_alg_dh_sig_hmac_sha1 "id-alg-dh-sig-hmac-sha1" +#define NID_id_alg_dh_sig_hmac_sha1 325 +#define OBJ_id_alg_dh_sig_hmac_sha1 OBJ_id_alg,3L + +#define SN_id_alg_dh_pop "id-alg-dh-pop" +#define NID_id_alg_dh_pop 326 +#define OBJ_id_alg_dh_pop OBJ_id_alg,4L + +#define SN_id_cmc_statusInfo "id-cmc-statusInfo" +#define NID_id_cmc_statusInfo 327 +#define OBJ_id_cmc_statusInfo OBJ_id_cmc,1L + +#define SN_id_cmc_identification "id-cmc-identification" +#define NID_id_cmc_identification 328 +#define OBJ_id_cmc_identification OBJ_id_cmc,2L + +#define SN_id_cmc_identityProof "id-cmc-identityProof" +#define NID_id_cmc_identityProof 329 +#define OBJ_id_cmc_identityProof OBJ_id_cmc,3L + +#define SN_id_cmc_dataReturn "id-cmc-dataReturn" +#define NID_id_cmc_dataReturn 330 +#define OBJ_id_cmc_dataReturn OBJ_id_cmc,4L + +#define SN_id_cmc_transactionId "id-cmc-transactionId" +#define NID_id_cmc_transactionId 331 +#define OBJ_id_cmc_transactionId OBJ_id_cmc,5L + +#define SN_id_cmc_senderNonce "id-cmc-senderNonce" +#define NID_id_cmc_senderNonce 332 +#define OBJ_id_cmc_senderNonce OBJ_id_cmc,6L + +#define SN_id_cmc_recipientNonce "id-cmc-recipientNonce" +#define NID_id_cmc_recipientNonce 333 +#define OBJ_id_cmc_recipientNonce OBJ_id_cmc,7L + +#define SN_id_cmc_addExtensions "id-cmc-addExtensions" +#define 
NID_id_cmc_addExtensions 334 +#define OBJ_id_cmc_addExtensions OBJ_id_cmc,8L + +#define SN_id_cmc_encryptedPOP "id-cmc-encryptedPOP" +#define NID_id_cmc_encryptedPOP 335 +#define OBJ_id_cmc_encryptedPOP OBJ_id_cmc,9L + +#define SN_id_cmc_decryptedPOP "id-cmc-decryptedPOP" +#define NID_id_cmc_decryptedPOP 336 +#define OBJ_id_cmc_decryptedPOP OBJ_id_cmc,10L + +#define SN_id_cmc_lraPOPWitness "id-cmc-lraPOPWitness" +#define NID_id_cmc_lraPOPWitness 337 +#define OBJ_id_cmc_lraPOPWitness OBJ_id_cmc,11L + +#define SN_id_cmc_getCert "id-cmc-getCert" +#define NID_id_cmc_getCert 338 +#define OBJ_id_cmc_getCert OBJ_id_cmc,15L + +#define SN_id_cmc_getCRL "id-cmc-getCRL" +#define NID_id_cmc_getCRL 339 +#define OBJ_id_cmc_getCRL OBJ_id_cmc,16L + +#define SN_id_cmc_revokeRequest "id-cmc-revokeRequest" +#define NID_id_cmc_revokeRequest 340 +#define OBJ_id_cmc_revokeRequest OBJ_id_cmc,17L + +#define SN_id_cmc_regInfo "id-cmc-regInfo" +#define NID_id_cmc_regInfo 341 +#define OBJ_id_cmc_regInfo OBJ_id_cmc,18L + +#define SN_id_cmc_responseInfo "id-cmc-responseInfo" +#define NID_id_cmc_responseInfo 342 +#define OBJ_id_cmc_responseInfo OBJ_id_cmc,19L + +#define SN_id_cmc_queryPending "id-cmc-queryPending" +#define NID_id_cmc_queryPending 343 +#define OBJ_id_cmc_queryPending OBJ_id_cmc,21L + +#define SN_id_cmc_popLinkRandom "id-cmc-popLinkRandom" +#define NID_id_cmc_popLinkRandom 344 +#define OBJ_id_cmc_popLinkRandom OBJ_id_cmc,22L + +#define SN_id_cmc_popLinkWitness "id-cmc-popLinkWitness" +#define NID_id_cmc_popLinkWitness 345 +#define OBJ_id_cmc_popLinkWitness OBJ_id_cmc,23L + +#define SN_id_cmc_confirmCertAcceptance "id-cmc-confirmCertAcceptance" +#define NID_id_cmc_confirmCertAcceptance 346 +#define OBJ_id_cmc_confirmCertAcceptance OBJ_id_cmc,24L + +#define SN_id_on_personalData "id-on-personalData" +#define NID_id_on_personalData 347 +#define OBJ_id_on_personalData OBJ_id_on,1L + +#define SN_id_on_permanentIdentifier "id-on-permanentIdentifier" +#define LN_id_on_permanentIdentifier "Permanent Identifier" +#define NID_id_on_permanentIdentifier 858 +#define OBJ_id_on_permanentIdentifier OBJ_id_on,3L + +#define SN_id_pda_dateOfBirth "id-pda-dateOfBirth" +#define NID_id_pda_dateOfBirth 348 +#define OBJ_id_pda_dateOfBirth OBJ_id_pda,1L + +#define SN_id_pda_placeOfBirth "id-pda-placeOfBirth" +#define NID_id_pda_placeOfBirth 349 +#define OBJ_id_pda_placeOfBirth OBJ_id_pda,2L + +#define SN_id_pda_gender "id-pda-gender" +#define NID_id_pda_gender 351 +#define OBJ_id_pda_gender OBJ_id_pda,3L + +#define SN_id_pda_countryOfCitizenship "id-pda-countryOfCitizenship" +#define NID_id_pda_countryOfCitizenship 352 +#define OBJ_id_pda_countryOfCitizenship OBJ_id_pda,4L + +#define SN_id_pda_countryOfResidence "id-pda-countryOfResidence" +#define NID_id_pda_countryOfResidence 353 +#define OBJ_id_pda_countryOfResidence OBJ_id_pda,5L + +#define SN_id_aca_authenticationInfo "id-aca-authenticationInfo" +#define NID_id_aca_authenticationInfo 354 +#define OBJ_id_aca_authenticationInfo OBJ_id_aca,1L + +#define SN_id_aca_accessIdentity "id-aca-accessIdentity" +#define NID_id_aca_accessIdentity 355 +#define OBJ_id_aca_accessIdentity OBJ_id_aca,2L + +#define SN_id_aca_chargingIdentity "id-aca-chargingIdentity" +#define NID_id_aca_chargingIdentity 356 +#define OBJ_id_aca_chargingIdentity OBJ_id_aca,3L + +#define SN_id_aca_group "id-aca-group" +#define NID_id_aca_group 357 +#define OBJ_id_aca_group OBJ_id_aca,4L + +#define SN_id_aca_role "id-aca-role" +#define NID_id_aca_role 358 +#define OBJ_id_aca_role OBJ_id_aca,5L + +#define 
SN_id_aca_encAttrs "id-aca-encAttrs" +#define NID_id_aca_encAttrs 399 +#define OBJ_id_aca_encAttrs OBJ_id_aca,6L + +#define SN_id_qcs_pkixQCSyntax_v1 "id-qcs-pkixQCSyntax-v1" +#define NID_id_qcs_pkixQCSyntax_v1 359 +#define OBJ_id_qcs_pkixQCSyntax_v1 OBJ_id_qcs,1L + +#define SN_id_cct_crs "id-cct-crs" +#define NID_id_cct_crs 360 +#define OBJ_id_cct_crs OBJ_id_cct,1L + +#define SN_id_cct_PKIData "id-cct-PKIData" +#define NID_id_cct_PKIData 361 +#define OBJ_id_cct_PKIData OBJ_id_cct,2L + +#define SN_id_cct_PKIResponse "id-cct-PKIResponse" +#define NID_id_cct_PKIResponse 362 +#define OBJ_id_cct_PKIResponse OBJ_id_cct,3L + +#define SN_id_ppl_anyLanguage "id-ppl-anyLanguage" +#define LN_id_ppl_anyLanguage "Any language" +#define NID_id_ppl_anyLanguage 664 +#define OBJ_id_ppl_anyLanguage OBJ_id_ppl,0L + +#define SN_id_ppl_inheritAll "id-ppl-inheritAll" +#define LN_id_ppl_inheritAll "Inherit all" +#define NID_id_ppl_inheritAll 665 +#define OBJ_id_ppl_inheritAll OBJ_id_ppl,1L + +#define SN_Independent "id-ppl-independent" +#define LN_Independent "Independent" +#define NID_Independent 667 +#define OBJ_Independent OBJ_id_ppl,2L + +#define SN_ad_OCSP "OCSP" +#define LN_ad_OCSP "OCSP" +#define NID_ad_OCSP 178 +#define OBJ_ad_OCSP OBJ_id_ad,1L + +#define SN_ad_ca_issuers "caIssuers" +#define LN_ad_ca_issuers "CA Issuers" +#define NID_ad_ca_issuers 179 +#define OBJ_ad_ca_issuers OBJ_id_ad,2L + +#define SN_ad_timeStamping "ad_timestamping" +#define LN_ad_timeStamping "AD Time Stamping" +#define NID_ad_timeStamping 363 +#define OBJ_ad_timeStamping OBJ_id_ad,3L + +#define SN_ad_dvcs "AD_DVCS" +#define LN_ad_dvcs "ad dvcs" +#define NID_ad_dvcs 364 +#define OBJ_ad_dvcs OBJ_id_ad,4L + +#define SN_caRepository "caRepository" +#define LN_caRepository "CA Repository" +#define NID_caRepository 785 +#define OBJ_caRepository OBJ_id_ad,5L + +#define OBJ_id_pkix_OCSP OBJ_ad_OCSP + +#define SN_id_pkix_OCSP_basic "basicOCSPResponse" +#define LN_id_pkix_OCSP_basic "Basic OCSP Response" +#define NID_id_pkix_OCSP_basic 365 +#define OBJ_id_pkix_OCSP_basic OBJ_id_pkix_OCSP,1L + +#define SN_id_pkix_OCSP_Nonce "Nonce" +#define LN_id_pkix_OCSP_Nonce "OCSP Nonce" +#define NID_id_pkix_OCSP_Nonce 366 +#define OBJ_id_pkix_OCSP_Nonce OBJ_id_pkix_OCSP,2L + +#define SN_id_pkix_OCSP_CrlID "CrlID" +#define LN_id_pkix_OCSP_CrlID "OCSP CRL ID" +#define NID_id_pkix_OCSP_CrlID 367 +#define OBJ_id_pkix_OCSP_CrlID OBJ_id_pkix_OCSP,3L + +#define SN_id_pkix_OCSP_acceptableResponses "acceptableResponses" +#define LN_id_pkix_OCSP_acceptableResponses "Acceptable OCSP Responses" +#define NID_id_pkix_OCSP_acceptableResponses 368 +#define OBJ_id_pkix_OCSP_acceptableResponses OBJ_id_pkix_OCSP,4L + +#define SN_id_pkix_OCSP_noCheck "noCheck" +#define LN_id_pkix_OCSP_noCheck "OCSP No Check" +#define NID_id_pkix_OCSP_noCheck 369 +#define OBJ_id_pkix_OCSP_noCheck OBJ_id_pkix_OCSP,5L + +#define SN_id_pkix_OCSP_archiveCutoff "archiveCutoff" +#define LN_id_pkix_OCSP_archiveCutoff "OCSP Archive Cutoff" +#define NID_id_pkix_OCSP_archiveCutoff 370 +#define OBJ_id_pkix_OCSP_archiveCutoff OBJ_id_pkix_OCSP,6L + +#define SN_id_pkix_OCSP_serviceLocator "serviceLocator" +#define LN_id_pkix_OCSP_serviceLocator "OCSP Service Locator" +#define NID_id_pkix_OCSP_serviceLocator 371 +#define OBJ_id_pkix_OCSP_serviceLocator OBJ_id_pkix_OCSP,7L + +#define SN_id_pkix_OCSP_extendedStatus "extendedStatus" +#define LN_id_pkix_OCSP_extendedStatus "Extended OCSP Status" +#define NID_id_pkix_OCSP_extendedStatus 372 +#define OBJ_id_pkix_OCSP_extendedStatus OBJ_id_pkix_OCSP,8L + 
+#define SN_id_pkix_OCSP_valid "valid" +#define NID_id_pkix_OCSP_valid 373 +#define OBJ_id_pkix_OCSP_valid OBJ_id_pkix_OCSP,9L + +#define SN_id_pkix_OCSP_path "path" +#define NID_id_pkix_OCSP_path 374 +#define OBJ_id_pkix_OCSP_path OBJ_id_pkix_OCSP,10L + +#define SN_id_pkix_OCSP_trustRoot "trustRoot" +#define LN_id_pkix_OCSP_trustRoot "Trust Root" +#define NID_id_pkix_OCSP_trustRoot 375 +#define OBJ_id_pkix_OCSP_trustRoot OBJ_id_pkix_OCSP,11L + +#define SN_algorithm "algorithm" +#define LN_algorithm "algorithm" +#define NID_algorithm 376 +#define OBJ_algorithm 1L,3L,14L,3L,2L + +#define SN_md5WithRSA "RSA-NP-MD5" +#define LN_md5WithRSA "md5WithRSA" +#define NID_md5WithRSA 104 +#define OBJ_md5WithRSA OBJ_algorithm,3L + +#define SN_des_ecb "DES-ECB" +#define LN_des_ecb "des-ecb" +#define NID_des_ecb 29 +#define OBJ_des_ecb OBJ_algorithm,6L + +#define SN_des_cbc "DES-CBC" +#define LN_des_cbc "des-cbc" +#define NID_des_cbc 31 +#define OBJ_des_cbc OBJ_algorithm,7L + +#define SN_des_ofb64 "DES-OFB" +#define LN_des_ofb64 "des-ofb" +#define NID_des_ofb64 45 +#define OBJ_des_ofb64 OBJ_algorithm,8L + +#define SN_des_cfb64 "DES-CFB" +#define LN_des_cfb64 "des-cfb" +#define NID_des_cfb64 30 +#define OBJ_des_cfb64 OBJ_algorithm,9L + +#define SN_rsaSignature "rsaSignature" +#define NID_rsaSignature 377 +#define OBJ_rsaSignature OBJ_algorithm,11L + +#define SN_dsa_2 "DSA-old" +#define LN_dsa_2 "dsaEncryption-old" +#define NID_dsa_2 67 +#define OBJ_dsa_2 OBJ_algorithm,12L + +#define SN_dsaWithSHA "DSA-SHA" +#define LN_dsaWithSHA "dsaWithSHA" +#define NID_dsaWithSHA 66 +#define OBJ_dsaWithSHA OBJ_algorithm,13L + +#define SN_shaWithRSAEncryption "RSA-SHA" +#define LN_shaWithRSAEncryption "shaWithRSAEncryption" +#define NID_shaWithRSAEncryption 42 +#define OBJ_shaWithRSAEncryption OBJ_algorithm,15L + +#define SN_des_ede_ecb "DES-EDE" +#define LN_des_ede_ecb "des-ede" +#define NID_des_ede_ecb 32 +#define OBJ_des_ede_ecb OBJ_algorithm,17L + +#define SN_des_ede3_ecb "DES-EDE3" +#define LN_des_ede3_ecb "des-ede3" +#define NID_des_ede3_ecb 33 + +#define SN_des_ede_cbc "DES-EDE-CBC" +#define LN_des_ede_cbc "des-ede-cbc" +#define NID_des_ede_cbc 43 + +#define SN_des_ede_cfb64 "DES-EDE-CFB" +#define LN_des_ede_cfb64 "des-ede-cfb" +#define NID_des_ede_cfb64 60 + +#define SN_des_ede3_cfb64 "DES-EDE3-CFB" +#define LN_des_ede3_cfb64 "des-ede3-cfb" +#define NID_des_ede3_cfb64 61 + +#define SN_des_ede_ofb64 "DES-EDE-OFB" +#define LN_des_ede_ofb64 "des-ede-ofb" +#define NID_des_ede_ofb64 62 + +#define SN_des_ede3_ofb64 "DES-EDE3-OFB" +#define LN_des_ede3_ofb64 "des-ede3-ofb" +#define NID_des_ede3_ofb64 63 + +#define SN_desx_cbc "DESX-CBC" +#define LN_desx_cbc "desx-cbc" +#define NID_desx_cbc 80 + +#define SN_sha "SHA" +#define LN_sha "sha" +#define NID_sha 41 +#define OBJ_sha OBJ_algorithm,18L + +#define SN_sha1 "SHA1" +#define LN_sha1 "sha1" +#define NID_sha1 64 +#define OBJ_sha1 OBJ_algorithm,26L + +#define SN_dsaWithSHA1_2 "DSA-SHA1-old" +#define LN_dsaWithSHA1_2 "dsaWithSHA1-old" +#define NID_dsaWithSHA1_2 70 +#define OBJ_dsaWithSHA1_2 OBJ_algorithm,27L + +#define SN_sha1WithRSA "RSA-SHA1-2" +#define LN_sha1WithRSA "sha1WithRSA" +#define NID_sha1WithRSA 115 +#define OBJ_sha1WithRSA OBJ_algorithm,29L + +#define SN_ripemd160 "RIPEMD160" +#define LN_ripemd160 "ripemd160" +#define NID_ripemd160 117 +#define OBJ_ripemd160 1L,3L,36L,3L,2L,1L + +#define SN_ripemd160WithRSA "RSA-RIPEMD160" +#define LN_ripemd160WithRSA "ripemd160WithRSA" +#define NID_ripemd160WithRSA 119 +#define OBJ_ripemd160WithRSA 1L,3L,36L,3L,3L,1L,2L + 
+#define SN_blake2b512 "BLAKE2b512" +#define LN_blake2b512 "blake2b512" +#define NID_blake2b512 1056 +#define OBJ_blake2b512 1L,3L,6L,1L,4L,1L,1722L,12L,2L,1L,16L + +#define SN_blake2s256 "BLAKE2s256" +#define LN_blake2s256 "blake2s256" +#define NID_blake2s256 1057 +#define OBJ_blake2s256 1L,3L,6L,1L,4L,1L,1722L,12L,2L,2L,8L + +#define SN_sxnet "SXNetID" +#define LN_sxnet "Strong Extranet ID" +#define NID_sxnet 143 +#define OBJ_sxnet 1L,3L,101L,1L,4L,1L + +#define SN_X500 "X500" +#define LN_X500 "directory services (X.500)" +#define NID_X500 11 +#define OBJ_X500 2L,5L + +#define SN_X509 "X509" +#define NID_X509 12 +#define OBJ_X509 OBJ_X500,4L + +#define SN_commonName "CN" +#define LN_commonName "commonName" +#define NID_commonName 13 +#define OBJ_commonName OBJ_X509,3L + +#define SN_surname "SN" +#define LN_surname "surname" +#define NID_surname 100 +#define OBJ_surname OBJ_X509,4L + +#define LN_serialNumber "serialNumber" +#define NID_serialNumber 105 +#define OBJ_serialNumber OBJ_X509,5L + +#define SN_countryName "C" +#define LN_countryName "countryName" +#define NID_countryName 14 +#define OBJ_countryName OBJ_X509,6L + +#define SN_localityName "L" +#define LN_localityName "localityName" +#define NID_localityName 15 +#define OBJ_localityName OBJ_X509,7L + +#define SN_stateOrProvinceName "ST" +#define LN_stateOrProvinceName "stateOrProvinceName" +#define NID_stateOrProvinceName 16 +#define OBJ_stateOrProvinceName OBJ_X509,8L + +#define SN_streetAddress "street" +#define LN_streetAddress "streetAddress" +#define NID_streetAddress 660 +#define OBJ_streetAddress OBJ_X509,9L + +#define SN_organizationName "O" +#define LN_organizationName "organizationName" +#define NID_organizationName 17 +#define OBJ_organizationName OBJ_X509,10L + +#define SN_organizationalUnitName "OU" +#define LN_organizationalUnitName "organizationalUnitName" +#define NID_organizationalUnitName 18 +#define OBJ_organizationalUnitName OBJ_X509,11L + +#define SN_title "title" +#define LN_title "title" +#define NID_title 106 +#define OBJ_title OBJ_X509,12L + +#define LN_description "description" +#define NID_description 107 +#define OBJ_description OBJ_X509,13L + +#define LN_searchGuide "searchGuide" +#define NID_searchGuide 859 +#define OBJ_searchGuide OBJ_X509,14L + +#define LN_businessCategory "businessCategory" +#define NID_businessCategory 860 +#define OBJ_businessCategory OBJ_X509,15L + +#define LN_postalAddress "postalAddress" +#define NID_postalAddress 861 +#define OBJ_postalAddress OBJ_X509,16L + +#define LN_postalCode "postalCode" +#define NID_postalCode 661 +#define OBJ_postalCode OBJ_X509,17L + +#define LN_postOfficeBox "postOfficeBox" +#define NID_postOfficeBox 862 +#define OBJ_postOfficeBox OBJ_X509,18L + +#define LN_physicalDeliveryOfficeName "physicalDeliveryOfficeName" +#define NID_physicalDeliveryOfficeName 863 +#define OBJ_physicalDeliveryOfficeName OBJ_X509,19L + +#define LN_telephoneNumber "telephoneNumber" +#define NID_telephoneNumber 864 +#define OBJ_telephoneNumber OBJ_X509,20L + +#define LN_telexNumber "telexNumber" +#define NID_telexNumber 865 +#define OBJ_telexNumber OBJ_X509,21L + +#define LN_teletexTerminalIdentifier "teletexTerminalIdentifier" +#define NID_teletexTerminalIdentifier 866 +#define OBJ_teletexTerminalIdentifier OBJ_X509,22L + +#define LN_facsimileTelephoneNumber "facsimileTelephoneNumber" +#define NID_facsimileTelephoneNumber 867 +#define OBJ_facsimileTelephoneNumber OBJ_X509,23L + +#define LN_x121Address "x121Address" +#define NID_x121Address 868 +#define OBJ_x121Address 
OBJ_X509,24L + +#define LN_internationaliSDNNumber "internationaliSDNNumber" +#define NID_internationaliSDNNumber 869 +#define OBJ_internationaliSDNNumber OBJ_X509,25L + +#define LN_registeredAddress "registeredAddress" +#define NID_registeredAddress 870 +#define OBJ_registeredAddress OBJ_X509,26L + +#define LN_destinationIndicator "destinationIndicator" +#define NID_destinationIndicator 871 +#define OBJ_destinationIndicator OBJ_X509,27L + +#define LN_preferredDeliveryMethod "preferredDeliveryMethod" +#define NID_preferredDeliveryMethod 872 +#define OBJ_preferredDeliveryMethod OBJ_X509,28L + +#define LN_presentationAddress "presentationAddress" +#define NID_presentationAddress 873 +#define OBJ_presentationAddress OBJ_X509,29L + +#define LN_supportedApplicationContext "supportedApplicationContext" +#define NID_supportedApplicationContext 874 +#define OBJ_supportedApplicationContext OBJ_X509,30L + +#define SN_member "member" +#define NID_member 875 +#define OBJ_member OBJ_X509,31L + +#define SN_owner "owner" +#define NID_owner 876 +#define OBJ_owner OBJ_X509,32L + +#define LN_roleOccupant "roleOccupant" +#define NID_roleOccupant 877 +#define OBJ_roleOccupant OBJ_X509,33L + +#define SN_seeAlso "seeAlso" +#define NID_seeAlso 878 +#define OBJ_seeAlso OBJ_X509,34L + +#define LN_userPassword "userPassword" +#define NID_userPassword 879 +#define OBJ_userPassword OBJ_X509,35L + +#define LN_userCertificate "userCertificate" +#define NID_userCertificate 880 +#define OBJ_userCertificate OBJ_X509,36L + +#define LN_cACertificate "cACertificate" +#define NID_cACertificate 881 +#define OBJ_cACertificate OBJ_X509,37L + +#define LN_authorityRevocationList "authorityRevocationList" +#define NID_authorityRevocationList 882 +#define OBJ_authorityRevocationList OBJ_X509,38L + +#define LN_certificateRevocationList "certificateRevocationList" +#define NID_certificateRevocationList 883 +#define OBJ_certificateRevocationList OBJ_X509,39L + +#define LN_crossCertificatePair "crossCertificatePair" +#define NID_crossCertificatePair 884 +#define OBJ_crossCertificatePair OBJ_X509,40L + +#define SN_name "name" +#define LN_name "name" +#define NID_name 173 +#define OBJ_name OBJ_X509,41L + +#define SN_givenName "GN" +#define LN_givenName "givenName" +#define NID_givenName 99 +#define OBJ_givenName OBJ_X509,42L + +#define SN_initials "initials" +#define LN_initials "initials" +#define NID_initials 101 +#define OBJ_initials OBJ_X509,43L + +#define LN_generationQualifier "generationQualifier" +#define NID_generationQualifier 509 +#define OBJ_generationQualifier OBJ_X509,44L + +#define LN_x500UniqueIdentifier "x500UniqueIdentifier" +#define NID_x500UniqueIdentifier 503 +#define OBJ_x500UniqueIdentifier OBJ_X509,45L + +#define SN_dnQualifier "dnQualifier" +#define LN_dnQualifier "dnQualifier" +#define NID_dnQualifier 174 +#define OBJ_dnQualifier OBJ_X509,46L + +#define LN_enhancedSearchGuide "enhancedSearchGuide" +#define NID_enhancedSearchGuide 885 +#define OBJ_enhancedSearchGuide OBJ_X509,47L + +#define LN_protocolInformation "protocolInformation" +#define NID_protocolInformation 886 +#define OBJ_protocolInformation OBJ_X509,48L + +#define LN_distinguishedName "distinguishedName" +#define NID_distinguishedName 887 +#define OBJ_distinguishedName OBJ_X509,49L + +#define LN_uniqueMember "uniqueMember" +#define NID_uniqueMember 888 +#define OBJ_uniqueMember OBJ_X509,50L + +#define LN_houseIdentifier "houseIdentifier" +#define NID_houseIdentifier 889 +#define OBJ_houseIdentifier OBJ_X509,51L + +#define LN_supportedAlgorithms 
"supportedAlgorithms" +#define NID_supportedAlgorithms 890 +#define OBJ_supportedAlgorithms OBJ_X509,52L + +#define LN_deltaRevocationList "deltaRevocationList" +#define NID_deltaRevocationList 891 +#define OBJ_deltaRevocationList OBJ_X509,53L + +#define SN_dmdName "dmdName" +#define NID_dmdName 892 +#define OBJ_dmdName OBJ_X509,54L + +#define LN_pseudonym "pseudonym" +#define NID_pseudonym 510 +#define OBJ_pseudonym OBJ_X509,65L + +#define SN_role "role" +#define LN_role "role" +#define NID_role 400 +#define OBJ_role OBJ_X509,72L + +#define LN_organizationIdentifier "organizationIdentifier" +#define NID_organizationIdentifier 1089 +#define OBJ_organizationIdentifier OBJ_X509,97L + +#define SN_countryCode3c "c3" +#define LN_countryCode3c "countryCode3c" +#define NID_countryCode3c 1090 +#define OBJ_countryCode3c OBJ_X509,98L + +#define SN_countryCode3n "n3" +#define LN_countryCode3n "countryCode3n" +#define NID_countryCode3n 1091 +#define OBJ_countryCode3n OBJ_X509,99L + +#define LN_dnsName "dnsName" +#define NID_dnsName 1092 +#define OBJ_dnsName OBJ_X509,100L + +#define SN_X500algorithms "X500algorithms" +#define LN_X500algorithms "directory services - algorithms" +#define NID_X500algorithms 378 +#define OBJ_X500algorithms OBJ_X500,8L + +#define SN_rsa "RSA" +#define LN_rsa "rsa" +#define NID_rsa 19 +#define OBJ_rsa OBJ_X500algorithms,1L,1L + +#define SN_mdc2WithRSA "RSA-MDC2" +#define LN_mdc2WithRSA "mdc2WithRSA" +#define NID_mdc2WithRSA 96 +#define OBJ_mdc2WithRSA OBJ_X500algorithms,3L,100L + +#define SN_mdc2 "MDC2" +#define LN_mdc2 "mdc2" +#define NID_mdc2 95 +#define OBJ_mdc2 OBJ_X500algorithms,3L,101L + +#define SN_id_ce "id-ce" +#define NID_id_ce 81 +#define OBJ_id_ce OBJ_X500,29L + +#define SN_subject_directory_attributes "subjectDirectoryAttributes" +#define LN_subject_directory_attributes "X509v3 Subject Directory Attributes" +#define NID_subject_directory_attributes 769 +#define OBJ_subject_directory_attributes OBJ_id_ce,9L + +#define SN_subject_key_identifier "subjectKeyIdentifier" +#define LN_subject_key_identifier "X509v3 Subject Key Identifier" +#define NID_subject_key_identifier 82 +#define OBJ_subject_key_identifier OBJ_id_ce,14L + +#define SN_key_usage "keyUsage" +#define LN_key_usage "X509v3 Key Usage" +#define NID_key_usage 83 +#define OBJ_key_usage OBJ_id_ce,15L + +#define SN_private_key_usage_period "privateKeyUsagePeriod" +#define LN_private_key_usage_period "X509v3 Private Key Usage Period" +#define NID_private_key_usage_period 84 +#define OBJ_private_key_usage_period OBJ_id_ce,16L + +#define SN_subject_alt_name "subjectAltName" +#define LN_subject_alt_name "X509v3 Subject Alternative Name" +#define NID_subject_alt_name 85 +#define OBJ_subject_alt_name OBJ_id_ce,17L + +#define SN_issuer_alt_name "issuerAltName" +#define LN_issuer_alt_name "X509v3 Issuer Alternative Name" +#define NID_issuer_alt_name 86 +#define OBJ_issuer_alt_name OBJ_id_ce,18L + +#define SN_basic_constraints "basicConstraints" +#define LN_basic_constraints "X509v3 Basic Constraints" +#define NID_basic_constraints 87 +#define OBJ_basic_constraints OBJ_id_ce,19L + +#define SN_crl_number "crlNumber" +#define LN_crl_number "X509v3 CRL Number" +#define NID_crl_number 88 +#define OBJ_crl_number OBJ_id_ce,20L + +#define SN_crl_reason "CRLReason" +#define LN_crl_reason "X509v3 CRL Reason Code" +#define NID_crl_reason 141 +#define OBJ_crl_reason OBJ_id_ce,21L + +#define SN_invalidity_date "invalidityDate" +#define LN_invalidity_date "Invalidity Date" +#define NID_invalidity_date 142 +#define 
OBJ_invalidity_date OBJ_id_ce,24L + +#define SN_delta_crl "deltaCRL" +#define LN_delta_crl "X509v3 Delta CRL Indicator" +#define NID_delta_crl 140 +#define OBJ_delta_crl OBJ_id_ce,27L + +#define SN_issuing_distribution_point "issuingDistributionPoint" +#define LN_issuing_distribution_point "X509v3 Issuing Distribution Point" +#define NID_issuing_distribution_point 770 +#define OBJ_issuing_distribution_point OBJ_id_ce,28L + +#define SN_certificate_issuer "certificateIssuer" +#define LN_certificate_issuer "X509v3 Certificate Issuer" +#define NID_certificate_issuer 771 +#define OBJ_certificate_issuer OBJ_id_ce,29L + +#define SN_name_constraints "nameConstraints" +#define LN_name_constraints "X509v3 Name Constraints" +#define NID_name_constraints 666 +#define OBJ_name_constraints OBJ_id_ce,30L + +#define SN_crl_distribution_points "crlDistributionPoints" +#define LN_crl_distribution_points "X509v3 CRL Distribution Points" +#define NID_crl_distribution_points 103 +#define OBJ_crl_distribution_points OBJ_id_ce,31L + +#define SN_certificate_policies "certificatePolicies" +#define LN_certificate_policies "X509v3 Certificate Policies" +#define NID_certificate_policies 89 +#define OBJ_certificate_policies OBJ_id_ce,32L + +#define SN_any_policy "anyPolicy" +#define LN_any_policy "X509v3 Any Policy" +#define NID_any_policy 746 +#define OBJ_any_policy OBJ_certificate_policies,0L + +#define SN_policy_mappings "policyMappings" +#define LN_policy_mappings "X509v3 Policy Mappings" +#define NID_policy_mappings 747 +#define OBJ_policy_mappings OBJ_id_ce,33L + +#define SN_authority_key_identifier "authorityKeyIdentifier" +#define LN_authority_key_identifier "X509v3 Authority Key Identifier" +#define NID_authority_key_identifier 90 +#define OBJ_authority_key_identifier OBJ_id_ce,35L + +#define SN_policy_constraints "policyConstraints" +#define LN_policy_constraints "X509v3 Policy Constraints" +#define NID_policy_constraints 401 +#define OBJ_policy_constraints OBJ_id_ce,36L + +#define SN_ext_key_usage "extendedKeyUsage" +#define LN_ext_key_usage "X509v3 Extended Key Usage" +#define NID_ext_key_usage 126 +#define OBJ_ext_key_usage OBJ_id_ce,37L + +#define SN_freshest_crl "freshestCRL" +#define LN_freshest_crl "X509v3 Freshest CRL" +#define NID_freshest_crl 857 +#define OBJ_freshest_crl OBJ_id_ce,46L + +#define SN_inhibit_any_policy "inhibitAnyPolicy" +#define LN_inhibit_any_policy "X509v3 Inhibit Any Policy" +#define NID_inhibit_any_policy 748 +#define OBJ_inhibit_any_policy OBJ_id_ce,54L + +#define SN_target_information "targetInformation" +#define LN_target_information "X509v3 AC Targeting" +#define NID_target_information 402 +#define OBJ_target_information OBJ_id_ce,55L + +#define SN_no_rev_avail "noRevAvail" +#define LN_no_rev_avail "X509v3 No Revocation Available" +#define NID_no_rev_avail 403 +#define OBJ_no_rev_avail OBJ_id_ce,56L + +#define SN_anyExtendedKeyUsage "anyExtendedKeyUsage" +#define LN_anyExtendedKeyUsage "Any Extended Key Usage" +#define NID_anyExtendedKeyUsage 910 +#define OBJ_anyExtendedKeyUsage OBJ_ext_key_usage,0L + +#define SN_netscape "Netscape" +#define LN_netscape "Netscape Communications Corp." 
+#define NID_netscape 57 +#define OBJ_netscape 2L,16L,840L,1L,113730L + +#define SN_netscape_cert_extension "nsCertExt" +#define LN_netscape_cert_extension "Netscape Certificate Extension" +#define NID_netscape_cert_extension 58 +#define OBJ_netscape_cert_extension OBJ_netscape,1L + +#define SN_netscape_data_type "nsDataType" +#define LN_netscape_data_type "Netscape Data Type" +#define NID_netscape_data_type 59 +#define OBJ_netscape_data_type OBJ_netscape,2L + +#define SN_netscape_cert_type "nsCertType" +#define LN_netscape_cert_type "Netscape Cert Type" +#define NID_netscape_cert_type 71 +#define OBJ_netscape_cert_type OBJ_netscape_cert_extension,1L + +#define SN_netscape_base_url "nsBaseUrl" +#define LN_netscape_base_url "Netscape Base Url" +#define NID_netscape_base_url 72 +#define OBJ_netscape_base_url OBJ_netscape_cert_extension,2L + +#define SN_netscape_revocation_url "nsRevocationUrl" +#define LN_netscape_revocation_url "Netscape Revocation Url" +#define NID_netscape_revocation_url 73 +#define OBJ_netscape_revocation_url OBJ_netscape_cert_extension,3L + +#define SN_netscape_ca_revocation_url "nsCaRevocationUrl" +#define LN_netscape_ca_revocation_url "Netscape CA Revocation Url" +#define NID_netscape_ca_revocation_url 74 +#define OBJ_netscape_ca_revocation_url OBJ_netscape_cert_extension,4L + +#define SN_netscape_renewal_url "nsRenewalUrl" +#define LN_netscape_renewal_url "Netscape Renewal Url" +#define NID_netscape_renewal_url 75 +#define OBJ_netscape_renewal_url OBJ_netscape_cert_extension,7L + +#define SN_netscape_ca_policy_url "nsCaPolicyUrl" +#define LN_netscape_ca_policy_url "Netscape CA Policy Url" +#define NID_netscape_ca_policy_url 76 +#define OBJ_netscape_ca_policy_url OBJ_netscape_cert_extension,8L + +#define SN_netscape_ssl_server_name "nsSslServerName" +#define LN_netscape_ssl_server_name "Netscape SSL Server Name" +#define NID_netscape_ssl_server_name 77 +#define OBJ_netscape_ssl_server_name OBJ_netscape_cert_extension,12L + +#define SN_netscape_comment "nsComment" +#define LN_netscape_comment "Netscape Comment" +#define NID_netscape_comment 78 +#define OBJ_netscape_comment OBJ_netscape_cert_extension,13L + +#define SN_netscape_cert_sequence "nsCertSequence" +#define LN_netscape_cert_sequence "Netscape Certificate Sequence" +#define NID_netscape_cert_sequence 79 +#define OBJ_netscape_cert_sequence OBJ_netscape_data_type,5L + +#define SN_ns_sgc "nsSGC" +#define LN_ns_sgc "Netscape Server Gated Crypto" +#define NID_ns_sgc 139 +#define OBJ_ns_sgc OBJ_netscape,4L,1L + +#define SN_org "ORG" +#define LN_org "org" +#define NID_org 379 +#define OBJ_org OBJ_iso,3L + +#define SN_dod "DOD" +#define LN_dod "dod" +#define NID_dod 380 +#define OBJ_dod OBJ_org,6L + +#define SN_iana "IANA" +#define LN_iana "iana" +#define NID_iana 381 +#define OBJ_iana OBJ_dod,1L + +#define OBJ_internet OBJ_iana + +#define SN_Directory "directory" +#define LN_Directory "Directory" +#define NID_Directory 382 +#define OBJ_Directory OBJ_internet,1L + +#define SN_Management "mgmt" +#define LN_Management "Management" +#define NID_Management 383 +#define OBJ_Management OBJ_internet,2L + +#define SN_Experimental "experimental" +#define LN_Experimental "Experimental" +#define NID_Experimental 384 +#define OBJ_Experimental OBJ_internet,3L + +#define SN_Private "private" +#define LN_Private "Private" +#define NID_Private 385 +#define OBJ_Private OBJ_internet,4L + +#define SN_Security "security" +#define LN_Security "Security" +#define NID_Security 386 +#define OBJ_Security OBJ_internet,5L + +#define SN_SNMPv2 
"snmpv2" +#define LN_SNMPv2 "SNMPv2" +#define NID_SNMPv2 387 +#define OBJ_SNMPv2 OBJ_internet,6L + +#define LN_Mail "Mail" +#define NID_Mail 388 +#define OBJ_Mail OBJ_internet,7L + +#define SN_Enterprises "enterprises" +#define LN_Enterprises "Enterprises" +#define NID_Enterprises 389 +#define OBJ_Enterprises OBJ_Private,1L + +#define SN_dcObject "dcobject" +#define LN_dcObject "dcObject" +#define NID_dcObject 390 +#define OBJ_dcObject OBJ_Enterprises,1466L,344L + +#define SN_mime_mhs "mime-mhs" +#define LN_mime_mhs "MIME MHS" +#define NID_mime_mhs 504 +#define OBJ_mime_mhs OBJ_Mail,1L + +#define SN_mime_mhs_headings "mime-mhs-headings" +#define LN_mime_mhs_headings "mime-mhs-headings" +#define NID_mime_mhs_headings 505 +#define OBJ_mime_mhs_headings OBJ_mime_mhs,1L + +#define SN_mime_mhs_bodies "mime-mhs-bodies" +#define LN_mime_mhs_bodies "mime-mhs-bodies" +#define NID_mime_mhs_bodies 506 +#define OBJ_mime_mhs_bodies OBJ_mime_mhs,2L + +#define SN_id_hex_partial_message "id-hex-partial-message" +#define LN_id_hex_partial_message "id-hex-partial-message" +#define NID_id_hex_partial_message 507 +#define OBJ_id_hex_partial_message OBJ_mime_mhs_headings,1L + +#define SN_id_hex_multipart_message "id-hex-multipart-message" +#define LN_id_hex_multipart_message "id-hex-multipart-message" +#define NID_id_hex_multipart_message 508 +#define OBJ_id_hex_multipart_message OBJ_mime_mhs_headings,2L + +#define SN_zlib_compression "ZLIB" +#define LN_zlib_compression "zlib compression" +#define NID_zlib_compression 125 +#define OBJ_zlib_compression OBJ_id_smime_alg,8L + +#define OBJ_csor 2L,16L,840L,1L,101L,3L + +#define OBJ_nistAlgorithms OBJ_csor,4L + +#define OBJ_aes OBJ_nistAlgorithms,1L + +#define SN_aes_128_ecb "AES-128-ECB" +#define LN_aes_128_ecb "aes-128-ecb" +#define NID_aes_128_ecb 418 +#define OBJ_aes_128_ecb OBJ_aes,1L + +#define SN_aes_128_cbc "AES-128-CBC" +#define LN_aes_128_cbc "aes-128-cbc" +#define NID_aes_128_cbc 419 +#define OBJ_aes_128_cbc OBJ_aes,2L + +#define SN_aes_128_ofb128 "AES-128-OFB" +#define LN_aes_128_ofb128 "aes-128-ofb" +#define NID_aes_128_ofb128 420 +#define OBJ_aes_128_ofb128 OBJ_aes,3L + +#define SN_aes_128_cfb128 "AES-128-CFB" +#define LN_aes_128_cfb128 "aes-128-cfb" +#define NID_aes_128_cfb128 421 +#define OBJ_aes_128_cfb128 OBJ_aes,4L + +#define SN_id_aes128_wrap "id-aes128-wrap" +#define NID_id_aes128_wrap 788 +#define OBJ_id_aes128_wrap OBJ_aes,5L + +#define SN_aes_128_gcm "id-aes128-GCM" +#define LN_aes_128_gcm "aes-128-gcm" +#define NID_aes_128_gcm 895 +#define OBJ_aes_128_gcm OBJ_aes,6L + +#define SN_aes_128_ccm "id-aes128-CCM" +#define LN_aes_128_ccm "aes-128-ccm" +#define NID_aes_128_ccm 896 +#define OBJ_aes_128_ccm OBJ_aes,7L + +#define SN_id_aes128_wrap_pad "id-aes128-wrap-pad" +#define NID_id_aes128_wrap_pad 897 +#define OBJ_id_aes128_wrap_pad OBJ_aes,8L + +#define SN_aes_192_ecb "AES-192-ECB" +#define LN_aes_192_ecb "aes-192-ecb" +#define NID_aes_192_ecb 422 +#define OBJ_aes_192_ecb OBJ_aes,21L + +#define SN_aes_192_cbc "AES-192-CBC" +#define LN_aes_192_cbc "aes-192-cbc" +#define NID_aes_192_cbc 423 +#define OBJ_aes_192_cbc OBJ_aes,22L + +#define SN_aes_192_ofb128 "AES-192-OFB" +#define LN_aes_192_ofb128 "aes-192-ofb" +#define NID_aes_192_ofb128 424 +#define OBJ_aes_192_ofb128 OBJ_aes,23L + +#define SN_aes_192_cfb128 "AES-192-CFB" +#define LN_aes_192_cfb128 "aes-192-cfb" +#define NID_aes_192_cfb128 425 +#define OBJ_aes_192_cfb128 OBJ_aes,24L + +#define SN_id_aes192_wrap "id-aes192-wrap" +#define NID_id_aes192_wrap 789 +#define OBJ_id_aes192_wrap 
OBJ_aes,25L + +#define SN_aes_192_gcm "id-aes192-GCM" +#define LN_aes_192_gcm "aes-192-gcm" +#define NID_aes_192_gcm 898 +#define OBJ_aes_192_gcm OBJ_aes,26L + +#define SN_aes_192_ccm "id-aes192-CCM" +#define LN_aes_192_ccm "aes-192-ccm" +#define NID_aes_192_ccm 899 +#define OBJ_aes_192_ccm OBJ_aes,27L + +#define SN_id_aes192_wrap_pad "id-aes192-wrap-pad" +#define NID_id_aes192_wrap_pad 900 +#define OBJ_id_aes192_wrap_pad OBJ_aes,28L + +#define SN_aes_256_ecb "AES-256-ECB" +#define LN_aes_256_ecb "aes-256-ecb" +#define NID_aes_256_ecb 426 +#define OBJ_aes_256_ecb OBJ_aes,41L + +#define SN_aes_256_cbc "AES-256-CBC" +#define LN_aes_256_cbc "aes-256-cbc" +#define NID_aes_256_cbc 427 +#define OBJ_aes_256_cbc OBJ_aes,42L + +#define SN_aes_256_ofb128 "AES-256-OFB" +#define LN_aes_256_ofb128 "aes-256-ofb" +#define NID_aes_256_ofb128 428 +#define OBJ_aes_256_ofb128 OBJ_aes,43L + +#define SN_aes_256_cfb128 "AES-256-CFB" +#define LN_aes_256_cfb128 "aes-256-cfb" +#define NID_aes_256_cfb128 429 +#define OBJ_aes_256_cfb128 OBJ_aes,44L + +#define SN_id_aes256_wrap "id-aes256-wrap" +#define NID_id_aes256_wrap 790 +#define OBJ_id_aes256_wrap OBJ_aes,45L + +#define SN_aes_256_gcm "id-aes256-GCM" +#define LN_aes_256_gcm "aes-256-gcm" +#define NID_aes_256_gcm 901 +#define OBJ_aes_256_gcm OBJ_aes,46L + +#define SN_aes_256_ccm "id-aes256-CCM" +#define LN_aes_256_ccm "aes-256-ccm" +#define NID_aes_256_ccm 902 +#define OBJ_aes_256_ccm OBJ_aes,47L + +#define SN_id_aes256_wrap_pad "id-aes256-wrap-pad" +#define NID_id_aes256_wrap_pad 903 +#define OBJ_id_aes256_wrap_pad OBJ_aes,48L + +#define SN_aes_128_xts "AES-128-XTS" +#define LN_aes_128_xts "aes-128-xts" +#define NID_aes_128_xts 913 +#define OBJ_aes_128_xts OBJ_ieee_siswg,0L,1L,1L + +#define SN_aes_256_xts "AES-256-XTS" +#define LN_aes_256_xts "aes-256-xts" +#define NID_aes_256_xts 914 +#define OBJ_aes_256_xts OBJ_ieee_siswg,0L,1L,2L + +#define SN_aes_128_cfb1 "AES-128-CFB1" +#define LN_aes_128_cfb1 "aes-128-cfb1" +#define NID_aes_128_cfb1 650 + +#define SN_aes_192_cfb1 "AES-192-CFB1" +#define LN_aes_192_cfb1 "aes-192-cfb1" +#define NID_aes_192_cfb1 651 + +#define SN_aes_256_cfb1 "AES-256-CFB1" +#define LN_aes_256_cfb1 "aes-256-cfb1" +#define NID_aes_256_cfb1 652 + +#define SN_aes_128_cfb8 "AES-128-CFB8" +#define LN_aes_128_cfb8 "aes-128-cfb8" +#define NID_aes_128_cfb8 653 + +#define SN_aes_192_cfb8 "AES-192-CFB8" +#define LN_aes_192_cfb8 "aes-192-cfb8" +#define NID_aes_192_cfb8 654 + +#define SN_aes_256_cfb8 "AES-256-CFB8" +#define LN_aes_256_cfb8 "aes-256-cfb8" +#define NID_aes_256_cfb8 655 + +#define SN_aes_128_ctr "AES-128-CTR" +#define LN_aes_128_ctr "aes-128-ctr" +#define NID_aes_128_ctr 904 + +#define SN_aes_192_ctr "AES-192-CTR" +#define LN_aes_192_ctr "aes-192-ctr" +#define NID_aes_192_ctr 905 + +#define SN_aes_256_ctr "AES-256-CTR" +#define LN_aes_256_ctr "aes-256-ctr" +#define NID_aes_256_ctr 906 + +#define SN_aes_128_ocb "AES-128-OCB" +#define LN_aes_128_ocb "aes-128-ocb" +#define NID_aes_128_ocb 958 + +#define SN_aes_192_ocb "AES-192-OCB" +#define LN_aes_192_ocb "aes-192-ocb" +#define NID_aes_192_ocb 959 + +#define SN_aes_256_ocb "AES-256-OCB" +#define LN_aes_256_ocb "aes-256-ocb" +#define NID_aes_256_ocb 960 + +#define SN_des_cfb1 "DES-CFB1" +#define LN_des_cfb1 "des-cfb1" +#define NID_des_cfb1 656 + +#define SN_des_cfb8 "DES-CFB8" +#define LN_des_cfb8 "des-cfb8" +#define NID_des_cfb8 657 + +#define SN_des_ede3_cfb1 "DES-EDE3-CFB1" +#define LN_des_ede3_cfb1 "des-ede3-cfb1" +#define NID_des_ede3_cfb1 658 + +#define SN_des_ede3_cfb8 "DES-EDE3-CFB8" 
+#define LN_des_ede3_cfb8 "des-ede3-cfb8" +#define NID_des_ede3_cfb8 659 + +#define OBJ_nist_hashalgs OBJ_nistAlgorithms,2L + +#define SN_sha256 "SHA256" +#define LN_sha256 "sha256" +#define NID_sha256 672 +#define OBJ_sha256 OBJ_nist_hashalgs,1L + +#define SN_sha384 "SHA384" +#define LN_sha384 "sha384" +#define NID_sha384 673 +#define OBJ_sha384 OBJ_nist_hashalgs,2L + +#define SN_sha512 "SHA512" +#define LN_sha512 "sha512" +#define NID_sha512 674 +#define OBJ_sha512 OBJ_nist_hashalgs,3L + +#define SN_sha224 "SHA224" +#define LN_sha224 "sha224" +#define NID_sha224 675 +#define OBJ_sha224 OBJ_nist_hashalgs,4L + +#define SN_sha512_224 "SHA512-224" +#define LN_sha512_224 "sha512-224" +#define NID_sha512_224 1094 +#define OBJ_sha512_224 OBJ_nist_hashalgs,5L + +#define SN_sha512_256 "SHA512-256" +#define LN_sha512_256 "sha512-256" +#define NID_sha512_256 1095 +#define OBJ_sha512_256 OBJ_nist_hashalgs,6L + +#define SN_sha3_224 "SHA3-224" +#define LN_sha3_224 "sha3-224" +#define NID_sha3_224 1096 +#define OBJ_sha3_224 OBJ_nist_hashalgs,7L + +#define SN_sha3_256 "SHA3-256" +#define LN_sha3_256 "sha3-256" +#define NID_sha3_256 1097 +#define OBJ_sha3_256 OBJ_nist_hashalgs,8L + +#define SN_sha3_384 "SHA3-384" +#define LN_sha3_384 "sha3-384" +#define NID_sha3_384 1098 +#define OBJ_sha3_384 OBJ_nist_hashalgs,9L + +#define SN_sha3_512 "SHA3-512" +#define LN_sha3_512 "sha3-512" +#define NID_sha3_512 1099 +#define OBJ_sha3_512 OBJ_nist_hashalgs,10L + +#define SN_shake128 "SHAKE128" +#define LN_shake128 "shake128" +#define NID_shake128 1100 +#define OBJ_shake128 OBJ_nist_hashalgs,11L + +#define SN_shake256 "SHAKE256" +#define LN_shake256 "shake256" +#define NID_shake256 1101 +#define OBJ_shake256 OBJ_nist_hashalgs,12L + +#define SN_hmac_sha3_224 "id-hmacWithSHA3-224" +#define LN_hmac_sha3_224 "hmac-sha3-224" +#define NID_hmac_sha3_224 1102 +#define OBJ_hmac_sha3_224 OBJ_nist_hashalgs,13L + +#define SN_hmac_sha3_256 "id-hmacWithSHA3-256" +#define LN_hmac_sha3_256 "hmac-sha3-256" +#define NID_hmac_sha3_256 1103 +#define OBJ_hmac_sha3_256 OBJ_nist_hashalgs,14L + +#define SN_hmac_sha3_384 "id-hmacWithSHA3-384" +#define LN_hmac_sha3_384 "hmac-sha3-384" +#define NID_hmac_sha3_384 1104 +#define OBJ_hmac_sha3_384 OBJ_nist_hashalgs,15L + +#define SN_hmac_sha3_512 "id-hmacWithSHA3-512" +#define LN_hmac_sha3_512 "hmac-sha3-512" +#define NID_hmac_sha3_512 1105 +#define OBJ_hmac_sha3_512 OBJ_nist_hashalgs,16L + +#define OBJ_dsa_with_sha2 OBJ_nistAlgorithms,3L + +#define SN_dsa_with_SHA224 "dsa_with_SHA224" +#define NID_dsa_with_SHA224 802 +#define OBJ_dsa_with_SHA224 OBJ_dsa_with_sha2,1L + +#define SN_dsa_with_SHA256 "dsa_with_SHA256" +#define NID_dsa_with_SHA256 803 +#define OBJ_dsa_with_SHA256 OBJ_dsa_with_sha2,2L + +#define OBJ_sigAlgs OBJ_nistAlgorithms,3L + +#define SN_dsa_with_SHA384 "id-dsa-with-sha384" +#define LN_dsa_with_SHA384 "dsa_with_SHA384" +#define NID_dsa_with_SHA384 1106 +#define OBJ_dsa_with_SHA384 OBJ_sigAlgs,3L + +#define SN_dsa_with_SHA512 "id-dsa-with-sha512" +#define LN_dsa_with_SHA512 "dsa_with_SHA512" +#define NID_dsa_with_SHA512 1107 +#define OBJ_dsa_with_SHA512 OBJ_sigAlgs,4L + +#define SN_dsa_with_SHA3_224 "id-dsa-with-sha3-224" +#define LN_dsa_with_SHA3_224 "dsa_with_SHA3-224" +#define NID_dsa_with_SHA3_224 1108 +#define OBJ_dsa_with_SHA3_224 OBJ_sigAlgs,5L + +#define SN_dsa_with_SHA3_256 "id-dsa-with-sha3-256" +#define LN_dsa_with_SHA3_256 "dsa_with_SHA3-256" +#define NID_dsa_with_SHA3_256 1109 +#define OBJ_dsa_with_SHA3_256 OBJ_sigAlgs,6L + +#define SN_dsa_with_SHA3_384 
"id-dsa-with-sha3-384" +#define LN_dsa_with_SHA3_384 "dsa_with_SHA3-384" +#define NID_dsa_with_SHA3_384 1110 +#define OBJ_dsa_with_SHA3_384 OBJ_sigAlgs,7L + +#define SN_dsa_with_SHA3_512 "id-dsa-with-sha3-512" +#define LN_dsa_with_SHA3_512 "dsa_with_SHA3-512" +#define NID_dsa_with_SHA3_512 1111 +#define OBJ_dsa_with_SHA3_512 OBJ_sigAlgs,8L + +#define SN_ecdsa_with_SHA3_224 "id-ecdsa-with-sha3-224" +#define LN_ecdsa_with_SHA3_224 "ecdsa_with_SHA3-224" +#define NID_ecdsa_with_SHA3_224 1112 +#define OBJ_ecdsa_with_SHA3_224 OBJ_sigAlgs,9L + +#define SN_ecdsa_with_SHA3_256 "id-ecdsa-with-sha3-256" +#define LN_ecdsa_with_SHA3_256 "ecdsa_with_SHA3-256" +#define NID_ecdsa_with_SHA3_256 1113 +#define OBJ_ecdsa_with_SHA3_256 OBJ_sigAlgs,10L + +#define SN_ecdsa_with_SHA3_384 "id-ecdsa-with-sha3-384" +#define LN_ecdsa_with_SHA3_384 "ecdsa_with_SHA3-384" +#define NID_ecdsa_with_SHA3_384 1114 +#define OBJ_ecdsa_with_SHA3_384 OBJ_sigAlgs,11L + +#define SN_ecdsa_with_SHA3_512 "id-ecdsa-with-sha3-512" +#define LN_ecdsa_with_SHA3_512 "ecdsa_with_SHA3-512" +#define NID_ecdsa_with_SHA3_512 1115 +#define OBJ_ecdsa_with_SHA3_512 OBJ_sigAlgs,12L + +#define SN_RSA_SHA3_224 "id-rsassa-pkcs1-v1_5-with-sha3-224" +#define LN_RSA_SHA3_224 "RSA-SHA3-224" +#define NID_RSA_SHA3_224 1116 +#define OBJ_RSA_SHA3_224 OBJ_sigAlgs,13L + +#define SN_RSA_SHA3_256 "id-rsassa-pkcs1-v1_5-with-sha3-256" +#define LN_RSA_SHA3_256 "RSA-SHA3-256" +#define NID_RSA_SHA3_256 1117 +#define OBJ_RSA_SHA3_256 OBJ_sigAlgs,14L + +#define SN_RSA_SHA3_384 "id-rsassa-pkcs1-v1_5-with-sha3-384" +#define LN_RSA_SHA3_384 "RSA-SHA3-384" +#define NID_RSA_SHA3_384 1118 +#define OBJ_RSA_SHA3_384 OBJ_sigAlgs,15L + +#define SN_RSA_SHA3_512 "id-rsassa-pkcs1-v1_5-with-sha3-512" +#define LN_RSA_SHA3_512 "RSA-SHA3-512" +#define NID_RSA_SHA3_512 1119 +#define OBJ_RSA_SHA3_512 OBJ_sigAlgs,16L + +#define SN_hold_instruction_code "holdInstructionCode" +#define LN_hold_instruction_code "Hold Instruction Code" +#define NID_hold_instruction_code 430 +#define OBJ_hold_instruction_code OBJ_id_ce,23L + +#define OBJ_holdInstruction OBJ_X9_57,2L + +#define SN_hold_instruction_none "holdInstructionNone" +#define LN_hold_instruction_none "Hold Instruction None" +#define NID_hold_instruction_none 431 +#define OBJ_hold_instruction_none OBJ_holdInstruction,1L + +#define SN_hold_instruction_call_issuer "holdInstructionCallIssuer" +#define LN_hold_instruction_call_issuer "Hold Instruction Call Issuer" +#define NID_hold_instruction_call_issuer 432 +#define OBJ_hold_instruction_call_issuer OBJ_holdInstruction,2L + +#define SN_hold_instruction_reject "holdInstructionReject" +#define LN_hold_instruction_reject "Hold Instruction Reject" +#define NID_hold_instruction_reject 433 +#define OBJ_hold_instruction_reject OBJ_holdInstruction,3L + +#define SN_data "data" +#define NID_data 434 +#define OBJ_data OBJ_itu_t,9L + +#define SN_pss "pss" +#define NID_pss 435 +#define OBJ_pss OBJ_data,2342L + +#define SN_ucl "ucl" +#define NID_ucl 436 +#define OBJ_ucl OBJ_pss,19200300L + +#define SN_pilot "pilot" +#define NID_pilot 437 +#define OBJ_pilot OBJ_ucl,100L + +#define LN_pilotAttributeType "pilotAttributeType" +#define NID_pilotAttributeType 438 +#define OBJ_pilotAttributeType OBJ_pilot,1L + +#define LN_pilotAttributeSyntax "pilotAttributeSyntax" +#define NID_pilotAttributeSyntax 439 +#define OBJ_pilotAttributeSyntax OBJ_pilot,3L + +#define LN_pilotObjectClass "pilotObjectClass" +#define NID_pilotObjectClass 440 +#define OBJ_pilotObjectClass OBJ_pilot,4L + +#define LN_pilotGroups "pilotGroups" 
+#define NID_pilotGroups 441 +#define OBJ_pilotGroups OBJ_pilot,10L + +#define LN_iA5StringSyntax "iA5StringSyntax" +#define NID_iA5StringSyntax 442 +#define OBJ_iA5StringSyntax OBJ_pilotAttributeSyntax,4L + +#define LN_caseIgnoreIA5StringSyntax "caseIgnoreIA5StringSyntax" +#define NID_caseIgnoreIA5StringSyntax 443 +#define OBJ_caseIgnoreIA5StringSyntax OBJ_pilotAttributeSyntax,5L + +#define LN_pilotObject "pilotObject" +#define NID_pilotObject 444 +#define OBJ_pilotObject OBJ_pilotObjectClass,3L + +#define LN_pilotPerson "pilotPerson" +#define NID_pilotPerson 445 +#define OBJ_pilotPerson OBJ_pilotObjectClass,4L + +#define SN_account "account" +#define NID_account 446 +#define OBJ_account OBJ_pilotObjectClass,5L + +#define SN_document "document" +#define NID_document 447 +#define OBJ_document OBJ_pilotObjectClass,6L + +#define SN_room "room" +#define NID_room 448 +#define OBJ_room OBJ_pilotObjectClass,7L + +#define LN_documentSeries "documentSeries" +#define NID_documentSeries 449 +#define OBJ_documentSeries OBJ_pilotObjectClass,9L + +#define SN_Domain "domain" +#define LN_Domain "Domain" +#define NID_Domain 392 +#define OBJ_Domain OBJ_pilotObjectClass,13L + +#define LN_rFC822localPart "rFC822localPart" +#define NID_rFC822localPart 450 +#define OBJ_rFC822localPart OBJ_pilotObjectClass,14L + +#define LN_dNSDomain "dNSDomain" +#define NID_dNSDomain 451 +#define OBJ_dNSDomain OBJ_pilotObjectClass,15L + +#define LN_domainRelatedObject "domainRelatedObject" +#define NID_domainRelatedObject 452 +#define OBJ_domainRelatedObject OBJ_pilotObjectClass,17L + +#define LN_friendlyCountry "friendlyCountry" +#define NID_friendlyCountry 453 +#define OBJ_friendlyCountry OBJ_pilotObjectClass,18L + +#define LN_simpleSecurityObject "simpleSecurityObject" +#define NID_simpleSecurityObject 454 +#define OBJ_simpleSecurityObject OBJ_pilotObjectClass,19L + +#define LN_pilotOrganization "pilotOrganization" +#define NID_pilotOrganization 455 +#define OBJ_pilotOrganization OBJ_pilotObjectClass,20L + +#define LN_pilotDSA "pilotDSA" +#define NID_pilotDSA 456 +#define OBJ_pilotDSA OBJ_pilotObjectClass,21L + +#define LN_qualityLabelledData "qualityLabelledData" +#define NID_qualityLabelledData 457 +#define OBJ_qualityLabelledData OBJ_pilotObjectClass,22L + +#define SN_userId "UID" +#define LN_userId "userId" +#define NID_userId 458 +#define OBJ_userId OBJ_pilotAttributeType,1L + +#define LN_textEncodedORAddress "textEncodedORAddress" +#define NID_textEncodedORAddress 459 +#define OBJ_textEncodedORAddress OBJ_pilotAttributeType,2L + +#define SN_rfc822Mailbox "mail" +#define LN_rfc822Mailbox "rfc822Mailbox" +#define NID_rfc822Mailbox 460 +#define OBJ_rfc822Mailbox OBJ_pilotAttributeType,3L + +#define SN_info "info" +#define NID_info 461 +#define OBJ_info OBJ_pilotAttributeType,4L + +#define LN_favouriteDrink "favouriteDrink" +#define NID_favouriteDrink 462 +#define OBJ_favouriteDrink OBJ_pilotAttributeType,5L + +#define LN_roomNumber "roomNumber" +#define NID_roomNumber 463 +#define OBJ_roomNumber OBJ_pilotAttributeType,6L + +#define SN_photo "photo" +#define NID_photo 464 +#define OBJ_photo OBJ_pilotAttributeType,7L + +#define LN_userClass "userClass" +#define NID_userClass 465 +#define OBJ_userClass OBJ_pilotAttributeType,8L + +#define SN_host "host" +#define NID_host 466 +#define OBJ_host OBJ_pilotAttributeType,9L + +#define SN_manager "manager" +#define NID_manager 467 +#define OBJ_manager OBJ_pilotAttributeType,10L + +#define LN_documentIdentifier "documentIdentifier" +#define NID_documentIdentifier 468 +#define 
OBJ_documentIdentifier OBJ_pilotAttributeType,11L + +#define LN_documentTitle "documentTitle" +#define NID_documentTitle 469 +#define OBJ_documentTitle OBJ_pilotAttributeType,12L + +#define LN_documentVersion "documentVersion" +#define NID_documentVersion 470 +#define OBJ_documentVersion OBJ_pilotAttributeType,13L + +#define LN_documentAuthor "documentAuthor" +#define NID_documentAuthor 471 +#define OBJ_documentAuthor OBJ_pilotAttributeType,14L + +#define LN_documentLocation "documentLocation" +#define NID_documentLocation 472 +#define OBJ_documentLocation OBJ_pilotAttributeType,15L + +#define LN_homeTelephoneNumber "homeTelephoneNumber" +#define NID_homeTelephoneNumber 473 +#define OBJ_homeTelephoneNumber OBJ_pilotAttributeType,20L + +#define SN_secretary "secretary" +#define NID_secretary 474 +#define OBJ_secretary OBJ_pilotAttributeType,21L + +#define LN_otherMailbox "otherMailbox" +#define NID_otherMailbox 475 +#define OBJ_otherMailbox OBJ_pilotAttributeType,22L + +#define LN_lastModifiedTime "lastModifiedTime" +#define NID_lastModifiedTime 476 +#define OBJ_lastModifiedTime OBJ_pilotAttributeType,23L + +#define LN_lastModifiedBy "lastModifiedBy" +#define NID_lastModifiedBy 477 +#define OBJ_lastModifiedBy OBJ_pilotAttributeType,24L + +#define SN_domainComponent "DC" +#define LN_domainComponent "domainComponent" +#define NID_domainComponent 391 +#define OBJ_domainComponent OBJ_pilotAttributeType,25L + +#define LN_aRecord "aRecord" +#define NID_aRecord 478 +#define OBJ_aRecord OBJ_pilotAttributeType,26L + +#define LN_pilotAttributeType27 "pilotAttributeType27" +#define NID_pilotAttributeType27 479 +#define OBJ_pilotAttributeType27 OBJ_pilotAttributeType,27L + +#define LN_mXRecord "mXRecord" +#define NID_mXRecord 480 +#define OBJ_mXRecord OBJ_pilotAttributeType,28L + +#define LN_nSRecord "nSRecord" +#define NID_nSRecord 481 +#define OBJ_nSRecord OBJ_pilotAttributeType,29L + +#define LN_sOARecord "sOARecord" +#define NID_sOARecord 482 +#define OBJ_sOARecord OBJ_pilotAttributeType,30L + +#define LN_cNAMERecord "cNAMERecord" +#define NID_cNAMERecord 483 +#define OBJ_cNAMERecord OBJ_pilotAttributeType,31L + +#define LN_associatedDomain "associatedDomain" +#define NID_associatedDomain 484 +#define OBJ_associatedDomain OBJ_pilotAttributeType,37L + +#define LN_associatedName "associatedName" +#define NID_associatedName 485 +#define OBJ_associatedName OBJ_pilotAttributeType,38L + +#define LN_homePostalAddress "homePostalAddress" +#define NID_homePostalAddress 486 +#define OBJ_homePostalAddress OBJ_pilotAttributeType,39L + +#define LN_personalTitle "personalTitle" +#define NID_personalTitle 487 +#define OBJ_personalTitle OBJ_pilotAttributeType,40L + +#define LN_mobileTelephoneNumber "mobileTelephoneNumber" +#define NID_mobileTelephoneNumber 488 +#define OBJ_mobileTelephoneNumber OBJ_pilotAttributeType,41L + +#define LN_pagerTelephoneNumber "pagerTelephoneNumber" +#define NID_pagerTelephoneNumber 489 +#define OBJ_pagerTelephoneNumber OBJ_pilotAttributeType,42L + +#define LN_friendlyCountryName "friendlyCountryName" +#define NID_friendlyCountryName 490 +#define OBJ_friendlyCountryName OBJ_pilotAttributeType,43L + +#define SN_uniqueIdentifier "uid" +#define LN_uniqueIdentifier "uniqueIdentifier" +#define NID_uniqueIdentifier 102 +#define OBJ_uniqueIdentifier OBJ_pilotAttributeType,44L + +#define LN_organizationalStatus "organizationalStatus" +#define NID_organizationalStatus 491 +#define OBJ_organizationalStatus OBJ_pilotAttributeType,45L + +#define LN_janetMailbox "janetMailbox" +#define 
NID_janetMailbox 492 +#define OBJ_janetMailbox OBJ_pilotAttributeType,46L + +#define LN_mailPreferenceOption "mailPreferenceOption" +#define NID_mailPreferenceOption 493 +#define OBJ_mailPreferenceOption OBJ_pilotAttributeType,47L + +#define LN_buildingName "buildingName" +#define NID_buildingName 494 +#define OBJ_buildingName OBJ_pilotAttributeType,48L + +#define LN_dSAQuality "dSAQuality" +#define NID_dSAQuality 495 +#define OBJ_dSAQuality OBJ_pilotAttributeType,49L + +#define LN_singleLevelQuality "singleLevelQuality" +#define NID_singleLevelQuality 496 +#define OBJ_singleLevelQuality OBJ_pilotAttributeType,50L + +#define LN_subtreeMinimumQuality "subtreeMinimumQuality" +#define NID_subtreeMinimumQuality 497 +#define OBJ_subtreeMinimumQuality OBJ_pilotAttributeType,51L + +#define LN_subtreeMaximumQuality "subtreeMaximumQuality" +#define NID_subtreeMaximumQuality 498 +#define OBJ_subtreeMaximumQuality OBJ_pilotAttributeType,52L + +#define LN_personalSignature "personalSignature" +#define NID_personalSignature 499 +#define OBJ_personalSignature OBJ_pilotAttributeType,53L + +#define LN_dITRedirect "dITRedirect" +#define NID_dITRedirect 500 +#define OBJ_dITRedirect OBJ_pilotAttributeType,54L + +#define SN_audio "audio" +#define NID_audio 501 +#define OBJ_audio OBJ_pilotAttributeType,55L + +#define LN_documentPublisher "documentPublisher" +#define NID_documentPublisher 502 +#define OBJ_documentPublisher OBJ_pilotAttributeType,56L + +#define SN_id_set "id-set" +#define LN_id_set "Secure Electronic Transactions" +#define NID_id_set 512 +#define OBJ_id_set OBJ_international_organizations,42L + +#define SN_set_ctype "set-ctype" +#define LN_set_ctype "content types" +#define NID_set_ctype 513 +#define OBJ_set_ctype OBJ_id_set,0L + +#define SN_set_msgExt "set-msgExt" +#define LN_set_msgExt "message extensions" +#define NID_set_msgExt 514 +#define OBJ_set_msgExt OBJ_id_set,1L + +#define SN_set_attr "set-attr" +#define NID_set_attr 515 +#define OBJ_set_attr OBJ_id_set,3L + +#define SN_set_policy "set-policy" +#define NID_set_policy 516 +#define OBJ_set_policy OBJ_id_set,5L + +#define SN_set_certExt "set-certExt" +#define LN_set_certExt "certificate extensions" +#define NID_set_certExt 517 +#define OBJ_set_certExt OBJ_id_set,7L + +#define SN_set_brand "set-brand" +#define NID_set_brand 518 +#define OBJ_set_brand OBJ_id_set,8L + +#define SN_setct_PANData "setct-PANData" +#define NID_setct_PANData 519 +#define OBJ_setct_PANData OBJ_set_ctype,0L + +#define SN_setct_PANToken "setct-PANToken" +#define NID_setct_PANToken 520 +#define OBJ_setct_PANToken OBJ_set_ctype,1L + +#define SN_setct_PANOnly "setct-PANOnly" +#define NID_setct_PANOnly 521 +#define OBJ_setct_PANOnly OBJ_set_ctype,2L + +#define SN_setct_OIData "setct-OIData" +#define NID_setct_OIData 522 +#define OBJ_setct_OIData OBJ_set_ctype,3L + +#define SN_setct_PI "setct-PI" +#define NID_setct_PI 523 +#define OBJ_setct_PI OBJ_set_ctype,4L + +#define SN_setct_PIData "setct-PIData" +#define NID_setct_PIData 524 +#define OBJ_setct_PIData OBJ_set_ctype,5L + +#define SN_setct_PIDataUnsigned "setct-PIDataUnsigned" +#define NID_setct_PIDataUnsigned 525 +#define OBJ_setct_PIDataUnsigned OBJ_set_ctype,6L + +#define SN_setct_HODInput "setct-HODInput" +#define NID_setct_HODInput 526 +#define OBJ_setct_HODInput OBJ_set_ctype,7L + +#define SN_setct_AuthResBaggage "setct-AuthResBaggage" +#define NID_setct_AuthResBaggage 527 +#define OBJ_setct_AuthResBaggage OBJ_set_ctype,8L + +#define SN_setct_AuthRevReqBaggage "setct-AuthRevReqBaggage" +#define 
NID_setct_AuthRevReqBaggage 528 +#define OBJ_setct_AuthRevReqBaggage OBJ_set_ctype,9L + +#define SN_setct_AuthRevResBaggage "setct-AuthRevResBaggage" +#define NID_setct_AuthRevResBaggage 529 +#define OBJ_setct_AuthRevResBaggage OBJ_set_ctype,10L + +#define SN_setct_CapTokenSeq "setct-CapTokenSeq" +#define NID_setct_CapTokenSeq 530 +#define OBJ_setct_CapTokenSeq OBJ_set_ctype,11L + +#define SN_setct_PInitResData "setct-PInitResData" +#define NID_setct_PInitResData 531 +#define OBJ_setct_PInitResData OBJ_set_ctype,12L + +#define SN_setct_PI_TBS "setct-PI-TBS" +#define NID_setct_PI_TBS 532 +#define OBJ_setct_PI_TBS OBJ_set_ctype,13L + +#define SN_setct_PResData "setct-PResData" +#define NID_setct_PResData 533 +#define OBJ_setct_PResData OBJ_set_ctype,14L + +#define SN_setct_AuthReqTBS "setct-AuthReqTBS" +#define NID_setct_AuthReqTBS 534 +#define OBJ_setct_AuthReqTBS OBJ_set_ctype,16L + +#define SN_setct_AuthResTBS "setct-AuthResTBS" +#define NID_setct_AuthResTBS 535 +#define OBJ_setct_AuthResTBS OBJ_set_ctype,17L + +#define SN_setct_AuthResTBSX "setct-AuthResTBSX" +#define NID_setct_AuthResTBSX 536 +#define OBJ_setct_AuthResTBSX OBJ_set_ctype,18L + +#define SN_setct_AuthTokenTBS "setct-AuthTokenTBS" +#define NID_setct_AuthTokenTBS 537 +#define OBJ_setct_AuthTokenTBS OBJ_set_ctype,19L + +#define SN_setct_CapTokenData "setct-CapTokenData" +#define NID_setct_CapTokenData 538 +#define OBJ_setct_CapTokenData OBJ_set_ctype,20L + +#define SN_setct_CapTokenTBS "setct-CapTokenTBS" +#define NID_setct_CapTokenTBS 539 +#define OBJ_setct_CapTokenTBS OBJ_set_ctype,21L + +#define SN_setct_AcqCardCodeMsg "setct-AcqCardCodeMsg" +#define NID_setct_AcqCardCodeMsg 540 +#define OBJ_setct_AcqCardCodeMsg OBJ_set_ctype,22L + +#define SN_setct_AuthRevReqTBS "setct-AuthRevReqTBS" +#define NID_setct_AuthRevReqTBS 541 +#define OBJ_setct_AuthRevReqTBS OBJ_set_ctype,23L + +#define SN_setct_AuthRevResData "setct-AuthRevResData" +#define NID_setct_AuthRevResData 542 +#define OBJ_setct_AuthRevResData OBJ_set_ctype,24L + +#define SN_setct_AuthRevResTBS "setct-AuthRevResTBS" +#define NID_setct_AuthRevResTBS 543 +#define OBJ_setct_AuthRevResTBS OBJ_set_ctype,25L + +#define SN_setct_CapReqTBS "setct-CapReqTBS" +#define NID_setct_CapReqTBS 544 +#define OBJ_setct_CapReqTBS OBJ_set_ctype,26L + +#define SN_setct_CapReqTBSX "setct-CapReqTBSX" +#define NID_setct_CapReqTBSX 545 +#define OBJ_setct_CapReqTBSX OBJ_set_ctype,27L + +#define SN_setct_CapResData "setct-CapResData" +#define NID_setct_CapResData 546 +#define OBJ_setct_CapResData OBJ_set_ctype,28L + +#define SN_setct_CapRevReqTBS "setct-CapRevReqTBS" +#define NID_setct_CapRevReqTBS 547 +#define OBJ_setct_CapRevReqTBS OBJ_set_ctype,29L + +#define SN_setct_CapRevReqTBSX "setct-CapRevReqTBSX" +#define NID_setct_CapRevReqTBSX 548 +#define OBJ_setct_CapRevReqTBSX OBJ_set_ctype,30L + +#define SN_setct_CapRevResData "setct-CapRevResData" +#define NID_setct_CapRevResData 549 +#define OBJ_setct_CapRevResData OBJ_set_ctype,31L + +#define SN_setct_CredReqTBS "setct-CredReqTBS" +#define NID_setct_CredReqTBS 550 +#define OBJ_setct_CredReqTBS OBJ_set_ctype,32L + +#define SN_setct_CredReqTBSX "setct-CredReqTBSX" +#define NID_setct_CredReqTBSX 551 +#define OBJ_setct_CredReqTBSX OBJ_set_ctype,33L + +#define SN_setct_CredResData "setct-CredResData" +#define NID_setct_CredResData 552 +#define OBJ_setct_CredResData OBJ_set_ctype,34L + +#define SN_setct_CredRevReqTBS "setct-CredRevReqTBS" +#define NID_setct_CredRevReqTBS 553 +#define OBJ_setct_CredRevReqTBS OBJ_set_ctype,35L + +#define 
SN_setct_CredRevReqTBSX "setct-CredRevReqTBSX" +#define NID_setct_CredRevReqTBSX 554 +#define OBJ_setct_CredRevReqTBSX OBJ_set_ctype,36L + +#define SN_setct_CredRevResData "setct-CredRevResData" +#define NID_setct_CredRevResData 555 +#define OBJ_setct_CredRevResData OBJ_set_ctype,37L + +#define SN_setct_PCertReqData "setct-PCertReqData" +#define NID_setct_PCertReqData 556 +#define OBJ_setct_PCertReqData OBJ_set_ctype,38L + +#define SN_setct_PCertResTBS "setct-PCertResTBS" +#define NID_setct_PCertResTBS 557 +#define OBJ_setct_PCertResTBS OBJ_set_ctype,39L + +#define SN_setct_BatchAdminReqData "setct-BatchAdminReqData" +#define NID_setct_BatchAdminReqData 558 +#define OBJ_setct_BatchAdminReqData OBJ_set_ctype,40L + +#define SN_setct_BatchAdminResData "setct-BatchAdminResData" +#define NID_setct_BatchAdminResData 559 +#define OBJ_setct_BatchAdminResData OBJ_set_ctype,41L + +#define SN_setct_CardCInitResTBS "setct-CardCInitResTBS" +#define NID_setct_CardCInitResTBS 560 +#define OBJ_setct_CardCInitResTBS OBJ_set_ctype,42L + +#define SN_setct_MeAqCInitResTBS "setct-MeAqCInitResTBS" +#define NID_setct_MeAqCInitResTBS 561 +#define OBJ_setct_MeAqCInitResTBS OBJ_set_ctype,43L + +#define SN_setct_RegFormResTBS "setct-RegFormResTBS" +#define NID_setct_RegFormResTBS 562 +#define OBJ_setct_RegFormResTBS OBJ_set_ctype,44L + +#define SN_setct_CertReqData "setct-CertReqData" +#define NID_setct_CertReqData 563 +#define OBJ_setct_CertReqData OBJ_set_ctype,45L + +#define SN_setct_CertReqTBS "setct-CertReqTBS" +#define NID_setct_CertReqTBS 564 +#define OBJ_setct_CertReqTBS OBJ_set_ctype,46L + +#define SN_setct_CertResData "setct-CertResData" +#define NID_setct_CertResData 565 +#define OBJ_setct_CertResData OBJ_set_ctype,47L + +#define SN_setct_CertInqReqTBS "setct-CertInqReqTBS" +#define NID_setct_CertInqReqTBS 566 +#define OBJ_setct_CertInqReqTBS OBJ_set_ctype,48L + +#define SN_setct_ErrorTBS "setct-ErrorTBS" +#define NID_setct_ErrorTBS 567 +#define OBJ_setct_ErrorTBS OBJ_set_ctype,49L + +#define SN_setct_PIDualSignedTBE "setct-PIDualSignedTBE" +#define NID_setct_PIDualSignedTBE 568 +#define OBJ_setct_PIDualSignedTBE OBJ_set_ctype,50L + +#define SN_setct_PIUnsignedTBE "setct-PIUnsignedTBE" +#define NID_setct_PIUnsignedTBE 569 +#define OBJ_setct_PIUnsignedTBE OBJ_set_ctype,51L + +#define SN_setct_AuthReqTBE "setct-AuthReqTBE" +#define NID_setct_AuthReqTBE 570 +#define OBJ_setct_AuthReqTBE OBJ_set_ctype,52L + +#define SN_setct_AuthResTBE "setct-AuthResTBE" +#define NID_setct_AuthResTBE 571 +#define OBJ_setct_AuthResTBE OBJ_set_ctype,53L + +#define SN_setct_AuthResTBEX "setct-AuthResTBEX" +#define NID_setct_AuthResTBEX 572 +#define OBJ_setct_AuthResTBEX OBJ_set_ctype,54L + +#define SN_setct_AuthTokenTBE "setct-AuthTokenTBE" +#define NID_setct_AuthTokenTBE 573 +#define OBJ_setct_AuthTokenTBE OBJ_set_ctype,55L + +#define SN_setct_CapTokenTBE "setct-CapTokenTBE" +#define NID_setct_CapTokenTBE 574 +#define OBJ_setct_CapTokenTBE OBJ_set_ctype,56L + +#define SN_setct_CapTokenTBEX "setct-CapTokenTBEX" +#define NID_setct_CapTokenTBEX 575 +#define OBJ_setct_CapTokenTBEX OBJ_set_ctype,57L + +#define SN_setct_AcqCardCodeMsgTBE "setct-AcqCardCodeMsgTBE" +#define NID_setct_AcqCardCodeMsgTBE 576 +#define OBJ_setct_AcqCardCodeMsgTBE OBJ_set_ctype,58L + +#define SN_setct_AuthRevReqTBE "setct-AuthRevReqTBE" +#define NID_setct_AuthRevReqTBE 577 +#define OBJ_setct_AuthRevReqTBE OBJ_set_ctype,59L + +#define SN_setct_AuthRevResTBE "setct-AuthRevResTBE" +#define NID_setct_AuthRevResTBE 578 +#define OBJ_setct_AuthRevResTBE 
OBJ_set_ctype,60L + +#define SN_setct_AuthRevResTBEB "setct-AuthRevResTBEB" +#define NID_setct_AuthRevResTBEB 579 +#define OBJ_setct_AuthRevResTBEB OBJ_set_ctype,61L + +#define SN_setct_CapReqTBE "setct-CapReqTBE" +#define NID_setct_CapReqTBE 580 +#define OBJ_setct_CapReqTBE OBJ_set_ctype,62L + +#define SN_setct_CapReqTBEX "setct-CapReqTBEX" +#define NID_setct_CapReqTBEX 581 +#define OBJ_setct_CapReqTBEX OBJ_set_ctype,63L + +#define SN_setct_CapResTBE "setct-CapResTBE" +#define NID_setct_CapResTBE 582 +#define OBJ_setct_CapResTBE OBJ_set_ctype,64L + +#define SN_setct_CapRevReqTBE "setct-CapRevReqTBE" +#define NID_setct_CapRevReqTBE 583 +#define OBJ_setct_CapRevReqTBE OBJ_set_ctype,65L + +#define SN_setct_CapRevReqTBEX "setct-CapRevReqTBEX" +#define NID_setct_CapRevReqTBEX 584 +#define OBJ_setct_CapRevReqTBEX OBJ_set_ctype,66L + +#define SN_setct_CapRevResTBE "setct-CapRevResTBE" +#define NID_setct_CapRevResTBE 585 +#define OBJ_setct_CapRevResTBE OBJ_set_ctype,67L + +#define SN_setct_CredReqTBE "setct-CredReqTBE" +#define NID_setct_CredReqTBE 586 +#define OBJ_setct_CredReqTBE OBJ_set_ctype,68L + +#define SN_setct_CredReqTBEX "setct-CredReqTBEX" +#define NID_setct_CredReqTBEX 587 +#define OBJ_setct_CredReqTBEX OBJ_set_ctype,69L + +#define SN_setct_CredResTBE "setct-CredResTBE" +#define NID_setct_CredResTBE 588 +#define OBJ_setct_CredResTBE OBJ_set_ctype,70L + +#define SN_setct_CredRevReqTBE "setct-CredRevReqTBE" +#define NID_setct_CredRevReqTBE 589 +#define OBJ_setct_CredRevReqTBE OBJ_set_ctype,71L + +#define SN_setct_CredRevReqTBEX "setct-CredRevReqTBEX" +#define NID_setct_CredRevReqTBEX 590 +#define OBJ_setct_CredRevReqTBEX OBJ_set_ctype,72L + +#define SN_setct_CredRevResTBE "setct-CredRevResTBE" +#define NID_setct_CredRevResTBE 591 +#define OBJ_setct_CredRevResTBE OBJ_set_ctype,73L + +#define SN_setct_BatchAdminReqTBE "setct-BatchAdminReqTBE" +#define NID_setct_BatchAdminReqTBE 592 +#define OBJ_setct_BatchAdminReqTBE OBJ_set_ctype,74L + +#define SN_setct_BatchAdminResTBE "setct-BatchAdminResTBE" +#define NID_setct_BatchAdminResTBE 593 +#define OBJ_setct_BatchAdminResTBE OBJ_set_ctype,75L + +#define SN_setct_RegFormReqTBE "setct-RegFormReqTBE" +#define NID_setct_RegFormReqTBE 594 +#define OBJ_setct_RegFormReqTBE OBJ_set_ctype,76L + +#define SN_setct_CertReqTBE "setct-CertReqTBE" +#define NID_setct_CertReqTBE 595 +#define OBJ_setct_CertReqTBE OBJ_set_ctype,77L + +#define SN_setct_CertReqTBEX "setct-CertReqTBEX" +#define NID_setct_CertReqTBEX 596 +#define OBJ_setct_CertReqTBEX OBJ_set_ctype,78L + +#define SN_setct_CertResTBE "setct-CertResTBE" +#define NID_setct_CertResTBE 597 +#define OBJ_setct_CertResTBE OBJ_set_ctype,79L + +#define SN_setct_CRLNotificationTBS "setct-CRLNotificationTBS" +#define NID_setct_CRLNotificationTBS 598 +#define OBJ_setct_CRLNotificationTBS OBJ_set_ctype,80L + +#define SN_setct_CRLNotificationResTBS "setct-CRLNotificationResTBS" +#define NID_setct_CRLNotificationResTBS 599 +#define OBJ_setct_CRLNotificationResTBS OBJ_set_ctype,81L + +#define SN_setct_BCIDistributionTBS "setct-BCIDistributionTBS" +#define NID_setct_BCIDistributionTBS 600 +#define OBJ_setct_BCIDistributionTBS OBJ_set_ctype,82L + +#define SN_setext_genCrypt "setext-genCrypt" +#define LN_setext_genCrypt "generic cryptogram" +#define NID_setext_genCrypt 601 +#define OBJ_setext_genCrypt OBJ_set_msgExt,1L + +#define SN_setext_miAuth "setext-miAuth" +#define LN_setext_miAuth "merchant initiated auth" +#define NID_setext_miAuth 602 +#define OBJ_setext_miAuth OBJ_set_msgExt,3L + +#define SN_setext_pinSecure 
"setext-pinSecure" +#define NID_setext_pinSecure 603 +#define OBJ_setext_pinSecure OBJ_set_msgExt,4L + +#define SN_setext_pinAny "setext-pinAny" +#define NID_setext_pinAny 604 +#define OBJ_setext_pinAny OBJ_set_msgExt,5L + +#define SN_setext_track2 "setext-track2" +#define NID_setext_track2 605 +#define OBJ_setext_track2 OBJ_set_msgExt,7L + +#define SN_setext_cv "setext-cv" +#define LN_setext_cv "additional verification" +#define NID_setext_cv 606 +#define OBJ_setext_cv OBJ_set_msgExt,8L + +#define SN_set_policy_root "set-policy-root" +#define NID_set_policy_root 607 +#define OBJ_set_policy_root OBJ_set_policy,0L + +#define SN_setCext_hashedRoot "setCext-hashedRoot" +#define NID_setCext_hashedRoot 608 +#define OBJ_setCext_hashedRoot OBJ_set_certExt,0L + +#define SN_setCext_certType "setCext-certType" +#define NID_setCext_certType 609 +#define OBJ_setCext_certType OBJ_set_certExt,1L + +#define SN_setCext_merchData "setCext-merchData" +#define NID_setCext_merchData 610 +#define OBJ_setCext_merchData OBJ_set_certExt,2L + +#define SN_setCext_cCertRequired "setCext-cCertRequired" +#define NID_setCext_cCertRequired 611 +#define OBJ_setCext_cCertRequired OBJ_set_certExt,3L + +#define SN_setCext_tunneling "setCext-tunneling" +#define NID_setCext_tunneling 612 +#define OBJ_setCext_tunneling OBJ_set_certExt,4L + +#define SN_setCext_setExt "setCext-setExt" +#define NID_setCext_setExt 613 +#define OBJ_setCext_setExt OBJ_set_certExt,5L + +#define SN_setCext_setQualf "setCext-setQualf" +#define NID_setCext_setQualf 614 +#define OBJ_setCext_setQualf OBJ_set_certExt,6L + +#define SN_setCext_PGWYcapabilities "setCext-PGWYcapabilities" +#define NID_setCext_PGWYcapabilities 615 +#define OBJ_setCext_PGWYcapabilities OBJ_set_certExt,7L + +#define SN_setCext_TokenIdentifier "setCext-TokenIdentifier" +#define NID_setCext_TokenIdentifier 616 +#define OBJ_setCext_TokenIdentifier OBJ_set_certExt,8L + +#define SN_setCext_Track2Data "setCext-Track2Data" +#define NID_setCext_Track2Data 617 +#define OBJ_setCext_Track2Data OBJ_set_certExt,9L + +#define SN_setCext_TokenType "setCext-TokenType" +#define NID_setCext_TokenType 618 +#define OBJ_setCext_TokenType OBJ_set_certExt,10L + +#define SN_setCext_IssuerCapabilities "setCext-IssuerCapabilities" +#define NID_setCext_IssuerCapabilities 619 +#define OBJ_setCext_IssuerCapabilities OBJ_set_certExt,11L + +#define SN_setAttr_Cert "setAttr-Cert" +#define NID_setAttr_Cert 620 +#define OBJ_setAttr_Cert OBJ_set_attr,0L + +#define SN_setAttr_PGWYcap "setAttr-PGWYcap" +#define LN_setAttr_PGWYcap "payment gateway capabilities" +#define NID_setAttr_PGWYcap 621 +#define OBJ_setAttr_PGWYcap OBJ_set_attr,1L + +#define SN_setAttr_TokenType "setAttr-TokenType" +#define NID_setAttr_TokenType 622 +#define OBJ_setAttr_TokenType OBJ_set_attr,2L + +#define SN_setAttr_IssCap "setAttr-IssCap" +#define LN_setAttr_IssCap "issuer capabilities" +#define NID_setAttr_IssCap 623 +#define OBJ_setAttr_IssCap OBJ_set_attr,3L + +#define SN_set_rootKeyThumb "set-rootKeyThumb" +#define NID_set_rootKeyThumb 624 +#define OBJ_set_rootKeyThumb OBJ_setAttr_Cert,0L + +#define SN_set_addPolicy "set-addPolicy" +#define NID_set_addPolicy 625 +#define OBJ_set_addPolicy OBJ_setAttr_Cert,1L + +#define SN_setAttr_Token_EMV "setAttr-Token-EMV" +#define NID_setAttr_Token_EMV 626 +#define OBJ_setAttr_Token_EMV OBJ_setAttr_TokenType,1L + +#define SN_setAttr_Token_B0Prime "setAttr-Token-B0Prime" +#define NID_setAttr_Token_B0Prime 627 +#define OBJ_setAttr_Token_B0Prime OBJ_setAttr_TokenType,2L + +#define SN_setAttr_IssCap_CVM 
"setAttr-IssCap-CVM" +#define NID_setAttr_IssCap_CVM 628 +#define OBJ_setAttr_IssCap_CVM OBJ_setAttr_IssCap,3L + +#define SN_setAttr_IssCap_T2 "setAttr-IssCap-T2" +#define NID_setAttr_IssCap_T2 629 +#define OBJ_setAttr_IssCap_T2 OBJ_setAttr_IssCap,4L + +#define SN_setAttr_IssCap_Sig "setAttr-IssCap-Sig" +#define NID_setAttr_IssCap_Sig 630 +#define OBJ_setAttr_IssCap_Sig OBJ_setAttr_IssCap,5L + +#define SN_setAttr_GenCryptgrm "setAttr-GenCryptgrm" +#define LN_setAttr_GenCryptgrm "generate cryptogram" +#define NID_setAttr_GenCryptgrm 631 +#define OBJ_setAttr_GenCryptgrm OBJ_setAttr_IssCap_CVM,1L + +#define SN_setAttr_T2Enc "setAttr-T2Enc" +#define LN_setAttr_T2Enc "encrypted track 2" +#define NID_setAttr_T2Enc 632 +#define OBJ_setAttr_T2Enc OBJ_setAttr_IssCap_T2,1L + +#define SN_setAttr_T2cleartxt "setAttr-T2cleartxt" +#define LN_setAttr_T2cleartxt "cleartext track 2" +#define NID_setAttr_T2cleartxt 633 +#define OBJ_setAttr_T2cleartxt OBJ_setAttr_IssCap_T2,2L + +#define SN_setAttr_TokICCsig "setAttr-TokICCsig" +#define LN_setAttr_TokICCsig "ICC or token signature" +#define NID_setAttr_TokICCsig 634 +#define OBJ_setAttr_TokICCsig OBJ_setAttr_IssCap_Sig,1L + +#define SN_setAttr_SecDevSig "setAttr-SecDevSig" +#define LN_setAttr_SecDevSig "secure device signature" +#define NID_setAttr_SecDevSig 635 +#define OBJ_setAttr_SecDevSig OBJ_setAttr_IssCap_Sig,2L + +#define SN_set_brand_IATA_ATA "set-brand-IATA-ATA" +#define NID_set_brand_IATA_ATA 636 +#define OBJ_set_brand_IATA_ATA OBJ_set_brand,1L + +#define SN_set_brand_Diners "set-brand-Diners" +#define NID_set_brand_Diners 637 +#define OBJ_set_brand_Diners OBJ_set_brand,30L + +#define SN_set_brand_AmericanExpress "set-brand-AmericanExpress" +#define NID_set_brand_AmericanExpress 638 +#define OBJ_set_brand_AmericanExpress OBJ_set_brand,34L + +#define SN_set_brand_JCB "set-brand-JCB" +#define NID_set_brand_JCB 639 +#define OBJ_set_brand_JCB OBJ_set_brand,35L + +#define SN_set_brand_Visa "set-brand-Visa" +#define NID_set_brand_Visa 640 +#define OBJ_set_brand_Visa OBJ_set_brand,4L + +#define SN_set_brand_MasterCard "set-brand-MasterCard" +#define NID_set_brand_MasterCard 641 +#define OBJ_set_brand_MasterCard OBJ_set_brand,5L + +#define SN_set_brand_Novus "set-brand-Novus" +#define NID_set_brand_Novus 642 +#define OBJ_set_brand_Novus OBJ_set_brand,6011L + +#define SN_des_cdmf "DES-CDMF" +#define LN_des_cdmf "des-cdmf" +#define NID_des_cdmf 643 +#define OBJ_des_cdmf OBJ_rsadsi,3L,10L + +#define SN_rsaOAEPEncryptionSET "rsaOAEPEncryptionSET" +#define NID_rsaOAEPEncryptionSET 644 +#define OBJ_rsaOAEPEncryptionSET OBJ_rsadsi,1L,1L,6L + +#define SN_ipsec3 "Oakley-EC2N-3" +#define LN_ipsec3 "ipsec3" +#define NID_ipsec3 749 + +#define SN_ipsec4 "Oakley-EC2N-4" +#define LN_ipsec4 "ipsec4" +#define NID_ipsec4 750 + +#define SN_whirlpool "whirlpool" +#define NID_whirlpool 804 +#define OBJ_whirlpool OBJ_iso,0L,10118L,3L,0L,55L + +#define SN_cryptopro "cryptopro" +#define NID_cryptopro 805 +#define OBJ_cryptopro OBJ_member_body,643L,2L,2L + +#define SN_cryptocom "cryptocom" +#define NID_cryptocom 806 +#define OBJ_cryptocom OBJ_member_body,643L,2L,9L + +#define SN_id_tc26 "id-tc26" +#define NID_id_tc26 974 +#define OBJ_id_tc26 OBJ_member_body,643L,7L,1L + +#define SN_id_GostR3411_94_with_GostR3410_2001 "id-GostR3411-94-with-GostR3410-2001" +#define LN_id_GostR3411_94_with_GostR3410_2001 "GOST R 34.11-94 with GOST R 34.10-2001" +#define NID_id_GostR3411_94_with_GostR3410_2001 807 +#define OBJ_id_GostR3411_94_with_GostR3410_2001 OBJ_cryptopro,3L + +#define 
SN_id_GostR3411_94_with_GostR3410_94 "id-GostR3411-94-with-GostR3410-94" +#define LN_id_GostR3411_94_with_GostR3410_94 "GOST R 34.11-94 with GOST R 34.10-94" +#define NID_id_GostR3411_94_with_GostR3410_94 808 +#define OBJ_id_GostR3411_94_with_GostR3410_94 OBJ_cryptopro,4L + +#define SN_id_GostR3411_94 "md_gost94" +#define LN_id_GostR3411_94 "GOST R 34.11-94" +#define NID_id_GostR3411_94 809 +#define OBJ_id_GostR3411_94 OBJ_cryptopro,9L + +#define SN_id_HMACGostR3411_94 "id-HMACGostR3411-94" +#define LN_id_HMACGostR3411_94 "HMAC GOST 34.11-94" +#define NID_id_HMACGostR3411_94 810 +#define OBJ_id_HMACGostR3411_94 OBJ_cryptopro,10L + +#define SN_id_GostR3410_2001 "gost2001" +#define LN_id_GostR3410_2001 "GOST R 34.10-2001" +#define NID_id_GostR3410_2001 811 +#define OBJ_id_GostR3410_2001 OBJ_cryptopro,19L + +#define SN_id_GostR3410_94 "gost94" +#define LN_id_GostR3410_94 "GOST R 34.10-94" +#define NID_id_GostR3410_94 812 +#define OBJ_id_GostR3410_94 OBJ_cryptopro,20L + +#define SN_id_Gost28147_89 "gost89" +#define LN_id_Gost28147_89 "GOST 28147-89" +#define NID_id_Gost28147_89 813 +#define OBJ_id_Gost28147_89 OBJ_cryptopro,21L + +#define SN_gost89_cnt "gost89-cnt" +#define NID_gost89_cnt 814 + +#define SN_gost89_cnt_12 "gost89-cnt-12" +#define NID_gost89_cnt_12 975 + +#define SN_gost89_cbc "gost89-cbc" +#define NID_gost89_cbc 1009 + +#define SN_gost89_ecb "gost89-ecb" +#define NID_gost89_ecb 1010 + +#define SN_gost89_ctr "gost89-ctr" +#define NID_gost89_ctr 1011 + +#define SN_id_Gost28147_89_MAC "gost-mac" +#define LN_id_Gost28147_89_MAC "GOST 28147-89 MAC" +#define NID_id_Gost28147_89_MAC 815 +#define OBJ_id_Gost28147_89_MAC OBJ_cryptopro,22L + +#define SN_gost_mac_12 "gost-mac-12" +#define NID_gost_mac_12 976 + +#define SN_id_GostR3411_94_prf "prf-gostr3411-94" +#define LN_id_GostR3411_94_prf "GOST R 34.11-94 PRF" +#define NID_id_GostR3411_94_prf 816 +#define OBJ_id_GostR3411_94_prf OBJ_cryptopro,23L + +#define SN_id_GostR3410_2001DH "id-GostR3410-2001DH" +#define LN_id_GostR3410_2001DH "GOST R 34.10-2001 DH" +#define NID_id_GostR3410_2001DH 817 +#define OBJ_id_GostR3410_2001DH OBJ_cryptopro,98L + +#define SN_id_GostR3410_94DH "id-GostR3410-94DH" +#define LN_id_GostR3410_94DH "GOST R 34.10-94 DH" +#define NID_id_GostR3410_94DH 818 +#define OBJ_id_GostR3410_94DH OBJ_cryptopro,99L + +#define SN_id_Gost28147_89_CryptoPro_KeyMeshing "id-Gost28147-89-CryptoPro-KeyMeshing" +#define NID_id_Gost28147_89_CryptoPro_KeyMeshing 819 +#define OBJ_id_Gost28147_89_CryptoPro_KeyMeshing OBJ_cryptopro,14L,1L + +#define SN_id_Gost28147_89_None_KeyMeshing "id-Gost28147-89-None-KeyMeshing" +#define NID_id_Gost28147_89_None_KeyMeshing 820 +#define OBJ_id_Gost28147_89_None_KeyMeshing OBJ_cryptopro,14L,0L + +#define SN_id_GostR3411_94_TestParamSet "id-GostR3411-94-TestParamSet" +#define NID_id_GostR3411_94_TestParamSet 821 +#define OBJ_id_GostR3411_94_TestParamSet OBJ_cryptopro,30L,0L + +#define SN_id_GostR3411_94_CryptoProParamSet "id-GostR3411-94-CryptoProParamSet" +#define NID_id_GostR3411_94_CryptoProParamSet 822 +#define OBJ_id_GostR3411_94_CryptoProParamSet OBJ_cryptopro,30L,1L + +#define SN_id_Gost28147_89_TestParamSet "id-Gost28147-89-TestParamSet" +#define NID_id_Gost28147_89_TestParamSet 823 +#define OBJ_id_Gost28147_89_TestParamSet OBJ_cryptopro,31L,0L + +#define SN_id_Gost28147_89_CryptoPro_A_ParamSet "id-Gost28147-89-CryptoPro-A-ParamSet" +#define NID_id_Gost28147_89_CryptoPro_A_ParamSet 824 +#define OBJ_id_Gost28147_89_CryptoPro_A_ParamSet OBJ_cryptopro,31L,1L + +#define 
SN_id_Gost28147_89_CryptoPro_B_ParamSet "id-Gost28147-89-CryptoPro-B-ParamSet" +#define NID_id_Gost28147_89_CryptoPro_B_ParamSet 825 +#define OBJ_id_Gost28147_89_CryptoPro_B_ParamSet OBJ_cryptopro,31L,2L + +#define SN_id_Gost28147_89_CryptoPro_C_ParamSet "id-Gost28147-89-CryptoPro-C-ParamSet" +#define NID_id_Gost28147_89_CryptoPro_C_ParamSet 826 +#define OBJ_id_Gost28147_89_CryptoPro_C_ParamSet OBJ_cryptopro,31L,3L + +#define SN_id_Gost28147_89_CryptoPro_D_ParamSet "id-Gost28147-89-CryptoPro-D-ParamSet" +#define NID_id_Gost28147_89_CryptoPro_D_ParamSet 827 +#define OBJ_id_Gost28147_89_CryptoPro_D_ParamSet OBJ_cryptopro,31L,4L + +#define SN_id_Gost28147_89_CryptoPro_Oscar_1_1_ParamSet "id-Gost28147-89-CryptoPro-Oscar-1-1-ParamSet" +#define NID_id_Gost28147_89_CryptoPro_Oscar_1_1_ParamSet 828 +#define OBJ_id_Gost28147_89_CryptoPro_Oscar_1_1_ParamSet OBJ_cryptopro,31L,5L + +#define SN_id_Gost28147_89_CryptoPro_Oscar_1_0_ParamSet "id-Gost28147-89-CryptoPro-Oscar-1-0-ParamSet" +#define NID_id_Gost28147_89_CryptoPro_Oscar_1_0_ParamSet 829 +#define OBJ_id_Gost28147_89_CryptoPro_Oscar_1_0_ParamSet OBJ_cryptopro,31L,6L + +#define SN_id_Gost28147_89_CryptoPro_RIC_1_ParamSet "id-Gost28147-89-CryptoPro-RIC-1-ParamSet" +#define NID_id_Gost28147_89_CryptoPro_RIC_1_ParamSet 830 +#define OBJ_id_Gost28147_89_CryptoPro_RIC_1_ParamSet OBJ_cryptopro,31L,7L + +#define SN_id_GostR3410_94_TestParamSet "id-GostR3410-94-TestParamSet" +#define NID_id_GostR3410_94_TestParamSet 831 +#define OBJ_id_GostR3410_94_TestParamSet OBJ_cryptopro,32L,0L + +#define SN_id_GostR3410_94_CryptoPro_A_ParamSet "id-GostR3410-94-CryptoPro-A-ParamSet" +#define NID_id_GostR3410_94_CryptoPro_A_ParamSet 832 +#define OBJ_id_GostR3410_94_CryptoPro_A_ParamSet OBJ_cryptopro,32L,2L + +#define SN_id_GostR3410_94_CryptoPro_B_ParamSet "id-GostR3410-94-CryptoPro-B-ParamSet" +#define NID_id_GostR3410_94_CryptoPro_B_ParamSet 833 +#define OBJ_id_GostR3410_94_CryptoPro_B_ParamSet OBJ_cryptopro,32L,3L + +#define SN_id_GostR3410_94_CryptoPro_C_ParamSet "id-GostR3410-94-CryptoPro-C-ParamSet" +#define NID_id_GostR3410_94_CryptoPro_C_ParamSet 834 +#define OBJ_id_GostR3410_94_CryptoPro_C_ParamSet OBJ_cryptopro,32L,4L + +#define SN_id_GostR3410_94_CryptoPro_D_ParamSet "id-GostR3410-94-CryptoPro-D-ParamSet" +#define NID_id_GostR3410_94_CryptoPro_D_ParamSet 835 +#define OBJ_id_GostR3410_94_CryptoPro_D_ParamSet OBJ_cryptopro,32L,5L + +#define SN_id_GostR3410_94_CryptoPro_XchA_ParamSet "id-GostR3410-94-CryptoPro-XchA-ParamSet" +#define NID_id_GostR3410_94_CryptoPro_XchA_ParamSet 836 +#define OBJ_id_GostR3410_94_CryptoPro_XchA_ParamSet OBJ_cryptopro,33L,1L + +#define SN_id_GostR3410_94_CryptoPro_XchB_ParamSet "id-GostR3410-94-CryptoPro-XchB-ParamSet" +#define NID_id_GostR3410_94_CryptoPro_XchB_ParamSet 837 +#define OBJ_id_GostR3410_94_CryptoPro_XchB_ParamSet OBJ_cryptopro,33L,2L + +#define SN_id_GostR3410_94_CryptoPro_XchC_ParamSet "id-GostR3410-94-CryptoPro-XchC-ParamSet" +#define NID_id_GostR3410_94_CryptoPro_XchC_ParamSet 838 +#define OBJ_id_GostR3410_94_CryptoPro_XchC_ParamSet OBJ_cryptopro,33L,3L + +#define SN_id_GostR3410_2001_TestParamSet "id-GostR3410-2001-TestParamSet" +#define NID_id_GostR3410_2001_TestParamSet 839 +#define OBJ_id_GostR3410_2001_TestParamSet OBJ_cryptopro,35L,0L + +#define SN_id_GostR3410_2001_CryptoPro_A_ParamSet "id-GostR3410-2001-CryptoPro-A-ParamSet" +#define NID_id_GostR3410_2001_CryptoPro_A_ParamSet 840 +#define OBJ_id_GostR3410_2001_CryptoPro_A_ParamSet OBJ_cryptopro,35L,1L + +#define SN_id_GostR3410_2001_CryptoPro_B_ParamSet 
"id-GostR3410-2001-CryptoPro-B-ParamSet" +#define NID_id_GostR3410_2001_CryptoPro_B_ParamSet 841 +#define OBJ_id_GostR3410_2001_CryptoPro_B_ParamSet OBJ_cryptopro,35L,2L + +#define SN_id_GostR3410_2001_CryptoPro_C_ParamSet "id-GostR3410-2001-CryptoPro-C-ParamSet" +#define NID_id_GostR3410_2001_CryptoPro_C_ParamSet 842 +#define OBJ_id_GostR3410_2001_CryptoPro_C_ParamSet OBJ_cryptopro,35L,3L + +#define SN_id_GostR3410_2001_CryptoPro_XchA_ParamSet "id-GostR3410-2001-CryptoPro-XchA-ParamSet" +#define NID_id_GostR3410_2001_CryptoPro_XchA_ParamSet 843 +#define OBJ_id_GostR3410_2001_CryptoPro_XchA_ParamSet OBJ_cryptopro,36L,0L + +#define SN_id_GostR3410_2001_CryptoPro_XchB_ParamSet "id-GostR3410-2001-CryptoPro-XchB-ParamSet" +#define NID_id_GostR3410_2001_CryptoPro_XchB_ParamSet 844 +#define OBJ_id_GostR3410_2001_CryptoPro_XchB_ParamSet OBJ_cryptopro,36L,1L + +#define SN_id_GostR3410_94_a "id-GostR3410-94-a" +#define NID_id_GostR3410_94_a 845 +#define OBJ_id_GostR3410_94_a OBJ_id_GostR3410_94,1L + +#define SN_id_GostR3410_94_aBis "id-GostR3410-94-aBis" +#define NID_id_GostR3410_94_aBis 846 +#define OBJ_id_GostR3410_94_aBis OBJ_id_GostR3410_94,2L + +#define SN_id_GostR3410_94_b "id-GostR3410-94-b" +#define NID_id_GostR3410_94_b 847 +#define OBJ_id_GostR3410_94_b OBJ_id_GostR3410_94,3L + +#define SN_id_GostR3410_94_bBis "id-GostR3410-94-bBis" +#define NID_id_GostR3410_94_bBis 848 +#define OBJ_id_GostR3410_94_bBis OBJ_id_GostR3410_94,4L + +#define SN_id_Gost28147_89_cc "id-Gost28147-89-cc" +#define LN_id_Gost28147_89_cc "GOST 28147-89 Cryptocom ParamSet" +#define NID_id_Gost28147_89_cc 849 +#define OBJ_id_Gost28147_89_cc OBJ_cryptocom,1L,6L,1L + +#define SN_id_GostR3410_94_cc "gost94cc" +#define LN_id_GostR3410_94_cc "GOST 34.10-94 Cryptocom" +#define NID_id_GostR3410_94_cc 850 +#define OBJ_id_GostR3410_94_cc OBJ_cryptocom,1L,5L,3L + +#define SN_id_GostR3410_2001_cc "gost2001cc" +#define LN_id_GostR3410_2001_cc "GOST 34.10-2001 Cryptocom" +#define NID_id_GostR3410_2001_cc 851 +#define OBJ_id_GostR3410_2001_cc OBJ_cryptocom,1L,5L,4L + +#define SN_id_GostR3411_94_with_GostR3410_94_cc "id-GostR3411-94-with-GostR3410-94-cc" +#define LN_id_GostR3411_94_with_GostR3410_94_cc "GOST R 34.11-94 with GOST R 34.10-94 Cryptocom" +#define NID_id_GostR3411_94_with_GostR3410_94_cc 852 +#define OBJ_id_GostR3411_94_with_GostR3410_94_cc OBJ_cryptocom,1L,3L,3L + +#define SN_id_GostR3411_94_with_GostR3410_2001_cc "id-GostR3411-94-with-GostR3410-2001-cc" +#define LN_id_GostR3411_94_with_GostR3410_2001_cc "GOST R 34.11-94 with GOST R 34.10-2001 Cryptocom" +#define NID_id_GostR3411_94_with_GostR3410_2001_cc 853 +#define OBJ_id_GostR3411_94_with_GostR3410_2001_cc OBJ_cryptocom,1L,3L,4L + +#define SN_id_GostR3410_2001_ParamSet_cc "id-GostR3410-2001-ParamSet-cc" +#define LN_id_GostR3410_2001_ParamSet_cc "GOST R 3410-2001 Parameter Set Cryptocom" +#define NID_id_GostR3410_2001_ParamSet_cc 854 +#define OBJ_id_GostR3410_2001_ParamSet_cc OBJ_cryptocom,1L,8L,1L + +#define SN_id_tc26_algorithms "id-tc26-algorithms" +#define NID_id_tc26_algorithms 977 +#define OBJ_id_tc26_algorithms OBJ_id_tc26,1L + +#define SN_id_tc26_sign "id-tc26-sign" +#define NID_id_tc26_sign 978 +#define OBJ_id_tc26_sign OBJ_id_tc26_algorithms,1L + +#define SN_id_GostR3410_2012_256 "gost2012_256" +#define LN_id_GostR3410_2012_256 "GOST R 34.10-2012 with 256 bit modulus" +#define NID_id_GostR3410_2012_256 979 +#define OBJ_id_GostR3410_2012_256 OBJ_id_tc26_sign,1L + +#define SN_id_GostR3410_2012_512 "gost2012_512" +#define LN_id_GostR3410_2012_512 "GOST R 
34.10-2012 with 512 bit modulus" +#define NID_id_GostR3410_2012_512 980 +#define OBJ_id_GostR3410_2012_512 OBJ_id_tc26_sign,2L + +#define SN_id_tc26_digest "id-tc26-digest" +#define NID_id_tc26_digest 981 +#define OBJ_id_tc26_digest OBJ_id_tc26_algorithms,2L + +#define SN_id_GostR3411_2012_256 "md_gost12_256" +#define LN_id_GostR3411_2012_256 "GOST R 34.11-2012 with 256 bit hash" +#define NID_id_GostR3411_2012_256 982 +#define OBJ_id_GostR3411_2012_256 OBJ_id_tc26_digest,2L + +#define SN_id_GostR3411_2012_512 "md_gost12_512" +#define LN_id_GostR3411_2012_512 "GOST R 34.11-2012 with 512 bit hash" +#define NID_id_GostR3411_2012_512 983 +#define OBJ_id_GostR3411_2012_512 OBJ_id_tc26_digest,3L + +#define SN_id_tc26_signwithdigest "id-tc26-signwithdigest" +#define NID_id_tc26_signwithdigest 984 +#define OBJ_id_tc26_signwithdigest OBJ_id_tc26_algorithms,3L + +#define SN_id_tc26_signwithdigest_gost3410_2012_256 "id-tc26-signwithdigest-gost3410-2012-256" +#define LN_id_tc26_signwithdigest_gost3410_2012_256 "GOST R 34.10-2012 with GOST R 34.11-2012 (256 bit)" +#define NID_id_tc26_signwithdigest_gost3410_2012_256 985 +#define OBJ_id_tc26_signwithdigest_gost3410_2012_256 OBJ_id_tc26_signwithdigest,2L + +#define SN_id_tc26_signwithdigest_gost3410_2012_512 "id-tc26-signwithdigest-gost3410-2012-512" +#define LN_id_tc26_signwithdigest_gost3410_2012_512 "GOST R 34.10-2012 with GOST R 34.11-2012 (512 bit)" +#define NID_id_tc26_signwithdigest_gost3410_2012_512 986 +#define OBJ_id_tc26_signwithdigest_gost3410_2012_512 OBJ_id_tc26_signwithdigest,3L + +#define SN_id_tc26_mac "id-tc26-mac" +#define NID_id_tc26_mac 987 +#define OBJ_id_tc26_mac OBJ_id_tc26_algorithms,4L + +#define SN_id_tc26_hmac_gost_3411_2012_256 "id-tc26-hmac-gost-3411-2012-256" +#define LN_id_tc26_hmac_gost_3411_2012_256 "HMAC GOST 34.11-2012 256 bit" +#define NID_id_tc26_hmac_gost_3411_2012_256 988 +#define OBJ_id_tc26_hmac_gost_3411_2012_256 OBJ_id_tc26_mac,1L + +#define SN_id_tc26_hmac_gost_3411_2012_512 "id-tc26-hmac-gost-3411-2012-512" +#define LN_id_tc26_hmac_gost_3411_2012_512 "HMAC GOST 34.11-2012 512 bit" +#define NID_id_tc26_hmac_gost_3411_2012_512 989 +#define OBJ_id_tc26_hmac_gost_3411_2012_512 OBJ_id_tc26_mac,2L + +#define SN_id_tc26_cipher "id-tc26-cipher" +#define NID_id_tc26_cipher 990 +#define OBJ_id_tc26_cipher OBJ_id_tc26_algorithms,5L + +#define SN_id_tc26_cipher_gostr3412_2015_magma "id-tc26-cipher-gostr3412-2015-magma" +#define NID_id_tc26_cipher_gostr3412_2015_magma 1173 +#define OBJ_id_tc26_cipher_gostr3412_2015_magma OBJ_id_tc26_cipher,1L + +#define SN_id_tc26_cipher_gostr3412_2015_magma_ctracpkm "id-tc26-cipher-gostr3412-2015-magma-ctracpkm" +#define NID_id_tc26_cipher_gostr3412_2015_magma_ctracpkm 1174 +#define OBJ_id_tc26_cipher_gostr3412_2015_magma_ctracpkm OBJ_id_tc26_cipher_gostr3412_2015_magma,1L + +#define SN_id_tc26_cipher_gostr3412_2015_magma_ctracpkm_omac "id-tc26-cipher-gostr3412-2015-magma-ctracpkm-omac" +#define NID_id_tc26_cipher_gostr3412_2015_magma_ctracpkm_omac 1175 +#define OBJ_id_tc26_cipher_gostr3412_2015_magma_ctracpkm_omac OBJ_id_tc26_cipher_gostr3412_2015_magma,2L + +#define SN_id_tc26_cipher_gostr3412_2015_kuznyechik "id-tc26-cipher-gostr3412-2015-kuznyechik" +#define NID_id_tc26_cipher_gostr3412_2015_kuznyechik 1176 +#define OBJ_id_tc26_cipher_gostr3412_2015_kuznyechik OBJ_id_tc26_cipher,2L + +#define SN_id_tc26_cipher_gostr3412_2015_kuznyechik_ctracpkm "id-tc26-cipher-gostr3412-2015-kuznyechik-ctracpkm" +#define NID_id_tc26_cipher_gostr3412_2015_kuznyechik_ctracpkm 1177 +#define 
OBJ_id_tc26_cipher_gostr3412_2015_kuznyechik_ctracpkm OBJ_id_tc26_cipher_gostr3412_2015_kuznyechik,1L + +#define SN_id_tc26_cipher_gostr3412_2015_kuznyechik_ctracpkm_omac "id-tc26-cipher-gostr3412-2015-kuznyechik-ctracpkm-omac" +#define NID_id_tc26_cipher_gostr3412_2015_kuznyechik_ctracpkm_omac 1178 +#define OBJ_id_tc26_cipher_gostr3412_2015_kuznyechik_ctracpkm_omac OBJ_id_tc26_cipher_gostr3412_2015_kuznyechik,2L + +#define SN_id_tc26_agreement "id-tc26-agreement" +#define NID_id_tc26_agreement 991 +#define OBJ_id_tc26_agreement OBJ_id_tc26_algorithms,6L + +#define SN_id_tc26_agreement_gost_3410_2012_256 "id-tc26-agreement-gost-3410-2012-256" +#define NID_id_tc26_agreement_gost_3410_2012_256 992 +#define OBJ_id_tc26_agreement_gost_3410_2012_256 OBJ_id_tc26_agreement,1L + +#define SN_id_tc26_agreement_gost_3410_2012_512 "id-tc26-agreement-gost-3410-2012-512" +#define NID_id_tc26_agreement_gost_3410_2012_512 993 +#define OBJ_id_tc26_agreement_gost_3410_2012_512 OBJ_id_tc26_agreement,2L + +#define SN_id_tc26_wrap "id-tc26-wrap" +#define NID_id_tc26_wrap 1179 +#define OBJ_id_tc26_wrap OBJ_id_tc26_algorithms,7L + +#define SN_id_tc26_wrap_gostr3412_2015_magma "id-tc26-wrap-gostr3412-2015-magma" +#define NID_id_tc26_wrap_gostr3412_2015_magma 1180 +#define OBJ_id_tc26_wrap_gostr3412_2015_magma OBJ_id_tc26_wrap,1L + +#define SN_id_tc26_wrap_gostr3412_2015_magma_kexp15 "id-tc26-wrap-gostr3412-2015-magma-kexp15" +#define NID_id_tc26_wrap_gostr3412_2015_magma_kexp15 1181 +#define OBJ_id_tc26_wrap_gostr3412_2015_magma_kexp15 OBJ_id_tc26_wrap_gostr3412_2015_magma,1L + +#define SN_id_tc26_wrap_gostr3412_2015_kuznyechik "id-tc26-wrap-gostr3412-2015-kuznyechik" +#define NID_id_tc26_wrap_gostr3412_2015_kuznyechik 1182 +#define OBJ_id_tc26_wrap_gostr3412_2015_kuznyechik OBJ_id_tc26_wrap,2L + +#define SN_id_tc26_wrap_gostr3412_2015_kuznyechik_kexp15 "id-tc26-wrap-gostr3412-2015-kuznyechik-kexp15" +#define NID_id_tc26_wrap_gostr3412_2015_kuznyechik_kexp15 1183 +#define OBJ_id_tc26_wrap_gostr3412_2015_kuznyechik_kexp15 OBJ_id_tc26_wrap_gostr3412_2015_kuznyechik,1L + +#define SN_id_tc26_constants "id-tc26-constants" +#define NID_id_tc26_constants 994 +#define OBJ_id_tc26_constants OBJ_id_tc26,2L + +#define SN_id_tc26_sign_constants "id-tc26-sign-constants" +#define NID_id_tc26_sign_constants 995 +#define OBJ_id_tc26_sign_constants OBJ_id_tc26_constants,1L + +#define SN_id_tc26_gost_3410_2012_256_constants "id-tc26-gost-3410-2012-256-constants" +#define NID_id_tc26_gost_3410_2012_256_constants 1147 +#define OBJ_id_tc26_gost_3410_2012_256_constants OBJ_id_tc26_sign_constants,1L + +#define SN_id_tc26_gost_3410_2012_256_paramSetA "id-tc26-gost-3410-2012-256-paramSetA" +#define LN_id_tc26_gost_3410_2012_256_paramSetA "GOST R 34.10-2012 (256 bit) ParamSet A" +#define NID_id_tc26_gost_3410_2012_256_paramSetA 1148 +#define OBJ_id_tc26_gost_3410_2012_256_paramSetA OBJ_id_tc26_gost_3410_2012_256_constants,1L + +#define SN_id_tc26_gost_3410_2012_256_paramSetB "id-tc26-gost-3410-2012-256-paramSetB" +#define LN_id_tc26_gost_3410_2012_256_paramSetB "GOST R 34.10-2012 (256 bit) ParamSet B" +#define NID_id_tc26_gost_3410_2012_256_paramSetB 1184 +#define OBJ_id_tc26_gost_3410_2012_256_paramSetB OBJ_id_tc26_gost_3410_2012_256_constants,2L + +#define SN_id_tc26_gost_3410_2012_256_paramSetC "id-tc26-gost-3410-2012-256-paramSetC" +#define LN_id_tc26_gost_3410_2012_256_paramSetC "GOST R 34.10-2012 (256 bit) ParamSet C" +#define NID_id_tc26_gost_3410_2012_256_paramSetC 1185 +#define OBJ_id_tc26_gost_3410_2012_256_paramSetC 
OBJ_id_tc26_gost_3410_2012_256_constants,3L + +#define SN_id_tc26_gost_3410_2012_256_paramSetD "id-tc26-gost-3410-2012-256-paramSetD" +#define LN_id_tc26_gost_3410_2012_256_paramSetD "GOST R 34.10-2012 (256 bit) ParamSet D" +#define NID_id_tc26_gost_3410_2012_256_paramSetD 1186 +#define OBJ_id_tc26_gost_3410_2012_256_paramSetD OBJ_id_tc26_gost_3410_2012_256_constants,4L + +#define SN_id_tc26_gost_3410_2012_512_constants "id-tc26-gost-3410-2012-512-constants" +#define NID_id_tc26_gost_3410_2012_512_constants 996 +#define OBJ_id_tc26_gost_3410_2012_512_constants OBJ_id_tc26_sign_constants,2L + +#define SN_id_tc26_gost_3410_2012_512_paramSetTest "id-tc26-gost-3410-2012-512-paramSetTest" +#define LN_id_tc26_gost_3410_2012_512_paramSetTest "GOST R 34.10-2012 (512 bit) testing parameter set" +#define NID_id_tc26_gost_3410_2012_512_paramSetTest 997 +#define OBJ_id_tc26_gost_3410_2012_512_paramSetTest OBJ_id_tc26_gost_3410_2012_512_constants,0L + +#define SN_id_tc26_gost_3410_2012_512_paramSetA "id-tc26-gost-3410-2012-512-paramSetA" +#define LN_id_tc26_gost_3410_2012_512_paramSetA "GOST R 34.10-2012 (512 bit) ParamSet A" +#define NID_id_tc26_gost_3410_2012_512_paramSetA 998 +#define OBJ_id_tc26_gost_3410_2012_512_paramSetA OBJ_id_tc26_gost_3410_2012_512_constants,1L + +#define SN_id_tc26_gost_3410_2012_512_paramSetB "id-tc26-gost-3410-2012-512-paramSetB" +#define LN_id_tc26_gost_3410_2012_512_paramSetB "GOST R 34.10-2012 (512 bit) ParamSet B" +#define NID_id_tc26_gost_3410_2012_512_paramSetB 999 +#define OBJ_id_tc26_gost_3410_2012_512_paramSetB OBJ_id_tc26_gost_3410_2012_512_constants,2L + +#define SN_id_tc26_gost_3410_2012_512_paramSetC "id-tc26-gost-3410-2012-512-paramSetC" +#define LN_id_tc26_gost_3410_2012_512_paramSetC "GOST R 34.10-2012 (512 bit) ParamSet C" +#define NID_id_tc26_gost_3410_2012_512_paramSetC 1149 +#define OBJ_id_tc26_gost_3410_2012_512_paramSetC OBJ_id_tc26_gost_3410_2012_512_constants,3L + +#define SN_id_tc26_digest_constants "id-tc26-digest-constants" +#define NID_id_tc26_digest_constants 1000 +#define OBJ_id_tc26_digest_constants OBJ_id_tc26_constants,2L + +#define SN_id_tc26_cipher_constants "id-tc26-cipher-constants" +#define NID_id_tc26_cipher_constants 1001 +#define OBJ_id_tc26_cipher_constants OBJ_id_tc26_constants,5L + +#define SN_id_tc26_gost_28147_constants "id-tc26-gost-28147-constants" +#define NID_id_tc26_gost_28147_constants 1002 +#define OBJ_id_tc26_gost_28147_constants OBJ_id_tc26_cipher_constants,1L + +#define SN_id_tc26_gost_28147_param_Z "id-tc26-gost-28147-param-Z" +#define LN_id_tc26_gost_28147_param_Z "GOST 28147-89 TC26 parameter set" +#define NID_id_tc26_gost_28147_param_Z 1003 +#define OBJ_id_tc26_gost_28147_param_Z OBJ_id_tc26_gost_28147_constants,1L + +#define SN_INN "INN" +#define LN_INN "INN" +#define NID_INN 1004 +#define OBJ_INN OBJ_member_body,643L,3L,131L,1L,1L + +#define SN_OGRN "OGRN" +#define LN_OGRN "OGRN" +#define NID_OGRN 1005 +#define OBJ_OGRN OBJ_member_body,643L,100L,1L + +#define SN_SNILS "SNILS" +#define LN_SNILS "SNILS" +#define NID_SNILS 1006 +#define OBJ_SNILS OBJ_member_body,643L,100L,3L + +#define SN_subjectSignTool "subjectSignTool" +#define LN_subjectSignTool "Signing Tool of Subject" +#define NID_subjectSignTool 1007 +#define OBJ_subjectSignTool OBJ_member_body,643L,100L,111L + +#define SN_issuerSignTool "issuerSignTool" +#define LN_issuerSignTool "Signing Tool of Issuer" +#define NID_issuerSignTool 1008 +#define OBJ_issuerSignTool OBJ_member_body,643L,100L,112L + +#define SN_grasshopper_ecb "grasshopper-ecb" +#define 
NID_grasshopper_ecb 1012 + +#define SN_grasshopper_ctr "grasshopper-ctr" +#define NID_grasshopper_ctr 1013 + +#define SN_grasshopper_ofb "grasshopper-ofb" +#define NID_grasshopper_ofb 1014 + +#define SN_grasshopper_cbc "grasshopper-cbc" +#define NID_grasshopper_cbc 1015 + +#define SN_grasshopper_cfb "grasshopper-cfb" +#define NID_grasshopper_cfb 1016 + +#define SN_grasshopper_mac "grasshopper-mac" +#define NID_grasshopper_mac 1017 + +#define SN_magma_ecb "magma-ecb" +#define NID_magma_ecb 1187 + +#define SN_magma_ctr "magma-ctr" +#define NID_magma_ctr 1188 + +#define SN_magma_ofb "magma-ofb" +#define NID_magma_ofb 1189 + +#define SN_magma_cbc "magma-cbc" +#define NID_magma_cbc 1190 + +#define SN_magma_cfb "magma-cfb" +#define NID_magma_cfb 1191 + +#define SN_magma_mac "magma-mac" +#define NID_magma_mac 1192 + +#define SN_camellia_128_cbc "CAMELLIA-128-CBC" +#define LN_camellia_128_cbc "camellia-128-cbc" +#define NID_camellia_128_cbc 751 +#define OBJ_camellia_128_cbc 1L,2L,392L,200011L,61L,1L,1L,1L,2L + +#define SN_camellia_192_cbc "CAMELLIA-192-CBC" +#define LN_camellia_192_cbc "camellia-192-cbc" +#define NID_camellia_192_cbc 752 +#define OBJ_camellia_192_cbc 1L,2L,392L,200011L,61L,1L,1L,1L,3L + +#define SN_camellia_256_cbc "CAMELLIA-256-CBC" +#define LN_camellia_256_cbc "camellia-256-cbc" +#define NID_camellia_256_cbc 753 +#define OBJ_camellia_256_cbc 1L,2L,392L,200011L,61L,1L,1L,1L,4L + +#define SN_id_camellia128_wrap "id-camellia128-wrap" +#define NID_id_camellia128_wrap 907 +#define OBJ_id_camellia128_wrap 1L,2L,392L,200011L,61L,1L,1L,3L,2L + +#define SN_id_camellia192_wrap "id-camellia192-wrap" +#define NID_id_camellia192_wrap 908 +#define OBJ_id_camellia192_wrap 1L,2L,392L,200011L,61L,1L,1L,3L,3L + +#define SN_id_camellia256_wrap "id-camellia256-wrap" +#define NID_id_camellia256_wrap 909 +#define OBJ_id_camellia256_wrap 1L,2L,392L,200011L,61L,1L,1L,3L,4L + +#define OBJ_ntt_ds 0L,3L,4401L,5L + +#define OBJ_camellia OBJ_ntt_ds,3L,1L,9L + +#define SN_camellia_128_ecb "CAMELLIA-128-ECB" +#define LN_camellia_128_ecb "camellia-128-ecb" +#define NID_camellia_128_ecb 754 +#define OBJ_camellia_128_ecb OBJ_camellia,1L + +#define SN_camellia_128_ofb128 "CAMELLIA-128-OFB" +#define LN_camellia_128_ofb128 "camellia-128-ofb" +#define NID_camellia_128_ofb128 766 +#define OBJ_camellia_128_ofb128 OBJ_camellia,3L + +#define SN_camellia_128_cfb128 "CAMELLIA-128-CFB" +#define LN_camellia_128_cfb128 "camellia-128-cfb" +#define NID_camellia_128_cfb128 757 +#define OBJ_camellia_128_cfb128 OBJ_camellia,4L + +#define SN_camellia_128_gcm "CAMELLIA-128-GCM" +#define LN_camellia_128_gcm "camellia-128-gcm" +#define NID_camellia_128_gcm 961 +#define OBJ_camellia_128_gcm OBJ_camellia,6L + +#define SN_camellia_128_ccm "CAMELLIA-128-CCM" +#define LN_camellia_128_ccm "camellia-128-ccm" +#define NID_camellia_128_ccm 962 +#define OBJ_camellia_128_ccm OBJ_camellia,7L + +#define SN_camellia_128_ctr "CAMELLIA-128-CTR" +#define LN_camellia_128_ctr "camellia-128-ctr" +#define NID_camellia_128_ctr 963 +#define OBJ_camellia_128_ctr OBJ_camellia,9L + +#define SN_camellia_128_cmac "CAMELLIA-128-CMAC" +#define LN_camellia_128_cmac "camellia-128-cmac" +#define NID_camellia_128_cmac 964 +#define OBJ_camellia_128_cmac OBJ_camellia,10L + +#define SN_camellia_192_ecb "CAMELLIA-192-ECB" +#define LN_camellia_192_ecb "camellia-192-ecb" +#define NID_camellia_192_ecb 755 +#define OBJ_camellia_192_ecb OBJ_camellia,21L + +#define SN_camellia_192_ofb128 "CAMELLIA-192-OFB" +#define LN_camellia_192_ofb128 "camellia-192-ofb" +#define 
NID_camellia_192_ofb128 767 +#define OBJ_camellia_192_ofb128 OBJ_camellia,23L + +#define SN_camellia_192_cfb128 "CAMELLIA-192-CFB" +#define LN_camellia_192_cfb128 "camellia-192-cfb" +#define NID_camellia_192_cfb128 758 +#define OBJ_camellia_192_cfb128 OBJ_camellia,24L + +#define SN_camellia_192_gcm "CAMELLIA-192-GCM" +#define LN_camellia_192_gcm "camellia-192-gcm" +#define NID_camellia_192_gcm 965 +#define OBJ_camellia_192_gcm OBJ_camellia,26L + +#define SN_camellia_192_ccm "CAMELLIA-192-CCM" +#define LN_camellia_192_ccm "camellia-192-ccm" +#define NID_camellia_192_ccm 966 +#define OBJ_camellia_192_ccm OBJ_camellia,27L + +#define SN_camellia_192_ctr "CAMELLIA-192-CTR" +#define LN_camellia_192_ctr "camellia-192-ctr" +#define NID_camellia_192_ctr 967 +#define OBJ_camellia_192_ctr OBJ_camellia,29L + +#define SN_camellia_192_cmac "CAMELLIA-192-CMAC" +#define LN_camellia_192_cmac "camellia-192-cmac" +#define NID_camellia_192_cmac 968 +#define OBJ_camellia_192_cmac OBJ_camellia,30L + +#define SN_camellia_256_ecb "CAMELLIA-256-ECB" +#define LN_camellia_256_ecb "camellia-256-ecb" +#define NID_camellia_256_ecb 756 +#define OBJ_camellia_256_ecb OBJ_camellia,41L + +#define SN_camellia_256_ofb128 "CAMELLIA-256-OFB" +#define LN_camellia_256_ofb128 "camellia-256-ofb" +#define NID_camellia_256_ofb128 768 +#define OBJ_camellia_256_ofb128 OBJ_camellia,43L + +#define SN_camellia_256_cfb128 "CAMELLIA-256-CFB" +#define LN_camellia_256_cfb128 "camellia-256-cfb" +#define NID_camellia_256_cfb128 759 +#define OBJ_camellia_256_cfb128 OBJ_camellia,44L + +#define SN_camellia_256_gcm "CAMELLIA-256-GCM" +#define LN_camellia_256_gcm "camellia-256-gcm" +#define NID_camellia_256_gcm 969 +#define OBJ_camellia_256_gcm OBJ_camellia,46L + +#define SN_camellia_256_ccm "CAMELLIA-256-CCM" +#define LN_camellia_256_ccm "camellia-256-ccm" +#define NID_camellia_256_ccm 970 +#define OBJ_camellia_256_ccm OBJ_camellia,47L + +#define SN_camellia_256_ctr "CAMELLIA-256-CTR" +#define LN_camellia_256_ctr "camellia-256-ctr" +#define NID_camellia_256_ctr 971 +#define OBJ_camellia_256_ctr OBJ_camellia,49L + +#define SN_camellia_256_cmac "CAMELLIA-256-CMAC" +#define LN_camellia_256_cmac "camellia-256-cmac" +#define NID_camellia_256_cmac 972 +#define OBJ_camellia_256_cmac OBJ_camellia,50L + +#define SN_camellia_128_cfb1 "CAMELLIA-128-CFB1" +#define LN_camellia_128_cfb1 "camellia-128-cfb1" +#define NID_camellia_128_cfb1 760 + +#define SN_camellia_192_cfb1 "CAMELLIA-192-CFB1" +#define LN_camellia_192_cfb1 "camellia-192-cfb1" +#define NID_camellia_192_cfb1 761 + +#define SN_camellia_256_cfb1 "CAMELLIA-256-CFB1" +#define LN_camellia_256_cfb1 "camellia-256-cfb1" +#define NID_camellia_256_cfb1 762 + +#define SN_camellia_128_cfb8 "CAMELLIA-128-CFB8" +#define LN_camellia_128_cfb8 "camellia-128-cfb8" +#define NID_camellia_128_cfb8 763 + +#define SN_camellia_192_cfb8 "CAMELLIA-192-CFB8" +#define LN_camellia_192_cfb8 "camellia-192-cfb8" +#define NID_camellia_192_cfb8 764 + +#define SN_camellia_256_cfb8 "CAMELLIA-256-CFB8" +#define LN_camellia_256_cfb8 "camellia-256-cfb8" +#define NID_camellia_256_cfb8 765 + +#define OBJ_aria 1L,2L,410L,200046L,1L,1L + +#define SN_aria_128_ecb "ARIA-128-ECB" +#define LN_aria_128_ecb "aria-128-ecb" +#define NID_aria_128_ecb 1065 +#define OBJ_aria_128_ecb OBJ_aria,1L + +#define SN_aria_128_cbc "ARIA-128-CBC" +#define LN_aria_128_cbc "aria-128-cbc" +#define NID_aria_128_cbc 1066 +#define OBJ_aria_128_cbc OBJ_aria,2L + +#define SN_aria_128_cfb128 "ARIA-128-CFB" +#define LN_aria_128_cfb128 "aria-128-cfb" +#define 
NID_aria_128_cfb128 1067 +#define OBJ_aria_128_cfb128 OBJ_aria,3L + +#define SN_aria_128_ofb128 "ARIA-128-OFB" +#define LN_aria_128_ofb128 "aria-128-ofb" +#define NID_aria_128_ofb128 1068 +#define OBJ_aria_128_ofb128 OBJ_aria,4L + +#define SN_aria_128_ctr "ARIA-128-CTR" +#define LN_aria_128_ctr "aria-128-ctr" +#define NID_aria_128_ctr 1069 +#define OBJ_aria_128_ctr OBJ_aria,5L + +#define SN_aria_192_ecb "ARIA-192-ECB" +#define LN_aria_192_ecb "aria-192-ecb" +#define NID_aria_192_ecb 1070 +#define OBJ_aria_192_ecb OBJ_aria,6L + +#define SN_aria_192_cbc "ARIA-192-CBC" +#define LN_aria_192_cbc "aria-192-cbc" +#define NID_aria_192_cbc 1071 +#define OBJ_aria_192_cbc OBJ_aria,7L + +#define SN_aria_192_cfb128 "ARIA-192-CFB" +#define LN_aria_192_cfb128 "aria-192-cfb" +#define NID_aria_192_cfb128 1072 +#define OBJ_aria_192_cfb128 OBJ_aria,8L + +#define SN_aria_192_ofb128 "ARIA-192-OFB" +#define LN_aria_192_ofb128 "aria-192-ofb" +#define NID_aria_192_ofb128 1073 +#define OBJ_aria_192_ofb128 OBJ_aria,9L + +#define SN_aria_192_ctr "ARIA-192-CTR" +#define LN_aria_192_ctr "aria-192-ctr" +#define NID_aria_192_ctr 1074 +#define OBJ_aria_192_ctr OBJ_aria,10L + +#define SN_aria_256_ecb "ARIA-256-ECB" +#define LN_aria_256_ecb "aria-256-ecb" +#define NID_aria_256_ecb 1075 +#define OBJ_aria_256_ecb OBJ_aria,11L + +#define SN_aria_256_cbc "ARIA-256-CBC" +#define LN_aria_256_cbc "aria-256-cbc" +#define NID_aria_256_cbc 1076 +#define OBJ_aria_256_cbc OBJ_aria,12L + +#define SN_aria_256_cfb128 "ARIA-256-CFB" +#define LN_aria_256_cfb128 "aria-256-cfb" +#define NID_aria_256_cfb128 1077 +#define OBJ_aria_256_cfb128 OBJ_aria,13L + +#define SN_aria_256_ofb128 "ARIA-256-OFB" +#define LN_aria_256_ofb128 "aria-256-ofb" +#define NID_aria_256_ofb128 1078 +#define OBJ_aria_256_ofb128 OBJ_aria,14L + +#define SN_aria_256_ctr "ARIA-256-CTR" +#define LN_aria_256_ctr "aria-256-ctr" +#define NID_aria_256_ctr 1079 +#define OBJ_aria_256_ctr OBJ_aria,15L + +#define SN_aria_128_cfb1 "ARIA-128-CFB1" +#define LN_aria_128_cfb1 "aria-128-cfb1" +#define NID_aria_128_cfb1 1080 + +#define SN_aria_192_cfb1 "ARIA-192-CFB1" +#define LN_aria_192_cfb1 "aria-192-cfb1" +#define NID_aria_192_cfb1 1081 + +#define SN_aria_256_cfb1 "ARIA-256-CFB1" +#define LN_aria_256_cfb1 "aria-256-cfb1" +#define NID_aria_256_cfb1 1082 + +#define SN_aria_128_cfb8 "ARIA-128-CFB8" +#define LN_aria_128_cfb8 "aria-128-cfb8" +#define NID_aria_128_cfb8 1083 + +#define SN_aria_192_cfb8 "ARIA-192-CFB8" +#define LN_aria_192_cfb8 "aria-192-cfb8" +#define NID_aria_192_cfb8 1084 + +#define SN_aria_256_cfb8 "ARIA-256-CFB8" +#define LN_aria_256_cfb8 "aria-256-cfb8" +#define NID_aria_256_cfb8 1085 + +#define SN_aria_128_ccm "ARIA-128-CCM" +#define LN_aria_128_ccm "aria-128-ccm" +#define NID_aria_128_ccm 1120 +#define OBJ_aria_128_ccm OBJ_aria,37L + +#define SN_aria_192_ccm "ARIA-192-CCM" +#define LN_aria_192_ccm "aria-192-ccm" +#define NID_aria_192_ccm 1121 +#define OBJ_aria_192_ccm OBJ_aria,38L + +#define SN_aria_256_ccm "ARIA-256-CCM" +#define LN_aria_256_ccm "aria-256-ccm" +#define NID_aria_256_ccm 1122 +#define OBJ_aria_256_ccm OBJ_aria,39L + +#define SN_aria_128_gcm "ARIA-128-GCM" +#define LN_aria_128_gcm "aria-128-gcm" +#define NID_aria_128_gcm 1123 +#define OBJ_aria_128_gcm OBJ_aria,34L + +#define SN_aria_192_gcm "ARIA-192-GCM" +#define LN_aria_192_gcm "aria-192-gcm" +#define NID_aria_192_gcm 1124 +#define OBJ_aria_192_gcm OBJ_aria,35L + +#define SN_aria_256_gcm "ARIA-256-GCM" +#define LN_aria_256_gcm "aria-256-gcm" +#define NID_aria_256_gcm 1125 +#define OBJ_aria_256_gcm 
OBJ_aria,36L + +#define SN_kisa "KISA" +#define LN_kisa "kisa" +#define NID_kisa 773 +#define OBJ_kisa OBJ_member_body,410L,200004L + +#define SN_seed_ecb "SEED-ECB" +#define LN_seed_ecb "seed-ecb" +#define NID_seed_ecb 776 +#define OBJ_seed_ecb OBJ_kisa,1L,3L + +#define SN_seed_cbc "SEED-CBC" +#define LN_seed_cbc "seed-cbc" +#define NID_seed_cbc 777 +#define OBJ_seed_cbc OBJ_kisa,1L,4L + +#define SN_seed_cfb128 "SEED-CFB" +#define LN_seed_cfb128 "seed-cfb" +#define NID_seed_cfb128 779 +#define OBJ_seed_cfb128 OBJ_kisa,1L,5L + +#define SN_seed_ofb128 "SEED-OFB" +#define LN_seed_ofb128 "seed-ofb" +#define NID_seed_ofb128 778 +#define OBJ_seed_ofb128 OBJ_kisa,1L,6L + +#define SN_sm4_ecb "SM4-ECB" +#define LN_sm4_ecb "sm4-ecb" +#define NID_sm4_ecb 1133 +#define OBJ_sm4_ecb OBJ_sm_scheme,104L,1L + +#define SN_sm4_cbc "SM4-CBC" +#define LN_sm4_cbc "sm4-cbc" +#define NID_sm4_cbc 1134 +#define OBJ_sm4_cbc OBJ_sm_scheme,104L,2L + +#define SN_sm4_ofb128 "SM4-OFB" +#define LN_sm4_ofb128 "sm4-ofb" +#define NID_sm4_ofb128 1135 +#define OBJ_sm4_ofb128 OBJ_sm_scheme,104L,3L + +#define SN_sm4_cfb128 "SM4-CFB" +#define LN_sm4_cfb128 "sm4-cfb" +#define NID_sm4_cfb128 1137 +#define OBJ_sm4_cfb128 OBJ_sm_scheme,104L,4L + +#define SN_sm4_cfb1 "SM4-CFB1" +#define LN_sm4_cfb1 "sm4-cfb1" +#define NID_sm4_cfb1 1136 +#define OBJ_sm4_cfb1 OBJ_sm_scheme,104L,5L + +#define SN_sm4_cfb8 "SM4-CFB8" +#define LN_sm4_cfb8 "sm4-cfb8" +#define NID_sm4_cfb8 1138 +#define OBJ_sm4_cfb8 OBJ_sm_scheme,104L,6L + +#define SN_sm4_ctr "SM4-CTR" +#define LN_sm4_ctr "sm4-ctr" +#define NID_sm4_ctr 1139 +#define OBJ_sm4_ctr OBJ_sm_scheme,104L,7L + +#define SN_hmac "HMAC" +#define LN_hmac "hmac" +#define NID_hmac 855 + +#define SN_cmac "CMAC" +#define LN_cmac "cmac" +#define NID_cmac 894 + +#define SN_rc4_hmac_md5 "RC4-HMAC-MD5" +#define LN_rc4_hmac_md5 "rc4-hmac-md5" +#define NID_rc4_hmac_md5 915 + +#define SN_aes_128_cbc_hmac_sha1 "AES-128-CBC-HMAC-SHA1" +#define LN_aes_128_cbc_hmac_sha1 "aes-128-cbc-hmac-sha1" +#define NID_aes_128_cbc_hmac_sha1 916 + +#define SN_aes_192_cbc_hmac_sha1 "AES-192-CBC-HMAC-SHA1" +#define LN_aes_192_cbc_hmac_sha1 "aes-192-cbc-hmac-sha1" +#define NID_aes_192_cbc_hmac_sha1 917 + +#define SN_aes_256_cbc_hmac_sha1 "AES-256-CBC-HMAC-SHA1" +#define LN_aes_256_cbc_hmac_sha1 "aes-256-cbc-hmac-sha1" +#define NID_aes_256_cbc_hmac_sha1 918 + +#define SN_aes_128_cbc_hmac_sha256 "AES-128-CBC-HMAC-SHA256" +#define LN_aes_128_cbc_hmac_sha256 "aes-128-cbc-hmac-sha256" +#define NID_aes_128_cbc_hmac_sha256 948 + +#define SN_aes_192_cbc_hmac_sha256 "AES-192-CBC-HMAC-SHA256" +#define LN_aes_192_cbc_hmac_sha256 "aes-192-cbc-hmac-sha256" +#define NID_aes_192_cbc_hmac_sha256 949 + +#define SN_aes_256_cbc_hmac_sha256 "AES-256-CBC-HMAC-SHA256" +#define LN_aes_256_cbc_hmac_sha256 "aes-256-cbc-hmac-sha256" +#define NID_aes_256_cbc_hmac_sha256 950 + +#define SN_chacha20_poly1305 "ChaCha20-Poly1305" +#define LN_chacha20_poly1305 "chacha20-poly1305" +#define NID_chacha20_poly1305 1018 + +#define SN_chacha20 "ChaCha20" +#define LN_chacha20 "chacha20" +#define NID_chacha20 1019 + +#define SN_dhpublicnumber "dhpublicnumber" +#define LN_dhpublicnumber "X9.42 DH" +#define NID_dhpublicnumber 920 +#define OBJ_dhpublicnumber OBJ_ISO_US,10046L,2L,1L + +#define SN_brainpoolP160r1 "brainpoolP160r1" +#define NID_brainpoolP160r1 921 +#define OBJ_brainpoolP160r1 1L,3L,36L,3L,3L,2L,8L,1L,1L,1L + +#define SN_brainpoolP160t1 "brainpoolP160t1" +#define NID_brainpoolP160t1 922 +#define OBJ_brainpoolP160t1 1L,3L,36L,3L,3L,2L,8L,1L,1L,2L + +#define 
SN_brainpoolP192r1 "brainpoolP192r1" +#define NID_brainpoolP192r1 923 +#define OBJ_brainpoolP192r1 1L,3L,36L,3L,3L,2L,8L,1L,1L,3L + +#define SN_brainpoolP192t1 "brainpoolP192t1" +#define NID_brainpoolP192t1 924 +#define OBJ_brainpoolP192t1 1L,3L,36L,3L,3L,2L,8L,1L,1L,4L + +#define SN_brainpoolP224r1 "brainpoolP224r1" +#define NID_brainpoolP224r1 925 +#define OBJ_brainpoolP224r1 1L,3L,36L,3L,3L,2L,8L,1L,1L,5L + +#define SN_brainpoolP224t1 "brainpoolP224t1" +#define NID_brainpoolP224t1 926 +#define OBJ_brainpoolP224t1 1L,3L,36L,3L,3L,2L,8L,1L,1L,6L + +#define SN_brainpoolP256r1 "brainpoolP256r1" +#define NID_brainpoolP256r1 927 +#define OBJ_brainpoolP256r1 1L,3L,36L,3L,3L,2L,8L,1L,1L,7L + +#define SN_brainpoolP256t1 "brainpoolP256t1" +#define NID_brainpoolP256t1 928 +#define OBJ_brainpoolP256t1 1L,3L,36L,3L,3L,2L,8L,1L,1L,8L + +#define SN_brainpoolP320r1 "brainpoolP320r1" +#define NID_brainpoolP320r1 929 +#define OBJ_brainpoolP320r1 1L,3L,36L,3L,3L,2L,8L,1L,1L,9L + +#define SN_brainpoolP320t1 "brainpoolP320t1" +#define NID_brainpoolP320t1 930 +#define OBJ_brainpoolP320t1 1L,3L,36L,3L,3L,2L,8L,1L,1L,10L + +#define SN_brainpoolP384r1 "brainpoolP384r1" +#define NID_brainpoolP384r1 931 +#define OBJ_brainpoolP384r1 1L,3L,36L,3L,3L,2L,8L,1L,1L,11L + +#define SN_brainpoolP384t1 "brainpoolP384t1" +#define NID_brainpoolP384t1 932 +#define OBJ_brainpoolP384t1 1L,3L,36L,3L,3L,2L,8L,1L,1L,12L + +#define SN_brainpoolP512r1 "brainpoolP512r1" +#define NID_brainpoolP512r1 933 +#define OBJ_brainpoolP512r1 1L,3L,36L,3L,3L,2L,8L,1L,1L,13L + +#define SN_brainpoolP512t1 "brainpoolP512t1" +#define NID_brainpoolP512t1 934 +#define OBJ_brainpoolP512t1 1L,3L,36L,3L,3L,2L,8L,1L,1L,14L + +#define OBJ_x9_63_scheme 1L,3L,133L,16L,840L,63L,0L + +#define OBJ_secg_scheme OBJ_certicom_arc,1L + +#define SN_dhSinglePass_stdDH_sha1kdf_scheme "dhSinglePass-stdDH-sha1kdf-scheme" +#define NID_dhSinglePass_stdDH_sha1kdf_scheme 936 +#define OBJ_dhSinglePass_stdDH_sha1kdf_scheme OBJ_x9_63_scheme,2L + +#define SN_dhSinglePass_stdDH_sha224kdf_scheme "dhSinglePass-stdDH-sha224kdf-scheme" +#define NID_dhSinglePass_stdDH_sha224kdf_scheme 937 +#define OBJ_dhSinglePass_stdDH_sha224kdf_scheme OBJ_secg_scheme,11L,0L + +#define SN_dhSinglePass_stdDH_sha256kdf_scheme "dhSinglePass-stdDH-sha256kdf-scheme" +#define NID_dhSinglePass_stdDH_sha256kdf_scheme 938 +#define OBJ_dhSinglePass_stdDH_sha256kdf_scheme OBJ_secg_scheme,11L,1L + +#define SN_dhSinglePass_stdDH_sha384kdf_scheme "dhSinglePass-stdDH-sha384kdf-scheme" +#define NID_dhSinglePass_stdDH_sha384kdf_scheme 939 +#define OBJ_dhSinglePass_stdDH_sha384kdf_scheme OBJ_secg_scheme,11L,2L + +#define SN_dhSinglePass_stdDH_sha512kdf_scheme "dhSinglePass-stdDH-sha512kdf-scheme" +#define NID_dhSinglePass_stdDH_sha512kdf_scheme 940 +#define OBJ_dhSinglePass_stdDH_sha512kdf_scheme OBJ_secg_scheme,11L,3L + +#define SN_dhSinglePass_cofactorDH_sha1kdf_scheme "dhSinglePass-cofactorDH-sha1kdf-scheme" +#define NID_dhSinglePass_cofactorDH_sha1kdf_scheme 941 +#define OBJ_dhSinglePass_cofactorDH_sha1kdf_scheme OBJ_x9_63_scheme,3L + +#define SN_dhSinglePass_cofactorDH_sha224kdf_scheme "dhSinglePass-cofactorDH-sha224kdf-scheme" +#define NID_dhSinglePass_cofactorDH_sha224kdf_scheme 942 +#define OBJ_dhSinglePass_cofactorDH_sha224kdf_scheme OBJ_secg_scheme,14L,0L + +#define SN_dhSinglePass_cofactorDH_sha256kdf_scheme "dhSinglePass-cofactorDH-sha256kdf-scheme" +#define NID_dhSinglePass_cofactorDH_sha256kdf_scheme 943 +#define OBJ_dhSinglePass_cofactorDH_sha256kdf_scheme OBJ_secg_scheme,14L,1L + +#define 
SN_dhSinglePass_cofactorDH_sha384kdf_scheme "dhSinglePass-cofactorDH-sha384kdf-scheme" +#define NID_dhSinglePass_cofactorDH_sha384kdf_scheme 944 +#define OBJ_dhSinglePass_cofactorDH_sha384kdf_scheme OBJ_secg_scheme,14L,2L + +#define SN_dhSinglePass_cofactorDH_sha512kdf_scheme "dhSinglePass-cofactorDH-sha512kdf-scheme" +#define NID_dhSinglePass_cofactorDH_sha512kdf_scheme 945 +#define OBJ_dhSinglePass_cofactorDH_sha512kdf_scheme OBJ_secg_scheme,14L,3L + +#define SN_dh_std_kdf "dh-std-kdf" +#define NID_dh_std_kdf 946 + +#define SN_dh_cofactor_kdf "dh-cofactor-kdf" +#define NID_dh_cofactor_kdf 947 + +#define SN_ct_precert_scts "ct_precert_scts" +#define LN_ct_precert_scts "CT Precertificate SCTs" +#define NID_ct_precert_scts 951 +#define OBJ_ct_precert_scts 1L,3L,6L,1L,4L,1L,11129L,2L,4L,2L + +#define SN_ct_precert_poison "ct_precert_poison" +#define LN_ct_precert_poison "CT Precertificate Poison" +#define NID_ct_precert_poison 952 +#define OBJ_ct_precert_poison 1L,3L,6L,1L,4L,1L,11129L,2L,4L,3L + +#define SN_ct_precert_signer "ct_precert_signer" +#define LN_ct_precert_signer "CT Precertificate Signer" +#define NID_ct_precert_signer 953 +#define OBJ_ct_precert_signer 1L,3L,6L,1L,4L,1L,11129L,2L,4L,4L + +#define SN_ct_cert_scts "ct_cert_scts" +#define LN_ct_cert_scts "CT Certificate SCTs" +#define NID_ct_cert_scts 954 +#define OBJ_ct_cert_scts 1L,3L,6L,1L,4L,1L,11129L,2L,4L,5L + +#define SN_jurisdictionLocalityName "jurisdictionL" +#define LN_jurisdictionLocalityName "jurisdictionLocalityName" +#define NID_jurisdictionLocalityName 955 +#define OBJ_jurisdictionLocalityName 1L,3L,6L,1L,4L,1L,311L,60L,2L,1L,1L + +#define SN_jurisdictionStateOrProvinceName "jurisdictionST" +#define LN_jurisdictionStateOrProvinceName "jurisdictionStateOrProvinceName" +#define NID_jurisdictionStateOrProvinceName 956 +#define OBJ_jurisdictionStateOrProvinceName 1L,3L,6L,1L,4L,1L,311L,60L,2L,1L,2L + +#define SN_jurisdictionCountryName "jurisdictionC" +#define LN_jurisdictionCountryName "jurisdictionCountryName" +#define NID_jurisdictionCountryName 957 +#define OBJ_jurisdictionCountryName 1L,3L,6L,1L,4L,1L,311L,60L,2L,1L,3L + +#define SN_id_scrypt "id-scrypt" +#define LN_id_scrypt "scrypt" +#define NID_id_scrypt 973 +#define OBJ_id_scrypt 1L,3L,6L,1L,4L,1L,11591L,4L,11L + +#define SN_tls1_prf "TLS1-PRF" +#define LN_tls1_prf "tls1-prf" +#define NID_tls1_prf 1021 + +#define SN_hkdf "HKDF" +#define LN_hkdf "hkdf" +#define NID_hkdf 1036 + +#define SN_id_pkinit "id-pkinit" +#define NID_id_pkinit 1031 +#define OBJ_id_pkinit 1L,3L,6L,1L,5L,2L,3L + +#define SN_pkInitClientAuth "pkInitClientAuth" +#define LN_pkInitClientAuth "PKINIT Client Auth" +#define NID_pkInitClientAuth 1032 +#define OBJ_pkInitClientAuth OBJ_id_pkinit,4L + +#define SN_pkInitKDC "pkInitKDC" +#define LN_pkInitKDC "Signing KDC Response" +#define NID_pkInitKDC 1033 +#define OBJ_pkInitKDC OBJ_id_pkinit,5L + +#define SN_X25519 "X25519" +#define NID_X25519 1034 +#define OBJ_X25519 1L,3L,101L,110L + +#define SN_X448 "X448" +#define NID_X448 1035 +#define OBJ_X448 1L,3L,101L,111L + +#define SN_ED25519 "ED25519" +#define NID_ED25519 1087 +#define OBJ_ED25519 1L,3L,101L,112L + +#define SN_ED448 "ED448" +#define NID_ED448 1088 +#define OBJ_ED448 1L,3L,101L,113L + +#define SN_kx_rsa "KxRSA" +#define LN_kx_rsa "kx-rsa" +#define NID_kx_rsa 1037 + +#define SN_kx_ecdhe "KxECDHE" +#define LN_kx_ecdhe "kx-ecdhe" +#define NID_kx_ecdhe 1038 + +#define SN_kx_dhe "KxDHE" +#define LN_kx_dhe "kx-dhe" +#define NID_kx_dhe 1039 + +#define SN_kx_ecdhe_psk "KxECDHE-PSK" +#define 
LN_kx_ecdhe_psk "kx-ecdhe-psk" +#define NID_kx_ecdhe_psk 1040 + +#define SN_kx_dhe_psk "KxDHE-PSK" +#define LN_kx_dhe_psk "kx-dhe-psk" +#define NID_kx_dhe_psk 1041 + +#define SN_kx_rsa_psk "KxRSA_PSK" +#define LN_kx_rsa_psk "kx-rsa-psk" +#define NID_kx_rsa_psk 1042 + +#define SN_kx_psk "KxPSK" +#define LN_kx_psk "kx-psk" +#define NID_kx_psk 1043 + +#define SN_kx_srp "KxSRP" +#define LN_kx_srp "kx-srp" +#define NID_kx_srp 1044 + +#define SN_kx_gost "KxGOST" +#define LN_kx_gost "kx-gost" +#define NID_kx_gost 1045 + +#define SN_kx_any "KxANY" +#define LN_kx_any "kx-any" +#define NID_kx_any 1063 + +#define SN_auth_rsa "AuthRSA" +#define LN_auth_rsa "auth-rsa" +#define NID_auth_rsa 1046 + +#define SN_auth_ecdsa "AuthECDSA" +#define LN_auth_ecdsa "auth-ecdsa" +#define NID_auth_ecdsa 1047 + +#define SN_auth_psk "AuthPSK" +#define LN_auth_psk "auth-psk" +#define NID_auth_psk 1048 + +#define SN_auth_dss "AuthDSS" +#define LN_auth_dss "auth-dss" +#define NID_auth_dss 1049 + +#define SN_auth_gost01 "AuthGOST01" +#define LN_auth_gost01 "auth-gost01" +#define NID_auth_gost01 1050 + +#define SN_auth_gost12 "AuthGOST12" +#define LN_auth_gost12 "auth-gost12" +#define NID_auth_gost12 1051 + +#define SN_auth_srp "AuthSRP" +#define LN_auth_srp "auth-srp" +#define NID_auth_srp 1052 + +#define SN_auth_null "AuthNULL" +#define LN_auth_null "auth-null" +#define NID_auth_null 1053 + +#define SN_auth_any "AuthANY" +#define LN_auth_any "auth-any" +#define NID_auth_any 1064 + +#define SN_poly1305 "Poly1305" +#define LN_poly1305 "poly1305" +#define NID_poly1305 1061 + +#define SN_siphash "SipHash" +#define LN_siphash "siphash" +#define NID_siphash 1062 + +#define SN_ffdhe2048 "ffdhe2048" +#define NID_ffdhe2048 1126 + +#define SN_ffdhe3072 "ffdhe3072" +#define NID_ffdhe3072 1127 + +#define SN_ffdhe4096 "ffdhe4096" +#define NID_ffdhe4096 1128 + +#define SN_ffdhe6144 "ffdhe6144" +#define NID_ffdhe6144 1129 + +#define SN_ffdhe8192 "ffdhe8192" +#define NID_ffdhe8192 1130 + +#define SN_ISO_UA "ISO-UA" +#define NID_ISO_UA 1150 +#define OBJ_ISO_UA OBJ_member_body,804L + +#define SN_ua_pki "ua-pki" +#define NID_ua_pki 1151 +#define OBJ_ua_pki OBJ_ISO_UA,2L,1L,1L,1L + +#define SN_dstu28147 "dstu28147" +#define LN_dstu28147 "DSTU Gost 28147-2009" +#define NID_dstu28147 1152 +#define OBJ_dstu28147 OBJ_ua_pki,1L,1L,1L + +#define SN_dstu28147_ofb "dstu28147-ofb" +#define LN_dstu28147_ofb "DSTU Gost 28147-2009 OFB mode" +#define NID_dstu28147_ofb 1153 +#define OBJ_dstu28147_ofb OBJ_dstu28147,2L + +#define SN_dstu28147_cfb "dstu28147-cfb" +#define LN_dstu28147_cfb "DSTU Gost 28147-2009 CFB mode" +#define NID_dstu28147_cfb 1154 +#define OBJ_dstu28147_cfb OBJ_dstu28147,3L + +#define SN_dstu28147_wrap "dstu28147-wrap" +#define LN_dstu28147_wrap "DSTU Gost 28147-2009 key wrap" +#define NID_dstu28147_wrap 1155 +#define OBJ_dstu28147_wrap OBJ_dstu28147,5L + +#define SN_hmacWithDstu34311 "hmacWithDstu34311" +#define LN_hmacWithDstu34311 "HMAC DSTU Gost 34311-95" +#define NID_hmacWithDstu34311 1156 +#define OBJ_hmacWithDstu34311 OBJ_ua_pki,1L,1L,2L + +#define SN_dstu34311 "dstu34311" +#define LN_dstu34311 "DSTU Gost 34311-95" +#define NID_dstu34311 1157 +#define OBJ_dstu34311 OBJ_ua_pki,1L,2L,1L + +#define SN_dstu4145le "dstu4145le" +#define LN_dstu4145le "DSTU 4145-2002 little endian" +#define NID_dstu4145le 1158 +#define OBJ_dstu4145le OBJ_ua_pki,1L,3L,1L,1L + +#define SN_dstu4145be "dstu4145be" +#define LN_dstu4145be "DSTU 4145-2002 big endian" +#define NID_dstu4145be 1159 +#define OBJ_dstu4145be OBJ_dstu4145le,1L,1L + +#define 
SN_uacurve0 "uacurve0" +#define LN_uacurve0 "DSTU curve 0" +#define NID_uacurve0 1160 +#define OBJ_uacurve0 OBJ_dstu4145le,2L,0L + +#define SN_uacurve1 "uacurve1" +#define LN_uacurve1 "DSTU curve 1" +#define NID_uacurve1 1161 +#define OBJ_uacurve1 OBJ_dstu4145le,2L,1L + +#define SN_uacurve2 "uacurve2" +#define LN_uacurve2 "DSTU curve 2" +#define NID_uacurve2 1162 +#define OBJ_uacurve2 OBJ_dstu4145le,2L,2L + +#define SN_uacurve3 "uacurve3" +#define LN_uacurve3 "DSTU curve 3" +#define NID_uacurve3 1163 +#define OBJ_uacurve3 OBJ_dstu4145le,2L,3L + +#define SN_uacurve4 "uacurve4" +#define LN_uacurve4 "DSTU curve 4" +#define NID_uacurve4 1164 +#define OBJ_uacurve4 OBJ_dstu4145le,2L,4L + +#define SN_uacurve5 "uacurve5" +#define LN_uacurve5 "DSTU curve 5" +#define NID_uacurve5 1165 +#define OBJ_uacurve5 OBJ_dstu4145le,2L,5L + +#define SN_uacurve6 "uacurve6" +#define LN_uacurve6 "DSTU curve 6" +#define NID_uacurve6 1166 +#define OBJ_uacurve6 OBJ_dstu4145le,2L,6L + +#define SN_uacurve7 "uacurve7" +#define LN_uacurve7 "DSTU curve 7" +#define NID_uacurve7 1167 +#define OBJ_uacurve7 OBJ_dstu4145le,2L,7L + +#define SN_uacurve8 "uacurve8" +#define LN_uacurve8 "DSTU curve 8" +#define NID_uacurve8 1168 +#define OBJ_uacurve8 OBJ_dstu4145le,2L,8L + +#define SN_uacurve9 "uacurve9" +#define LN_uacurve9 "DSTU curve 9" +#define NID_uacurve9 1169 +#define OBJ_uacurve9 OBJ_dstu4145le,2L,9L diff --git a/third_party/openssl/uos/sw_64/include/openssl/objects.h b/third_party/openssl/uos/sw_64/include/openssl/objects.h new file mode 100644 index 00000000..5e8b5762 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/objects.h @@ -0,0 +1,175 @@ +/* + * Copyright 1995-2018 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_OBJECTS_H +# define HEADER_OBJECTS_H + +# include +# include +# include +# include + +# define OBJ_NAME_TYPE_UNDEF 0x00 +# define OBJ_NAME_TYPE_MD_METH 0x01 +# define OBJ_NAME_TYPE_CIPHER_METH 0x02 +# define OBJ_NAME_TYPE_PKEY_METH 0x03 +# define OBJ_NAME_TYPE_COMP_METH 0x04 +# define OBJ_NAME_TYPE_NUM 0x05 + +# define OBJ_NAME_ALIAS 0x8000 + +# define OBJ_BSEARCH_VALUE_ON_NOMATCH 0x01 +# define OBJ_BSEARCH_FIRST_VALUE_ON_MATCH 0x02 + + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct obj_name_st { + int type; + int alias; + const char *name; + const char *data; +} OBJ_NAME; + +# define OBJ_create_and_add_object(a,b,c) OBJ_create(a,b,c) + +int OBJ_NAME_init(void); +int OBJ_NAME_new_index(unsigned long (*hash_func) (const char *), + int (*cmp_func) (const char *, const char *), + void (*free_func) (const char *, int, const char *)); +const char *OBJ_NAME_get(const char *name, int type); +int OBJ_NAME_add(const char *name, int type, const char *data); +int OBJ_NAME_remove(const char *name, int type); +void OBJ_NAME_cleanup(int type); /* -1 for everything */ +void OBJ_NAME_do_all(int type, void (*fn) (const OBJ_NAME *, void *arg), + void *arg); +void OBJ_NAME_do_all_sorted(int type, + void (*fn) (const OBJ_NAME *, void *arg), + void *arg); + +ASN1_OBJECT *OBJ_dup(const ASN1_OBJECT *o); +ASN1_OBJECT *OBJ_nid2obj(int n); +const char *OBJ_nid2ln(int n); +const char *OBJ_nid2sn(int n); +int OBJ_obj2nid(const ASN1_OBJECT *o); +ASN1_OBJECT *OBJ_txt2obj(const char *s, int no_name); +int OBJ_obj2txt(char *buf, int buf_len, const ASN1_OBJECT *a, int no_name); +int OBJ_txt2nid(const char *s); +int OBJ_ln2nid(const char *s); +int OBJ_sn2nid(const char *s); +int OBJ_cmp(const ASN1_OBJECT *a, const ASN1_OBJECT *b); +const void *OBJ_bsearch_(const void *key, const void *base, int num, int size, + int (*cmp) (const void *, const void *)); +const void *OBJ_bsearch_ex_(const void *key, const void *base, int num, + int size, + int (*cmp) (const void *, const void *), + int flags); + +# define _DECLARE_OBJ_BSEARCH_CMP_FN(scope, type1, type2, nm) \ + static int nm##_cmp_BSEARCH_CMP_FN(const void *, const void *); \ + static int nm##_cmp(type1 const *, type2 const *); \ + scope type2 * OBJ_bsearch_##nm(type1 *key, type2 const *base, int num) + +# define DECLARE_OBJ_BSEARCH_CMP_FN(type1, type2, cmp) \ + _DECLARE_OBJ_BSEARCH_CMP_FN(static, type1, type2, cmp) +# define DECLARE_OBJ_BSEARCH_GLOBAL_CMP_FN(type1, type2, nm) \ + type2 * OBJ_bsearch_##nm(type1 *key, type2 const *base, int num) + +/*- + * Unsolved problem: if a type is actually a pointer type, like + * nid_triple is, then its impossible to get a const where you need + * it. Consider: + * + * typedef int nid_triple[3]; + * const void *a_; + * const nid_triple const *a = a_; + * + * The assignment discards a const because what you really want is: + * + * const int const * const *a = a_; + * + * But if you do that, you lose the fact that a is an array of 3 ints, + * which breaks comparison functions. + * + * Thus we end up having to cast, sadly, or unpack the + * declarations. Or, as I finally did in this case, declare nid_triple + * to be a struct, which it should have been in the first place. + * + * Ben, August 2008. 
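A minimal usage sketch of the name/NID lookups declared above (not part of the vendored header): print_oid_info() is a hypothetical helper, and ASN1_OBJECT_free() / NID_undef come from asn1.h and obj_mac.h rather than from this file.

#include <stdio.h>
#include <openssl/objects.h>

static void print_oid_info(const char *txt)
{
    /* OBJ_txt2obj() accepts a short name, a long name, or a dotted OID string. */
    ASN1_OBJECT *obj = OBJ_txt2obj(txt, 0);
    if (obj == NULL)
        return;

    char dotted[80];
    OBJ_obj2txt(dotted, sizeof(dotted), obj, 1);   /* no_name=1 forces the numeric form */

    int nid = OBJ_obj2nid(obj);
    printf("%s -> NID %d (%s), OID %s\n", txt, nid,
           nid != NID_undef ? OBJ_nid2ln(nid) : "not built in", dotted);

    ASN1_OBJECT_free(obj);
}

/* e.g. print_oid_info("brainpoolP256r1"); print_oid_info("1.3.36.3.3.2.8.1.1.7"); */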
+ * + * Also, strictly speaking not all types need be const, but handling + * the non-constness means a lot of complication, and in practice + * comparison routines do always not touch their arguments. + */ + +# define IMPLEMENT_OBJ_BSEARCH_CMP_FN(type1, type2, nm) \ + static int nm##_cmp_BSEARCH_CMP_FN(const void *a_, const void *b_) \ + { \ + type1 const *a = a_; \ + type2 const *b = b_; \ + return nm##_cmp(a,b); \ + } \ + static type2 *OBJ_bsearch_##nm(type1 *key, type2 const *base, int num) \ + { \ + return (type2 *)OBJ_bsearch_(key, base, num, sizeof(type2), \ + nm##_cmp_BSEARCH_CMP_FN); \ + } \ + extern void dummy_prototype(void) + +# define IMPLEMENT_OBJ_BSEARCH_GLOBAL_CMP_FN(type1, type2, nm) \ + static int nm##_cmp_BSEARCH_CMP_FN(const void *a_, const void *b_) \ + { \ + type1 const *a = a_; \ + type2 const *b = b_; \ + return nm##_cmp(a,b); \ + } \ + type2 *OBJ_bsearch_##nm(type1 *key, type2 const *base, int num) \ + { \ + return (type2 *)OBJ_bsearch_(key, base, num, sizeof(type2), \ + nm##_cmp_BSEARCH_CMP_FN); \ + } \ + extern void dummy_prototype(void) + +# define OBJ_bsearch(type1,key,type2,base,num,cmp) \ + ((type2 *)OBJ_bsearch_(CHECKED_PTR_OF(type1,key),CHECKED_PTR_OF(type2,base), \ + num,sizeof(type2), \ + ((void)CHECKED_PTR_OF(type1,cmp##_type_1), \ + (void)CHECKED_PTR_OF(type2,cmp##_type_2), \ + cmp##_BSEARCH_CMP_FN))) + +# define OBJ_bsearch_ex(type1,key,type2,base,num,cmp,flags) \ + ((type2 *)OBJ_bsearch_ex_(CHECKED_PTR_OF(type1,key),CHECKED_PTR_OF(type2,base), \ + num,sizeof(type2), \ + ((void)CHECKED_PTR_OF(type1,cmp##_type_1), \ + (void)type_2=CHECKED_PTR_OF(type2,cmp##_type_2), \ + cmp##_BSEARCH_CMP_FN)),flags) + +int OBJ_new_nid(int num); +int OBJ_add_object(const ASN1_OBJECT *obj); +int OBJ_create(const char *oid, const char *sn, const char *ln); +#if OPENSSL_API_COMPAT < 0x10100000L +# define OBJ_cleanup() while(0) continue +#endif +int OBJ_create_objects(BIO *in); + +size_t OBJ_length(const ASN1_OBJECT *obj); +const unsigned char *OBJ_get0_data(const ASN1_OBJECT *obj); + +int OBJ_find_sigid_algs(int signid, int *pdig_nid, int *ppkey_nid); +int OBJ_find_sigid_by_algs(int *psignid, int dig_nid, int pkey_nid); +int OBJ_add_sigid(int signid, int dig_id, int pkey_id); +void OBJ_sigid_free(void); + + +# ifdef __cplusplus +} +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/objectserr.h b/third_party/openssl/uos/sw_64/include/openssl/objectserr.h new file mode 100644 index 00000000..02e166f1 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/objectserr.h @@ -0,0 +1,42 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_OBJERR_H +# define HEADER_OBJERR_H + +# ifndef HEADER_SYMHACKS_H +# include +# endif + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_OBJ_strings(void); + +/* + * OBJ function codes. + */ +# define OBJ_F_OBJ_ADD_OBJECT 105 +# define OBJ_F_OBJ_ADD_SIGID 107 +# define OBJ_F_OBJ_CREATE 100 +# define OBJ_F_OBJ_DUP 101 +# define OBJ_F_OBJ_NAME_NEW_INDEX 106 +# define OBJ_F_OBJ_NID2LN 102 +# define OBJ_F_OBJ_NID2OBJ 103 +# define OBJ_F_OBJ_NID2SN 104 +# define OBJ_F_OBJ_TXT2OBJ 108 + +/* + * OBJ reason codes. 
+ */ +# define OBJ_R_OID_EXISTS 102 +# define OBJ_R_UNKNOWN_NID 101 + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/ocsp.h b/third_party/openssl/uos/sw_64/include/openssl/ocsp.h new file mode 100644 index 00000000..4d759a49 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/ocsp.h @@ -0,0 +1,352 @@ +/* + * Copyright 2000-2019 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_OCSP_H +# define HEADER_OCSP_H + +#include + +/* + * These definitions are outside the OPENSSL_NO_OCSP guard because although for + * historical reasons they have OCSP_* names, they can actually be used + * independently of OCSP. E.g. see RFC5280 + */ +/*- + * CRLReason ::= ENUMERATED { + * unspecified (0), + * keyCompromise (1), + * cACompromise (2), + * affiliationChanged (3), + * superseded (4), + * cessationOfOperation (5), + * certificateHold (6), + * removeFromCRL (8) } + */ +# define OCSP_REVOKED_STATUS_NOSTATUS -1 +# define OCSP_REVOKED_STATUS_UNSPECIFIED 0 +# define OCSP_REVOKED_STATUS_KEYCOMPROMISE 1 +# define OCSP_REVOKED_STATUS_CACOMPROMISE 2 +# define OCSP_REVOKED_STATUS_AFFILIATIONCHANGED 3 +# define OCSP_REVOKED_STATUS_SUPERSEDED 4 +# define OCSP_REVOKED_STATUS_CESSATIONOFOPERATION 5 +# define OCSP_REVOKED_STATUS_CERTIFICATEHOLD 6 +# define OCSP_REVOKED_STATUS_REMOVEFROMCRL 8 + + +# ifndef OPENSSL_NO_OCSP + +# include +# include +# include +# include +# include + +#ifdef __cplusplus +extern "C" { +#endif + +/* Various flags and values */ + +# define OCSP_DEFAULT_NONCE_LENGTH 16 + +# define OCSP_NOCERTS 0x1 +# define OCSP_NOINTERN 0x2 +# define OCSP_NOSIGS 0x4 +# define OCSP_NOCHAIN 0x8 +# define OCSP_NOVERIFY 0x10 +# define OCSP_NOEXPLICIT 0x20 +# define OCSP_NOCASIGN 0x40 +# define OCSP_NODELEGATED 0x80 +# define OCSP_NOCHECKS 0x100 +# define OCSP_TRUSTOTHER 0x200 +# define OCSP_RESPID_KEY 0x400 +# define OCSP_NOTIME 0x800 + +typedef struct ocsp_cert_id_st OCSP_CERTID; + +DEFINE_STACK_OF(OCSP_CERTID) + +typedef struct ocsp_one_request_st OCSP_ONEREQ; + +DEFINE_STACK_OF(OCSP_ONEREQ) + +typedef struct ocsp_req_info_st OCSP_REQINFO; +typedef struct ocsp_signature_st OCSP_SIGNATURE; +typedef struct ocsp_request_st OCSP_REQUEST; + +# define OCSP_RESPONSE_STATUS_SUCCESSFUL 0 +# define OCSP_RESPONSE_STATUS_MALFORMEDREQUEST 1 +# define OCSP_RESPONSE_STATUS_INTERNALERROR 2 +# define OCSP_RESPONSE_STATUS_TRYLATER 3 +# define OCSP_RESPONSE_STATUS_SIGREQUIRED 5 +# define OCSP_RESPONSE_STATUS_UNAUTHORIZED 6 + +typedef struct ocsp_resp_bytes_st OCSP_RESPBYTES; + +# define V_OCSP_RESPID_NAME 0 +# define V_OCSP_RESPID_KEY 1 + +DEFINE_STACK_OF(OCSP_RESPID) + +typedef struct ocsp_revoked_info_st OCSP_REVOKEDINFO; + +# define V_OCSP_CERTSTATUS_GOOD 0 +# define V_OCSP_CERTSTATUS_REVOKED 1 +# define V_OCSP_CERTSTATUS_UNKNOWN 2 + +typedef struct ocsp_cert_status_st OCSP_CERTSTATUS; +typedef struct ocsp_single_response_st OCSP_SINGLERESP; + +DEFINE_STACK_OF(OCSP_SINGLERESP) + +typedef struct ocsp_response_data_st OCSP_RESPDATA; + +typedef struct ocsp_basic_response_st OCSP_BASICRESP; + +typedef struct ocsp_crl_id_st OCSP_CRLID; +typedef struct ocsp_service_locator_st OCSP_SERVICELOC; + +# define PEM_STRING_OCSP_REQUEST "OCSP REQUEST" +# define PEM_STRING_OCSP_RESPONSE "OCSP RESPONSE" + +# define 
d2i_OCSP_REQUEST_bio(bp,p) ASN1_d2i_bio_of(OCSP_REQUEST,OCSP_REQUEST_new,d2i_OCSP_REQUEST,bp,p) + +# define d2i_OCSP_RESPONSE_bio(bp,p) ASN1_d2i_bio_of(OCSP_RESPONSE,OCSP_RESPONSE_new,d2i_OCSP_RESPONSE,bp,p) + +# define PEM_read_bio_OCSP_REQUEST(bp,x,cb) (OCSP_REQUEST *)PEM_ASN1_read_bio( \ + (char *(*)())d2i_OCSP_REQUEST,PEM_STRING_OCSP_REQUEST, \ + bp,(char **)(x),cb,NULL) + +# define PEM_read_bio_OCSP_RESPONSE(bp,x,cb) (OCSP_RESPONSE *)PEM_ASN1_read_bio(\ + (char *(*)())d2i_OCSP_RESPONSE,PEM_STRING_OCSP_RESPONSE, \ + bp,(char **)(x),cb,NULL) + +# define PEM_write_bio_OCSP_REQUEST(bp,o) \ + PEM_ASN1_write_bio((int (*)())i2d_OCSP_REQUEST,PEM_STRING_OCSP_REQUEST,\ + bp,(char *)(o), NULL,NULL,0,NULL,NULL) + +# define PEM_write_bio_OCSP_RESPONSE(bp,o) \ + PEM_ASN1_write_bio((int (*)())i2d_OCSP_RESPONSE,PEM_STRING_OCSP_RESPONSE,\ + bp,(char *)(o), NULL,NULL,0,NULL,NULL) + +# define i2d_OCSP_RESPONSE_bio(bp,o) ASN1_i2d_bio_of(OCSP_RESPONSE,i2d_OCSP_RESPONSE,bp,o) + +# define i2d_OCSP_REQUEST_bio(bp,o) ASN1_i2d_bio_of(OCSP_REQUEST,i2d_OCSP_REQUEST,bp,o) + +# define ASN1_BIT_STRING_digest(data,type,md,len) \ + ASN1_item_digest(ASN1_ITEM_rptr(ASN1_BIT_STRING),type,data,md,len) + +# define OCSP_CERTSTATUS_dup(cs)\ + (OCSP_CERTSTATUS*)ASN1_dup((int(*)())i2d_OCSP_CERTSTATUS,\ + (char *(*)())d2i_OCSP_CERTSTATUS,(char *)(cs)) + +OCSP_CERTID *OCSP_CERTID_dup(OCSP_CERTID *id); + +OCSP_RESPONSE *OCSP_sendreq_bio(BIO *b, const char *path, OCSP_REQUEST *req); +OCSP_REQ_CTX *OCSP_sendreq_new(BIO *io, const char *path, OCSP_REQUEST *req, + int maxline); +int OCSP_REQ_CTX_nbio(OCSP_REQ_CTX *rctx); +int OCSP_sendreq_nbio(OCSP_RESPONSE **presp, OCSP_REQ_CTX *rctx); +OCSP_REQ_CTX *OCSP_REQ_CTX_new(BIO *io, int maxline); +void OCSP_REQ_CTX_free(OCSP_REQ_CTX *rctx); +void OCSP_set_max_response_length(OCSP_REQ_CTX *rctx, unsigned long len); +int OCSP_REQ_CTX_i2d(OCSP_REQ_CTX *rctx, const ASN1_ITEM *it, + ASN1_VALUE *val); +int OCSP_REQ_CTX_nbio_d2i(OCSP_REQ_CTX *rctx, ASN1_VALUE **pval, + const ASN1_ITEM *it); +BIO *OCSP_REQ_CTX_get0_mem_bio(OCSP_REQ_CTX *rctx); +int OCSP_REQ_CTX_http(OCSP_REQ_CTX *rctx, const char *op, const char *path); +int OCSP_REQ_CTX_set1_req(OCSP_REQ_CTX *rctx, OCSP_REQUEST *req); +int OCSP_REQ_CTX_add1_header(OCSP_REQ_CTX *rctx, + const char *name, const char *value); + +OCSP_CERTID *OCSP_cert_to_id(const EVP_MD *dgst, const X509 *subject, + const X509 *issuer); + +OCSP_CERTID *OCSP_cert_id_new(const EVP_MD *dgst, + const X509_NAME *issuerName, + const ASN1_BIT_STRING *issuerKey, + const ASN1_INTEGER *serialNumber); + +OCSP_ONEREQ *OCSP_request_add0_id(OCSP_REQUEST *req, OCSP_CERTID *cid); + +int OCSP_request_add1_nonce(OCSP_REQUEST *req, unsigned char *val, int len); +int OCSP_basic_add1_nonce(OCSP_BASICRESP *resp, unsigned char *val, int len); +int OCSP_check_nonce(OCSP_REQUEST *req, OCSP_BASICRESP *bs); +int OCSP_copy_nonce(OCSP_BASICRESP *resp, OCSP_REQUEST *req); + +int OCSP_request_set1_name(OCSP_REQUEST *req, X509_NAME *nm); +int OCSP_request_add1_cert(OCSP_REQUEST *req, X509 *cert); + +int OCSP_request_sign(OCSP_REQUEST *req, + X509 *signer, + EVP_PKEY *key, + const EVP_MD *dgst, + STACK_OF(X509) *certs, unsigned long flags); + +int OCSP_response_status(OCSP_RESPONSE *resp); +OCSP_BASICRESP *OCSP_response_get1_basic(OCSP_RESPONSE *resp); + +const ASN1_OCTET_STRING *OCSP_resp_get0_signature(const OCSP_BASICRESP *bs); +const X509_ALGOR *OCSP_resp_get0_tbs_sigalg(const OCSP_BASICRESP *bs); +const OCSP_RESPDATA *OCSP_resp_get0_respdata(const OCSP_BASICRESP *bs); +int 
OCSP_resp_get0_signer(OCSP_BASICRESP *bs, X509 **signer, + STACK_OF(X509) *extra_certs); + +int OCSP_resp_count(OCSP_BASICRESP *bs); +OCSP_SINGLERESP *OCSP_resp_get0(OCSP_BASICRESP *bs, int idx); +const ASN1_GENERALIZEDTIME *OCSP_resp_get0_produced_at(const OCSP_BASICRESP* bs); +const STACK_OF(X509) *OCSP_resp_get0_certs(const OCSP_BASICRESP *bs); +int OCSP_resp_get0_id(const OCSP_BASICRESP *bs, + const ASN1_OCTET_STRING **pid, + const X509_NAME **pname); +int OCSP_resp_get1_id(const OCSP_BASICRESP *bs, + ASN1_OCTET_STRING **pid, + X509_NAME **pname); + +int OCSP_resp_find(OCSP_BASICRESP *bs, OCSP_CERTID *id, int last); +int OCSP_single_get0_status(OCSP_SINGLERESP *single, int *reason, + ASN1_GENERALIZEDTIME **revtime, + ASN1_GENERALIZEDTIME **thisupd, + ASN1_GENERALIZEDTIME **nextupd); +int OCSP_resp_find_status(OCSP_BASICRESP *bs, OCSP_CERTID *id, int *status, + int *reason, + ASN1_GENERALIZEDTIME **revtime, + ASN1_GENERALIZEDTIME **thisupd, + ASN1_GENERALIZEDTIME **nextupd); +int OCSP_check_validity(ASN1_GENERALIZEDTIME *thisupd, + ASN1_GENERALIZEDTIME *nextupd, long sec, long maxsec); + +int OCSP_request_verify(OCSP_REQUEST *req, STACK_OF(X509) *certs, + X509_STORE *store, unsigned long flags); + +int OCSP_parse_url(const char *url, char **phost, char **pport, char **ppath, + int *pssl); + +int OCSP_id_issuer_cmp(const OCSP_CERTID *a, const OCSP_CERTID *b); +int OCSP_id_cmp(const OCSP_CERTID *a, const OCSP_CERTID *b); + +int OCSP_request_onereq_count(OCSP_REQUEST *req); +OCSP_ONEREQ *OCSP_request_onereq_get0(OCSP_REQUEST *req, int i); +OCSP_CERTID *OCSP_onereq_get0_id(OCSP_ONEREQ *one); +int OCSP_id_get0_info(ASN1_OCTET_STRING **piNameHash, ASN1_OBJECT **pmd, + ASN1_OCTET_STRING **pikeyHash, + ASN1_INTEGER **pserial, OCSP_CERTID *cid); +int OCSP_request_is_signed(OCSP_REQUEST *req); +OCSP_RESPONSE *OCSP_response_create(int status, OCSP_BASICRESP *bs); +OCSP_SINGLERESP *OCSP_basic_add1_status(OCSP_BASICRESP *rsp, + OCSP_CERTID *cid, + int status, int reason, + ASN1_TIME *revtime, + ASN1_TIME *thisupd, + ASN1_TIME *nextupd); +int OCSP_basic_add1_cert(OCSP_BASICRESP *resp, X509 *cert); +int OCSP_basic_sign(OCSP_BASICRESP *brsp, + X509 *signer, EVP_PKEY *key, const EVP_MD *dgst, + STACK_OF(X509) *certs, unsigned long flags); +int OCSP_basic_sign_ctx(OCSP_BASICRESP *brsp, + X509 *signer, EVP_MD_CTX *ctx, + STACK_OF(X509) *certs, unsigned long flags); +int OCSP_RESPID_set_by_name(OCSP_RESPID *respid, X509 *cert); +int OCSP_RESPID_set_by_key(OCSP_RESPID *respid, X509 *cert); +int OCSP_RESPID_match(OCSP_RESPID *respid, X509 *cert); + +X509_EXTENSION *OCSP_crlID_new(const char *url, long *n, char *tim); + +X509_EXTENSION *OCSP_accept_responses_new(char **oids); + +X509_EXTENSION *OCSP_archive_cutoff_new(char *tim); + +X509_EXTENSION *OCSP_url_svcloc_new(X509_NAME *issuer, const char **urls); + +int OCSP_REQUEST_get_ext_count(OCSP_REQUEST *x); +int OCSP_REQUEST_get_ext_by_NID(OCSP_REQUEST *x, int nid, int lastpos); +int OCSP_REQUEST_get_ext_by_OBJ(OCSP_REQUEST *x, const ASN1_OBJECT *obj, + int lastpos); +int OCSP_REQUEST_get_ext_by_critical(OCSP_REQUEST *x, int crit, int lastpos); +X509_EXTENSION *OCSP_REQUEST_get_ext(OCSP_REQUEST *x, int loc); +X509_EXTENSION *OCSP_REQUEST_delete_ext(OCSP_REQUEST *x, int loc); +void *OCSP_REQUEST_get1_ext_d2i(OCSP_REQUEST *x, int nid, int *crit, + int *idx); +int OCSP_REQUEST_add1_ext_i2d(OCSP_REQUEST *x, int nid, void *value, int crit, + unsigned long flags); +int OCSP_REQUEST_add_ext(OCSP_REQUEST *x, X509_EXTENSION *ex, int loc); + +int 
OCSP_ONEREQ_get_ext_count(OCSP_ONEREQ *x); +int OCSP_ONEREQ_get_ext_by_NID(OCSP_ONEREQ *x, int nid, int lastpos); +int OCSP_ONEREQ_get_ext_by_OBJ(OCSP_ONEREQ *x, const ASN1_OBJECT *obj, int lastpos); +int OCSP_ONEREQ_get_ext_by_critical(OCSP_ONEREQ *x, int crit, int lastpos); +X509_EXTENSION *OCSP_ONEREQ_get_ext(OCSP_ONEREQ *x, int loc); +X509_EXTENSION *OCSP_ONEREQ_delete_ext(OCSP_ONEREQ *x, int loc); +void *OCSP_ONEREQ_get1_ext_d2i(OCSP_ONEREQ *x, int nid, int *crit, int *idx); +int OCSP_ONEREQ_add1_ext_i2d(OCSP_ONEREQ *x, int nid, void *value, int crit, + unsigned long flags); +int OCSP_ONEREQ_add_ext(OCSP_ONEREQ *x, X509_EXTENSION *ex, int loc); + +int OCSP_BASICRESP_get_ext_count(OCSP_BASICRESP *x); +int OCSP_BASICRESP_get_ext_by_NID(OCSP_BASICRESP *x, int nid, int lastpos); +int OCSP_BASICRESP_get_ext_by_OBJ(OCSP_BASICRESP *x, const ASN1_OBJECT *obj, + int lastpos); +int OCSP_BASICRESP_get_ext_by_critical(OCSP_BASICRESP *x, int crit, + int lastpos); +X509_EXTENSION *OCSP_BASICRESP_get_ext(OCSP_BASICRESP *x, int loc); +X509_EXTENSION *OCSP_BASICRESP_delete_ext(OCSP_BASICRESP *x, int loc); +void *OCSP_BASICRESP_get1_ext_d2i(OCSP_BASICRESP *x, int nid, int *crit, + int *idx); +int OCSP_BASICRESP_add1_ext_i2d(OCSP_BASICRESP *x, int nid, void *value, + int crit, unsigned long flags); +int OCSP_BASICRESP_add_ext(OCSP_BASICRESP *x, X509_EXTENSION *ex, int loc); + +int OCSP_SINGLERESP_get_ext_count(OCSP_SINGLERESP *x); +int OCSP_SINGLERESP_get_ext_by_NID(OCSP_SINGLERESP *x, int nid, int lastpos); +int OCSP_SINGLERESP_get_ext_by_OBJ(OCSP_SINGLERESP *x, const ASN1_OBJECT *obj, + int lastpos); +int OCSP_SINGLERESP_get_ext_by_critical(OCSP_SINGLERESP *x, int crit, + int lastpos); +X509_EXTENSION *OCSP_SINGLERESP_get_ext(OCSP_SINGLERESP *x, int loc); +X509_EXTENSION *OCSP_SINGLERESP_delete_ext(OCSP_SINGLERESP *x, int loc); +void *OCSP_SINGLERESP_get1_ext_d2i(OCSP_SINGLERESP *x, int nid, int *crit, + int *idx); +int OCSP_SINGLERESP_add1_ext_i2d(OCSP_SINGLERESP *x, int nid, void *value, + int crit, unsigned long flags); +int OCSP_SINGLERESP_add_ext(OCSP_SINGLERESP *x, X509_EXTENSION *ex, int loc); +const OCSP_CERTID *OCSP_SINGLERESP_get0_id(const OCSP_SINGLERESP *x); + +DECLARE_ASN1_FUNCTIONS(OCSP_SINGLERESP) +DECLARE_ASN1_FUNCTIONS(OCSP_CERTSTATUS) +DECLARE_ASN1_FUNCTIONS(OCSP_REVOKEDINFO) +DECLARE_ASN1_FUNCTIONS(OCSP_BASICRESP) +DECLARE_ASN1_FUNCTIONS(OCSP_RESPDATA) +DECLARE_ASN1_FUNCTIONS(OCSP_RESPID) +DECLARE_ASN1_FUNCTIONS(OCSP_RESPONSE) +DECLARE_ASN1_FUNCTIONS(OCSP_RESPBYTES) +DECLARE_ASN1_FUNCTIONS(OCSP_ONEREQ) +DECLARE_ASN1_FUNCTIONS(OCSP_CERTID) +DECLARE_ASN1_FUNCTIONS(OCSP_REQUEST) +DECLARE_ASN1_FUNCTIONS(OCSP_SIGNATURE) +DECLARE_ASN1_FUNCTIONS(OCSP_REQINFO) +DECLARE_ASN1_FUNCTIONS(OCSP_CRLID) +DECLARE_ASN1_FUNCTIONS(OCSP_SERVICELOC) + +const char *OCSP_response_status_str(long s); +const char *OCSP_cert_status_str(long s); +const char *OCSP_crl_reason_str(long s); + +int OCSP_REQUEST_print(BIO *bp, OCSP_REQUEST *a, unsigned long flags); +int OCSP_RESPONSE_print(BIO *bp, OCSP_RESPONSE *o, unsigned long flags); + +int OCSP_basic_verify(OCSP_BASICRESP *bs, STACK_OF(X509) *certs, + X509_STORE *st, unsigned long flags); + + +# ifdef __cplusplus +} +# endif +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/ocsperr.h b/third_party/openssl/uos/sw_64/include/openssl/ocsperr.h new file mode 100644 index 00000000..8dd9e01a --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/ocsperr.h @@ -0,0 +1,78 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * 
Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_OCSPERR_H +# define HEADER_OCSPERR_H + +# ifndef HEADER_SYMHACKS_H +# include +# endif + +# include + +# ifndef OPENSSL_NO_OCSP + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_OCSP_strings(void); + +/* + * OCSP function codes. + */ +# define OCSP_F_D2I_OCSP_NONCE 102 +# define OCSP_F_OCSP_BASIC_ADD1_STATUS 103 +# define OCSP_F_OCSP_BASIC_SIGN 104 +# define OCSP_F_OCSP_BASIC_SIGN_CTX 119 +# define OCSP_F_OCSP_BASIC_VERIFY 105 +# define OCSP_F_OCSP_CERT_ID_NEW 101 +# define OCSP_F_OCSP_CHECK_DELEGATED 106 +# define OCSP_F_OCSP_CHECK_IDS 107 +# define OCSP_F_OCSP_CHECK_ISSUER 108 +# define OCSP_F_OCSP_CHECK_VALIDITY 115 +# define OCSP_F_OCSP_MATCH_ISSUERID 109 +# define OCSP_F_OCSP_PARSE_URL 114 +# define OCSP_F_OCSP_REQUEST_SIGN 110 +# define OCSP_F_OCSP_REQUEST_VERIFY 116 +# define OCSP_F_OCSP_RESPONSE_GET1_BASIC 111 +# define OCSP_F_PARSE_HTTP_LINE1 118 + +/* + * OCSP reason codes. + */ +# define OCSP_R_CERTIFICATE_VERIFY_ERROR 101 +# define OCSP_R_DIGEST_ERR 102 +# define OCSP_R_ERROR_IN_NEXTUPDATE_FIELD 122 +# define OCSP_R_ERROR_IN_THISUPDATE_FIELD 123 +# define OCSP_R_ERROR_PARSING_URL 121 +# define OCSP_R_MISSING_OCSPSIGNING_USAGE 103 +# define OCSP_R_NEXTUPDATE_BEFORE_THISUPDATE 124 +# define OCSP_R_NOT_BASIC_RESPONSE 104 +# define OCSP_R_NO_CERTIFICATES_IN_CHAIN 105 +# define OCSP_R_NO_RESPONSE_DATA 108 +# define OCSP_R_NO_REVOKED_TIME 109 +# define OCSP_R_NO_SIGNER_KEY 130 +# define OCSP_R_PRIVATE_KEY_DOES_NOT_MATCH_CERTIFICATE 110 +# define OCSP_R_REQUEST_NOT_SIGNED 128 +# define OCSP_R_RESPONSE_CONTAINS_NO_REVOCATION_DATA 111 +# define OCSP_R_ROOT_CA_NOT_TRUSTED 112 +# define OCSP_R_SERVER_RESPONSE_ERROR 114 +# define OCSP_R_SERVER_RESPONSE_PARSE_ERROR 115 +# define OCSP_R_SIGNATURE_FAILURE 117 +# define OCSP_R_SIGNER_CERTIFICATE_NOT_FOUND 118 +# define OCSP_R_STATUS_EXPIRED 125 +# define OCSP_R_STATUS_NOT_YET_VALID 126 +# define OCSP_R_STATUS_TOO_OLD 127 +# define OCSP_R_UNKNOWN_MESSAGE_DIGEST 119 +# define OCSP_R_UNKNOWN_NID 120 +# define OCSP_R_UNSUPPORTED_REQUESTORNAME_TYPE 129 + +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/opensslconf.h b/third_party/openssl/uos/sw_64/include/openssl/opensslconf.h new file mode 100644 index 00000000..75070efc --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/opensslconf.h @@ -0,0 +1,197 @@ +/* + * WARNING: do not edit! + * Generated by Makefile from include/openssl/opensslconf.h.in + * + * Copyright 2016-2020 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
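A hedged sketch of how the OCSP calls vendored above are typically combined to check an already-received response for one certificate. check_ocsp_response() is a hypothetical helper, error handling is trimmed, and the 300-second skew allowance is an arbitrary choice; the *_free() calls are generated by the DECLARE_ASN1_FUNCTIONS lines in ocsp.h.

#include <openssl/ocsp.h>

static int check_ocsp_response(OCSP_RESPONSE *resp, X509 *subject, X509 *issuer,
                               STACK_OF(X509) *chain, X509_STORE *store)
{
    int ok = 0, status = 0, reason = 0;
    ASN1_GENERALIZEDTIME *revtime = NULL, *thisupd = NULL, *nextupd = NULL;
    OCSP_BASICRESP *bs = NULL;
    OCSP_CERTID *id = NULL;

    if (OCSP_response_status(resp) != OCSP_RESPONSE_STATUS_SUCCESSFUL)
        return 0;
    if ((bs = OCSP_response_get1_basic(resp)) == NULL)
        return 0;

    if (OCSP_basic_verify(bs, chain, store, 0) > 0
        && (id = OCSP_cert_to_id(NULL, subject, issuer)) != NULL /* NULL digest -> SHA-1 */
        && OCSP_resp_find_status(bs, id, &status, &reason,
                                 &revtime, &thisupd, &nextupd)
        && OCSP_check_validity(thisupd, nextupd, 300, -1))       /* 300 s skew, no max age */
        ok = (status == V_OCSP_CERTSTATUS_GOOD);

    OCSP_CERTID_free(id);
    OCSP_BASICRESP_free(bs);
    return ok;
}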
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef OPENSSL_ALGORITHM_DEFINES +# error OPENSSL_ALGORITHM_DEFINES no longer supported +#endif + +/* + * OpenSSL was configured with the following options: + */ + +#ifndef OPENSSL_NO_MD2 +# define OPENSSL_NO_MD2 +#endif +#ifndef OPENSSL_NO_RC5 +# define OPENSSL_NO_RC5 +#endif +#ifndef OPENSSL_THREADS +# define OPENSSL_THREADS +#endif +#ifndef OPENSSL_RAND_SEED_OS +# define OPENSSL_RAND_SEED_OS +#endif +#ifndef OPENSSL_NO_ASAN +# define OPENSSL_NO_ASAN +#endif +#ifndef OPENSSL_NO_CRYPTO_MDEBUG +# define OPENSSL_NO_CRYPTO_MDEBUG +#endif +#ifndef OPENSSL_NO_CRYPTO_MDEBUG_BACKTRACE +# define OPENSSL_NO_CRYPTO_MDEBUG_BACKTRACE +#endif +#ifndef OPENSSL_NO_DEVCRYPTOENG +# define OPENSSL_NO_DEVCRYPTOENG +#endif +#ifndef OPENSSL_NO_EC_NISTP_64_GCC_128 +# define OPENSSL_NO_EC_NISTP_64_GCC_128 +#endif +#ifndef OPENSSL_NO_EGD +# define OPENSSL_NO_EGD +#endif +#ifndef OPENSSL_NO_EXTERNAL_TESTS +# define OPENSSL_NO_EXTERNAL_TESTS +#endif +#ifndef OPENSSL_NO_FUZZ_AFL +# define OPENSSL_NO_FUZZ_AFL +#endif +#ifndef OPENSSL_NO_FUZZ_LIBFUZZER +# define OPENSSL_NO_FUZZ_LIBFUZZER +#endif +#ifndef OPENSSL_NO_HEARTBEATS +# define OPENSSL_NO_HEARTBEATS +#endif +#ifndef OPENSSL_NO_MSAN +# define OPENSSL_NO_MSAN +#endif +#ifndef OPENSSL_NO_SCTP +# define OPENSSL_NO_SCTP +#endif +#ifndef OPENSSL_NO_SSL_TRACE +# define OPENSSL_NO_SSL_TRACE +#endif +#ifndef OPENSSL_NO_SSL3 +# define OPENSSL_NO_SSL3 +#endif +#ifndef OPENSSL_NO_SSL3_METHOD +# define OPENSSL_NO_SSL3_METHOD +#endif +#ifndef OPENSSL_NO_UBSAN +# define OPENSSL_NO_UBSAN +#endif +#ifndef OPENSSL_NO_UNIT_TEST +# define OPENSSL_NO_UNIT_TEST +#endif +#ifndef OPENSSL_NO_WEAK_SSL_CIPHERS +# define OPENSSL_NO_WEAK_SSL_CIPHERS +#endif +#ifndef OPENSSL_NO_DYNAMIC_ENGINE +# define OPENSSL_NO_DYNAMIC_ENGINE +#endif + + +/* + * Sometimes OPENSSSL_NO_xxx ends up with an empty file and some compilers + * don't like that. This will hopefully silence them. + */ +#define NON_EMPTY_TRANSLATION_UNIT static void *dummy = &dummy; + +/* + * Applications should use -DOPENSSL_API_COMPAT= to suppress the + * declarations of functions deprecated in or before . Otherwise, they + * still won't see them if the library has been built to disable deprecated + * functions. + */ +#ifndef DECLARE_DEPRECATED +# define DECLARE_DEPRECATED(f) f; +# ifdef __GNUC__ +# if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 0) +# undef DECLARE_DEPRECATED +# define DECLARE_DEPRECATED(f) f __attribute__ ((deprecated)); +# endif +# elif defined(__SUNPRO_C) +# if (__SUNPRO_C >= 0x5130) +# undef DECLARE_DEPRECATED +# define DECLARE_DEPRECATED(f) f __attribute__ ((deprecated)); +# endif +# endif +#endif + +#ifndef OPENSSL_FILE +# ifdef OPENSSL_NO_FILENAMES +# define OPENSSL_FILE "" +# define OPENSSL_LINE 0 +# else +# define OPENSSL_FILE __FILE__ +# define OPENSSL_LINE __LINE__ +# endif +#endif + +#ifndef OPENSSL_MIN_API +# define OPENSSL_MIN_API 0 +#endif + +#if !defined(OPENSSL_API_COMPAT) || OPENSSL_API_COMPAT < OPENSSL_MIN_API +# undef OPENSSL_API_COMPAT +# define OPENSSL_API_COMPAT OPENSSL_MIN_API +#endif + +/* + * Do not deprecate things to be deprecated in version 1.2.0 before the + * OpenSSL version number matches. 
+ */ +#if OPENSSL_VERSION_NUMBER < 0x10200000L +# define DEPRECATEDIN_1_2_0(f) f; +#elif OPENSSL_API_COMPAT < 0x10200000L +# define DEPRECATEDIN_1_2_0(f) DECLARE_DEPRECATED(f) +#else +# define DEPRECATEDIN_1_2_0(f) +#endif + +#if OPENSSL_API_COMPAT < 0x10100000L +# define DEPRECATEDIN_1_1_0(f) DECLARE_DEPRECATED(f) +#else +# define DEPRECATEDIN_1_1_0(f) +#endif + +#if OPENSSL_API_COMPAT < 0x10000000L +# define DEPRECATEDIN_1_0_0(f) DECLARE_DEPRECATED(f) +#else +# define DEPRECATEDIN_1_0_0(f) +#endif + +#if OPENSSL_API_COMPAT < 0x00908000L +# define DEPRECATEDIN_0_9_8(f) DECLARE_DEPRECATED(f) +#else +# define DEPRECATEDIN_0_9_8(f) +#endif + +/* Generate 80386 code? */ +#undef I386_ONLY + +#undef OPENSSL_UNISTD +#define OPENSSL_UNISTD + +#undef OPENSSL_EXPORT_VAR_AS_FUNCTION + +/* + * The following are cipher-specific, but are part of the public API. + */ +#if !defined(OPENSSL_SYS_UEFI) +# define BN_LLONG +/* Only one for the following should be defined */ +# undef SIXTY_FOUR_BIT_LONG +# undef SIXTY_FOUR_BIT +# define THIRTY_TWO_BIT +#endif + +#define RC4_INT unsigned char + +#ifdef __cplusplus +} +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/opensslv.h b/third_party/openssl/uos/sw_64/include/openssl/opensslv.h new file mode 100644 index 00000000..0dc99ecc --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/opensslv.h @@ -0,0 +1,101 @@ +/* + * Copyright 1999-2023 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_OPENSSLV_H +# define HEADER_OPENSSLV_H + +#ifdef __cplusplus +extern "C" { +#endif + +/*- + * Numeric release version identifier: + * MNNFFPPS: major minor fix patch status + * The status nibble has one of the values 0 for development, 1 to e for betas + * 1 to 14, and f for release. The patch level is exactly that. + * For example: + * 0.9.3-dev 0x00903000 + * 0.9.3-beta1 0x00903001 + * 0.9.3-beta2-dev 0x00903002 + * 0.9.3-beta2 0x00903002 (same as ...beta2-dev) + * 0.9.3 0x0090300f + * 0.9.3a 0x0090301f + * 0.9.4 0x0090400f + * 1.2.3z 0x102031af + * + * For continuity reasons (because 0.9.5 is already out, and is coded + * 0x00905100), between 0.9.5 and 0.9.6 the coding of the patch level + * part is slightly different, by setting the highest bit. This means + * that 0.9.5a looks like this: 0x0090581f. At 0.9.6, we can start + * with 0x0090600S... + * + * (Prior to 0.9.3-dev a different scheme was used: 0.9.2b is 0x0922.) + * (Prior to 0.9.5a beta1, a different scheme was used: MMNNFFRBB for + * major minor fix final patch/beta) + */ +# define OPENSSL_VERSION_NUMBER 0x1010116fL +# define OPENSSL_VERSION_TEXT "OpenSSL 1.1.1v 1 Aug 2023" + +/*- + * The macros below are to be used for shared library (.so, .dll, ...) + * versioning. That kind of versioning works a bit differently between + * operating systems. The most usual scheme is to set a major and a minor + * number, and have the runtime loader check that the major number is equal + * to what it was at application link time, while the minor number has to + * be greater or equal to what it was at application link time. 
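A small compile-time sketch of how a consumer can pin itself to these vendored headers: the version number defined above and the OPENSSL_API_COMPAT machinery from opensslconf.h combine as below. The 0x1010100fL floor and the chosen OPENSSL_API_COMPAT value are illustrative assumptions, not requirements stated in the diff.

/* Define the compatibility level before any OpenSSL header is included,
 * then refuse to build against anything older than 1.1.1. */
#define OPENSSL_API_COMPAT 0x10100000L
#include <openssl/opensslv.h>

#if OPENSSL_VERSION_NUMBER < 0x1010100fL
# error "these vendored sw_64 headers track OpenSSL 1.1.1; an older toolchain was found"
#endif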
With this + * scheme, the version number is usually part of the file name, like this: + * + * libcrypto.so.0.9 + * + * Some unixen also make a softlink with the major version number only: + * + * libcrypto.so.0 + * + * On Tru64 and IRIX 6.x it works a little bit differently. There, the + * shared library version is stored in the file, and is actually a series + * of versions, separated by colons. The rightmost version present in the + * library when linking an application is stored in the application to be + * matched at run time. When the application is run, a check is done to + * see if the library version stored in the application matches any of the + * versions in the version string of the library itself. + * This version string can be constructed in any way, depending on what + * kind of matching is desired. However, to implement the same scheme as + * the one used in the other unixen, all compatible versions, from lowest + * to highest, should be part of the string. Consecutive builds would + * give the following versions strings: + * + * 3.0 + * 3.0:3.1 + * 3.0:3.1:3.2 + * 4.0 + * 4.0:4.1 + * + * Notice how version 4 is completely incompatible with version, and + * therefore give the breach you can see. + * + * There may be other schemes as well that I haven't yet discovered. + * + * So, here's the way it works here: first of all, the library version + * number doesn't need at all to match the overall OpenSSL version. + * However, it's nice and more understandable if it actually does. + * The current library version is stored in the macro SHLIB_VERSION_NUMBER, + * which is just a piece of text in the format "M.m.e" (Major, minor, edit). + * For the sake of Tru64, IRIX, and any other OS that behaves in similar ways, + * we need to keep a history of version numbers, which is done in the + * macro SHLIB_VERSION_HISTORY. The numbers are separated by colons and + * should only keep the versions that are binary compatible with the current. + */ +# define SHLIB_VERSION_HISTORY "" +# define SHLIB_VERSION_NUMBER "1.1" + + +#ifdef __cplusplus +} +#endif +#endif /* HEADER_OPENSSLV_H */ diff --git a/third_party/openssl/uos/sw_64/include/openssl/ossl_typ.h b/third_party/openssl/uos/sw_64/include/openssl/ossl_typ.h new file mode 100644 index 00000000..e0edfaaf --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/ossl_typ.h @@ -0,0 +1,197 @@ +/* + * Copyright 2001-2018 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_OPENSSL_TYPES_H +# define HEADER_OPENSSL_TYPES_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +# include + +# ifdef NO_ASN1_TYPEDEFS +# define ASN1_INTEGER ASN1_STRING +# define ASN1_ENUMERATED ASN1_STRING +# define ASN1_BIT_STRING ASN1_STRING +# define ASN1_OCTET_STRING ASN1_STRING +# define ASN1_PRINTABLESTRING ASN1_STRING +# define ASN1_T61STRING ASN1_STRING +# define ASN1_IA5STRING ASN1_STRING +# define ASN1_UTCTIME ASN1_STRING +# define ASN1_GENERALIZEDTIME ASN1_STRING +# define ASN1_TIME ASN1_STRING +# define ASN1_GENERALSTRING ASN1_STRING +# define ASN1_UNIVERSALSTRING ASN1_STRING +# define ASN1_BMPSTRING ASN1_STRING +# define ASN1_VISIBLESTRING ASN1_STRING +# define ASN1_UTF8STRING ASN1_STRING +# define ASN1_BOOLEAN int +# define ASN1_NULL int +# else +typedef struct asn1_string_st ASN1_INTEGER; +typedef struct asn1_string_st ASN1_ENUMERATED; +typedef struct asn1_string_st ASN1_BIT_STRING; +typedef struct asn1_string_st ASN1_OCTET_STRING; +typedef struct asn1_string_st ASN1_PRINTABLESTRING; +typedef struct asn1_string_st ASN1_T61STRING; +typedef struct asn1_string_st ASN1_IA5STRING; +typedef struct asn1_string_st ASN1_GENERALSTRING; +typedef struct asn1_string_st ASN1_UNIVERSALSTRING; +typedef struct asn1_string_st ASN1_BMPSTRING; +typedef struct asn1_string_st ASN1_UTCTIME; +typedef struct asn1_string_st ASN1_TIME; +typedef struct asn1_string_st ASN1_GENERALIZEDTIME; +typedef struct asn1_string_st ASN1_VISIBLESTRING; +typedef struct asn1_string_st ASN1_UTF8STRING; +typedef struct asn1_string_st ASN1_STRING; +typedef int ASN1_BOOLEAN; +typedef int ASN1_NULL; +# endif + +typedef struct asn1_object_st ASN1_OBJECT; + +typedef struct ASN1_ITEM_st ASN1_ITEM; +typedef struct asn1_pctx_st ASN1_PCTX; +typedef struct asn1_sctx_st ASN1_SCTX; + +# ifdef _WIN32 +# undef X509_NAME +# undef X509_EXTENSIONS +# undef PKCS7_ISSUER_AND_SERIAL +# undef PKCS7_SIGNER_INFO +# undef OCSP_REQUEST +# undef OCSP_RESPONSE +# endif + +# ifdef BIGNUM +# undef BIGNUM +# endif +struct dane_st; +typedef struct bio_st BIO; +typedef struct bignum_st BIGNUM; +typedef struct bignum_ctx BN_CTX; +typedef struct bn_blinding_st BN_BLINDING; +typedef struct bn_mont_ctx_st BN_MONT_CTX; +typedef struct bn_recp_ctx_st BN_RECP_CTX; +typedef struct bn_gencb_st BN_GENCB; + +typedef struct buf_mem_st BUF_MEM; + +typedef struct evp_cipher_st EVP_CIPHER; +typedef struct evp_cipher_ctx_st EVP_CIPHER_CTX; +typedef struct evp_md_st EVP_MD; +typedef struct evp_md_ctx_st EVP_MD_CTX; +typedef struct evp_pkey_st EVP_PKEY; + +typedef struct evp_pkey_asn1_method_st EVP_PKEY_ASN1_METHOD; + +typedef struct evp_pkey_method_st EVP_PKEY_METHOD; +typedef struct evp_pkey_ctx_st EVP_PKEY_CTX; + +typedef struct evp_Encode_Ctx_st EVP_ENCODE_CTX; + +typedef struct hmac_ctx_st HMAC_CTX; + +typedef struct dh_st DH; +typedef struct dh_method DH_METHOD; + +typedef struct dsa_st DSA; +typedef struct dsa_method DSA_METHOD; + +typedef struct rsa_st RSA; +typedef struct rsa_meth_st RSA_METHOD; +typedef struct rsa_pss_params_st RSA_PSS_PARAMS; + +typedef struct ec_key_st EC_KEY; +typedef struct ec_key_method_st EC_KEY_METHOD; + +typedef struct rand_meth_st RAND_METHOD; +typedef struct rand_drbg_st RAND_DRBG; + +typedef struct ssl_dane_st SSL_DANE; +typedef struct x509_st X509; +typedef struct X509_algor_st X509_ALGOR; +typedef struct X509_crl_st X509_CRL; +typedef struct x509_crl_method_st 
X509_CRL_METHOD; +typedef struct x509_revoked_st X509_REVOKED; +typedef struct X509_name_st X509_NAME; +typedef struct X509_pubkey_st X509_PUBKEY; +typedef struct x509_store_st X509_STORE; +typedef struct x509_store_ctx_st X509_STORE_CTX; + +typedef struct x509_object_st X509_OBJECT; +typedef struct x509_lookup_st X509_LOOKUP; +typedef struct x509_lookup_method_st X509_LOOKUP_METHOD; +typedef struct X509_VERIFY_PARAM_st X509_VERIFY_PARAM; + +typedef struct x509_sig_info_st X509_SIG_INFO; + +typedef struct pkcs8_priv_key_info_st PKCS8_PRIV_KEY_INFO; + +typedef struct v3_ext_ctx X509V3_CTX; +typedef struct conf_st CONF; +typedef struct ossl_init_settings_st OPENSSL_INIT_SETTINGS; + +typedef struct ui_st UI; +typedef struct ui_method_st UI_METHOD; + +typedef struct engine_st ENGINE; +typedef struct ssl_st SSL; +typedef struct ssl_ctx_st SSL_CTX; + +typedef struct comp_ctx_st COMP_CTX; +typedef struct comp_method_st COMP_METHOD; + +typedef struct X509_POLICY_NODE_st X509_POLICY_NODE; +typedef struct X509_POLICY_LEVEL_st X509_POLICY_LEVEL; +typedef struct X509_POLICY_TREE_st X509_POLICY_TREE; +typedef struct X509_POLICY_CACHE_st X509_POLICY_CACHE; + +typedef struct AUTHORITY_KEYID_st AUTHORITY_KEYID; +typedef struct DIST_POINT_st DIST_POINT; +typedef struct ISSUING_DIST_POINT_st ISSUING_DIST_POINT; +typedef struct NAME_CONSTRAINTS_st NAME_CONSTRAINTS; + +typedef struct crypto_ex_data_st CRYPTO_EX_DATA; + +typedef struct ocsp_req_ctx_st OCSP_REQ_CTX; +typedef struct ocsp_response_st OCSP_RESPONSE; +typedef struct ocsp_responder_id_st OCSP_RESPID; + +typedef struct sct_st SCT; +typedef struct sct_ctx_st SCT_CTX; +typedef struct ctlog_st CTLOG; +typedef struct ctlog_store_st CTLOG_STORE; +typedef struct ct_policy_eval_ctx_st CT_POLICY_EVAL_CTX; + +typedef struct ossl_store_info_st OSSL_STORE_INFO; +typedef struct ossl_store_search_st OSSL_STORE_SEARCH; + +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L && \ + defined(INTMAX_MAX) && defined(UINTMAX_MAX) +typedef intmax_t ossl_intmax_t; +typedef uintmax_t ossl_uintmax_t; +#else +/* + * Not long long, because the C-library can only be expected to provide + * strtoll(), strtoull() at the same time as intmax_t and strtoimax(), + * strtoumax(). Since we use these for parsing arguments, we need the + * conversion functions, not just the sizes. + */ +typedef long ossl_intmax_t; +typedef unsigned long ossl_uintmax_t; +#endif + +#ifdef __cplusplus +} +#endif +#endif /* def HEADER_OPENSSL_TYPES_H */ diff --git a/third_party/openssl/uos/sw_64/include/openssl/pem.h b/third_party/openssl/uos/sw_64/include/openssl/pem.h new file mode 100644 index 00000000..2ef5b5d0 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/pem.h @@ -0,0 +1,378 @@ +/* + * Copyright 1995-2018 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_PEM_H +# define HEADER_PEM_H + +# include +# include +# include +# include +# include +# include + +#ifdef __cplusplus +extern "C" { +#endif + +# define PEM_BUFSIZE 1024 + +# define PEM_STRING_X509_OLD "X509 CERTIFICATE" +# define PEM_STRING_X509 "CERTIFICATE" +# define PEM_STRING_X509_TRUSTED "TRUSTED CERTIFICATE" +# define PEM_STRING_X509_REQ_OLD "NEW CERTIFICATE REQUEST" +# define PEM_STRING_X509_REQ "CERTIFICATE REQUEST" +# define PEM_STRING_X509_CRL "X509 CRL" +# define PEM_STRING_EVP_PKEY "ANY PRIVATE KEY" +# define PEM_STRING_PUBLIC "PUBLIC KEY" +# define PEM_STRING_RSA "RSA PRIVATE KEY" +# define PEM_STRING_RSA_PUBLIC "RSA PUBLIC KEY" +# define PEM_STRING_DSA "DSA PRIVATE KEY" +# define PEM_STRING_DSA_PUBLIC "DSA PUBLIC KEY" +# define PEM_STRING_PKCS7 "PKCS7" +# define PEM_STRING_PKCS7_SIGNED "PKCS #7 SIGNED DATA" +# define PEM_STRING_PKCS8 "ENCRYPTED PRIVATE KEY" +# define PEM_STRING_PKCS8INF "PRIVATE KEY" +# define PEM_STRING_DHPARAMS "DH PARAMETERS" +# define PEM_STRING_DHXPARAMS "X9.42 DH PARAMETERS" +# define PEM_STRING_SSL_SESSION "SSL SESSION PARAMETERS" +# define PEM_STRING_DSAPARAMS "DSA PARAMETERS" +# define PEM_STRING_ECDSA_PUBLIC "ECDSA PUBLIC KEY" +# define PEM_STRING_ECPARAMETERS "EC PARAMETERS" +# define PEM_STRING_ECPRIVATEKEY "EC PRIVATE KEY" +# define PEM_STRING_PARAMETERS "PARAMETERS" +# define PEM_STRING_CMS "CMS" + +# define PEM_TYPE_ENCRYPTED 10 +# define PEM_TYPE_MIC_ONLY 20 +# define PEM_TYPE_MIC_CLEAR 30 +# define PEM_TYPE_CLEAR 40 + +/* + * These macros make the PEM_read/PEM_write functions easier to maintain and + * write. Now they are all implemented with either: IMPLEMENT_PEM_rw(...) or + * IMPLEMENT_PEM_rw_cb(...) 
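The DECLARE_/IMPLEMENT_ pairs described above expand into concrete functions such as PEM_read_bio_X509() and PEM_read_bio_PrivateKey(), declared further down in this header. A hedged usage sketch follows: load_cert_and_key() is a hypothetical helper, BIO_new_file() comes from bio.h, and passing a NULL callback falls back to PEM_def_callback(), which prompts for a passphrase when a key is encrypted.

#include <openssl/bio.h>
#include <openssl/pem.h>

/* Read one certificate and its private key from PEM files; returns 1 on success. */
static int load_cert_and_key(const char *certfile, const char *keyfile,
                             X509 **cert, EVP_PKEY **key)
{
    BIO *cbio = BIO_new_file(certfile, "r");
    BIO *kbio = BIO_new_file(keyfile, "r");
    int ok = 0;

    if (cbio != NULL && kbio != NULL) {
        *cert = PEM_read_bio_X509(cbio, NULL, NULL, NULL);
        *key  = PEM_read_bio_PrivateKey(kbio, NULL, NULL, NULL);
        ok = (*cert != NULL && *key != NULL);
    }
    BIO_free(cbio);
    BIO_free(kbio);
    return ok;
}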
+ */ + +# ifdef OPENSSL_NO_STDIO + +# define IMPLEMENT_PEM_read_fp(name, type, str, asn1) /**/ +# define IMPLEMENT_PEM_write_fp(name, type, str, asn1) /**/ +# define IMPLEMENT_PEM_write_fp_const(name, type, str, asn1) /**/ +# define IMPLEMENT_PEM_write_cb_fp(name, type, str, asn1) /**/ +# define IMPLEMENT_PEM_write_cb_fp_const(name, type, str, asn1) /**/ +# else + +# define IMPLEMENT_PEM_read_fp(name, type, str, asn1) \ +type *PEM_read_##name(FILE *fp, type **x, pem_password_cb *cb, void *u)\ +{ \ +return PEM_ASN1_read((d2i_of_void *)d2i_##asn1, str,fp,(void **)x,cb,u); \ +} + +# define IMPLEMENT_PEM_write_fp(name, type, str, asn1) \ +int PEM_write_##name(FILE *fp, type *x) \ +{ \ +return PEM_ASN1_write((i2d_of_void *)i2d_##asn1,str,fp,x,NULL,NULL,0,NULL,NULL); \ +} + +# define IMPLEMENT_PEM_write_fp_const(name, type, str, asn1) \ +int PEM_write_##name(FILE *fp, const type *x) \ +{ \ +return PEM_ASN1_write((i2d_of_void *)i2d_##asn1,str,fp,(void *)x,NULL,NULL,0,NULL,NULL); \ +} + +# define IMPLEMENT_PEM_write_cb_fp(name, type, str, asn1) \ +int PEM_write_##name(FILE *fp, type *x, const EVP_CIPHER *enc, \ + unsigned char *kstr, int klen, pem_password_cb *cb, \ + void *u) \ + { \ + return PEM_ASN1_write((i2d_of_void *)i2d_##asn1,str,fp,x,enc,kstr,klen,cb,u); \ + } + +# define IMPLEMENT_PEM_write_cb_fp_const(name, type, str, asn1) \ +int PEM_write_##name(FILE *fp, type *x, const EVP_CIPHER *enc, \ + unsigned char *kstr, int klen, pem_password_cb *cb, \ + void *u) \ + { \ + return PEM_ASN1_write((i2d_of_void *)i2d_##asn1,str,fp,x,enc,kstr,klen,cb,u); \ + } + +# endif + +# define IMPLEMENT_PEM_read_bio(name, type, str, asn1) \ +type *PEM_read_bio_##name(BIO *bp, type **x, pem_password_cb *cb, void *u)\ +{ \ +return PEM_ASN1_read_bio((d2i_of_void *)d2i_##asn1, str,bp,(void **)x,cb,u); \ +} + +# define IMPLEMENT_PEM_write_bio(name, type, str, asn1) \ +int PEM_write_bio_##name(BIO *bp, type *x) \ +{ \ +return PEM_ASN1_write_bio((i2d_of_void *)i2d_##asn1,str,bp,x,NULL,NULL,0,NULL,NULL); \ +} + +# define IMPLEMENT_PEM_write_bio_const(name, type, str, asn1) \ +int PEM_write_bio_##name(BIO *bp, const type *x) \ +{ \ +return PEM_ASN1_write_bio((i2d_of_void *)i2d_##asn1,str,bp,(void *)x,NULL,NULL,0,NULL,NULL); \ +} + +# define IMPLEMENT_PEM_write_cb_bio(name, type, str, asn1) \ +int PEM_write_bio_##name(BIO *bp, type *x, const EVP_CIPHER *enc, \ + unsigned char *kstr, int klen, pem_password_cb *cb, void *u) \ + { \ + return PEM_ASN1_write_bio((i2d_of_void *)i2d_##asn1,str,bp,x,enc,kstr,klen,cb,u); \ + } + +# define IMPLEMENT_PEM_write_cb_bio_const(name, type, str, asn1) \ +int PEM_write_bio_##name(BIO *bp, type *x, const EVP_CIPHER *enc, \ + unsigned char *kstr, int klen, pem_password_cb *cb, void *u) \ + { \ + return PEM_ASN1_write_bio((i2d_of_void *)i2d_##asn1,str,bp,(void *)x,enc,kstr,klen,cb,u); \ + } + +# define IMPLEMENT_PEM_write(name, type, str, asn1) \ + IMPLEMENT_PEM_write_bio(name, type, str, asn1) \ + IMPLEMENT_PEM_write_fp(name, type, str, asn1) + +# define IMPLEMENT_PEM_write_const(name, type, str, asn1) \ + IMPLEMENT_PEM_write_bio_const(name, type, str, asn1) \ + IMPLEMENT_PEM_write_fp_const(name, type, str, asn1) + +# define IMPLEMENT_PEM_write_cb(name, type, str, asn1) \ + IMPLEMENT_PEM_write_cb_bio(name, type, str, asn1) \ + IMPLEMENT_PEM_write_cb_fp(name, type, str, asn1) + +# define IMPLEMENT_PEM_write_cb_const(name, type, str, asn1) \ + IMPLEMENT_PEM_write_cb_bio_const(name, type, str, asn1) \ + IMPLEMENT_PEM_write_cb_fp_const(name, type, str, asn1) + +# define 
IMPLEMENT_PEM_read(name, type, str, asn1) \ + IMPLEMENT_PEM_read_bio(name, type, str, asn1) \ + IMPLEMENT_PEM_read_fp(name, type, str, asn1) + +# define IMPLEMENT_PEM_rw(name, type, str, asn1) \ + IMPLEMENT_PEM_read(name, type, str, asn1) \ + IMPLEMENT_PEM_write(name, type, str, asn1) + +# define IMPLEMENT_PEM_rw_const(name, type, str, asn1) \ + IMPLEMENT_PEM_read(name, type, str, asn1) \ + IMPLEMENT_PEM_write_const(name, type, str, asn1) + +# define IMPLEMENT_PEM_rw_cb(name, type, str, asn1) \ + IMPLEMENT_PEM_read(name, type, str, asn1) \ + IMPLEMENT_PEM_write_cb(name, type, str, asn1) + +/* These are the same except they are for the declarations */ + +# if defined(OPENSSL_NO_STDIO) + +# define DECLARE_PEM_read_fp(name, type) /**/ +# define DECLARE_PEM_write_fp(name, type) /**/ +# define DECLARE_PEM_write_fp_const(name, type) /**/ +# define DECLARE_PEM_write_cb_fp(name, type) /**/ +# else + +# define DECLARE_PEM_read_fp(name, type) \ + type *PEM_read_##name(FILE *fp, type **x, pem_password_cb *cb, void *u); + +# define DECLARE_PEM_write_fp(name, type) \ + int PEM_write_##name(FILE *fp, type *x); + +# define DECLARE_PEM_write_fp_const(name, type) \ + int PEM_write_##name(FILE *fp, const type *x); + +# define DECLARE_PEM_write_cb_fp(name, type) \ + int PEM_write_##name(FILE *fp, type *x, const EVP_CIPHER *enc, \ + unsigned char *kstr, int klen, pem_password_cb *cb, void *u); + +# endif + +# define DECLARE_PEM_read_bio(name, type) \ + type *PEM_read_bio_##name(BIO *bp, type **x, pem_password_cb *cb, void *u); + +# define DECLARE_PEM_write_bio(name, type) \ + int PEM_write_bio_##name(BIO *bp, type *x); + +# define DECLARE_PEM_write_bio_const(name, type) \ + int PEM_write_bio_##name(BIO *bp, const type *x); + +# define DECLARE_PEM_write_cb_bio(name, type) \ + int PEM_write_bio_##name(BIO *bp, type *x, const EVP_CIPHER *enc, \ + unsigned char *kstr, int klen, pem_password_cb *cb, void *u); + +# define DECLARE_PEM_write(name, type) \ + DECLARE_PEM_write_bio(name, type) \ + DECLARE_PEM_write_fp(name, type) +# define DECLARE_PEM_write_const(name, type) \ + DECLARE_PEM_write_bio_const(name, type) \ + DECLARE_PEM_write_fp_const(name, type) +# define DECLARE_PEM_write_cb(name, type) \ + DECLARE_PEM_write_cb_bio(name, type) \ + DECLARE_PEM_write_cb_fp(name, type) +# define DECLARE_PEM_read(name, type) \ + DECLARE_PEM_read_bio(name, type) \ + DECLARE_PEM_read_fp(name, type) +# define DECLARE_PEM_rw(name, type) \ + DECLARE_PEM_read(name, type) \ + DECLARE_PEM_write(name, type) +# define DECLARE_PEM_rw_const(name, type) \ + DECLARE_PEM_read(name, type) \ + DECLARE_PEM_write_const(name, type) +# define DECLARE_PEM_rw_cb(name, type) \ + DECLARE_PEM_read(name, type) \ + DECLARE_PEM_write_cb(name, type) +typedef int pem_password_cb (char *buf, int size, int rwflag, void *userdata); + +int PEM_get_EVP_CIPHER_INFO(char *header, EVP_CIPHER_INFO *cipher); +int PEM_do_header(EVP_CIPHER_INFO *cipher, unsigned char *data, long *len, + pem_password_cb *callback, void *u); + +int PEM_read_bio(BIO *bp, char **name, char **header, + unsigned char **data, long *len); +# define PEM_FLAG_SECURE 0x1 +# define PEM_FLAG_EAY_COMPATIBLE 0x2 +# define PEM_FLAG_ONLY_B64 0x4 +int PEM_read_bio_ex(BIO *bp, char **name, char **header, + unsigned char **data, long *len, unsigned int flags); +int PEM_bytes_read_bio_secmem(unsigned char **pdata, long *plen, char **pnm, + const char *name, BIO *bp, pem_password_cb *cb, + void *u); +int PEM_write_bio(BIO *bp, const char *name, const char *hdr, + const unsigned char *data, long len); +int 
PEM_bytes_read_bio(unsigned char **pdata, long *plen, char **pnm, + const char *name, BIO *bp, pem_password_cb *cb, + void *u); +void *PEM_ASN1_read_bio(d2i_of_void *d2i, const char *name, BIO *bp, void **x, + pem_password_cb *cb, void *u); +int PEM_ASN1_write_bio(i2d_of_void *i2d, const char *name, BIO *bp, void *x, + const EVP_CIPHER *enc, unsigned char *kstr, int klen, + pem_password_cb *cb, void *u); + +STACK_OF(X509_INFO) *PEM_X509_INFO_read_bio(BIO *bp, STACK_OF(X509_INFO) *sk, + pem_password_cb *cb, void *u); +int PEM_X509_INFO_write_bio(BIO *bp, X509_INFO *xi, EVP_CIPHER *enc, + unsigned char *kstr, int klen, + pem_password_cb *cd, void *u); + +#ifndef OPENSSL_NO_STDIO +int PEM_read(FILE *fp, char **name, char **header, + unsigned char **data, long *len); +int PEM_write(FILE *fp, const char *name, const char *hdr, + const unsigned char *data, long len); +void *PEM_ASN1_read(d2i_of_void *d2i, const char *name, FILE *fp, void **x, + pem_password_cb *cb, void *u); +int PEM_ASN1_write(i2d_of_void *i2d, const char *name, FILE *fp, + void *x, const EVP_CIPHER *enc, unsigned char *kstr, + int klen, pem_password_cb *callback, void *u); +STACK_OF(X509_INFO) *PEM_X509_INFO_read(FILE *fp, STACK_OF(X509_INFO) *sk, + pem_password_cb *cb, void *u); +#endif + +int PEM_SignInit(EVP_MD_CTX *ctx, EVP_MD *type); +int PEM_SignUpdate(EVP_MD_CTX *ctx, unsigned char *d, unsigned int cnt); +int PEM_SignFinal(EVP_MD_CTX *ctx, unsigned char *sigret, + unsigned int *siglen, EVP_PKEY *pkey); + +/* The default pem_password_cb that's used internally */ +int PEM_def_callback(char *buf, int num, int rwflag, void *userdata); +void PEM_proc_type(char *buf, int type); +void PEM_dek_info(char *buf, const char *type, int len, char *str); + +# include + +DECLARE_PEM_rw(X509, X509) +DECLARE_PEM_rw(X509_AUX, X509) +DECLARE_PEM_rw(X509_REQ, X509_REQ) +DECLARE_PEM_write(X509_REQ_NEW, X509_REQ) +DECLARE_PEM_rw(X509_CRL, X509_CRL) +DECLARE_PEM_rw(PKCS7, PKCS7) +DECLARE_PEM_rw(NETSCAPE_CERT_SEQUENCE, NETSCAPE_CERT_SEQUENCE) +DECLARE_PEM_rw(PKCS8, X509_SIG) +DECLARE_PEM_rw(PKCS8_PRIV_KEY_INFO, PKCS8_PRIV_KEY_INFO) +# ifndef OPENSSL_NO_RSA +DECLARE_PEM_rw_cb(RSAPrivateKey, RSA) +DECLARE_PEM_rw_const(RSAPublicKey, RSA) +DECLARE_PEM_rw(RSA_PUBKEY, RSA) +# endif +# ifndef OPENSSL_NO_DSA +DECLARE_PEM_rw_cb(DSAPrivateKey, DSA) +DECLARE_PEM_rw(DSA_PUBKEY, DSA) +DECLARE_PEM_rw_const(DSAparams, DSA) +# endif +# ifndef OPENSSL_NO_EC +DECLARE_PEM_rw_const(ECPKParameters, EC_GROUP) +DECLARE_PEM_rw_cb(ECPrivateKey, EC_KEY) +DECLARE_PEM_rw(EC_PUBKEY, EC_KEY) +# endif +# ifndef OPENSSL_NO_DH +DECLARE_PEM_rw_const(DHparams, DH) +DECLARE_PEM_write_const(DHxparams, DH) +# endif +DECLARE_PEM_rw_cb(PrivateKey, EVP_PKEY) +DECLARE_PEM_rw(PUBKEY, EVP_PKEY) + +int PEM_write_bio_PrivateKey_traditional(BIO *bp, EVP_PKEY *x, + const EVP_CIPHER *enc, + unsigned char *kstr, int klen, + pem_password_cb *cb, void *u); + +int PEM_write_bio_PKCS8PrivateKey_nid(BIO *bp, EVP_PKEY *x, int nid, + char *kstr, int klen, + pem_password_cb *cb, void *u); +int PEM_write_bio_PKCS8PrivateKey(BIO *, EVP_PKEY *, const EVP_CIPHER *, + char *, int, pem_password_cb *, void *); +int i2d_PKCS8PrivateKey_bio(BIO *bp, EVP_PKEY *x, const EVP_CIPHER *enc, + char *kstr, int klen, + pem_password_cb *cb, void *u); +int i2d_PKCS8PrivateKey_nid_bio(BIO *bp, EVP_PKEY *x, int nid, + char *kstr, int klen, + pem_password_cb *cb, void *u); +EVP_PKEY *d2i_PKCS8PrivateKey_bio(BIO *bp, EVP_PKEY **x, pem_password_cb *cb, + void *u); + +# ifndef OPENSSL_NO_STDIO +int i2d_PKCS8PrivateKey_fp(FILE 
*fp, EVP_PKEY *x, const EVP_CIPHER *enc, + char *kstr, int klen, + pem_password_cb *cb, void *u); +int i2d_PKCS8PrivateKey_nid_fp(FILE *fp, EVP_PKEY *x, int nid, + char *kstr, int klen, + pem_password_cb *cb, void *u); +int PEM_write_PKCS8PrivateKey_nid(FILE *fp, EVP_PKEY *x, int nid, + char *kstr, int klen, + pem_password_cb *cb, void *u); + +EVP_PKEY *d2i_PKCS8PrivateKey_fp(FILE *fp, EVP_PKEY **x, pem_password_cb *cb, + void *u); + +int PEM_write_PKCS8PrivateKey(FILE *fp, EVP_PKEY *x, const EVP_CIPHER *enc, + char *kstr, int klen, pem_password_cb *cd, + void *u); +# endif +EVP_PKEY *PEM_read_bio_Parameters(BIO *bp, EVP_PKEY **x); +int PEM_write_bio_Parameters(BIO *bp, EVP_PKEY *x); + +# ifndef OPENSSL_NO_DSA +EVP_PKEY *b2i_PrivateKey(const unsigned char **in, long length); +EVP_PKEY *b2i_PublicKey(const unsigned char **in, long length); +EVP_PKEY *b2i_PrivateKey_bio(BIO *in); +EVP_PKEY *b2i_PublicKey_bio(BIO *in); +int i2b_PrivateKey_bio(BIO *out, EVP_PKEY *pk); +int i2b_PublicKey_bio(BIO *out, EVP_PKEY *pk); +# ifndef OPENSSL_NO_RC4 +EVP_PKEY *b2i_PVK_bio(BIO *in, pem_password_cb *cb, void *u); +int i2b_PVK_bio(BIO *out, EVP_PKEY *pk, int enclevel, + pem_password_cb *cb, void *u); +# endif +# endif + +# ifdef __cplusplus +} +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/pem2.h b/third_party/openssl/uos/sw_64/include/openssl/pem2.h new file mode 100644 index 00000000..038fe790 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/pem2.h @@ -0,0 +1,13 @@ +/* + * Copyright 1999-2018 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_PEM2_H +# define HEADER_PEM2_H +# include +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/pemerr.h b/third_party/openssl/uos/sw_64/include/openssl/pemerr.h new file mode 100644 index 00000000..4f7e3574 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/pemerr.h @@ -0,0 +1,105 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_PEMERR_H +# define HEADER_PEMERR_H + +# ifndef HEADER_SYMHACKS_H +# include +# endif + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_PEM_strings(void); + +/* + * PEM function codes. 
+ */ +# define PEM_F_B2I_DSS 127 +# define PEM_F_B2I_PVK_BIO 128 +# define PEM_F_B2I_RSA 129 +# define PEM_F_CHECK_BITLEN_DSA 130 +# define PEM_F_CHECK_BITLEN_RSA 131 +# define PEM_F_D2I_PKCS8PRIVATEKEY_BIO 120 +# define PEM_F_D2I_PKCS8PRIVATEKEY_FP 121 +# define PEM_F_DO_B2I 132 +# define PEM_F_DO_B2I_BIO 133 +# define PEM_F_DO_BLOB_HEADER 134 +# define PEM_F_DO_I2B 146 +# define PEM_F_DO_PK8PKEY 126 +# define PEM_F_DO_PK8PKEY_FP 125 +# define PEM_F_DO_PVK_BODY 135 +# define PEM_F_DO_PVK_HEADER 136 +# define PEM_F_GET_HEADER_AND_DATA 143 +# define PEM_F_GET_NAME 144 +# define PEM_F_I2B_PVK 137 +# define PEM_F_I2B_PVK_BIO 138 +# define PEM_F_LOAD_IV 101 +# define PEM_F_PEM_ASN1_READ 102 +# define PEM_F_PEM_ASN1_READ_BIO 103 +# define PEM_F_PEM_ASN1_WRITE 104 +# define PEM_F_PEM_ASN1_WRITE_BIO 105 +# define PEM_F_PEM_DEF_CALLBACK 100 +# define PEM_F_PEM_DO_HEADER 106 +# define PEM_F_PEM_GET_EVP_CIPHER_INFO 107 +# define PEM_F_PEM_READ 108 +# define PEM_F_PEM_READ_BIO 109 +# define PEM_F_PEM_READ_BIO_DHPARAMS 141 +# define PEM_F_PEM_READ_BIO_EX 145 +# define PEM_F_PEM_READ_BIO_PARAMETERS 140 +# define PEM_F_PEM_READ_BIO_PRIVATEKEY 123 +# define PEM_F_PEM_READ_DHPARAMS 142 +# define PEM_F_PEM_READ_PRIVATEKEY 124 +# define PEM_F_PEM_SIGNFINAL 112 +# define PEM_F_PEM_WRITE 113 +# define PEM_F_PEM_WRITE_BIO 114 +# define PEM_F_PEM_WRITE_BIO_PRIVATEKEY_TRADITIONAL 147 +# define PEM_F_PEM_WRITE_PRIVATEKEY 139 +# define PEM_F_PEM_X509_INFO_READ 115 +# define PEM_F_PEM_X509_INFO_READ_BIO 116 +# define PEM_F_PEM_X509_INFO_WRITE_BIO 117 + +/* + * PEM reason codes. + */ +# define PEM_R_BAD_BASE64_DECODE 100 +# define PEM_R_BAD_DECRYPT 101 +# define PEM_R_BAD_END_LINE 102 +# define PEM_R_BAD_IV_CHARS 103 +# define PEM_R_BAD_MAGIC_NUMBER 116 +# define PEM_R_BAD_PASSWORD_READ 104 +# define PEM_R_BAD_VERSION_NUMBER 117 +# define PEM_R_BIO_WRITE_FAILURE 118 +# define PEM_R_CIPHER_IS_NULL 127 +# define PEM_R_ERROR_CONVERTING_PRIVATE_KEY 115 +# define PEM_R_EXPECTING_PRIVATE_KEY_BLOB 119 +# define PEM_R_EXPECTING_PUBLIC_KEY_BLOB 120 +# define PEM_R_HEADER_TOO_LONG 128 +# define PEM_R_INCONSISTENT_HEADER 121 +# define PEM_R_KEYBLOB_HEADER_PARSE_ERROR 122 +# define PEM_R_KEYBLOB_TOO_SHORT 123 +# define PEM_R_MISSING_DEK_IV 129 +# define PEM_R_NOT_DEK_INFO 105 +# define PEM_R_NOT_ENCRYPTED 106 +# define PEM_R_NOT_PROC_TYPE 107 +# define PEM_R_NO_START_LINE 108 +# define PEM_R_PROBLEMS_GETTING_PASSWORD 109 +# define PEM_R_PVK_DATA_TOO_SHORT 124 +# define PEM_R_PVK_TOO_SHORT 125 +# define PEM_R_READ_KEY 111 +# define PEM_R_SHORT_HEADER 112 +# define PEM_R_UNEXPECTED_DEK_IV 130 +# define PEM_R_UNSUPPORTED_CIPHER 113 +# define PEM_R_UNSUPPORTED_ENCRYPTION 114 +# define PEM_R_UNSUPPORTED_KEY_COMPONENTS 126 +# define PEM_R_UNSUPPORTED_PUBLIC_KEY_TYPE 110 + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/pkcs12.h b/third_party/openssl/uos/sw_64/include/openssl/pkcs12.h new file mode 100644 index 00000000..3f43dad6 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/pkcs12.h @@ -0,0 +1,223 @@ +/* + * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_PKCS12_H +# define HEADER_PKCS12_H + +# include +# include +# include + +#ifdef __cplusplus +extern "C" { +#endif + +# define PKCS12_KEY_ID 1 +# define PKCS12_IV_ID 2 +# define PKCS12_MAC_ID 3 + +/* Default iteration count */ +# ifndef PKCS12_DEFAULT_ITER +# define PKCS12_DEFAULT_ITER PKCS5_DEFAULT_ITER +# endif + +# define PKCS12_MAC_KEY_LENGTH 20 + +# define PKCS12_SALT_LEN 8 + +/* It's not clear if these are actually needed... */ +# define PKCS12_key_gen PKCS12_key_gen_utf8 +# define PKCS12_add_friendlyname PKCS12_add_friendlyname_utf8 + +/* MS key usage constants */ + +# define KEY_EX 0x10 +# define KEY_SIG 0x80 + +typedef struct PKCS12_MAC_DATA_st PKCS12_MAC_DATA; + +typedef struct PKCS12_st PKCS12; + +typedef struct PKCS12_SAFEBAG_st PKCS12_SAFEBAG; + +DEFINE_STACK_OF(PKCS12_SAFEBAG) + +typedef struct pkcs12_bag_st PKCS12_BAGS; + +# define PKCS12_ERROR 0 +# define PKCS12_OK 1 + +/* Compatibility macros */ + +#if OPENSSL_API_COMPAT < 0x10100000L + +# define M_PKCS12_bag_type PKCS12_bag_type +# define M_PKCS12_cert_bag_type PKCS12_cert_bag_type +# define M_PKCS12_crl_bag_type PKCS12_cert_bag_type + +# define PKCS12_certbag2x509 PKCS12_SAFEBAG_get1_cert +# define PKCS12_certbag2scrl PKCS12_SAFEBAG_get1_crl +# define PKCS12_bag_type PKCS12_SAFEBAG_get_nid +# define PKCS12_cert_bag_type PKCS12_SAFEBAG_get_bag_nid +# define PKCS12_x5092certbag PKCS12_SAFEBAG_create_cert +# define PKCS12_x509crl2certbag PKCS12_SAFEBAG_create_crl +# define PKCS12_MAKE_KEYBAG PKCS12_SAFEBAG_create0_p8inf +# define PKCS12_MAKE_SHKEYBAG PKCS12_SAFEBAG_create_pkcs8_encrypt + +#endif + +DEPRECATEDIN_1_1_0(ASN1_TYPE *PKCS12_get_attr(const PKCS12_SAFEBAG *bag, int attr_nid)) + +ASN1_TYPE *PKCS8_get_attr(PKCS8_PRIV_KEY_INFO *p8, int attr_nid); +int PKCS12_mac_present(const PKCS12 *p12); +void PKCS12_get0_mac(const ASN1_OCTET_STRING **pmac, + const X509_ALGOR **pmacalg, + const ASN1_OCTET_STRING **psalt, + const ASN1_INTEGER **piter, + const PKCS12 *p12); + +const ASN1_TYPE *PKCS12_SAFEBAG_get0_attr(const PKCS12_SAFEBAG *bag, + int attr_nid); +const ASN1_OBJECT *PKCS12_SAFEBAG_get0_type(const PKCS12_SAFEBAG *bag); +int PKCS12_SAFEBAG_get_nid(const PKCS12_SAFEBAG *bag); +int PKCS12_SAFEBAG_get_bag_nid(const PKCS12_SAFEBAG *bag); + +X509 *PKCS12_SAFEBAG_get1_cert(const PKCS12_SAFEBAG *bag); +X509_CRL *PKCS12_SAFEBAG_get1_crl(const PKCS12_SAFEBAG *bag); +const STACK_OF(PKCS12_SAFEBAG) * +PKCS12_SAFEBAG_get0_safes(const PKCS12_SAFEBAG *bag); +const PKCS8_PRIV_KEY_INFO *PKCS12_SAFEBAG_get0_p8inf(const PKCS12_SAFEBAG *bag); +const X509_SIG *PKCS12_SAFEBAG_get0_pkcs8(const PKCS12_SAFEBAG *bag); + +PKCS12_SAFEBAG *PKCS12_SAFEBAG_create_cert(X509 *x509); +PKCS12_SAFEBAG *PKCS12_SAFEBAG_create_crl(X509_CRL *crl); +PKCS12_SAFEBAG *PKCS12_SAFEBAG_create0_p8inf(PKCS8_PRIV_KEY_INFO *p8); +PKCS12_SAFEBAG *PKCS12_SAFEBAG_create0_pkcs8(X509_SIG *p8); +PKCS12_SAFEBAG *PKCS12_SAFEBAG_create_pkcs8_encrypt(int pbe_nid, + const char *pass, + int passlen, + unsigned char *salt, + int saltlen, int iter, + PKCS8_PRIV_KEY_INFO *p8inf); + +PKCS12_SAFEBAG *PKCS12_item_pack_safebag(void *obj, const ASN1_ITEM *it, + int nid1, int nid2); +PKCS8_PRIV_KEY_INFO *PKCS8_decrypt(const X509_SIG *p8, const char *pass, + int passlen); +PKCS8_PRIV_KEY_INFO *PKCS12_decrypt_skey(const PKCS12_SAFEBAG *bag, + const char *pass, int passlen); +X509_SIG *PKCS8_encrypt(int pbe_nid, const EVP_CIPHER *cipher, + const 
char *pass, int passlen, unsigned char *salt, + int saltlen, int iter, PKCS8_PRIV_KEY_INFO *p8); +X509_SIG *PKCS8_set0_pbe(const char *pass, int passlen, + PKCS8_PRIV_KEY_INFO *p8inf, X509_ALGOR *pbe); +PKCS7 *PKCS12_pack_p7data(STACK_OF(PKCS12_SAFEBAG) *sk); +STACK_OF(PKCS12_SAFEBAG) *PKCS12_unpack_p7data(PKCS7 *p7); +PKCS7 *PKCS12_pack_p7encdata(int pbe_nid, const char *pass, int passlen, + unsigned char *salt, int saltlen, int iter, + STACK_OF(PKCS12_SAFEBAG) *bags); +STACK_OF(PKCS12_SAFEBAG) *PKCS12_unpack_p7encdata(PKCS7 *p7, const char *pass, + int passlen); + +int PKCS12_pack_authsafes(PKCS12 *p12, STACK_OF(PKCS7) *safes); +STACK_OF(PKCS7) *PKCS12_unpack_authsafes(const PKCS12 *p12); + +int PKCS12_add_localkeyid(PKCS12_SAFEBAG *bag, unsigned char *name, + int namelen); +int PKCS12_add_friendlyname_asc(PKCS12_SAFEBAG *bag, const char *name, + int namelen); +int PKCS12_add_friendlyname_utf8(PKCS12_SAFEBAG *bag, const char *name, + int namelen); +int PKCS12_add_CSPName_asc(PKCS12_SAFEBAG *bag, const char *name, + int namelen); +int PKCS12_add_friendlyname_uni(PKCS12_SAFEBAG *bag, + const unsigned char *name, int namelen); +int PKCS8_add_keyusage(PKCS8_PRIV_KEY_INFO *p8, int usage); +ASN1_TYPE *PKCS12_get_attr_gen(const STACK_OF(X509_ATTRIBUTE) *attrs, + int attr_nid); +char *PKCS12_get_friendlyname(PKCS12_SAFEBAG *bag); +const STACK_OF(X509_ATTRIBUTE) * +PKCS12_SAFEBAG_get0_attrs(const PKCS12_SAFEBAG *bag); +unsigned char *PKCS12_pbe_crypt(const X509_ALGOR *algor, + const char *pass, int passlen, + const unsigned char *in, int inlen, + unsigned char **data, int *datalen, + int en_de); +void *PKCS12_item_decrypt_d2i(const X509_ALGOR *algor, const ASN1_ITEM *it, + const char *pass, int passlen, + const ASN1_OCTET_STRING *oct, int zbuf); +ASN1_OCTET_STRING *PKCS12_item_i2d_encrypt(X509_ALGOR *algor, + const ASN1_ITEM *it, + const char *pass, int passlen, + void *obj, int zbuf); +PKCS12 *PKCS12_init(int mode); +int PKCS12_key_gen_asc(const char *pass, int passlen, unsigned char *salt, + int saltlen, int id, int iter, int n, + unsigned char *out, const EVP_MD *md_type); +int PKCS12_key_gen_uni(unsigned char *pass, int passlen, unsigned char *salt, + int saltlen, int id, int iter, int n, + unsigned char *out, const EVP_MD *md_type); +int PKCS12_key_gen_utf8(const char *pass, int passlen, unsigned char *salt, + int saltlen, int id, int iter, int n, + unsigned char *out, const EVP_MD *md_type); +int PKCS12_PBE_keyivgen(EVP_CIPHER_CTX *ctx, const char *pass, int passlen, + ASN1_TYPE *param, const EVP_CIPHER *cipher, + const EVP_MD *md_type, int en_de); +int PKCS12_gen_mac(PKCS12 *p12, const char *pass, int passlen, + unsigned char *mac, unsigned int *maclen); +int PKCS12_verify_mac(PKCS12 *p12, const char *pass, int passlen); +int PKCS12_set_mac(PKCS12 *p12, const char *pass, int passlen, + unsigned char *salt, int saltlen, int iter, + const EVP_MD *md_type); +int PKCS12_setup_mac(PKCS12 *p12, int iter, unsigned char *salt, + int saltlen, const EVP_MD *md_type); +unsigned char *OPENSSL_asc2uni(const char *asc, int asclen, + unsigned char **uni, int *unilen); +char *OPENSSL_uni2asc(const unsigned char *uni, int unilen); +unsigned char *OPENSSL_utf82uni(const char *asc, int asclen, + unsigned char **uni, int *unilen); +char *OPENSSL_uni2utf8(const unsigned char *uni, int unilen); + +DECLARE_ASN1_FUNCTIONS(PKCS12) +DECLARE_ASN1_FUNCTIONS(PKCS12_MAC_DATA) +DECLARE_ASN1_FUNCTIONS(PKCS12_SAFEBAG) +DECLARE_ASN1_FUNCTIONS(PKCS12_BAGS) + +DECLARE_ASN1_ITEM(PKCS12_SAFEBAGS) 
+DECLARE_ASN1_ITEM(PKCS12_AUTHSAFES) + +void PKCS12_PBE_add(void); +int PKCS12_parse(PKCS12 *p12, const char *pass, EVP_PKEY **pkey, X509 **cert, + STACK_OF(X509) **ca); +PKCS12 *PKCS12_create(const char *pass, const char *name, EVP_PKEY *pkey, + X509 *cert, STACK_OF(X509) *ca, int nid_key, int nid_cert, + int iter, int mac_iter, int keytype); + +PKCS12_SAFEBAG *PKCS12_add_cert(STACK_OF(PKCS12_SAFEBAG) **pbags, X509 *cert); +PKCS12_SAFEBAG *PKCS12_add_key(STACK_OF(PKCS12_SAFEBAG) **pbags, + EVP_PKEY *key, int key_usage, int iter, + int key_nid, const char *pass); +int PKCS12_add_safe(STACK_OF(PKCS7) **psafes, STACK_OF(PKCS12_SAFEBAG) *bags, + int safe_nid, int iter, const char *pass); +PKCS12 *PKCS12_add_safes(STACK_OF(PKCS7) *safes, int p7_nid); + +int i2d_PKCS12_bio(BIO *bp, PKCS12 *p12); +# ifndef OPENSSL_NO_STDIO +int i2d_PKCS12_fp(FILE *fp, PKCS12 *p12); +# endif +PKCS12 *d2i_PKCS12_bio(BIO *bp, PKCS12 **p12); +# ifndef OPENSSL_NO_STDIO +PKCS12 *d2i_PKCS12_fp(FILE *fp, PKCS12 **p12); +# endif +int PKCS12_newpass(PKCS12 *p12, const char *oldpass, const char *newpass); + +# ifdef __cplusplus +} +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/pkcs12err.h b/third_party/openssl/uos/sw_64/include/openssl/pkcs12err.h new file mode 100644 index 00000000..eff5eb26 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/pkcs12err.h @@ -0,0 +1,81 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_PKCS12ERR_H +# define HEADER_PKCS12ERR_H + +# ifndef HEADER_SYMHACKS_H +# include +# endif + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_PKCS12_strings(void); + +/* + * PKCS12 function codes. + */ +# define PKCS12_F_OPENSSL_ASC2UNI 121 +# define PKCS12_F_OPENSSL_UNI2ASC 124 +# define PKCS12_F_OPENSSL_UNI2UTF8 127 +# define PKCS12_F_OPENSSL_UTF82UNI 129 +# define PKCS12_F_PKCS12_CREATE 105 +# define PKCS12_F_PKCS12_GEN_MAC 107 +# define PKCS12_F_PKCS12_INIT 109 +# define PKCS12_F_PKCS12_ITEM_DECRYPT_D2I 106 +# define PKCS12_F_PKCS12_ITEM_I2D_ENCRYPT 108 +# define PKCS12_F_PKCS12_ITEM_PACK_SAFEBAG 117 +# define PKCS12_F_PKCS12_KEY_GEN_ASC 110 +# define PKCS12_F_PKCS12_KEY_GEN_UNI 111 +# define PKCS12_F_PKCS12_KEY_GEN_UTF8 116 +# define PKCS12_F_PKCS12_NEWPASS 128 +# define PKCS12_F_PKCS12_PACK_P7DATA 114 +# define PKCS12_F_PKCS12_PACK_P7ENCDATA 115 +# define PKCS12_F_PKCS12_PARSE 118 +# define PKCS12_F_PKCS12_PBE_CRYPT 119 +# define PKCS12_F_PKCS12_PBE_KEYIVGEN 120 +# define PKCS12_F_PKCS12_SAFEBAG_CREATE0_P8INF 112 +# define PKCS12_F_PKCS12_SAFEBAG_CREATE0_PKCS8 113 +# define PKCS12_F_PKCS12_SAFEBAG_CREATE_PKCS8_ENCRYPT 133 +# define PKCS12_F_PKCS12_SETUP_MAC 122 +# define PKCS12_F_PKCS12_SET_MAC 123 +# define PKCS12_F_PKCS12_UNPACK_AUTHSAFES 130 +# define PKCS12_F_PKCS12_UNPACK_P7DATA 131 +# define PKCS12_F_PKCS12_VERIFY_MAC 126 +# define PKCS12_F_PKCS8_ENCRYPT 125 +# define PKCS12_F_PKCS8_SET0_PBE 132 + +/* + * PKCS12 reason codes. 
+ */ +# define PKCS12_R_CANT_PACK_STRUCTURE 100 +# define PKCS12_R_CONTENT_TYPE_NOT_DATA 121 +# define PKCS12_R_DECODE_ERROR 101 +# define PKCS12_R_ENCODE_ERROR 102 +# define PKCS12_R_ENCRYPT_ERROR 103 +# define PKCS12_R_ERROR_SETTING_ENCRYPTED_DATA_TYPE 120 +# define PKCS12_R_INVALID_NULL_ARGUMENT 104 +# define PKCS12_R_INVALID_NULL_PKCS12_POINTER 105 +# define PKCS12_R_IV_GEN_ERROR 106 +# define PKCS12_R_KEY_GEN_ERROR 107 +# define PKCS12_R_MAC_ABSENT 108 +# define PKCS12_R_MAC_GENERATION_ERROR 109 +# define PKCS12_R_MAC_SETUP_ERROR 110 +# define PKCS12_R_MAC_STRING_SET_ERROR 111 +# define PKCS12_R_MAC_VERIFY_FAILURE 113 +# define PKCS12_R_PARSE_ERROR 114 +# define PKCS12_R_PKCS12_ALGOR_CIPHERINIT_ERROR 115 +# define PKCS12_R_PKCS12_CIPHERFINAL_ERROR 116 +# define PKCS12_R_PKCS12_PBE_CRYPT_ERROR 117 +# define PKCS12_R_UNKNOWN_DIGEST_ALGORITHM 118 +# define PKCS12_R_UNSUPPORTED_PKCS12_MODE 119 + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/pkcs7.h b/third_party/openssl/uos/sw_64/include/openssl/pkcs7.h new file mode 100644 index 00000000..9b66e002 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/pkcs7.h @@ -0,0 +1,319 @@ +/* + * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_PKCS7_H +# define HEADER_PKCS7_H + +# include +# include +# include + +# include +# include +# include + +#ifdef __cplusplus +extern "C" { +#endif + +/*- +Encryption_ID DES-CBC +Digest_ID MD5 +Digest_Encryption_ID rsaEncryption +Key_Encryption_ID rsaEncryption +*/ + +typedef struct pkcs7_issuer_and_serial_st { + X509_NAME *issuer; + ASN1_INTEGER *serial; +} PKCS7_ISSUER_AND_SERIAL; + +typedef struct pkcs7_signer_info_st { + ASN1_INTEGER *version; /* version 1 */ + PKCS7_ISSUER_AND_SERIAL *issuer_and_serial; + X509_ALGOR *digest_alg; + STACK_OF(X509_ATTRIBUTE) *auth_attr; /* [ 0 ] */ + X509_ALGOR *digest_enc_alg; + ASN1_OCTET_STRING *enc_digest; + STACK_OF(X509_ATTRIBUTE) *unauth_attr; /* [ 1 ] */ + /* The private key to sign with */ + EVP_PKEY *pkey; +} PKCS7_SIGNER_INFO; + +DEFINE_STACK_OF(PKCS7_SIGNER_INFO) + +typedef struct pkcs7_recip_info_st { + ASN1_INTEGER *version; /* version 0 */ + PKCS7_ISSUER_AND_SERIAL *issuer_and_serial; + X509_ALGOR *key_enc_algor; + ASN1_OCTET_STRING *enc_key; + X509 *cert; /* get the pub-key from this */ +} PKCS7_RECIP_INFO; + +DEFINE_STACK_OF(PKCS7_RECIP_INFO) + +typedef struct pkcs7_signed_st { + ASN1_INTEGER *version; /* version 1 */ + STACK_OF(X509_ALGOR) *md_algs; /* md used */ + STACK_OF(X509) *cert; /* [ 0 ] */ + STACK_OF(X509_CRL) *crl; /* [ 1 ] */ + STACK_OF(PKCS7_SIGNER_INFO) *signer_info; + struct pkcs7_st *contents; +} PKCS7_SIGNED; +/* + * The above structure is very very similar to PKCS7_SIGN_ENVELOPE. 
How about + * merging the two + */ + +typedef struct pkcs7_enc_content_st { + ASN1_OBJECT *content_type; + X509_ALGOR *algorithm; + ASN1_OCTET_STRING *enc_data; /* [ 0 ] */ + const EVP_CIPHER *cipher; +} PKCS7_ENC_CONTENT; + +typedef struct pkcs7_enveloped_st { + ASN1_INTEGER *version; /* version 0 */ + STACK_OF(PKCS7_RECIP_INFO) *recipientinfo; + PKCS7_ENC_CONTENT *enc_data; +} PKCS7_ENVELOPE; + +typedef struct pkcs7_signedandenveloped_st { + ASN1_INTEGER *version; /* version 1 */ + STACK_OF(X509_ALGOR) *md_algs; /* md used */ + STACK_OF(X509) *cert; /* [ 0 ] */ + STACK_OF(X509_CRL) *crl; /* [ 1 ] */ + STACK_OF(PKCS7_SIGNER_INFO) *signer_info; + PKCS7_ENC_CONTENT *enc_data; + STACK_OF(PKCS7_RECIP_INFO) *recipientinfo; +} PKCS7_SIGN_ENVELOPE; + +typedef struct pkcs7_digest_st { + ASN1_INTEGER *version; /* version 0 */ + X509_ALGOR *md; /* md used */ + struct pkcs7_st *contents; + ASN1_OCTET_STRING *digest; +} PKCS7_DIGEST; + +typedef struct pkcs7_encrypted_st { + ASN1_INTEGER *version; /* version 0 */ + PKCS7_ENC_CONTENT *enc_data; +} PKCS7_ENCRYPT; + +typedef struct pkcs7_st { + /* + * The following is non NULL if it contains ASN1 encoding of this + * structure + */ + unsigned char *asn1; + long length; +# define PKCS7_S_HEADER 0 +# define PKCS7_S_BODY 1 +# define PKCS7_S_TAIL 2 + int state; /* used during processing */ + int detached; + ASN1_OBJECT *type; + /* content as defined by the type */ + /* + * all encryption/message digests are applied to the 'contents', leaving + * out the 'type' field. + */ + union { + char *ptr; + /* NID_pkcs7_data */ + ASN1_OCTET_STRING *data; + /* NID_pkcs7_signed */ + PKCS7_SIGNED *sign; + /* NID_pkcs7_enveloped */ + PKCS7_ENVELOPE *enveloped; + /* NID_pkcs7_signedAndEnveloped */ + PKCS7_SIGN_ENVELOPE *signed_and_enveloped; + /* NID_pkcs7_digest */ + PKCS7_DIGEST *digest; + /* NID_pkcs7_encrypted */ + PKCS7_ENCRYPT *encrypted; + /* Anything else */ + ASN1_TYPE *other; + } d; +} PKCS7; + +DEFINE_STACK_OF(PKCS7) + +# define PKCS7_OP_SET_DETACHED_SIGNATURE 1 +# define PKCS7_OP_GET_DETACHED_SIGNATURE 2 + +# define PKCS7_get_signed_attributes(si) ((si)->auth_attr) +# define PKCS7_get_attributes(si) ((si)->unauth_attr) + +# define PKCS7_type_is_signed(a) (OBJ_obj2nid((a)->type) == NID_pkcs7_signed) +# define PKCS7_type_is_encrypted(a) (OBJ_obj2nid((a)->type) == NID_pkcs7_encrypted) +# define PKCS7_type_is_enveloped(a) (OBJ_obj2nid((a)->type) == NID_pkcs7_enveloped) +# define PKCS7_type_is_signedAndEnveloped(a) \ + (OBJ_obj2nid((a)->type) == NID_pkcs7_signedAndEnveloped) +# define PKCS7_type_is_data(a) (OBJ_obj2nid((a)->type) == NID_pkcs7_data) +# define PKCS7_type_is_digest(a) (OBJ_obj2nid((a)->type) == NID_pkcs7_digest) + +# define PKCS7_set_detached(p,v) \ + PKCS7_ctrl(p,PKCS7_OP_SET_DETACHED_SIGNATURE,v,NULL) +# define PKCS7_get_detached(p) \ + PKCS7_ctrl(p,PKCS7_OP_GET_DETACHED_SIGNATURE,0,NULL) + +# define PKCS7_is_detached(p7) (PKCS7_type_is_signed(p7) && PKCS7_get_detached(p7)) + +/* S/MIME related flags */ + +# define PKCS7_TEXT 0x1 +# define PKCS7_NOCERTS 0x2 +# define PKCS7_NOSIGS 0x4 +# define PKCS7_NOCHAIN 0x8 +# define PKCS7_NOINTERN 0x10 +# define PKCS7_NOVERIFY 0x20 +# define PKCS7_DETACHED 0x40 +# define PKCS7_BINARY 0x80 +# define PKCS7_NOATTR 0x100 +# define PKCS7_NOSMIMECAP 0x200 +# define PKCS7_NOOLDMIMETYPE 0x400 +# define PKCS7_CRLFEOL 0x800 +# define PKCS7_STREAM 0x1000 +# define PKCS7_NOCRL 0x2000 +# define PKCS7_PARTIAL 0x4000 +# define PKCS7_REUSE_DIGEST 0x8000 +# define PKCS7_NO_DUAL_CONTENT 0x10000 + +/* Flags: for compatibility with 
older code */ + +# define SMIME_TEXT PKCS7_TEXT +# define SMIME_NOCERTS PKCS7_NOCERTS +# define SMIME_NOSIGS PKCS7_NOSIGS +# define SMIME_NOCHAIN PKCS7_NOCHAIN +# define SMIME_NOINTERN PKCS7_NOINTERN +# define SMIME_NOVERIFY PKCS7_NOVERIFY +# define SMIME_DETACHED PKCS7_DETACHED +# define SMIME_BINARY PKCS7_BINARY +# define SMIME_NOATTR PKCS7_NOATTR + +/* CRLF ASCII canonicalisation */ +# define SMIME_ASCIICRLF 0x80000 + +DECLARE_ASN1_FUNCTIONS(PKCS7_ISSUER_AND_SERIAL) + +int PKCS7_ISSUER_AND_SERIAL_digest(PKCS7_ISSUER_AND_SERIAL *data, + const EVP_MD *type, unsigned char *md, + unsigned int *len); +# ifndef OPENSSL_NO_STDIO +PKCS7 *d2i_PKCS7_fp(FILE *fp, PKCS7 **p7); +int i2d_PKCS7_fp(FILE *fp, PKCS7 *p7); +# endif +PKCS7 *PKCS7_dup(PKCS7 *p7); +PKCS7 *d2i_PKCS7_bio(BIO *bp, PKCS7 **p7); +int i2d_PKCS7_bio(BIO *bp, PKCS7 *p7); +int i2d_PKCS7_bio_stream(BIO *out, PKCS7 *p7, BIO *in, int flags); +int PEM_write_bio_PKCS7_stream(BIO *out, PKCS7 *p7, BIO *in, int flags); + +DECLARE_ASN1_FUNCTIONS(PKCS7_SIGNER_INFO) +DECLARE_ASN1_FUNCTIONS(PKCS7_RECIP_INFO) +DECLARE_ASN1_FUNCTIONS(PKCS7_SIGNED) +DECLARE_ASN1_FUNCTIONS(PKCS7_ENC_CONTENT) +DECLARE_ASN1_FUNCTIONS(PKCS7_ENVELOPE) +DECLARE_ASN1_FUNCTIONS(PKCS7_SIGN_ENVELOPE) +DECLARE_ASN1_FUNCTIONS(PKCS7_DIGEST) +DECLARE_ASN1_FUNCTIONS(PKCS7_ENCRYPT) +DECLARE_ASN1_FUNCTIONS(PKCS7) + +DECLARE_ASN1_ITEM(PKCS7_ATTR_SIGN) +DECLARE_ASN1_ITEM(PKCS7_ATTR_VERIFY) + +DECLARE_ASN1_NDEF_FUNCTION(PKCS7) +DECLARE_ASN1_PRINT_FUNCTION(PKCS7) + +long PKCS7_ctrl(PKCS7 *p7, int cmd, long larg, char *parg); + +int PKCS7_set_type(PKCS7 *p7, int type); +int PKCS7_set0_type_other(PKCS7 *p7, int type, ASN1_TYPE *other); +int PKCS7_set_content(PKCS7 *p7, PKCS7 *p7_data); +int PKCS7_SIGNER_INFO_set(PKCS7_SIGNER_INFO *p7i, X509 *x509, EVP_PKEY *pkey, + const EVP_MD *dgst); +int PKCS7_SIGNER_INFO_sign(PKCS7_SIGNER_INFO *si); +int PKCS7_add_signer(PKCS7 *p7, PKCS7_SIGNER_INFO *p7i); +int PKCS7_add_certificate(PKCS7 *p7, X509 *x509); +int PKCS7_add_crl(PKCS7 *p7, X509_CRL *x509); +int PKCS7_content_new(PKCS7 *p7, int nid); +int PKCS7_dataVerify(X509_STORE *cert_store, X509_STORE_CTX *ctx, + BIO *bio, PKCS7 *p7, PKCS7_SIGNER_INFO *si); +int PKCS7_signatureVerify(BIO *bio, PKCS7 *p7, PKCS7_SIGNER_INFO *si, + X509 *x509); + +BIO *PKCS7_dataInit(PKCS7 *p7, BIO *bio); +int PKCS7_dataFinal(PKCS7 *p7, BIO *bio); +BIO *PKCS7_dataDecode(PKCS7 *p7, EVP_PKEY *pkey, BIO *in_bio, X509 *pcert); + +PKCS7_SIGNER_INFO *PKCS7_add_signature(PKCS7 *p7, X509 *x509, + EVP_PKEY *pkey, const EVP_MD *dgst); +X509 *PKCS7_cert_from_signer_info(PKCS7 *p7, PKCS7_SIGNER_INFO *si); +int PKCS7_set_digest(PKCS7 *p7, const EVP_MD *md); +STACK_OF(PKCS7_SIGNER_INFO) *PKCS7_get_signer_info(PKCS7 *p7); + +PKCS7_RECIP_INFO *PKCS7_add_recipient(PKCS7 *p7, X509 *x509); +void PKCS7_SIGNER_INFO_get0_algs(PKCS7_SIGNER_INFO *si, EVP_PKEY **pk, + X509_ALGOR **pdig, X509_ALGOR **psig); +void PKCS7_RECIP_INFO_get0_alg(PKCS7_RECIP_INFO *ri, X509_ALGOR **penc); +int PKCS7_add_recipient_info(PKCS7 *p7, PKCS7_RECIP_INFO *ri); +int PKCS7_RECIP_INFO_set(PKCS7_RECIP_INFO *p7i, X509 *x509); +int PKCS7_set_cipher(PKCS7 *p7, const EVP_CIPHER *cipher); +int PKCS7_stream(unsigned char ***boundary, PKCS7 *p7); + +PKCS7_ISSUER_AND_SERIAL *PKCS7_get_issuer_and_serial(PKCS7 *p7, int idx); +ASN1_OCTET_STRING *PKCS7_digest_from_attributes(STACK_OF(X509_ATTRIBUTE) *sk); +int PKCS7_add_signed_attribute(PKCS7_SIGNER_INFO *p7si, int nid, int type, + void *data); +int PKCS7_add_attribute(PKCS7_SIGNER_INFO *p7si, int nid, int atrtype, + void 
*value); +ASN1_TYPE *PKCS7_get_attribute(PKCS7_SIGNER_INFO *si, int nid); +ASN1_TYPE *PKCS7_get_signed_attribute(PKCS7_SIGNER_INFO *si, int nid); +int PKCS7_set_signed_attributes(PKCS7_SIGNER_INFO *p7si, + STACK_OF(X509_ATTRIBUTE) *sk); +int PKCS7_set_attributes(PKCS7_SIGNER_INFO *p7si, + STACK_OF(X509_ATTRIBUTE) *sk); + +PKCS7 *PKCS7_sign(X509 *signcert, EVP_PKEY *pkey, STACK_OF(X509) *certs, + BIO *data, int flags); + +PKCS7_SIGNER_INFO *PKCS7_sign_add_signer(PKCS7 *p7, + X509 *signcert, EVP_PKEY *pkey, + const EVP_MD *md, int flags); + +int PKCS7_final(PKCS7 *p7, BIO *data, int flags); +int PKCS7_verify(PKCS7 *p7, STACK_OF(X509) *certs, X509_STORE *store, + BIO *indata, BIO *out, int flags); +STACK_OF(X509) *PKCS7_get0_signers(PKCS7 *p7, STACK_OF(X509) *certs, + int flags); +PKCS7 *PKCS7_encrypt(STACK_OF(X509) *certs, BIO *in, const EVP_CIPHER *cipher, + int flags); +int PKCS7_decrypt(PKCS7 *p7, EVP_PKEY *pkey, X509 *cert, BIO *data, + int flags); + +int PKCS7_add_attrib_smimecap(PKCS7_SIGNER_INFO *si, + STACK_OF(X509_ALGOR) *cap); +STACK_OF(X509_ALGOR) *PKCS7_get_smimecap(PKCS7_SIGNER_INFO *si); +int PKCS7_simple_smimecap(STACK_OF(X509_ALGOR) *sk, int nid, int arg); + +int PKCS7_add_attrib_content_type(PKCS7_SIGNER_INFO *si, ASN1_OBJECT *coid); +int PKCS7_add0_attrib_signing_time(PKCS7_SIGNER_INFO *si, ASN1_TIME *t); +int PKCS7_add1_attrib_digest(PKCS7_SIGNER_INFO *si, + const unsigned char *md, int mdlen); + +int SMIME_write_PKCS7(BIO *bio, PKCS7 *p7, BIO *data, int flags); +PKCS7 *SMIME_read_PKCS7(BIO *bio, BIO **bcont); + +BIO *BIO_new_PKCS7(BIO *out, PKCS7 *p7); + +# ifdef __cplusplus +} +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/pkcs7err.h b/third_party/openssl/uos/sw_64/include/openssl/pkcs7err.h new file mode 100644 index 00000000..02e0299a --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/pkcs7err.h @@ -0,0 +1,103 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_PKCS7ERR_H +# define HEADER_PKCS7ERR_H + +# ifndef HEADER_SYMHACKS_H +# include +# endif + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_PKCS7_strings(void); + +/* + * PKCS7 function codes. 
+ */ +# define PKCS7_F_DO_PKCS7_SIGNED_ATTRIB 136 +# define PKCS7_F_PKCS7_ADD0_ATTRIB_SIGNING_TIME 135 +# define PKCS7_F_PKCS7_ADD_ATTRIB_SMIMECAP 118 +# define PKCS7_F_PKCS7_ADD_CERTIFICATE 100 +# define PKCS7_F_PKCS7_ADD_CRL 101 +# define PKCS7_F_PKCS7_ADD_RECIPIENT_INFO 102 +# define PKCS7_F_PKCS7_ADD_SIGNATURE 131 +# define PKCS7_F_PKCS7_ADD_SIGNER 103 +# define PKCS7_F_PKCS7_BIO_ADD_DIGEST 125 +# define PKCS7_F_PKCS7_COPY_EXISTING_DIGEST 138 +# define PKCS7_F_PKCS7_CTRL 104 +# define PKCS7_F_PKCS7_DATADECODE 112 +# define PKCS7_F_PKCS7_DATAFINAL 128 +# define PKCS7_F_PKCS7_DATAINIT 105 +# define PKCS7_F_PKCS7_DATAVERIFY 107 +# define PKCS7_F_PKCS7_DECRYPT 114 +# define PKCS7_F_PKCS7_DECRYPT_RINFO 133 +# define PKCS7_F_PKCS7_ENCODE_RINFO 132 +# define PKCS7_F_PKCS7_ENCRYPT 115 +# define PKCS7_F_PKCS7_FINAL 134 +# define PKCS7_F_PKCS7_FIND_DIGEST 127 +# define PKCS7_F_PKCS7_GET0_SIGNERS 124 +# define PKCS7_F_PKCS7_RECIP_INFO_SET 130 +# define PKCS7_F_PKCS7_SET_CIPHER 108 +# define PKCS7_F_PKCS7_SET_CONTENT 109 +# define PKCS7_F_PKCS7_SET_DIGEST 126 +# define PKCS7_F_PKCS7_SET_TYPE 110 +# define PKCS7_F_PKCS7_SIGN 116 +# define PKCS7_F_PKCS7_SIGNATUREVERIFY 113 +# define PKCS7_F_PKCS7_SIGNER_INFO_SET 129 +# define PKCS7_F_PKCS7_SIGNER_INFO_SIGN 139 +# define PKCS7_F_PKCS7_SIGN_ADD_SIGNER 137 +# define PKCS7_F_PKCS7_SIMPLE_SMIMECAP 119 +# define PKCS7_F_PKCS7_VERIFY 117 + +/* + * PKCS7 reason codes. + */ +# define PKCS7_R_CERTIFICATE_VERIFY_ERROR 117 +# define PKCS7_R_CIPHER_HAS_NO_OBJECT_IDENTIFIER 144 +# define PKCS7_R_CIPHER_NOT_INITIALIZED 116 +# define PKCS7_R_CONTENT_AND_DATA_PRESENT 118 +# define PKCS7_R_CTRL_ERROR 152 +# define PKCS7_R_DECRYPT_ERROR 119 +# define PKCS7_R_DIGEST_FAILURE 101 +# define PKCS7_R_ENCRYPTION_CTRL_FAILURE 149 +# define PKCS7_R_ENCRYPTION_NOT_SUPPORTED_FOR_THIS_KEY_TYPE 150 +# define PKCS7_R_ERROR_ADDING_RECIPIENT 120 +# define PKCS7_R_ERROR_SETTING_CIPHER 121 +# define PKCS7_R_INVALID_NULL_POINTER 143 +# define PKCS7_R_INVALID_SIGNED_DATA_TYPE 155 +# define PKCS7_R_NO_CONTENT 122 +# define PKCS7_R_NO_DEFAULT_DIGEST 151 +# define PKCS7_R_NO_MATCHING_DIGEST_TYPE_FOUND 154 +# define PKCS7_R_NO_RECIPIENT_MATCHES_CERTIFICATE 115 +# define PKCS7_R_NO_SIGNATURES_ON_DATA 123 +# define PKCS7_R_NO_SIGNERS 142 +# define PKCS7_R_OPERATION_NOT_SUPPORTED_ON_THIS_TYPE 104 +# define PKCS7_R_PKCS7_ADD_SIGNATURE_ERROR 124 +# define PKCS7_R_PKCS7_ADD_SIGNER_ERROR 153 +# define PKCS7_R_PKCS7_DATASIGN 145 +# define PKCS7_R_PRIVATE_KEY_DOES_NOT_MATCH_CERTIFICATE 127 +# define PKCS7_R_SIGNATURE_FAILURE 105 +# define PKCS7_R_SIGNER_CERTIFICATE_NOT_FOUND 128 +# define PKCS7_R_SIGNING_CTRL_FAILURE 147 +# define PKCS7_R_SIGNING_NOT_SUPPORTED_FOR_THIS_KEY_TYPE 148 +# define PKCS7_R_SMIME_TEXT_ERROR 129 +# define PKCS7_R_UNABLE_TO_FIND_CERTIFICATE 106 +# define PKCS7_R_UNABLE_TO_FIND_MEM_BIO 107 +# define PKCS7_R_UNABLE_TO_FIND_MESSAGE_DIGEST 108 +# define PKCS7_R_UNKNOWN_DIGEST_TYPE 109 +# define PKCS7_R_UNKNOWN_OPERATION 110 +# define PKCS7_R_UNSUPPORTED_CIPHER_TYPE 111 +# define PKCS7_R_UNSUPPORTED_CONTENT_TYPE 112 +# define PKCS7_R_WRONG_CONTENT_TYPE 113 +# define PKCS7_R_WRONG_PKCS7_TYPE 114 + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/rand.h b/third_party/openssl/uos/sw_64/include/openssl/rand.h new file mode 100644 index 00000000..38a2a271 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/rand.h @@ -0,0 +1,77 @@ +/* + * Copyright 1995-2018 The OpenSSL Project Authors. All Rights Reserved. 
+ * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_RAND_H +# define HEADER_RAND_H + +# include +# include +# include +# include + +#ifdef __cplusplus +extern "C" { +#endif + +struct rand_meth_st { + int (*seed) (const void *buf, int num); + int (*bytes) (unsigned char *buf, int num); + void (*cleanup) (void); + int (*add) (const void *buf, int num, double randomness); + int (*pseudorand) (unsigned char *buf, int num); + int (*status) (void); +}; + +int RAND_set_rand_method(const RAND_METHOD *meth); +const RAND_METHOD *RAND_get_rand_method(void); +# ifndef OPENSSL_NO_ENGINE +int RAND_set_rand_engine(ENGINE *engine); +# endif + +RAND_METHOD *RAND_OpenSSL(void); + +# if OPENSSL_API_COMPAT < 0x10100000L +# define RAND_cleanup() while(0) continue +# endif +int RAND_bytes(unsigned char *buf, int num); +int RAND_priv_bytes(unsigned char *buf, int num); +DEPRECATEDIN_1_1_0(int RAND_pseudo_bytes(unsigned char *buf, int num)) + +void RAND_seed(const void *buf, int num); +void RAND_keep_random_devices_open(int keep); + +# if defined(__ANDROID__) && defined(__NDK_FPABI__) +__NDK_FPABI__ /* __attribute__((pcs("aapcs"))) on ARM */ +# endif +void RAND_add(const void *buf, int num, double randomness); +int RAND_load_file(const char *file, long max_bytes); +int RAND_write_file(const char *file); +const char *RAND_file_name(char *file, size_t num); +int RAND_status(void); + +# ifndef OPENSSL_NO_EGD +int RAND_query_egd_bytes(const char *path, unsigned char *buf, int bytes); +int RAND_egd(const char *path); +int RAND_egd_bytes(const char *path, int bytes); +# endif + +int RAND_poll(void); + +# if defined(_WIN32) && (defined(BASETYPES) || defined(_WINDEF_H)) +/* application has to include in order to use these */ +DEPRECATEDIN_1_1_0(void RAND_screen(void)) +DEPRECATEDIN_1_1_0(int RAND_event(UINT, WPARAM, LPARAM)) +# endif + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/rand_drbg.h b/third_party/openssl/uos/sw_64/include/openssl/rand_drbg.h new file mode 100644 index 00000000..45b731b7 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/rand_drbg.h @@ -0,0 +1,130 @@ +/* + * Copyright 2017-2018 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_DRBG_RAND_H +# define HEADER_DRBG_RAND_H + +# include +# include +# include + +/* + * RAND_DRBG flags + * + * Note: if new flags are added, the constant `rand_drbg_used_flags` + * in drbg_lib.c needs to be updated accordingly. + */ + +/* In CTR mode, disable derivation function ctr_df */ +# define RAND_DRBG_FLAG_CTR_NO_DF 0x1 + + +# if OPENSSL_API_COMPAT < 0x10200000L +/* This #define was replaced by an internal constant and should not be used. */ +# define RAND_DRBG_USED_FLAGS (RAND_DRBG_FLAG_CTR_NO_DF) +# endif + +/* + * Default security strength (in the sense of [NIST SP 800-90Ar1]) + * + * NIST SP 800-90Ar1 supports the strength of the DRBG being smaller than that + * of the cipher by collecting less entropy. 
The current DRBG implementation + * does not take RAND_DRBG_STRENGTH into account and sets the strength of the + * DRBG to that of the cipher. + * + * RAND_DRBG_STRENGTH is currently only used for the legacy RAND + * implementation. + * + * Currently supported ciphers are: NID_aes_128_ctr, NID_aes_192_ctr and + * NID_aes_256_ctr + */ +# define RAND_DRBG_STRENGTH 256 +/* Default drbg type */ +# define RAND_DRBG_TYPE NID_aes_256_ctr +/* Default drbg flags */ +# define RAND_DRBG_FLAGS 0 + + +# ifdef __cplusplus +extern "C" { +# endif + +/* + * Object lifetime functions. + */ +RAND_DRBG *RAND_DRBG_new(int type, unsigned int flags, RAND_DRBG *parent); +RAND_DRBG *RAND_DRBG_secure_new(int type, unsigned int flags, RAND_DRBG *parent); +int RAND_DRBG_set(RAND_DRBG *drbg, int type, unsigned int flags); +int RAND_DRBG_set_defaults(int type, unsigned int flags); +int RAND_DRBG_instantiate(RAND_DRBG *drbg, + const unsigned char *pers, size_t perslen); +int RAND_DRBG_uninstantiate(RAND_DRBG *drbg); +void RAND_DRBG_free(RAND_DRBG *drbg); + +/* + * Object "use" functions. + */ +int RAND_DRBG_reseed(RAND_DRBG *drbg, + const unsigned char *adin, size_t adinlen, + int prediction_resistance); +int RAND_DRBG_generate(RAND_DRBG *drbg, unsigned char *out, size_t outlen, + int prediction_resistance, + const unsigned char *adin, size_t adinlen); +int RAND_DRBG_bytes(RAND_DRBG *drbg, unsigned char *out, size_t outlen); + +int RAND_DRBG_set_reseed_interval(RAND_DRBG *drbg, unsigned int interval); +int RAND_DRBG_set_reseed_time_interval(RAND_DRBG *drbg, time_t interval); + +int RAND_DRBG_set_reseed_defaults( + unsigned int master_reseed_interval, + unsigned int slave_reseed_interval, + time_t master_reseed_time_interval, + time_t slave_reseed_time_interval + ); + +RAND_DRBG *RAND_DRBG_get0_master(void); +RAND_DRBG *RAND_DRBG_get0_public(void); +RAND_DRBG *RAND_DRBG_get0_private(void); + +/* + * EXDATA + */ +# define RAND_DRBG_get_ex_new_index(l, p, newf, dupf, freef) \ + CRYPTO_get_ex_new_index(CRYPTO_EX_INDEX_DRBG, l, p, newf, dupf, freef) +int RAND_DRBG_set_ex_data(RAND_DRBG *drbg, int idx, void *arg); +void *RAND_DRBG_get_ex_data(const RAND_DRBG *drbg, int idx); + +/* + * Callback function typedefs + */ +typedef size_t (*RAND_DRBG_get_entropy_fn)(RAND_DRBG *drbg, + unsigned char **pout, + int entropy, size_t min_len, + size_t max_len, + int prediction_resistance); +typedef void (*RAND_DRBG_cleanup_entropy_fn)(RAND_DRBG *ctx, + unsigned char *out, size_t outlen); +typedef size_t (*RAND_DRBG_get_nonce_fn)(RAND_DRBG *drbg, unsigned char **pout, + int entropy, size_t min_len, + size_t max_len); +typedef void (*RAND_DRBG_cleanup_nonce_fn)(RAND_DRBG *drbg, + unsigned char *out, size_t outlen); + +int RAND_DRBG_set_callbacks(RAND_DRBG *drbg, + RAND_DRBG_get_entropy_fn get_entropy, + RAND_DRBG_cleanup_entropy_fn cleanup_entropy, + RAND_DRBG_get_nonce_fn get_nonce, + RAND_DRBG_cleanup_nonce_fn cleanup_nonce); + + +# ifdef __cplusplus +} +# endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/randerr.h b/third_party/openssl/uos/sw_64/include/openssl/randerr.h new file mode 100644 index 00000000..79d57905 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/randerr.h @@ -0,0 +1,94 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_RANDERR_H +# define HEADER_RANDERR_H + +# include + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_RAND_strings(void); + +/* + * RAND function codes. + */ +# define RAND_F_DATA_COLLECT_METHOD 127 +# define RAND_F_DRBG_BYTES 101 +# define RAND_F_DRBG_GET_ENTROPY 105 +# define RAND_F_DRBG_SETUP 117 +# define RAND_F_GET_ENTROPY 106 +# define RAND_F_RAND_BYTES 100 +# define RAND_F_RAND_DRBG_ENABLE_LOCKING 119 +# define RAND_F_RAND_DRBG_GENERATE 107 +# define RAND_F_RAND_DRBG_GET_ENTROPY 120 +# define RAND_F_RAND_DRBG_GET_NONCE 123 +# define RAND_F_RAND_DRBG_INSTANTIATE 108 +# define RAND_F_RAND_DRBG_NEW 109 +# define RAND_F_RAND_DRBG_RESEED 110 +# define RAND_F_RAND_DRBG_RESTART 102 +# define RAND_F_RAND_DRBG_SET 104 +# define RAND_F_RAND_DRBG_SET_DEFAULTS 121 +# define RAND_F_RAND_DRBG_UNINSTANTIATE 118 +# define RAND_F_RAND_LOAD_FILE 111 +# define RAND_F_RAND_POOL_ACQUIRE_ENTROPY 122 +# define RAND_F_RAND_POOL_ADD 103 +# define RAND_F_RAND_POOL_ADD_BEGIN 113 +# define RAND_F_RAND_POOL_ADD_END 114 +# define RAND_F_RAND_POOL_ATTACH 124 +# define RAND_F_RAND_POOL_BYTES_NEEDED 115 +# define RAND_F_RAND_POOL_GROW 125 +# define RAND_F_RAND_POOL_NEW 116 +# define RAND_F_RAND_PSEUDO_BYTES 126 +# define RAND_F_RAND_WRITE_FILE 112 + +/* + * RAND reason codes. + */ +# define RAND_R_ADDITIONAL_INPUT_TOO_LONG 102 +# define RAND_R_ALREADY_INSTANTIATED 103 +# define RAND_R_ARGUMENT_OUT_OF_RANGE 105 +# define RAND_R_CANNOT_OPEN_FILE 121 +# define RAND_R_DRBG_ALREADY_INITIALIZED 129 +# define RAND_R_DRBG_NOT_INITIALISED 104 +# define RAND_R_ENTROPY_INPUT_TOO_LONG 106 +# define RAND_R_ENTROPY_OUT_OF_RANGE 124 +# define RAND_R_ERROR_ENTROPY_POOL_WAS_IGNORED 127 +# define RAND_R_ERROR_INITIALISING_DRBG 107 +# define RAND_R_ERROR_INSTANTIATING_DRBG 108 +# define RAND_R_ERROR_RETRIEVING_ADDITIONAL_INPUT 109 +# define RAND_R_ERROR_RETRIEVING_ENTROPY 110 +# define RAND_R_ERROR_RETRIEVING_NONCE 111 +# define RAND_R_FAILED_TO_CREATE_LOCK 126 +# define RAND_R_FUNC_NOT_IMPLEMENTED 101 +# define RAND_R_FWRITE_ERROR 123 +# define RAND_R_GENERATE_ERROR 112 +# define RAND_R_INTERNAL_ERROR 113 +# define RAND_R_IN_ERROR_STATE 114 +# define RAND_R_NOT_A_REGULAR_FILE 122 +# define RAND_R_NOT_INSTANTIATED 115 +# define RAND_R_NO_DRBG_IMPLEMENTATION_SELECTED 128 +# define RAND_R_PARENT_LOCKING_NOT_ENABLED 130 +# define RAND_R_PARENT_STRENGTH_TOO_WEAK 131 +# define RAND_R_PERSONALISATION_STRING_TOO_LONG 116 +# define RAND_R_PREDICTION_RESISTANCE_NOT_SUPPORTED 133 +# define RAND_R_PRNG_NOT_SEEDED 100 +# define RAND_R_RANDOM_POOL_OVERFLOW 125 +# define RAND_R_RANDOM_POOL_UNDERFLOW 134 +# define RAND_R_REQUEST_TOO_LARGE_FOR_DRBG 117 +# define RAND_R_RESEED_ERROR 118 +# define RAND_R_SELFTEST_FAILURE 119 +# define RAND_R_TOO_LITTLE_NONCE_REQUESTED 135 +# define RAND_R_TOO_MUCH_NONCE_REQUESTED 136 +# define RAND_R_UNSUPPORTED_DRBG_FLAGS 132 +# define RAND_R_UNSUPPORTED_DRBG_TYPE 120 + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/rc2.h b/third_party/openssl/uos/sw_64/include/openssl/rc2.h new file mode 100644 index 00000000..585f9e4c --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/rc2.h @@ -0,0 +1,51 @@ +/* + * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_RC2_H +# define HEADER_RC2_H + +# include + +# ifndef OPENSSL_NO_RC2 +# ifdef __cplusplus +extern "C" { +# endif + +typedef unsigned int RC2_INT; + +# define RC2_ENCRYPT 1 +# define RC2_DECRYPT 0 + +# define RC2_BLOCK 8 +# define RC2_KEY_LENGTH 16 + +typedef struct rc2_key_st { + RC2_INT data[64]; +} RC2_KEY; + +void RC2_set_key(RC2_KEY *key, int len, const unsigned char *data, int bits); +void RC2_ecb_encrypt(const unsigned char *in, unsigned char *out, + RC2_KEY *key, int enc); +void RC2_encrypt(unsigned long *data, RC2_KEY *key); +void RC2_decrypt(unsigned long *data, RC2_KEY *key); +void RC2_cbc_encrypt(const unsigned char *in, unsigned char *out, long length, + RC2_KEY *ks, unsigned char *iv, int enc); +void RC2_cfb64_encrypt(const unsigned char *in, unsigned char *out, + long length, RC2_KEY *schedule, unsigned char *ivec, + int *num, int enc); +void RC2_ofb64_encrypt(const unsigned char *in, unsigned char *out, + long length, RC2_KEY *schedule, unsigned char *ivec, + int *num); + +# ifdef __cplusplus +} +# endif +# endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/rc4.h b/third_party/openssl/uos/sw_64/include/openssl/rc4.h new file mode 100644 index 00000000..86803b37 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/rc4.h @@ -0,0 +1,36 @@ +/* + * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_RC4_H +# define HEADER_RC4_H + +# include + +# ifndef OPENSSL_NO_RC4 +# include +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct rc4_key_st { + RC4_INT x, y; + RC4_INT data[256]; +} RC4_KEY; + +const char *RC4_options(void); +void RC4_set_key(RC4_KEY *key, int len, const unsigned char *data); +void RC4(RC4_KEY *key, size_t len, const unsigned char *indata, + unsigned char *outdata); + +# ifdef __cplusplus +} +# endif +# endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/rc5.h b/third_party/openssl/uos/sw_64/include/openssl/rc5.h new file mode 100644 index 00000000..793f88e4 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/rc5.h @@ -0,0 +1,63 @@ +/* + * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_RC5_H +# define HEADER_RC5_H + +# include + +# ifndef OPENSSL_NO_RC5 +# ifdef __cplusplus +extern "C" { +# endif + +# define RC5_ENCRYPT 1 +# define RC5_DECRYPT 0 + +# define RC5_32_INT unsigned int + +# define RC5_32_BLOCK 8 +# define RC5_32_KEY_LENGTH 16/* This is a default, max is 255 */ + +/* + * This are the only values supported. 
Tweak the code if you want more The + * most supported modes will be RC5-32/12/16 RC5-32/16/8 + */ +# define RC5_8_ROUNDS 8 +# define RC5_12_ROUNDS 12 +# define RC5_16_ROUNDS 16 + +typedef struct rc5_key_st { + /* Number of rounds */ + int rounds; + RC5_32_INT data[2 * (RC5_16_ROUNDS + 1)]; +} RC5_32_KEY; + +void RC5_32_set_key(RC5_32_KEY *key, int len, const unsigned char *data, + int rounds); +void RC5_32_ecb_encrypt(const unsigned char *in, unsigned char *out, + RC5_32_KEY *key, int enc); +void RC5_32_encrypt(unsigned long *data, RC5_32_KEY *key); +void RC5_32_decrypt(unsigned long *data, RC5_32_KEY *key); +void RC5_32_cbc_encrypt(const unsigned char *in, unsigned char *out, + long length, RC5_32_KEY *ks, unsigned char *iv, + int enc); +void RC5_32_cfb64_encrypt(const unsigned char *in, unsigned char *out, + long length, RC5_32_KEY *schedule, + unsigned char *ivec, int *num, int enc); +void RC5_32_ofb64_encrypt(const unsigned char *in, unsigned char *out, + long length, RC5_32_KEY *schedule, + unsigned char *ivec, int *num); + +# ifdef __cplusplus +} +# endif +# endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/ripemd.h b/third_party/openssl/uos/sw_64/include/openssl/ripemd.h new file mode 100644 index 00000000..c42026aa --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/ripemd.h @@ -0,0 +1,47 @@ +/* + * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_RIPEMD_H +# define HEADER_RIPEMD_H + +# include + +#ifndef OPENSSL_NO_RMD160 +# include +# include +# ifdef __cplusplus +extern "C" { +# endif + +# define RIPEMD160_LONG unsigned int + +# define RIPEMD160_CBLOCK 64 +# define RIPEMD160_LBLOCK (RIPEMD160_CBLOCK/4) +# define RIPEMD160_DIGEST_LENGTH 20 + +typedef struct RIPEMD160state_st { + RIPEMD160_LONG A, B, C, D, E; + RIPEMD160_LONG Nl, Nh; + RIPEMD160_LONG data[RIPEMD160_LBLOCK]; + unsigned int num; +} RIPEMD160_CTX; + +int RIPEMD160_Init(RIPEMD160_CTX *c); +int RIPEMD160_Update(RIPEMD160_CTX *c, const void *data, size_t len); +int RIPEMD160_Final(unsigned char *md, RIPEMD160_CTX *c); +unsigned char *RIPEMD160(const unsigned char *d, size_t n, unsigned char *md); +void RIPEMD160_Transform(RIPEMD160_CTX *c, const unsigned char *b); + +# ifdef __cplusplus +} +# endif +# endif + + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/rsa.h b/third_party/openssl/uos/sw_64/include/openssl/rsa.h new file mode 100644 index 00000000..5e76365c --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/rsa.h @@ -0,0 +1,513 @@ +/* + * Copyright 1995-2018 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_RSA_H +# define HEADER_RSA_H + +# include + +# ifndef OPENSSL_NO_RSA +# include +# include +# include +# include +# if OPENSSL_API_COMPAT < 0x10100000L +# include +# endif +# include +# ifdef __cplusplus +extern "C" { +# endif + +/* The types RSA and RSA_METHOD are defined in ossl_typ.h */ + +# ifndef OPENSSL_RSA_MAX_MODULUS_BITS +# define OPENSSL_RSA_MAX_MODULUS_BITS 16384 +# endif + +# define OPENSSL_RSA_FIPS_MIN_MODULUS_BITS 1024 + +# ifndef OPENSSL_RSA_SMALL_MODULUS_BITS +# define OPENSSL_RSA_SMALL_MODULUS_BITS 3072 +# endif +# ifndef OPENSSL_RSA_MAX_PUBEXP_BITS + +/* exponent limit enforced for "large" modulus only */ +# define OPENSSL_RSA_MAX_PUBEXP_BITS 64 +# endif + +# define RSA_3 0x3L +# define RSA_F4 0x10001L + +/* based on RFC 8017 appendix A.1.2 */ +# define RSA_ASN1_VERSION_DEFAULT 0 +# define RSA_ASN1_VERSION_MULTI 1 + +# define RSA_DEFAULT_PRIME_NUM 2 + +# define RSA_METHOD_FLAG_NO_CHECK 0x0001/* don't check pub/private + * match */ + +# define RSA_FLAG_CACHE_PUBLIC 0x0002 +# define RSA_FLAG_CACHE_PRIVATE 0x0004 +# define RSA_FLAG_BLINDING 0x0008 +# define RSA_FLAG_THREAD_SAFE 0x0010 +/* + * This flag means the private key operations will be handled by rsa_mod_exp + * and that they do not depend on the private key components being present: + * for example a key stored in external hardware. Without this flag + * bn_mod_exp gets called when private key components are absent. + */ +# define RSA_FLAG_EXT_PKEY 0x0020 + +/* + * new with 0.9.6j and 0.9.7b; the built-in + * RSA implementation now uses blinding by + * default (ignoring RSA_FLAG_BLINDING), + * but other engines might not need it + */ +# define RSA_FLAG_NO_BLINDING 0x0080 +# if OPENSSL_API_COMPAT < 0x10100000L +/* + * Does nothing. Previously this switched off constant time behaviour. + */ +# define RSA_FLAG_NO_CONSTTIME 0x0000 +# endif +# if OPENSSL_API_COMPAT < 0x00908000L +/* deprecated name for the flag*/ +/* + * new with 0.9.7h; the built-in RSA + * implementation now uses constant time + * modular exponentiation for secret exponents + * by default. This flag causes the + * faster variable sliding window method to + * be used for all exponents. 
+ */ +# define RSA_FLAG_NO_EXP_CONSTTIME RSA_FLAG_NO_CONSTTIME +# endif + +# define EVP_PKEY_CTX_set_rsa_padding(ctx, pad) \ + RSA_pkey_ctx_ctrl(ctx, -1, EVP_PKEY_CTRL_RSA_PADDING, pad, NULL) + +# define EVP_PKEY_CTX_get_rsa_padding(ctx, ppad) \ + RSA_pkey_ctx_ctrl(ctx, -1, EVP_PKEY_CTRL_GET_RSA_PADDING, 0, ppad) + +# define EVP_PKEY_CTX_set_rsa_pss_saltlen(ctx, len) \ + RSA_pkey_ctx_ctrl(ctx, (EVP_PKEY_OP_SIGN|EVP_PKEY_OP_VERIFY), \ + EVP_PKEY_CTRL_RSA_PSS_SALTLEN, len, NULL) +/* Salt length matches digest */ +# define RSA_PSS_SALTLEN_DIGEST -1 +/* Verify only: auto detect salt length */ +# define RSA_PSS_SALTLEN_AUTO -2 +/* Set salt length to maximum possible */ +# define RSA_PSS_SALTLEN_MAX -3 +/* Old compatible max salt length for sign only */ +# define RSA_PSS_SALTLEN_MAX_SIGN -2 + +# define EVP_PKEY_CTX_set_rsa_pss_keygen_saltlen(ctx, len) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA_PSS, EVP_PKEY_OP_KEYGEN, \ + EVP_PKEY_CTRL_RSA_PSS_SALTLEN, len, NULL) + +# define EVP_PKEY_CTX_get_rsa_pss_saltlen(ctx, plen) \ + RSA_pkey_ctx_ctrl(ctx, (EVP_PKEY_OP_SIGN|EVP_PKEY_OP_VERIFY), \ + EVP_PKEY_CTRL_GET_RSA_PSS_SALTLEN, 0, plen) + +# define EVP_PKEY_CTX_set_rsa_keygen_bits(ctx, bits) \ + RSA_pkey_ctx_ctrl(ctx, EVP_PKEY_OP_KEYGEN, \ + EVP_PKEY_CTRL_RSA_KEYGEN_BITS, bits, NULL) + +# define EVP_PKEY_CTX_set_rsa_keygen_pubexp(ctx, pubexp) \ + RSA_pkey_ctx_ctrl(ctx, EVP_PKEY_OP_KEYGEN, \ + EVP_PKEY_CTRL_RSA_KEYGEN_PUBEXP, 0, pubexp) + +# define EVP_PKEY_CTX_set_rsa_keygen_primes(ctx, primes) \ + RSA_pkey_ctx_ctrl(ctx, EVP_PKEY_OP_KEYGEN, \ + EVP_PKEY_CTRL_RSA_KEYGEN_PRIMES, primes, NULL) + +# define EVP_PKEY_CTX_set_rsa_mgf1_md(ctx, md) \ + RSA_pkey_ctx_ctrl(ctx, EVP_PKEY_OP_TYPE_SIG | EVP_PKEY_OP_TYPE_CRYPT, \ + EVP_PKEY_CTRL_RSA_MGF1_MD, 0, (void *)(md)) + +# define EVP_PKEY_CTX_set_rsa_pss_keygen_mgf1_md(ctx, md) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA_PSS, EVP_PKEY_OP_KEYGEN, \ + EVP_PKEY_CTRL_RSA_MGF1_MD, 0, (void *)(md)) + +# define EVP_PKEY_CTX_set_rsa_oaep_md(ctx, md) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA, EVP_PKEY_OP_TYPE_CRYPT, \ + EVP_PKEY_CTRL_RSA_OAEP_MD, 0, (void *)(md)) + +# define EVP_PKEY_CTX_get_rsa_mgf1_md(ctx, pmd) \ + RSA_pkey_ctx_ctrl(ctx, EVP_PKEY_OP_TYPE_SIG | EVP_PKEY_OP_TYPE_CRYPT, \ + EVP_PKEY_CTRL_GET_RSA_MGF1_MD, 0, (void *)(pmd)) + +# define EVP_PKEY_CTX_get_rsa_oaep_md(ctx, pmd) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA, EVP_PKEY_OP_TYPE_CRYPT, \ + EVP_PKEY_CTRL_GET_RSA_OAEP_MD, 0, (void *)(pmd)) + +# define EVP_PKEY_CTX_set0_rsa_oaep_label(ctx, l, llen) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA, EVP_PKEY_OP_TYPE_CRYPT, \ + EVP_PKEY_CTRL_RSA_OAEP_LABEL, llen, (void *)(l)) + +# define EVP_PKEY_CTX_get0_rsa_oaep_label(ctx, l) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA, EVP_PKEY_OP_TYPE_CRYPT, \ + EVP_PKEY_CTRL_GET_RSA_OAEP_LABEL, 0, (void *)(l)) + +# define EVP_PKEY_CTX_set_rsa_pss_keygen_md(ctx, md) \ + EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA_PSS, \ + EVP_PKEY_OP_KEYGEN, EVP_PKEY_CTRL_MD, \ + 0, (void *)(md)) + +# define EVP_PKEY_CTRL_RSA_PADDING (EVP_PKEY_ALG_CTRL + 1) +# define EVP_PKEY_CTRL_RSA_PSS_SALTLEN (EVP_PKEY_ALG_CTRL + 2) + +# define EVP_PKEY_CTRL_RSA_KEYGEN_BITS (EVP_PKEY_ALG_CTRL + 3) +# define EVP_PKEY_CTRL_RSA_KEYGEN_PUBEXP (EVP_PKEY_ALG_CTRL + 4) +# define EVP_PKEY_CTRL_RSA_MGF1_MD (EVP_PKEY_ALG_CTRL + 5) + +# define EVP_PKEY_CTRL_GET_RSA_PADDING (EVP_PKEY_ALG_CTRL + 6) +# define EVP_PKEY_CTRL_GET_RSA_PSS_SALTLEN (EVP_PKEY_ALG_CTRL + 7) +# define EVP_PKEY_CTRL_GET_RSA_MGF1_MD (EVP_PKEY_ALG_CTRL + 8) + +# define EVP_PKEY_CTRL_RSA_OAEP_MD (EVP_PKEY_ALG_CTRL + 9) +# 
define EVP_PKEY_CTRL_RSA_OAEP_LABEL (EVP_PKEY_ALG_CTRL + 10) + +# define EVP_PKEY_CTRL_GET_RSA_OAEP_MD (EVP_PKEY_ALG_CTRL + 11) +# define EVP_PKEY_CTRL_GET_RSA_OAEP_LABEL (EVP_PKEY_ALG_CTRL + 12) + +# define EVP_PKEY_CTRL_RSA_KEYGEN_PRIMES (EVP_PKEY_ALG_CTRL + 13) + +# define RSA_PKCS1_PADDING 1 +# define RSA_SSLV23_PADDING 2 +# define RSA_NO_PADDING 3 +# define RSA_PKCS1_OAEP_PADDING 4 +# define RSA_X931_PADDING 5 +/* EVP_PKEY_ only */ +# define RSA_PKCS1_PSS_PADDING 6 + +# define RSA_PKCS1_PADDING_SIZE 11 + +# define RSA_set_app_data(s,arg) RSA_set_ex_data(s,0,arg) +# define RSA_get_app_data(s) RSA_get_ex_data(s,0) + +RSA *RSA_new(void); +RSA *RSA_new_method(ENGINE *engine); +int RSA_bits(const RSA *rsa); +int RSA_size(const RSA *rsa); +int RSA_security_bits(const RSA *rsa); + +int RSA_set0_key(RSA *r, BIGNUM *n, BIGNUM *e, BIGNUM *d); +int RSA_set0_factors(RSA *r, BIGNUM *p, BIGNUM *q); +int RSA_set0_crt_params(RSA *r,BIGNUM *dmp1, BIGNUM *dmq1, BIGNUM *iqmp); +int RSA_set0_multi_prime_params(RSA *r, BIGNUM *primes[], BIGNUM *exps[], + BIGNUM *coeffs[], int pnum); +void RSA_get0_key(const RSA *r, + const BIGNUM **n, const BIGNUM **e, const BIGNUM **d); +void RSA_get0_factors(const RSA *r, const BIGNUM **p, const BIGNUM **q); +int RSA_get_multi_prime_extra_count(const RSA *r); +int RSA_get0_multi_prime_factors(const RSA *r, const BIGNUM *primes[]); +void RSA_get0_crt_params(const RSA *r, + const BIGNUM **dmp1, const BIGNUM **dmq1, + const BIGNUM **iqmp); +int RSA_get0_multi_prime_crt_params(const RSA *r, const BIGNUM *exps[], + const BIGNUM *coeffs[]); +const BIGNUM *RSA_get0_n(const RSA *d); +const BIGNUM *RSA_get0_e(const RSA *d); +const BIGNUM *RSA_get0_d(const RSA *d); +const BIGNUM *RSA_get0_p(const RSA *d); +const BIGNUM *RSA_get0_q(const RSA *d); +const BIGNUM *RSA_get0_dmp1(const RSA *r); +const BIGNUM *RSA_get0_dmq1(const RSA *r); +const BIGNUM *RSA_get0_iqmp(const RSA *r); +const RSA_PSS_PARAMS *RSA_get0_pss_params(const RSA *r); +void RSA_clear_flags(RSA *r, int flags); +int RSA_test_flags(const RSA *r, int flags); +void RSA_set_flags(RSA *r, int flags); +int RSA_get_version(RSA *r); +ENGINE *RSA_get0_engine(const RSA *r); + +/* Deprecated version */ +DEPRECATEDIN_0_9_8(RSA *RSA_generate_key(int bits, unsigned long e, void + (*callback) (int, int, void *), + void *cb_arg)) + +/* New version */ +int RSA_generate_key_ex(RSA *rsa, int bits, BIGNUM *e, BN_GENCB *cb); +/* Multi-prime version */ +int RSA_generate_multi_prime_key(RSA *rsa, int bits, int primes, + BIGNUM *e, BN_GENCB *cb); + +int RSA_X931_derive_ex(RSA *rsa, BIGNUM *p1, BIGNUM *p2, BIGNUM *q1, + BIGNUM *q2, const BIGNUM *Xp1, const BIGNUM *Xp2, + const BIGNUM *Xp, const BIGNUM *Xq1, const BIGNUM *Xq2, + const BIGNUM *Xq, const BIGNUM *e, BN_GENCB *cb); +int RSA_X931_generate_key_ex(RSA *rsa, int bits, const BIGNUM *e, + BN_GENCB *cb); + +int RSA_check_key(const RSA *); +int RSA_check_key_ex(const RSA *, BN_GENCB *cb); + /* next 4 return -1 on error */ +int RSA_public_encrypt(int flen, const unsigned char *from, + unsigned char *to, RSA *rsa, int padding); +int RSA_private_encrypt(int flen, const unsigned char *from, + unsigned char *to, RSA *rsa, int padding); +int RSA_public_decrypt(int flen, const unsigned char *from, + unsigned char *to, RSA *rsa, int padding); +int RSA_private_decrypt(int flen, const unsigned char *from, + unsigned char *to, RSA *rsa, int padding); +void RSA_free(RSA *r); +/* "up" the RSA object's reference count */ +int RSA_up_ref(RSA *r); + +int RSA_flags(const RSA *r); + +void 
RSA_set_default_method(const RSA_METHOD *meth); +const RSA_METHOD *RSA_get_default_method(void); +const RSA_METHOD *RSA_null_method(void); +const RSA_METHOD *RSA_get_method(const RSA *rsa); +int RSA_set_method(RSA *rsa, const RSA_METHOD *meth); + +/* these are the actual RSA functions */ +const RSA_METHOD *RSA_PKCS1_OpenSSL(void); + +int RSA_pkey_ctx_ctrl(EVP_PKEY_CTX *ctx, int optype, int cmd, int p1, void *p2); + +DECLARE_ASN1_ENCODE_FUNCTIONS_const(RSA, RSAPublicKey) +DECLARE_ASN1_ENCODE_FUNCTIONS_const(RSA, RSAPrivateKey) + +struct rsa_pss_params_st { + X509_ALGOR *hashAlgorithm; + X509_ALGOR *maskGenAlgorithm; + ASN1_INTEGER *saltLength; + ASN1_INTEGER *trailerField; + /* Decoded hash algorithm from maskGenAlgorithm */ + X509_ALGOR *maskHash; +}; + +DECLARE_ASN1_FUNCTIONS(RSA_PSS_PARAMS) + +typedef struct rsa_oaep_params_st { + X509_ALGOR *hashFunc; + X509_ALGOR *maskGenFunc; + X509_ALGOR *pSourceFunc; + /* Decoded hash algorithm from maskGenFunc */ + X509_ALGOR *maskHash; +} RSA_OAEP_PARAMS; + +DECLARE_ASN1_FUNCTIONS(RSA_OAEP_PARAMS) + +# ifndef OPENSSL_NO_STDIO +int RSA_print_fp(FILE *fp, const RSA *r, int offset); +# endif + +int RSA_print(BIO *bp, const RSA *r, int offset); + +/* + * The following 2 functions sign and verify a X509_SIG ASN1 object inside + * PKCS#1 padded RSA encryption + */ +int RSA_sign(int type, const unsigned char *m, unsigned int m_length, + unsigned char *sigret, unsigned int *siglen, RSA *rsa); +int RSA_verify(int type, const unsigned char *m, unsigned int m_length, + const unsigned char *sigbuf, unsigned int siglen, RSA *rsa); + +/* + * The following 2 function sign and verify a ASN1_OCTET_STRING object inside + * PKCS#1 padded RSA encryption + */ +int RSA_sign_ASN1_OCTET_STRING(int type, + const unsigned char *m, unsigned int m_length, + unsigned char *sigret, unsigned int *siglen, + RSA *rsa); +int RSA_verify_ASN1_OCTET_STRING(int type, const unsigned char *m, + unsigned int m_length, unsigned char *sigbuf, + unsigned int siglen, RSA *rsa); + +int RSA_blinding_on(RSA *rsa, BN_CTX *ctx); +void RSA_blinding_off(RSA *rsa); +BN_BLINDING *RSA_setup_blinding(RSA *rsa, BN_CTX *ctx); + +int RSA_padding_add_PKCS1_type_1(unsigned char *to, int tlen, + const unsigned char *f, int fl); +int RSA_padding_check_PKCS1_type_1(unsigned char *to, int tlen, + const unsigned char *f, int fl, + int rsa_len); +int RSA_padding_add_PKCS1_type_2(unsigned char *to, int tlen, + const unsigned char *f, int fl); +int RSA_padding_check_PKCS1_type_2(unsigned char *to, int tlen, + const unsigned char *f, int fl, + int rsa_len); +int PKCS1_MGF1(unsigned char *mask, long len, const unsigned char *seed, + long seedlen, const EVP_MD *dgst); +int RSA_padding_add_PKCS1_OAEP(unsigned char *to, int tlen, + const unsigned char *f, int fl, + const unsigned char *p, int pl); +int RSA_padding_check_PKCS1_OAEP(unsigned char *to, int tlen, + const unsigned char *f, int fl, int rsa_len, + const unsigned char *p, int pl); +int RSA_padding_add_PKCS1_OAEP_mgf1(unsigned char *to, int tlen, + const unsigned char *from, int flen, + const unsigned char *param, int plen, + const EVP_MD *md, const EVP_MD *mgf1md); +int RSA_padding_check_PKCS1_OAEP_mgf1(unsigned char *to, int tlen, + const unsigned char *from, int flen, + int num, const unsigned char *param, + int plen, const EVP_MD *md, + const EVP_MD *mgf1md); +int RSA_padding_add_SSLv23(unsigned char *to, int tlen, + const unsigned char *f, int fl); +int RSA_padding_check_SSLv23(unsigned char *to, int tlen, + const unsigned char *f, int fl, int rsa_len); 
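[Editor's note: a minimal usage sketch for the EVP_PKEY_CTX_set_rsa_padding / EVP_PKEY_CTX_set_rsa_oaep_md macros declared above; it is illustrative only and not part of the vendored header. It assumes the caller already holds an RSA EVP_PKEY; the function name oaep_encrypt_example is hypothetical.]

    #include <openssl/evp.h>
    #include <openssl/rsa.h>

    /* Encrypt inlen bytes with RSA-OAEP(SHA-256); returns 1 on success, 0 on error. */
    int oaep_encrypt_example(EVP_PKEY *pkey,
                             const unsigned char *in, size_t inlen,
                             unsigned char *out, size_t *outlen)
    {
        EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(pkey, NULL);
        int ok = 0;

        if (ctx != NULL
            && EVP_PKEY_encrypt_init(ctx) > 0
            && EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_OAEP_PADDING) > 0
            && EVP_PKEY_CTX_set_rsa_oaep_md(ctx, EVP_sha256()) > 0
            && EVP_PKEY_encrypt(ctx, out, outlen, in, inlen) > 0)
            ok = 1;

        EVP_PKEY_CTX_free(ctx);   /* NULL-safe */
        return ok;
    }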
+int RSA_padding_add_none(unsigned char *to, int tlen, const unsigned char *f, + int fl); +int RSA_padding_check_none(unsigned char *to, int tlen, + const unsigned char *f, int fl, int rsa_len); +int RSA_padding_add_X931(unsigned char *to, int tlen, const unsigned char *f, + int fl); +int RSA_padding_check_X931(unsigned char *to, int tlen, + const unsigned char *f, int fl, int rsa_len); +int RSA_X931_hash_id(int nid); + +int RSA_verify_PKCS1_PSS(RSA *rsa, const unsigned char *mHash, + const EVP_MD *Hash, const unsigned char *EM, + int sLen); +int RSA_padding_add_PKCS1_PSS(RSA *rsa, unsigned char *EM, + const unsigned char *mHash, const EVP_MD *Hash, + int sLen); + +int RSA_verify_PKCS1_PSS_mgf1(RSA *rsa, const unsigned char *mHash, + const EVP_MD *Hash, const EVP_MD *mgf1Hash, + const unsigned char *EM, int sLen); + +int RSA_padding_add_PKCS1_PSS_mgf1(RSA *rsa, unsigned char *EM, + const unsigned char *mHash, + const EVP_MD *Hash, const EVP_MD *mgf1Hash, + int sLen); + +#define RSA_get_ex_new_index(l, p, newf, dupf, freef) \ + CRYPTO_get_ex_new_index(CRYPTO_EX_INDEX_RSA, l, p, newf, dupf, freef) +int RSA_set_ex_data(RSA *r, int idx, void *arg); +void *RSA_get_ex_data(const RSA *r, int idx); + +RSA *RSAPublicKey_dup(RSA *rsa); +RSA *RSAPrivateKey_dup(RSA *rsa); + +/* + * If this flag is set the RSA method is FIPS compliant and can be used in + * FIPS mode. This is set in the validated module method. If an application + * sets this flag in its own methods it is its responsibility to ensure the + * result is compliant. + */ + +# define RSA_FLAG_FIPS_METHOD 0x0400 + +/* + * If this flag is set the operations normally disabled in FIPS mode are + * permitted it is then the applications responsibility to ensure that the + * usage is compliant. + */ + +# define RSA_FLAG_NON_FIPS_ALLOW 0x0400 +/* + * Application has decided PRNG is good enough to generate a key: don't + * check. 
+ */ +# define RSA_FLAG_CHECKED 0x0800 + +RSA_METHOD *RSA_meth_new(const char *name, int flags); +void RSA_meth_free(RSA_METHOD *meth); +RSA_METHOD *RSA_meth_dup(const RSA_METHOD *meth); +const char *RSA_meth_get0_name(const RSA_METHOD *meth); +int RSA_meth_set1_name(RSA_METHOD *meth, const char *name); +int RSA_meth_get_flags(const RSA_METHOD *meth); +int RSA_meth_set_flags(RSA_METHOD *meth, int flags); +void *RSA_meth_get0_app_data(const RSA_METHOD *meth); +int RSA_meth_set0_app_data(RSA_METHOD *meth, void *app_data); +int (*RSA_meth_get_pub_enc(const RSA_METHOD *meth)) + (int flen, const unsigned char *from, + unsigned char *to, RSA *rsa, int padding); +int RSA_meth_set_pub_enc(RSA_METHOD *rsa, + int (*pub_enc) (int flen, const unsigned char *from, + unsigned char *to, RSA *rsa, + int padding)); +int (*RSA_meth_get_pub_dec(const RSA_METHOD *meth)) + (int flen, const unsigned char *from, + unsigned char *to, RSA *rsa, int padding); +int RSA_meth_set_pub_dec(RSA_METHOD *rsa, + int (*pub_dec) (int flen, const unsigned char *from, + unsigned char *to, RSA *rsa, + int padding)); +int (*RSA_meth_get_priv_enc(const RSA_METHOD *meth)) + (int flen, const unsigned char *from, + unsigned char *to, RSA *rsa, int padding); +int RSA_meth_set_priv_enc(RSA_METHOD *rsa, + int (*priv_enc) (int flen, const unsigned char *from, + unsigned char *to, RSA *rsa, + int padding)); +int (*RSA_meth_get_priv_dec(const RSA_METHOD *meth)) + (int flen, const unsigned char *from, + unsigned char *to, RSA *rsa, int padding); +int RSA_meth_set_priv_dec(RSA_METHOD *rsa, + int (*priv_dec) (int flen, const unsigned char *from, + unsigned char *to, RSA *rsa, + int padding)); +int (*RSA_meth_get_mod_exp(const RSA_METHOD *meth)) + (BIGNUM *r0, const BIGNUM *i, RSA *rsa, BN_CTX *ctx); +int RSA_meth_set_mod_exp(RSA_METHOD *rsa, + int (*mod_exp) (BIGNUM *r0, const BIGNUM *i, RSA *rsa, + BN_CTX *ctx)); +int (*RSA_meth_get_bn_mod_exp(const RSA_METHOD *meth)) + (BIGNUM *r, const BIGNUM *a, const BIGNUM *p, + const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx); +int RSA_meth_set_bn_mod_exp(RSA_METHOD *rsa, + int (*bn_mod_exp) (BIGNUM *r, + const BIGNUM *a, + const BIGNUM *p, + const BIGNUM *m, + BN_CTX *ctx, + BN_MONT_CTX *m_ctx)); +int (*RSA_meth_get_init(const RSA_METHOD *meth)) (RSA *rsa); +int RSA_meth_set_init(RSA_METHOD *rsa, int (*init) (RSA *rsa)); +int (*RSA_meth_get_finish(const RSA_METHOD *meth)) (RSA *rsa); +int RSA_meth_set_finish(RSA_METHOD *rsa, int (*finish) (RSA *rsa)); +int (*RSA_meth_get_sign(const RSA_METHOD *meth)) + (int type, + const unsigned char *m, unsigned int m_length, + unsigned char *sigret, unsigned int *siglen, + const RSA *rsa); +int RSA_meth_set_sign(RSA_METHOD *rsa, + int (*sign) (int type, const unsigned char *m, + unsigned int m_length, + unsigned char *sigret, unsigned int *siglen, + const RSA *rsa)); +int (*RSA_meth_get_verify(const RSA_METHOD *meth)) + (int dtype, const unsigned char *m, + unsigned int m_length, const unsigned char *sigbuf, + unsigned int siglen, const RSA *rsa); +int RSA_meth_set_verify(RSA_METHOD *rsa, + int (*verify) (int dtype, const unsigned char *m, + unsigned int m_length, + const unsigned char *sigbuf, + unsigned int siglen, const RSA *rsa)); +int (*RSA_meth_get_keygen(const RSA_METHOD *meth)) + (RSA *rsa, int bits, BIGNUM *e, BN_GENCB *cb); +int RSA_meth_set_keygen(RSA_METHOD *rsa, + int (*keygen) (RSA *rsa, int bits, BIGNUM *e, + BN_GENCB *cb)); +int (*RSA_meth_get_multi_prime_keygen(const RSA_METHOD *meth)) + (RSA *rsa, int bits, int primes, BIGNUM *e, BN_GENCB *cb); 
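[Editor's note: a hedged sketch of how the RSA_meth_* accessors declared above might be combined to wrap the built-in method; it is not part of the vendored header. my_priv_enc and make_wrapped_method are hypothetical names, and the override simply delegates to the default implementation.]

    #include <openssl/rsa.h>

    /* Delegate to the built-in private-encrypt implementation in this sketch. */
    static int my_priv_enc(int flen, const unsigned char *from,
                           unsigned char *to, RSA *rsa, int padding)
    {
        return RSA_meth_get_priv_enc(RSA_PKCS1_OpenSSL())(flen, from, to, rsa, padding);
    }

    /* Duplicate the default method and override one callback. */
    static RSA_METHOD *make_wrapped_method(void)
    {
        RSA_METHOD *meth = RSA_meth_dup(RSA_PKCS1_OpenSSL());

        if (meth == NULL
            || !RSA_meth_set1_name(meth, "wrapped RSA")
            || !RSA_meth_set_priv_enc(meth, my_priv_enc)) {
            RSA_meth_free(meth);  /* NULL-safe */
            return NULL;
        }
        return meth;  /* install on a key with RSA_set_method(rsa, meth) */
    }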
+int RSA_meth_set_multi_prime_keygen(RSA_METHOD *meth, + int (*keygen) (RSA *rsa, int bits, + int primes, BIGNUM *e, + BN_GENCB *cb)); + +# ifdef __cplusplus +} +# endif +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/rsaerr.h b/third_party/openssl/uos/sw_64/include/openssl/rsaerr.h new file mode 100644 index 00000000..59b15e13 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/rsaerr.h @@ -0,0 +1,167 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_RSAERR_H +# define HEADER_RSAERR_H + +# ifndef HEADER_SYMHACKS_H +# include +# endif + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_RSA_strings(void); + +/* + * RSA function codes. + */ +# define RSA_F_CHECK_PADDING_MD 140 +# define RSA_F_ENCODE_PKCS1 146 +# define RSA_F_INT_RSA_VERIFY 145 +# define RSA_F_OLD_RSA_PRIV_DECODE 147 +# define RSA_F_PKEY_PSS_INIT 165 +# define RSA_F_PKEY_RSA_CTRL 143 +# define RSA_F_PKEY_RSA_CTRL_STR 144 +# define RSA_F_PKEY_RSA_SIGN 142 +# define RSA_F_PKEY_RSA_VERIFY 149 +# define RSA_F_PKEY_RSA_VERIFYRECOVER 141 +# define RSA_F_RSA_ALGOR_TO_MD 156 +# define RSA_F_RSA_BUILTIN_KEYGEN 129 +# define RSA_F_RSA_CHECK_KEY 123 +# define RSA_F_RSA_CHECK_KEY_EX 160 +# define RSA_F_RSA_CMS_DECRYPT 159 +# define RSA_F_RSA_CMS_VERIFY 158 +# define RSA_F_RSA_ITEM_VERIFY 148 +# define RSA_F_RSA_METH_DUP 161 +# define RSA_F_RSA_METH_NEW 162 +# define RSA_F_RSA_METH_SET1_NAME 163 +# define RSA_F_RSA_MGF1_TO_MD 157 +# define RSA_F_RSA_MULTIP_INFO_NEW 166 +# define RSA_F_RSA_NEW_METHOD 106 +# define RSA_F_RSA_NULL 124 +# define RSA_F_RSA_NULL_PRIVATE_DECRYPT 132 +# define RSA_F_RSA_NULL_PRIVATE_ENCRYPT 133 +# define RSA_F_RSA_NULL_PUBLIC_DECRYPT 134 +# define RSA_F_RSA_NULL_PUBLIC_ENCRYPT 135 +# define RSA_F_RSA_OSSL_PRIVATE_DECRYPT 101 +# define RSA_F_RSA_OSSL_PRIVATE_ENCRYPT 102 +# define RSA_F_RSA_OSSL_PUBLIC_DECRYPT 103 +# define RSA_F_RSA_OSSL_PUBLIC_ENCRYPT 104 +# define RSA_F_RSA_PADDING_ADD_NONE 107 +# define RSA_F_RSA_PADDING_ADD_PKCS1_OAEP 121 +# define RSA_F_RSA_PADDING_ADD_PKCS1_OAEP_MGF1 154 +# define RSA_F_RSA_PADDING_ADD_PKCS1_PSS 125 +# define RSA_F_RSA_PADDING_ADD_PKCS1_PSS_MGF1 152 +# define RSA_F_RSA_PADDING_ADD_PKCS1_TYPE_1 108 +# define RSA_F_RSA_PADDING_ADD_PKCS1_TYPE_2 109 +# define RSA_F_RSA_PADDING_ADD_SSLV23 110 +# define RSA_F_RSA_PADDING_ADD_X931 127 +# define RSA_F_RSA_PADDING_CHECK_NONE 111 +# define RSA_F_RSA_PADDING_CHECK_PKCS1_OAEP 122 +# define RSA_F_RSA_PADDING_CHECK_PKCS1_OAEP_MGF1 153 +# define RSA_F_RSA_PADDING_CHECK_PKCS1_TYPE_1 112 +# define RSA_F_RSA_PADDING_CHECK_PKCS1_TYPE_2 113 +# define RSA_F_RSA_PADDING_CHECK_SSLV23 114 +# define RSA_F_RSA_PADDING_CHECK_X931 128 +# define RSA_F_RSA_PARAM_DECODE 164 +# define RSA_F_RSA_PRINT 115 +# define RSA_F_RSA_PRINT_FP 116 +# define RSA_F_RSA_PRIV_DECODE 150 +# define RSA_F_RSA_PRIV_ENCODE 138 +# define RSA_F_RSA_PSS_GET_PARAM 151 +# define RSA_F_RSA_PSS_TO_CTX 155 +# define RSA_F_RSA_PUB_DECODE 139 +# define RSA_F_RSA_SETUP_BLINDING 136 +# define RSA_F_RSA_SIGN 117 +# define RSA_F_RSA_SIGN_ASN1_OCTET_STRING 118 +# define RSA_F_RSA_VERIFY 119 +# define RSA_F_RSA_VERIFY_ASN1_OCTET_STRING 120 +# define RSA_F_RSA_VERIFY_PKCS1_PSS_MGF1 126 +# define 
RSA_F_SETUP_TBUF 167 + +/* + * RSA reason codes. + */ +# define RSA_R_ALGORITHM_MISMATCH 100 +# define RSA_R_BAD_E_VALUE 101 +# define RSA_R_BAD_FIXED_HEADER_DECRYPT 102 +# define RSA_R_BAD_PAD_BYTE_COUNT 103 +# define RSA_R_BAD_SIGNATURE 104 +# define RSA_R_BLOCK_TYPE_IS_NOT_01 106 +# define RSA_R_BLOCK_TYPE_IS_NOT_02 107 +# define RSA_R_DATA_GREATER_THAN_MOD_LEN 108 +# define RSA_R_DATA_TOO_LARGE 109 +# define RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE 110 +# define RSA_R_DATA_TOO_LARGE_FOR_MODULUS 132 +# define RSA_R_DATA_TOO_SMALL 111 +# define RSA_R_DATA_TOO_SMALL_FOR_KEY_SIZE 122 +# define RSA_R_DIGEST_DOES_NOT_MATCH 158 +# define RSA_R_DIGEST_NOT_ALLOWED 145 +# define RSA_R_DIGEST_TOO_BIG_FOR_RSA_KEY 112 +# define RSA_R_DMP1_NOT_CONGRUENT_TO_D 124 +# define RSA_R_DMQ1_NOT_CONGRUENT_TO_D 125 +# define RSA_R_D_E_NOT_CONGRUENT_TO_1 123 +# define RSA_R_FIRST_OCTET_INVALID 133 +# define RSA_R_ILLEGAL_OR_UNSUPPORTED_PADDING_MODE 144 +# define RSA_R_INVALID_DIGEST 157 +# define RSA_R_INVALID_DIGEST_LENGTH 143 +# define RSA_R_INVALID_HEADER 137 +# define RSA_R_INVALID_LABEL 160 +# define RSA_R_INVALID_MESSAGE_LENGTH 131 +# define RSA_R_INVALID_MGF1_MD 156 +# define RSA_R_INVALID_MULTI_PRIME_KEY 167 +# define RSA_R_INVALID_OAEP_PARAMETERS 161 +# define RSA_R_INVALID_PADDING 138 +# define RSA_R_INVALID_PADDING_MODE 141 +# define RSA_R_INVALID_PSS_PARAMETERS 149 +# define RSA_R_INVALID_PSS_SALTLEN 146 +# define RSA_R_INVALID_SALT_LENGTH 150 +# define RSA_R_INVALID_TRAILER 139 +# define RSA_R_INVALID_X931_DIGEST 142 +# define RSA_R_IQMP_NOT_INVERSE_OF_Q 126 +# define RSA_R_KEY_PRIME_NUM_INVALID 165 +# define RSA_R_KEY_SIZE_TOO_SMALL 120 +# define RSA_R_LAST_OCTET_INVALID 134 +# define RSA_R_MISSING_PRIVATE_KEY 179 +# define RSA_R_MGF1_DIGEST_NOT_ALLOWED 152 +# define RSA_R_MODULUS_TOO_LARGE 105 +# define RSA_R_MP_COEFFICIENT_NOT_INVERSE_OF_R 168 +# define RSA_R_MP_EXPONENT_NOT_CONGRUENT_TO_D 169 +# define RSA_R_MP_R_NOT_PRIME 170 +# define RSA_R_NO_PUBLIC_EXPONENT 140 +# define RSA_R_NULL_BEFORE_BLOCK_MISSING 113 +# define RSA_R_N_DOES_NOT_EQUAL_PRODUCT_OF_PRIMES 172 +# define RSA_R_N_DOES_NOT_EQUAL_P_Q 127 +# define RSA_R_OAEP_DECODING_ERROR 121 +# define RSA_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE 148 +# define RSA_R_PADDING_CHECK_FAILED 114 +# define RSA_R_PKCS_DECODING_ERROR 159 +# define RSA_R_PSS_SALTLEN_TOO_SMALL 164 +# define RSA_R_P_NOT_PRIME 128 +# define RSA_R_Q_NOT_PRIME 129 +# define RSA_R_RSA_OPERATIONS_NOT_SUPPORTED 130 +# define RSA_R_SLEN_CHECK_FAILED 136 +# define RSA_R_SLEN_RECOVERY_FAILED 135 +# define RSA_R_SSLV3_ROLLBACK_ATTACK 115 +# define RSA_R_THE_ASN1_OBJECT_IDENTIFIER_IS_NOT_KNOWN_FOR_THIS_MD 116 +# define RSA_R_UNKNOWN_ALGORITHM_TYPE 117 +# define RSA_R_UNKNOWN_DIGEST 166 +# define RSA_R_UNKNOWN_MASK_DIGEST 151 +# define RSA_R_UNKNOWN_PADDING_TYPE 118 +# define RSA_R_UNSUPPORTED_ENCRYPTION_TYPE 162 +# define RSA_R_UNSUPPORTED_LABEL_SOURCE 163 +# define RSA_R_UNSUPPORTED_MASK_ALGORITHM 153 +# define RSA_R_UNSUPPORTED_MASK_PARAMETER 154 +# define RSA_R_UNSUPPORTED_SIGNATURE_TYPE 155 +# define RSA_R_VALUE_MISSING 147 +# define RSA_R_WRONG_SIGNATURE_LENGTH 119 + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/safestack.h b/third_party/openssl/uos/sw_64/include/openssl/safestack.h new file mode 100644 index 00000000..38b55789 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/safestack.h @@ -0,0 +1,207 @@ +/* + * Copyright 1999-2019 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). 
You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_SAFESTACK_H +# define HEADER_SAFESTACK_H + +# include +# include + +#ifdef __cplusplus +extern "C" { +#endif + +# define STACK_OF(type) struct stack_st_##type + +# define SKM_DEFINE_STACK_OF(t1, t2, t3) \ + STACK_OF(t1); \ + typedef int (*sk_##t1##_compfunc)(const t3 * const *a, const t3 *const *b); \ + typedef void (*sk_##t1##_freefunc)(t3 *a); \ + typedef t3 * (*sk_##t1##_copyfunc)(const t3 *a); \ + static ossl_unused ossl_inline int sk_##t1##_num(const STACK_OF(t1) *sk) \ + { \ + return OPENSSL_sk_num((const OPENSSL_STACK *)sk); \ + } \ + static ossl_unused ossl_inline t2 *sk_##t1##_value(const STACK_OF(t1) *sk, int idx) \ + { \ + return (t2 *)OPENSSL_sk_value((const OPENSSL_STACK *)sk, idx); \ + } \ + static ossl_unused ossl_inline STACK_OF(t1) *sk_##t1##_new(sk_##t1##_compfunc compare) \ + { \ + return (STACK_OF(t1) *)OPENSSL_sk_new((OPENSSL_sk_compfunc)compare); \ + } \ + static ossl_unused ossl_inline STACK_OF(t1) *sk_##t1##_new_null(void) \ + { \ + return (STACK_OF(t1) *)OPENSSL_sk_new_null(); \ + } \ + static ossl_unused ossl_inline STACK_OF(t1) *sk_##t1##_new_reserve(sk_##t1##_compfunc compare, int n) \ + { \ + return (STACK_OF(t1) *)OPENSSL_sk_new_reserve((OPENSSL_sk_compfunc)compare, n); \ + } \ + static ossl_unused ossl_inline int sk_##t1##_reserve(STACK_OF(t1) *sk, int n) \ + { \ + return OPENSSL_sk_reserve((OPENSSL_STACK *)sk, n); \ + } \ + static ossl_unused ossl_inline void sk_##t1##_free(STACK_OF(t1) *sk) \ + { \ + OPENSSL_sk_free((OPENSSL_STACK *)sk); \ + } \ + static ossl_unused ossl_inline void sk_##t1##_zero(STACK_OF(t1) *sk) \ + { \ + OPENSSL_sk_zero((OPENSSL_STACK *)sk); \ + } \ + static ossl_unused ossl_inline t2 *sk_##t1##_delete(STACK_OF(t1) *sk, int i) \ + { \ + return (t2 *)OPENSSL_sk_delete((OPENSSL_STACK *)sk, i); \ + } \ + static ossl_unused ossl_inline t2 *sk_##t1##_delete_ptr(STACK_OF(t1) *sk, t2 *ptr) \ + { \ + return (t2 *)OPENSSL_sk_delete_ptr((OPENSSL_STACK *)sk, \ + (const void *)ptr); \ + } \ + static ossl_unused ossl_inline int sk_##t1##_push(STACK_OF(t1) *sk, t2 *ptr) \ + { \ + return OPENSSL_sk_push((OPENSSL_STACK *)sk, (const void *)ptr); \ + } \ + static ossl_unused ossl_inline int sk_##t1##_unshift(STACK_OF(t1) *sk, t2 *ptr) \ + { \ + return OPENSSL_sk_unshift((OPENSSL_STACK *)sk, (const void *)ptr); \ + } \ + static ossl_unused ossl_inline t2 *sk_##t1##_pop(STACK_OF(t1) *sk) \ + { \ + return (t2 *)OPENSSL_sk_pop((OPENSSL_STACK *)sk); \ + } \ + static ossl_unused ossl_inline t2 *sk_##t1##_shift(STACK_OF(t1) *sk) \ + { \ + return (t2 *)OPENSSL_sk_shift((OPENSSL_STACK *)sk); \ + } \ + static ossl_unused ossl_inline void sk_##t1##_pop_free(STACK_OF(t1) *sk, sk_##t1##_freefunc freefunc) \ + { \ + OPENSSL_sk_pop_free((OPENSSL_STACK *)sk, (OPENSSL_sk_freefunc)freefunc); \ + } \ + static ossl_unused ossl_inline int sk_##t1##_insert(STACK_OF(t1) *sk, t2 *ptr, int idx) \ + { \ + return OPENSSL_sk_insert((OPENSSL_STACK *)sk, (const void *)ptr, idx); \ + } \ + static ossl_unused ossl_inline t2 *sk_##t1##_set(STACK_OF(t1) *sk, int idx, t2 *ptr) \ + { \ + return (t2 *)OPENSSL_sk_set((OPENSSL_STACK *)sk, idx, (const void *)ptr); \ + } \ + static ossl_unused ossl_inline int sk_##t1##_find(STACK_OF(t1) *sk, t2 *ptr) \ + { \ + return OPENSSL_sk_find((OPENSSL_STACK *)sk, (const void *)ptr); \ + } \ + static ossl_unused ossl_inline int 
sk_##t1##_find_ex(STACK_OF(t1) *sk, t2 *ptr) \ + { \ + return OPENSSL_sk_find_ex((OPENSSL_STACK *)sk, (const void *)ptr); \ + } \ + static ossl_unused ossl_inline void sk_##t1##_sort(STACK_OF(t1) *sk) \ + { \ + OPENSSL_sk_sort((OPENSSL_STACK *)sk); \ + } \ + static ossl_unused ossl_inline int sk_##t1##_is_sorted(const STACK_OF(t1) *sk) \ + { \ + return OPENSSL_sk_is_sorted((const OPENSSL_STACK *)sk); \ + } \ + static ossl_unused ossl_inline STACK_OF(t1) * sk_##t1##_dup(const STACK_OF(t1) *sk) \ + { \ + return (STACK_OF(t1) *)OPENSSL_sk_dup((const OPENSSL_STACK *)sk); \ + } \ + static ossl_unused ossl_inline STACK_OF(t1) *sk_##t1##_deep_copy(const STACK_OF(t1) *sk, \ + sk_##t1##_copyfunc copyfunc, \ + sk_##t1##_freefunc freefunc) \ + { \ + return (STACK_OF(t1) *)OPENSSL_sk_deep_copy((const OPENSSL_STACK *)sk, \ + (OPENSSL_sk_copyfunc)copyfunc, \ + (OPENSSL_sk_freefunc)freefunc); \ + } \ + static ossl_unused ossl_inline sk_##t1##_compfunc sk_##t1##_set_cmp_func(STACK_OF(t1) *sk, sk_##t1##_compfunc compare) \ + { \ + return (sk_##t1##_compfunc)OPENSSL_sk_set_cmp_func((OPENSSL_STACK *)sk, (OPENSSL_sk_compfunc)compare); \ + } + +# define DEFINE_SPECIAL_STACK_OF(t1, t2) SKM_DEFINE_STACK_OF(t1, t2, t2) +# define DEFINE_STACK_OF(t) SKM_DEFINE_STACK_OF(t, t, t) +# define DEFINE_SPECIAL_STACK_OF_CONST(t1, t2) \ + SKM_DEFINE_STACK_OF(t1, const t2, t2) +# define DEFINE_STACK_OF_CONST(t) SKM_DEFINE_STACK_OF(t, const t, t) + +/*- + * Strings are special: normally an lhash entry will point to a single + * (somewhat) mutable object. In the case of strings: + * + * a) Instead of a single char, there is an array of chars, NUL-terminated. + * b) The string may have be immutable. + * + * So, they need their own declarations. Especially important for + * type-checking tools, such as Deputy. + * + * In practice, however, it appears to be hard to have a const + * string. For now, I'm settling for dealing with the fact it is a + * string at all. + */ +typedef char *OPENSSL_STRING; +typedef const char *OPENSSL_CSTRING; + +/*- + * Confusingly, LHASH_OF(STRING) deals with char ** throughout, but + * STACK_OF(STRING) is really more like STACK_OF(char), only, as mentioned + * above, instead of a single char each entry is a NUL-terminated array of + * chars. So, we have to implement STRING specially for STACK_OF. This is + * dealt with in the autogenerated macros below. + */ +DEFINE_SPECIAL_STACK_OF(OPENSSL_STRING, char) +DEFINE_SPECIAL_STACK_OF_CONST(OPENSSL_CSTRING, char) + +/* + * Similarly, we sometimes use a block of characters, NOT nul-terminated. + * These should also be distinguished from "normal" stacks. + */ +typedef void *OPENSSL_BLOCK; +DEFINE_SPECIAL_STACK_OF(OPENSSL_BLOCK, void) + +/* + * If called without higher optimization (min. -xO3) the Oracle Developer + * Studio compiler generates code for the defined (static inline) functions + * above. + * This would later lead to the linker complaining about missing symbols when + * this header file is included but the resulting object is not linked against + * the Crypto library (openssl#6912). 
+ */ +# ifdef __SUNPRO_C +# pragma weak OPENSSL_sk_num +# pragma weak OPENSSL_sk_value +# pragma weak OPENSSL_sk_new +# pragma weak OPENSSL_sk_new_null +# pragma weak OPENSSL_sk_new_reserve +# pragma weak OPENSSL_sk_reserve +# pragma weak OPENSSL_sk_free +# pragma weak OPENSSL_sk_zero +# pragma weak OPENSSL_sk_delete +# pragma weak OPENSSL_sk_delete_ptr +# pragma weak OPENSSL_sk_push +# pragma weak OPENSSL_sk_unshift +# pragma weak OPENSSL_sk_pop +# pragma weak OPENSSL_sk_shift +# pragma weak OPENSSL_sk_pop_free +# pragma weak OPENSSL_sk_insert +# pragma weak OPENSSL_sk_set +# pragma weak OPENSSL_sk_find +# pragma weak OPENSSL_sk_find_ex +# pragma weak OPENSSL_sk_sort +# pragma weak OPENSSL_sk_is_sorted +# pragma weak OPENSSL_sk_dup +# pragma weak OPENSSL_sk_deep_copy +# pragma weak OPENSSL_sk_set_cmp_func +# endif /* __SUNPRO_C */ + +# ifdef __cplusplus +} +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/seed.h b/third_party/openssl/uos/sw_64/include/openssl/seed.h new file mode 100644 index 00000000..de10b085 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/seed.h @@ -0,0 +1,96 @@ +/* + * Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +/* + * Copyright (c) 2007 KISA(Korea Information Security Agency). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Neither the name of author nor the names of its contributors may + * be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#ifndef HEADER_SEED_H +# define HEADER_SEED_H + +# include + +# ifndef OPENSSL_NO_SEED +# include +# include + +#ifdef __cplusplus +extern "C" { +#endif + +/* look whether we need 'long' to get 32 bits */ +# ifdef AES_LONG +# ifndef SEED_LONG +# define SEED_LONG 1 +# endif +# endif + +# include + +# define SEED_BLOCK_SIZE 16 +# define SEED_KEY_LENGTH 16 + +typedef struct seed_key_st { +# ifdef SEED_LONG + unsigned long data[32]; +# else + unsigned int data[32]; +# endif +} SEED_KEY_SCHEDULE; + +void SEED_set_key(const unsigned char rawkey[SEED_KEY_LENGTH], + SEED_KEY_SCHEDULE *ks); + +void SEED_encrypt(const unsigned char s[SEED_BLOCK_SIZE], + unsigned char d[SEED_BLOCK_SIZE], + const SEED_KEY_SCHEDULE *ks); +void SEED_decrypt(const unsigned char s[SEED_BLOCK_SIZE], + unsigned char d[SEED_BLOCK_SIZE], + const SEED_KEY_SCHEDULE *ks); + +void SEED_ecb_encrypt(const unsigned char *in, unsigned char *out, + const SEED_KEY_SCHEDULE *ks, int enc); +void SEED_cbc_encrypt(const unsigned char *in, unsigned char *out, size_t len, + const SEED_KEY_SCHEDULE *ks, + unsigned char ivec[SEED_BLOCK_SIZE], int enc); +void SEED_cfb128_encrypt(const unsigned char *in, unsigned char *out, + size_t len, const SEED_KEY_SCHEDULE *ks, + unsigned char ivec[SEED_BLOCK_SIZE], int *num, + int enc); +void SEED_ofb128_encrypt(const unsigned char *in, unsigned char *out, + size_t len, const SEED_KEY_SCHEDULE *ks, + unsigned char ivec[SEED_BLOCK_SIZE], int *num); + +# ifdef __cplusplus +} +# endif +# endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/sha.h b/third_party/openssl/uos/sw_64/include/openssl/sha.h new file mode 100644 index 00000000..6a1eb0de --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/sha.h @@ -0,0 +1,119 @@ +/* + * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_SHA_H +# define HEADER_SHA_H + +# include +# include + +#ifdef __cplusplus +extern "C" { +#endif + +/*- + * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + * ! SHA_LONG has to be at least 32 bits wide. ! + * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + */ +# define SHA_LONG unsigned int + +# define SHA_LBLOCK 16 +# define SHA_CBLOCK (SHA_LBLOCK*4)/* SHA treats input data as a + * contiguous array of 32 bit wide + * big-endian values. */ +# define SHA_LAST_BLOCK (SHA_CBLOCK-8) +# define SHA_DIGEST_LENGTH 20 + +typedef struct SHAstate_st { + SHA_LONG h0, h1, h2, h3, h4; + SHA_LONG Nl, Nh; + SHA_LONG data[SHA_LBLOCK]; + unsigned int num; +} SHA_CTX; + +int SHA1_Init(SHA_CTX *c); +int SHA1_Update(SHA_CTX *c, const void *data, size_t len); +int SHA1_Final(unsigned char *md, SHA_CTX *c); +unsigned char *SHA1(const unsigned char *d, size_t n, unsigned char *md); +void SHA1_Transform(SHA_CTX *c, const unsigned char *data); + +# define SHA256_CBLOCK (SHA_LBLOCK*4)/* SHA-256 treats input data as a + * contiguous array of 32 bit wide + * big-endian values. 
*/ + +typedef struct SHA256state_st { + SHA_LONG h[8]; + SHA_LONG Nl, Nh; + SHA_LONG data[SHA_LBLOCK]; + unsigned int num, md_len; +} SHA256_CTX; + +int SHA224_Init(SHA256_CTX *c); +int SHA224_Update(SHA256_CTX *c, const void *data, size_t len); +int SHA224_Final(unsigned char *md, SHA256_CTX *c); +unsigned char *SHA224(const unsigned char *d, size_t n, unsigned char *md); +int SHA256_Init(SHA256_CTX *c); +int SHA256_Update(SHA256_CTX *c, const void *data, size_t len); +int SHA256_Final(unsigned char *md, SHA256_CTX *c); +unsigned char *SHA256(const unsigned char *d, size_t n, unsigned char *md); +void SHA256_Transform(SHA256_CTX *c, const unsigned char *data); + +# define SHA224_DIGEST_LENGTH 28 +# define SHA256_DIGEST_LENGTH 32 +# define SHA384_DIGEST_LENGTH 48 +# define SHA512_DIGEST_LENGTH 64 + +/* + * Unlike 32-bit digest algorithms, SHA-512 *relies* on SHA_LONG64 + * being exactly 64-bit wide. See Implementation Notes in sha512.c + * for further details. + */ +/* + * SHA-512 treats input data as a + * contiguous array of 64 bit + * wide big-endian values. + */ +# define SHA512_CBLOCK (SHA_LBLOCK*8) +# if (defined(_WIN32) || defined(_WIN64)) && !defined(__MINGW32__) +# define SHA_LONG64 unsigned __int64 +# define U64(C) C##UI64 +# elif defined(__arch64__) +# define SHA_LONG64 unsigned long +# define U64(C) C##UL +# else +# define SHA_LONG64 unsigned long long +# define U64(C) C##ULL +# endif + +typedef struct SHA512state_st { + SHA_LONG64 h[8]; + SHA_LONG64 Nl, Nh; + union { + SHA_LONG64 d[SHA_LBLOCK]; + unsigned char p[SHA512_CBLOCK]; + } u; + unsigned int num, md_len; +} SHA512_CTX; + +int SHA384_Init(SHA512_CTX *c); +int SHA384_Update(SHA512_CTX *c, const void *data, size_t len); +int SHA384_Final(unsigned char *md, SHA512_CTX *c); +unsigned char *SHA384(const unsigned char *d, size_t n, unsigned char *md); +int SHA512_Init(SHA512_CTX *c); +int SHA512_Update(SHA512_CTX *c, const void *data, size_t len); +int SHA512_Final(unsigned char *md, SHA512_CTX *c); +unsigned char *SHA512(const unsigned char *d, size_t n, unsigned char *md); +void SHA512_Transform(SHA512_CTX *c, const unsigned char *data); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/srp.h b/third_party/openssl/uos/sw_64/include/openssl/srp.h new file mode 100644 index 00000000..aaf13558 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/srp.h @@ -0,0 +1,135 @@ +/* + * Copyright 2004-2018 The OpenSSL Project Authors. All Rights Reserved. + * Copyright (c) 2004, EdelKey Project. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + * + * Originally written by Christophe Renou and Peter Sylvester, + * for the EdelKey project. + */ + +#ifndef HEADER_SRP_H +# define HEADER_SRP_H + +#include + +#ifndef OPENSSL_NO_SRP +# include +# include +# include +# include +# include + +# ifdef __cplusplus +extern "C" { +# endif + +typedef struct SRP_gN_cache_st { + char *b64_bn; + BIGNUM *bn; +} SRP_gN_cache; + + +DEFINE_STACK_OF(SRP_gN_cache) + +typedef struct SRP_user_pwd_st { + /* Owned by us. */ + char *id; + BIGNUM *s; + BIGNUM *v; + /* Not owned by us. */ + const BIGNUM *g; + const BIGNUM *N; + /* Owned by us. 
*/ + char *info; +} SRP_user_pwd; + +void SRP_user_pwd_free(SRP_user_pwd *user_pwd); + +DEFINE_STACK_OF(SRP_user_pwd) + +typedef struct SRP_VBASE_st { + STACK_OF(SRP_user_pwd) *users_pwd; + STACK_OF(SRP_gN_cache) *gN_cache; +/* to simulate a user */ + char *seed_key; + const BIGNUM *default_g; + const BIGNUM *default_N; +} SRP_VBASE; + +/* + * Internal structure storing N and g pair + */ +typedef struct SRP_gN_st { + char *id; + const BIGNUM *g; + const BIGNUM *N; +} SRP_gN; + +DEFINE_STACK_OF(SRP_gN) + +SRP_VBASE *SRP_VBASE_new(char *seed_key); +void SRP_VBASE_free(SRP_VBASE *vb); +int SRP_VBASE_init(SRP_VBASE *vb, char *verifier_file); + +/* This method ignores the configured seed and fails for an unknown user. */ +DEPRECATEDIN_1_1_0(SRP_user_pwd *SRP_VBASE_get_by_user(SRP_VBASE *vb, char *username)) +/* NOTE: unlike in SRP_VBASE_get_by_user, caller owns the returned pointer.*/ +SRP_user_pwd *SRP_VBASE_get1_by_user(SRP_VBASE *vb, char *username); + +char *SRP_create_verifier(const char *user, const char *pass, char **salt, + char **verifier, const char *N, const char *g); +int SRP_create_verifier_BN(const char *user, const char *pass, BIGNUM **salt, + BIGNUM **verifier, const BIGNUM *N, + const BIGNUM *g); + +# define SRP_NO_ERROR 0 +# define SRP_ERR_VBASE_INCOMPLETE_FILE 1 +# define SRP_ERR_VBASE_BN_LIB 2 +# define SRP_ERR_OPEN_FILE 3 +# define SRP_ERR_MEMORY 4 + +# define DB_srptype 0 +# define DB_srpverifier 1 +# define DB_srpsalt 2 +# define DB_srpid 3 +# define DB_srpgN 4 +# define DB_srpinfo 5 +# undef DB_NUMBER +# define DB_NUMBER 6 + +# define DB_SRP_INDEX 'I' +# define DB_SRP_VALID 'V' +# define DB_SRP_REVOKED 'R' +# define DB_SRP_MODIF 'v' + +/* see srp.c */ +char *SRP_check_known_gN_param(const BIGNUM *g, const BIGNUM *N); +SRP_gN *SRP_get_default_gN(const char *id); + +/* server side .... */ +BIGNUM *SRP_Calc_server_key(const BIGNUM *A, const BIGNUM *v, const BIGNUM *u, + const BIGNUM *b, const BIGNUM *N); +BIGNUM *SRP_Calc_B(const BIGNUM *b, const BIGNUM *N, const BIGNUM *g, + const BIGNUM *v); +int SRP_Verify_A_mod_N(const BIGNUM *A, const BIGNUM *N); +BIGNUM *SRP_Calc_u(const BIGNUM *A, const BIGNUM *B, const BIGNUM *N); + +/* client side .... */ +BIGNUM *SRP_Calc_x(const BIGNUM *s, const char *user, const char *pass); +BIGNUM *SRP_Calc_A(const BIGNUM *a, const BIGNUM *N, const BIGNUM *g); +BIGNUM *SRP_Calc_client_key(const BIGNUM *N, const BIGNUM *B, const BIGNUM *g, + const BIGNUM *x, const BIGNUM *a, const BIGNUM *u); +int SRP_Verify_B_mod_N(const BIGNUM *B, const BIGNUM *N); + +# define SRP_MINIMAL_N 1024 + +# ifdef __cplusplus +} +# endif +# endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/srtp.h b/third_party/openssl/uos/sw_64/include/openssl/srtp.h new file mode 100644 index 00000000..0b57c235 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/srtp.h @@ -0,0 +1,50 @@ +/* + * Copyright 2011-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +/* + * DTLS code by Eric Rescorla + * + * Copyright (C) 2006, Network Resonance, Inc. Copyright (C) 2011, RTFM, Inc. 
+ */ + +#ifndef HEADER_D1_SRTP_H +# define HEADER_D1_SRTP_H + +# include + +#ifdef __cplusplus +extern "C" { +#endif + +# define SRTP_AES128_CM_SHA1_80 0x0001 +# define SRTP_AES128_CM_SHA1_32 0x0002 +# define SRTP_AES128_F8_SHA1_80 0x0003 +# define SRTP_AES128_F8_SHA1_32 0x0004 +# define SRTP_NULL_SHA1_80 0x0005 +# define SRTP_NULL_SHA1_32 0x0006 + +/* AEAD SRTP protection profiles from RFC 7714 */ +# define SRTP_AEAD_AES_128_GCM 0x0007 +# define SRTP_AEAD_AES_256_GCM 0x0008 + +# ifndef OPENSSL_NO_SRTP + +__owur int SSL_CTX_set_tlsext_use_srtp(SSL_CTX *ctx, const char *profiles); +__owur int SSL_set_tlsext_use_srtp(SSL *ssl, const char *profiles); + +__owur STACK_OF(SRTP_PROTECTION_PROFILE) *SSL_get_srtp_profiles(SSL *ssl); +__owur SRTP_PROTECTION_PROFILE *SSL_get_selected_srtp_profile(SSL *s); + +# endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/ssl.h b/third_party/openssl/uos/sw_64/include/openssl/ssl.h new file mode 100644 index 00000000..9af0c899 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/ssl.h @@ -0,0 +1,2448 @@ +/* + * Copyright 1995-2022 The OpenSSL Project Authors. All Rights Reserved. + * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved + * Copyright 2005 Nokia. All rights reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_SSL_H +# define HEADER_SSL_H + +# include +# include +# include +# include +# if OPENSSL_API_COMPAT < 0x10100000L +# include +# include +# include +# endif +# include +# include +# include +# include + +# include +# include +# include +# include + +#ifdef __cplusplus +extern "C" { +#endif + +/* OpenSSL version number for ASN.1 encoding of the session information */ +/*- + * Version 0 - initial version + * Version 1 - added the optional peer certificate + */ +# define SSL_SESSION_ASN1_VERSION 0x0001 + +# define SSL_MAX_SSL_SESSION_ID_LENGTH 32 +# define SSL_MAX_SID_CTX_LENGTH 32 + +# define SSL_MIN_RSA_MODULUS_LENGTH_IN_BYTES (512/8) +# define SSL_MAX_KEY_ARG_LENGTH 8 +# define SSL_MAX_MASTER_KEY_LENGTH 48 + +/* The maximum number of encrypt/decrypt pipelines we can support */ +# define SSL_MAX_PIPELINES 32 + +/* text strings for the ciphers */ + +/* These are used to specify which ciphers to use and not to use */ + +# define SSL_TXT_LOW "LOW" +# define SSL_TXT_MEDIUM "MEDIUM" +# define SSL_TXT_HIGH "HIGH" +# define SSL_TXT_FIPS "FIPS" + +# define SSL_TXT_aNULL "aNULL" +# define SSL_TXT_eNULL "eNULL" +# define SSL_TXT_NULL "NULL" + +# define SSL_TXT_kRSA "kRSA" +# define SSL_TXT_kDHr "kDHr"/* this cipher class has been removed */ +# define SSL_TXT_kDHd "kDHd"/* this cipher class has been removed */ +# define SSL_TXT_kDH "kDH"/* this cipher class has been removed */ +# define SSL_TXT_kEDH "kEDH"/* alias for kDHE */ +# define SSL_TXT_kDHE "kDHE" +# define SSL_TXT_kECDHr "kECDHr"/* this cipher class has been removed */ +# define SSL_TXT_kECDHe "kECDHe"/* this cipher class has been removed */ +# define SSL_TXT_kECDH "kECDH"/* this cipher class has been removed */ +# define SSL_TXT_kEECDH "kEECDH"/* alias for kECDHE */ +# define SSL_TXT_kECDHE "kECDHE" +# define SSL_TXT_kPSK "kPSK" +# define SSL_TXT_kRSAPSK "kRSAPSK" +# define SSL_TXT_kECDHEPSK "kECDHEPSK" +# define SSL_TXT_kDHEPSK "kDHEPSK" +# define SSL_TXT_kGOST "kGOST" +# define 
SSL_TXT_kSRP "kSRP" + +# define SSL_TXT_aRSA "aRSA" +# define SSL_TXT_aDSS "aDSS" +# define SSL_TXT_aDH "aDH"/* this cipher class has been removed */ +# define SSL_TXT_aECDH "aECDH"/* this cipher class has been removed */ +# define SSL_TXT_aECDSA "aECDSA" +# define SSL_TXT_aPSK "aPSK" +# define SSL_TXT_aGOST94 "aGOST94" +# define SSL_TXT_aGOST01 "aGOST01" +# define SSL_TXT_aGOST12 "aGOST12" +# define SSL_TXT_aGOST "aGOST" +# define SSL_TXT_aSRP "aSRP" + +# define SSL_TXT_DSS "DSS" +# define SSL_TXT_DH "DH" +# define SSL_TXT_DHE "DHE"/* same as "kDHE:-ADH" */ +# define SSL_TXT_EDH "EDH"/* alias for DHE */ +# define SSL_TXT_ADH "ADH" +# define SSL_TXT_RSA "RSA" +# define SSL_TXT_ECDH "ECDH" +# define SSL_TXT_EECDH "EECDH"/* alias for ECDHE" */ +# define SSL_TXT_ECDHE "ECDHE"/* same as "kECDHE:-AECDH" */ +# define SSL_TXT_AECDH "AECDH" +# define SSL_TXT_ECDSA "ECDSA" +# define SSL_TXT_PSK "PSK" +# define SSL_TXT_SRP "SRP" + +# define SSL_TXT_DES "DES" +# define SSL_TXT_3DES "3DES" +# define SSL_TXT_RC4 "RC4" +# define SSL_TXT_RC2 "RC2" +# define SSL_TXT_IDEA "IDEA" +# define SSL_TXT_SEED "SEED" +# define SSL_TXT_AES128 "AES128" +# define SSL_TXT_AES256 "AES256" +# define SSL_TXT_AES "AES" +# define SSL_TXT_AES_GCM "AESGCM" +# define SSL_TXT_AES_CCM "AESCCM" +# define SSL_TXT_AES_CCM_8 "AESCCM8" +# define SSL_TXT_CAMELLIA128 "CAMELLIA128" +# define SSL_TXT_CAMELLIA256 "CAMELLIA256" +# define SSL_TXT_CAMELLIA "CAMELLIA" +# define SSL_TXT_CHACHA20 "CHACHA20" +# define SSL_TXT_GOST "GOST89" +# define SSL_TXT_ARIA "ARIA" +# define SSL_TXT_ARIA_GCM "ARIAGCM" +# define SSL_TXT_ARIA128 "ARIA128" +# define SSL_TXT_ARIA256 "ARIA256" + +# define SSL_TXT_MD5 "MD5" +# define SSL_TXT_SHA1 "SHA1" +# define SSL_TXT_SHA "SHA"/* same as "SHA1" */ +# define SSL_TXT_GOST94 "GOST94" +# define SSL_TXT_GOST89MAC "GOST89MAC" +# define SSL_TXT_GOST12 "GOST12" +# define SSL_TXT_GOST89MAC12 "GOST89MAC12" +# define SSL_TXT_SHA256 "SHA256" +# define SSL_TXT_SHA384 "SHA384" + +# define SSL_TXT_SSLV3 "SSLv3" +# define SSL_TXT_TLSV1 "TLSv1" +# define SSL_TXT_TLSV1_1 "TLSv1.1" +# define SSL_TXT_TLSV1_2 "TLSv1.2" + +# define SSL_TXT_ALL "ALL" + +/*- + * COMPLEMENTOF* definitions. These identifiers are used to (de-select) + * ciphers normally not being used. + * Example: "RC4" will activate all ciphers using RC4 including ciphers + * without authentication, which would normally disabled by DEFAULT (due + * the "!ADH" being part of default). Therefore "RC4:!COMPLEMENTOFDEFAULT" + * will make sure that it is also disabled in the specific selection. + * COMPLEMENTOF* identifiers are portable between version, as adjustments + * to the default cipher setup will also be included here. + * + * COMPLEMENTOFDEFAULT does not experience the same special treatment that + * DEFAULT gets, as only selection is being done and no sorting as needed + * for DEFAULT. + */ +# define SSL_TXT_CMPALL "COMPLEMENTOFALL" +# define SSL_TXT_CMPDEF "COMPLEMENTOFDEFAULT" + +/* + * The following cipher list is used by default. It also is substituted when + * an application-defined cipher list string starts with 'DEFAULT'. + * This applies to ciphersuites for TLSv1.2 and below. 
+ */ +# define SSL_DEFAULT_CIPHER_LIST "ALL:!COMPLEMENTOFDEFAULT:!eNULL" +/* This is the default set of TLSv1.3 ciphersuites */ +# if !defined(OPENSSL_NO_CHACHA) && !defined(OPENSSL_NO_POLY1305) +# define TLS_DEFAULT_CIPHERSUITES "TLS_AES_256_GCM_SHA384:" \ + "TLS_CHACHA20_POLY1305_SHA256:" \ + "TLS_AES_128_GCM_SHA256" +# else +# define TLS_DEFAULT_CIPHERSUITES "TLS_AES_256_GCM_SHA384:" \ + "TLS_AES_128_GCM_SHA256" +#endif +/* + * As of OpenSSL 1.0.0, ssl_create_cipher_list() in ssl/ssl_ciph.c always + * starts with a reasonable order, and all we have to do for DEFAULT is + * throwing out anonymous and unencrypted ciphersuites! (The latter are not + * actually enabled by ALL, but "ALL:RSA" would enable some of them.) + */ + +/* Used in SSL_set_shutdown()/SSL_get_shutdown(); */ +# define SSL_SENT_SHUTDOWN 1 +# define SSL_RECEIVED_SHUTDOWN 2 + +#ifdef __cplusplus +} +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +# define SSL_FILETYPE_ASN1 X509_FILETYPE_ASN1 +# define SSL_FILETYPE_PEM X509_FILETYPE_PEM + +/* + * This is needed to stop compilers complaining about the 'struct ssl_st *' + * function parameters used to prototype callbacks in SSL_CTX. + */ +typedef struct ssl_st *ssl_crock_st; +typedef struct tls_session_ticket_ext_st TLS_SESSION_TICKET_EXT; +typedef struct ssl_method_st SSL_METHOD; +typedef struct ssl_cipher_st SSL_CIPHER; +typedef struct ssl_session_st SSL_SESSION; +typedef struct tls_sigalgs_st TLS_SIGALGS; +typedef struct ssl_conf_ctx_st SSL_CONF_CTX; +typedef struct ssl_comp_st SSL_COMP; + +STACK_OF(SSL_CIPHER); +STACK_OF(SSL_COMP); + +/* SRTP protection profiles for use with the use_srtp extension (RFC 5764)*/ +typedef struct srtp_protection_profile_st { + const char *name; + unsigned long id; +} SRTP_PROTECTION_PROFILE; + +DEFINE_STACK_OF(SRTP_PROTECTION_PROFILE) + +typedef int (*tls_session_ticket_ext_cb_fn)(SSL *s, const unsigned char *data, + int len, void *arg); +typedef int (*tls_session_secret_cb_fn)(SSL *s, void *secret, int *secret_len, + STACK_OF(SSL_CIPHER) *peer_ciphers, + const SSL_CIPHER **cipher, void *arg); + +/* Extension context codes */ +/* This extension is only allowed in TLS */ +#define SSL_EXT_TLS_ONLY 0x0001 +/* This extension is only allowed in DTLS */ +#define SSL_EXT_DTLS_ONLY 0x0002 +/* Some extensions may be allowed in DTLS but we don't implement them for it */ +#define SSL_EXT_TLS_IMPLEMENTATION_ONLY 0x0004 +/* Most extensions are not defined for SSLv3 but EXT_TYPE_renegotiate is */ +#define SSL_EXT_SSL3_ALLOWED 0x0008 +/* Extension is only defined for TLS1.2 and below */ +#define SSL_EXT_TLS1_2_AND_BELOW_ONLY 0x0010 +/* Extension is only defined for TLS1.3 and above */ +#define SSL_EXT_TLS1_3_ONLY 0x0020 +/* Ignore this extension during parsing if we are resuming */ +#define SSL_EXT_IGNORE_ON_RESUMPTION 0x0040 +#define SSL_EXT_CLIENT_HELLO 0x0080 +/* Really means TLS1.2 or below */ +#define SSL_EXT_TLS1_2_SERVER_HELLO 0x0100 +#define SSL_EXT_TLS1_3_SERVER_HELLO 0x0200 +#define SSL_EXT_TLS1_3_ENCRYPTED_EXTENSIONS 0x0400 +#define SSL_EXT_TLS1_3_HELLO_RETRY_REQUEST 0x0800 +#define SSL_EXT_TLS1_3_CERTIFICATE 0x1000 +#define SSL_EXT_TLS1_3_NEW_SESSION_TICKET 0x2000 +#define SSL_EXT_TLS1_3_CERTIFICATE_REQUEST 0x4000 + +/* Typedefs for handling custom extensions */ + +typedef int (*custom_ext_add_cb)(SSL *s, unsigned int ext_type, + const unsigned char **out, size_t *outlen, + int *al, void *add_arg); + +typedef void (*custom_ext_free_cb)(SSL *s, unsigned int ext_type, + const unsigned char *out, void *add_arg); + +typedef int 
(*custom_ext_parse_cb)(SSL *s, unsigned int ext_type, + const unsigned char *in, size_t inlen, + int *al, void *parse_arg); + + +typedef int (*SSL_custom_ext_add_cb_ex)(SSL *s, unsigned int ext_type, + unsigned int context, + const unsigned char **out, + size_t *outlen, X509 *x, + size_t chainidx, + int *al, void *add_arg); + +typedef void (*SSL_custom_ext_free_cb_ex)(SSL *s, unsigned int ext_type, + unsigned int context, + const unsigned char *out, + void *add_arg); + +typedef int (*SSL_custom_ext_parse_cb_ex)(SSL *s, unsigned int ext_type, + unsigned int context, + const unsigned char *in, + size_t inlen, X509 *x, + size_t chainidx, + int *al, void *parse_arg); + +/* Typedef for verification callback */ +typedef int (*SSL_verify_cb)(int preverify_ok, X509_STORE_CTX *x509_ctx); + +/* + * Some values are reserved until OpenSSL 1.2.0 because they were previously + * included in SSL_OP_ALL in a 1.1.x release. + * + * Reserved value (until OpenSSL 1.2.0) 0x00000001U + * Reserved value (until OpenSSL 1.2.0) 0x00000002U + */ +/* Allow initial connection to servers that don't support RI */ +# define SSL_OP_LEGACY_SERVER_CONNECT 0x00000004U + +/* Reserved value (until OpenSSL 1.2.0) 0x00000008U */ +# define SSL_OP_TLSEXT_PADDING 0x00000010U +/* Reserved value (until OpenSSL 1.2.0) 0x00000020U */ +# define SSL_OP_SAFARI_ECDHE_ECDSA_BUG 0x00000040U +/* + * Reserved value (until OpenSSL 1.2.0) 0x00000080U + * Reserved value (until OpenSSL 1.2.0) 0x00000100U + * Reserved value (until OpenSSL 1.2.0) 0x00000200U + */ + +/* In TLSv1.3 allow a non-(ec)dhe based kex_mode */ +# define SSL_OP_ALLOW_NO_DHE_KEX 0x00000400U + +/* + * Disable SSL 3.0/TLS 1.0 CBC vulnerability workaround that was added in + * OpenSSL 0.9.6d. Usually (depending on the application protocol) the + * workaround is not needed. Unfortunately some broken SSL/TLS + * implementations cannot handle it at all, which is why we include it in + * SSL_OP_ALL. Added in 0.9.6e + */ +# define SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS 0x00000800U + +/* DTLS options */ +# define SSL_OP_NO_QUERY_MTU 0x00001000U +/* Turn on Cookie Exchange (on relevant for servers) */ +# define SSL_OP_COOKIE_EXCHANGE 0x00002000U +/* Don't use RFC4507 ticket extension */ +# define SSL_OP_NO_TICKET 0x00004000U +# ifndef OPENSSL_NO_DTLS1_METHOD +/* Use Cisco's "speshul" version of DTLS_BAD_VER + * (only with deprecated DTLSv1_client_method()) */ +# define SSL_OP_CISCO_ANYCONNECT 0x00008000U +# endif + +/* As server, disallow session resumption on renegotiation */ +# define SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION 0x00010000U +/* Don't use compression even if supported */ +# define SSL_OP_NO_COMPRESSION 0x00020000U +/* Permit unsafe legacy renegotiation */ +# define SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION 0x00040000U +/* Disable encrypt-then-mac */ +# define SSL_OP_NO_ENCRYPT_THEN_MAC 0x00080000U + +/* + * Enable TLSv1.3 Compatibility mode. This is on by default. A future version + * of OpenSSL may have this disabled by default. + */ +# define SSL_OP_ENABLE_MIDDLEBOX_COMPAT 0x00100000U + +/* Prioritize Chacha20Poly1305 when client does. 
+ * Modifies SSL_OP_CIPHER_SERVER_PREFERENCE */ +# define SSL_OP_PRIORITIZE_CHACHA 0x00200000U + +/* + * Set on servers to choose the cipher according to the server's preferences + */ +# define SSL_OP_CIPHER_SERVER_PREFERENCE 0x00400000U +/* + * If set, a server will allow a client to issue a SSLv3.0 version number as + * latest version supported in the premaster secret, even when TLSv1.0 + * (version 3.1) was announced in the client hello. Normally this is + * forbidden to prevent version rollback attacks. + */ +# define SSL_OP_TLS_ROLLBACK_BUG 0x00800000U + +/* + * Switches off automatic TLSv1.3 anti-replay protection for early data. This + * is a server-side option only (no effect on the client). + */ +# define SSL_OP_NO_ANTI_REPLAY 0x01000000U + +# define SSL_OP_NO_SSLv3 0x02000000U +# define SSL_OP_NO_TLSv1 0x04000000U +# define SSL_OP_NO_TLSv1_2 0x08000000U +# define SSL_OP_NO_TLSv1_1 0x10000000U +# define SSL_OP_NO_TLSv1_3 0x20000000U + +# define SSL_OP_NO_DTLSv1 0x04000000U +# define SSL_OP_NO_DTLSv1_2 0x08000000U + +# define SSL_OP_NO_SSL_MASK (SSL_OP_NO_SSLv3|\ + SSL_OP_NO_TLSv1|SSL_OP_NO_TLSv1_1|SSL_OP_NO_TLSv1_2|SSL_OP_NO_TLSv1_3) +# define SSL_OP_NO_DTLS_MASK (SSL_OP_NO_DTLSv1|SSL_OP_NO_DTLSv1_2) + +/* Disallow all renegotiation */ +# define SSL_OP_NO_RENEGOTIATION 0x40000000U + +/* + * Make server add server-hello extension from early version of cryptopro + * draft, when GOST ciphersuite is negotiated. Required for interoperability + * with CryptoPro CSP 3.x + */ +# define SSL_OP_CRYPTOPRO_TLSEXT_BUG 0x80000000U + +/* + * SSL_OP_ALL: various bug workarounds that should be rather harmless. + * This used to be 0x000FFFFFL before 0.9.7. + * This used to be 0x80000BFFU before 1.1.1. + */ +# define SSL_OP_ALL (SSL_OP_CRYPTOPRO_TLSEXT_BUG|\ + SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS|\ + SSL_OP_LEGACY_SERVER_CONNECT|\ + SSL_OP_TLSEXT_PADDING|\ + SSL_OP_SAFARI_ECDHE_ECDSA_BUG) + +/* OBSOLETE OPTIONS: retained for compatibility */ + +/* Removed from OpenSSL 1.1.0. Was 0x00000001L */ +/* Related to removed SSLv2. */ +# define SSL_OP_MICROSOFT_SESS_ID_BUG 0x0 +/* Removed from OpenSSL 1.1.0. Was 0x00000002L */ +/* Related to removed SSLv2. */ +# define SSL_OP_NETSCAPE_CHALLENGE_BUG 0x0 +/* Removed from OpenSSL 0.9.8q and 1.0.0c. Was 0x00000008L */ +/* Dead forever, see CVE-2010-4180 */ +# define SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG 0x0 +/* Removed from OpenSSL 1.0.1h and 1.0.2. Was 0x00000010L */ +/* Refers to ancient SSLREF and SSLv2. */ +# define SSL_OP_SSLREF2_REUSE_CERT_TYPE_BUG 0x0 +/* Removed from OpenSSL 1.1.0. Was 0x00000020 */ +# define SSL_OP_MICROSOFT_BIG_SSLV3_BUFFER 0x0 +/* Removed from OpenSSL 0.9.7h and 0.9.8b. Was 0x00000040L */ +# define SSL_OP_MSIE_SSLV2_RSA_PADDING 0x0 +/* Removed from OpenSSL 1.1.0. Was 0x00000080 */ +/* Ancient SSLeay version. */ +# define SSL_OP_SSLEAY_080_CLIENT_DH_BUG 0x0 +/* Removed from OpenSSL 1.1.0. Was 0x00000100L */ +# define SSL_OP_TLS_D5_BUG 0x0 +/* Removed from OpenSSL 1.1.0. Was 0x00000200L */ +# define SSL_OP_TLS_BLOCK_PADDING_BUG 0x0 +/* Removed from OpenSSL 1.1.0. Was 0x00080000L */ +# define SSL_OP_SINGLE_ECDH_USE 0x0 +/* Removed from OpenSSL 1.1.0. Was 0x00100000L */ +# define SSL_OP_SINGLE_DH_USE 0x0 +/* Removed from OpenSSL 1.0.1k and 1.0.2. Was 0x00200000L */ +# define SSL_OP_EPHEMERAL_RSA 0x0 +/* Removed from OpenSSL 1.1.0. Was 0x01000000L */ +# define SSL_OP_NO_SSLv2 0x0 +/* Removed from OpenSSL 1.0.1. Was 0x08000000L */ +# define SSL_OP_PKCS1_CHECK_1 0x0 +/* Removed from OpenSSL 1.0.1. 
Was 0x10000000L */ +# define SSL_OP_PKCS1_CHECK_2 0x0 +/* Removed from OpenSSL 1.1.0. Was 0x20000000L */ +# define SSL_OP_NETSCAPE_CA_DN_BUG 0x0 +/* Removed from OpenSSL 1.1.0. Was 0x40000000L */ +# define SSL_OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG 0x0 + +/* + * Allow SSL_write(..., n) to return r with 0 < r < n (i.e. report success + * when just a single record has been written): + */ +# define SSL_MODE_ENABLE_PARTIAL_WRITE 0x00000001U +/* + * Make it possible to retry SSL_write() with changed buffer location (buffer + * contents must stay the same!); this is not the default to avoid the + * misconception that non-blocking SSL_write() behaves like non-blocking + * write(): + */ +# define SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER 0x00000002U +/* + * Never bother the application with retries if the transport is blocking: + */ +# define SSL_MODE_AUTO_RETRY 0x00000004U +/* Don't attempt to automatically build certificate chain */ +# define SSL_MODE_NO_AUTO_CHAIN 0x00000008U +/* + * Save RAM by releasing read and write buffers when they're empty. (SSL3 and + * TLS only.) Released buffers are freed. + */ +# define SSL_MODE_RELEASE_BUFFERS 0x00000010U +/* + * Send the current time in the Random fields of the ClientHello and + * ServerHello records for compatibility with hypothetical implementations + * that require it. + */ +# define SSL_MODE_SEND_CLIENTHELLO_TIME 0x00000020U +# define SSL_MODE_SEND_SERVERHELLO_TIME 0x00000040U +/* + * Send TLS_FALLBACK_SCSV in the ClientHello. To be set only by applications + * that reconnect with a downgraded protocol version; see + * draft-ietf-tls-downgrade-scsv-00 for details. DO NOT ENABLE THIS if your + * application attempts a normal handshake. Only use this in explicit + * fallback retries, following the guidance in + * draft-ietf-tls-downgrade-scsv-00. + */ +# define SSL_MODE_SEND_FALLBACK_SCSV 0x00000080U +/* + * Support Asynchronous operation + */ +# define SSL_MODE_ASYNC 0x00000100U + +/* + * When using DTLS/SCTP, include the terminating zero in the label + * used for computing the endpoint-pair shared secret. Required for + * interoperability with implementations having this bug like these + * older version of OpenSSL: + * - OpenSSL 1.0.0 series + * - OpenSSL 1.0.1 series + * - OpenSSL 1.0.2 series + * - OpenSSL 1.1.0 series + * - OpenSSL 1.1.1 and 1.1.1a + */ +# define SSL_MODE_DTLS_SCTP_LABEL_LENGTH_BUG 0x00000400U + +/* Cert related flags */ +/* + * Many implementations ignore some aspects of the TLS standards such as + * enforcing certificate chain algorithms. When this is set we enforce them. 
+ */ +# define SSL_CERT_FLAG_TLS_STRICT 0x00000001U + +/* Suite B modes, takes same values as certificate verify flags */ +# define SSL_CERT_FLAG_SUITEB_128_LOS_ONLY 0x10000 +/* Suite B 192 bit only mode */ +# define SSL_CERT_FLAG_SUITEB_192_LOS 0x20000 +/* Suite B 128 bit mode allowing 192 bit algorithms */ +# define SSL_CERT_FLAG_SUITEB_128_LOS 0x30000 + +/* Perform all sorts of protocol violations for testing purposes */ +# define SSL_CERT_FLAG_BROKEN_PROTOCOL 0x10000000 + +/* Flags for building certificate chains */ +/* Treat any existing certificates as untrusted CAs */ +# define SSL_BUILD_CHAIN_FLAG_UNTRUSTED 0x1 +/* Don't include root CA in chain */ +# define SSL_BUILD_CHAIN_FLAG_NO_ROOT 0x2 +/* Just check certificates already there */ +# define SSL_BUILD_CHAIN_FLAG_CHECK 0x4 +/* Ignore verification errors */ +# define SSL_BUILD_CHAIN_FLAG_IGNORE_ERROR 0x8 +/* Clear verification errors from queue */ +# define SSL_BUILD_CHAIN_FLAG_CLEAR_ERROR 0x10 + +/* Flags returned by SSL_check_chain */ +/* Certificate can be used with this session */ +# define CERT_PKEY_VALID 0x1 +/* Certificate can also be used for signing */ +# define CERT_PKEY_SIGN 0x2 +/* EE certificate signing algorithm OK */ +# define CERT_PKEY_EE_SIGNATURE 0x10 +/* CA signature algorithms OK */ +# define CERT_PKEY_CA_SIGNATURE 0x20 +/* EE certificate parameters OK */ +# define CERT_PKEY_EE_PARAM 0x40 +/* CA certificate parameters OK */ +# define CERT_PKEY_CA_PARAM 0x80 +/* Signing explicitly allowed as opposed to SHA1 fallback */ +# define CERT_PKEY_EXPLICIT_SIGN 0x100 +/* Client CA issuer names match (always set for server cert) */ +# define CERT_PKEY_ISSUER_NAME 0x200 +/* Cert type matches client types (always set for server cert) */ +# define CERT_PKEY_CERT_TYPE 0x400 +/* Cert chain suitable to Suite B */ +# define CERT_PKEY_SUITEB 0x800 + +# define SSL_CONF_FLAG_CMDLINE 0x1 +# define SSL_CONF_FLAG_FILE 0x2 +# define SSL_CONF_FLAG_CLIENT 0x4 +# define SSL_CONF_FLAG_SERVER 0x8 +# define SSL_CONF_FLAG_SHOW_ERRORS 0x10 +# define SSL_CONF_FLAG_CERTIFICATE 0x20 +# define SSL_CONF_FLAG_REQUIRE_PRIVATE 0x40 +/* Configuration value types */ +# define SSL_CONF_TYPE_UNKNOWN 0x0 +# define SSL_CONF_TYPE_STRING 0x1 +# define SSL_CONF_TYPE_FILE 0x2 +# define SSL_CONF_TYPE_DIR 0x3 +# define SSL_CONF_TYPE_NONE 0x4 + +/* Maximum length of the application-controlled segment of a a TLSv1.3 cookie */ +# define SSL_COOKIE_LENGTH 4096 + +/* + * Note: SSL[_CTX]_set_{options,mode} use |= op on the previous value, they + * cannot be used to clear bits. 
+ */ + +unsigned long SSL_CTX_get_options(const SSL_CTX *ctx); +unsigned long SSL_get_options(const SSL *s); +unsigned long SSL_CTX_clear_options(SSL_CTX *ctx, unsigned long op); +unsigned long SSL_clear_options(SSL *s, unsigned long op); +unsigned long SSL_CTX_set_options(SSL_CTX *ctx, unsigned long op); +unsigned long SSL_set_options(SSL *s, unsigned long op); + +# define SSL_CTX_set_mode(ctx,op) \ + SSL_CTX_ctrl((ctx),SSL_CTRL_MODE,(op),NULL) +# define SSL_CTX_clear_mode(ctx,op) \ + SSL_CTX_ctrl((ctx),SSL_CTRL_CLEAR_MODE,(op),NULL) +# define SSL_CTX_get_mode(ctx) \ + SSL_CTX_ctrl((ctx),SSL_CTRL_MODE,0,NULL) +# define SSL_clear_mode(ssl,op) \ + SSL_ctrl((ssl),SSL_CTRL_CLEAR_MODE,(op),NULL) +# define SSL_set_mode(ssl,op) \ + SSL_ctrl((ssl),SSL_CTRL_MODE,(op),NULL) +# define SSL_get_mode(ssl) \ + SSL_ctrl((ssl),SSL_CTRL_MODE,0,NULL) +# define SSL_set_mtu(ssl, mtu) \ + SSL_ctrl((ssl),SSL_CTRL_SET_MTU,(mtu),NULL) +# define DTLS_set_link_mtu(ssl, mtu) \ + SSL_ctrl((ssl),DTLS_CTRL_SET_LINK_MTU,(mtu),NULL) +# define DTLS_get_link_min_mtu(ssl) \ + SSL_ctrl((ssl),DTLS_CTRL_GET_LINK_MIN_MTU,0,NULL) + +# define SSL_get_secure_renegotiation_support(ssl) \ + SSL_ctrl((ssl), SSL_CTRL_GET_RI_SUPPORT, 0, NULL) + +# ifndef OPENSSL_NO_HEARTBEATS +# define SSL_heartbeat(ssl) \ + SSL_ctrl((ssl),SSL_CTRL_DTLS_EXT_SEND_HEARTBEAT,0,NULL) +# endif + +# define SSL_CTX_set_cert_flags(ctx,op) \ + SSL_CTX_ctrl((ctx),SSL_CTRL_CERT_FLAGS,(op),NULL) +# define SSL_set_cert_flags(s,op) \ + SSL_ctrl((s),SSL_CTRL_CERT_FLAGS,(op),NULL) +# define SSL_CTX_clear_cert_flags(ctx,op) \ + SSL_CTX_ctrl((ctx),SSL_CTRL_CLEAR_CERT_FLAGS,(op),NULL) +# define SSL_clear_cert_flags(s,op) \ + SSL_ctrl((s),SSL_CTRL_CLEAR_CERT_FLAGS,(op),NULL) + +void SSL_CTX_set_msg_callback(SSL_CTX *ctx, + void (*cb) (int write_p, int version, + int content_type, const void *buf, + size_t len, SSL *ssl, void *arg)); +void SSL_set_msg_callback(SSL *ssl, + void (*cb) (int write_p, int version, + int content_type, const void *buf, + size_t len, SSL *ssl, void *arg)); +# define SSL_CTX_set_msg_callback_arg(ctx, arg) SSL_CTX_ctrl((ctx), SSL_CTRL_SET_MSG_CALLBACK_ARG, 0, (arg)) +# define SSL_set_msg_callback_arg(ssl, arg) SSL_ctrl((ssl), SSL_CTRL_SET_MSG_CALLBACK_ARG, 0, (arg)) + +# define SSL_get_extms_support(s) \ + SSL_ctrl((s),SSL_CTRL_GET_EXTMS_SUPPORT,0,NULL) + +# ifndef OPENSSL_NO_SRP + +/* see tls_srp.c */ +__owur int SSL_SRP_CTX_init(SSL *s); +__owur int SSL_CTX_SRP_CTX_init(SSL_CTX *ctx); +int SSL_SRP_CTX_free(SSL *ctx); +int SSL_CTX_SRP_CTX_free(SSL_CTX *ctx); +__owur int SSL_srp_server_param_with_username(SSL *s, int *ad); +__owur int SRP_Calc_A_param(SSL *s); + +# endif + +/* 100k max cert list */ +# define SSL_MAX_CERT_LIST_DEFAULT 1024*100 + +# define SSL_SESSION_CACHE_MAX_SIZE_DEFAULT (1024*20) + +/* + * This callback type is used inside SSL_CTX, SSL, and in the functions that + * set them. It is used to override the generation of SSL/TLS session IDs in + * a server. Return value should be zero on an error, non-zero to proceed. + * Also, callbacks should themselves check if the id they generate is unique + * otherwise the SSL handshake will fail with an error - callbacks can do + * this using the 'ssl' value they're passed by; + * SSL_has_matching_session_id(ssl, id, *id_len) The length value passed in + * is set at the maximum size the session ID can be. In SSLv3/TLSv1 it is 32 + * bytes. The callback can alter this length to be less if desired. It is + * also an error for the callback to set the size to zero. 
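To make the callback contract described above concrete, a sketch of a session-ID generator that checks uniqueness with SSL_has_matching_session_id() (illustrative only, not part of the vendored file; RAND_bytes() comes from <openssl/rand.h>):

#include <openssl/rand.h>
#include <openssl/ssl.h>

/* Sketch of a GEN_SESSION_CB: fill *id with up to *id_len random bytes,
 * retry a few times if the id is already in use, return 0 on failure. */
static int generate_session_id(SSL *ssl, unsigned char *id, unsigned int *id_len)
{
    int tries;

    for (tries = 0; tries < 10; tries++) {
        if (RAND_bytes(id, (int)*id_len) != 1)
            return 0;
        if (!SSL_has_matching_session_id(ssl, id, *id_len))
            return 1;               /* unique: accept this id */
    }
    return 0;                       /* could not find a unique id */
}
/* Installed with SSL_CTX_set_generate_session_id(ctx, generate_session_id); */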
+ */ +typedef int (*GEN_SESSION_CB) (SSL *ssl, unsigned char *id, + unsigned int *id_len); + +# define SSL_SESS_CACHE_OFF 0x0000 +# define SSL_SESS_CACHE_CLIENT 0x0001 +# define SSL_SESS_CACHE_SERVER 0x0002 +# define SSL_SESS_CACHE_BOTH (SSL_SESS_CACHE_CLIENT|SSL_SESS_CACHE_SERVER) +# define SSL_SESS_CACHE_NO_AUTO_CLEAR 0x0080 +/* enough comments already ... see SSL_CTX_set_session_cache_mode(3) */ +# define SSL_SESS_CACHE_NO_INTERNAL_LOOKUP 0x0100 +# define SSL_SESS_CACHE_NO_INTERNAL_STORE 0x0200 +# define SSL_SESS_CACHE_NO_INTERNAL \ + (SSL_SESS_CACHE_NO_INTERNAL_LOOKUP|SSL_SESS_CACHE_NO_INTERNAL_STORE) + +LHASH_OF(SSL_SESSION) *SSL_CTX_sessions(SSL_CTX *ctx); +# define SSL_CTX_sess_number(ctx) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SESS_NUMBER,0,NULL) +# define SSL_CTX_sess_connect(ctx) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SESS_CONNECT,0,NULL) +# define SSL_CTX_sess_connect_good(ctx) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SESS_CONNECT_GOOD,0,NULL) +# define SSL_CTX_sess_connect_renegotiate(ctx) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SESS_CONNECT_RENEGOTIATE,0,NULL) +# define SSL_CTX_sess_accept(ctx) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SESS_ACCEPT,0,NULL) +# define SSL_CTX_sess_accept_renegotiate(ctx) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SESS_ACCEPT_RENEGOTIATE,0,NULL) +# define SSL_CTX_sess_accept_good(ctx) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SESS_ACCEPT_GOOD,0,NULL) +# define SSL_CTX_sess_hits(ctx) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SESS_HIT,0,NULL) +# define SSL_CTX_sess_cb_hits(ctx) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SESS_CB_HIT,0,NULL) +# define SSL_CTX_sess_misses(ctx) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SESS_MISSES,0,NULL) +# define SSL_CTX_sess_timeouts(ctx) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SESS_TIMEOUTS,0,NULL) +# define SSL_CTX_sess_cache_full(ctx) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SESS_CACHE_FULL,0,NULL) + +void SSL_CTX_sess_set_new_cb(SSL_CTX *ctx, + int (*new_session_cb) (struct ssl_st *ssl, + SSL_SESSION *sess)); +int (*SSL_CTX_sess_get_new_cb(SSL_CTX *ctx)) (struct ssl_st *ssl, + SSL_SESSION *sess); +void SSL_CTX_sess_set_remove_cb(SSL_CTX *ctx, + void (*remove_session_cb) (struct ssl_ctx_st + *ctx, + SSL_SESSION *sess)); +void (*SSL_CTX_sess_get_remove_cb(SSL_CTX *ctx)) (struct ssl_ctx_st *ctx, + SSL_SESSION *sess); +void SSL_CTX_sess_set_get_cb(SSL_CTX *ctx, + SSL_SESSION *(*get_session_cb) (struct ssl_st + *ssl, + const unsigned char + *data, int len, + int *copy)); +SSL_SESSION *(*SSL_CTX_sess_get_get_cb(SSL_CTX *ctx)) (struct ssl_st *ssl, + const unsigned char *data, + int len, int *copy); +void SSL_CTX_set_info_callback(SSL_CTX *ctx, + void (*cb) (const SSL *ssl, int type, int val)); +void (*SSL_CTX_get_info_callback(SSL_CTX *ctx)) (const SSL *ssl, int type, + int val); +void SSL_CTX_set_client_cert_cb(SSL_CTX *ctx, + int (*client_cert_cb) (SSL *ssl, X509 **x509, + EVP_PKEY **pkey)); +int (*SSL_CTX_get_client_cert_cb(SSL_CTX *ctx)) (SSL *ssl, X509 **x509, + EVP_PKEY **pkey); +# ifndef OPENSSL_NO_ENGINE +__owur int SSL_CTX_set_client_cert_engine(SSL_CTX *ctx, ENGINE *e); +# endif +void SSL_CTX_set_cookie_generate_cb(SSL_CTX *ctx, + int (*app_gen_cookie_cb) (SSL *ssl, + unsigned char + *cookie, + unsigned int + *cookie_len)); +void SSL_CTX_set_cookie_verify_cb(SSL_CTX *ctx, + int (*app_verify_cookie_cb) (SSL *ssl, + const unsigned + char *cookie, + unsigned int + cookie_len)); + +void SSL_CTX_set_stateless_cookie_generate_cb( + SSL_CTX *ctx, + int (*gen_stateless_cookie_cb) (SSL *ssl, + unsigned char *cookie, + size_t *cookie_len)); +void SSL_CTX_set_stateless_cookie_verify_cb( + SSL_CTX *ctx, + int (*verify_stateless_cookie_cb) (SSL *ssl, + 
const unsigned char *cookie, + size_t cookie_len)); +# ifndef OPENSSL_NO_NEXTPROTONEG + +typedef int (*SSL_CTX_npn_advertised_cb_func)(SSL *ssl, + const unsigned char **out, + unsigned int *outlen, + void *arg); +void SSL_CTX_set_next_protos_advertised_cb(SSL_CTX *s, + SSL_CTX_npn_advertised_cb_func cb, + void *arg); +# define SSL_CTX_set_npn_advertised_cb SSL_CTX_set_next_protos_advertised_cb + +typedef int (*SSL_CTX_npn_select_cb_func)(SSL *s, + unsigned char **out, + unsigned char *outlen, + const unsigned char *in, + unsigned int inlen, + void *arg); +void SSL_CTX_set_next_proto_select_cb(SSL_CTX *s, + SSL_CTX_npn_select_cb_func cb, + void *arg); +# define SSL_CTX_set_npn_select_cb SSL_CTX_set_next_proto_select_cb + +void SSL_get0_next_proto_negotiated(const SSL *s, const unsigned char **data, + unsigned *len); +# define SSL_get0_npn_negotiated SSL_get0_next_proto_negotiated +# endif + +__owur int SSL_select_next_proto(unsigned char **out, unsigned char *outlen, + const unsigned char *in, unsigned int inlen, + const unsigned char *client, + unsigned int client_len); + +# define OPENSSL_NPN_UNSUPPORTED 0 +# define OPENSSL_NPN_NEGOTIATED 1 +# define OPENSSL_NPN_NO_OVERLAP 2 + +__owur int SSL_CTX_set_alpn_protos(SSL_CTX *ctx, const unsigned char *protos, + unsigned int protos_len); +__owur int SSL_set_alpn_protos(SSL *ssl, const unsigned char *protos, + unsigned int protos_len); +typedef int (*SSL_CTX_alpn_select_cb_func)(SSL *ssl, + const unsigned char **out, + unsigned char *outlen, + const unsigned char *in, + unsigned int inlen, + void *arg); +void SSL_CTX_set_alpn_select_cb(SSL_CTX *ctx, + SSL_CTX_alpn_select_cb_func cb, + void *arg); +void SSL_get0_alpn_selected(const SSL *ssl, const unsigned char **data, + unsigned int *len); + +# ifndef OPENSSL_NO_PSK +/* + * the maximum length of the buffer given to callbacks containing the + * resulting identity/psk + */ +# define PSK_MAX_IDENTITY_LEN 128 +# define PSK_MAX_PSK_LEN 256 +typedef unsigned int (*SSL_psk_client_cb_func)(SSL *ssl, + const char *hint, + char *identity, + unsigned int max_identity_len, + unsigned char *psk, + unsigned int max_psk_len); +void SSL_CTX_set_psk_client_callback(SSL_CTX *ctx, SSL_psk_client_cb_func cb); +void SSL_set_psk_client_callback(SSL *ssl, SSL_psk_client_cb_func cb); + +typedef unsigned int (*SSL_psk_server_cb_func)(SSL *ssl, + const char *identity, + unsigned char *psk, + unsigned int max_psk_len); +void SSL_CTX_set_psk_server_callback(SSL_CTX *ctx, SSL_psk_server_cb_func cb); +void SSL_set_psk_server_callback(SSL *ssl, SSL_psk_server_cb_func cb); + +__owur int SSL_CTX_use_psk_identity_hint(SSL_CTX *ctx, const char *identity_hint); +__owur int SSL_use_psk_identity_hint(SSL *s, const char *identity_hint); +const char *SSL_get_psk_identity_hint(const SSL *s); +const char *SSL_get_psk_identity(const SSL *s); +# endif + +typedef int (*SSL_psk_find_session_cb_func)(SSL *ssl, + const unsigned char *identity, + size_t identity_len, + SSL_SESSION **sess); +typedef int (*SSL_psk_use_session_cb_func)(SSL *ssl, const EVP_MD *md, + const unsigned char **id, + size_t *idlen, + SSL_SESSION **sess); + +void SSL_set_psk_find_session_callback(SSL *s, SSL_psk_find_session_cb_func cb); +void SSL_CTX_set_psk_find_session_callback(SSL_CTX *ctx, + SSL_psk_find_session_cb_func cb); +void SSL_set_psk_use_session_callback(SSL *s, SSL_psk_use_session_cb_func cb); +void SSL_CTX_set_psk_use_session_callback(SSL_CTX *ctx, + SSL_psk_use_session_cb_func cb); + +/* Register callbacks to handle custom TLS Extensions for client or 
server. */ + +__owur int SSL_CTX_has_client_custom_ext(const SSL_CTX *ctx, + unsigned int ext_type); + +__owur int SSL_CTX_add_client_custom_ext(SSL_CTX *ctx, + unsigned int ext_type, + custom_ext_add_cb add_cb, + custom_ext_free_cb free_cb, + void *add_arg, + custom_ext_parse_cb parse_cb, + void *parse_arg); + +__owur int SSL_CTX_add_server_custom_ext(SSL_CTX *ctx, + unsigned int ext_type, + custom_ext_add_cb add_cb, + custom_ext_free_cb free_cb, + void *add_arg, + custom_ext_parse_cb parse_cb, + void *parse_arg); + +__owur int SSL_CTX_add_custom_ext(SSL_CTX *ctx, unsigned int ext_type, + unsigned int context, + SSL_custom_ext_add_cb_ex add_cb, + SSL_custom_ext_free_cb_ex free_cb, + void *add_arg, + SSL_custom_ext_parse_cb_ex parse_cb, + void *parse_arg); + +__owur int SSL_extension_supported(unsigned int ext_type); + +# define SSL_NOTHING 1 +# define SSL_WRITING 2 +# define SSL_READING 3 +# define SSL_X509_LOOKUP 4 +# define SSL_ASYNC_PAUSED 5 +# define SSL_ASYNC_NO_JOBS 6 +# define SSL_CLIENT_HELLO_CB 7 + +/* These will only be used when doing non-blocking IO */ +# define SSL_want_nothing(s) (SSL_want(s) == SSL_NOTHING) +# define SSL_want_read(s) (SSL_want(s) == SSL_READING) +# define SSL_want_write(s) (SSL_want(s) == SSL_WRITING) +# define SSL_want_x509_lookup(s) (SSL_want(s) == SSL_X509_LOOKUP) +# define SSL_want_async(s) (SSL_want(s) == SSL_ASYNC_PAUSED) +# define SSL_want_async_job(s) (SSL_want(s) == SSL_ASYNC_NO_JOBS) +# define SSL_want_client_hello_cb(s) (SSL_want(s) == SSL_CLIENT_HELLO_CB) + +# define SSL_MAC_FLAG_READ_MAC_STREAM 1 +# define SSL_MAC_FLAG_WRITE_MAC_STREAM 2 + +/* + * A callback for logging out TLS key material. This callback should log out + * |line| followed by a newline. + */ +typedef void (*SSL_CTX_keylog_cb_func)(const SSL *ssl, const char *line); + +/* + * SSL_CTX_set_keylog_callback configures a callback to log key material. This + * is intended for debugging use with tools like Wireshark. The cb function + * should log line followed by a newline. + */ +void SSL_CTX_set_keylog_callback(SSL_CTX *ctx, SSL_CTX_keylog_cb_func cb); + +/* + * SSL_CTX_get_keylog_callback returns the callback configured by + * SSL_CTX_set_keylog_callback. 
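For illustration, a minimal keylog callback of the kind described above, appending each line to a file that Wireshark can read (a sketch, not part of the vendored file; the log path is a placeholder):

#include <stdio.h>
#include <openssl/ssl.h>

/* Sketch: append each key-material line, newline-terminated, to a log file. */
static void keylog_cb(const SSL *ssl, const char *line)
{
    FILE *fp = fopen("/tmp/sslkeylog.txt", "a");   /* placeholder path */

    (void)ssl;
    if (fp != NULL) {
        fprintf(fp, "%s\n", line);
        fclose(fp);
    }
}
/* Installed with SSL_CTX_set_keylog_callback(ctx, keylog_cb); */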
+ */
+SSL_CTX_keylog_cb_func SSL_CTX_get_keylog_callback(const SSL_CTX *ctx);
+
+int SSL_CTX_set_max_early_data(SSL_CTX *ctx, uint32_t max_early_data);
+uint32_t SSL_CTX_get_max_early_data(const SSL_CTX *ctx);
+int SSL_set_max_early_data(SSL *s, uint32_t max_early_data);
+uint32_t SSL_get_max_early_data(const SSL *s);
+int SSL_CTX_set_recv_max_early_data(SSL_CTX *ctx, uint32_t recv_max_early_data);
+uint32_t SSL_CTX_get_recv_max_early_data(const SSL_CTX *ctx);
+int SSL_set_recv_max_early_data(SSL *s, uint32_t recv_max_early_data);
+uint32_t SSL_get_recv_max_early_data(const SSL *s);
+
+#ifdef __cplusplus
+}
+#endif
+
+# include <openssl/ssl2.h>
+# include <openssl/ssl3.h>
+# include <openssl/tls1.h> /* This is mostly sslv3 with a few tweaks */
+# include <openssl/dtls1.h> /* Datagram TLS */
+# include <openssl/srtp.h> /* Support for the use_srtp extension */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * These need to be after the above set of includes due to a compiler bug
+ * in VisualStudio 2015
+ */
+DEFINE_STACK_OF_CONST(SSL_CIPHER)
+DEFINE_STACK_OF(SSL_COMP)
+
+/* compatibility */
+# define SSL_set_app_data(s,arg) (SSL_set_ex_data(s,0,(char *)(arg)))
+# define SSL_get_app_data(s) (SSL_get_ex_data(s,0))
+# define SSL_SESSION_set_app_data(s,a) (SSL_SESSION_set_ex_data(s,0, \
+ (char *)(a)))
+# define SSL_SESSION_get_app_data(s) (SSL_SESSION_get_ex_data(s,0))
+# define SSL_CTX_get_app_data(ctx) (SSL_CTX_get_ex_data(ctx,0))
+# define SSL_CTX_set_app_data(ctx,arg) (SSL_CTX_set_ex_data(ctx,0, \
+ (char *)(arg)))
+DEPRECATEDIN_1_1_0(void SSL_set_debug(SSL *s, int debug))
+
+/* TLSv1.3 KeyUpdate message types */
+/* -1 used so that this is an invalid value for the on-the-wire protocol */
+#define SSL_KEY_UPDATE_NONE -1
+/* Values as defined for the on-the-wire protocol */
+#define SSL_KEY_UPDATE_NOT_REQUESTED 0
+#define SSL_KEY_UPDATE_REQUESTED 1
+
+/*
+ * The valid handshake states (one for each type message sent and one for each
+ * type of message received).
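As a brief illustration of the KeyUpdate constants just defined (a sketch, not part of the vendored file; SSL_key_update() and SSL_get_key_update_type() are declared later in this header):

#include <openssl/ssl.h>

/* Sketch: on a TLS 1.3 connection, schedule a key update and ask the peer to
 * update its sending keys as well; the update goes out with the next write. */
static int request_key_update(SSL *ssl)
{
    if (SSL_key_update(ssl, SSL_KEY_UPDATE_REQUESTED) != 1)
        return 0;
    /* Until the update has been written, SSL_get_key_update_type() reports it. */
    return SSL_get_key_update_type(ssl) == SSL_KEY_UPDATE_REQUESTED;
}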
There are also two "special" states: + * TLS = TLS or DTLS state + * DTLS = DTLS specific state + * CR/SR = Client Read/Server Read + * CW/SW = Client Write/Server Write + * + * The "special" states are: + * TLS_ST_BEFORE = No handshake has been initiated yet + * TLS_ST_OK = A handshake has been successfully completed + */ +typedef enum { + TLS_ST_BEFORE, + TLS_ST_OK, + DTLS_ST_CR_HELLO_VERIFY_REQUEST, + TLS_ST_CR_SRVR_HELLO, + TLS_ST_CR_CERT, + TLS_ST_CR_CERT_STATUS, + TLS_ST_CR_KEY_EXCH, + TLS_ST_CR_CERT_REQ, + TLS_ST_CR_SRVR_DONE, + TLS_ST_CR_SESSION_TICKET, + TLS_ST_CR_CHANGE, + TLS_ST_CR_FINISHED, + TLS_ST_CW_CLNT_HELLO, + TLS_ST_CW_CERT, + TLS_ST_CW_KEY_EXCH, + TLS_ST_CW_CERT_VRFY, + TLS_ST_CW_CHANGE, + TLS_ST_CW_NEXT_PROTO, + TLS_ST_CW_FINISHED, + TLS_ST_SW_HELLO_REQ, + TLS_ST_SR_CLNT_HELLO, + DTLS_ST_SW_HELLO_VERIFY_REQUEST, + TLS_ST_SW_SRVR_HELLO, + TLS_ST_SW_CERT, + TLS_ST_SW_KEY_EXCH, + TLS_ST_SW_CERT_REQ, + TLS_ST_SW_SRVR_DONE, + TLS_ST_SR_CERT, + TLS_ST_SR_KEY_EXCH, + TLS_ST_SR_CERT_VRFY, + TLS_ST_SR_NEXT_PROTO, + TLS_ST_SR_CHANGE, + TLS_ST_SR_FINISHED, + TLS_ST_SW_SESSION_TICKET, + TLS_ST_SW_CERT_STATUS, + TLS_ST_SW_CHANGE, + TLS_ST_SW_FINISHED, + TLS_ST_SW_ENCRYPTED_EXTENSIONS, + TLS_ST_CR_ENCRYPTED_EXTENSIONS, + TLS_ST_CR_CERT_VRFY, + TLS_ST_SW_CERT_VRFY, + TLS_ST_CR_HELLO_REQ, + TLS_ST_SW_KEY_UPDATE, + TLS_ST_CW_KEY_UPDATE, + TLS_ST_SR_KEY_UPDATE, + TLS_ST_CR_KEY_UPDATE, + TLS_ST_EARLY_DATA, + TLS_ST_PENDING_EARLY_DATA_END, + TLS_ST_CW_END_OF_EARLY_DATA, + TLS_ST_SR_END_OF_EARLY_DATA +} OSSL_HANDSHAKE_STATE; + +/* + * Most of the following state values are no longer used and are defined to be + * the closest equivalent value in the current state machine code. Not all + * defines have an equivalent and are set to a dummy value (-1). SSL_ST_CONNECT + * and SSL_ST_ACCEPT are still in use in the definition of SSL_CB_ACCEPT_LOOP, + * SSL_CB_ACCEPT_EXIT, SSL_CB_CONNECT_LOOP and SSL_CB_CONNECT_EXIT. + */ + +# define SSL_ST_CONNECT 0x1000 +# define SSL_ST_ACCEPT 0x2000 + +# define SSL_ST_MASK 0x0FFF + +# define SSL_CB_LOOP 0x01 +# define SSL_CB_EXIT 0x02 +# define SSL_CB_READ 0x04 +# define SSL_CB_WRITE 0x08 +# define SSL_CB_ALERT 0x4000/* used in callback */ +# define SSL_CB_READ_ALERT (SSL_CB_ALERT|SSL_CB_READ) +# define SSL_CB_WRITE_ALERT (SSL_CB_ALERT|SSL_CB_WRITE) +# define SSL_CB_ACCEPT_LOOP (SSL_ST_ACCEPT|SSL_CB_LOOP) +# define SSL_CB_ACCEPT_EXIT (SSL_ST_ACCEPT|SSL_CB_EXIT) +# define SSL_CB_CONNECT_LOOP (SSL_ST_CONNECT|SSL_CB_LOOP) +# define SSL_CB_CONNECT_EXIT (SSL_ST_CONNECT|SSL_CB_EXIT) +# define SSL_CB_HANDSHAKE_START 0x10 +# define SSL_CB_HANDSHAKE_DONE 0x20 + +/* Is the SSL_connection established? */ +# define SSL_in_connect_init(a) (SSL_in_init(a) && !SSL_is_server(a)) +# define SSL_in_accept_init(a) (SSL_in_init(a) && SSL_is_server(a)) +int SSL_in_init(const SSL *s); +int SSL_in_before(const SSL *s); +int SSL_is_init_finished(const SSL *s); + +/* + * The following 3 states are kept in ssl->rlayer.rstate when reads fail, you + * should not need these + */ +# define SSL_ST_READ_HEADER 0xF0 +# define SSL_ST_READ_BODY 0xF1 +# define SSL_ST_READ_DONE 0xF2 + +/*- + * Obtain latest Finished message + * -- that we sent (SSL_get_finished) + * -- that we expected from peer (SSL_get_peer_finished). + * Returns length (0 == no Finished so far), copies up to 'count' bytes. 
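A brief sketch of the Finished-message accessors documented above (not part of the vendored file; EVP_MAX_MD_SIZE from <openssl/evp.h> is assumed large enough for current Finished messages):

#include <openssl/evp.h>
#include <openssl/ssl.h>

/* Sketch: report whether both our own and the peer's Finished messages have
 * been seen yet; a returned length of 0 means no Finished message so far. */
static int finished_seen(const SSL *ssl)
{
    unsigned char ours[EVP_MAX_MD_SIZE], theirs[EVP_MAX_MD_SIZE];
    size_t ours_len = SSL_get_finished(ssl, ours, sizeof(ours));
    size_t theirs_len = SSL_get_peer_finished(ssl, theirs, sizeof(theirs));

    return ours_len > 0 && theirs_len > 0;
}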
+ */ +size_t SSL_get_finished(const SSL *s, void *buf, size_t count); +size_t SSL_get_peer_finished(const SSL *s, void *buf, size_t count); + +/* + * use either SSL_VERIFY_NONE or SSL_VERIFY_PEER, the last 3 options are + * 'ored' with SSL_VERIFY_PEER if they are desired + */ +# define SSL_VERIFY_NONE 0x00 +# define SSL_VERIFY_PEER 0x01 +# define SSL_VERIFY_FAIL_IF_NO_PEER_CERT 0x02 +# define SSL_VERIFY_CLIENT_ONCE 0x04 +# define SSL_VERIFY_POST_HANDSHAKE 0x08 + +# if OPENSSL_API_COMPAT < 0x10100000L +# define OpenSSL_add_ssl_algorithms() SSL_library_init() +# define SSLeay_add_ssl_algorithms() SSL_library_init() +# endif + +/* More backward compatibility */ +# define SSL_get_cipher(s) \ + SSL_CIPHER_get_name(SSL_get_current_cipher(s)) +# define SSL_get_cipher_bits(s,np) \ + SSL_CIPHER_get_bits(SSL_get_current_cipher(s),np) +# define SSL_get_cipher_version(s) \ + SSL_CIPHER_get_version(SSL_get_current_cipher(s)) +# define SSL_get_cipher_name(s) \ + SSL_CIPHER_get_name(SSL_get_current_cipher(s)) +# define SSL_get_time(a) SSL_SESSION_get_time(a) +# define SSL_set_time(a,b) SSL_SESSION_set_time((a),(b)) +# define SSL_get_timeout(a) SSL_SESSION_get_timeout(a) +# define SSL_set_timeout(a,b) SSL_SESSION_set_timeout((a),(b)) + +# define d2i_SSL_SESSION_bio(bp,s_id) ASN1_d2i_bio_of(SSL_SESSION,SSL_SESSION_new,d2i_SSL_SESSION,bp,s_id) +# define i2d_SSL_SESSION_bio(bp,s_id) ASN1_i2d_bio_of(SSL_SESSION,i2d_SSL_SESSION,bp,s_id) + +DECLARE_PEM_rw(SSL_SESSION, SSL_SESSION) +# define SSL_AD_REASON_OFFSET 1000/* offset to get SSL_R_... value + * from SSL_AD_... */ +/* These alert types are for SSLv3 and TLSv1 */ +# define SSL_AD_CLOSE_NOTIFY SSL3_AD_CLOSE_NOTIFY +/* fatal */ +# define SSL_AD_UNEXPECTED_MESSAGE SSL3_AD_UNEXPECTED_MESSAGE +/* fatal */ +# define SSL_AD_BAD_RECORD_MAC SSL3_AD_BAD_RECORD_MAC +# define SSL_AD_DECRYPTION_FAILED TLS1_AD_DECRYPTION_FAILED +# define SSL_AD_RECORD_OVERFLOW TLS1_AD_RECORD_OVERFLOW +/* fatal */ +# define SSL_AD_DECOMPRESSION_FAILURE SSL3_AD_DECOMPRESSION_FAILURE +/* fatal */ +# define SSL_AD_HANDSHAKE_FAILURE SSL3_AD_HANDSHAKE_FAILURE +/* Not for TLS */ +# define SSL_AD_NO_CERTIFICATE SSL3_AD_NO_CERTIFICATE +# define SSL_AD_BAD_CERTIFICATE SSL3_AD_BAD_CERTIFICATE +# define SSL_AD_UNSUPPORTED_CERTIFICATE SSL3_AD_UNSUPPORTED_CERTIFICATE +# define SSL_AD_CERTIFICATE_REVOKED SSL3_AD_CERTIFICATE_REVOKED +# define SSL_AD_CERTIFICATE_EXPIRED SSL3_AD_CERTIFICATE_EXPIRED +# define SSL_AD_CERTIFICATE_UNKNOWN SSL3_AD_CERTIFICATE_UNKNOWN +/* fatal */ +# define SSL_AD_ILLEGAL_PARAMETER SSL3_AD_ILLEGAL_PARAMETER +/* fatal */ +# define SSL_AD_UNKNOWN_CA TLS1_AD_UNKNOWN_CA +/* fatal */ +# define SSL_AD_ACCESS_DENIED TLS1_AD_ACCESS_DENIED +/* fatal */ +# define SSL_AD_DECODE_ERROR TLS1_AD_DECODE_ERROR +# define SSL_AD_DECRYPT_ERROR TLS1_AD_DECRYPT_ERROR +/* fatal */ +# define SSL_AD_EXPORT_RESTRICTION TLS1_AD_EXPORT_RESTRICTION +/* fatal */ +# define SSL_AD_PROTOCOL_VERSION TLS1_AD_PROTOCOL_VERSION +/* fatal */ +# define SSL_AD_INSUFFICIENT_SECURITY TLS1_AD_INSUFFICIENT_SECURITY +/* fatal */ +# define SSL_AD_INTERNAL_ERROR TLS1_AD_INTERNAL_ERROR +# define SSL_AD_USER_CANCELLED TLS1_AD_USER_CANCELLED +# define SSL_AD_NO_RENEGOTIATION TLS1_AD_NO_RENEGOTIATION +# define SSL_AD_MISSING_EXTENSION TLS13_AD_MISSING_EXTENSION +# define SSL_AD_CERTIFICATE_REQUIRED TLS13_AD_CERTIFICATE_REQUIRED +# define SSL_AD_UNSUPPORTED_EXTENSION TLS1_AD_UNSUPPORTED_EXTENSION +# define SSL_AD_CERTIFICATE_UNOBTAINABLE TLS1_AD_CERTIFICATE_UNOBTAINABLE +# define SSL_AD_UNRECOGNIZED_NAME 
TLS1_AD_UNRECOGNIZED_NAME +# define SSL_AD_BAD_CERTIFICATE_STATUS_RESPONSE TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE +# define SSL_AD_BAD_CERTIFICATE_HASH_VALUE TLS1_AD_BAD_CERTIFICATE_HASH_VALUE +/* fatal */ +# define SSL_AD_UNKNOWN_PSK_IDENTITY TLS1_AD_UNKNOWN_PSK_IDENTITY +/* fatal */ +# define SSL_AD_INAPPROPRIATE_FALLBACK TLS1_AD_INAPPROPRIATE_FALLBACK +# define SSL_AD_NO_APPLICATION_PROTOCOL TLS1_AD_NO_APPLICATION_PROTOCOL +# define SSL_ERROR_NONE 0 +# define SSL_ERROR_SSL 1 +# define SSL_ERROR_WANT_READ 2 +# define SSL_ERROR_WANT_WRITE 3 +# define SSL_ERROR_WANT_X509_LOOKUP 4 +# define SSL_ERROR_SYSCALL 5/* look at error stack/return + * value/errno */ +# define SSL_ERROR_ZERO_RETURN 6 +# define SSL_ERROR_WANT_CONNECT 7 +# define SSL_ERROR_WANT_ACCEPT 8 +# define SSL_ERROR_WANT_ASYNC 9 +# define SSL_ERROR_WANT_ASYNC_JOB 10 +# define SSL_ERROR_WANT_CLIENT_HELLO_CB 11 +# define SSL_CTRL_SET_TMP_DH 3 +# define SSL_CTRL_SET_TMP_ECDH 4 +# define SSL_CTRL_SET_TMP_DH_CB 6 +# define SSL_CTRL_GET_CLIENT_CERT_REQUEST 9 +# define SSL_CTRL_GET_NUM_RENEGOTIATIONS 10 +# define SSL_CTRL_CLEAR_NUM_RENEGOTIATIONS 11 +# define SSL_CTRL_GET_TOTAL_RENEGOTIATIONS 12 +# define SSL_CTRL_GET_FLAGS 13 +# define SSL_CTRL_EXTRA_CHAIN_CERT 14 +# define SSL_CTRL_SET_MSG_CALLBACK 15 +# define SSL_CTRL_SET_MSG_CALLBACK_ARG 16 +/* only applies to datagram connections */ +# define SSL_CTRL_SET_MTU 17 +/* Stats */ +# define SSL_CTRL_SESS_NUMBER 20 +# define SSL_CTRL_SESS_CONNECT 21 +# define SSL_CTRL_SESS_CONNECT_GOOD 22 +# define SSL_CTRL_SESS_CONNECT_RENEGOTIATE 23 +# define SSL_CTRL_SESS_ACCEPT 24 +# define SSL_CTRL_SESS_ACCEPT_GOOD 25 +# define SSL_CTRL_SESS_ACCEPT_RENEGOTIATE 26 +# define SSL_CTRL_SESS_HIT 27 +# define SSL_CTRL_SESS_CB_HIT 28 +# define SSL_CTRL_SESS_MISSES 29 +# define SSL_CTRL_SESS_TIMEOUTS 30 +# define SSL_CTRL_SESS_CACHE_FULL 31 +# define SSL_CTRL_MODE 33 +# define SSL_CTRL_GET_READ_AHEAD 40 +# define SSL_CTRL_SET_READ_AHEAD 41 +# define SSL_CTRL_SET_SESS_CACHE_SIZE 42 +# define SSL_CTRL_GET_SESS_CACHE_SIZE 43 +# define SSL_CTRL_SET_SESS_CACHE_MODE 44 +# define SSL_CTRL_GET_SESS_CACHE_MODE 45 +# define SSL_CTRL_GET_MAX_CERT_LIST 50 +# define SSL_CTRL_SET_MAX_CERT_LIST 51 +# define SSL_CTRL_SET_MAX_SEND_FRAGMENT 52 +/* see tls1.h for macros based on these */ +# define SSL_CTRL_SET_TLSEXT_SERVERNAME_CB 53 +# define SSL_CTRL_SET_TLSEXT_SERVERNAME_ARG 54 +# define SSL_CTRL_SET_TLSEXT_HOSTNAME 55 +# define SSL_CTRL_SET_TLSEXT_DEBUG_CB 56 +# define SSL_CTRL_SET_TLSEXT_DEBUG_ARG 57 +# define SSL_CTRL_GET_TLSEXT_TICKET_KEYS 58 +# define SSL_CTRL_SET_TLSEXT_TICKET_KEYS 59 +/*# define SSL_CTRL_SET_TLSEXT_OPAQUE_PRF_INPUT 60 */ +/*# define SSL_CTRL_SET_TLSEXT_OPAQUE_PRF_INPUT_CB 61 */ +/*# define SSL_CTRL_SET_TLSEXT_OPAQUE_PRF_INPUT_CB_ARG 62 */ +# define SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB 63 +# define SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB_ARG 64 +# define SSL_CTRL_SET_TLSEXT_STATUS_REQ_TYPE 65 +# define SSL_CTRL_GET_TLSEXT_STATUS_REQ_EXTS 66 +# define SSL_CTRL_SET_TLSEXT_STATUS_REQ_EXTS 67 +# define SSL_CTRL_GET_TLSEXT_STATUS_REQ_IDS 68 +# define SSL_CTRL_SET_TLSEXT_STATUS_REQ_IDS 69 +# define SSL_CTRL_GET_TLSEXT_STATUS_REQ_OCSP_RESP 70 +# define SSL_CTRL_SET_TLSEXT_STATUS_REQ_OCSP_RESP 71 +# define SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB 72 +# define SSL_CTRL_SET_TLS_EXT_SRP_USERNAME_CB 75 +# define SSL_CTRL_SET_SRP_VERIFY_PARAM_CB 76 +# define SSL_CTRL_SET_SRP_GIVE_CLIENT_PWD_CB 77 +# define SSL_CTRL_SET_SRP_ARG 78 +# define SSL_CTRL_SET_TLS_EXT_SRP_USERNAME 79 +# define SSL_CTRL_SET_TLS_EXT_SRP_STRENGTH 80 +# 
define SSL_CTRL_SET_TLS_EXT_SRP_PASSWORD 81 +# ifndef OPENSSL_NO_HEARTBEATS +# define SSL_CTRL_DTLS_EXT_SEND_HEARTBEAT 85 +# define SSL_CTRL_GET_DTLS_EXT_HEARTBEAT_PENDING 86 +# define SSL_CTRL_SET_DTLS_EXT_HEARTBEAT_NO_REQUESTS 87 +# endif +# define DTLS_CTRL_GET_TIMEOUT 73 +# define DTLS_CTRL_HANDLE_TIMEOUT 74 +# define SSL_CTRL_GET_RI_SUPPORT 76 +# define SSL_CTRL_CLEAR_MODE 78 +# define SSL_CTRL_SET_NOT_RESUMABLE_SESS_CB 79 +# define SSL_CTRL_GET_EXTRA_CHAIN_CERTS 82 +# define SSL_CTRL_CLEAR_EXTRA_CHAIN_CERTS 83 +# define SSL_CTRL_CHAIN 88 +# define SSL_CTRL_CHAIN_CERT 89 +# define SSL_CTRL_GET_GROUPS 90 +# define SSL_CTRL_SET_GROUPS 91 +# define SSL_CTRL_SET_GROUPS_LIST 92 +# define SSL_CTRL_GET_SHARED_GROUP 93 +# define SSL_CTRL_SET_SIGALGS 97 +# define SSL_CTRL_SET_SIGALGS_LIST 98 +# define SSL_CTRL_CERT_FLAGS 99 +# define SSL_CTRL_CLEAR_CERT_FLAGS 100 +# define SSL_CTRL_SET_CLIENT_SIGALGS 101 +# define SSL_CTRL_SET_CLIENT_SIGALGS_LIST 102 +# define SSL_CTRL_GET_CLIENT_CERT_TYPES 103 +# define SSL_CTRL_SET_CLIENT_CERT_TYPES 104 +# define SSL_CTRL_BUILD_CERT_CHAIN 105 +# define SSL_CTRL_SET_VERIFY_CERT_STORE 106 +# define SSL_CTRL_SET_CHAIN_CERT_STORE 107 +# define SSL_CTRL_GET_PEER_SIGNATURE_NID 108 +# define SSL_CTRL_GET_PEER_TMP_KEY 109 +# define SSL_CTRL_GET_RAW_CIPHERLIST 110 +# define SSL_CTRL_GET_EC_POINT_FORMATS 111 +# define SSL_CTRL_GET_CHAIN_CERTS 115 +# define SSL_CTRL_SELECT_CURRENT_CERT 116 +# define SSL_CTRL_SET_CURRENT_CERT 117 +# define SSL_CTRL_SET_DH_AUTO 118 +# define DTLS_CTRL_SET_LINK_MTU 120 +# define DTLS_CTRL_GET_LINK_MIN_MTU 121 +# define SSL_CTRL_GET_EXTMS_SUPPORT 122 +# define SSL_CTRL_SET_MIN_PROTO_VERSION 123 +# define SSL_CTRL_SET_MAX_PROTO_VERSION 124 +# define SSL_CTRL_SET_SPLIT_SEND_FRAGMENT 125 +# define SSL_CTRL_SET_MAX_PIPELINES 126 +# define SSL_CTRL_GET_TLSEXT_STATUS_REQ_TYPE 127 +# define SSL_CTRL_GET_TLSEXT_STATUS_REQ_CB 128 +# define SSL_CTRL_GET_TLSEXT_STATUS_REQ_CB_ARG 129 +# define SSL_CTRL_GET_MIN_PROTO_VERSION 130 +# define SSL_CTRL_GET_MAX_PROTO_VERSION 131 +# define SSL_CTRL_GET_SIGNATURE_NID 132 +# define SSL_CTRL_GET_TMP_KEY 133 +# define SSL_CTRL_GET_VERIFY_CERT_STORE 137 +# define SSL_CTRL_GET_CHAIN_CERT_STORE 138 +# define SSL_CERT_SET_FIRST 1 +# define SSL_CERT_SET_NEXT 2 +# define SSL_CERT_SET_SERVER 3 +# define DTLSv1_get_timeout(ssl, arg) \ + SSL_ctrl(ssl,DTLS_CTRL_GET_TIMEOUT,0, (void *)(arg)) +# define DTLSv1_handle_timeout(ssl) \ + SSL_ctrl(ssl,DTLS_CTRL_HANDLE_TIMEOUT,0, NULL) +# define SSL_num_renegotiations(ssl) \ + SSL_ctrl((ssl),SSL_CTRL_GET_NUM_RENEGOTIATIONS,0,NULL) +# define SSL_clear_num_renegotiations(ssl) \ + SSL_ctrl((ssl),SSL_CTRL_CLEAR_NUM_RENEGOTIATIONS,0,NULL) +# define SSL_total_renegotiations(ssl) \ + SSL_ctrl((ssl),SSL_CTRL_GET_TOTAL_RENEGOTIATIONS,0,NULL) +# define SSL_CTX_set_tmp_dh(ctx,dh) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SET_TMP_DH,0,(char *)(dh)) +# define SSL_CTX_set_tmp_ecdh(ctx,ecdh) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SET_TMP_ECDH,0,(char *)(ecdh)) +# define SSL_CTX_set_dh_auto(ctx, onoff) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SET_DH_AUTO,onoff,NULL) +# define SSL_set_dh_auto(s, onoff) \ + SSL_ctrl(s,SSL_CTRL_SET_DH_AUTO,onoff,NULL) +# define SSL_set_tmp_dh(ssl,dh) \ + SSL_ctrl(ssl,SSL_CTRL_SET_TMP_DH,0,(char *)(dh)) +# define SSL_set_tmp_ecdh(ssl,ecdh) \ + SSL_ctrl(ssl,SSL_CTRL_SET_TMP_ECDH,0,(char *)(ecdh)) +# define SSL_CTX_add_extra_chain_cert(ctx,x509) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_EXTRA_CHAIN_CERT,0,(char *)(x509)) +# define SSL_CTX_get_extra_chain_certs(ctx,px509) \ + 
SSL_CTX_ctrl(ctx,SSL_CTRL_GET_EXTRA_CHAIN_CERTS,0,px509) +# define SSL_CTX_get_extra_chain_certs_only(ctx,px509) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_GET_EXTRA_CHAIN_CERTS,1,px509) +# define SSL_CTX_clear_extra_chain_certs(ctx) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_CLEAR_EXTRA_CHAIN_CERTS,0,NULL) +# define SSL_CTX_set0_chain(ctx,sk) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_CHAIN,0,(char *)(sk)) +# define SSL_CTX_set1_chain(ctx,sk) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_CHAIN,1,(char *)(sk)) +# define SSL_CTX_add0_chain_cert(ctx,x509) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_CHAIN_CERT,0,(char *)(x509)) +# define SSL_CTX_add1_chain_cert(ctx,x509) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_CHAIN_CERT,1,(char *)(x509)) +# define SSL_CTX_get0_chain_certs(ctx,px509) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_GET_CHAIN_CERTS,0,px509) +# define SSL_CTX_clear_chain_certs(ctx) \ + SSL_CTX_set0_chain(ctx,NULL) +# define SSL_CTX_build_cert_chain(ctx, flags) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_BUILD_CERT_CHAIN, flags, NULL) +# define SSL_CTX_select_current_cert(ctx,x509) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SELECT_CURRENT_CERT,0,(char *)(x509)) +# define SSL_CTX_set_current_cert(ctx, op) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SET_CURRENT_CERT, op, NULL) +# define SSL_CTX_set0_verify_cert_store(ctx,st) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SET_VERIFY_CERT_STORE,0,(char *)(st)) +# define SSL_CTX_set1_verify_cert_store(ctx,st) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SET_VERIFY_CERT_STORE,1,(char *)(st)) +# define SSL_CTX_get0_verify_cert_store(ctx,st) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_GET_VERIFY_CERT_STORE,0,(char *)(st)) +# define SSL_CTX_set0_chain_cert_store(ctx,st) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SET_CHAIN_CERT_STORE,0,(char *)(st)) +# define SSL_CTX_set1_chain_cert_store(ctx,st) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SET_CHAIN_CERT_STORE,1,(char *)(st)) +# define SSL_CTX_get0_chain_cert_store(ctx,st) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_GET_CHAIN_CERT_STORE,0,(char *)(st)) +# define SSL_set0_chain(s,sk) \ + SSL_ctrl(s,SSL_CTRL_CHAIN,0,(char *)(sk)) +# define SSL_set1_chain(s,sk) \ + SSL_ctrl(s,SSL_CTRL_CHAIN,1,(char *)(sk)) +# define SSL_add0_chain_cert(s,x509) \ + SSL_ctrl(s,SSL_CTRL_CHAIN_CERT,0,(char *)(x509)) +# define SSL_add1_chain_cert(s,x509) \ + SSL_ctrl(s,SSL_CTRL_CHAIN_CERT,1,(char *)(x509)) +# define SSL_get0_chain_certs(s,px509) \ + SSL_ctrl(s,SSL_CTRL_GET_CHAIN_CERTS,0,px509) +# define SSL_clear_chain_certs(s) \ + SSL_set0_chain(s,NULL) +# define SSL_build_cert_chain(s, flags) \ + SSL_ctrl(s,SSL_CTRL_BUILD_CERT_CHAIN, flags, NULL) +# define SSL_select_current_cert(s,x509) \ + SSL_ctrl(s,SSL_CTRL_SELECT_CURRENT_CERT,0,(char *)(x509)) +# define SSL_set_current_cert(s,op) \ + SSL_ctrl(s,SSL_CTRL_SET_CURRENT_CERT, op, NULL) +# define SSL_set0_verify_cert_store(s,st) \ + SSL_ctrl(s,SSL_CTRL_SET_VERIFY_CERT_STORE,0,(char *)(st)) +# define SSL_set1_verify_cert_store(s,st) \ + SSL_ctrl(s,SSL_CTRL_SET_VERIFY_CERT_STORE,1,(char *)(st)) +#define SSL_get0_verify_cert_store(s,st) \ + SSL_ctrl(s,SSL_CTRL_GET_VERIFY_CERT_STORE,0,(char *)(st)) +# define SSL_set0_chain_cert_store(s,st) \ + SSL_ctrl(s,SSL_CTRL_SET_CHAIN_CERT_STORE,0,(char *)(st)) +# define SSL_set1_chain_cert_store(s,st) \ + SSL_ctrl(s,SSL_CTRL_SET_CHAIN_CERT_STORE,1,(char *)(st)) +#define SSL_get0_chain_cert_store(s,st) \ + SSL_ctrl(s,SSL_CTRL_GET_CHAIN_CERT_STORE,0,(char *)(st)) +# define SSL_get1_groups(s, glist) \ + SSL_ctrl(s,SSL_CTRL_GET_GROUPS,0,(int*)(glist)) +# define SSL_CTX_set1_groups(ctx, glist, glistlen) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SET_GROUPS,glistlen,(int *)(glist)) +# define SSL_CTX_set1_groups_list(ctx, s) \ + 
SSL_CTX_ctrl(ctx,SSL_CTRL_SET_GROUPS_LIST,0,(char *)(s)) +# define SSL_set1_groups(s, glist, glistlen) \ + SSL_ctrl(s,SSL_CTRL_SET_GROUPS,glistlen,(char *)(glist)) +# define SSL_set1_groups_list(s, str) \ + SSL_ctrl(s,SSL_CTRL_SET_GROUPS_LIST,0,(char *)(str)) +# define SSL_get_shared_group(s, n) \ + SSL_ctrl(s,SSL_CTRL_GET_SHARED_GROUP,n,NULL) +# define SSL_CTX_set1_sigalgs(ctx, slist, slistlen) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SET_SIGALGS,slistlen,(int *)(slist)) +# define SSL_CTX_set1_sigalgs_list(ctx, s) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SET_SIGALGS_LIST,0,(char *)(s)) +# define SSL_set1_sigalgs(s, slist, slistlen) \ + SSL_ctrl(s,SSL_CTRL_SET_SIGALGS,slistlen,(int *)(slist)) +# define SSL_set1_sigalgs_list(s, str) \ + SSL_ctrl(s,SSL_CTRL_SET_SIGALGS_LIST,0,(char *)(str)) +# define SSL_CTX_set1_client_sigalgs(ctx, slist, slistlen) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SET_CLIENT_SIGALGS,slistlen,(int *)(slist)) +# define SSL_CTX_set1_client_sigalgs_list(ctx, s) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SET_CLIENT_SIGALGS_LIST,0,(char *)(s)) +# define SSL_set1_client_sigalgs(s, slist, slistlen) \ + SSL_ctrl(s,SSL_CTRL_SET_CLIENT_SIGALGS,slistlen,(int *)(slist)) +# define SSL_set1_client_sigalgs_list(s, str) \ + SSL_ctrl(s,SSL_CTRL_SET_CLIENT_SIGALGS_LIST,0,(char *)(str)) +# define SSL_get0_certificate_types(s, clist) \ + SSL_ctrl(s, SSL_CTRL_GET_CLIENT_CERT_TYPES, 0, (char *)(clist)) +# define SSL_CTX_set1_client_certificate_types(ctx, clist, clistlen) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SET_CLIENT_CERT_TYPES,clistlen, \ + (char *)(clist)) +# define SSL_set1_client_certificate_types(s, clist, clistlen) \ + SSL_ctrl(s,SSL_CTRL_SET_CLIENT_CERT_TYPES,clistlen,(char *)(clist)) +# define SSL_get_signature_nid(s, pn) \ + SSL_ctrl(s,SSL_CTRL_GET_SIGNATURE_NID,0,pn) +# define SSL_get_peer_signature_nid(s, pn) \ + SSL_ctrl(s,SSL_CTRL_GET_PEER_SIGNATURE_NID,0,pn) +# define SSL_get_peer_tmp_key(s, pk) \ + SSL_ctrl(s,SSL_CTRL_GET_PEER_TMP_KEY,0,pk) +# define SSL_get_tmp_key(s, pk) \ + SSL_ctrl(s,SSL_CTRL_GET_TMP_KEY,0,pk) +# define SSL_get0_raw_cipherlist(s, plst) \ + SSL_ctrl(s,SSL_CTRL_GET_RAW_CIPHERLIST,0,plst) +# define SSL_get0_ec_point_formats(s, plst) \ + SSL_ctrl(s,SSL_CTRL_GET_EC_POINT_FORMATS,0,plst) +# define SSL_CTX_set_min_proto_version(ctx, version) \ + SSL_CTX_ctrl(ctx, SSL_CTRL_SET_MIN_PROTO_VERSION, version, NULL) +# define SSL_CTX_set_max_proto_version(ctx, version) \ + SSL_CTX_ctrl(ctx, SSL_CTRL_SET_MAX_PROTO_VERSION, version, NULL) +# define SSL_CTX_get_min_proto_version(ctx) \ + SSL_CTX_ctrl(ctx, SSL_CTRL_GET_MIN_PROTO_VERSION, 0, NULL) +# define SSL_CTX_get_max_proto_version(ctx) \ + SSL_CTX_ctrl(ctx, SSL_CTRL_GET_MAX_PROTO_VERSION, 0, NULL) +# define SSL_set_min_proto_version(s, version) \ + SSL_ctrl(s, SSL_CTRL_SET_MIN_PROTO_VERSION, version, NULL) +# define SSL_set_max_proto_version(s, version) \ + SSL_ctrl(s, SSL_CTRL_SET_MAX_PROTO_VERSION, version, NULL) +# define SSL_get_min_proto_version(s) \ + SSL_ctrl(s, SSL_CTRL_GET_MIN_PROTO_VERSION, 0, NULL) +# define SSL_get_max_proto_version(s) \ + SSL_ctrl(s, SSL_CTRL_GET_MAX_PROTO_VERSION, 0, NULL) + +/* Backwards compatibility, original 1.1.0 names */ +# define SSL_CTRL_GET_SERVER_TMP_KEY \ + SSL_CTRL_GET_PEER_TMP_KEY +# define SSL_get_server_tmp_key(s, pk) \ + SSL_get_peer_tmp_key(s, pk) + +/* + * The following symbol names are old and obsolete. They are kept + * for compatibility reasons only and should not be used anymore. 
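To show how the version and group controls defined above are typically driven, a minimal sketch (not part of the vendored file; TLS1_2_VERSION and TLS1_3_VERSION come from tls1.h, and the group list string is only an example):

#include <openssl/ssl.h>

/* Sketch: restrict a context to TLS 1.2/1.3 and pin the key-exchange groups.
 * Each macro wraps SSL_CTX_ctrl() and returns 1 on success. */
static int harden_ctx(SSL_CTX *ctx)
{
    if (SSL_CTX_set_min_proto_version(ctx, TLS1_2_VERSION) != 1)
        return 0;
    if (SSL_CTX_set_max_proto_version(ctx, TLS1_3_VERSION) != 1)
        return 0;
    return SSL_CTX_set1_groups_list(ctx, "X25519:P-256") == 1;
}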
+ */ +# define SSL_CTRL_GET_CURVES SSL_CTRL_GET_GROUPS +# define SSL_CTRL_SET_CURVES SSL_CTRL_SET_GROUPS +# define SSL_CTRL_SET_CURVES_LIST SSL_CTRL_SET_GROUPS_LIST +# define SSL_CTRL_GET_SHARED_CURVE SSL_CTRL_GET_SHARED_GROUP + +# define SSL_get1_curves SSL_get1_groups +# define SSL_CTX_set1_curves SSL_CTX_set1_groups +# define SSL_CTX_set1_curves_list SSL_CTX_set1_groups_list +# define SSL_set1_curves SSL_set1_groups +# define SSL_set1_curves_list SSL_set1_groups_list +# define SSL_get_shared_curve SSL_get_shared_group + + +# if OPENSSL_API_COMPAT < 0x10100000L +/* Provide some compatibility macros for removed functionality. */ +# define SSL_CTX_need_tmp_RSA(ctx) 0 +# define SSL_CTX_set_tmp_rsa(ctx,rsa) 1 +# define SSL_need_tmp_RSA(ssl) 0 +# define SSL_set_tmp_rsa(ssl,rsa) 1 +# define SSL_CTX_set_ecdh_auto(dummy, onoff) ((onoff) != 0) +# define SSL_set_ecdh_auto(dummy, onoff) ((onoff) != 0) +/* + * We "pretend" to call the callback to avoid warnings about unused static + * functions. + */ +# define SSL_CTX_set_tmp_rsa_callback(ctx, cb) while(0) (cb)(NULL, 0, 0) +# define SSL_set_tmp_rsa_callback(ssl, cb) while(0) (cb)(NULL, 0, 0) +# endif +__owur const BIO_METHOD *BIO_f_ssl(void); +__owur BIO *BIO_new_ssl(SSL_CTX *ctx, int client); +__owur BIO *BIO_new_ssl_connect(SSL_CTX *ctx); +__owur BIO *BIO_new_buffer_ssl_connect(SSL_CTX *ctx); +__owur int BIO_ssl_copy_session_id(BIO *to, BIO *from); +void BIO_ssl_shutdown(BIO *ssl_bio); + +__owur int SSL_CTX_set_cipher_list(SSL_CTX *, const char *str); +__owur SSL_CTX *SSL_CTX_new(const SSL_METHOD *meth); +int SSL_CTX_up_ref(SSL_CTX *ctx); +void SSL_CTX_free(SSL_CTX *); +__owur long SSL_CTX_set_timeout(SSL_CTX *ctx, long t); +__owur long SSL_CTX_get_timeout(const SSL_CTX *ctx); +__owur X509_STORE *SSL_CTX_get_cert_store(const SSL_CTX *); +void SSL_CTX_set_cert_store(SSL_CTX *, X509_STORE *); +void SSL_CTX_set1_cert_store(SSL_CTX *, X509_STORE *); +__owur int SSL_want(const SSL *s); +__owur int SSL_clear(SSL *s); + +void SSL_CTX_flush_sessions(SSL_CTX *ctx, long tm); + +__owur const SSL_CIPHER *SSL_get_current_cipher(const SSL *s); +__owur const SSL_CIPHER *SSL_get_pending_cipher(const SSL *s); +__owur int SSL_CIPHER_get_bits(const SSL_CIPHER *c, int *alg_bits); +__owur const char *SSL_CIPHER_get_version(const SSL_CIPHER *c); +__owur const char *SSL_CIPHER_get_name(const SSL_CIPHER *c); +__owur const char *SSL_CIPHER_standard_name(const SSL_CIPHER *c); +__owur const char *OPENSSL_cipher_name(const char *rfc_name); +__owur uint32_t SSL_CIPHER_get_id(const SSL_CIPHER *c); +__owur uint16_t SSL_CIPHER_get_protocol_id(const SSL_CIPHER *c); +__owur int SSL_CIPHER_get_kx_nid(const SSL_CIPHER *c); +__owur int SSL_CIPHER_get_auth_nid(const SSL_CIPHER *c); +__owur const EVP_MD *SSL_CIPHER_get_handshake_digest(const SSL_CIPHER *c); +__owur int SSL_CIPHER_is_aead(const SSL_CIPHER *c); + +__owur int SSL_get_fd(const SSL *s); +__owur int SSL_get_rfd(const SSL *s); +__owur int SSL_get_wfd(const SSL *s); +__owur const char *SSL_get_cipher_list(const SSL *s, int n); +__owur char *SSL_get_shared_ciphers(const SSL *s, char *buf, int size); +__owur int SSL_get_read_ahead(const SSL *s); +__owur int SSL_pending(const SSL *s); +__owur int SSL_has_pending(const SSL *s); +# ifndef OPENSSL_NO_SOCK +__owur int SSL_set_fd(SSL *s, int fd); +__owur int SSL_set_rfd(SSL *s, int fd); +__owur int SSL_set_wfd(SSL *s, int fd); +# endif +void SSL_set0_rbio(SSL *s, BIO *rbio); +void SSL_set0_wbio(SSL *s, BIO *wbio); +void SSL_set_bio(SSL *s, BIO *rbio, BIO *wbio); +__owur BIO 
*SSL_get_rbio(const SSL *s); +__owur BIO *SSL_get_wbio(const SSL *s); +__owur int SSL_set_cipher_list(SSL *s, const char *str); +__owur int SSL_CTX_set_ciphersuites(SSL_CTX *ctx, const char *str); +__owur int SSL_set_ciphersuites(SSL *s, const char *str); +void SSL_set_read_ahead(SSL *s, int yes); +__owur int SSL_get_verify_mode(const SSL *s); +__owur int SSL_get_verify_depth(const SSL *s); +__owur SSL_verify_cb SSL_get_verify_callback(const SSL *s); +void SSL_set_verify(SSL *s, int mode, SSL_verify_cb callback); +void SSL_set_verify_depth(SSL *s, int depth); +void SSL_set_cert_cb(SSL *s, int (*cb) (SSL *ssl, void *arg), void *arg); +# ifndef OPENSSL_NO_RSA +__owur int SSL_use_RSAPrivateKey(SSL *ssl, RSA *rsa); +__owur int SSL_use_RSAPrivateKey_ASN1(SSL *ssl, const unsigned char *d, + long len); +# endif +__owur int SSL_use_PrivateKey(SSL *ssl, EVP_PKEY *pkey); +__owur int SSL_use_PrivateKey_ASN1(int pk, SSL *ssl, const unsigned char *d, + long len); +__owur int SSL_use_certificate(SSL *ssl, X509 *x); +__owur int SSL_use_certificate_ASN1(SSL *ssl, const unsigned char *d, int len); +__owur int SSL_use_cert_and_key(SSL *ssl, X509 *x509, EVP_PKEY *privatekey, + STACK_OF(X509) *chain, int override); + + +/* serverinfo file format versions */ +# define SSL_SERVERINFOV1 1 +# define SSL_SERVERINFOV2 2 + +/* Set serverinfo data for the current active cert. */ +__owur int SSL_CTX_use_serverinfo(SSL_CTX *ctx, const unsigned char *serverinfo, + size_t serverinfo_length); +__owur int SSL_CTX_use_serverinfo_ex(SSL_CTX *ctx, unsigned int version, + const unsigned char *serverinfo, + size_t serverinfo_length); +__owur int SSL_CTX_use_serverinfo_file(SSL_CTX *ctx, const char *file); + +#ifndef OPENSSL_NO_RSA +__owur int SSL_use_RSAPrivateKey_file(SSL *ssl, const char *file, int type); +#endif + +__owur int SSL_use_PrivateKey_file(SSL *ssl, const char *file, int type); +__owur int SSL_use_certificate_file(SSL *ssl, const char *file, int type); + +#ifndef OPENSSL_NO_RSA +__owur int SSL_CTX_use_RSAPrivateKey_file(SSL_CTX *ctx, const char *file, + int type); +#endif +__owur int SSL_CTX_use_PrivateKey_file(SSL_CTX *ctx, const char *file, + int type); +__owur int SSL_CTX_use_certificate_file(SSL_CTX *ctx, const char *file, + int type); +/* PEM type */ +__owur int SSL_CTX_use_certificate_chain_file(SSL_CTX *ctx, const char *file); +__owur int SSL_use_certificate_chain_file(SSL *ssl, const char *file); +__owur STACK_OF(X509_NAME) *SSL_load_client_CA_file(const char *file); +__owur int SSL_add_file_cert_subjects_to_stack(STACK_OF(X509_NAME) *stackCAs, + const char *file); +int SSL_add_dir_cert_subjects_to_stack(STACK_OF(X509_NAME) *stackCAs, + const char *dir); + +# if OPENSSL_API_COMPAT < 0x10100000L +# define SSL_load_error_strings() \ + OPENSSL_init_ssl(OPENSSL_INIT_LOAD_SSL_STRINGS \ + | OPENSSL_INIT_LOAD_CRYPTO_STRINGS, NULL) +# endif + +__owur const char *SSL_state_string(const SSL *s); +__owur const char *SSL_rstate_string(const SSL *s); +__owur const char *SSL_state_string_long(const SSL *s); +__owur const char *SSL_rstate_string_long(const SSL *s); +__owur long SSL_SESSION_get_time(const SSL_SESSION *s); +__owur long SSL_SESSION_set_time(SSL_SESSION *s, long t); +__owur long SSL_SESSION_get_timeout(const SSL_SESSION *s); +__owur long SSL_SESSION_set_timeout(SSL_SESSION *s, long t); +__owur int SSL_SESSION_get_protocol_version(const SSL_SESSION *s); +__owur int SSL_SESSION_set_protocol_version(SSL_SESSION *s, int version); + +__owur const char *SSL_SESSION_get0_hostname(const SSL_SESSION *s); +__owur int 
SSL_SESSION_set1_hostname(SSL_SESSION *s, const char *hostname); +void SSL_SESSION_get0_alpn_selected(const SSL_SESSION *s, + const unsigned char **alpn, + size_t *len); +__owur int SSL_SESSION_set1_alpn_selected(SSL_SESSION *s, + const unsigned char *alpn, + size_t len); +__owur const SSL_CIPHER *SSL_SESSION_get0_cipher(const SSL_SESSION *s); +__owur int SSL_SESSION_set_cipher(SSL_SESSION *s, const SSL_CIPHER *cipher); +__owur int SSL_SESSION_has_ticket(const SSL_SESSION *s); +__owur unsigned long SSL_SESSION_get_ticket_lifetime_hint(const SSL_SESSION *s); +void SSL_SESSION_get0_ticket(const SSL_SESSION *s, const unsigned char **tick, + size_t *len); +__owur uint32_t SSL_SESSION_get_max_early_data(const SSL_SESSION *s); +__owur int SSL_SESSION_set_max_early_data(SSL_SESSION *s, + uint32_t max_early_data); +__owur int SSL_copy_session_id(SSL *to, const SSL *from); +__owur X509 *SSL_SESSION_get0_peer(SSL_SESSION *s); +__owur int SSL_SESSION_set1_id_context(SSL_SESSION *s, + const unsigned char *sid_ctx, + unsigned int sid_ctx_len); +__owur int SSL_SESSION_set1_id(SSL_SESSION *s, const unsigned char *sid, + unsigned int sid_len); +__owur int SSL_SESSION_is_resumable(const SSL_SESSION *s); + +__owur SSL_SESSION *SSL_SESSION_new(void); +__owur SSL_SESSION *SSL_SESSION_dup(SSL_SESSION *src); +const unsigned char *SSL_SESSION_get_id(const SSL_SESSION *s, + unsigned int *len); +const unsigned char *SSL_SESSION_get0_id_context(const SSL_SESSION *s, + unsigned int *len); +__owur unsigned int SSL_SESSION_get_compress_id(const SSL_SESSION *s); +# ifndef OPENSSL_NO_STDIO +int SSL_SESSION_print_fp(FILE *fp, const SSL_SESSION *ses); +# endif +int SSL_SESSION_print(BIO *fp, const SSL_SESSION *ses); +int SSL_SESSION_print_keylog(BIO *bp, const SSL_SESSION *x); +int SSL_SESSION_up_ref(SSL_SESSION *ses); +void SSL_SESSION_free(SSL_SESSION *ses); +__owur int i2d_SSL_SESSION(SSL_SESSION *in, unsigned char **pp); +__owur int SSL_set_session(SSL *to, SSL_SESSION *session); +int SSL_CTX_add_session(SSL_CTX *ctx, SSL_SESSION *session); +int SSL_CTX_remove_session(SSL_CTX *ctx, SSL_SESSION *session); +__owur int SSL_CTX_set_generate_session_id(SSL_CTX *ctx, GEN_SESSION_CB cb); +__owur int SSL_set_generate_session_id(SSL *s, GEN_SESSION_CB cb); +__owur int SSL_has_matching_session_id(const SSL *s, + const unsigned char *id, + unsigned int id_len); +SSL_SESSION *d2i_SSL_SESSION(SSL_SESSION **a, const unsigned char **pp, + long length); + +# ifdef HEADER_X509_H +__owur X509 *SSL_get_peer_certificate(const SSL *s); +# endif + +__owur STACK_OF(X509) *SSL_get_peer_cert_chain(const SSL *s); + +__owur int SSL_CTX_get_verify_mode(const SSL_CTX *ctx); +__owur int SSL_CTX_get_verify_depth(const SSL_CTX *ctx); +__owur SSL_verify_cb SSL_CTX_get_verify_callback(const SSL_CTX *ctx); +void SSL_CTX_set_verify(SSL_CTX *ctx, int mode, SSL_verify_cb callback); +void SSL_CTX_set_verify_depth(SSL_CTX *ctx, int depth); +void SSL_CTX_set_cert_verify_callback(SSL_CTX *ctx, + int (*cb) (X509_STORE_CTX *, void *), + void *arg); +void SSL_CTX_set_cert_cb(SSL_CTX *c, int (*cb) (SSL *ssl, void *arg), + void *arg); +# ifndef OPENSSL_NO_RSA +__owur int SSL_CTX_use_RSAPrivateKey(SSL_CTX *ctx, RSA *rsa); +__owur int SSL_CTX_use_RSAPrivateKey_ASN1(SSL_CTX *ctx, const unsigned char *d, + long len); +# endif +__owur int SSL_CTX_use_PrivateKey(SSL_CTX *ctx, EVP_PKEY *pkey); +__owur int SSL_CTX_use_PrivateKey_ASN1(int pk, SSL_CTX *ctx, + const unsigned char *d, long len); +__owur int SSL_CTX_use_certificate(SSL_CTX *ctx, X509 *x); +__owur int 
SSL_CTX_use_certificate_ASN1(SSL_CTX *ctx, int len, + const unsigned char *d); +__owur int SSL_CTX_use_cert_and_key(SSL_CTX *ctx, X509 *x509, EVP_PKEY *privatekey, + STACK_OF(X509) *chain, int override); + +void SSL_CTX_set_default_passwd_cb(SSL_CTX *ctx, pem_password_cb *cb); +void SSL_CTX_set_default_passwd_cb_userdata(SSL_CTX *ctx, void *u); +pem_password_cb *SSL_CTX_get_default_passwd_cb(SSL_CTX *ctx); +void *SSL_CTX_get_default_passwd_cb_userdata(SSL_CTX *ctx); +void SSL_set_default_passwd_cb(SSL *s, pem_password_cb *cb); +void SSL_set_default_passwd_cb_userdata(SSL *s, void *u); +pem_password_cb *SSL_get_default_passwd_cb(SSL *s); +void *SSL_get_default_passwd_cb_userdata(SSL *s); + +__owur int SSL_CTX_check_private_key(const SSL_CTX *ctx); +__owur int SSL_check_private_key(const SSL *ctx); + +__owur int SSL_CTX_set_session_id_context(SSL_CTX *ctx, + const unsigned char *sid_ctx, + unsigned int sid_ctx_len); + +SSL *SSL_new(SSL_CTX *ctx); +int SSL_up_ref(SSL *s); +int SSL_is_dtls(const SSL *s); +__owur int SSL_set_session_id_context(SSL *ssl, const unsigned char *sid_ctx, + unsigned int sid_ctx_len); + +__owur int SSL_CTX_set_purpose(SSL_CTX *ctx, int purpose); +__owur int SSL_set_purpose(SSL *ssl, int purpose); +__owur int SSL_CTX_set_trust(SSL_CTX *ctx, int trust); +__owur int SSL_set_trust(SSL *ssl, int trust); + +__owur int SSL_set1_host(SSL *s, const char *hostname); +__owur int SSL_add1_host(SSL *s, const char *hostname); +__owur const char *SSL_get0_peername(SSL *s); +void SSL_set_hostflags(SSL *s, unsigned int flags); + +__owur int SSL_CTX_dane_enable(SSL_CTX *ctx); +__owur int SSL_CTX_dane_mtype_set(SSL_CTX *ctx, const EVP_MD *md, + uint8_t mtype, uint8_t ord); +__owur int SSL_dane_enable(SSL *s, const char *basedomain); +__owur int SSL_dane_tlsa_add(SSL *s, uint8_t usage, uint8_t selector, + uint8_t mtype, unsigned const char *data, size_t dlen); +__owur int SSL_get0_dane_authority(SSL *s, X509 **mcert, EVP_PKEY **mspki); +__owur int SSL_get0_dane_tlsa(SSL *s, uint8_t *usage, uint8_t *selector, + uint8_t *mtype, unsigned const char **data, + size_t *dlen); +/* + * Bridge opacity barrier between libcrypt and libssl, also needed to support + * offline testing in test/danetest.c + */ +SSL_DANE *SSL_get0_dane(SSL *ssl); +/* + * DANE flags + */ +unsigned long SSL_CTX_dane_set_flags(SSL_CTX *ctx, unsigned long flags); +unsigned long SSL_CTX_dane_clear_flags(SSL_CTX *ctx, unsigned long flags); +unsigned long SSL_dane_set_flags(SSL *ssl, unsigned long flags); +unsigned long SSL_dane_clear_flags(SSL *ssl, unsigned long flags); + +__owur int SSL_CTX_set1_param(SSL_CTX *ctx, X509_VERIFY_PARAM *vpm); +__owur int SSL_set1_param(SSL *ssl, X509_VERIFY_PARAM *vpm); + +__owur X509_VERIFY_PARAM *SSL_CTX_get0_param(SSL_CTX *ctx); +__owur X509_VERIFY_PARAM *SSL_get0_param(SSL *ssl); + +# ifndef OPENSSL_NO_SRP +int SSL_CTX_set_srp_username(SSL_CTX *ctx, char *name); +int SSL_CTX_set_srp_password(SSL_CTX *ctx, char *password); +int SSL_CTX_set_srp_strength(SSL_CTX *ctx, int strength); +int SSL_CTX_set_srp_client_pwd_callback(SSL_CTX *ctx, + char *(*cb) (SSL *, void *)); +int SSL_CTX_set_srp_verify_param_callback(SSL_CTX *ctx, + int (*cb) (SSL *, void *)); +int SSL_CTX_set_srp_username_callback(SSL_CTX *ctx, + int (*cb) (SSL *, int *, void *)); +int SSL_CTX_set_srp_cb_arg(SSL_CTX *ctx, void *arg); + +int SSL_set_srp_server_param(SSL *s, const BIGNUM *N, const BIGNUM *g, + BIGNUM *sa, BIGNUM *v, char *info); +int SSL_set_srp_server_param_pw(SSL *s, const char *user, const char *pass, + const char 
*grp); + +__owur BIGNUM *SSL_get_srp_g(SSL *s); +__owur BIGNUM *SSL_get_srp_N(SSL *s); + +__owur char *SSL_get_srp_username(SSL *s); +__owur char *SSL_get_srp_userinfo(SSL *s); +# endif + +/* + * ClientHello callback and helpers. + */ + +# define SSL_CLIENT_HELLO_SUCCESS 1 +# define SSL_CLIENT_HELLO_ERROR 0 +# define SSL_CLIENT_HELLO_RETRY (-1) + +typedef int (*SSL_client_hello_cb_fn) (SSL *s, int *al, void *arg); +void SSL_CTX_set_client_hello_cb(SSL_CTX *c, SSL_client_hello_cb_fn cb, + void *arg); +int SSL_client_hello_isv2(SSL *s); +unsigned int SSL_client_hello_get0_legacy_version(SSL *s); +size_t SSL_client_hello_get0_random(SSL *s, const unsigned char **out); +size_t SSL_client_hello_get0_session_id(SSL *s, const unsigned char **out); +size_t SSL_client_hello_get0_ciphers(SSL *s, const unsigned char **out); +size_t SSL_client_hello_get0_compression_methods(SSL *s, + const unsigned char **out); +int SSL_client_hello_get1_extensions_present(SSL *s, int **out, size_t *outlen); +int SSL_client_hello_get0_ext(SSL *s, unsigned int type, + const unsigned char **out, size_t *outlen); + +void SSL_certs_clear(SSL *s); +void SSL_free(SSL *ssl); +# ifdef OSSL_ASYNC_FD +/* + * Windows application developer has to include windows.h to use these. + */ +__owur int SSL_waiting_for_async(SSL *s); +__owur int SSL_get_all_async_fds(SSL *s, OSSL_ASYNC_FD *fds, size_t *numfds); +__owur int SSL_get_changed_async_fds(SSL *s, OSSL_ASYNC_FD *addfd, + size_t *numaddfds, OSSL_ASYNC_FD *delfd, + size_t *numdelfds); +# endif +__owur int SSL_accept(SSL *ssl); +__owur int SSL_stateless(SSL *s); +__owur int SSL_connect(SSL *ssl); +__owur int SSL_read(SSL *ssl, void *buf, int num); +__owur int SSL_read_ex(SSL *ssl, void *buf, size_t num, size_t *readbytes); + +# define SSL_READ_EARLY_DATA_ERROR 0 +# define SSL_READ_EARLY_DATA_SUCCESS 1 +# define SSL_READ_EARLY_DATA_FINISH 2 + +__owur int SSL_read_early_data(SSL *s, void *buf, size_t num, + size_t *readbytes); +__owur int SSL_peek(SSL *ssl, void *buf, int num); +__owur int SSL_peek_ex(SSL *ssl, void *buf, size_t num, size_t *readbytes); +__owur int SSL_write(SSL *ssl, const void *buf, int num); +__owur int SSL_write_ex(SSL *s, const void *buf, size_t num, size_t *written); +__owur int SSL_write_early_data(SSL *s, const void *buf, size_t num, + size_t *written); +long SSL_ctrl(SSL *ssl, int cmd, long larg, void *parg); +long SSL_callback_ctrl(SSL *, int, void (*)(void)); +long SSL_CTX_ctrl(SSL_CTX *ctx, int cmd, long larg, void *parg); +long SSL_CTX_callback_ctrl(SSL_CTX *, int, void (*)(void)); + +# define SSL_EARLY_DATA_NOT_SENT 0 +# define SSL_EARLY_DATA_REJECTED 1 +# define SSL_EARLY_DATA_ACCEPTED 2 + +__owur int SSL_get_early_data_status(const SSL *s); + +__owur int SSL_get_error(const SSL *s, int ret_code); +__owur const char *SSL_get_version(const SSL *s); + +/* This sets the 'default' SSL version that SSL_new() will create */ +__owur int SSL_CTX_set_ssl_version(SSL_CTX *ctx, const SSL_METHOD *meth); + +# ifndef OPENSSL_NO_SSL3_METHOD +DEPRECATEDIN_1_1_0(__owur const SSL_METHOD *SSLv3_method(void)) /* SSLv3 */ +DEPRECATEDIN_1_1_0(__owur const SSL_METHOD *SSLv3_server_method(void)) +DEPRECATEDIN_1_1_0(__owur const SSL_METHOD *SSLv3_client_method(void)) +# endif + +#define SSLv23_method TLS_method +#define SSLv23_server_method TLS_server_method +#define SSLv23_client_method TLS_client_method + +/* Negotiate highest available SSL/TLS version */ +__owur const SSL_METHOD *TLS_method(void); +__owur const SSL_METHOD *TLS_server_method(void); +__owur const SSL_METHOD 
*TLS_client_method(void); + +# ifndef OPENSSL_NO_TLS1_METHOD +DEPRECATEDIN_1_1_0(__owur const SSL_METHOD *TLSv1_method(void)) /* TLSv1.0 */ +DEPRECATEDIN_1_1_0(__owur const SSL_METHOD *TLSv1_server_method(void)) +DEPRECATEDIN_1_1_0(__owur const SSL_METHOD *TLSv1_client_method(void)) +# endif + +# ifndef OPENSSL_NO_TLS1_1_METHOD +DEPRECATEDIN_1_1_0(__owur const SSL_METHOD *TLSv1_1_method(void)) /* TLSv1.1 */ +DEPRECATEDIN_1_1_0(__owur const SSL_METHOD *TLSv1_1_server_method(void)) +DEPRECATEDIN_1_1_0(__owur const SSL_METHOD *TLSv1_1_client_method(void)) +# endif + +# ifndef OPENSSL_NO_TLS1_2_METHOD +DEPRECATEDIN_1_1_0(__owur const SSL_METHOD *TLSv1_2_method(void)) /* TLSv1.2 */ +DEPRECATEDIN_1_1_0(__owur const SSL_METHOD *TLSv1_2_server_method(void)) +DEPRECATEDIN_1_1_0(__owur const SSL_METHOD *TLSv1_2_client_method(void)) +# endif + +# ifndef OPENSSL_NO_DTLS1_METHOD +DEPRECATEDIN_1_1_0(__owur const SSL_METHOD *DTLSv1_method(void)) /* DTLSv1.0 */ +DEPRECATEDIN_1_1_0(__owur const SSL_METHOD *DTLSv1_server_method(void)) +DEPRECATEDIN_1_1_0(__owur const SSL_METHOD *DTLSv1_client_method(void)) +# endif + +# ifndef OPENSSL_NO_DTLS1_2_METHOD +/* DTLSv1.2 */ +DEPRECATEDIN_1_1_0(__owur const SSL_METHOD *DTLSv1_2_method(void)) +DEPRECATEDIN_1_1_0(__owur const SSL_METHOD *DTLSv1_2_server_method(void)) +DEPRECATEDIN_1_1_0(__owur const SSL_METHOD *DTLSv1_2_client_method(void)) +# endif + +__owur const SSL_METHOD *DTLS_method(void); /* DTLS 1.0 and 1.2 */ +__owur const SSL_METHOD *DTLS_server_method(void); /* DTLS 1.0 and 1.2 */ +__owur const SSL_METHOD *DTLS_client_method(void); /* DTLS 1.0 and 1.2 */ + +__owur size_t DTLS_get_data_mtu(const SSL *s); + +__owur STACK_OF(SSL_CIPHER) *SSL_get_ciphers(const SSL *s); +__owur STACK_OF(SSL_CIPHER) *SSL_CTX_get_ciphers(const SSL_CTX *ctx); +__owur STACK_OF(SSL_CIPHER) *SSL_get_client_ciphers(const SSL *s); +__owur STACK_OF(SSL_CIPHER) *SSL_get1_supported_ciphers(SSL *s); + +__owur int SSL_do_handshake(SSL *s); +int SSL_key_update(SSL *s, int updatetype); +int SSL_get_key_update_type(const SSL *s); +int SSL_renegotiate(SSL *s); +int SSL_renegotiate_abbreviated(SSL *s); +__owur int SSL_renegotiate_pending(const SSL *s); +int SSL_shutdown(SSL *s); +__owur int SSL_verify_client_post_handshake(SSL *s); +void SSL_CTX_set_post_handshake_auth(SSL_CTX *ctx, int val); +void SSL_set_post_handshake_auth(SSL *s, int val); + +__owur const SSL_METHOD *SSL_CTX_get_ssl_method(const SSL_CTX *ctx); +__owur const SSL_METHOD *SSL_get_ssl_method(const SSL *s); +__owur int SSL_set_ssl_method(SSL *s, const SSL_METHOD *method); +__owur const char *SSL_alert_type_string_long(int value); +__owur const char *SSL_alert_type_string(int value); +__owur const char *SSL_alert_desc_string_long(int value); +__owur const char *SSL_alert_desc_string(int value); + +void SSL_set0_CA_list(SSL *s, STACK_OF(X509_NAME) *name_list); +void SSL_CTX_set0_CA_list(SSL_CTX *ctx, STACK_OF(X509_NAME) *name_list); +__owur const STACK_OF(X509_NAME) *SSL_get0_CA_list(const SSL *s); +__owur const STACK_OF(X509_NAME) *SSL_CTX_get0_CA_list(const SSL_CTX *ctx); +__owur int SSL_add1_to_CA_list(SSL *ssl, const X509 *x); +__owur int SSL_CTX_add1_to_CA_list(SSL_CTX *ctx, const X509 *x); +__owur const STACK_OF(X509_NAME) *SSL_get0_peer_CA_list(const SSL *s); + +void SSL_set_client_CA_list(SSL *s, STACK_OF(X509_NAME) *name_list); +void SSL_CTX_set_client_CA_list(SSL_CTX *ctx, STACK_OF(X509_NAME) *name_list); +__owur STACK_OF(X509_NAME) *SSL_get_client_CA_list(const SSL *s); +__owur STACK_OF(X509_NAME) 
*SSL_CTX_get_client_CA_list(const SSL_CTX *s); +__owur int SSL_add_client_CA(SSL *ssl, X509 *x); +__owur int SSL_CTX_add_client_CA(SSL_CTX *ctx, X509 *x); + +void SSL_set_connect_state(SSL *s); +void SSL_set_accept_state(SSL *s); + +__owur long SSL_get_default_timeout(const SSL *s); + +# if OPENSSL_API_COMPAT < 0x10100000L +# define SSL_library_init() OPENSSL_init_ssl(0, NULL) +# endif + +__owur char *SSL_CIPHER_description(const SSL_CIPHER *, char *buf, int size); +__owur STACK_OF(X509_NAME) *SSL_dup_CA_list(const STACK_OF(X509_NAME) *sk); + +__owur SSL *SSL_dup(SSL *ssl); + +__owur X509 *SSL_get_certificate(const SSL *ssl); +/* + * EVP_PKEY + */ +struct evp_pkey_st *SSL_get_privatekey(const SSL *ssl); + +__owur X509 *SSL_CTX_get0_certificate(const SSL_CTX *ctx); +__owur EVP_PKEY *SSL_CTX_get0_privatekey(const SSL_CTX *ctx); + +void SSL_CTX_set_quiet_shutdown(SSL_CTX *ctx, int mode); +__owur int SSL_CTX_get_quiet_shutdown(const SSL_CTX *ctx); +void SSL_set_quiet_shutdown(SSL *ssl, int mode); +__owur int SSL_get_quiet_shutdown(const SSL *ssl); +void SSL_set_shutdown(SSL *ssl, int mode); +__owur int SSL_get_shutdown(const SSL *ssl); +__owur int SSL_version(const SSL *ssl); +__owur int SSL_client_version(const SSL *s); +__owur int SSL_CTX_set_default_verify_paths(SSL_CTX *ctx); +__owur int SSL_CTX_set_default_verify_dir(SSL_CTX *ctx); +__owur int SSL_CTX_set_default_verify_file(SSL_CTX *ctx); +__owur int SSL_CTX_load_verify_locations(SSL_CTX *ctx, const char *CAfile, + const char *CApath); +# define SSL_get0_session SSL_get_session/* just peek at pointer */ +__owur SSL_SESSION *SSL_get_session(const SSL *ssl); +__owur SSL_SESSION *SSL_get1_session(SSL *ssl); /* obtain a reference count */ +__owur SSL_CTX *SSL_get_SSL_CTX(const SSL *ssl); +SSL_CTX *SSL_set_SSL_CTX(SSL *ssl, SSL_CTX *ctx); +void SSL_set_info_callback(SSL *ssl, + void (*cb) (const SSL *ssl, int type, int val)); +void (*SSL_get_info_callback(const SSL *ssl)) (const SSL *ssl, int type, + int val); +__owur OSSL_HANDSHAKE_STATE SSL_get_state(const SSL *ssl); + +void SSL_set_verify_result(SSL *ssl, long v); +__owur long SSL_get_verify_result(const SSL *ssl); +__owur STACK_OF(X509) *SSL_get0_verified_chain(const SSL *s); + +__owur size_t SSL_get_client_random(const SSL *ssl, unsigned char *out, + size_t outlen); +__owur size_t SSL_get_server_random(const SSL *ssl, unsigned char *out, + size_t outlen); +__owur size_t SSL_SESSION_get_master_key(const SSL_SESSION *sess, + unsigned char *out, size_t outlen); +__owur int SSL_SESSION_set1_master_key(SSL_SESSION *sess, + const unsigned char *in, size_t len); +uint8_t SSL_SESSION_get_max_fragment_length(const SSL_SESSION *sess); + +#define SSL_get_ex_new_index(l, p, newf, dupf, freef) \ + CRYPTO_get_ex_new_index(CRYPTO_EX_INDEX_SSL, l, p, newf, dupf, freef) +__owur int SSL_set_ex_data(SSL *ssl, int idx, void *data); +void *SSL_get_ex_data(const SSL *ssl, int idx); +#define SSL_SESSION_get_ex_new_index(l, p, newf, dupf, freef) \ + CRYPTO_get_ex_new_index(CRYPTO_EX_INDEX_SSL_SESSION, l, p, newf, dupf, freef) +__owur int SSL_SESSION_set_ex_data(SSL_SESSION *ss, int idx, void *data); +void *SSL_SESSION_get_ex_data(const SSL_SESSION *ss, int idx); +#define SSL_CTX_get_ex_new_index(l, p, newf, dupf, freef) \ + CRYPTO_get_ex_new_index(CRYPTO_EX_INDEX_SSL_CTX, l, p, newf, dupf, freef) +__owur int SSL_CTX_set_ex_data(SSL_CTX *ssl, int idx, void *data); +void *SSL_CTX_get_ex_data(const SSL_CTX *ssl, int idx); + +__owur int SSL_get_ex_data_X509_STORE_CTX_idx(void); + +# define 
SSL_CTX_sess_set_cache_size(ctx,t) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SET_SESS_CACHE_SIZE,t,NULL) +# define SSL_CTX_sess_get_cache_size(ctx) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_GET_SESS_CACHE_SIZE,0,NULL) +# define SSL_CTX_set_session_cache_mode(ctx,m) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SET_SESS_CACHE_MODE,m,NULL) +# define SSL_CTX_get_session_cache_mode(ctx) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_GET_SESS_CACHE_MODE,0,NULL) + +# define SSL_CTX_get_default_read_ahead(ctx) SSL_CTX_get_read_ahead(ctx) +# define SSL_CTX_set_default_read_ahead(ctx,m) SSL_CTX_set_read_ahead(ctx,m) +# define SSL_CTX_get_read_ahead(ctx) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_GET_READ_AHEAD,0,NULL) +# define SSL_CTX_set_read_ahead(ctx,m) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SET_READ_AHEAD,m,NULL) +# define SSL_CTX_get_max_cert_list(ctx) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_GET_MAX_CERT_LIST,0,NULL) +# define SSL_CTX_set_max_cert_list(ctx,m) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SET_MAX_CERT_LIST,m,NULL) +# define SSL_get_max_cert_list(ssl) \ + SSL_ctrl(ssl,SSL_CTRL_GET_MAX_CERT_LIST,0,NULL) +# define SSL_set_max_cert_list(ssl,m) \ + SSL_ctrl(ssl,SSL_CTRL_SET_MAX_CERT_LIST,m,NULL) + +# define SSL_CTX_set_max_send_fragment(ctx,m) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SET_MAX_SEND_FRAGMENT,m,NULL) +# define SSL_set_max_send_fragment(ssl,m) \ + SSL_ctrl(ssl,SSL_CTRL_SET_MAX_SEND_FRAGMENT,m,NULL) +# define SSL_CTX_set_split_send_fragment(ctx,m) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SET_SPLIT_SEND_FRAGMENT,m,NULL) +# define SSL_set_split_send_fragment(ssl,m) \ + SSL_ctrl(ssl,SSL_CTRL_SET_SPLIT_SEND_FRAGMENT,m,NULL) +# define SSL_CTX_set_max_pipelines(ctx,m) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SET_MAX_PIPELINES,m,NULL) +# define SSL_set_max_pipelines(ssl,m) \ + SSL_ctrl(ssl,SSL_CTRL_SET_MAX_PIPELINES,m,NULL) + +void SSL_CTX_set_default_read_buffer_len(SSL_CTX *ctx, size_t len); +void SSL_set_default_read_buffer_len(SSL *s, size_t len); + +# ifndef OPENSSL_NO_DH +/* NB: the |keylength| is only applicable when is_export is true */ +void SSL_CTX_set_tmp_dh_callback(SSL_CTX *ctx, + DH *(*dh) (SSL *ssl, int is_export, + int keylength)); +void SSL_set_tmp_dh_callback(SSL *ssl, + DH *(*dh) (SSL *ssl, int is_export, + int keylength)); +# endif + +__owur const COMP_METHOD *SSL_get_current_compression(const SSL *s); +__owur const COMP_METHOD *SSL_get_current_expansion(const SSL *s); +__owur const char *SSL_COMP_get_name(const COMP_METHOD *comp); +__owur const char *SSL_COMP_get0_name(const SSL_COMP *comp); +__owur int SSL_COMP_get_id(const SSL_COMP *comp); +STACK_OF(SSL_COMP) *SSL_COMP_get_compression_methods(void); +__owur STACK_OF(SSL_COMP) *SSL_COMP_set0_compression_methods(STACK_OF(SSL_COMP) + *meths); +# if OPENSSL_API_COMPAT < 0x10100000L +# define SSL_COMP_free_compression_methods() while(0) continue +# endif +__owur int SSL_COMP_add_compression_method(int id, COMP_METHOD *cm); + +const SSL_CIPHER *SSL_CIPHER_find(SSL *ssl, const unsigned char *ptr); +int SSL_CIPHER_get_cipher_nid(const SSL_CIPHER *c); +int SSL_CIPHER_get_digest_nid(const SSL_CIPHER *c); +int SSL_bytes_to_cipher_list(SSL *s, const unsigned char *bytes, size_t len, + int isv2format, STACK_OF(SSL_CIPHER) **sk, + STACK_OF(SSL_CIPHER) **scsvs); + +/* TLS extensions functions */ +__owur int SSL_set_session_ticket_ext(SSL *s, void *ext_data, int ext_len); + +__owur int SSL_set_session_ticket_ext_cb(SSL *s, + tls_session_ticket_ext_cb_fn cb, + void *arg); + +/* Pre-shared secret session resumption functions */ +__owur int SSL_set_session_secret_cb(SSL *s, + tls_session_secret_cb_fn session_secret_cb, + void *arg); + +void 
SSL_CTX_set_not_resumable_session_callback(SSL_CTX *ctx, + int (*cb) (SSL *ssl, + int + is_forward_secure)); + +void SSL_set_not_resumable_session_callback(SSL *ssl, + int (*cb) (SSL *ssl, + int is_forward_secure)); + +void SSL_CTX_set_record_padding_callback(SSL_CTX *ctx, + size_t (*cb) (SSL *ssl, int type, + size_t len, void *arg)); +void SSL_CTX_set_record_padding_callback_arg(SSL_CTX *ctx, void *arg); +void *SSL_CTX_get_record_padding_callback_arg(const SSL_CTX *ctx); +int SSL_CTX_set_block_padding(SSL_CTX *ctx, size_t block_size); + +void SSL_set_record_padding_callback(SSL *ssl, + size_t (*cb) (SSL *ssl, int type, + size_t len, void *arg)); +void SSL_set_record_padding_callback_arg(SSL *ssl, void *arg); +void *SSL_get_record_padding_callback_arg(const SSL *ssl); +int SSL_set_block_padding(SSL *ssl, size_t block_size); + +int SSL_set_num_tickets(SSL *s, size_t num_tickets); +size_t SSL_get_num_tickets(const SSL *s); +int SSL_CTX_set_num_tickets(SSL_CTX *ctx, size_t num_tickets); +size_t SSL_CTX_get_num_tickets(const SSL_CTX *ctx); + +# if OPENSSL_API_COMPAT < 0x10100000L +# define SSL_cache_hit(s) SSL_session_reused(s) +# endif + +__owur int SSL_session_reused(const SSL *s); +__owur int SSL_is_server(const SSL *s); + +__owur __owur SSL_CONF_CTX *SSL_CONF_CTX_new(void); +int SSL_CONF_CTX_finish(SSL_CONF_CTX *cctx); +void SSL_CONF_CTX_free(SSL_CONF_CTX *cctx); +unsigned int SSL_CONF_CTX_set_flags(SSL_CONF_CTX *cctx, unsigned int flags); +__owur unsigned int SSL_CONF_CTX_clear_flags(SSL_CONF_CTX *cctx, + unsigned int flags); +__owur int SSL_CONF_CTX_set1_prefix(SSL_CONF_CTX *cctx, const char *pre); + +void SSL_CONF_CTX_set_ssl(SSL_CONF_CTX *cctx, SSL *ssl); +void SSL_CONF_CTX_set_ssl_ctx(SSL_CONF_CTX *cctx, SSL_CTX *ctx); + +__owur int SSL_CONF_cmd(SSL_CONF_CTX *cctx, const char *cmd, const char *value); +__owur int SSL_CONF_cmd_argv(SSL_CONF_CTX *cctx, int *pargc, char ***pargv); +__owur int SSL_CONF_cmd_value_type(SSL_CONF_CTX *cctx, const char *cmd); + +void SSL_add_ssl_module(void); +int SSL_config(SSL *s, const char *name); +int SSL_CTX_config(SSL_CTX *ctx, const char *name); + +# ifndef OPENSSL_NO_SSL_TRACE +void SSL_trace(int write_p, int version, int content_type, + const void *buf, size_t len, SSL *ssl, void *arg); +# endif + +# ifndef OPENSSL_NO_SOCK +int DTLSv1_listen(SSL *s, BIO_ADDR *client); +# endif + +# ifndef OPENSSL_NO_CT + +/* + * A callback for verifying that the received SCTs are sufficient. + * Expected to return 1 if they are sufficient, otherwise 0. + * May return a negative integer if an error occurs. + * A connection should be aborted if the SCTs are deemed insufficient. + */ +typedef int (*ssl_ct_validation_cb)(const CT_POLICY_EVAL_CTX *ctx, + const STACK_OF(SCT) *scts, void *arg); + +/* + * Sets a |callback| that is invoked upon receipt of ServerHelloDone to validate + * the received SCTs. + * If the callback returns a non-positive result, the connection is terminated. + * Call this function before beginning a handshake. + * If a NULL |callback| is provided, SCT validation is disabled. + * |arg| is arbitrary userdata that will be passed to the callback whenever it + * is invoked. Ownership of |arg| remains with the caller. + * + * NOTE: A side-effect of setting a CT callback is that an OCSP stapled response + * will be requested. 
+ */ +int SSL_set_ct_validation_callback(SSL *s, ssl_ct_validation_cb callback, + void *arg); +int SSL_CTX_set_ct_validation_callback(SSL_CTX *ctx, + ssl_ct_validation_cb callback, + void *arg); +#define SSL_disable_ct(s) \ + ((void) SSL_set_validation_callback((s), NULL, NULL)) +#define SSL_CTX_disable_ct(ctx) \ + ((void) SSL_CTX_set_validation_callback((ctx), NULL, NULL)) + +/* + * The validation type enumerates the available behaviours of the built-in SSL + * CT validation callback selected via SSL_enable_ct() and SSL_CTX_enable_ct(). + * The underlying callback is a static function in libssl. + */ +enum { + SSL_CT_VALIDATION_PERMISSIVE = 0, + SSL_CT_VALIDATION_STRICT +}; + +/* + * Enable CT by setting up a callback that implements one of the built-in + * validation variants. The SSL_CT_VALIDATION_PERMISSIVE variant always + * continues the handshake, the application can make appropriate decisions at + * handshake completion. The SSL_CT_VALIDATION_STRICT variant requires at + * least one valid SCT, or else handshake termination will be requested. The + * handshake may continue anyway if SSL_VERIFY_NONE is in effect. + */ +int SSL_enable_ct(SSL *s, int validation_mode); +int SSL_CTX_enable_ct(SSL_CTX *ctx, int validation_mode); + +/* + * Report whether a non-NULL callback is enabled. + */ +int SSL_ct_is_enabled(const SSL *s); +int SSL_CTX_ct_is_enabled(const SSL_CTX *ctx); + +/* Gets the SCTs received from a connection */ +const STACK_OF(SCT) *SSL_get0_peer_scts(SSL *s); + +/* + * Loads the CT log list from the default location. + * If a CTLOG_STORE has previously been set using SSL_CTX_set_ctlog_store, + * the log information loaded from this file will be appended to the + * CTLOG_STORE. + * Returns 1 on success, 0 otherwise. + */ +int SSL_CTX_set_default_ctlog_list_file(SSL_CTX *ctx); + +/* + * Loads the CT log list from the specified file path. + * If a CTLOG_STORE has previously been set using SSL_CTX_set_ctlog_store, + * the log information loaded from this file will be appended to the + * CTLOG_STORE. + * Returns 1 on success, 0 otherwise. + */ +int SSL_CTX_set_ctlog_list_file(SSL_CTX *ctx, const char *path); + +/* + * Sets the CT log list used by all SSL connections created from this SSL_CTX. + * Ownership of the CTLOG_STORE is transferred to the SSL_CTX. + */ +void SSL_CTX_set0_ctlog_store(SSL_CTX *ctx, CTLOG_STORE *logs); + +/* + * Gets the CT log list used by all SSL connections created from this SSL_CTX. 
+ * This will be NULL unless one of the following functions has been called: + * - SSL_CTX_set_default_ctlog_list_file + * - SSL_CTX_set_ctlog_list_file + * - SSL_CTX_set_ctlog_store + */ +const CTLOG_STORE *SSL_CTX_get0_ctlog_store(const SSL_CTX *ctx); + +# endif /* OPENSSL_NO_CT */ + +/* What the "other" parameter contains in security callback */ +/* Mask for type */ +# define SSL_SECOP_OTHER_TYPE 0xffff0000 +# define SSL_SECOP_OTHER_NONE 0 +# define SSL_SECOP_OTHER_CIPHER (1 << 16) +# define SSL_SECOP_OTHER_CURVE (2 << 16) +# define SSL_SECOP_OTHER_DH (3 << 16) +# define SSL_SECOP_OTHER_PKEY (4 << 16) +# define SSL_SECOP_OTHER_SIGALG (5 << 16) +# define SSL_SECOP_OTHER_CERT (6 << 16) + +/* Indicated operation refers to peer key or certificate */ +# define SSL_SECOP_PEER 0x1000 + +/* Values for "op" parameter in security callback */ + +/* Called to filter ciphers */ +/* Ciphers client supports */ +# define SSL_SECOP_CIPHER_SUPPORTED (1 | SSL_SECOP_OTHER_CIPHER) +/* Cipher shared by client/server */ +# define SSL_SECOP_CIPHER_SHARED (2 | SSL_SECOP_OTHER_CIPHER) +/* Sanity check of cipher server selects */ +# define SSL_SECOP_CIPHER_CHECK (3 | SSL_SECOP_OTHER_CIPHER) +/* Curves supported by client */ +# define SSL_SECOP_CURVE_SUPPORTED (4 | SSL_SECOP_OTHER_CURVE) +/* Curves shared by client/server */ +# define SSL_SECOP_CURVE_SHARED (5 | SSL_SECOP_OTHER_CURVE) +/* Sanity check of curve server selects */ +# define SSL_SECOP_CURVE_CHECK (6 | SSL_SECOP_OTHER_CURVE) +/* Temporary DH key */ +# define SSL_SECOP_TMP_DH (7 | SSL_SECOP_OTHER_PKEY) +/* SSL/TLS version */ +# define SSL_SECOP_VERSION (9 | SSL_SECOP_OTHER_NONE) +/* Session tickets */ +# define SSL_SECOP_TICKET (10 | SSL_SECOP_OTHER_NONE) +/* Supported signature algorithms sent to peer */ +# define SSL_SECOP_SIGALG_SUPPORTED (11 | SSL_SECOP_OTHER_SIGALG) +/* Shared signature algorithm */ +# define SSL_SECOP_SIGALG_SHARED (12 | SSL_SECOP_OTHER_SIGALG) +/* Sanity check signature algorithm allowed */ +# define SSL_SECOP_SIGALG_CHECK (13 | SSL_SECOP_OTHER_SIGALG) +/* Used to get mask of supported public key signature algorithms */ +# define SSL_SECOP_SIGALG_MASK (14 | SSL_SECOP_OTHER_SIGALG) +/* Use to see if compression is allowed */ +# define SSL_SECOP_COMPRESSION (15 | SSL_SECOP_OTHER_NONE) +/* EE key in certificate */ +# define SSL_SECOP_EE_KEY (16 | SSL_SECOP_OTHER_CERT) +/* CA key in certificate */ +# define SSL_SECOP_CA_KEY (17 | SSL_SECOP_OTHER_CERT) +/* CA digest algorithm in certificate */ +# define SSL_SECOP_CA_MD (18 | SSL_SECOP_OTHER_CERT) +/* Peer EE key in certificate */ +# define SSL_SECOP_PEER_EE_KEY (SSL_SECOP_EE_KEY | SSL_SECOP_PEER) +/* Peer CA key in certificate */ +# define SSL_SECOP_PEER_CA_KEY (SSL_SECOP_CA_KEY | SSL_SECOP_PEER) +/* Peer CA digest algorithm in certificate */ +# define SSL_SECOP_PEER_CA_MD (SSL_SECOP_CA_MD | SSL_SECOP_PEER) + +void SSL_set_security_level(SSL *s, int level); +__owur int SSL_get_security_level(const SSL *s); +void SSL_set_security_callback(SSL *s, + int (*cb) (const SSL *s, const SSL_CTX *ctx, + int op, int bits, int nid, + void *other, void *ex)); +int (*SSL_get_security_callback(const SSL *s)) (const SSL *s, + const SSL_CTX *ctx, int op, + int bits, int nid, void *other, + void *ex); +void SSL_set0_security_ex_data(SSL *s, void *ex); +__owur void *SSL_get0_security_ex_data(const SSL *s); + +void SSL_CTX_set_security_level(SSL_CTX *ctx, int level); +__owur int SSL_CTX_get_security_level(const SSL_CTX *ctx); +void SSL_CTX_set_security_callback(SSL_CTX *ctx, + int (*cb) (const SSL *s, 
const SSL_CTX *ctx, + int op, int bits, int nid, + void *other, void *ex)); +int (*SSL_CTX_get_security_callback(const SSL_CTX *ctx)) (const SSL *s, + const SSL_CTX *ctx, + int op, int bits, + int nid, + void *other, + void *ex); +void SSL_CTX_set0_security_ex_data(SSL_CTX *ctx, void *ex); +__owur void *SSL_CTX_get0_security_ex_data(const SSL_CTX *ctx); + +/* OPENSSL_INIT flag 0x010000 reserved for internal use */ +# define OPENSSL_INIT_NO_LOAD_SSL_STRINGS 0x00100000L +# define OPENSSL_INIT_LOAD_SSL_STRINGS 0x00200000L + +# define OPENSSL_INIT_SSL_DEFAULT \ + (OPENSSL_INIT_LOAD_SSL_STRINGS | OPENSSL_INIT_LOAD_CRYPTO_STRINGS) + +int OPENSSL_init_ssl(uint64_t opts, const OPENSSL_INIT_SETTINGS *settings); + +# ifndef OPENSSL_NO_UNIT_TEST +__owur const struct openssl_ssl_test_functions *SSL_test_functions(void); +# endif + +__owur int SSL_free_buffers(SSL *ssl); +__owur int SSL_alloc_buffers(SSL *ssl); + +/* Status codes passed to the decrypt session ticket callback. Some of these + * are for internal use only and are never passed to the callback. */ +typedef int SSL_TICKET_STATUS; + +/* Support for ticket appdata */ +/* fatal error, malloc failure */ +# define SSL_TICKET_FATAL_ERR_MALLOC 0 +/* fatal error, either from parsing or decrypting the ticket */ +# define SSL_TICKET_FATAL_ERR_OTHER 1 +/* No ticket present */ +# define SSL_TICKET_NONE 2 +/* Empty ticket present */ +# define SSL_TICKET_EMPTY 3 +/* the ticket couldn't be decrypted */ +# define SSL_TICKET_NO_DECRYPT 4 +/* a ticket was successfully decrypted */ +# define SSL_TICKET_SUCCESS 5 +/* same as above but the ticket needs to be renewed */ +# define SSL_TICKET_SUCCESS_RENEW 6 + +/* Return codes for the decrypt session ticket callback */ +typedef int SSL_TICKET_RETURN; + +/* An error occurred */ +#define SSL_TICKET_RETURN_ABORT 0 +/* Do not use the ticket, do not send a renewed ticket to the client */ +#define SSL_TICKET_RETURN_IGNORE 1 +/* Do not use the ticket, send a renewed ticket to the client */ +#define SSL_TICKET_RETURN_IGNORE_RENEW 2 +/* Use the ticket, do not send a renewed ticket to the client */ +#define SSL_TICKET_RETURN_USE 3 +/* Use the ticket, send a renewed ticket to the client */ +#define SSL_TICKET_RETURN_USE_RENEW 4 + +typedef int (*SSL_CTX_generate_session_ticket_fn)(SSL *s, void *arg); +typedef SSL_TICKET_RETURN (*SSL_CTX_decrypt_session_ticket_fn)(SSL *s, SSL_SESSION *ss, + const unsigned char *keyname, + size_t keyname_length, + SSL_TICKET_STATUS status, + void *arg); +int SSL_CTX_set_session_ticket_cb(SSL_CTX *ctx, + SSL_CTX_generate_session_ticket_fn gen_cb, + SSL_CTX_decrypt_session_ticket_fn dec_cb, + void *arg); +int SSL_SESSION_set1_ticket_appdata(SSL_SESSION *ss, const void *data, size_t len); +int SSL_SESSION_get0_ticket_appdata(SSL_SESSION *ss, void **data, size_t *len); + +extern const char SSL_version_str[]; + +typedef unsigned int (*DTLS_timer_cb)(SSL *s, unsigned int timer_us); + +void DTLS_set_timer_cb(SSL *s, DTLS_timer_cb cb); + + +typedef int (*SSL_allow_early_data_cb_fn)(SSL *s, void *arg); +void SSL_CTX_set_allow_early_data_cb(SSL_CTX *ctx, + SSL_allow_early_data_cb_fn cb, + void *arg); +void SSL_set_allow_early_data_cb(SSL *s, + SSL_allow_early_data_cb_fn cb, + void *arg); + +# ifdef __cplusplus +} +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/ssl2.h b/third_party/openssl/uos/sw_64/include/openssl/ssl2.h new file mode 100644 index 00000000..5321bd27 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/ssl2.h @@ -0,0 +1,24 @@ +/* + * Copyright 
1995-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_SSL2_H +# define HEADER_SSL2_H + +#ifdef __cplusplus +extern "C" { +#endif + +# define SSL2_VERSION 0x0002 + +# define SSL2_MT_CLIENT_HELLO 1 + +#ifdef __cplusplus +} +#endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/ssl3.h b/third_party/openssl/uos/sw_64/include/openssl/ssl3.h new file mode 100644 index 00000000..07effba2 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/ssl3.h @@ -0,0 +1,342 @@ +/* + * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved. + * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_SSL3_H +# define HEADER_SSL3_H + +# include +# include +# include +# include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Signalling cipher suite value from RFC 5746 + * (TLS_EMPTY_RENEGOTIATION_INFO_SCSV) + */ +# define SSL3_CK_SCSV 0x030000FF + +/* + * Signalling cipher suite value from draft-ietf-tls-downgrade-scsv-00 + * (TLS_FALLBACK_SCSV) + */ +# define SSL3_CK_FALLBACK_SCSV 0x03005600 + +# define SSL3_CK_RSA_NULL_MD5 0x03000001 +# define SSL3_CK_RSA_NULL_SHA 0x03000002 +# define SSL3_CK_RSA_RC4_40_MD5 0x03000003 +# define SSL3_CK_RSA_RC4_128_MD5 0x03000004 +# define SSL3_CK_RSA_RC4_128_SHA 0x03000005 +# define SSL3_CK_RSA_RC2_40_MD5 0x03000006 +# define SSL3_CK_RSA_IDEA_128_SHA 0x03000007 +# define SSL3_CK_RSA_DES_40_CBC_SHA 0x03000008 +# define SSL3_CK_RSA_DES_64_CBC_SHA 0x03000009 +# define SSL3_CK_RSA_DES_192_CBC3_SHA 0x0300000A + +# define SSL3_CK_DH_DSS_DES_40_CBC_SHA 0x0300000B +# define SSL3_CK_DH_DSS_DES_64_CBC_SHA 0x0300000C +# define SSL3_CK_DH_DSS_DES_192_CBC3_SHA 0x0300000D +# define SSL3_CK_DH_RSA_DES_40_CBC_SHA 0x0300000E +# define SSL3_CK_DH_RSA_DES_64_CBC_SHA 0x0300000F +# define SSL3_CK_DH_RSA_DES_192_CBC3_SHA 0x03000010 + +# define SSL3_CK_DHE_DSS_DES_40_CBC_SHA 0x03000011 +# define SSL3_CK_EDH_DSS_DES_40_CBC_SHA SSL3_CK_DHE_DSS_DES_40_CBC_SHA +# define SSL3_CK_DHE_DSS_DES_64_CBC_SHA 0x03000012 +# define SSL3_CK_EDH_DSS_DES_64_CBC_SHA SSL3_CK_DHE_DSS_DES_64_CBC_SHA +# define SSL3_CK_DHE_DSS_DES_192_CBC3_SHA 0x03000013 +# define SSL3_CK_EDH_DSS_DES_192_CBC3_SHA SSL3_CK_DHE_DSS_DES_192_CBC3_SHA +# define SSL3_CK_DHE_RSA_DES_40_CBC_SHA 0x03000014 +# define SSL3_CK_EDH_RSA_DES_40_CBC_SHA SSL3_CK_DHE_RSA_DES_40_CBC_SHA +# define SSL3_CK_DHE_RSA_DES_64_CBC_SHA 0x03000015 +# define SSL3_CK_EDH_RSA_DES_64_CBC_SHA SSL3_CK_DHE_RSA_DES_64_CBC_SHA +# define SSL3_CK_DHE_RSA_DES_192_CBC3_SHA 0x03000016 +# define SSL3_CK_EDH_RSA_DES_192_CBC3_SHA SSL3_CK_DHE_RSA_DES_192_CBC3_SHA + +# define SSL3_CK_ADH_RC4_40_MD5 0x03000017 +# define SSL3_CK_ADH_RC4_128_MD5 0x03000018 +# define SSL3_CK_ADH_DES_40_CBC_SHA 0x03000019 +# define SSL3_CK_ADH_DES_64_CBC_SHA 0x0300001A +# define SSL3_CK_ADH_DES_192_CBC_SHA 0x0300001B + +/* a bundle of RFC standard cipher names, generated from ssl3_ciphers[] */ +# define SSL3_RFC_RSA_NULL_MD5 "TLS_RSA_WITH_NULL_MD5" +# define SSL3_RFC_RSA_NULL_SHA 
"TLS_RSA_WITH_NULL_SHA" +# define SSL3_RFC_RSA_DES_192_CBC3_SHA "TLS_RSA_WITH_3DES_EDE_CBC_SHA" +# define SSL3_RFC_DHE_DSS_DES_192_CBC3_SHA "TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA" +# define SSL3_RFC_DHE_RSA_DES_192_CBC3_SHA "TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA" +# define SSL3_RFC_ADH_DES_192_CBC_SHA "TLS_DH_anon_WITH_3DES_EDE_CBC_SHA" +# define SSL3_RFC_RSA_IDEA_128_SHA "TLS_RSA_WITH_IDEA_CBC_SHA" +# define SSL3_RFC_RSA_RC4_128_MD5 "TLS_RSA_WITH_RC4_128_MD5" +# define SSL3_RFC_RSA_RC4_128_SHA "TLS_RSA_WITH_RC4_128_SHA" +# define SSL3_RFC_ADH_RC4_128_MD5 "TLS_DH_anon_WITH_RC4_128_MD5" + +# define SSL3_TXT_RSA_NULL_MD5 "NULL-MD5" +# define SSL3_TXT_RSA_NULL_SHA "NULL-SHA" +# define SSL3_TXT_RSA_RC4_40_MD5 "EXP-RC4-MD5" +# define SSL3_TXT_RSA_RC4_128_MD5 "RC4-MD5" +# define SSL3_TXT_RSA_RC4_128_SHA "RC4-SHA" +# define SSL3_TXT_RSA_RC2_40_MD5 "EXP-RC2-CBC-MD5" +# define SSL3_TXT_RSA_IDEA_128_SHA "IDEA-CBC-SHA" +# define SSL3_TXT_RSA_DES_40_CBC_SHA "EXP-DES-CBC-SHA" +# define SSL3_TXT_RSA_DES_64_CBC_SHA "DES-CBC-SHA" +# define SSL3_TXT_RSA_DES_192_CBC3_SHA "DES-CBC3-SHA" + +# define SSL3_TXT_DH_DSS_DES_40_CBC_SHA "EXP-DH-DSS-DES-CBC-SHA" +# define SSL3_TXT_DH_DSS_DES_64_CBC_SHA "DH-DSS-DES-CBC-SHA" +# define SSL3_TXT_DH_DSS_DES_192_CBC3_SHA "DH-DSS-DES-CBC3-SHA" +# define SSL3_TXT_DH_RSA_DES_40_CBC_SHA "EXP-DH-RSA-DES-CBC-SHA" +# define SSL3_TXT_DH_RSA_DES_64_CBC_SHA "DH-RSA-DES-CBC-SHA" +# define SSL3_TXT_DH_RSA_DES_192_CBC3_SHA "DH-RSA-DES-CBC3-SHA" + +# define SSL3_TXT_DHE_DSS_DES_40_CBC_SHA "EXP-DHE-DSS-DES-CBC-SHA" +# define SSL3_TXT_DHE_DSS_DES_64_CBC_SHA "DHE-DSS-DES-CBC-SHA" +# define SSL3_TXT_DHE_DSS_DES_192_CBC3_SHA "DHE-DSS-DES-CBC3-SHA" +# define SSL3_TXT_DHE_RSA_DES_40_CBC_SHA "EXP-DHE-RSA-DES-CBC-SHA" +# define SSL3_TXT_DHE_RSA_DES_64_CBC_SHA "DHE-RSA-DES-CBC-SHA" +# define SSL3_TXT_DHE_RSA_DES_192_CBC3_SHA "DHE-RSA-DES-CBC3-SHA" + +/* + * This next block of six "EDH" labels is for backward compatibility with + * older versions of OpenSSL. New code should use the six "DHE" labels above + * instead: + */ +# define SSL3_TXT_EDH_DSS_DES_40_CBC_SHA "EXP-EDH-DSS-DES-CBC-SHA" +# define SSL3_TXT_EDH_DSS_DES_64_CBC_SHA "EDH-DSS-DES-CBC-SHA" +# define SSL3_TXT_EDH_DSS_DES_192_CBC3_SHA "EDH-DSS-DES-CBC3-SHA" +# define SSL3_TXT_EDH_RSA_DES_40_CBC_SHA "EXP-EDH-RSA-DES-CBC-SHA" +# define SSL3_TXT_EDH_RSA_DES_64_CBC_SHA "EDH-RSA-DES-CBC-SHA" +# define SSL3_TXT_EDH_RSA_DES_192_CBC3_SHA "EDH-RSA-DES-CBC3-SHA" + +# define SSL3_TXT_ADH_RC4_40_MD5 "EXP-ADH-RC4-MD5" +# define SSL3_TXT_ADH_RC4_128_MD5 "ADH-RC4-MD5" +# define SSL3_TXT_ADH_DES_40_CBC_SHA "EXP-ADH-DES-CBC-SHA" +# define SSL3_TXT_ADH_DES_64_CBC_SHA "ADH-DES-CBC-SHA" +# define SSL3_TXT_ADH_DES_192_CBC_SHA "ADH-DES-CBC3-SHA" + +# define SSL3_SSL_SESSION_ID_LENGTH 32 +# define SSL3_MAX_SSL_SESSION_ID_LENGTH 32 + +# define SSL3_MASTER_SECRET_SIZE 48 +# define SSL3_RANDOM_SIZE 32 +# define SSL3_SESSION_ID_SIZE 32 +# define SSL3_RT_HEADER_LENGTH 5 + +# define SSL3_HM_HEADER_LENGTH 4 + +# ifndef SSL3_ALIGN_PAYLOAD + /* + * Some will argue that this increases memory footprint, but it's not + * actually true. Point is that malloc has to return at least 64-bit aligned + * pointers, meaning that allocating 5 bytes wastes 3 bytes in either case. 
+ * Suggested pre-gaping simply moves these wasted bytes from the end of + * allocated region to its front, but makes data payload aligned, which + * improves performance:-) + */ +# define SSL3_ALIGN_PAYLOAD 8 +# else +# if (SSL3_ALIGN_PAYLOAD&(SSL3_ALIGN_PAYLOAD-1))!=0 +# error "insane SSL3_ALIGN_PAYLOAD" +# undef SSL3_ALIGN_PAYLOAD +# endif +# endif + +/* + * This is the maximum MAC (digest) size used by the SSL library. Currently + * maximum of 20 is used by SHA1, but we reserve for future extension for + * 512-bit hashes. + */ + +# define SSL3_RT_MAX_MD_SIZE 64 + +/* + * Maximum block size used in all ciphersuites. Currently 16 for AES. + */ + +# define SSL_RT_MAX_CIPHER_BLOCK_SIZE 16 + +# define SSL3_RT_MAX_EXTRA (16384) + +/* Maximum plaintext length: defined by SSL/TLS standards */ +# define SSL3_RT_MAX_PLAIN_LENGTH 16384 +/* Maximum compression overhead: defined by SSL/TLS standards */ +# define SSL3_RT_MAX_COMPRESSED_OVERHEAD 1024 + +/* + * The standards give a maximum encryption overhead of 1024 bytes. In + * practice the value is lower than this. The overhead is the maximum number + * of padding bytes (256) plus the mac size. + */ +# define SSL3_RT_MAX_ENCRYPTED_OVERHEAD (256 + SSL3_RT_MAX_MD_SIZE) +# define SSL3_RT_MAX_TLS13_ENCRYPTED_OVERHEAD 256 + +/* + * OpenSSL currently only uses a padding length of at most one block so the + * send overhead is smaller. + */ + +# define SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \ + (SSL_RT_MAX_CIPHER_BLOCK_SIZE + SSL3_RT_MAX_MD_SIZE) + +/* If compression isn't used don't include the compression overhead */ + +# ifdef OPENSSL_NO_COMP +# define SSL3_RT_MAX_COMPRESSED_LENGTH SSL3_RT_MAX_PLAIN_LENGTH +# else +# define SSL3_RT_MAX_COMPRESSED_LENGTH \ + (SSL3_RT_MAX_PLAIN_LENGTH+SSL3_RT_MAX_COMPRESSED_OVERHEAD) +# endif +# define SSL3_RT_MAX_ENCRYPTED_LENGTH \ + (SSL3_RT_MAX_ENCRYPTED_OVERHEAD+SSL3_RT_MAX_COMPRESSED_LENGTH) +# define SSL3_RT_MAX_TLS13_ENCRYPTED_LENGTH \ + (SSL3_RT_MAX_PLAIN_LENGTH + SSL3_RT_MAX_TLS13_ENCRYPTED_OVERHEAD) +# define SSL3_RT_MAX_PACKET_SIZE \ + (SSL3_RT_MAX_ENCRYPTED_LENGTH+SSL3_RT_HEADER_LENGTH) + +# define SSL3_MD_CLIENT_FINISHED_CONST "\x43\x4C\x4E\x54" +# define SSL3_MD_SERVER_FINISHED_CONST "\x53\x52\x56\x52" + +# define SSL3_VERSION 0x0300 +# define SSL3_VERSION_MAJOR 0x03 +# define SSL3_VERSION_MINOR 0x00 + +# define SSL3_RT_CHANGE_CIPHER_SPEC 20 +# define SSL3_RT_ALERT 21 +# define SSL3_RT_HANDSHAKE 22 +# define SSL3_RT_APPLICATION_DATA 23 +# define DTLS1_RT_HEARTBEAT 24 + +/* Pseudo content types to indicate additional parameters */ +# define TLS1_RT_CRYPTO 0x1000 +# define TLS1_RT_CRYPTO_PREMASTER (TLS1_RT_CRYPTO | 0x1) +# define TLS1_RT_CRYPTO_CLIENT_RANDOM (TLS1_RT_CRYPTO | 0x2) +# define TLS1_RT_CRYPTO_SERVER_RANDOM (TLS1_RT_CRYPTO | 0x3) +# define TLS1_RT_CRYPTO_MASTER (TLS1_RT_CRYPTO | 0x4) + +# define TLS1_RT_CRYPTO_READ 0x0000 +# define TLS1_RT_CRYPTO_WRITE 0x0100 +# define TLS1_RT_CRYPTO_MAC (TLS1_RT_CRYPTO | 0x5) +# define TLS1_RT_CRYPTO_KEY (TLS1_RT_CRYPTO | 0x6) +# define TLS1_RT_CRYPTO_IV (TLS1_RT_CRYPTO | 0x7) +# define TLS1_RT_CRYPTO_FIXED_IV (TLS1_RT_CRYPTO | 0x8) + +/* Pseudo content types for SSL/TLS header info */ +# define SSL3_RT_HEADER 0x100 +# define SSL3_RT_INNER_CONTENT_TYPE 0x101 + +# define SSL3_AL_WARNING 1 +# define SSL3_AL_FATAL 2 + +# define SSL3_AD_CLOSE_NOTIFY 0 +# define SSL3_AD_UNEXPECTED_MESSAGE 10/* fatal */ +# define SSL3_AD_BAD_RECORD_MAC 20/* fatal */ +# define SSL3_AD_DECOMPRESSION_FAILURE 30/* fatal */ +# define SSL3_AD_HANDSHAKE_FAILURE 40/* fatal */ +# define 
SSL3_AD_NO_CERTIFICATE 41 +# define SSL3_AD_BAD_CERTIFICATE 42 +# define SSL3_AD_UNSUPPORTED_CERTIFICATE 43 +# define SSL3_AD_CERTIFICATE_REVOKED 44 +# define SSL3_AD_CERTIFICATE_EXPIRED 45 +# define SSL3_AD_CERTIFICATE_UNKNOWN 46 +# define SSL3_AD_ILLEGAL_PARAMETER 47/* fatal */ + +# define TLS1_HB_REQUEST 1 +# define TLS1_HB_RESPONSE 2 + + +# define SSL3_CT_RSA_SIGN 1 +# define SSL3_CT_DSS_SIGN 2 +# define SSL3_CT_RSA_FIXED_DH 3 +# define SSL3_CT_DSS_FIXED_DH 4 +# define SSL3_CT_RSA_EPHEMERAL_DH 5 +# define SSL3_CT_DSS_EPHEMERAL_DH 6 +# define SSL3_CT_FORTEZZA_DMS 20 +/* + * SSL3_CT_NUMBER is used to size arrays and it must be large enough to + * contain all of the cert types defined for *either* SSLv3 and TLSv1. + */ +# define SSL3_CT_NUMBER 10 + +# if defined(TLS_CT_NUMBER) +# if TLS_CT_NUMBER != SSL3_CT_NUMBER +# error "SSL/TLS CT_NUMBER values do not match" +# endif +# endif + +/* No longer used as of OpenSSL 1.1.1 */ +# define SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS 0x0001 + +/* Removed from OpenSSL 1.1.0 */ +# define TLS1_FLAGS_TLS_PADDING_BUG 0x0 + +# define TLS1_FLAGS_SKIP_CERT_VERIFY 0x0010 + +/* Set if we encrypt then mac instead of usual mac then encrypt */ +# define TLS1_FLAGS_ENCRYPT_THEN_MAC_READ 0x0100 +# define TLS1_FLAGS_ENCRYPT_THEN_MAC TLS1_FLAGS_ENCRYPT_THEN_MAC_READ + +/* Set if extended master secret extension received from peer */ +# define TLS1_FLAGS_RECEIVED_EXTMS 0x0200 + +# define TLS1_FLAGS_ENCRYPT_THEN_MAC_WRITE 0x0400 + +# define TLS1_FLAGS_STATELESS 0x0800 + +/* Set if extended master secret extension required on renegotiation */ +# define TLS1_FLAGS_REQUIRED_EXTMS 0x1000 + +# define SSL3_MT_HELLO_REQUEST 0 +# define SSL3_MT_CLIENT_HELLO 1 +# define SSL3_MT_SERVER_HELLO 2 +# define SSL3_MT_NEWSESSION_TICKET 4 +# define SSL3_MT_END_OF_EARLY_DATA 5 +# define SSL3_MT_ENCRYPTED_EXTENSIONS 8 +# define SSL3_MT_CERTIFICATE 11 +# define SSL3_MT_SERVER_KEY_EXCHANGE 12 +# define SSL3_MT_CERTIFICATE_REQUEST 13 +# define SSL3_MT_SERVER_DONE 14 +# define SSL3_MT_CERTIFICATE_VERIFY 15 +# define SSL3_MT_CLIENT_KEY_EXCHANGE 16 +# define SSL3_MT_FINISHED 20 +# define SSL3_MT_CERTIFICATE_URL 21 +# define SSL3_MT_CERTIFICATE_STATUS 22 +# define SSL3_MT_SUPPLEMENTAL_DATA 23 +# define SSL3_MT_KEY_UPDATE 24 +# ifndef OPENSSL_NO_NEXTPROTONEG +# define SSL3_MT_NEXT_PROTO 67 +# endif +# define SSL3_MT_MESSAGE_HASH 254 +# define DTLS1_MT_HELLO_VERIFY_REQUEST 3 + +/* Dummy message type for handling CCS like a normal handshake message */ +# define SSL3_MT_CHANGE_CIPHER_SPEC 0x0101 + +# define SSL3_MT_CCS 1 + +/* These are used when changing over to a new cipher */ +# define SSL3_CC_READ 0x001 +# define SSL3_CC_WRITE 0x002 +# define SSL3_CC_CLIENT 0x010 +# define SSL3_CC_SERVER 0x020 +# define SSL3_CC_EARLY 0x040 +# define SSL3_CC_HANDSHAKE 0x080 +# define SSL3_CC_APPLICATION 0x100 +# define SSL3_CHANGE_CIPHER_CLIENT_WRITE (SSL3_CC_CLIENT|SSL3_CC_WRITE) +# define SSL3_CHANGE_CIPHER_SERVER_READ (SSL3_CC_SERVER|SSL3_CC_READ) +# define SSL3_CHANGE_CIPHER_CLIENT_READ (SSL3_CC_CLIENT|SSL3_CC_READ) +# define SSL3_CHANGE_CIPHER_SERVER_WRITE (SSL3_CC_SERVER|SSL3_CC_WRITE) + +#ifdef __cplusplus +} +#endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/sslerr.h b/third_party/openssl/uos/sw_64/include/openssl/sslerr.h new file mode 100644 index 00000000..701d61c6 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/sslerr.h @@ -0,0 +1,776 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2021 The OpenSSL Project Authors. All Rights Reserved. 
+ * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_SSLERR_H +# define HEADER_SSLERR_H + +# ifndef HEADER_SYMHACKS_H +# include +# endif + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_SSL_strings(void); + +/* + * SSL function codes. + */ +# define SSL_F_ADD_CLIENT_KEY_SHARE_EXT 438 +# define SSL_F_ADD_KEY_SHARE 512 +# define SSL_F_BYTES_TO_CIPHER_LIST 519 +# define SSL_F_CHECK_SUITEB_CIPHER_LIST 331 +# define SSL_F_CIPHERSUITE_CB 622 +# define SSL_F_CONSTRUCT_CA_NAMES 552 +# define SSL_F_CONSTRUCT_KEY_EXCHANGE_TBS 553 +# define SSL_F_CONSTRUCT_STATEFUL_TICKET 636 +# define SSL_F_CONSTRUCT_STATELESS_TICKET 637 +# define SSL_F_CREATE_SYNTHETIC_MESSAGE_HASH 539 +# define SSL_F_CREATE_TICKET_PREQUEL 638 +# define SSL_F_CT_MOVE_SCTS 345 +# define SSL_F_CT_STRICT 349 +# define SSL_F_CUSTOM_EXT_ADD 554 +# define SSL_F_CUSTOM_EXT_PARSE 555 +# define SSL_F_D2I_SSL_SESSION 103 +# define SSL_F_DANE_CTX_ENABLE 347 +# define SSL_F_DANE_MTYPE_SET 393 +# define SSL_F_DANE_TLSA_ADD 394 +# define SSL_F_DERIVE_SECRET_KEY_AND_IV 514 +# define SSL_F_DO_DTLS1_WRITE 245 +# define SSL_F_DO_SSL3_WRITE 104 +# define SSL_F_DTLS1_BUFFER_RECORD 247 +# define SSL_F_DTLS1_CHECK_TIMEOUT_NUM 318 +# define SSL_F_DTLS1_HEARTBEAT 305 +# define SSL_F_DTLS1_HM_FRAGMENT_NEW 623 +# define SSL_F_DTLS1_PREPROCESS_FRAGMENT 288 +# define SSL_F_DTLS1_PROCESS_BUFFERED_RECORDS 424 +# define SSL_F_DTLS1_PROCESS_RECORD 257 +# define SSL_F_DTLS1_READ_BYTES 258 +# define SSL_F_DTLS1_READ_FAILED 339 +# define SSL_F_DTLS1_RETRANSMIT_MESSAGE 390 +# define SSL_F_DTLS1_WRITE_APP_DATA_BYTES 268 +# define SSL_F_DTLS1_WRITE_BYTES 545 +# define SSL_F_DTLSV1_LISTEN 350 +# define SSL_F_DTLS_CONSTRUCT_CHANGE_CIPHER_SPEC 371 +# define SSL_F_DTLS_CONSTRUCT_HELLO_VERIFY_REQUEST 385 +# define SSL_F_DTLS_GET_REASSEMBLED_MESSAGE 370 +# define SSL_F_DTLS_PROCESS_HELLO_VERIFY 386 +# define SSL_F_DTLS_RECORD_LAYER_NEW 635 +# define SSL_F_DTLS_WAIT_FOR_DRY 592 +# define SSL_F_EARLY_DATA_COUNT_OK 532 +# define SSL_F_FINAL_EARLY_DATA 556 +# define SSL_F_FINAL_EC_PT_FORMATS 485 +# define SSL_F_FINAL_EMS 486 +# define SSL_F_FINAL_KEY_SHARE 503 +# define SSL_F_FINAL_MAXFRAGMENTLEN 557 +# define SSL_F_FINAL_PSK 639 +# define SSL_F_FINAL_RENEGOTIATE 483 +# define SSL_F_FINAL_SERVER_NAME 558 +# define SSL_F_FINAL_SIG_ALGS 497 +# define SSL_F_GET_CERT_VERIFY_TBS_DATA 588 +# define SSL_F_NSS_KEYLOG_INT 500 +# define SSL_F_OPENSSL_INIT_SSL 342 +# define SSL_F_OSSL_STATEM_CLIENT13_READ_TRANSITION 436 +# define SSL_F_OSSL_STATEM_CLIENT13_WRITE_TRANSITION 598 +# define SSL_F_OSSL_STATEM_CLIENT_CONSTRUCT_MESSAGE 430 +# define SSL_F_OSSL_STATEM_CLIENT_POST_PROCESS_MESSAGE 593 +# define SSL_F_OSSL_STATEM_CLIENT_PROCESS_MESSAGE 594 +# define SSL_F_OSSL_STATEM_CLIENT_READ_TRANSITION 417 +# define SSL_F_OSSL_STATEM_CLIENT_WRITE_TRANSITION 599 +# define SSL_F_OSSL_STATEM_SERVER13_READ_TRANSITION 437 +# define SSL_F_OSSL_STATEM_SERVER13_WRITE_TRANSITION 600 +# define SSL_F_OSSL_STATEM_SERVER_CONSTRUCT_MESSAGE 431 +# define SSL_F_OSSL_STATEM_SERVER_POST_PROCESS_MESSAGE 601 +# define SSL_F_OSSL_STATEM_SERVER_POST_WORK 602 +# define SSL_F_OSSL_STATEM_SERVER_PRE_WORK 640 +# define SSL_F_OSSL_STATEM_SERVER_PROCESS_MESSAGE 603 +# define SSL_F_OSSL_STATEM_SERVER_READ_TRANSITION 418 +# define SSL_F_OSSL_STATEM_SERVER_WRITE_TRANSITION 604 +# define 
SSL_F_PARSE_CA_NAMES 541 +# define SSL_F_PITEM_NEW 624 +# define SSL_F_PQUEUE_NEW 625 +# define SSL_F_PROCESS_KEY_SHARE_EXT 439 +# define SSL_F_READ_STATE_MACHINE 352 +# define SSL_F_SET_CLIENT_CIPHERSUITE 540 +# define SSL_F_SRP_GENERATE_CLIENT_MASTER_SECRET 595 +# define SSL_F_SRP_GENERATE_SERVER_MASTER_SECRET 589 +# define SSL_F_SRP_VERIFY_SERVER_PARAM 596 +# define SSL_F_SSL3_CHANGE_CIPHER_STATE 129 +# define SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM 130 +# define SSL_F_SSL3_CTRL 213 +# define SSL_F_SSL3_CTX_CTRL 133 +# define SSL_F_SSL3_DIGEST_CACHED_RECORDS 293 +# define SSL_F_SSL3_DO_CHANGE_CIPHER_SPEC 292 +# define SSL_F_SSL3_ENC 608 +# define SSL_F_SSL3_FINAL_FINISH_MAC 285 +# define SSL_F_SSL3_FINISH_MAC 587 +# define SSL_F_SSL3_GENERATE_KEY_BLOCK 238 +# define SSL_F_SSL3_GENERATE_MASTER_SECRET 388 +# define SSL_F_SSL3_GET_RECORD 143 +# define SSL_F_SSL3_INIT_FINISHED_MAC 397 +# define SSL_F_SSL3_OUTPUT_CERT_CHAIN 147 +# define SSL_F_SSL3_READ_BYTES 148 +# define SSL_F_SSL3_READ_N 149 +# define SSL_F_SSL3_SETUP_KEY_BLOCK 157 +# define SSL_F_SSL3_SETUP_READ_BUFFER 156 +# define SSL_F_SSL3_SETUP_WRITE_BUFFER 291 +# define SSL_F_SSL3_WRITE_BYTES 158 +# define SSL_F_SSL3_WRITE_PENDING 159 +# define SSL_F_SSL_ADD_CERT_CHAIN 316 +# define SSL_F_SSL_ADD_CERT_TO_BUF 319 +# define SSL_F_SSL_ADD_CERT_TO_WPACKET 493 +# define SSL_F_SSL_ADD_CLIENTHELLO_RENEGOTIATE_EXT 298 +# define SSL_F_SSL_ADD_CLIENTHELLO_TLSEXT 277 +# define SSL_F_SSL_ADD_CLIENTHELLO_USE_SRTP_EXT 307 +# define SSL_F_SSL_ADD_DIR_CERT_SUBJECTS_TO_STACK 215 +# define SSL_F_SSL_ADD_FILE_CERT_SUBJECTS_TO_STACK 216 +# define SSL_F_SSL_ADD_SERVERHELLO_RENEGOTIATE_EXT 299 +# define SSL_F_SSL_ADD_SERVERHELLO_TLSEXT 278 +# define SSL_F_SSL_ADD_SERVERHELLO_USE_SRTP_EXT 308 +# define SSL_F_SSL_BAD_METHOD 160 +# define SSL_F_SSL_BUILD_CERT_CHAIN 332 +# define SSL_F_SSL_BYTES_TO_CIPHER_LIST 161 +# define SSL_F_SSL_CACHE_CIPHERLIST 520 +# define SSL_F_SSL_CERT_ADD0_CHAIN_CERT 346 +# define SSL_F_SSL_CERT_DUP 221 +# define SSL_F_SSL_CERT_NEW 162 +# define SSL_F_SSL_CERT_SET0_CHAIN 340 +# define SSL_F_SSL_CHECK_PRIVATE_KEY 163 +# define SSL_F_SSL_CHECK_SERVERHELLO_TLSEXT 280 +# define SSL_F_SSL_CHECK_SRP_EXT_CLIENTHELLO 606 +# define SSL_F_SSL_CHECK_SRVR_ECC_CERT_AND_ALG 279 +# define SSL_F_SSL_CHOOSE_CLIENT_VERSION 607 +# define SSL_F_SSL_CIPHER_DESCRIPTION 626 +# define SSL_F_SSL_CIPHER_LIST_TO_BYTES 425 +# define SSL_F_SSL_CIPHER_PROCESS_RULESTR 230 +# define SSL_F_SSL_CIPHER_STRENGTH_SORT 231 +# define SSL_F_SSL_CLEAR 164 +# define SSL_F_SSL_CLIENT_HELLO_GET1_EXTENSIONS_PRESENT 627 +# define SSL_F_SSL_COMP_ADD_COMPRESSION_METHOD 165 +# define SSL_F_SSL_CONF_CMD 334 +# define SSL_F_SSL_CREATE_CIPHER_LIST 166 +# define SSL_F_SSL_CTRL 232 +# define SSL_F_SSL_CTX_CHECK_PRIVATE_KEY 168 +# define SSL_F_SSL_CTX_ENABLE_CT 398 +# define SSL_F_SSL_CTX_MAKE_PROFILES 309 +# define SSL_F_SSL_CTX_NEW 169 +# define SSL_F_SSL_CTX_SET_ALPN_PROTOS 343 +# define SSL_F_SSL_CTX_SET_CIPHER_LIST 269 +# define SSL_F_SSL_CTX_SET_CLIENT_CERT_ENGINE 290 +# define SSL_F_SSL_CTX_SET_CT_VALIDATION_CALLBACK 396 +# define SSL_F_SSL_CTX_SET_SESSION_ID_CONTEXT 219 +# define SSL_F_SSL_CTX_SET_SSL_VERSION 170 +# define SSL_F_SSL_CTX_SET_TLSEXT_MAX_FRAGMENT_LENGTH 551 +# define SSL_F_SSL_CTX_USE_CERTIFICATE 171 +# define SSL_F_SSL_CTX_USE_CERTIFICATE_ASN1 172 +# define SSL_F_SSL_CTX_USE_CERTIFICATE_FILE 173 +# define SSL_F_SSL_CTX_USE_PRIVATEKEY 174 +# define SSL_F_SSL_CTX_USE_PRIVATEKEY_ASN1 175 +# define SSL_F_SSL_CTX_USE_PRIVATEKEY_FILE 176 +# define 
SSL_F_SSL_CTX_USE_PSK_IDENTITY_HINT 272 +# define SSL_F_SSL_CTX_USE_RSAPRIVATEKEY 177 +# define SSL_F_SSL_CTX_USE_RSAPRIVATEKEY_ASN1 178 +# define SSL_F_SSL_CTX_USE_RSAPRIVATEKEY_FILE 179 +# define SSL_F_SSL_CTX_USE_SERVERINFO 336 +# define SSL_F_SSL_CTX_USE_SERVERINFO_EX 543 +# define SSL_F_SSL_CTX_USE_SERVERINFO_FILE 337 +# define SSL_F_SSL_DANE_DUP 403 +# define SSL_F_SSL_DANE_ENABLE 395 +# define SSL_F_SSL_DERIVE 590 +# define SSL_F_SSL_DO_CONFIG 391 +# define SSL_F_SSL_DO_HANDSHAKE 180 +# define SSL_F_SSL_DUP_CA_LIST 408 +# define SSL_F_SSL_ENABLE_CT 402 +# define SSL_F_SSL_GENERATE_PKEY_GROUP 559 +# define SSL_F_SSL_GENERATE_SESSION_ID 547 +# define SSL_F_SSL_GET_NEW_SESSION 181 +# define SSL_F_SSL_GET_PREV_SESSION 217 +# define SSL_F_SSL_GET_SERVER_CERT_INDEX 322 +# define SSL_F_SSL_GET_SIGN_PKEY 183 +# define SSL_F_SSL_HANDSHAKE_HASH 560 +# define SSL_F_SSL_INIT_WBIO_BUFFER 184 +# define SSL_F_SSL_KEY_UPDATE 515 +# define SSL_F_SSL_LOAD_CLIENT_CA_FILE 185 +# define SSL_F_SSL_LOG_MASTER_SECRET 498 +# define SSL_F_SSL_LOG_RSA_CLIENT_KEY_EXCHANGE 499 +# define SSL_F_SSL_MODULE_INIT 392 +# define SSL_F_SSL_NEW 186 +# define SSL_F_SSL_NEXT_PROTO_VALIDATE 565 +# define SSL_F_SSL_PARSE_CLIENTHELLO_RENEGOTIATE_EXT 300 +# define SSL_F_SSL_PARSE_CLIENTHELLO_TLSEXT 302 +# define SSL_F_SSL_PARSE_CLIENTHELLO_USE_SRTP_EXT 310 +# define SSL_F_SSL_PARSE_SERVERHELLO_RENEGOTIATE_EXT 301 +# define SSL_F_SSL_PARSE_SERVERHELLO_TLSEXT 303 +# define SSL_F_SSL_PARSE_SERVERHELLO_USE_SRTP_EXT 311 +# define SSL_F_SSL_PEEK 270 +# define SSL_F_SSL_PEEK_EX 432 +# define SSL_F_SSL_PEEK_INTERNAL 522 +# define SSL_F_SSL_READ 223 +# define SSL_F_SSL_READ_EARLY_DATA 529 +# define SSL_F_SSL_READ_EX 434 +# define SSL_F_SSL_READ_INTERNAL 523 +# define SSL_F_SSL_RENEGOTIATE 516 +# define SSL_F_SSL_RENEGOTIATE_ABBREVIATED 546 +# define SSL_F_SSL_SCAN_CLIENTHELLO_TLSEXT 320 +# define SSL_F_SSL_SCAN_SERVERHELLO_TLSEXT 321 +# define SSL_F_SSL_SESSION_DUP 348 +# define SSL_F_SSL_SESSION_NEW 189 +# define SSL_F_SSL_SESSION_PRINT_FP 190 +# define SSL_F_SSL_SESSION_SET1_ID 423 +# define SSL_F_SSL_SESSION_SET1_ID_CONTEXT 312 +# define SSL_F_SSL_SET_ALPN_PROTOS 344 +# define SSL_F_SSL_SET_CERT 191 +# define SSL_F_SSL_SET_CERT_AND_KEY 621 +# define SSL_F_SSL_SET_CIPHER_LIST 271 +# define SSL_F_SSL_SET_CT_VALIDATION_CALLBACK 399 +# define SSL_F_SSL_SET_FD 192 +# define SSL_F_SSL_SET_PKEY 193 +# define SSL_F_SSL_SET_RFD 194 +# define SSL_F_SSL_SET_SESSION 195 +# define SSL_F_SSL_SET_SESSION_ID_CONTEXT 218 +# define SSL_F_SSL_SET_SESSION_TICKET_EXT 294 +# define SSL_F_SSL_SET_TLSEXT_MAX_FRAGMENT_LENGTH 550 +# define SSL_F_SSL_SET_WFD 196 +# define SSL_F_SSL_SHUTDOWN 224 +# define SSL_F_SSL_SRP_CTX_INIT 313 +# define SSL_F_SSL_START_ASYNC_JOB 389 +# define SSL_F_SSL_UNDEFINED_FUNCTION 197 +# define SSL_F_SSL_UNDEFINED_VOID_FUNCTION 244 +# define SSL_F_SSL_USE_CERTIFICATE 198 +# define SSL_F_SSL_USE_CERTIFICATE_ASN1 199 +# define SSL_F_SSL_USE_CERTIFICATE_FILE 200 +# define SSL_F_SSL_USE_PRIVATEKEY 201 +# define SSL_F_SSL_USE_PRIVATEKEY_ASN1 202 +# define SSL_F_SSL_USE_PRIVATEKEY_FILE 203 +# define SSL_F_SSL_USE_PSK_IDENTITY_HINT 273 +# define SSL_F_SSL_USE_RSAPRIVATEKEY 204 +# define SSL_F_SSL_USE_RSAPRIVATEKEY_ASN1 205 +# define SSL_F_SSL_USE_RSAPRIVATEKEY_FILE 206 +# define SSL_F_SSL_VALIDATE_CT 400 +# define SSL_F_SSL_VERIFY_CERT_CHAIN 207 +# define SSL_F_SSL_VERIFY_CLIENT_POST_HANDSHAKE 616 +# define SSL_F_SSL_WRITE 208 +# define SSL_F_SSL_WRITE_EARLY_DATA 526 +# define SSL_F_SSL_WRITE_EARLY_FINISH 527 +# define SSL_F_SSL_WRITE_EX 
433 +# define SSL_F_SSL_WRITE_INTERNAL 524 +# define SSL_F_STATE_MACHINE 353 +# define SSL_F_TLS12_CHECK_PEER_SIGALG 333 +# define SSL_F_TLS12_COPY_SIGALGS 533 +# define SSL_F_TLS13_CHANGE_CIPHER_STATE 440 +# define SSL_F_TLS13_ENC 609 +# define SSL_F_TLS13_FINAL_FINISH_MAC 605 +# define SSL_F_TLS13_GENERATE_SECRET 591 +# define SSL_F_TLS13_HKDF_EXPAND 561 +# define SSL_F_TLS13_RESTORE_HANDSHAKE_DIGEST_FOR_PHA 617 +# define SSL_F_TLS13_SAVE_HANDSHAKE_DIGEST_FOR_PHA 618 +# define SSL_F_TLS13_SETUP_KEY_BLOCK 441 +# define SSL_F_TLS1_CHANGE_CIPHER_STATE 209 +# define SSL_F_TLS1_CHECK_DUPLICATE_EXTENSIONS 341 +# define SSL_F_TLS1_ENC 401 +# define SSL_F_TLS1_EXPORT_KEYING_MATERIAL 314 +# define SSL_F_TLS1_GET_CURVELIST 338 +# define SSL_F_TLS1_PRF 284 +# define SSL_F_TLS1_SAVE_U16 628 +# define SSL_F_TLS1_SETUP_KEY_BLOCK 211 +# define SSL_F_TLS1_SET_GROUPS 629 +# define SSL_F_TLS1_SET_RAW_SIGALGS 630 +# define SSL_F_TLS1_SET_SERVER_SIGALGS 335 +# define SSL_F_TLS1_SET_SHARED_SIGALGS 631 +# define SSL_F_TLS1_SET_SIGALGS 632 +# define SSL_F_TLS_CHOOSE_SIGALG 513 +# define SSL_F_TLS_CLIENT_KEY_EXCHANGE_POST_WORK 354 +# define SSL_F_TLS_COLLECT_EXTENSIONS 435 +# define SSL_F_TLS_CONSTRUCT_CERTIFICATE_AUTHORITIES 542 +# define SSL_F_TLS_CONSTRUCT_CERTIFICATE_REQUEST 372 +# define SSL_F_TLS_CONSTRUCT_CERT_STATUS 429 +# define SSL_F_TLS_CONSTRUCT_CERT_STATUS_BODY 494 +# define SSL_F_TLS_CONSTRUCT_CERT_VERIFY 496 +# define SSL_F_TLS_CONSTRUCT_CHANGE_CIPHER_SPEC 427 +# define SSL_F_TLS_CONSTRUCT_CKE_DHE 404 +# define SSL_F_TLS_CONSTRUCT_CKE_ECDHE 405 +# define SSL_F_TLS_CONSTRUCT_CKE_GOST 406 +# define SSL_F_TLS_CONSTRUCT_CKE_PSK_PREAMBLE 407 +# define SSL_F_TLS_CONSTRUCT_CKE_RSA 409 +# define SSL_F_TLS_CONSTRUCT_CKE_SRP 410 +# define SSL_F_TLS_CONSTRUCT_CLIENT_CERTIFICATE 484 +# define SSL_F_TLS_CONSTRUCT_CLIENT_HELLO 487 +# define SSL_F_TLS_CONSTRUCT_CLIENT_KEY_EXCHANGE 488 +# define SSL_F_TLS_CONSTRUCT_CLIENT_VERIFY 489 +# define SSL_F_TLS_CONSTRUCT_CTOS_ALPN 466 +# define SSL_F_TLS_CONSTRUCT_CTOS_CERTIFICATE 355 +# define SSL_F_TLS_CONSTRUCT_CTOS_COOKIE 535 +# define SSL_F_TLS_CONSTRUCT_CTOS_EARLY_DATA 530 +# define SSL_F_TLS_CONSTRUCT_CTOS_EC_PT_FORMATS 467 +# define SSL_F_TLS_CONSTRUCT_CTOS_EMS 468 +# define SSL_F_TLS_CONSTRUCT_CTOS_ETM 469 +# define SSL_F_TLS_CONSTRUCT_CTOS_HELLO 356 +# define SSL_F_TLS_CONSTRUCT_CTOS_KEY_EXCHANGE 357 +# define SSL_F_TLS_CONSTRUCT_CTOS_KEY_SHARE 470 +# define SSL_F_TLS_CONSTRUCT_CTOS_MAXFRAGMENTLEN 549 +# define SSL_F_TLS_CONSTRUCT_CTOS_NPN 471 +# define SSL_F_TLS_CONSTRUCT_CTOS_PADDING 472 +# define SSL_F_TLS_CONSTRUCT_CTOS_POST_HANDSHAKE_AUTH 619 +# define SSL_F_TLS_CONSTRUCT_CTOS_PSK 501 +# define SSL_F_TLS_CONSTRUCT_CTOS_PSK_KEX_MODES 509 +# define SSL_F_TLS_CONSTRUCT_CTOS_RENEGOTIATE 473 +# define SSL_F_TLS_CONSTRUCT_CTOS_SCT 474 +# define SSL_F_TLS_CONSTRUCT_CTOS_SERVER_NAME 475 +# define SSL_F_TLS_CONSTRUCT_CTOS_SESSION_TICKET 476 +# define SSL_F_TLS_CONSTRUCT_CTOS_SIG_ALGS 477 +# define SSL_F_TLS_CONSTRUCT_CTOS_SRP 478 +# define SSL_F_TLS_CONSTRUCT_CTOS_STATUS_REQUEST 479 +# define SSL_F_TLS_CONSTRUCT_CTOS_SUPPORTED_GROUPS 480 +# define SSL_F_TLS_CONSTRUCT_CTOS_SUPPORTED_VERSIONS 481 +# define SSL_F_TLS_CONSTRUCT_CTOS_USE_SRTP 482 +# define SSL_F_TLS_CONSTRUCT_CTOS_VERIFY 358 +# define SSL_F_TLS_CONSTRUCT_ENCRYPTED_EXTENSIONS 443 +# define SSL_F_TLS_CONSTRUCT_END_OF_EARLY_DATA 536 +# define SSL_F_TLS_CONSTRUCT_EXTENSIONS 447 +# define SSL_F_TLS_CONSTRUCT_FINISHED 359 +# define SSL_F_TLS_CONSTRUCT_HELLO_REQUEST 373 +# define 
SSL_F_TLS_CONSTRUCT_HELLO_RETRY_REQUEST 510 +# define SSL_F_TLS_CONSTRUCT_KEY_UPDATE 517 +# define SSL_F_TLS_CONSTRUCT_NEW_SESSION_TICKET 428 +# define SSL_F_TLS_CONSTRUCT_NEXT_PROTO 426 +# define SSL_F_TLS_CONSTRUCT_SERVER_CERTIFICATE 490 +# define SSL_F_TLS_CONSTRUCT_SERVER_HELLO 491 +# define SSL_F_TLS_CONSTRUCT_SERVER_KEY_EXCHANGE 492 +# define SSL_F_TLS_CONSTRUCT_STOC_ALPN 451 +# define SSL_F_TLS_CONSTRUCT_STOC_CERTIFICATE 374 +# define SSL_F_TLS_CONSTRUCT_STOC_COOKIE 613 +# define SSL_F_TLS_CONSTRUCT_STOC_CRYPTOPRO_BUG 452 +# define SSL_F_TLS_CONSTRUCT_STOC_DONE 375 +# define SSL_F_TLS_CONSTRUCT_STOC_EARLY_DATA 531 +# define SSL_F_TLS_CONSTRUCT_STOC_EARLY_DATA_INFO 525 +# define SSL_F_TLS_CONSTRUCT_STOC_EC_PT_FORMATS 453 +# define SSL_F_TLS_CONSTRUCT_STOC_EMS 454 +# define SSL_F_TLS_CONSTRUCT_STOC_ETM 455 +# define SSL_F_TLS_CONSTRUCT_STOC_HELLO 376 +# define SSL_F_TLS_CONSTRUCT_STOC_KEY_EXCHANGE 377 +# define SSL_F_TLS_CONSTRUCT_STOC_KEY_SHARE 456 +# define SSL_F_TLS_CONSTRUCT_STOC_MAXFRAGMENTLEN 548 +# define SSL_F_TLS_CONSTRUCT_STOC_NEXT_PROTO_NEG 457 +# define SSL_F_TLS_CONSTRUCT_STOC_PSK 504 +# define SSL_F_TLS_CONSTRUCT_STOC_RENEGOTIATE 458 +# define SSL_F_TLS_CONSTRUCT_STOC_SERVER_NAME 459 +# define SSL_F_TLS_CONSTRUCT_STOC_SESSION_TICKET 460 +# define SSL_F_TLS_CONSTRUCT_STOC_STATUS_REQUEST 461 +# define SSL_F_TLS_CONSTRUCT_STOC_SUPPORTED_GROUPS 544 +# define SSL_F_TLS_CONSTRUCT_STOC_SUPPORTED_VERSIONS 611 +# define SSL_F_TLS_CONSTRUCT_STOC_USE_SRTP 462 +# define SSL_F_TLS_EARLY_POST_PROCESS_CLIENT_HELLO 521 +# define SSL_F_TLS_FINISH_HANDSHAKE 597 +# define SSL_F_TLS_GET_MESSAGE_BODY 351 +# define SSL_F_TLS_GET_MESSAGE_HEADER 387 +# define SSL_F_TLS_HANDLE_ALPN 562 +# define SSL_F_TLS_HANDLE_STATUS_REQUEST 563 +# define SSL_F_TLS_PARSE_CERTIFICATE_AUTHORITIES 566 +# define SSL_F_TLS_PARSE_CLIENTHELLO_TLSEXT 449 +# define SSL_F_TLS_PARSE_CTOS_ALPN 567 +# define SSL_F_TLS_PARSE_CTOS_COOKIE 614 +# define SSL_F_TLS_PARSE_CTOS_EARLY_DATA 568 +# define SSL_F_TLS_PARSE_CTOS_EC_PT_FORMATS 569 +# define SSL_F_TLS_PARSE_CTOS_EMS 570 +# define SSL_F_TLS_PARSE_CTOS_KEY_SHARE 463 +# define SSL_F_TLS_PARSE_CTOS_MAXFRAGMENTLEN 571 +# define SSL_F_TLS_PARSE_CTOS_POST_HANDSHAKE_AUTH 620 +# define SSL_F_TLS_PARSE_CTOS_PSK 505 +# define SSL_F_TLS_PARSE_CTOS_PSK_KEX_MODES 572 +# define SSL_F_TLS_PARSE_CTOS_RENEGOTIATE 464 +# define SSL_F_TLS_PARSE_CTOS_SERVER_NAME 573 +# define SSL_F_TLS_PARSE_CTOS_SESSION_TICKET 574 +# define SSL_F_TLS_PARSE_CTOS_SIG_ALGS 575 +# define SSL_F_TLS_PARSE_CTOS_SIG_ALGS_CERT 615 +# define SSL_F_TLS_PARSE_CTOS_SRP 576 +# define SSL_F_TLS_PARSE_CTOS_STATUS_REQUEST 577 +# define SSL_F_TLS_PARSE_CTOS_SUPPORTED_GROUPS 578 +# define SSL_F_TLS_PARSE_CTOS_USE_SRTP 465 +# define SSL_F_TLS_PARSE_STOC_ALPN 579 +# define SSL_F_TLS_PARSE_STOC_COOKIE 534 +# define SSL_F_TLS_PARSE_STOC_EARLY_DATA 538 +# define SSL_F_TLS_PARSE_STOC_EARLY_DATA_INFO 528 +# define SSL_F_TLS_PARSE_STOC_EC_PT_FORMATS 580 +# define SSL_F_TLS_PARSE_STOC_KEY_SHARE 445 +# define SSL_F_TLS_PARSE_STOC_MAXFRAGMENTLEN 581 +# define SSL_F_TLS_PARSE_STOC_NPN 582 +# define SSL_F_TLS_PARSE_STOC_PSK 502 +# define SSL_F_TLS_PARSE_STOC_RENEGOTIATE 448 +# define SSL_F_TLS_PARSE_STOC_SCT 564 +# define SSL_F_TLS_PARSE_STOC_SERVER_NAME 583 +# define SSL_F_TLS_PARSE_STOC_SESSION_TICKET 584 +# define SSL_F_TLS_PARSE_STOC_STATUS_REQUEST 585 +# define SSL_F_TLS_PARSE_STOC_SUPPORTED_VERSIONS 612 +# define SSL_F_TLS_PARSE_STOC_USE_SRTP 446 +# define SSL_F_TLS_POST_PROCESS_CLIENT_HELLO 378 +# define 
SSL_F_TLS_POST_PROCESS_CLIENT_KEY_EXCHANGE 384 +# define SSL_F_TLS_PREPARE_CLIENT_CERTIFICATE 360 +# define SSL_F_TLS_PROCESS_AS_HELLO_RETRY_REQUEST 610 +# define SSL_F_TLS_PROCESS_CERTIFICATE_REQUEST 361 +# define SSL_F_TLS_PROCESS_CERT_STATUS 362 +# define SSL_F_TLS_PROCESS_CERT_STATUS_BODY 495 +# define SSL_F_TLS_PROCESS_CERT_VERIFY 379 +# define SSL_F_TLS_PROCESS_CHANGE_CIPHER_SPEC 363 +# define SSL_F_TLS_PROCESS_CKE_DHE 411 +# define SSL_F_TLS_PROCESS_CKE_ECDHE 412 +# define SSL_F_TLS_PROCESS_CKE_GOST 413 +# define SSL_F_TLS_PROCESS_CKE_PSK_PREAMBLE 414 +# define SSL_F_TLS_PROCESS_CKE_RSA 415 +# define SSL_F_TLS_PROCESS_CKE_SRP 416 +# define SSL_F_TLS_PROCESS_CLIENT_CERTIFICATE 380 +# define SSL_F_TLS_PROCESS_CLIENT_HELLO 381 +# define SSL_F_TLS_PROCESS_CLIENT_KEY_EXCHANGE 382 +# define SSL_F_TLS_PROCESS_ENCRYPTED_EXTENSIONS 444 +# define SSL_F_TLS_PROCESS_END_OF_EARLY_DATA 537 +# define SSL_F_TLS_PROCESS_FINISHED 364 +# define SSL_F_TLS_PROCESS_HELLO_REQ 507 +# define SSL_F_TLS_PROCESS_HELLO_RETRY_REQUEST 511 +# define SSL_F_TLS_PROCESS_INITIAL_SERVER_FLIGHT 442 +# define SSL_F_TLS_PROCESS_KEY_EXCHANGE 365 +# define SSL_F_TLS_PROCESS_KEY_UPDATE 518 +# define SSL_F_TLS_PROCESS_NEW_SESSION_TICKET 366 +# define SSL_F_TLS_PROCESS_NEXT_PROTO 383 +# define SSL_F_TLS_PROCESS_SERVER_CERTIFICATE 367 +# define SSL_F_TLS_PROCESS_SERVER_DONE 368 +# define SSL_F_TLS_PROCESS_SERVER_HELLO 369 +# define SSL_F_TLS_PROCESS_SKE_DHE 419 +# define SSL_F_TLS_PROCESS_SKE_ECDHE 420 +# define SSL_F_TLS_PROCESS_SKE_PSK_PREAMBLE 421 +# define SSL_F_TLS_PROCESS_SKE_SRP 422 +# define SSL_F_TLS_PSK_DO_BINDER 506 +# define SSL_F_TLS_SCAN_CLIENTHELLO_TLSEXT 450 +# define SSL_F_TLS_SETUP_HANDSHAKE 508 +# define SSL_F_USE_CERTIFICATE_CHAIN_FILE 220 +# define SSL_F_WPACKET_INTERN_INIT_LEN 633 +# define SSL_F_WPACKET_START_SUB_PACKET_LEN__ 634 +# define SSL_F_WRITE_STATE_MACHINE 586 + +/* + * SSL reason codes. 
+ */ +# define SSL_R_APPLICATION_DATA_AFTER_CLOSE_NOTIFY 291 +# define SSL_R_APP_DATA_IN_HANDSHAKE 100 +# define SSL_R_ATTEMPT_TO_REUSE_SESSION_IN_DIFFERENT_CONTEXT 272 +# define SSL_R_AT_LEAST_TLS_1_0_NEEDED_IN_FIPS_MODE 143 +# define SSL_R_AT_LEAST_TLS_1_2_NEEDED_IN_SUITEB_MODE 158 +# define SSL_R_BAD_CHANGE_CIPHER_SPEC 103 +# define SSL_R_BAD_CIPHER 186 +# define SSL_R_BAD_DATA 390 +# define SSL_R_BAD_DATA_RETURNED_BY_CALLBACK 106 +# define SSL_R_BAD_DECOMPRESSION 107 +# define SSL_R_BAD_DH_VALUE 102 +# define SSL_R_BAD_DIGEST_LENGTH 111 +# define SSL_R_BAD_EARLY_DATA 233 +# define SSL_R_BAD_ECC_CERT 304 +# define SSL_R_BAD_ECPOINT 306 +# define SSL_R_BAD_EXTENSION 110 +# define SSL_R_BAD_HANDSHAKE_LENGTH 332 +# define SSL_R_BAD_HANDSHAKE_STATE 236 +# define SSL_R_BAD_HELLO_REQUEST 105 +# define SSL_R_BAD_HRR_VERSION 263 +# define SSL_R_BAD_KEY_SHARE 108 +# define SSL_R_BAD_KEY_UPDATE 122 +# define SSL_R_BAD_LEGACY_VERSION 292 +# define SSL_R_BAD_LENGTH 271 +# define SSL_R_BAD_PACKET 240 +# define SSL_R_BAD_PACKET_LENGTH 115 +# define SSL_R_BAD_PROTOCOL_VERSION_NUMBER 116 +# define SSL_R_BAD_PSK 219 +# define SSL_R_BAD_PSK_IDENTITY 114 +# define SSL_R_BAD_RECORD_TYPE 443 +# define SSL_R_BAD_RSA_ENCRYPT 119 +# define SSL_R_BAD_SIGNATURE 123 +# define SSL_R_BAD_SRP_A_LENGTH 347 +# define SSL_R_BAD_SRP_PARAMETERS 371 +# define SSL_R_BAD_SRTP_MKI_VALUE 352 +# define SSL_R_BAD_SRTP_PROTECTION_PROFILE_LIST 353 +# define SSL_R_BAD_SSL_FILETYPE 124 +# define SSL_R_BAD_VALUE 384 +# define SSL_R_BAD_WRITE_RETRY 127 +# define SSL_R_BINDER_DOES_NOT_VERIFY 253 +# define SSL_R_BIO_NOT_SET 128 +# define SSL_R_BLOCK_CIPHER_PAD_IS_WRONG 129 +# define SSL_R_BN_LIB 130 +# define SSL_R_CALLBACK_FAILED 234 +# define SSL_R_CANNOT_CHANGE_CIPHER 109 +# define SSL_R_CA_DN_LENGTH_MISMATCH 131 +# define SSL_R_CA_KEY_TOO_SMALL 397 +# define SSL_R_CA_MD_TOO_WEAK 398 +# define SSL_R_CCS_RECEIVED_EARLY 133 +# define SSL_R_CERTIFICATE_VERIFY_FAILED 134 +# define SSL_R_CERT_CB_ERROR 377 +# define SSL_R_CERT_LENGTH_MISMATCH 135 +# define SSL_R_CIPHERSUITE_DIGEST_HAS_CHANGED 218 +# define SSL_R_CIPHER_CODE_WRONG_LENGTH 137 +# define SSL_R_CIPHER_OR_HASH_UNAVAILABLE 138 +# define SSL_R_CLIENTHELLO_TLSEXT 226 +# define SSL_R_COMPRESSED_LENGTH_TOO_LONG 140 +# define SSL_R_COMPRESSION_DISABLED 343 +# define SSL_R_COMPRESSION_FAILURE 141 +# define SSL_R_COMPRESSION_ID_NOT_WITHIN_PRIVATE_RANGE 307 +# define SSL_R_COMPRESSION_LIBRARY_ERROR 142 +# define SSL_R_CONNECTION_TYPE_NOT_SET 144 +# define SSL_R_CONTEXT_NOT_DANE_ENABLED 167 +# define SSL_R_COOKIE_GEN_CALLBACK_FAILURE 400 +# define SSL_R_COOKIE_MISMATCH 308 +# define SSL_R_CUSTOM_EXT_HANDLER_ALREADY_INSTALLED 206 +# define SSL_R_DANE_ALREADY_ENABLED 172 +# define SSL_R_DANE_CANNOT_OVERRIDE_MTYPE_FULL 173 +# define SSL_R_DANE_NOT_ENABLED 175 +# define SSL_R_DANE_TLSA_BAD_CERTIFICATE 180 +# define SSL_R_DANE_TLSA_BAD_CERTIFICATE_USAGE 184 +# define SSL_R_DANE_TLSA_BAD_DATA_LENGTH 189 +# define SSL_R_DANE_TLSA_BAD_DIGEST_LENGTH 192 +# define SSL_R_DANE_TLSA_BAD_MATCHING_TYPE 200 +# define SSL_R_DANE_TLSA_BAD_PUBLIC_KEY 201 +# define SSL_R_DANE_TLSA_BAD_SELECTOR 202 +# define SSL_R_DANE_TLSA_NULL_DATA 203 +# define SSL_R_DATA_BETWEEN_CCS_AND_FINISHED 145 +# define SSL_R_DATA_LENGTH_TOO_LONG 146 +# define SSL_R_DECRYPTION_FAILED 147 +# define SSL_R_DECRYPTION_FAILED_OR_BAD_RECORD_MAC 281 +# define SSL_R_DH_KEY_TOO_SMALL 394 +# define SSL_R_DH_PUBLIC_VALUE_LENGTH_IS_WRONG 148 +# define SSL_R_DIGEST_CHECK_FAILED 149 +# define SSL_R_DTLS_MESSAGE_TOO_BIG 334 +# define 
SSL_R_DUPLICATE_COMPRESSION_ID 309 +# define SSL_R_ECC_CERT_NOT_FOR_SIGNING 318 +# define SSL_R_ECDH_REQUIRED_FOR_SUITEB_MODE 374 +# define SSL_R_EE_KEY_TOO_SMALL 399 +# define SSL_R_EMPTY_SRTP_PROTECTION_PROFILE_LIST 354 +# define SSL_R_ENCRYPTED_LENGTH_TOO_LONG 150 +# define SSL_R_ERROR_IN_RECEIVED_CIPHER_LIST 151 +# define SSL_R_ERROR_SETTING_TLSA_BASE_DOMAIN 204 +# define SSL_R_EXCEEDS_MAX_FRAGMENT_SIZE 194 +# define SSL_R_EXCESSIVE_MESSAGE_SIZE 152 +# define SSL_R_EXTENSION_NOT_RECEIVED 279 +# define SSL_R_EXTRA_DATA_IN_MESSAGE 153 +# define SSL_R_EXT_LENGTH_MISMATCH 163 +# define SSL_R_FAILED_TO_INIT_ASYNC 405 +# define SSL_R_FRAGMENTED_CLIENT_HELLO 401 +# define SSL_R_GOT_A_FIN_BEFORE_A_CCS 154 +# define SSL_R_HTTPS_PROXY_REQUEST 155 +# define SSL_R_HTTP_REQUEST 156 +# define SSL_R_ILLEGAL_POINT_COMPRESSION 162 +# define SSL_R_ILLEGAL_SUITEB_DIGEST 380 +# define SSL_R_INAPPROPRIATE_FALLBACK 373 +# define SSL_R_INCONSISTENT_COMPRESSION 340 +# define SSL_R_INCONSISTENT_EARLY_DATA_ALPN 222 +# define SSL_R_INCONSISTENT_EARLY_DATA_SNI 231 +# define SSL_R_INCONSISTENT_EXTMS 104 +# define SSL_R_INSUFFICIENT_SECURITY 241 +# define SSL_R_INVALID_ALERT 205 +# define SSL_R_INVALID_CCS_MESSAGE 260 +# define SSL_R_INVALID_CERTIFICATE_OR_ALG 238 +# define SSL_R_INVALID_COMMAND 280 +# define SSL_R_INVALID_COMPRESSION_ALGORITHM 341 +# define SSL_R_INVALID_CONFIG 283 +# define SSL_R_INVALID_CONFIGURATION_NAME 113 +# define SSL_R_INVALID_CONTEXT 282 +# define SSL_R_INVALID_CT_VALIDATION_TYPE 212 +# define SSL_R_INVALID_KEY_UPDATE_TYPE 120 +# define SSL_R_INVALID_MAX_EARLY_DATA 174 +# define SSL_R_INVALID_NULL_CMD_NAME 385 +# define SSL_R_INVALID_SEQUENCE_NUMBER 402 +# define SSL_R_INVALID_SERVERINFO_DATA 388 +# define SSL_R_INVALID_SESSION_ID 999 +# define SSL_R_INVALID_SRP_USERNAME 357 +# define SSL_R_INVALID_STATUS_RESPONSE 328 +# define SSL_R_INVALID_TICKET_KEYS_LENGTH 325 +# define SSL_R_LENGTH_MISMATCH 159 +# define SSL_R_LENGTH_TOO_LONG 404 +# define SSL_R_LENGTH_TOO_SHORT 160 +# define SSL_R_LIBRARY_BUG 274 +# define SSL_R_LIBRARY_HAS_NO_CIPHERS 161 +# define SSL_R_MISSING_DSA_SIGNING_CERT 165 +# define SSL_R_MISSING_ECDSA_SIGNING_CERT 381 +# define SSL_R_MISSING_FATAL 256 +# define SSL_R_MISSING_PARAMETERS 290 +# define SSL_R_MISSING_PSK_KEX_MODES_EXTENSION 310 +# define SSL_R_MISSING_RSA_CERTIFICATE 168 +# define SSL_R_MISSING_RSA_ENCRYPTING_CERT 169 +# define SSL_R_MISSING_RSA_SIGNING_CERT 170 +# define SSL_R_MISSING_SIGALGS_EXTENSION 112 +# define SSL_R_MISSING_SIGNING_CERT 221 +# define SSL_R_MISSING_SRP_PARAM 358 +# define SSL_R_MISSING_SUPPORTED_GROUPS_EXTENSION 209 +# define SSL_R_MISSING_TMP_DH_KEY 171 +# define SSL_R_MISSING_TMP_ECDH_KEY 311 +# define SSL_R_MIXED_HANDSHAKE_AND_NON_HANDSHAKE_DATA 293 +# define SSL_R_NOT_ON_RECORD_BOUNDARY 182 +# define SSL_R_NOT_REPLACING_CERTIFICATE 289 +# define SSL_R_NOT_SERVER 284 +# define SSL_R_NO_APPLICATION_PROTOCOL 235 +# define SSL_R_NO_CERTIFICATES_RETURNED 176 +# define SSL_R_NO_CERTIFICATE_ASSIGNED 177 +# define SSL_R_NO_CERTIFICATE_SET 179 +# define SSL_R_NO_CHANGE_FOLLOWING_HRR 214 +# define SSL_R_NO_CIPHERS_AVAILABLE 181 +# define SSL_R_NO_CIPHERS_SPECIFIED 183 +# define SSL_R_NO_CIPHER_MATCH 185 +# define SSL_R_NO_CLIENT_CERT_METHOD 331 +# define SSL_R_NO_COMPRESSION_SPECIFIED 187 +# define SSL_R_NO_COOKIE_CALLBACK_SET 287 +# define SSL_R_NO_GOST_CERTIFICATE_SENT_BY_PEER 330 +# define SSL_R_NO_METHOD_SPECIFIED 188 +# define SSL_R_NO_PEM_EXTENSIONS 389 +# define SSL_R_NO_PRIVATE_KEY_ASSIGNED 190 +# define SSL_R_NO_PROTOCOLS_AVAILABLE 
191 +# define SSL_R_NO_RENEGOTIATION 339 +# define SSL_R_NO_REQUIRED_DIGEST 324 +# define SSL_R_NO_SHARED_CIPHER 193 +# define SSL_R_NO_SHARED_GROUPS 410 +# define SSL_R_NO_SHARED_SIGNATURE_ALGORITHMS 376 +# define SSL_R_NO_SRTP_PROFILES 359 +# define SSL_R_NO_SUITABLE_KEY_SHARE 101 +# define SSL_R_NO_SUITABLE_SIGNATURE_ALGORITHM 118 +# define SSL_R_NO_VALID_SCTS 216 +# define SSL_R_NO_VERIFY_COOKIE_CALLBACK 403 +# define SSL_R_NULL_SSL_CTX 195 +# define SSL_R_NULL_SSL_METHOD_PASSED 196 +# define SSL_R_OCSP_CALLBACK_FAILURE 294 +# define SSL_R_OLD_SESSION_CIPHER_NOT_RETURNED 197 +# define SSL_R_OLD_SESSION_COMPRESSION_ALGORITHM_NOT_RETURNED 344 +# define SSL_R_OVERFLOW_ERROR 237 +# define SSL_R_PACKET_LENGTH_TOO_LONG 198 +# define SSL_R_PARSE_TLSEXT 227 +# define SSL_R_PATH_TOO_LONG 270 +# define SSL_R_PEER_DID_NOT_RETURN_A_CERTIFICATE 199 +# define SSL_R_PEM_NAME_BAD_PREFIX 391 +# define SSL_R_PEM_NAME_TOO_SHORT 392 +# define SSL_R_PIPELINE_FAILURE 406 +# define SSL_R_POST_HANDSHAKE_AUTH_ENCODING_ERR 278 +# define SSL_R_PRIVATE_KEY_MISMATCH 288 +# define SSL_R_PROTOCOL_IS_SHUTDOWN 207 +# define SSL_R_PSK_IDENTITY_NOT_FOUND 223 +# define SSL_R_PSK_NO_CLIENT_CB 224 +# define SSL_R_PSK_NO_SERVER_CB 225 +# define SSL_R_READ_BIO_NOT_SET 211 +# define SSL_R_READ_TIMEOUT_EXPIRED 312 +# define SSL_R_RECORD_LENGTH_MISMATCH 213 +# define SSL_R_RECORD_TOO_SMALL 298 +# define SSL_R_RENEGOTIATE_EXT_TOO_LONG 335 +# define SSL_R_RENEGOTIATION_ENCODING_ERR 336 +# define SSL_R_RENEGOTIATION_MISMATCH 337 +# define SSL_R_REQUEST_PENDING 285 +# define SSL_R_REQUEST_SENT 286 +# define SSL_R_REQUIRED_CIPHER_MISSING 215 +# define SSL_R_REQUIRED_COMPRESSION_ALGORITHM_MISSING 342 +# define SSL_R_SCSV_RECEIVED_WHEN_RENEGOTIATING 345 +# define SSL_R_SCT_VERIFICATION_FAILED 208 +# define SSL_R_SERVERHELLO_TLSEXT 275 +# define SSL_R_SESSION_ID_CONTEXT_UNINITIALIZED 277 +# define SSL_R_SHUTDOWN_WHILE_IN_INIT 407 +# define SSL_R_SIGNATURE_ALGORITHMS_ERROR 360 +# define SSL_R_SIGNATURE_FOR_NON_SIGNING_CERTIFICATE 220 +# define SSL_R_SRP_A_CALC 361 +# define SSL_R_SRTP_COULD_NOT_ALLOCATE_PROFILES 362 +# define SSL_R_SRTP_PROTECTION_PROFILE_LIST_TOO_LONG 363 +# define SSL_R_SRTP_UNKNOWN_PROTECTION_PROFILE 364 +# define SSL_R_SSL3_EXT_INVALID_MAX_FRAGMENT_LENGTH 232 +# define SSL_R_SSL3_EXT_INVALID_SERVERNAME 319 +# define SSL_R_SSL3_EXT_INVALID_SERVERNAME_TYPE 320 +# define SSL_R_SSL3_SESSION_ID_TOO_LONG 300 +# define SSL_R_SSLV3_ALERT_BAD_CERTIFICATE 1042 +# define SSL_R_SSLV3_ALERT_BAD_RECORD_MAC 1020 +# define SSL_R_SSLV3_ALERT_CERTIFICATE_EXPIRED 1045 +# define SSL_R_SSLV3_ALERT_CERTIFICATE_REVOKED 1044 +# define SSL_R_SSLV3_ALERT_CERTIFICATE_UNKNOWN 1046 +# define SSL_R_SSLV3_ALERT_DECOMPRESSION_FAILURE 1030 +# define SSL_R_SSLV3_ALERT_HANDSHAKE_FAILURE 1040 +# define SSL_R_SSLV3_ALERT_ILLEGAL_PARAMETER 1047 +# define SSL_R_SSLV3_ALERT_NO_CERTIFICATE 1041 +# define SSL_R_SSLV3_ALERT_UNEXPECTED_MESSAGE 1010 +# define SSL_R_SSLV3_ALERT_UNSUPPORTED_CERTIFICATE 1043 +# define SSL_R_SSL_COMMAND_SECTION_EMPTY 117 +# define SSL_R_SSL_COMMAND_SECTION_NOT_FOUND 125 +# define SSL_R_SSL_CTX_HAS_NO_DEFAULT_SSL_VERSION 228 +# define SSL_R_SSL_HANDSHAKE_FAILURE 229 +# define SSL_R_SSL_LIBRARY_HAS_NO_CIPHERS 230 +# define SSL_R_SSL_NEGATIVE_LENGTH 372 +# define SSL_R_SSL_SECTION_EMPTY 126 +# define SSL_R_SSL_SECTION_NOT_FOUND 136 +# define SSL_R_SSL_SESSION_ID_CALLBACK_FAILED 301 +# define SSL_R_SSL_SESSION_ID_CONFLICT 302 +# define SSL_R_SSL_SESSION_ID_CONTEXT_TOO_LONG 273 +# define SSL_R_SSL_SESSION_ID_HAS_BAD_LENGTH 303 +# 
define SSL_R_SSL_SESSION_ID_TOO_LONG 408 +# define SSL_R_SSL_SESSION_VERSION_MISMATCH 210 +# define SSL_R_STILL_IN_INIT 121 +# define SSL_R_TLSV13_ALERT_CERTIFICATE_REQUIRED 1116 +# define SSL_R_TLSV13_ALERT_MISSING_EXTENSION 1109 +# define SSL_R_TLSV1_ALERT_ACCESS_DENIED 1049 +# define SSL_R_TLSV1_ALERT_DECODE_ERROR 1050 +# define SSL_R_TLSV1_ALERT_DECRYPTION_FAILED 1021 +# define SSL_R_TLSV1_ALERT_DECRYPT_ERROR 1051 +# define SSL_R_TLSV1_ALERT_EXPORT_RESTRICTION 1060 +# define SSL_R_TLSV1_ALERT_INAPPROPRIATE_FALLBACK 1086 +# define SSL_R_TLSV1_ALERT_INSUFFICIENT_SECURITY 1071 +# define SSL_R_TLSV1_ALERT_INTERNAL_ERROR 1080 +# define SSL_R_TLSV1_ALERT_NO_RENEGOTIATION 1100 +# define SSL_R_TLSV1_ALERT_PROTOCOL_VERSION 1070 +# define SSL_R_TLSV1_ALERT_RECORD_OVERFLOW 1022 +# define SSL_R_TLSV1_ALERT_UNKNOWN_CA 1048 +# define SSL_R_TLSV1_ALERT_USER_CANCELLED 1090 +# define SSL_R_TLSV1_BAD_CERTIFICATE_HASH_VALUE 1114 +# define SSL_R_TLSV1_BAD_CERTIFICATE_STATUS_RESPONSE 1113 +# define SSL_R_TLSV1_CERTIFICATE_UNOBTAINABLE 1111 +# define SSL_R_TLSV1_UNRECOGNIZED_NAME 1112 +# define SSL_R_TLSV1_UNSUPPORTED_EXTENSION 1110 +# define SSL_R_TLS_HEARTBEAT_PEER_DOESNT_ACCEPT 365 +# define SSL_R_TLS_HEARTBEAT_PENDING 366 +# define SSL_R_TLS_ILLEGAL_EXPORTER_LABEL 367 +# define SSL_R_TLS_INVALID_ECPOINTFORMAT_LIST 157 +# define SSL_R_TOO_MANY_KEY_UPDATES 132 +# define SSL_R_TOO_MANY_WARN_ALERTS 409 +# define SSL_R_TOO_MUCH_EARLY_DATA 164 +# define SSL_R_UNABLE_TO_FIND_ECDH_PARAMETERS 314 +# define SSL_R_UNABLE_TO_FIND_PUBLIC_KEY_PARAMETERS 239 +# define SSL_R_UNABLE_TO_LOAD_SSL3_MD5_ROUTINES 242 +# define SSL_R_UNABLE_TO_LOAD_SSL3_SHA1_ROUTINES 243 +# define SSL_R_UNEXPECTED_CCS_MESSAGE 262 +# define SSL_R_UNEXPECTED_END_OF_EARLY_DATA 178 +# define SSL_R_UNEXPECTED_MESSAGE 244 +# define SSL_R_UNEXPECTED_RECORD 245 +# define SSL_R_UNINITIALIZED 276 +# define SSL_R_UNKNOWN_ALERT_TYPE 246 +# define SSL_R_UNKNOWN_CERTIFICATE_TYPE 247 +# define SSL_R_UNKNOWN_CIPHER_RETURNED 248 +# define SSL_R_UNKNOWN_CIPHER_TYPE 249 +# define SSL_R_UNKNOWN_CMD_NAME 386 +# define SSL_R_UNKNOWN_COMMAND 139 +# define SSL_R_UNKNOWN_DIGEST 368 +# define SSL_R_UNKNOWN_KEY_EXCHANGE_TYPE 250 +# define SSL_R_UNKNOWN_PKEY_TYPE 251 +# define SSL_R_UNKNOWN_PROTOCOL 252 +# define SSL_R_UNKNOWN_SSL_VERSION 254 +# define SSL_R_UNKNOWN_STATE 255 +# define SSL_R_UNSAFE_LEGACY_RENEGOTIATION_DISABLED 338 +# define SSL_R_UNSOLICITED_EXTENSION 217 +# define SSL_R_UNSUPPORTED_COMPRESSION_ALGORITHM 257 +# define SSL_R_UNSUPPORTED_ELLIPTIC_CURVE 315 +# define SSL_R_UNSUPPORTED_PROTOCOL 258 +# define SSL_R_UNSUPPORTED_SSL_VERSION 259 +# define SSL_R_UNSUPPORTED_STATUS_TYPE 329 +# define SSL_R_USE_SRTP_NOT_NEGOTIATED 369 +# define SSL_R_VERSION_TOO_HIGH 166 +# define SSL_R_VERSION_TOO_LOW 396 +# define SSL_R_WRONG_CERTIFICATE_TYPE 383 +# define SSL_R_WRONG_CIPHER_RETURNED 261 +# define SSL_R_WRONG_CURVE 378 +# define SSL_R_WRONG_SIGNATURE_LENGTH 264 +# define SSL_R_WRONG_SIGNATURE_SIZE 265 +# define SSL_R_WRONG_SIGNATURE_TYPE 370 +# define SSL_R_WRONG_SSL_VERSION 266 +# define SSL_R_WRONG_VERSION_NUMBER 267 +# define SSL_R_X509_LIB 268 +# define SSL_R_X509_VERIFICATION_SETUP_PROBLEMS 269 + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/stack.h b/third_party/openssl/uos/sw_64/include/openssl/stack.h new file mode 100644 index 00000000..cfc07505 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/stack.h @@ -0,0 +1,83 @@ +/* + * Copyright 1995-2017 The OpenSSL Project Authors. All Rights Reserved. 
+ * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_STACK_H +# define HEADER_STACK_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct stack_st OPENSSL_STACK; /* Use STACK_OF(...) instead */ + +typedef int (*OPENSSL_sk_compfunc)(const void *, const void *); +typedef void (*OPENSSL_sk_freefunc)(void *); +typedef void *(*OPENSSL_sk_copyfunc)(const void *); + +int OPENSSL_sk_num(const OPENSSL_STACK *); +void *OPENSSL_sk_value(const OPENSSL_STACK *, int); + +void *OPENSSL_sk_set(OPENSSL_STACK *st, int i, const void *data); + +OPENSSL_STACK *OPENSSL_sk_new(OPENSSL_sk_compfunc cmp); +OPENSSL_STACK *OPENSSL_sk_new_null(void); +OPENSSL_STACK *OPENSSL_sk_new_reserve(OPENSSL_sk_compfunc c, int n); +int OPENSSL_sk_reserve(OPENSSL_STACK *st, int n); +void OPENSSL_sk_free(OPENSSL_STACK *); +void OPENSSL_sk_pop_free(OPENSSL_STACK *st, void (*func) (void *)); +OPENSSL_STACK *OPENSSL_sk_deep_copy(const OPENSSL_STACK *, + OPENSSL_sk_copyfunc c, + OPENSSL_sk_freefunc f); +int OPENSSL_sk_insert(OPENSSL_STACK *sk, const void *data, int where); +void *OPENSSL_sk_delete(OPENSSL_STACK *st, int loc); +void *OPENSSL_sk_delete_ptr(OPENSSL_STACK *st, const void *p); +int OPENSSL_sk_find(OPENSSL_STACK *st, const void *data); +int OPENSSL_sk_find_ex(OPENSSL_STACK *st, const void *data); +int OPENSSL_sk_push(OPENSSL_STACK *st, const void *data); +int OPENSSL_sk_unshift(OPENSSL_STACK *st, const void *data); +void *OPENSSL_sk_shift(OPENSSL_STACK *st); +void *OPENSSL_sk_pop(OPENSSL_STACK *st); +void OPENSSL_sk_zero(OPENSSL_STACK *st); +OPENSSL_sk_compfunc OPENSSL_sk_set_cmp_func(OPENSSL_STACK *sk, + OPENSSL_sk_compfunc cmp); +OPENSSL_STACK *OPENSSL_sk_dup(const OPENSSL_STACK *st); +void OPENSSL_sk_sort(OPENSSL_STACK *st); +int OPENSSL_sk_is_sorted(const OPENSSL_STACK *st); + +# if OPENSSL_API_COMPAT < 0x10100000L +# define _STACK OPENSSL_STACK +# define sk_num OPENSSL_sk_num +# define sk_value OPENSSL_sk_value +# define sk_set OPENSSL_sk_set +# define sk_new OPENSSL_sk_new +# define sk_new_null OPENSSL_sk_new_null +# define sk_free OPENSSL_sk_free +# define sk_pop_free OPENSSL_sk_pop_free +# define sk_deep_copy OPENSSL_sk_deep_copy +# define sk_insert OPENSSL_sk_insert +# define sk_delete OPENSSL_sk_delete +# define sk_delete_ptr OPENSSL_sk_delete_ptr +# define sk_find OPENSSL_sk_find +# define sk_find_ex OPENSSL_sk_find_ex +# define sk_push OPENSSL_sk_push +# define sk_unshift OPENSSL_sk_unshift +# define sk_shift OPENSSL_sk_shift +# define sk_pop OPENSSL_sk_pop +# define sk_zero OPENSSL_sk_zero +# define sk_set_cmp_func OPENSSL_sk_set_cmp_func +# define sk_dup OPENSSL_sk_dup +# define sk_sort OPENSSL_sk_sort +# define sk_is_sorted OPENSSL_sk_is_sorted +# endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/store.h b/third_party/openssl/uos/sw_64/include/openssl/store.h new file mode 100644 index 00000000..a40a7339 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/store.h @@ -0,0 +1,266 @@ +/* + * Copyright 2016-2019 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_OSSL_STORE_H +# define HEADER_OSSL_STORE_H + +# include +# include +# include +# include + +# ifdef __cplusplus +extern "C" { +# endif + +/*- + * The main OSSL_STORE functions. + * ------------------------------ + * + * These allow applications to open a channel to a resource with supported + * data (keys, certs, crls, ...), read the data a piece at a time and decide + * what to do with it, and finally close. + */ + +typedef struct ossl_store_ctx_st OSSL_STORE_CTX; + +/* + * Typedef for the OSSL_STORE_INFO post processing callback. This can be used + * to massage the given OSSL_STORE_INFO, or to drop it entirely (by returning + * NULL). + */ +typedef OSSL_STORE_INFO *(*OSSL_STORE_post_process_info_fn)(OSSL_STORE_INFO *, + void *); + +/* + * Open a channel given a URI. The given UI method will be used any time the + * loader needs extra input, for example when a password or pin is needed, and + * will be passed the same user data every time it's needed in this context. + * + * Returns a context reference which represents the channel to communicate + * through. + */ +OSSL_STORE_CTX *OSSL_STORE_open(const char *uri, const UI_METHOD *ui_method, + void *ui_data, + OSSL_STORE_post_process_info_fn post_process, + void *post_process_data); + +/* + * Control / fine tune the OSSL_STORE channel. |cmd| determines what is to be + * done, and depends on the underlying loader (use OSSL_STORE_get0_scheme to + * determine which loader is used), except for common commands (see below). + * Each command takes different arguments. + */ +int OSSL_STORE_ctrl(OSSL_STORE_CTX *ctx, int cmd, ... /* args */); +int OSSL_STORE_vctrl(OSSL_STORE_CTX *ctx, int cmd, va_list args); + +/* + * Common ctrl commands that different loaders may choose to support. + */ +/* int on = 0 or 1; STORE_ctrl(ctx, STORE_C_USE_SECMEM, &on); */ +# define OSSL_STORE_C_USE_SECMEM 1 +/* Where custom commands start */ +# define OSSL_STORE_C_CUSTOM_START 100 + +/* + * Read one data item (a key, a cert, a CRL) that is supported by the OSSL_STORE + * functionality, given a context. + * Returns a OSSL_STORE_INFO pointer, from which OpenSSL typed data can be + * extracted with OSSL_STORE_INFO_get0_PKEY(), OSSL_STORE_INFO_get0_CERT(), ... + * NULL is returned on error, which may include that the data found at the URI + * can't be figured out for certain or is ambiguous. + */ +OSSL_STORE_INFO *OSSL_STORE_load(OSSL_STORE_CTX *ctx); + +/* + * Check if end of data (end of file) is reached + * Returns 1 on end, 0 otherwise. + */ +int OSSL_STORE_eof(OSSL_STORE_CTX *ctx); + +/* + * Check if an error occurred + * Returns 1 if it did, 0 otherwise. + */ +int OSSL_STORE_error(OSSL_STORE_CTX *ctx); + +/* + * Close the channel + * Returns 1 on success, 0 on error. + */ +int OSSL_STORE_close(OSSL_STORE_CTX *ctx); + + +/*- + * Extracting OpenSSL types from and creating new OSSL_STORE_INFOs + * --------------------------------------------------------------- + */ + +/* + * Types of data that can be ossl_stored in a OSSL_STORE_INFO. + * OSSL_STORE_INFO_NAME is typically found when getting a listing of + * available "files" / "tokens" / what have you. 
+ */ +# define OSSL_STORE_INFO_NAME 1 /* char * */ +# define OSSL_STORE_INFO_PARAMS 2 /* EVP_PKEY * */ +# define OSSL_STORE_INFO_PKEY 3 /* EVP_PKEY * */ +# define OSSL_STORE_INFO_CERT 4 /* X509 * */ +# define OSSL_STORE_INFO_CRL 5 /* X509_CRL * */ + +/* + * Functions to generate OSSL_STORE_INFOs, one function for each type we + * support having in them, as well as a generic constructor. + * + * In all cases, ownership of the object is transferred to the OSSL_STORE_INFO + * and will therefore be freed when the OSSL_STORE_INFO is freed. + */ +OSSL_STORE_INFO *OSSL_STORE_INFO_new_NAME(char *name); +int OSSL_STORE_INFO_set0_NAME_description(OSSL_STORE_INFO *info, char *desc); +OSSL_STORE_INFO *OSSL_STORE_INFO_new_PARAMS(EVP_PKEY *params); +OSSL_STORE_INFO *OSSL_STORE_INFO_new_PKEY(EVP_PKEY *pkey); +OSSL_STORE_INFO *OSSL_STORE_INFO_new_CERT(X509 *x509); +OSSL_STORE_INFO *OSSL_STORE_INFO_new_CRL(X509_CRL *crl); + +/* + * Functions to try to extract data from a OSSL_STORE_INFO. + */ +int OSSL_STORE_INFO_get_type(const OSSL_STORE_INFO *info); +const char *OSSL_STORE_INFO_get0_NAME(const OSSL_STORE_INFO *info); +char *OSSL_STORE_INFO_get1_NAME(const OSSL_STORE_INFO *info); +const char *OSSL_STORE_INFO_get0_NAME_description(const OSSL_STORE_INFO *info); +char *OSSL_STORE_INFO_get1_NAME_description(const OSSL_STORE_INFO *info); +EVP_PKEY *OSSL_STORE_INFO_get0_PARAMS(const OSSL_STORE_INFO *info); +EVP_PKEY *OSSL_STORE_INFO_get1_PARAMS(const OSSL_STORE_INFO *info); +EVP_PKEY *OSSL_STORE_INFO_get0_PKEY(const OSSL_STORE_INFO *info); +EVP_PKEY *OSSL_STORE_INFO_get1_PKEY(const OSSL_STORE_INFO *info); +X509 *OSSL_STORE_INFO_get0_CERT(const OSSL_STORE_INFO *info); +X509 *OSSL_STORE_INFO_get1_CERT(const OSSL_STORE_INFO *info); +X509_CRL *OSSL_STORE_INFO_get0_CRL(const OSSL_STORE_INFO *info); +X509_CRL *OSSL_STORE_INFO_get1_CRL(const OSSL_STORE_INFO *info); + +const char *OSSL_STORE_INFO_type_string(int type); + +/* + * Free the OSSL_STORE_INFO + */ +void OSSL_STORE_INFO_free(OSSL_STORE_INFO *info); + + +/*- + * Functions to construct a search URI from a base URI and search criteria + * ----------------------------------------------------------------------- + */ + +/* OSSL_STORE search types */ +# define OSSL_STORE_SEARCH_BY_NAME 1 /* subject in certs, issuer in CRLs */ +# define OSSL_STORE_SEARCH_BY_ISSUER_SERIAL 2 +# define OSSL_STORE_SEARCH_BY_KEY_FINGERPRINT 3 +# define OSSL_STORE_SEARCH_BY_ALIAS 4 + +/* To check what search types the scheme handler supports */ +int OSSL_STORE_supports_search(OSSL_STORE_CTX *ctx, int search_type); + +/* Search term constructors */ +/* + * The input is considered to be owned by the caller, and must therefore + * remain present throughout the lifetime of the returned OSSL_STORE_SEARCH + */ +OSSL_STORE_SEARCH *OSSL_STORE_SEARCH_by_name(X509_NAME *name); +OSSL_STORE_SEARCH *OSSL_STORE_SEARCH_by_issuer_serial(X509_NAME *name, + const ASN1_INTEGER + *serial); +OSSL_STORE_SEARCH *OSSL_STORE_SEARCH_by_key_fingerprint(const EVP_MD *digest, + const unsigned char + *bytes, size_t len); +OSSL_STORE_SEARCH *OSSL_STORE_SEARCH_by_alias(const char *alias); + +/* Search term destructor */ +void OSSL_STORE_SEARCH_free(OSSL_STORE_SEARCH *search); + +/* Search term accessors */ +int OSSL_STORE_SEARCH_get_type(const OSSL_STORE_SEARCH *criterion); +X509_NAME *OSSL_STORE_SEARCH_get0_name(OSSL_STORE_SEARCH *criterion); +const ASN1_INTEGER *OSSL_STORE_SEARCH_get0_serial(const OSSL_STORE_SEARCH + *criterion); +const unsigned char *OSSL_STORE_SEARCH_get0_bytes(const OSSL_STORE_SEARCH + *criterion, 
size_t *length); +const char *OSSL_STORE_SEARCH_get0_string(const OSSL_STORE_SEARCH *criterion); +const EVP_MD *OSSL_STORE_SEARCH_get0_digest(const OSSL_STORE_SEARCH *criterion); + +/* + * Add search criterion and expected return type (which can be unspecified) + * to the loading channel. This MUST happen before the first OSSL_STORE_load(). + */ +int OSSL_STORE_expect(OSSL_STORE_CTX *ctx, int expected_type); +int OSSL_STORE_find(OSSL_STORE_CTX *ctx, OSSL_STORE_SEARCH *search); + + +/*- + * Function to register a loader for the given URI scheme. + * ------------------------------------------------------- + * + * The loader receives all the main components of an URI except for the + * scheme. + */ + +typedef struct ossl_store_loader_st OSSL_STORE_LOADER; +OSSL_STORE_LOADER *OSSL_STORE_LOADER_new(ENGINE *e, const char *scheme); +const ENGINE *OSSL_STORE_LOADER_get0_engine(const OSSL_STORE_LOADER *loader); +const char *OSSL_STORE_LOADER_get0_scheme(const OSSL_STORE_LOADER *loader); +/* struct ossl_store_loader_ctx_st is defined differently by each loader */ +typedef struct ossl_store_loader_ctx_st OSSL_STORE_LOADER_CTX; +typedef OSSL_STORE_LOADER_CTX *(*OSSL_STORE_open_fn)(const OSSL_STORE_LOADER + *loader, + const char *uri, + const UI_METHOD *ui_method, + void *ui_data); +int OSSL_STORE_LOADER_set_open(OSSL_STORE_LOADER *loader, + OSSL_STORE_open_fn open_function); +typedef int (*OSSL_STORE_ctrl_fn)(OSSL_STORE_LOADER_CTX *ctx, int cmd, + va_list args); +int OSSL_STORE_LOADER_set_ctrl(OSSL_STORE_LOADER *loader, + OSSL_STORE_ctrl_fn ctrl_function); +typedef int (*OSSL_STORE_expect_fn)(OSSL_STORE_LOADER_CTX *ctx, int expected); +int OSSL_STORE_LOADER_set_expect(OSSL_STORE_LOADER *loader, + OSSL_STORE_expect_fn expect_function); +typedef int (*OSSL_STORE_find_fn)(OSSL_STORE_LOADER_CTX *ctx, + OSSL_STORE_SEARCH *criteria); +int OSSL_STORE_LOADER_set_find(OSSL_STORE_LOADER *loader, + OSSL_STORE_find_fn find_function); +typedef OSSL_STORE_INFO *(*OSSL_STORE_load_fn)(OSSL_STORE_LOADER_CTX *ctx, + const UI_METHOD *ui_method, + void *ui_data); +int OSSL_STORE_LOADER_set_load(OSSL_STORE_LOADER *loader, + OSSL_STORE_load_fn load_function); +typedef int (*OSSL_STORE_eof_fn)(OSSL_STORE_LOADER_CTX *ctx); +int OSSL_STORE_LOADER_set_eof(OSSL_STORE_LOADER *loader, + OSSL_STORE_eof_fn eof_function); +typedef int (*OSSL_STORE_error_fn)(OSSL_STORE_LOADER_CTX *ctx); +int OSSL_STORE_LOADER_set_error(OSSL_STORE_LOADER *loader, + OSSL_STORE_error_fn error_function); +typedef int (*OSSL_STORE_close_fn)(OSSL_STORE_LOADER_CTX *ctx); +int OSSL_STORE_LOADER_set_close(OSSL_STORE_LOADER *loader, + OSSL_STORE_close_fn close_function); +void OSSL_STORE_LOADER_free(OSSL_STORE_LOADER *loader); + +int OSSL_STORE_register_loader(OSSL_STORE_LOADER *loader); +OSSL_STORE_LOADER *OSSL_STORE_unregister_loader(const char *scheme); + +/*- + * Functions to list STORE loaders + * ------------------------------- + */ +int OSSL_STORE_do_all_loaders(void (*do_function) (const OSSL_STORE_LOADER + *loader, void *do_arg), + void *do_arg); + +# ifdef __cplusplus +} +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/storeerr.h b/third_party/openssl/uos/sw_64/include/openssl/storeerr.h new file mode 100644 index 00000000..190eab07 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/storeerr.h @@ -0,0 +1,91 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). 
You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_OSSL_STOREERR_H +# define HEADER_OSSL_STOREERR_H + +# ifndef HEADER_SYMHACKS_H +# include +# endif + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_OSSL_STORE_strings(void); + +/* + * OSSL_STORE function codes. + */ +# define OSSL_STORE_F_FILE_CTRL 129 +# define OSSL_STORE_F_FILE_FIND 138 +# define OSSL_STORE_F_FILE_GET_PASS 118 +# define OSSL_STORE_F_FILE_LOAD 119 +# define OSSL_STORE_F_FILE_LOAD_TRY_DECODE 124 +# define OSSL_STORE_F_FILE_NAME_TO_URI 126 +# define OSSL_STORE_F_FILE_OPEN 120 +# define OSSL_STORE_F_OSSL_STORE_ATTACH_PEM_BIO 127 +# define OSSL_STORE_F_OSSL_STORE_EXPECT 130 +# define OSSL_STORE_F_OSSL_STORE_FILE_ATTACH_PEM_BIO_INT 128 +# define OSSL_STORE_F_OSSL_STORE_FIND 131 +# define OSSL_STORE_F_OSSL_STORE_GET0_LOADER_INT 100 +# define OSSL_STORE_F_OSSL_STORE_INFO_GET1_CERT 101 +# define OSSL_STORE_F_OSSL_STORE_INFO_GET1_CRL 102 +# define OSSL_STORE_F_OSSL_STORE_INFO_GET1_NAME 103 +# define OSSL_STORE_F_OSSL_STORE_INFO_GET1_NAME_DESCRIPTION 135 +# define OSSL_STORE_F_OSSL_STORE_INFO_GET1_PARAMS 104 +# define OSSL_STORE_F_OSSL_STORE_INFO_GET1_PKEY 105 +# define OSSL_STORE_F_OSSL_STORE_INFO_NEW_CERT 106 +# define OSSL_STORE_F_OSSL_STORE_INFO_NEW_CRL 107 +# define OSSL_STORE_F_OSSL_STORE_INFO_NEW_EMBEDDED 123 +# define OSSL_STORE_F_OSSL_STORE_INFO_NEW_NAME 109 +# define OSSL_STORE_F_OSSL_STORE_INFO_NEW_PARAMS 110 +# define OSSL_STORE_F_OSSL_STORE_INFO_NEW_PKEY 111 +# define OSSL_STORE_F_OSSL_STORE_INFO_SET0_NAME_DESCRIPTION 134 +# define OSSL_STORE_F_OSSL_STORE_INIT_ONCE 112 +# define OSSL_STORE_F_OSSL_STORE_LOADER_NEW 113 +# define OSSL_STORE_F_OSSL_STORE_OPEN 114 +# define OSSL_STORE_F_OSSL_STORE_OPEN_INT 115 +# define OSSL_STORE_F_OSSL_STORE_REGISTER_LOADER_INT 117 +# define OSSL_STORE_F_OSSL_STORE_SEARCH_BY_ALIAS 132 +# define OSSL_STORE_F_OSSL_STORE_SEARCH_BY_ISSUER_SERIAL 133 +# define OSSL_STORE_F_OSSL_STORE_SEARCH_BY_KEY_FINGERPRINT 136 +# define OSSL_STORE_F_OSSL_STORE_SEARCH_BY_NAME 137 +# define OSSL_STORE_F_OSSL_STORE_UNREGISTER_LOADER_INT 116 +# define OSSL_STORE_F_TRY_DECODE_PARAMS 121 +# define OSSL_STORE_F_TRY_DECODE_PKCS12 122 +# define OSSL_STORE_F_TRY_DECODE_PKCS8ENCRYPTED 125 + +/* + * OSSL_STORE reason codes. 
+ */ +# define OSSL_STORE_R_AMBIGUOUS_CONTENT_TYPE 107 +# define OSSL_STORE_R_BAD_PASSWORD_READ 115 +# define OSSL_STORE_R_ERROR_VERIFYING_PKCS12_MAC 113 +# define OSSL_STORE_R_FINGERPRINT_SIZE_DOES_NOT_MATCH_DIGEST 121 +# define OSSL_STORE_R_INVALID_SCHEME 106 +# define OSSL_STORE_R_IS_NOT_A 112 +# define OSSL_STORE_R_LOADER_INCOMPLETE 116 +# define OSSL_STORE_R_LOADING_STARTED 117 +# define OSSL_STORE_R_NOT_A_CERTIFICATE 100 +# define OSSL_STORE_R_NOT_A_CRL 101 +# define OSSL_STORE_R_NOT_A_KEY 102 +# define OSSL_STORE_R_NOT_A_NAME 103 +# define OSSL_STORE_R_NOT_PARAMETERS 104 +# define OSSL_STORE_R_PASSPHRASE_CALLBACK_ERROR 114 +# define OSSL_STORE_R_PATH_MUST_BE_ABSOLUTE 108 +# define OSSL_STORE_R_SEARCH_ONLY_SUPPORTED_FOR_DIRECTORIES 119 +# define OSSL_STORE_R_UI_PROCESS_INTERRUPTED_OR_CANCELLED 109 +# define OSSL_STORE_R_UNREGISTERED_SCHEME 105 +# define OSSL_STORE_R_UNSUPPORTED_CONTENT_TYPE 110 +# define OSSL_STORE_R_UNSUPPORTED_OPERATION 118 +# define OSSL_STORE_R_UNSUPPORTED_SEARCH_TYPE 120 +# define OSSL_STORE_R_URI_AUTHORITY_UNSUPPORTED 111 + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/symhacks.h b/third_party/openssl/uos/sw_64/include/openssl/symhacks.h new file mode 100644 index 00000000..156ea6e4 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/symhacks.h @@ -0,0 +1,37 @@ +/* + * Copyright 1999-2018 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_SYMHACKS_H +# define HEADER_SYMHACKS_H + +# include + +/* Case insensitive linking causes problems.... */ +# if defined(OPENSSL_SYS_VMS) +# undef ERR_load_CRYPTO_strings +# define ERR_load_CRYPTO_strings ERR_load_CRYPTOlib_strings +# undef OCSP_crlID_new +# define OCSP_crlID_new OCSP_crlID2_new + +# undef d2i_ECPARAMETERS +# define d2i_ECPARAMETERS d2i_UC_ECPARAMETERS +# undef i2d_ECPARAMETERS +# define i2d_ECPARAMETERS i2d_UC_ECPARAMETERS +# undef d2i_ECPKPARAMETERS +# define d2i_ECPKPARAMETERS d2i_UC_ECPKPARAMETERS +# undef i2d_ECPKPARAMETERS +# define i2d_ECPKPARAMETERS i2d_UC_ECPKPARAMETERS + +/* This one clashes with CMS_data_create */ +# undef cms_Data_create +# define cms_Data_create priv_cms_Data_create + +# endif + +#endif /* ! defined HEADER_VMS_IDHACKS_H */ diff --git a/third_party/openssl/uos/sw_64/include/openssl/tls1.h b/third_party/openssl/uos/sw_64/include/openssl/tls1.h new file mode 100644 index 00000000..76d9fda4 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/tls1.h @@ -0,0 +1,1237 @@ +/* + * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved + * Copyright 2005 Nokia. All rights reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_TLS1_H +# define HEADER_TLS1_H + +# include +# include + +#ifdef __cplusplus +extern "C" { +#endif + +/* Default security level if not overridden at config time */ +# ifndef OPENSSL_TLS_SECURITY_LEVEL +# define OPENSSL_TLS_SECURITY_LEVEL 1 +# endif + +# define TLS1_VERSION 0x0301 +# define TLS1_1_VERSION 0x0302 +# define TLS1_2_VERSION 0x0303 +# define TLS1_3_VERSION 0x0304 +# define TLS_MAX_VERSION TLS1_3_VERSION + +/* Special value for method supporting multiple versions */ +# define TLS_ANY_VERSION 0x10000 + +# define TLS1_VERSION_MAJOR 0x03 +# define TLS1_VERSION_MINOR 0x01 + +# define TLS1_1_VERSION_MAJOR 0x03 +# define TLS1_1_VERSION_MINOR 0x02 + +# define TLS1_2_VERSION_MAJOR 0x03 +# define TLS1_2_VERSION_MINOR 0x03 + +# define TLS1_get_version(s) \ + ((SSL_version(s) >> 8) == TLS1_VERSION_MAJOR ? SSL_version(s) : 0) + +# define TLS1_get_client_version(s) \ + ((SSL_client_version(s) >> 8) == TLS1_VERSION_MAJOR ? SSL_client_version(s) : 0) + +# define TLS1_AD_DECRYPTION_FAILED 21 +# define TLS1_AD_RECORD_OVERFLOW 22 +# define TLS1_AD_UNKNOWN_CA 48/* fatal */ +# define TLS1_AD_ACCESS_DENIED 49/* fatal */ +# define TLS1_AD_DECODE_ERROR 50/* fatal */ +# define TLS1_AD_DECRYPT_ERROR 51 +# define TLS1_AD_EXPORT_RESTRICTION 60/* fatal */ +# define TLS1_AD_PROTOCOL_VERSION 70/* fatal */ +# define TLS1_AD_INSUFFICIENT_SECURITY 71/* fatal */ +# define TLS1_AD_INTERNAL_ERROR 80/* fatal */ +# define TLS1_AD_INAPPROPRIATE_FALLBACK 86/* fatal */ +# define TLS1_AD_USER_CANCELLED 90 +# define TLS1_AD_NO_RENEGOTIATION 100 +/* TLSv1.3 alerts */ +# define TLS13_AD_MISSING_EXTENSION 109 /* fatal */ +# define TLS13_AD_CERTIFICATE_REQUIRED 116 /* fatal */ +/* codes 110-114 are from RFC3546 */ +# define TLS1_AD_UNSUPPORTED_EXTENSION 110 +# define TLS1_AD_CERTIFICATE_UNOBTAINABLE 111 +# define TLS1_AD_UNRECOGNIZED_NAME 112 +# define TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE 113 +# define TLS1_AD_BAD_CERTIFICATE_HASH_VALUE 114 +# define TLS1_AD_UNKNOWN_PSK_IDENTITY 115/* fatal */ +# define TLS1_AD_NO_APPLICATION_PROTOCOL 120 /* fatal */ + +/* ExtensionType values from RFC3546 / RFC4366 / RFC6066 */ +# define TLSEXT_TYPE_server_name 0 +# define TLSEXT_TYPE_max_fragment_length 1 +# define TLSEXT_TYPE_client_certificate_url 2 +# define TLSEXT_TYPE_trusted_ca_keys 3 +# define TLSEXT_TYPE_truncated_hmac 4 +# define TLSEXT_TYPE_status_request 5 +/* ExtensionType values from RFC4681 */ +# define TLSEXT_TYPE_user_mapping 6 +/* ExtensionType values from RFC5878 */ +# define TLSEXT_TYPE_client_authz 7 +# define TLSEXT_TYPE_server_authz 8 +/* ExtensionType values from RFC6091 */ +# define TLSEXT_TYPE_cert_type 9 + +/* ExtensionType values from RFC4492 */ +/* + * Prior to TLSv1.3 the supported_groups extension was known as + * elliptic_curves + */ +# define TLSEXT_TYPE_supported_groups 10 +# define TLSEXT_TYPE_elliptic_curves TLSEXT_TYPE_supported_groups +# define TLSEXT_TYPE_ec_point_formats 11 + + +/* ExtensionType value from RFC5054 */ +# define TLSEXT_TYPE_srp 12 + +/* ExtensionType values from RFC5246 */ +# define TLSEXT_TYPE_signature_algorithms 13 + +/* ExtensionType value from RFC5764 */ +# define TLSEXT_TYPE_use_srtp 14 + +/* ExtensionType value from RFC5620 */ +# define TLSEXT_TYPE_heartbeat 15 + +/* ExtensionType value from RFC7301 */ +# define TLSEXT_TYPE_application_layer_protocol_negotiation 16 + +/* + * Extension type for Certificate Transparency + * 
https://tools.ietf.org/html/rfc6962#section-3.3.1 + */ +# define TLSEXT_TYPE_signed_certificate_timestamp 18 + +/* + * ExtensionType value for TLS padding extension. + * http://tools.ietf.org/html/draft-agl-tls-padding + */ +# define TLSEXT_TYPE_padding 21 + +/* ExtensionType value from RFC7366 */ +# define TLSEXT_TYPE_encrypt_then_mac 22 + +/* ExtensionType value from RFC7627 */ +# define TLSEXT_TYPE_extended_master_secret 23 + +/* ExtensionType value from RFC4507 */ +# define TLSEXT_TYPE_session_ticket 35 + +/* As defined for TLS1.3 */ +# define TLSEXT_TYPE_psk 41 +# define TLSEXT_TYPE_early_data 42 +# define TLSEXT_TYPE_supported_versions 43 +# define TLSEXT_TYPE_cookie 44 +# define TLSEXT_TYPE_psk_kex_modes 45 +# define TLSEXT_TYPE_certificate_authorities 47 +# define TLSEXT_TYPE_post_handshake_auth 49 +# define TLSEXT_TYPE_signature_algorithms_cert 50 +# define TLSEXT_TYPE_key_share 51 + +/* Temporary extension type */ +# define TLSEXT_TYPE_renegotiate 0xff01 + +# ifndef OPENSSL_NO_NEXTPROTONEG +/* This is not an IANA defined extension number */ +# define TLSEXT_TYPE_next_proto_neg 13172 +# endif + +/* NameType value from RFC3546 */ +# define TLSEXT_NAMETYPE_host_name 0 +/* status request value from RFC3546 */ +# define TLSEXT_STATUSTYPE_ocsp 1 + +/* ECPointFormat values from RFC4492 */ +# define TLSEXT_ECPOINTFORMAT_first 0 +# define TLSEXT_ECPOINTFORMAT_uncompressed 0 +# define TLSEXT_ECPOINTFORMAT_ansiX962_compressed_prime 1 +# define TLSEXT_ECPOINTFORMAT_ansiX962_compressed_char2 2 +# define TLSEXT_ECPOINTFORMAT_last 2 + +/* Signature and hash algorithms from RFC5246 */ +# define TLSEXT_signature_anonymous 0 +# define TLSEXT_signature_rsa 1 +# define TLSEXT_signature_dsa 2 +# define TLSEXT_signature_ecdsa 3 +# define TLSEXT_signature_gostr34102001 237 +# define TLSEXT_signature_gostr34102012_256 238 +# define TLSEXT_signature_gostr34102012_512 239 + +/* Total number of different signature algorithms */ +# define TLSEXT_signature_num 7 + +# define TLSEXT_hash_none 0 +# define TLSEXT_hash_md5 1 +# define TLSEXT_hash_sha1 2 +# define TLSEXT_hash_sha224 3 +# define TLSEXT_hash_sha256 4 +# define TLSEXT_hash_sha384 5 +# define TLSEXT_hash_sha512 6 +# define TLSEXT_hash_gostr3411 237 +# define TLSEXT_hash_gostr34112012_256 238 +# define TLSEXT_hash_gostr34112012_512 239 + +/* Total number of different digest algorithms */ + +# define TLSEXT_hash_num 10 + +/* Flag set for unrecognised algorithms */ +# define TLSEXT_nid_unknown 0x1000000 + +/* ECC curves */ + +# define TLSEXT_curve_P_256 23 +# define TLSEXT_curve_P_384 24 + +/* OpenSSL value to disable maximum fragment length extension */ +# define TLSEXT_max_fragment_length_DISABLED 0 +/* Allowed values for max fragment length extension */ +# define TLSEXT_max_fragment_length_512 1 +# define TLSEXT_max_fragment_length_1024 2 +# define TLSEXT_max_fragment_length_2048 3 +# define TLSEXT_max_fragment_length_4096 4 + +int SSL_CTX_set_tlsext_max_fragment_length(SSL_CTX *ctx, uint8_t mode); +int SSL_set_tlsext_max_fragment_length(SSL *ssl, uint8_t mode); + +# define TLSEXT_MAXLEN_host_name 255 + +__owur const char *SSL_get_servername(const SSL *s, const int type); +__owur int SSL_get_servername_type(const SSL *s); +/* + * SSL_export_keying_material exports a value derived from the master secret, + * as specified in RFC 5705. It writes |olen| bytes to |out| given a label and + * optional context. (Since a zero length context is allowed, the |use_context| + * flag controls whether a context is included.) 
It returns 1 on success and + * 0 or -1 otherwise. + */ +__owur int SSL_export_keying_material(SSL *s, unsigned char *out, size_t olen, + const char *label, size_t llen, + const unsigned char *context, + size_t contextlen, int use_context); + +/* + * SSL_export_keying_material_early exports a value derived from the + * early exporter master secret, as specified in + * https://tools.ietf.org/html/draft-ietf-tls-tls13-23. It writes + * |olen| bytes to |out| given a label and optional context. It + * returns 1 on success and 0 otherwise. + */ +__owur int SSL_export_keying_material_early(SSL *s, unsigned char *out, + size_t olen, const char *label, + size_t llen, + const unsigned char *context, + size_t contextlen); + +int SSL_get_peer_signature_type_nid(const SSL *s, int *pnid); +int SSL_get_signature_type_nid(const SSL *s, int *pnid); + +int SSL_get_sigalgs(SSL *s, int idx, + int *psign, int *phash, int *psignandhash, + unsigned char *rsig, unsigned char *rhash); + +int SSL_get_shared_sigalgs(SSL *s, int idx, + int *psign, int *phash, int *psignandhash, + unsigned char *rsig, unsigned char *rhash); + +__owur int SSL_check_chain(SSL *s, X509 *x, EVP_PKEY *pk, STACK_OF(X509) *chain); + +# define SSL_set_tlsext_host_name(s,name) \ + SSL_ctrl(s,SSL_CTRL_SET_TLSEXT_HOSTNAME,TLSEXT_NAMETYPE_host_name,\ + (void *)name) + +# define SSL_set_tlsext_debug_callback(ssl, cb) \ + SSL_callback_ctrl(ssl,SSL_CTRL_SET_TLSEXT_DEBUG_CB,\ + (void (*)(void))cb) + +# define SSL_set_tlsext_debug_arg(ssl, arg) \ + SSL_ctrl(ssl,SSL_CTRL_SET_TLSEXT_DEBUG_ARG,0,arg) + +# define SSL_get_tlsext_status_type(ssl) \ + SSL_ctrl(ssl,SSL_CTRL_GET_TLSEXT_STATUS_REQ_TYPE,0,NULL) + +# define SSL_set_tlsext_status_type(ssl, type) \ + SSL_ctrl(ssl,SSL_CTRL_SET_TLSEXT_STATUS_REQ_TYPE,type,NULL) + +# define SSL_get_tlsext_status_exts(ssl, arg) \ + SSL_ctrl(ssl,SSL_CTRL_GET_TLSEXT_STATUS_REQ_EXTS,0,arg) + +# define SSL_set_tlsext_status_exts(ssl, arg) \ + SSL_ctrl(ssl,SSL_CTRL_SET_TLSEXT_STATUS_REQ_EXTS,0,arg) + +# define SSL_get_tlsext_status_ids(ssl, arg) \ + SSL_ctrl(ssl,SSL_CTRL_GET_TLSEXT_STATUS_REQ_IDS,0,arg) + +# define SSL_set_tlsext_status_ids(ssl, arg) \ + SSL_ctrl(ssl,SSL_CTRL_SET_TLSEXT_STATUS_REQ_IDS,0,arg) + +# define SSL_get_tlsext_status_ocsp_resp(ssl, arg) \ + SSL_ctrl(ssl,SSL_CTRL_GET_TLSEXT_STATUS_REQ_OCSP_RESP,0,arg) + +# define SSL_set_tlsext_status_ocsp_resp(ssl, arg, arglen) \ + SSL_ctrl(ssl,SSL_CTRL_SET_TLSEXT_STATUS_REQ_OCSP_RESP,arglen,arg) + +# define SSL_CTX_set_tlsext_servername_callback(ctx, cb) \ + SSL_CTX_callback_ctrl(ctx,SSL_CTRL_SET_TLSEXT_SERVERNAME_CB,\ + (void (*)(void))cb) + +# define SSL_TLSEXT_ERR_OK 0 +# define SSL_TLSEXT_ERR_ALERT_WARNING 1 +# define SSL_TLSEXT_ERR_ALERT_FATAL 2 +# define SSL_TLSEXT_ERR_NOACK 3 + +# define SSL_CTX_set_tlsext_servername_arg(ctx, arg) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SET_TLSEXT_SERVERNAME_ARG,0,arg) + +# define SSL_CTX_get_tlsext_ticket_keys(ctx, keys, keylen) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_GET_TLSEXT_TICKET_KEYS,keylen,keys) +# define SSL_CTX_set_tlsext_ticket_keys(ctx, keys, keylen) \ + SSL_CTX_ctrl(ctx,SSL_CTRL_SET_TLSEXT_TICKET_KEYS,keylen,keys) + +# define SSL_CTX_get_tlsext_status_cb(ssl, cb) \ + SSL_CTX_ctrl(ssl,SSL_CTRL_GET_TLSEXT_STATUS_REQ_CB,0,(void *)cb) +# define SSL_CTX_set_tlsext_status_cb(ssl, cb) \ + SSL_CTX_callback_ctrl(ssl,SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB,\ + (void (*)(void))cb) + +# define SSL_CTX_get_tlsext_status_arg(ssl, arg) \ + SSL_CTX_ctrl(ssl,SSL_CTRL_GET_TLSEXT_STATUS_REQ_CB_ARG,0,arg) +# define SSL_CTX_set_tlsext_status_arg(ssl, arg) 
\ + SSL_CTX_ctrl(ssl,SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB_ARG,0,arg) + +# define SSL_CTX_set_tlsext_status_type(ssl, type) \ + SSL_CTX_ctrl(ssl,SSL_CTRL_SET_TLSEXT_STATUS_REQ_TYPE,type,NULL) + +# define SSL_CTX_get_tlsext_status_type(ssl) \ + SSL_CTX_ctrl(ssl,SSL_CTRL_GET_TLSEXT_STATUS_REQ_TYPE,0,NULL) + +# define SSL_CTX_set_tlsext_ticket_key_cb(ssl, cb) \ + SSL_CTX_callback_ctrl(ssl,SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB,\ + (void (*)(void))cb) + +# ifndef OPENSSL_NO_HEARTBEATS +# define SSL_DTLSEXT_HB_ENABLED 0x01 +# define SSL_DTLSEXT_HB_DONT_SEND_REQUESTS 0x02 +# define SSL_DTLSEXT_HB_DONT_RECV_REQUESTS 0x04 +# define SSL_get_dtlsext_heartbeat_pending(ssl) \ + SSL_ctrl(ssl,SSL_CTRL_GET_DTLS_EXT_HEARTBEAT_PENDING,0,NULL) +# define SSL_set_dtlsext_heartbeat_no_requests(ssl, arg) \ + SSL_ctrl(ssl,SSL_CTRL_SET_DTLS_EXT_HEARTBEAT_NO_REQUESTS,arg,NULL) + +# if OPENSSL_API_COMPAT < 0x10100000L +# define SSL_CTRL_TLS_EXT_SEND_HEARTBEAT \ + SSL_CTRL_DTLS_EXT_SEND_HEARTBEAT +# define SSL_CTRL_GET_TLS_EXT_HEARTBEAT_PENDING \ + SSL_CTRL_GET_DTLS_EXT_HEARTBEAT_PENDING +# define SSL_CTRL_SET_TLS_EXT_HEARTBEAT_NO_REQUESTS \ + SSL_CTRL_SET_DTLS_EXT_HEARTBEAT_NO_REQUESTS +# define SSL_TLSEXT_HB_ENABLED \ + SSL_DTLSEXT_HB_ENABLED +# define SSL_TLSEXT_HB_DONT_SEND_REQUESTS \ + SSL_DTLSEXT_HB_DONT_SEND_REQUESTS +# define SSL_TLSEXT_HB_DONT_RECV_REQUESTS \ + SSL_DTLSEXT_HB_DONT_RECV_REQUESTS +# define SSL_get_tlsext_heartbeat_pending(ssl) \ + SSL_get_dtlsext_heartbeat_pending(ssl) +# define SSL_set_tlsext_heartbeat_no_requests(ssl, arg) \ + SSL_set_dtlsext_heartbeat_no_requests(ssl,arg) +# endif +# endif + +/* PSK ciphersuites from 4279 */ +# define TLS1_CK_PSK_WITH_RC4_128_SHA 0x0300008A +# define TLS1_CK_PSK_WITH_3DES_EDE_CBC_SHA 0x0300008B +# define TLS1_CK_PSK_WITH_AES_128_CBC_SHA 0x0300008C +# define TLS1_CK_PSK_WITH_AES_256_CBC_SHA 0x0300008D +# define TLS1_CK_DHE_PSK_WITH_RC4_128_SHA 0x0300008E +# define TLS1_CK_DHE_PSK_WITH_3DES_EDE_CBC_SHA 0x0300008F +# define TLS1_CK_DHE_PSK_WITH_AES_128_CBC_SHA 0x03000090 +# define TLS1_CK_DHE_PSK_WITH_AES_256_CBC_SHA 0x03000091 +# define TLS1_CK_RSA_PSK_WITH_RC4_128_SHA 0x03000092 +# define TLS1_CK_RSA_PSK_WITH_3DES_EDE_CBC_SHA 0x03000093 +# define TLS1_CK_RSA_PSK_WITH_AES_128_CBC_SHA 0x03000094 +# define TLS1_CK_RSA_PSK_WITH_AES_256_CBC_SHA 0x03000095 + +/* PSK ciphersuites from 5487 */ +# define TLS1_CK_PSK_WITH_AES_128_GCM_SHA256 0x030000A8 +# define TLS1_CK_PSK_WITH_AES_256_GCM_SHA384 0x030000A9 +# define TLS1_CK_DHE_PSK_WITH_AES_128_GCM_SHA256 0x030000AA +# define TLS1_CK_DHE_PSK_WITH_AES_256_GCM_SHA384 0x030000AB +# define TLS1_CK_RSA_PSK_WITH_AES_128_GCM_SHA256 0x030000AC +# define TLS1_CK_RSA_PSK_WITH_AES_256_GCM_SHA384 0x030000AD +# define TLS1_CK_PSK_WITH_AES_128_CBC_SHA256 0x030000AE +# define TLS1_CK_PSK_WITH_AES_256_CBC_SHA384 0x030000AF +# define TLS1_CK_PSK_WITH_NULL_SHA256 0x030000B0 +# define TLS1_CK_PSK_WITH_NULL_SHA384 0x030000B1 +# define TLS1_CK_DHE_PSK_WITH_AES_128_CBC_SHA256 0x030000B2 +# define TLS1_CK_DHE_PSK_WITH_AES_256_CBC_SHA384 0x030000B3 +# define TLS1_CK_DHE_PSK_WITH_NULL_SHA256 0x030000B4 +# define TLS1_CK_DHE_PSK_WITH_NULL_SHA384 0x030000B5 +# define TLS1_CK_RSA_PSK_WITH_AES_128_CBC_SHA256 0x030000B6 +# define TLS1_CK_RSA_PSK_WITH_AES_256_CBC_SHA384 0x030000B7 +# define TLS1_CK_RSA_PSK_WITH_NULL_SHA256 0x030000B8 +# define TLS1_CK_RSA_PSK_WITH_NULL_SHA384 0x030000B9 + +/* NULL PSK ciphersuites from RFC4785 */ +# define TLS1_CK_PSK_WITH_NULL_SHA 0x0300002C +# define TLS1_CK_DHE_PSK_WITH_NULL_SHA 0x0300002D +# define 
TLS1_CK_RSA_PSK_WITH_NULL_SHA 0x0300002E + +/* AES ciphersuites from RFC3268 */ +# define TLS1_CK_RSA_WITH_AES_128_SHA 0x0300002F +# define TLS1_CK_DH_DSS_WITH_AES_128_SHA 0x03000030 +# define TLS1_CK_DH_RSA_WITH_AES_128_SHA 0x03000031 +# define TLS1_CK_DHE_DSS_WITH_AES_128_SHA 0x03000032 +# define TLS1_CK_DHE_RSA_WITH_AES_128_SHA 0x03000033 +# define TLS1_CK_ADH_WITH_AES_128_SHA 0x03000034 +# define TLS1_CK_RSA_WITH_AES_256_SHA 0x03000035 +# define TLS1_CK_DH_DSS_WITH_AES_256_SHA 0x03000036 +# define TLS1_CK_DH_RSA_WITH_AES_256_SHA 0x03000037 +# define TLS1_CK_DHE_DSS_WITH_AES_256_SHA 0x03000038 +# define TLS1_CK_DHE_RSA_WITH_AES_256_SHA 0x03000039 +# define TLS1_CK_ADH_WITH_AES_256_SHA 0x0300003A + +/* TLS v1.2 ciphersuites */ +# define TLS1_CK_RSA_WITH_NULL_SHA256 0x0300003B +# define TLS1_CK_RSA_WITH_AES_128_SHA256 0x0300003C +# define TLS1_CK_RSA_WITH_AES_256_SHA256 0x0300003D +# define TLS1_CK_DH_DSS_WITH_AES_128_SHA256 0x0300003E +# define TLS1_CK_DH_RSA_WITH_AES_128_SHA256 0x0300003F +# define TLS1_CK_DHE_DSS_WITH_AES_128_SHA256 0x03000040 + +/* Camellia ciphersuites from RFC4132 */ +# define TLS1_CK_RSA_WITH_CAMELLIA_128_CBC_SHA 0x03000041 +# define TLS1_CK_DH_DSS_WITH_CAMELLIA_128_CBC_SHA 0x03000042 +# define TLS1_CK_DH_RSA_WITH_CAMELLIA_128_CBC_SHA 0x03000043 +# define TLS1_CK_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA 0x03000044 +# define TLS1_CK_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA 0x03000045 +# define TLS1_CK_ADH_WITH_CAMELLIA_128_CBC_SHA 0x03000046 + +/* TLS v1.2 ciphersuites */ +# define TLS1_CK_DHE_RSA_WITH_AES_128_SHA256 0x03000067 +# define TLS1_CK_DH_DSS_WITH_AES_256_SHA256 0x03000068 +# define TLS1_CK_DH_RSA_WITH_AES_256_SHA256 0x03000069 +# define TLS1_CK_DHE_DSS_WITH_AES_256_SHA256 0x0300006A +# define TLS1_CK_DHE_RSA_WITH_AES_256_SHA256 0x0300006B +# define TLS1_CK_ADH_WITH_AES_128_SHA256 0x0300006C +# define TLS1_CK_ADH_WITH_AES_256_SHA256 0x0300006D + +/* Camellia ciphersuites from RFC4132 */ +# define TLS1_CK_RSA_WITH_CAMELLIA_256_CBC_SHA 0x03000084 +# define TLS1_CK_DH_DSS_WITH_CAMELLIA_256_CBC_SHA 0x03000085 +# define TLS1_CK_DH_RSA_WITH_CAMELLIA_256_CBC_SHA 0x03000086 +# define TLS1_CK_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA 0x03000087 +# define TLS1_CK_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA 0x03000088 +# define TLS1_CK_ADH_WITH_CAMELLIA_256_CBC_SHA 0x03000089 + +/* SEED ciphersuites from RFC4162 */ +# define TLS1_CK_RSA_WITH_SEED_SHA 0x03000096 +# define TLS1_CK_DH_DSS_WITH_SEED_SHA 0x03000097 +# define TLS1_CK_DH_RSA_WITH_SEED_SHA 0x03000098 +# define TLS1_CK_DHE_DSS_WITH_SEED_SHA 0x03000099 +# define TLS1_CK_DHE_RSA_WITH_SEED_SHA 0x0300009A +# define TLS1_CK_ADH_WITH_SEED_SHA 0x0300009B + +/* TLS v1.2 GCM ciphersuites from RFC5288 */ +# define TLS1_CK_RSA_WITH_AES_128_GCM_SHA256 0x0300009C +# define TLS1_CK_RSA_WITH_AES_256_GCM_SHA384 0x0300009D +# define TLS1_CK_DHE_RSA_WITH_AES_128_GCM_SHA256 0x0300009E +# define TLS1_CK_DHE_RSA_WITH_AES_256_GCM_SHA384 0x0300009F +# define TLS1_CK_DH_RSA_WITH_AES_128_GCM_SHA256 0x030000A0 +# define TLS1_CK_DH_RSA_WITH_AES_256_GCM_SHA384 0x030000A1 +# define TLS1_CK_DHE_DSS_WITH_AES_128_GCM_SHA256 0x030000A2 +# define TLS1_CK_DHE_DSS_WITH_AES_256_GCM_SHA384 0x030000A3 +# define TLS1_CK_DH_DSS_WITH_AES_128_GCM_SHA256 0x030000A4 +# define TLS1_CK_DH_DSS_WITH_AES_256_GCM_SHA384 0x030000A5 +# define TLS1_CK_ADH_WITH_AES_128_GCM_SHA256 0x030000A6 +# define TLS1_CK_ADH_WITH_AES_256_GCM_SHA384 0x030000A7 + +/* CCM ciphersuites from RFC6655 */ +# define TLS1_CK_RSA_WITH_AES_128_CCM 0x0300C09C +# define TLS1_CK_RSA_WITH_AES_256_CCM 0x0300C09D +# define 
TLS1_CK_DHE_RSA_WITH_AES_128_CCM 0x0300C09E +# define TLS1_CK_DHE_RSA_WITH_AES_256_CCM 0x0300C09F +# define TLS1_CK_RSA_WITH_AES_128_CCM_8 0x0300C0A0 +# define TLS1_CK_RSA_WITH_AES_256_CCM_8 0x0300C0A1 +# define TLS1_CK_DHE_RSA_WITH_AES_128_CCM_8 0x0300C0A2 +# define TLS1_CK_DHE_RSA_WITH_AES_256_CCM_8 0x0300C0A3 +# define TLS1_CK_PSK_WITH_AES_128_CCM 0x0300C0A4 +# define TLS1_CK_PSK_WITH_AES_256_CCM 0x0300C0A5 +# define TLS1_CK_DHE_PSK_WITH_AES_128_CCM 0x0300C0A6 +# define TLS1_CK_DHE_PSK_WITH_AES_256_CCM 0x0300C0A7 +# define TLS1_CK_PSK_WITH_AES_128_CCM_8 0x0300C0A8 +# define TLS1_CK_PSK_WITH_AES_256_CCM_8 0x0300C0A9 +# define TLS1_CK_DHE_PSK_WITH_AES_128_CCM_8 0x0300C0AA +# define TLS1_CK_DHE_PSK_WITH_AES_256_CCM_8 0x0300C0AB + +/* CCM ciphersuites from RFC7251 */ +# define TLS1_CK_ECDHE_ECDSA_WITH_AES_128_CCM 0x0300C0AC +# define TLS1_CK_ECDHE_ECDSA_WITH_AES_256_CCM 0x0300C0AD +# define TLS1_CK_ECDHE_ECDSA_WITH_AES_128_CCM_8 0x0300C0AE +# define TLS1_CK_ECDHE_ECDSA_WITH_AES_256_CCM_8 0x0300C0AF + +/* TLS 1.2 Camellia SHA-256 ciphersuites from RFC5932 */ +# define TLS1_CK_RSA_WITH_CAMELLIA_128_CBC_SHA256 0x030000BA +# define TLS1_CK_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 0x030000BB +# define TLS1_CK_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 0x030000BC +# define TLS1_CK_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 0x030000BD +# define TLS1_CK_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 0x030000BE +# define TLS1_CK_ADH_WITH_CAMELLIA_128_CBC_SHA256 0x030000BF + +# define TLS1_CK_RSA_WITH_CAMELLIA_256_CBC_SHA256 0x030000C0 +# define TLS1_CK_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 0x030000C1 +# define TLS1_CK_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 0x030000C2 +# define TLS1_CK_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 0x030000C3 +# define TLS1_CK_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 0x030000C4 +# define TLS1_CK_ADH_WITH_CAMELLIA_256_CBC_SHA256 0x030000C5 + +/* ECC ciphersuites from RFC4492 */ +# define TLS1_CK_ECDH_ECDSA_WITH_NULL_SHA 0x0300C001 +# define TLS1_CK_ECDH_ECDSA_WITH_RC4_128_SHA 0x0300C002 +# define TLS1_CK_ECDH_ECDSA_WITH_DES_192_CBC3_SHA 0x0300C003 +# define TLS1_CK_ECDH_ECDSA_WITH_AES_128_CBC_SHA 0x0300C004 +# define TLS1_CK_ECDH_ECDSA_WITH_AES_256_CBC_SHA 0x0300C005 + +# define TLS1_CK_ECDHE_ECDSA_WITH_NULL_SHA 0x0300C006 +# define TLS1_CK_ECDHE_ECDSA_WITH_RC4_128_SHA 0x0300C007 +# define TLS1_CK_ECDHE_ECDSA_WITH_DES_192_CBC3_SHA 0x0300C008 +# define TLS1_CK_ECDHE_ECDSA_WITH_AES_128_CBC_SHA 0x0300C009 +# define TLS1_CK_ECDHE_ECDSA_WITH_AES_256_CBC_SHA 0x0300C00A + +# define TLS1_CK_ECDH_RSA_WITH_NULL_SHA 0x0300C00B +# define TLS1_CK_ECDH_RSA_WITH_RC4_128_SHA 0x0300C00C +# define TLS1_CK_ECDH_RSA_WITH_DES_192_CBC3_SHA 0x0300C00D +# define TLS1_CK_ECDH_RSA_WITH_AES_128_CBC_SHA 0x0300C00E +# define TLS1_CK_ECDH_RSA_WITH_AES_256_CBC_SHA 0x0300C00F + +# define TLS1_CK_ECDHE_RSA_WITH_NULL_SHA 0x0300C010 +# define TLS1_CK_ECDHE_RSA_WITH_RC4_128_SHA 0x0300C011 +# define TLS1_CK_ECDHE_RSA_WITH_DES_192_CBC3_SHA 0x0300C012 +# define TLS1_CK_ECDHE_RSA_WITH_AES_128_CBC_SHA 0x0300C013 +# define TLS1_CK_ECDHE_RSA_WITH_AES_256_CBC_SHA 0x0300C014 + +# define TLS1_CK_ECDH_anon_WITH_NULL_SHA 0x0300C015 +# define TLS1_CK_ECDH_anon_WITH_RC4_128_SHA 0x0300C016 +# define TLS1_CK_ECDH_anon_WITH_DES_192_CBC3_SHA 0x0300C017 +# define TLS1_CK_ECDH_anon_WITH_AES_128_CBC_SHA 0x0300C018 +# define TLS1_CK_ECDH_anon_WITH_AES_256_CBC_SHA 0x0300C019 + +/* SRP ciphersuites from RFC 5054 */ +# define TLS1_CK_SRP_SHA_WITH_3DES_EDE_CBC_SHA 0x0300C01A +# define TLS1_CK_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA 0x0300C01B +# define 
TLS1_CK_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA 0x0300C01C +# define TLS1_CK_SRP_SHA_WITH_AES_128_CBC_SHA 0x0300C01D +# define TLS1_CK_SRP_SHA_RSA_WITH_AES_128_CBC_SHA 0x0300C01E +# define TLS1_CK_SRP_SHA_DSS_WITH_AES_128_CBC_SHA 0x0300C01F +# define TLS1_CK_SRP_SHA_WITH_AES_256_CBC_SHA 0x0300C020 +# define TLS1_CK_SRP_SHA_RSA_WITH_AES_256_CBC_SHA 0x0300C021 +# define TLS1_CK_SRP_SHA_DSS_WITH_AES_256_CBC_SHA 0x0300C022 + +/* ECDH HMAC based ciphersuites from RFC5289 */ +# define TLS1_CK_ECDHE_ECDSA_WITH_AES_128_SHA256 0x0300C023 +# define TLS1_CK_ECDHE_ECDSA_WITH_AES_256_SHA384 0x0300C024 +# define TLS1_CK_ECDH_ECDSA_WITH_AES_128_SHA256 0x0300C025 +# define TLS1_CK_ECDH_ECDSA_WITH_AES_256_SHA384 0x0300C026 +# define TLS1_CK_ECDHE_RSA_WITH_AES_128_SHA256 0x0300C027 +# define TLS1_CK_ECDHE_RSA_WITH_AES_256_SHA384 0x0300C028 +# define TLS1_CK_ECDH_RSA_WITH_AES_128_SHA256 0x0300C029 +# define TLS1_CK_ECDH_RSA_WITH_AES_256_SHA384 0x0300C02A + +/* ECDH GCM based ciphersuites from RFC5289 */ +# define TLS1_CK_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 0x0300C02B +# define TLS1_CK_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 0x0300C02C +# define TLS1_CK_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 0x0300C02D +# define TLS1_CK_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 0x0300C02E +# define TLS1_CK_ECDHE_RSA_WITH_AES_128_GCM_SHA256 0x0300C02F +# define TLS1_CK_ECDHE_RSA_WITH_AES_256_GCM_SHA384 0x0300C030 +# define TLS1_CK_ECDH_RSA_WITH_AES_128_GCM_SHA256 0x0300C031 +# define TLS1_CK_ECDH_RSA_WITH_AES_256_GCM_SHA384 0x0300C032 + +/* ECDHE PSK ciphersuites from RFC5489 */ +# define TLS1_CK_ECDHE_PSK_WITH_RC4_128_SHA 0x0300C033 +# define TLS1_CK_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA 0x0300C034 +# define TLS1_CK_ECDHE_PSK_WITH_AES_128_CBC_SHA 0x0300C035 +# define TLS1_CK_ECDHE_PSK_WITH_AES_256_CBC_SHA 0x0300C036 + +# define TLS1_CK_ECDHE_PSK_WITH_AES_128_CBC_SHA256 0x0300C037 +# define TLS1_CK_ECDHE_PSK_WITH_AES_256_CBC_SHA384 0x0300C038 + +/* NULL PSK ciphersuites from RFC4785 */ +# define TLS1_CK_ECDHE_PSK_WITH_NULL_SHA 0x0300C039 +# define TLS1_CK_ECDHE_PSK_WITH_NULL_SHA256 0x0300C03A +# define TLS1_CK_ECDHE_PSK_WITH_NULL_SHA384 0x0300C03B + +/* Camellia-CBC ciphersuites from RFC6367 */ +# define TLS1_CK_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 0x0300C072 +# define TLS1_CK_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 0x0300C073 +# define TLS1_CK_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 0x0300C074 +# define TLS1_CK_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 0x0300C075 +# define TLS1_CK_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 0x0300C076 +# define TLS1_CK_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 0x0300C077 +# define TLS1_CK_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 0x0300C078 +# define TLS1_CK_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 0x0300C079 + +# define TLS1_CK_PSK_WITH_CAMELLIA_128_CBC_SHA256 0x0300C094 +# define TLS1_CK_PSK_WITH_CAMELLIA_256_CBC_SHA384 0x0300C095 +# define TLS1_CK_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 0x0300C096 +# define TLS1_CK_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 0x0300C097 +# define TLS1_CK_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 0x0300C098 +# define TLS1_CK_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 0x0300C099 +# define TLS1_CK_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 0x0300C09A +# define TLS1_CK_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 0x0300C09B + +/* draft-ietf-tls-chacha20-poly1305-03 */ +# define TLS1_CK_ECDHE_RSA_WITH_CHACHA20_POLY1305 0x0300CCA8 +# define TLS1_CK_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 0x0300CCA9 +# define TLS1_CK_DHE_RSA_WITH_CHACHA20_POLY1305 0x0300CCAA +# define TLS1_CK_PSK_WITH_CHACHA20_POLY1305 0x0300CCAB +# define 
TLS1_CK_ECDHE_PSK_WITH_CHACHA20_POLY1305 0x0300CCAC +# define TLS1_CK_DHE_PSK_WITH_CHACHA20_POLY1305 0x0300CCAD +# define TLS1_CK_RSA_PSK_WITH_CHACHA20_POLY1305 0x0300CCAE + +/* TLS v1.3 ciphersuites */ +# define TLS1_3_CK_AES_128_GCM_SHA256 0x03001301 +# define TLS1_3_CK_AES_256_GCM_SHA384 0x03001302 +# define TLS1_3_CK_CHACHA20_POLY1305_SHA256 0x03001303 +# define TLS1_3_CK_AES_128_CCM_SHA256 0x03001304 +# define TLS1_3_CK_AES_128_CCM_8_SHA256 0x03001305 + +/* Aria ciphersuites from RFC6209 */ +# define TLS1_CK_RSA_WITH_ARIA_128_GCM_SHA256 0x0300C050 +# define TLS1_CK_RSA_WITH_ARIA_256_GCM_SHA384 0x0300C051 +# define TLS1_CK_DHE_RSA_WITH_ARIA_128_GCM_SHA256 0x0300C052 +# define TLS1_CK_DHE_RSA_WITH_ARIA_256_GCM_SHA384 0x0300C053 +# define TLS1_CK_DH_RSA_WITH_ARIA_128_GCM_SHA256 0x0300C054 +# define TLS1_CK_DH_RSA_WITH_ARIA_256_GCM_SHA384 0x0300C055 +# define TLS1_CK_DHE_DSS_WITH_ARIA_128_GCM_SHA256 0x0300C056 +# define TLS1_CK_DHE_DSS_WITH_ARIA_256_GCM_SHA384 0x0300C057 +# define TLS1_CK_DH_DSS_WITH_ARIA_128_GCM_SHA256 0x0300C058 +# define TLS1_CK_DH_DSS_WITH_ARIA_256_GCM_SHA384 0x0300C059 +# define TLS1_CK_DH_anon_WITH_ARIA_128_GCM_SHA256 0x0300C05A +# define TLS1_CK_DH_anon_WITH_ARIA_256_GCM_SHA384 0x0300C05B +# define TLS1_CK_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 0x0300C05C +# define TLS1_CK_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 0x0300C05D +# define TLS1_CK_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 0x0300C05E +# define TLS1_CK_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 0x0300C05F +# define TLS1_CK_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 0x0300C060 +# define TLS1_CK_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 0x0300C061 +# define TLS1_CK_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 0x0300C062 +# define TLS1_CK_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 0x0300C063 +# define TLS1_CK_PSK_WITH_ARIA_128_GCM_SHA256 0x0300C06A +# define TLS1_CK_PSK_WITH_ARIA_256_GCM_SHA384 0x0300C06B +# define TLS1_CK_DHE_PSK_WITH_ARIA_128_GCM_SHA256 0x0300C06C +# define TLS1_CK_DHE_PSK_WITH_ARIA_256_GCM_SHA384 0x0300C06D +# define TLS1_CK_RSA_PSK_WITH_ARIA_128_GCM_SHA256 0x0300C06E +# define TLS1_CK_RSA_PSK_WITH_ARIA_256_GCM_SHA384 0x0300C06F + +/* a bundle of RFC standard cipher names, generated from ssl3_ciphers[] */ +# define TLS1_RFC_RSA_WITH_AES_128_SHA "TLS_RSA_WITH_AES_128_CBC_SHA" +# define TLS1_RFC_DHE_DSS_WITH_AES_128_SHA "TLS_DHE_DSS_WITH_AES_128_CBC_SHA" +# define TLS1_RFC_DHE_RSA_WITH_AES_128_SHA "TLS_DHE_RSA_WITH_AES_128_CBC_SHA" +# define TLS1_RFC_ADH_WITH_AES_128_SHA "TLS_DH_anon_WITH_AES_128_CBC_SHA" +# define TLS1_RFC_RSA_WITH_AES_256_SHA "TLS_RSA_WITH_AES_256_CBC_SHA" +# define TLS1_RFC_DHE_DSS_WITH_AES_256_SHA "TLS_DHE_DSS_WITH_AES_256_CBC_SHA" +# define TLS1_RFC_DHE_RSA_WITH_AES_256_SHA "TLS_DHE_RSA_WITH_AES_256_CBC_SHA" +# define TLS1_RFC_ADH_WITH_AES_256_SHA "TLS_DH_anon_WITH_AES_256_CBC_SHA" +# define TLS1_RFC_RSA_WITH_NULL_SHA256 "TLS_RSA_WITH_NULL_SHA256" +# define TLS1_RFC_RSA_WITH_AES_128_SHA256 "TLS_RSA_WITH_AES_128_CBC_SHA256" +# define TLS1_RFC_RSA_WITH_AES_256_SHA256 "TLS_RSA_WITH_AES_256_CBC_SHA256" +# define TLS1_RFC_DHE_DSS_WITH_AES_128_SHA256 "TLS_DHE_DSS_WITH_AES_128_CBC_SHA256" +# define TLS1_RFC_DHE_RSA_WITH_AES_128_SHA256 "TLS_DHE_RSA_WITH_AES_128_CBC_SHA256" +# define TLS1_RFC_DHE_DSS_WITH_AES_256_SHA256 "TLS_DHE_DSS_WITH_AES_256_CBC_SHA256" +# define TLS1_RFC_DHE_RSA_WITH_AES_256_SHA256 "TLS_DHE_RSA_WITH_AES_256_CBC_SHA256" +# define TLS1_RFC_ADH_WITH_AES_128_SHA256 "TLS_DH_anon_WITH_AES_128_CBC_SHA256" +# define TLS1_RFC_ADH_WITH_AES_256_SHA256 "TLS_DH_anon_WITH_AES_256_CBC_SHA256" +# define 
TLS1_RFC_RSA_WITH_AES_128_GCM_SHA256 "TLS_RSA_WITH_AES_128_GCM_SHA256" +# define TLS1_RFC_RSA_WITH_AES_256_GCM_SHA384 "TLS_RSA_WITH_AES_256_GCM_SHA384" +# define TLS1_RFC_DHE_RSA_WITH_AES_128_GCM_SHA256 "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256" +# define TLS1_RFC_DHE_RSA_WITH_AES_256_GCM_SHA384 "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384" +# define TLS1_RFC_DHE_DSS_WITH_AES_128_GCM_SHA256 "TLS_DHE_DSS_WITH_AES_128_GCM_SHA256" +# define TLS1_RFC_DHE_DSS_WITH_AES_256_GCM_SHA384 "TLS_DHE_DSS_WITH_AES_256_GCM_SHA384" +# define TLS1_RFC_ADH_WITH_AES_128_GCM_SHA256 "TLS_DH_anon_WITH_AES_128_GCM_SHA256" +# define TLS1_RFC_ADH_WITH_AES_256_GCM_SHA384 "TLS_DH_anon_WITH_AES_256_GCM_SHA384" +# define TLS1_RFC_RSA_WITH_AES_128_CCM "TLS_RSA_WITH_AES_128_CCM" +# define TLS1_RFC_RSA_WITH_AES_256_CCM "TLS_RSA_WITH_AES_256_CCM" +# define TLS1_RFC_DHE_RSA_WITH_AES_128_CCM "TLS_DHE_RSA_WITH_AES_128_CCM" +# define TLS1_RFC_DHE_RSA_WITH_AES_256_CCM "TLS_DHE_RSA_WITH_AES_256_CCM" +# define TLS1_RFC_RSA_WITH_AES_128_CCM_8 "TLS_RSA_WITH_AES_128_CCM_8" +# define TLS1_RFC_RSA_WITH_AES_256_CCM_8 "TLS_RSA_WITH_AES_256_CCM_8" +# define TLS1_RFC_DHE_RSA_WITH_AES_128_CCM_8 "TLS_DHE_RSA_WITH_AES_128_CCM_8" +# define TLS1_RFC_DHE_RSA_WITH_AES_256_CCM_8 "TLS_DHE_RSA_WITH_AES_256_CCM_8" +# define TLS1_RFC_PSK_WITH_AES_128_CCM "TLS_PSK_WITH_AES_128_CCM" +# define TLS1_RFC_PSK_WITH_AES_256_CCM "TLS_PSK_WITH_AES_256_CCM" +# define TLS1_RFC_DHE_PSK_WITH_AES_128_CCM "TLS_DHE_PSK_WITH_AES_128_CCM" +# define TLS1_RFC_DHE_PSK_WITH_AES_256_CCM "TLS_DHE_PSK_WITH_AES_256_CCM" +# define TLS1_RFC_PSK_WITH_AES_128_CCM_8 "TLS_PSK_WITH_AES_128_CCM_8" +# define TLS1_RFC_PSK_WITH_AES_256_CCM_8 "TLS_PSK_WITH_AES_256_CCM_8" +# define TLS1_RFC_DHE_PSK_WITH_AES_128_CCM_8 "TLS_PSK_DHE_WITH_AES_128_CCM_8" +# define TLS1_RFC_DHE_PSK_WITH_AES_256_CCM_8 "TLS_PSK_DHE_WITH_AES_256_CCM_8" +# define TLS1_RFC_ECDHE_ECDSA_WITH_AES_128_CCM "TLS_ECDHE_ECDSA_WITH_AES_128_CCM" +# define TLS1_RFC_ECDHE_ECDSA_WITH_AES_256_CCM "TLS_ECDHE_ECDSA_WITH_AES_256_CCM" +# define TLS1_RFC_ECDHE_ECDSA_WITH_AES_128_CCM_8 "TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8" +# define TLS1_RFC_ECDHE_ECDSA_WITH_AES_256_CCM_8 "TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8" +# define TLS1_3_RFC_AES_128_GCM_SHA256 "TLS_AES_128_GCM_SHA256" +# define TLS1_3_RFC_AES_256_GCM_SHA384 "TLS_AES_256_GCM_SHA384" +# define TLS1_3_RFC_CHACHA20_POLY1305_SHA256 "TLS_CHACHA20_POLY1305_SHA256" +# define TLS1_3_RFC_AES_128_CCM_SHA256 "TLS_AES_128_CCM_SHA256" +# define TLS1_3_RFC_AES_128_CCM_8_SHA256 "TLS_AES_128_CCM_8_SHA256" +# define TLS1_RFC_ECDHE_ECDSA_WITH_NULL_SHA "TLS_ECDHE_ECDSA_WITH_NULL_SHA" +# define TLS1_RFC_ECDHE_ECDSA_WITH_DES_192_CBC3_SHA "TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA" +# define TLS1_RFC_ECDHE_ECDSA_WITH_AES_128_CBC_SHA "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA" +# define TLS1_RFC_ECDHE_ECDSA_WITH_AES_256_CBC_SHA "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA" +# define TLS1_RFC_ECDHE_RSA_WITH_NULL_SHA "TLS_ECDHE_RSA_WITH_NULL_SHA" +# define TLS1_RFC_ECDHE_RSA_WITH_DES_192_CBC3_SHA "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA" +# define TLS1_RFC_ECDHE_RSA_WITH_AES_128_CBC_SHA "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA" +# define TLS1_RFC_ECDHE_RSA_WITH_AES_256_CBC_SHA "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA" +# define TLS1_RFC_ECDH_anon_WITH_NULL_SHA "TLS_ECDH_anon_WITH_NULL_SHA" +# define TLS1_RFC_ECDH_anon_WITH_DES_192_CBC3_SHA "TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA" +# define TLS1_RFC_ECDH_anon_WITH_AES_128_CBC_SHA "TLS_ECDH_anon_WITH_AES_128_CBC_SHA" +# define TLS1_RFC_ECDH_anon_WITH_AES_256_CBC_SHA "TLS_ECDH_anon_WITH_AES_256_CBC_SHA" +# 
define TLS1_RFC_ECDHE_ECDSA_WITH_AES_128_SHA256 "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256" +# define TLS1_RFC_ECDHE_ECDSA_WITH_AES_256_SHA384 "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384" +# define TLS1_RFC_ECDHE_RSA_WITH_AES_128_SHA256 "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256" +# define TLS1_RFC_ECDHE_RSA_WITH_AES_256_SHA384 "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384" +# define TLS1_RFC_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256" +# define TLS1_RFC_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" +# define TLS1_RFC_ECDHE_RSA_WITH_AES_128_GCM_SHA256 "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" +# define TLS1_RFC_ECDHE_RSA_WITH_AES_256_GCM_SHA384 "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384" +# define TLS1_RFC_PSK_WITH_NULL_SHA "TLS_PSK_WITH_NULL_SHA" +# define TLS1_RFC_DHE_PSK_WITH_NULL_SHA "TLS_DHE_PSK_WITH_NULL_SHA" +# define TLS1_RFC_RSA_PSK_WITH_NULL_SHA "TLS_RSA_PSK_WITH_NULL_SHA" +# define TLS1_RFC_PSK_WITH_3DES_EDE_CBC_SHA "TLS_PSK_WITH_3DES_EDE_CBC_SHA" +# define TLS1_RFC_PSK_WITH_AES_128_CBC_SHA "TLS_PSK_WITH_AES_128_CBC_SHA" +# define TLS1_RFC_PSK_WITH_AES_256_CBC_SHA "TLS_PSK_WITH_AES_256_CBC_SHA" +# define TLS1_RFC_DHE_PSK_WITH_3DES_EDE_CBC_SHA "TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA" +# define TLS1_RFC_DHE_PSK_WITH_AES_128_CBC_SHA "TLS_DHE_PSK_WITH_AES_128_CBC_SHA" +# define TLS1_RFC_DHE_PSK_WITH_AES_256_CBC_SHA "TLS_DHE_PSK_WITH_AES_256_CBC_SHA" +# define TLS1_RFC_RSA_PSK_WITH_3DES_EDE_CBC_SHA "TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA" +# define TLS1_RFC_RSA_PSK_WITH_AES_128_CBC_SHA "TLS_RSA_PSK_WITH_AES_128_CBC_SHA" +# define TLS1_RFC_RSA_PSK_WITH_AES_256_CBC_SHA "TLS_RSA_PSK_WITH_AES_256_CBC_SHA" +# define TLS1_RFC_PSK_WITH_AES_128_GCM_SHA256 "TLS_PSK_WITH_AES_128_GCM_SHA256" +# define TLS1_RFC_PSK_WITH_AES_256_GCM_SHA384 "TLS_PSK_WITH_AES_256_GCM_SHA384" +# define TLS1_RFC_DHE_PSK_WITH_AES_128_GCM_SHA256 "TLS_DHE_PSK_WITH_AES_128_GCM_SHA256" +# define TLS1_RFC_DHE_PSK_WITH_AES_256_GCM_SHA384 "TLS_DHE_PSK_WITH_AES_256_GCM_SHA384" +# define TLS1_RFC_RSA_PSK_WITH_AES_128_GCM_SHA256 "TLS_RSA_PSK_WITH_AES_128_GCM_SHA256" +# define TLS1_RFC_RSA_PSK_WITH_AES_256_GCM_SHA384 "TLS_RSA_PSK_WITH_AES_256_GCM_SHA384" +# define TLS1_RFC_PSK_WITH_AES_128_CBC_SHA256 "TLS_PSK_WITH_AES_128_CBC_SHA256" +# define TLS1_RFC_PSK_WITH_AES_256_CBC_SHA384 "TLS_PSK_WITH_AES_256_CBC_SHA384" +# define TLS1_RFC_PSK_WITH_NULL_SHA256 "TLS_PSK_WITH_NULL_SHA256" +# define TLS1_RFC_PSK_WITH_NULL_SHA384 "TLS_PSK_WITH_NULL_SHA384" +# define TLS1_RFC_DHE_PSK_WITH_AES_128_CBC_SHA256 "TLS_DHE_PSK_WITH_AES_128_CBC_SHA256" +# define TLS1_RFC_DHE_PSK_WITH_AES_256_CBC_SHA384 "TLS_DHE_PSK_WITH_AES_256_CBC_SHA384" +# define TLS1_RFC_DHE_PSK_WITH_NULL_SHA256 "TLS_DHE_PSK_WITH_NULL_SHA256" +# define TLS1_RFC_DHE_PSK_WITH_NULL_SHA384 "TLS_DHE_PSK_WITH_NULL_SHA384" +# define TLS1_RFC_RSA_PSK_WITH_AES_128_CBC_SHA256 "TLS_RSA_PSK_WITH_AES_128_CBC_SHA256" +# define TLS1_RFC_RSA_PSK_WITH_AES_256_CBC_SHA384 "TLS_RSA_PSK_WITH_AES_256_CBC_SHA384" +# define TLS1_RFC_RSA_PSK_WITH_NULL_SHA256 "TLS_RSA_PSK_WITH_NULL_SHA256" +# define TLS1_RFC_RSA_PSK_WITH_NULL_SHA384 "TLS_RSA_PSK_WITH_NULL_SHA384" +# define TLS1_RFC_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA "TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA" +# define TLS1_RFC_ECDHE_PSK_WITH_AES_128_CBC_SHA "TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA" +# define TLS1_RFC_ECDHE_PSK_WITH_AES_256_CBC_SHA "TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA" +# define TLS1_RFC_ECDHE_PSK_WITH_AES_128_CBC_SHA256 "TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256" +# define TLS1_RFC_ECDHE_PSK_WITH_AES_256_CBC_SHA384 
"TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384" +# define TLS1_RFC_ECDHE_PSK_WITH_NULL_SHA "TLS_ECDHE_PSK_WITH_NULL_SHA" +# define TLS1_RFC_ECDHE_PSK_WITH_NULL_SHA256 "TLS_ECDHE_PSK_WITH_NULL_SHA256" +# define TLS1_RFC_ECDHE_PSK_WITH_NULL_SHA384 "TLS_ECDHE_PSK_WITH_NULL_SHA384" +# define TLS1_RFC_SRP_SHA_WITH_3DES_EDE_CBC_SHA "TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA" +# define TLS1_RFC_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA "TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA" +# define TLS1_RFC_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA "TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA" +# define TLS1_RFC_SRP_SHA_WITH_AES_128_CBC_SHA "TLS_SRP_SHA_WITH_AES_128_CBC_SHA" +# define TLS1_RFC_SRP_SHA_RSA_WITH_AES_128_CBC_SHA "TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA" +# define TLS1_RFC_SRP_SHA_DSS_WITH_AES_128_CBC_SHA "TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA" +# define TLS1_RFC_SRP_SHA_WITH_AES_256_CBC_SHA "TLS_SRP_SHA_WITH_AES_256_CBC_SHA" +# define TLS1_RFC_SRP_SHA_RSA_WITH_AES_256_CBC_SHA "TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA" +# define TLS1_RFC_SRP_SHA_DSS_WITH_AES_256_CBC_SHA "TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA" +# define TLS1_RFC_DHE_RSA_WITH_CHACHA20_POLY1305 "TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256" +# define TLS1_RFC_ECDHE_RSA_WITH_CHACHA20_POLY1305 "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256" +# define TLS1_RFC_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256" +# define TLS1_RFC_PSK_WITH_CHACHA20_POLY1305 "TLS_PSK_WITH_CHACHA20_POLY1305_SHA256" +# define TLS1_RFC_ECDHE_PSK_WITH_CHACHA20_POLY1305 "TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256" +# define TLS1_RFC_DHE_PSK_WITH_CHACHA20_POLY1305 "TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256" +# define TLS1_RFC_RSA_PSK_WITH_CHACHA20_POLY1305 "TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256" +# define TLS1_RFC_RSA_WITH_CAMELLIA_128_CBC_SHA256 "TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256" +# define TLS1_RFC_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 "TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256" +# define TLS1_RFC_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 "TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256" +# define TLS1_RFC_ADH_WITH_CAMELLIA_128_CBC_SHA256 "TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256" +# define TLS1_RFC_RSA_WITH_CAMELLIA_256_CBC_SHA256 "TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256" +# define TLS1_RFC_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 "TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256" +# define TLS1_RFC_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 "TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256" +# define TLS1_RFC_ADH_WITH_CAMELLIA_256_CBC_SHA256 "TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256" +# define TLS1_RFC_RSA_WITH_CAMELLIA_256_CBC_SHA "TLS_RSA_WITH_CAMELLIA_256_CBC_SHA" +# define TLS1_RFC_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA "TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA" +# define TLS1_RFC_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA "TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA" +# define TLS1_RFC_ADH_WITH_CAMELLIA_256_CBC_SHA "TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA" +# define TLS1_RFC_RSA_WITH_CAMELLIA_128_CBC_SHA "TLS_RSA_WITH_CAMELLIA_128_CBC_SHA" +# define TLS1_RFC_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA "TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA" +# define TLS1_RFC_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA "TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA" +# define TLS1_RFC_ADH_WITH_CAMELLIA_128_CBC_SHA "TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA" +# define TLS1_RFC_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 "TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256" +# define TLS1_RFC_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 "TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384" +# define TLS1_RFC_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 "TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256" +# define 
TLS1_RFC_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 "TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384" +# define TLS1_RFC_PSK_WITH_CAMELLIA_128_CBC_SHA256 "TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256" +# define TLS1_RFC_PSK_WITH_CAMELLIA_256_CBC_SHA384 "TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384" +# define TLS1_RFC_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 "TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256" +# define TLS1_RFC_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 "TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384" +# define TLS1_RFC_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 "TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256" +# define TLS1_RFC_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 "TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384" +# define TLS1_RFC_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 "TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256" +# define TLS1_RFC_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 "TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384" +# define TLS1_RFC_RSA_WITH_SEED_SHA "TLS_RSA_WITH_SEED_CBC_SHA" +# define TLS1_RFC_DHE_DSS_WITH_SEED_SHA "TLS_DHE_DSS_WITH_SEED_CBC_SHA" +# define TLS1_RFC_DHE_RSA_WITH_SEED_SHA "TLS_DHE_RSA_WITH_SEED_CBC_SHA" +# define TLS1_RFC_ADH_WITH_SEED_SHA "TLS_DH_anon_WITH_SEED_CBC_SHA" +# define TLS1_RFC_ECDHE_PSK_WITH_RC4_128_SHA "TLS_ECDHE_PSK_WITH_RC4_128_SHA" +# define TLS1_RFC_ECDH_anon_WITH_RC4_128_SHA "TLS_ECDH_anon_WITH_RC4_128_SHA" +# define TLS1_RFC_ECDHE_ECDSA_WITH_RC4_128_SHA "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA" +# define TLS1_RFC_ECDHE_RSA_WITH_RC4_128_SHA "TLS_ECDHE_RSA_WITH_RC4_128_SHA" +# define TLS1_RFC_PSK_WITH_RC4_128_SHA "TLS_PSK_WITH_RC4_128_SHA" +# define TLS1_RFC_RSA_PSK_WITH_RC4_128_SHA "TLS_RSA_PSK_WITH_RC4_128_SHA" +# define TLS1_RFC_DHE_PSK_WITH_RC4_128_SHA "TLS_DHE_PSK_WITH_RC4_128_SHA" +# define TLS1_RFC_RSA_WITH_ARIA_128_GCM_SHA256 "TLS_RSA_WITH_ARIA_128_GCM_SHA256" +# define TLS1_RFC_RSA_WITH_ARIA_256_GCM_SHA384 "TLS_RSA_WITH_ARIA_256_GCM_SHA384" +# define TLS1_RFC_DHE_RSA_WITH_ARIA_128_GCM_SHA256 "TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256" +# define TLS1_RFC_DHE_RSA_WITH_ARIA_256_GCM_SHA384 "TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384" +# define TLS1_RFC_DH_RSA_WITH_ARIA_128_GCM_SHA256 "TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256" +# define TLS1_RFC_DH_RSA_WITH_ARIA_256_GCM_SHA384 "TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384" +# define TLS1_RFC_DHE_DSS_WITH_ARIA_128_GCM_SHA256 "TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256" +# define TLS1_RFC_DHE_DSS_WITH_ARIA_256_GCM_SHA384 "TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384" +# define TLS1_RFC_DH_DSS_WITH_ARIA_128_GCM_SHA256 "TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256" +# define TLS1_RFC_DH_DSS_WITH_ARIA_256_GCM_SHA384 "TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384" +# define TLS1_RFC_DH_anon_WITH_ARIA_128_GCM_SHA256 "TLS_DH_anon_WITH_ARIA_128_GCM_SHA256" +# define TLS1_RFC_DH_anon_WITH_ARIA_256_GCM_SHA384 "TLS_DH_anon_WITH_ARIA_256_GCM_SHA384" +# define TLS1_RFC_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 "TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256" +# define TLS1_RFC_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 "TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384" +# define TLS1_RFC_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 "TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256" +# define TLS1_RFC_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 "TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384" +# define TLS1_RFC_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 "TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256" +# define TLS1_RFC_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 "TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384" +# define TLS1_RFC_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 "TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256" +# define TLS1_RFC_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 "TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384" +# define 
TLS1_RFC_PSK_WITH_ARIA_128_GCM_SHA256 "TLS_PSK_WITH_ARIA_128_GCM_SHA256" +# define TLS1_RFC_PSK_WITH_ARIA_256_GCM_SHA384 "TLS_PSK_WITH_ARIA_256_GCM_SHA384" +# define TLS1_RFC_DHE_PSK_WITH_ARIA_128_GCM_SHA256 "TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256" +# define TLS1_RFC_DHE_PSK_WITH_ARIA_256_GCM_SHA384 "TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384" +# define TLS1_RFC_RSA_PSK_WITH_ARIA_128_GCM_SHA256 "TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256" +# define TLS1_RFC_RSA_PSK_WITH_ARIA_256_GCM_SHA384 "TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384" + + +/* + * XXX Backward compatibility alert: Older versions of OpenSSL gave some DHE + * ciphers names with "EDH" instead of "DHE". Going forward, we should be + * using DHE everywhere, though we may indefinitely maintain aliases for + * users or configurations that used "EDH" + */ +# define TLS1_TXT_DHE_DSS_WITH_RC4_128_SHA "DHE-DSS-RC4-SHA" + +# define TLS1_TXT_PSK_WITH_NULL_SHA "PSK-NULL-SHA" +# define TLS1_TXT_DHE_PSK_WITH_NULL_SHA "DHE-PSK-NULL-SHA" +# define TLS1_TXT_RSA_PSK_WITH_NULL_SHA "RSA-PSK-NULL-SHA" + +/* AES ciphersuites from RFC3268 */ +# define TLS1_TXT_RSA_WITH_AES_128_SHA "AES128-SHA" +# define TLS1_TXT_DH_DSS_WITH_AES_128_SHA "DH-DSS-AES128-SHA" +# define TLS1_TXT_DH_RSA_WITH_AES_128_SHA "DH-RSA-AES128-SHA" +# define TLS1_TXT_DHE_DSS_WITH_AES_128_SHA "DHE-DSS-AES128-SHA" +# define TLS1_TXT_DHE_RSA_WITH_AES_128_SHA "DHE-RSA-AES128-SHA" +# define TLS1_TXT_ADH_WITH_AES_128_SHA "ADH-AES128-SHA" + +# define TLS1_TXT_RSA_WITH_AES_256_SHA "AES256-SHA" +# define TLS1_TXT_DH_DSS_WITH_AES_256_SHA "DH-DSS-AES256-SHA" +# define TLS1_TXT_DH_RSA_WITH_AES_256_SHA "DH-RSA-AES256-SHA" +# define TLS1_TXT_DHE_DSS_WITH_AES_256_SHA "DHE-DSS-AES256-SHA" +# define TLS1_TXT_DHE_RSA_WITH_AES_256_SHA "DHE-RSA-AES256-SHA" +# define TLS1_TXT_ADH_WITH_AES_256_SHA "ADH-AES256-SHA" + +/* ECC ciphersuites from RFC4492 */ +# define TLS1_TXT_ECDH_ECDSA_WITH_NULL_SHA "ECDH-ECDSA-NULL-SHA" +# define TLS1_TXT_ECDH_ECDSA_WITH_RC4_128_SHA "ECDH-ECDSA-RC4-SHA" +# define TLS1_TXT_ECDH_ECDSA_WITH_DES_192_CBC3_SHA "ECDH-ECDSA-DES-CBC3-SHA" +# define TLS1_TXT_ECDH_ECDSA_WITH_AES_128_CBC_SHA "ECDH-ECDSA-AES128-SHA" +# define TLS1_TXT_ECDH_ECDSA_WITH_AES_256_CBC_SHA "ECDH-ECDSA-AES256-SHA" + +# define TLS1_TXT_ECDHE_ECDSA_WITH_NULL_SHA "ECDHE-ECDSA-NULL-SHA" +# define TLS1_TXT_ECDHE_ECDSA_WITH_RC4_128_SHA "ECDHE-ECDSA-RC4-SHA" +# define TLS1_TXT_ECDHE_ECDSA_WITH_DES_192_CBC3_SHA "ECDHE-ECDSA-DES-CBC3-SHA" +# define TLS1_TXT_ECDHE_ECDSA_WITH_AES_128_CBC_SHA "ECDHE-ECDSA-AES128-SHA" +# define TLS1_TXT_ECDHE_ECDSA_WITH_AES_256_CBC_SHA "ECDHE-ECDSA-AES256-SHA" + +# define TLS1_TXT_ECDH_RSA_WITH_NULL_SHA "ECDH-RSA-NULL-SHA" +# define TLS1_TXT_ECDH_RSA_WITH_RC4_128_SHA "ECDH-RSA-RC4-SHA" +# define TLS1_TXT_ECDH_RSA_WITH_DES_192_CBC3_SHA "ECDH-RSA-DES-CBC3-SHA" +# define TLS1_TXT_ECDH_RSA_WITH_AES_128_CBC_SHA "ECDH-RSA-AES128-SHA" +# define TLS1_TXT_ECDH_RSA_WITH_AES_256_CBC_SHA "ECDH-RSA-AES256-SHA" + +# define TLS1_TXT_ECDHE_RSA_WITH_NULL_SHA "ECDHE-RSA-NULL-SHA" +# define TLS1_TXT_ECDHE_RSA_WITH_RC4_128_SHA "ECDHE-RSA-RC4-SHA" +# define TLS1_TXT_ECDHE_RSA_WITH_DES_192_CBC3_SHA "ECDHE-RSA-DES-CBC3-SHA" +# define TLS1_TXT_ECDHE_RSA_WITH_AES_128_CBC_SHA "ECDHE-RSA-AES128-SHA" +# define TLS1_TXT_ECDHE_RSA_WITH_AES_256_CBC_SHA "ECDHE-RSA-AES256-SHA" + +# define TLS1_TXT_ECDH_anon_WITH_NULL_SHA "AECDH-NULL-SHA" +# define TLS1_TXT_ECDH_anon_WITH_RC4_128_SHA "AECDH-RC4-SHA" +# define TLS1_TXT_ECDH_anon_WITH_DES_192_CBC3_SHA "AECDH-DES-CBC3-SHA" +# define TLS1_TXT_ECDH_anon_WITH_AES_128_CBC_SHA "AECDH-AES128-SHA" +# 
define TLS1_TXT_ECDH_anon_WITH_AES_256_CBC_SHA "AECDH-AES256-SHA" + +/* PSK ciphersuites from RFC 4279 */ +# define TLS1_TXT_PSK_WITH_RC4_128_SHA "PSK-RC4-SHA" +# define TLS1_TXT_PSK_WITH_3DES_EDE_CBC_SHA "PSK-3DES-EDE-CBC-SHA" +# define TLS1_TXT_PSK_WITH_AES_128_CBC_SHA "PSK-AES128-CBC-SHA" +# define TLS1_TXT_PSK_WITH_AES_256_CBC_SHA "PSK-AES256-CBC-SHA" + +# define TLS1_TXT_DHE_PSK_WITH_RC4_128_SHA "DHE-PSK-RC4-SHA" +# define TLS1_TXT_DHE_PSK_WITH_3DES_EDE_CBC_SHA "DHE-PSK-3DES-EDE-CBC-SHA" +# define TLS1_TXT_DHE_PSK_WITH_AES_128_CBC_SHA "DHE-PSK-AES128-CBC-SHA" +# define TLS1_TXT_DHE_PSK_WITH_AES_256_CBC_SHA "DHE-PSK-AES256-CBC-SHA" +# define TLS1_TXT_RSA_PSK_WITH_RC4_128_SHA "RSA-PSK-RC4-SHA" +# define TLS1_TXT_RSA_PSK_WITH_3DES_EDE_CBC_SHA "RSA-PSK-3DES-EDE-CBC-SHA" +# define TLS1_TXT_RSA_PSK_WITH_AES_128_CBC_SHA "RSA-PSK-AES128-CBC-SHA" +# define TLS1_TXT_RSA_PSK_WITH_AES_256_CBC_SHA "RSA-PSK-AES256-CBC-SHA" + +/* PSK ciphersuites from RFC 5487 */ +# define TLS1_TXT_PSK_WITH_AES_128_GCM_SHA256 "PSK-AES128-GCM-SHA256" +# define TLS1_TXT_PSK_WITH_AES_256_GCM_SHA384 "PSK-AES256-GCM-SHA384" +# define TLS1_TXT_DHE_PSK_WITH_AES_128_GCM_SHA256 "DHE-PSK-AES128-GCM-SHA256" +# define TLS1_TXT_DHE_PSK_WITH_AES_256_GCM_SHA384 "DHE-PSK-AES256-GCM-SHA384" +# define TLS1_TXT_RSA_PSK_WITH_AES_128_GCM_SHA256 "RSA-PSK-AES128-GCM-SHA256" +# define TLS1_TXT_RSA_PSK_WITH_AES_256_GCM_SHA384 "RSA-PSK-AES256-GCM-SHA384" + +# define TLS1_TXT_PSK_WITH_AES_128_CBC_SHA256 "PSK-AES128-CBC-SHA256" +# define TLS1_TXT_PSK_WITH_AES_256_CBC_SHA384 "PSK-AES256-CBC-SHA384" +# define TLS1_TXT_PSK_WITH_NULL_SHA256 "PSK-NULL-SHA256" +# define TLS1_TXT_PSK_WITH_NULL_SHA384 "PSK-NULL-SHA384" + +# define TLS1_TXT_DHE_PSK_WITH_AES_128_CBC_SHA256 "DHE-PSK-AES128-CBC-SHA256" +# define TLS1_TXT_DHE_PSK_WITH_AES_256_CBC_SHA384 "DHE-PSK-AES256-CBC-SHA384" +# define TLS1_TXT_DHE_PSK_WITH_NULL_SHA256 "DHE-PSK-NULL-SHA256" +# define TLS1_TXT_DHE_PSK_WITH_NULL_SHA384 "DHE-PSK-NULL-SHA384" + +# define TLS1_TXT_RSA_PSK_WITH_AES_128_CBC_SHA256 "RSA-PSK-AES128-CBC-SHA256" +# define TLS1_TXT_RSA_PSK_WITH_AES_256_CBC_SHA384 "RSA-PSK-AES256-CBC-SHA384" +# define TLS1_TXT_RSA_PSK_WITH_NULL_SHA256 "RSA-PSK-NULL-SHA256" +# define TLS1_TXT_RSA_PSK_WITH_NULL_SHA384 "RSA-PSK-NULL-SHA384" + +/* SRP ciphersuite from RFC 5054 */ +# define TLS1_TXT_SRP_SHA_WITH_3DES_EDE_CBC_SHA "SRP-3DES-EDE-CBC-SHA" +# define TLS1_TXT_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA "SRP-RSA-3DES-EDE-CBC-SHA" +# define TLS1_TXT_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA "SRP-DSS-3DES-EDE-CBC-SHA" +# define TLS1_TXT_SRP_SHA_WITH_AES_128_CBC_SHA "SRP-AES-128-CBC-SHA" +# define TLS1_TXT_SRP_SHA_RSA_WITH_AES_128_CBC_SHA "SRP-RSA-AES-128-CBC-SHA" +# define TLS1_TXT_SRP_SHA_DSS_WITH_AES_128_CBC_SHA "SRP-DSS-AES-128-CBC-SHA" +# define TLS1_TXT_SRP_SHA_WITH_AES_256_CBC_SHA "SRP-AES-256-CBC-SHA" +# define TLS1_TXT_SRP_SHA_RSA_WITH_AES_256_CBC_SHA "SRP-RSA-AES-256-CBC-SHA" +# define TLS1_TXT_SRP_SHA_DSS_WITH_AES_256_CBC_SHA "SRP-DSS-AES-256-CBC-SHA" + +/* Camellia ciphersuites from RFC4132 */ +# define TLS1_TXT_RSA_WITH_CAMELLIA_128_CBC_SHA "CAMELLIA128-SHA" +# define TLS1_TXT_DH_DSS_WITH_CAMELLIA_128_CBC_SHA "DH-DSS-CAMELLIA128-SHA" +# define TLS1_TXT_DH_RSA_WITH_CAMELLIA_128_CBC_SHA "DH-RSA-CAMELLIA128-SHA" +# define TLS1_TXT_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA "DHE-DSS-CAMELLIA128-SHA" +# define TLS1_TXT_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA "DHE-RSA-CAMELLIA128-SHA" +# define TLS1_TXT_ADH_WITH_CAMELLIA_128_CBC_SHA "ADH-CAMELLIA128-SHA" + +# define TLS1_TXT_RSA_WITH_CAMELLIA_256_CBC_SHA 
"CAMELLIA256-SHA" +# define TLS1_TXT_DH_DSS_WITH_CAMELLIA_256_CBC_SHA "DH-DSS-CAMELLIA256-SHA" +# define TLS1_TXT_DH_RSA_WITH_CAMELLIA_256_CBC_SHA "DH-RSA-CAMELLIA256-SHA" +# define TLS1_TXT_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA "DHE-DSS-CAMELLIA256-SHA" +# define TLS1_TXT_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA "DHE-RSA-CAMELLIA256-SHA" +# define TLS1_TXT_ADH_WITH_CAMELLIA_256_CBC_SHA "ADH-CAMELLIA256-SHA" + +/* TLS 1.2 Camellia SHA-256 ciphersuites from RFC5932 */ +# define TLS1_TXT_RSA_WITH_CAMELLIA_128_CBC_SHA256 "CAMELLIA128-SHA256" +# define TLS1_TXT_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 "DH-DSS-CAMELLIA128-SHA256" +# define TLS1_TXT_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 "DH-RSA-CAMELLIA128-SHA256" +# define TLS1_TXT_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 "DHE-DSS-CAMELLIA128-SHA256" +# define TLS1_TXT_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 "DHE-RSA-CAMELLIA128-SHA256" +# define TLS1_TXT_ADH_WITH_CAMELLIA_128_CBC_SHA256 "ADH-CAMELLIA128-SHA256" + +# define TLS1_TXT_RSA_WITH_CAMELLIA_256_CBC_SHA256 "CAMELLIA256-SHA256" +# define TLS1_TXT_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 "DH-DSS-CAMELLIA256-SHA256" +# define TLS1_TXT_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 "DH-RSA-CAMELLIA256-SHA256" +# define TLS1_TXT_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 "DHE-DSS-CAMELLIA256-SHA256" +# define TLS1_TXT_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 "DHE-RSA-CAMELLIA256-SHA256" +# define TLS1_TXT_ADH_WITH_CAMELLIA_256_CBC_SHA256 "ADH-CAMELLIA256-SHA256" + +# define TLS1_TXT_PSK_WITH_CAMELLIA_128_CBC_SHA256 "PSK-CAMELLIA128-SHA256" +# define TLS1_TXT_PSK_WITH_CAMELLIA_256_CBC_SHA384 "PSK-CAMELLIA256-SHA384" +# define TLS1_TXT_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 "DHE-PSK-CAMELLIA128-SHA256" +# define TLS1_TXT_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 "DHE-PSK-CAMELLIA256-SHA384" +# define TLS1_TXT_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 "RSA-PSK-CAMELLIA128-SHA256" +# define TLS1_TXT_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 "RSA-PSK-CAMELLIA256-SHA384" +# define TLS1_TXT_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 "ECDHE-PSK-CAMELLIA128-SHA256" +# define TLS1_TXT_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 "ECDHE-PSK-CAMELLIA256-SHA384" + +/* SEED ciphersuites from RFC4162 */ +# define TLS1_TXT_RSA_WITH_SEED_SHA "SEED-SHA" +# define TLS1_TXT_DH_DSS_WITH_SEED_SHA "DH-DSS-SEED-SHA" +# define TLS1_TXT_DH_RSA_WITH_SEED_SHA "DH-RSA-SEED-SHA" +# define TLS1_TXT_DHE_DSS_WITH_SEED_SHA "DHE-DSS-SEED-SHA" +# define TLS1_TXT_DHE_RSA_WITH_SEED_SHA "DHE-RSA-SEED-SHA" +# define TLS1_TXT_ADH_WITH_SEED_SHA "ADH-SEED-SHA" + +/* TLS v1.2 ciphersuites */ +# define TLS1_TXT_RSA_WITH_NULL_SHA256 "NULL-SHA256" +# define TLS1_TXT_RSA_WITH_AES_128_SHA256 "AES128-SHA256" +# define TLS1_TXT_RSA_WITH_AES_256_SHA256 "AES256-SHA256" +# define TLS1_TXT_DH_DSS_WITH_AES_128_SHA256 "DH-DSS-AES128-SHA256" +# define TLS1_TXT_DH_RSA_WITH_AES_128_SHA256 "DH-RSA-AES128-SHA256" +# define TLS1_TXT_DHE_DSS_WITH_AES_128_SHA256 "DHE-DSS-AES128-SHA256" +# define TLS1_TXT_DHE_RSA_WITH_AES_128_SHA256 "DHE-RSA-AES128-SHA256" +# define TLS1_TXT_DH_DSS_WITH_AES_256_SHA256 "DH-DSS-AES256-SHA256" +# define TLS1_TXT_DH_RSA_WITH_AES_256_SHA256 "DH-RSA-AES256-SHA256" +# define TLS1_TXT_DHE_DSS_WITH_AES_256_SHA256 "DHE-DSS-AES256-SHA256" +# define TLS1_TXT_DHE_RSA_WITH_AES_256_SHA256 "DHE-RSA-AES256-SHA256" +# define TLS1_TXT_ADH_WITH_AES_128_SHA256 "ADH-AES128-SHA256" +# define TLS1_TXT_ADH_WITH_AES_256_SHA256 "ADH-AES256-SHA256" + +/* TLS v1.2 GCM ciphersuites from RFC5288 */ +# define TLS1_TXT_RSA_WITH_AES_128_GCM_SHA256 "AES128-GCM-SHA256" +# define TLS1_TXT_RSA_WITH_AES_256_GCM_SHA384 
"AES256-GCM-SHA384" +# define TLS1_TXT_DHE_RSA_WITH_AES_128_GCM_SHA256 "DHE-RSA-AES128-GCM-SHA256" +# define TLS1_TXT_DHE_RSA_WITH_AES_256_GCM_SHA384 "DHE-RSA-AES256-GCM-SHA384" +# define TLS1_TXT_DH_RSA_WITH_AES_128_GCM_SHA256 "DH-RSA-AES128-GCM-SHA256" +# define TLS1_TXT_DH_RSA_WITH_AES_256_GCM_SHA384 "DH-RSA-AES256-GCM-SHA384" +# define TLS1_TXT_DHE_DSS_WITH_AES_128_GCM_SHA256 "DHE-DSS-AES128-GCM-SHA256" +# define TLS1_TXT_DHE_DSS_WITH_AES_256_GCM_SHA384 "DHE-DSS-AES256-GCM-SHA384" +# define TLS1_TXT_DH_DSS_WITH_AES_128_GCM_SHA256 "DH-DSS-AES128-GCM-SHA256" +# define TLS1_TXT_DH_DSS_WITH_AES_256_GCM_SHA384 "DH-DSS-AES256-GCM-SHA384" +# define TLS1_TXT_ADH_WITH_AES_128_GCM_SHA256 "ADH-AES128-GCM-SHA256" +# define TLS1_TXT_ADH_WITH_AES_256_GCM_SHA384 "ADH-AES256-GCM-SHA384" + +/* CCM ciphersuites from RFC6655 */ +# define TLS1_TXT_RSA_WITH_AES_128_CCM "AES128-CCM" +# define TLS1_TXT_RSA_WITH_AES_256_CCM "AES256-CCM" +# define TLS1_TXT_DHE_RSA_WITH_AES_128_CCM "DHE-RSA-AES128-CCM" +# define TLS1_TXT_DHE_RSA_WITH_AES_256_CCM "DHE-RSA-AES256-CCM" + +# define TLS1_TXT_RSA_WITH_AES_128_CCM_8 "AES128-CCM8" +# define TLS1_TXT_RSA_WITH_AES_256_CCM_8 "AES256-CCM8" +# define TLS1_TXT_DHE_RSA_WITH_AES_128_CCM_8 "DHE-RSA-AES128-CCM8" +# define TLS1_TXT_DHE_RSA_WITH_AES_256_CCM_8 "DHE-RSA-AES256-CCM8" + +# define TLS1_TXT_PSK_WITH_AES_128_CCM "PSK-AES128-CCM" +# define TLS1_TXT_PSK_WITH_AES_256_CCM "PSK-AES256-CCM" +# define TLS1_TXT_DHE_PSK_WITH_AES_128_CCM "DHE-PSK-AES128-CCM" +# define TLS1_TXT_DHE_PSK_WITH_AES_256_CCM "DHE-PSK-AES256-CCM" + +# define TLS1_TXT_PSK_WITH_AES_128_CCM_8 "PSK-AES128-CCM8" +# define TLS1_TXT_PSK_WITH_AES_256_CCM_8 "PSK-AES256-CCM8" +# define TLS1_TXT_DHE_PSK_WITH_AES_128_CCM_8 "DHE-PSK-AES128-CCM8" +# define TLS1_TXT_DHE_PSK_WITH_AES_256_CCM_8 "DHE-PSK-AES256-CCM8" + +/* CCM ciphersuites from RFC7251 */ +# define TLS1_TXT_ECDHE_ECDSA_WITH_AES_128_CCM "ECDHE-ECDSA-AES128-CCM" +# define TLS1_TXT_ECDHE_ECDSA_WITH_AES_256_CCM "ECDHE-ECDSA-AES256-CCM" +# define TLS1_TXT_ECDHE_ECDSA_WITH_AES_128_CCM_8 "ECDHE-ECDSA-AES128-CCM8" +# define TLS1_TXT_ECDHE_ECDSA_WITH_AES_256_CCM_8 "ECDHE-ECDSA-AES256-CCM8" + +/* ECDH HMAC based ciphersuites from RFC5289 */ +# define TLS1_TXT_ECDHE_ECDSA_WITH_AES_128_SHA256 "ECDHE-ECDSA-AES128-SHA256" +# define TLS1_TXT_ECDHE_ECDSA_WITH_AES_256_SHA384 "ECDHE-ECDSA-AES256-SHA384" +# define TLS1_TXT_ECDH_ECDSA_WITH_AES_128_SHA256 "ECDH-ECDSA-AES128-SHA256" +# define TLS1_TXT_ECDH_ECDSA_WITH_AES_256_SHA384 "ECDH-ECDSA-AES256-SHA384" +# define TLS1_TXT_ECDHE_RSA_WITH_AES_128_SHA256 "ECDHE-RSA-AES128-SHA256" +# define TLS1_TXT_ECDHE_RSA_WITH_AES_256_SHA384 "ECDHE-RSA-AES256-SHA384" +# define TLS1_TXT_ECDH_RSA_WITH_AES_128_SHA256 "ECDH-RSA-AES128-SHA256" +# define TLS1_TXT_ECDH_RSA_WITH_AES_256_SHA384 "ECDH-RSA-AES256-SHA384" + +/* ECDH GCM based ciphersuites from RFC5289 */ +# define TLS1_TXT_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 "ECDHE-ECDSA-AES128-GCM-SHA256" +# define TLS1_TXT_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 "ECDHE-ECDSA-AES256-GCM-SHA384" +# define TLS1_TXT_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 "ECDH-ECDSA-AES128-GCM-SHA256" +# define TLS1_TXT_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 "ECDH-ECDSA-AES256-GCM-SHA384" +# define TLS1_TXT_ECDHE_RSA_WITH_AES_128_GCM_SHA256 "ECDHE-RSA-AES128-GCM-SHA256" +# define TLS1_TXT_ECDHE_RSA_WITH_AES_256_GCM_SHA384 "ECDHE-RSA-AES256-GCM-SHA384" +# define TLS1_TXT_ECDH_RSA_WITH_AES_128_GCM_SHA256 "ECDH-RSA-AES128-GCM-SHA256" +# define TLS1_TXT_ECDH_RSA_WITH_AES_256_GCM_SHA384 "ECDH-RSA-AES256-GCM-SHA384" + +/* TLS v1.2 PSK 
GCM ciphersuites from RFC5487 */ +# define TLS1_TXT_PSK_WITH_AES_128_GCM_SHA256 "PSK-AES128-GCM-SHA256" +# define TLS1_TXT_PSK_WITH_AES_256_GCM_SHA384 "PSK-AES256-GCM-SHA384" + +/* ECDHE PSK ciphersuites from RFC 5489 */ +# define TLS1_TXT_ECDHE_PSK_WITH_RC4_128_SHA "ECDHE-PSK-RC4-SHA" +# define TLS1_TXT_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA "ECDHE-PSK-3DES-EDE-CBC-SHA" +# define TLS1_TXT_ECDHE_PSK_WITH_AES_128_CBC_SHA "ECDHE-PSK-AES128-CBC-SHA" +# define TLS1_TXT_ECDHE_PSK_WITH_AES_256_CBC_SHA "ECDHE-PSK-AES256-CBC-SHA" + +# define TLS1_TXT_ECDHE_PSK_WITH_AES_128_CBC_SHA256 "ECDHE-PSK-AES128-CBC-SHA256" +# define TLS1_TXT_ECDHE_PSK_WITH_AES_256_CBC_SHA384 "ECDHE-PSK-AES256-CBC-SHA384" + +# define TLS1_TXT_ECDHE_PSK_WITH_NULL_SHA "ECDHE-PSK-NULL-SHA" +# define TLS1_TXT_ECDHE_PSK_WITH_NULL_SHA256 "ECDHE-PSK-NULL-SHA256" +# define TLS1_TXT_ECDHE_PSK_WITH_NULL_SHA384 "ECDHE-PSK-NULL-SHA384" + +/* Camellia-CBC ciphersuites from RFC6367 */ +# define TLS1_TXT_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 "ECDHE-ECDSA-CAMELLIA128-SHA256" +# define TLS1_TXT_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 "ECDHE-ECDSA-CAMELLIA256-SHA384" +# define TLS1_TXT_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 "ECDH-ECDSA-CAMELLIA128-SHA256" +# define TLS1_TXT_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 "ECDH-ECDSA-CAMELLIA256-SHA384" +# define TLS1_TXT_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 "ECDHE-RSA-CAMELLIA128-SHA256" +# define TLS1_TXT_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 "ECDHE-RSA-CAMELLIA256-SHA384" +# define TLS1_TXT_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 "ECDH-RSA-CAMELLIA128-SHA256" +# define TLS1_TXT_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 "ECDH-RSA-CAMELLIA256-SHA384" + +/* draft-ietf-tls-chacha20-poly1305-03 */ +# define TLS1_TXT_ECDHE_RSA_WITH_CHACHA20_POLY1305 "ECDHE-RSA-CHACHA20-POLY1305" +# define TLS1_TXT_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 "ECDHE-ECDSA-CHACHA20-POLY1305" +# define TLS1_TXT_DHE_RSA_WITH_CHACHA20_POLY1305 "DHE-RSA-CHACHA20-POLY1305" +# define TLS1_TXT_PSK_WITH_CHACHA20_POLY1305 "PSK-CHACHA20-POLY1305" +# define TLS1_TXT_ECDHE_PSK_WITH_CHACHA20_POLY1305 "ECDHE-PSK-CHACHA20-POLY1305" +# define TLS1_TXT_DHE_PSK_WITH_CHACHA20_POLY1305 "DHE-PSK-CHACHA20-POLY1305" +# define TLS1_TXT_RSA_PSK_WITH_CHACHA20_POLY1305 "RSA-PSK-CHACHA20-POLY1305" + +/* Aria ciphersuites from RFC6209 */ +# define TLS1_TXT_RSA_WITH_ARIA_128_GCM_SHA256 "ARIA128-GCM-SHA256" +# define TLS1_TXT_RSA_WITH_ARIA_256_GCM_SHA384 "ARIA256-GCM-SHA384" +# define TLS1_TXT_DHE_RSA_WITH_ARIA_128_GCM_SHA256 "DHE-RSA-ARIA128-GCM-SHA256" +# define TLS1_TXT_DHE_RSA_WITH_ARIA_256_GCM_SHA384 "DHE-RSA-ARIA256-GCM-SHA384" +# define TLS1_TXT_DH_RSA_WITH_ARIA_128_GCM_SHA256 "DH-RSA-ARIA128-GCM-SHA256" +# define TLS1_TXT_DH_RSA_WITH_ARIA_256_GCM_SHA384 "DH-RSA-ARIA256-GCM-SHA384" +# define TLS1_TXT_DHE_DSS_WITH_ARIA_128_GCM_SHA256 "DHE-DSS-ARIA128-GCM-SHA256" +# define TLS1_TXT_DHE_DSS_WITH_ARIA_256_GCM_SHA384 "DHE-DSS-ARIA256-GCM-SHA384" +# define TLS1_TXT_DH_DSS_WITH_ARIA_128_GCM_SHA256 "DH-DSS-ARIA128-GCM-SHA256" +# define TLS1_TXT_DH_DSS_WITH_ARIA_256_GCM_SHA384 "DH-DSS-ARIA256-GCM-SHA384" +# define TLS1_TXT_DH_anon_WITH_ARIA_128_GCM_SHA256 "ADH-ARIA128-GCM-SHA256" +# define TLS1_TXT_DH_anon_WITH_ARIA_256_GCM_SHA384 "ADH-ARIA256-GCM-SHA384" +# define TLS1_TXT_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 "ECDHE-ECDSA-ARIA128-GCM-SHA256" +# define TLS1_TXT_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 "ECDHE-ECDSA-ARIA256-GCM-SHA384" +# define TLS1_TXT_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 "ECDH-ECDSA-ARIA128-GCM-SHA256" +# define TLS1_TXT_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 
"ECDH-ECDSA-ARIA256-GCM-SHA384" +# define TLS1_TXT_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 "ECDHE-ARIA128-GCM-SHA256" +# define TLS1_TXT_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 "ECDHE-ARIA256-GCM-SHA384" +# define TLS1_TXT_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 "ECDH-ARIA128-GCM-SHA256" +# define TLS1_TXT_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 "ECDH-ARIA256-GCM-SHA384" +# define TLS1_TXT_PSK_WITH_ARIA_128_GCM_SHA256 "PSK-ARIA128-GCM-SHA256" +# define TLS1_TXT_PSK_WITH_ARIA_256_GCM_SHA384 "PSK-ARIA256-GCM-SHA384" +# define TLS1_TXT_DHE_PSK_WITH_ARIA_128_GCM_SHA256 "DHE-PSK-ARIA128-GCM-SHA256" +# define TLS1_TXT_DHE_PSK_WITH_ARIA_256_GCM_SHA384 "DHE-PSK-ARIA256-GCM-SHA384" +# define TLS1_TXT_RSA_PSK_WITH_ARIA_128_GCM_SHA256 "RSA-PSK-ARIA128-GCM-SHA256" +# define TLS1_TXT_RSA_PSK_WITH_ARIA_256_GCM_SHA384 "RSA-PSK-ARIA256-GCM-SHA384" + +# define TLS_CT_RSA_SIGN 1 +# define TLS_CT_DSS_SIGN 2 +# define TLS_CT_RSA_FIXED_DH 3 +# define TLS_CT_DSS_FIXED_DH 4 +# define TLS_CT_ECDSA_SIGN 64 +# define TLS_CT_RSA_FIXED_ECDH 65 +# define TLS_CT_ECDSA_FIXED_ECDH 66 +# define TLS_CT_GOST01_SIGN 22 +# define TLS_CT_GOST12_SIGN 238 +# define TLS_CT_GOST12_512_SIGN 239 + +/* + * when correcting this number, correct also SSL3_CT_NUMBER in ssl3.h (see + * comment there) + */ +# define TLS_CT_NUMBER 10 + +# if defined(SSL3_CT_NUMBER) +# if TLS_CT_NUMBER != SSL3_CT_NUMBER +# error "SSL/TLS CT_NUMBER values do not match" +# endif +# endif + +# define TLS1_FINISH_MAC_LENGTH 12 + +# define TLS_MD_MAX_CONST_SIZE 22 +# define TLS_MD_CLIENT_FINISH_CONST "client finished" +# define TLS_MD_CLIENT_FINISH_CONST_SIZE 15 +# define TLS_MD_SERVER_FINISH_CONST "server finished" +# define TLS_MD_SERVER_FINISH_CONST_SIZE 15 +# define TLS_MD_KEY_EXPANSION_CONST "key expansion" +# define TLS_MD_KEY_EXPANSION_CONST_SIZE 13 +# define TLS_MD_CLIENT_WRITE_KEY_CONST "client write key" +# define TLS_MD_CLIENT_WRITE_KEY_CONST_SIZE 16 +# define TLS_MD_SERVER_WRITE_KEY_CONST "server write key" +# define TLS_MD_SERVER_WRITE_KEY_CONST_SIZE 16 +# define TLS_MD_IV_BLOCK_CONST "IV block" +# define TLS_MD_IV_BLOCK_CONST_SIZE 8 +# define TLS_MD_MASTER_SECRET_CONST "master secret" +# define TLS_MD_MASTER_SECRET_CONST_SIZE 13 +# define TLS_MD_EXTENDED_MASTER_SECRET_CONST "extended master secret" +# define TLS_MD_EXTENDED_MASTER_SECRET_CONST_SIZE 22 + +# ifdef CHARSET_EBCDIC +# undef TLS_MD_CLIENT_FINISH_CONST +/* + * client finished + */ +# define TLS_MD_CLIENT_FINISH_CONST "\x63\x6c\x69\x65\x6e\x74\x20\x66\x69\x6e\x69\x73\x68\x65\x64" + +# undef TLS_MD_SERVER_FINISH_CONST +/* + * server finished + */ +# define TLS_MD_SERVER_FINISH_CONST "\x73\x65\x72\x76\x65\x72\x20\x66\x69\x6e\x69\x73\x68\x65\x64" + +# undef TLS_MD_SERVER_WRITE_KEY_CONST +/* + * server write key + */ +# define TLS_MD_SERVER_WRITE_KEY_CONST "\x73\x65\x72\x76\x65\x72\x20\x77\x72\x69\x74\x65\x20\x6b\x65\x79" + +# undef TLS_MD_KEY_EXPANSION_CONST +/* + * key expansion + */ +# define TLS_MD_KEY_EXPANSION_CONST "\x6b\x65\x79\x20\x65\x78\x70\x61\x6e\x73\x69\x6f\x6e" + +# undef TLS_MD_CLIENT_WRITE_KEY_CONST +/* + * client write key + */ +# define TLS_MD_CLIENT_WRITE_KEY_CONST "\x63\x6c\x69\x65\x6e\x74\x20\x77\x72\x69\x74\x65\x20\x6b\x65\x79" + +# undef TLS_MD_SERVER_WRITE_KEY_CONST +/* + * server write key + */ +# define TLS_MD_SERVER_WRITE_KEY_CONST "\x73\x65\x72\x76\x65\x72\x20\x77\x72\x69\x74\x65\x20\x6b\x65\x79" + +# undef TLS_MD_IV_BLOCK_CONST +/* + * IV block + */ +# define TLS_MD_IV_BLOCK_CONST "\x49\x56\x20\x62\x6c\x6f\x63\x6b" + +# undef TLS_MD_MASTER_SECRET_CONST +/* + * master secret + */ +# 
define TLS_MD_MASTER_SECRET_CONST "\x6d\x61\x73\x74\x65\x72\x20\x73\x65\x63\x72\x65\x74" +# undef TLS_MD_EXTENDED_MASTER_SECRET_CONST +/* + * extended master secret + */ +# define TLS_MD_EXTENDED_MASTER_SECRET_CONST "\x65\x78\x74\x65\x6e\x64\x65\x64\x20\x6d\x61\x73\x74\x65\x72\x20\x73\x65\x63\x72\x65\x74" +# endif + +/* TLS Session Ticket extension struct */ +struct tls_session_ticket_ext_st { + unsigned short length; + void *data; +}; + +#ifdef __cplusplus +} +#endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/ts.h b/third_party/openssl/uos/sw_64/include/openssl/ts.h new file mode 100644 index 00000000..3b58aa52 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/ts.h @@ -0,0 +1,559 @@ +/* + * Copyright 2006-2018 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_TS_H +# define HEADER_TS_H + +# include + +# ifndef OPENSSL_NO_TS +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# ifdef __cplusplus +extern "C" { +# endif + +# include +# include + +typedef struct TS_msg_imprint_st TS_MSG_IMPRINT; +typedef struct TS_req_st TS_REQ; +typedef struct TS_accuracy_st TS_ACCURACY; +typedef struct TS_tst_info_st TS_TST_INFO; + +/* Possible values for status. */ +# define TS_STATUS_GRANTED 0 +# define TS_STATUS_GRANTED_WITH_MODS 1 +# define TS_STATUS_REJECTION 2 +# define TS_STATUS_WAITING 3 +# define TS_STATUS_REVOCATION_WARNING 4 +# define TS_STATUS_REVOCATION_NOTIFICATION 5 + +/* Possible values for failure_info. 
*/ +# define TS_INFO_BAD_ALG 0 +# define TS_INFO_BAD_REQUEST 2 +# define TS_INFO_BAD_DATA_FORMAT 5 +# define TS_INFO_TIME_NOT_AVAILABLE 14 +# define TS_INFO_UNACCEPTED_POLICY 15 +# define TS_INFO_UNACCEPTED_EXTENSION 16 +# define TS_INFO_ADD_INFO_NOT_AVAILABLE 17 +# define TS_INFO_SYSTEM_FAILURE 25 + + +typedef struct TS_status_info_st TS_STATUS_INFO; +typedef struct ESS_issuer_serial ESS_ISSUER_SERIAL; +typedef struct ESS_cert_id ESS_CERT_ID; +typedef struct ESS_signing_cert ESS_SIGNING_CERT; + +DEFINE_STACK_OF(ESS_CERT_ID) + +typedef struct ESS_cert_id_v2_st ESS_CERT_ID_V2; +typedef struct ESS_signing_cert_v2_st ESS_SIGNING_CERT_V2; + +DEFINE_STACK_OF(ESS_CERT_ID_V2) + +typedef struct TS_resp_st TS_RESP; + +TS_REQ *TS_REQ_new(void); +void TS_REQ_free(TS_REQ *a); +int i2d_TS_REQ(const TS_REQ *a, unsigned char **pp); +TS_REQ *d2i_TS_REQ(TS_REQ **a, const unsigned char **pp, long length); + +TS_REQ *TS_REQ_dup(TS_REQ *a); + +#ifndef OPENSSL_NO_STDIO +TS_REQ *d2i_TS_REQ_fp(FILE *fp, TS_REQ **a); +int i2d_TS_REQ_fp(FILE *fp, TS_REQ *a); +#endif +TS_REQ *d2i_TS_REQ_bio(BIO *fp, TS_REQ **a); +int i2d_TS_REQ_bio(BIO *fp, TS_REQ *a); + +TS_MSG_IMPRINT *TS_MSG_IMPRINT_new(void); +void TS_MSG_IMPRINT_free(TS_MSG_IMPRINT *a); +int i2d_TS_MSG_IMPRINT(const TS_MSG_IMPRINT *a, unsigned char **pp); +TS_MSG_IMPRINT *d2i_TS_MSG_IMPRINT(TS_MSG_IMPRINT **a, + const unsigned char **pp, long length); + +TS_MSG_IMPRINT *TS_MSG_IMPRINT_dup(TS_MSG_IMPRINT *a); + +#ifndef OPENSSL_NO_STDIO +TS_MSG_IMPRINT *d2i_TS_MSG_IMPRINT_fp(FILE *fp, TS_MSG_IMPRINT **a); +int i2d_TS_MSG_IMPRINT_fp(FILE *fp, TS_MSG_IMPRINT *a); +#endif +TS_MSG_IMPRINT *d2i_TS_MSG_IMPRINT_bio(BIO *bio, TS_MSG_IMPRINT **a); +int i2d_TS_MSG_IMPRINT_bio(BIO *bio, TS_MSG_IMPRINT *a); + +TS_RESP *TS_RESP_new(void); +void TS_RESP_free(TS_RESP *a); +int i2d_TS_RESP(const TS_RESP *a, unsigned char **pp); +TS_RESP *d2i_TS_RESP(TS_RESP **a, const unsigned char **pp, long length); +TS_TST_INFO *PKCS7_to_TS_TST_INFO(PKCS7 *token); +TS_RESP *TS_RESP_dup(TS_RESP *a); + +#ifndef OPENSSL_NO_STDIO +TS_RESP *d2i_TS_RESP_fp(FILE *fp, TS_RESP **a); +int i2d_TS_RESP_fp(FILE *fp, TS_RESP *a); +#endif +TS_RESP *d2i_TS_RESP_bio(BIO *bio, TS_RESP **a); +int i2d_TS_RESP_bio(BIO *bio, TS_RESP *a); + +TS_STATUS_INFO *TS_STATUS_INFO_new(void); +void TS_STATUS_INFO_free(TS_STATUS_INFO *a); +int i2d_TS_STATUS_INFO(const TS_STATUS_INFO *a, unsigned char **pp); +TS_STATUS_INFO *d2i_TS_STATUS_INFO(TS_STATUS_INFO **a, + const unsigned char **pp, long length); +TS_STATUS_INFO *TS_STATUS_INFO_dup(TS_STATUS_INFO *a); + +TS_TST_INFO *TS_TST_INFO_new(void); +void TS_TST_INFO_free(TS_TST_INFO *a); +int i2d_TS_TST_INFO(const TS_TST_INFO *a, unsigned char **pp); +TS_TST_INFO *d2i_TS_TST_INFO(TS_TST_INFO **a, const unsigned char **pp, + long length); +TS_TST_INFO *TS_TST_INFO_dup(TS_TST_INFO *a); + +#ifndef OPENSSL_NO_STDIO +TS_TST_INFO *d2i_TS_TST_INFO_fp(FILE *fp, TS_TST_INFO **a); +int i2d_TS_TST_INFO_fp(FILE *fp, TS_TST_INFO *a); +#endif +TS_TST_INFO *d2i_TS_TST_INFO_bio(BIO *bio, TS_TST_INFO **a); +int i2d_TS_TST_INFO_bio(BIO *bio, TS_TST_INFO *a); + +TS_ACCURACY *TS_ACCURACY_new(void); +void TS_ACCURACY_free(TS_ACCURACY *a); +int i2d_TS_ACCURACY(const TS_ACCURACY *a, unsigned char **pp); +TS_ACCURACY *d2i_TS_ACCURACY(TS_ACCURACY **a, const unsigned char **pp, + long length); +TS_ACCURACY *TS_ACCURACY_dup(TS_ACCURACY *a); + +ESS_ISSUER_SERIAL *ESS_ISSUER_SERIAL_new(void); +void ESS_ISSUER_SERIAL_free(ESS_ISSUER_SERIAL *a); +int i2d_ESS_ISSUER_SERIAL(const ESS_ISSUER_SERIAL *a, 
unsigned char **pp); +ESS_ISSUER_SERIAL *d2i_ESS_ISSUER_SERIAL(ESS_ISSUER_SERIAL **a, + const unsigned char **pp, + long length); +ESS_ISSUER_SERIAL *ESS_ISSUER_SERIAL_dup(ESS_ISSUER_SERIAL *a); + +ESS_CERT_ID *ESS_CERT_ID_new(void); +void ESS_CERT_ID_free(ESS_CERT_ID *a); +int i2d_ESS_CERT_ID(const ESS_CERT_ID *a, unsigned char **pp); +ESS_CERT_ID *d2i_ESS_CERT_ID(ESS_CERT_ID **a, const unsigned char **pp, + long length); +ESS_CERT_ID *ESS_CERT_ID_dup(ESS_CERT_ID *a); + +ESS_SIGNING_CERT *ESS_SIGNING_CERT_new(void); +void ESS_SIGNING_CERT_free(ESS_SIGNING_CERT *a); +int i2d_ESS_SIGNING_CERT(const ESS_SIGNING_CERT *a, unsigned char **pp); +ESS_SIGNING_CERT *d2i_ESS_SIGNING_CERT(ESS_SIGNING_CERT **a, + const unsigned char **pp, long length); +ESS_SIGNING_CERT *ESS_SIGNING_CERT_dup(ESS_SIGNING_CERT *a); + +ESS_CERT_ID_V2 *ESS_CERT_ID_V2_new(void); +void ESS_CERT_ID_V2_free(ESS_CERT_ID_V2 *a); +int i2d_ESS_CERT_ID_V2(const ESS_CERT_ID_V2 *a, unsigned char **pp); +ESS_CERT_ID_V2 *d2i_ESS_CERT_ID_V2(ESS_CERT_ID_V2 **a, + const unsigned char **pp, long length); +ESS_CERT_ID_V2 *ESS_CERT_ID_V2_dup(ESS_CERT_ID_V2 *a); + +ESS_SIGNING_CERT_V2 *ESS_SIGNING_CERT_V2_new(void); +void ESS_SIGNING_CERT_V2_free(ESS_SIGNING_CERT_V2 *a); +int i2d_ESS_SIGNING_CERT_V2(const ESS_SIGNING_CERT_V2 *a, unsigned char **pp); +ESS_SIGNING_CERT_V2 *d2i_ESS_SIGNING_CERT_V2(ESS_SIGNING_CERT_V2 **a, + const unsigned char **pp, + long length); +ESS_SIGNING_CERT_V2 *ESS_SIGNING_CERT_V2_dup(ESS_SIGNING_CERT_V2 *a); + +int TS_REQ_set_version(TS_REQ *a, long version); +long TS_REQ_get_version(const TS_REQ *a); + +int TS_STATUS_INFO_set_status(TS_STATUS_INFO *a, int i); +const ASN1_INTEGER *TS_STATUS_INFO_get0_status(const TS_STATUS_INFO *a); + +const STACK_OF(ASN1_UTF8STRING) * +TS_STATUS_INFO_get0_text(const TS_STATUS_INFO *a); + +const ASN1_BIT_STRING * +TS_STATUS_INFO_get0_failure_info(const TS_STATUS_INFO *a); + +int TS_REQ_set_msg_imprint(TS_REQ *a, TS_MSG_IMPRINT *msg_imprint); +TS_MSG_IMPRINT *TS_REQ_get_msg_imprint(TS_REQ *a); + +int TS_MSG_IMPRINT_set_algo(TS_MSG_IMPRINT *a, X509_ALGOR *alg); +X509_ALGOR *TS_MSG_IMPRINT_get_algo(TS_MSG_IMPRINT *a); + +int TS_MSG_IMPRINT_set_msg(TS_MSG_IMPRINT *a, unsigned char *d, int len); +ASN1_OCTET_STRING *TS_MSG_IMPRINT_get_msg(TS_MSG_IMPRINT *a); + +int TS_REQ_set_policy_id(TS_REQ *a, const ASN1_OBJECT *policy); +ASN1_OBJECT *TS_REQ_get_policy_id(TS_REQ *a); + +int TS_REQ_set_nonce(TS_REQ *a, const ASN1_INTEGER *nonce); +const ASN1_INTEGER *TS_REQ_get_nonce(const TS_REQ *a); + +int TS_REQ_set_cert_req(TS_REQ *a, int cert_req); +int TS_REQ_get_cert_req(const TS_REQ *a); + +STACK_OF(X509_EXTENSION) *TS_REQ_get_exts(TS_REQ *a); +void TS_REQ_ext_free(TS_REQ *a); +int TS_REQ_get_ext_count(TS_REQ *a); +int TS_REQ_get_ext_by_NID(TS_REQ *a, int nid, int lastpos); +int TS_REQ_get_ext_by_OBJ(TS_REQ *a, const ASN1_OBJECT *obj, int lastpos); +int TS_REQ_get_ext_by_critical(TS_REQ *a, int crit, int lastpos); +X509_EXTENSION *TS_REQ_get_ext(TS_REQ *a, int loc); +X509_EXTENSION *TS_REQ_delete_ext(TS_REQ *a, int loc); +int TS_REQ_add_ext(TS_REQ *a, X509_EXTENSION *ex, int loc); +void *TS_REQ_get_ext_d2i(TS_REQ *a, int nid, int *crit, int *idx); + +/* Function declarations for TS_REQ defined in ts/ts_req_print.c */ + +int TS_REQ_print_bio(BIO *bio, TS_REQ *a); + +/* Function declarations for TS_RESP defined in ts/ts_resp_utils.c */ + +int TS_RESP_set_status_info(TS_RESP *a, TS_STATUS_INFO *info); +TS_STATUS_INFO *TS_RESP_get_status_info(TS_RESP *a); + +/* Caller loses ownership of PKCS7 and 
TS_TST_INFO objects. */ +void TS_RESP_set_tst_info(TS_RESP *a, PKCS7 *p7, TS_TST_INFO *tst_info); +PKCS7 *TS_RESP_get_token(TS_RESP *a); +TS_TST_INFO *TS_RESP_get_tst_info(TS_RESP *a); + +int TS_TST_INFO_set_version(TS_TST_INFO *a, long version); +long TS_TST_INFO_get_version(const TS_TST_INFO *a); + +int TS_TST_INFO_set_policy_id(TS_TST_INFO *a, ASN1_OBJECT *policy_id); +ASN1_OBJECT *TS_TST_INFO_get_policy_id(TS_TST_INFO *a); + +int TS_TST_INFO_set_msg_imprint(TS_TST_INFO *a, TS_MSG_IMPRINT *msg_imprint); +TS_MSG_IMPRINT *TS_TST_INFO_get_msg_imprint(TS_TST_INFO *a); + +int TS_TST_INFO_set_serial(TS_TST_INFO *a, const ASN1_INTEGER *serial); +const ASN1_INTEGER *TS_TST_INFO_get_serial(const TS_TST_INFO *a); + +int TS_TST_INFO_set_time(TS_TST_INFO *a, const ASN1_GENERALIZEDTIME *gtime); +const ASN1_GENERALIZEDTIME *TS_TST_INFO_get_time(const TS_TST_INFO *a); + +int TS_TST_INFO_set_accuracy(TS_TST_INFO *a, TS_ACCURACY *accuracy); +TS_ACCURACY *TS_TST_INFO_get_accuracy(TS_TST_INFO *a); + +int TS_ACCURACY_set_seconds(TS_ACCURACY *a, const ASN1_INTEGER *seconds); +const ASN1_INTEGER *TS_ACCURACY_get_seconds(const TS_ACCURACY *a); + +int TS_ACCURACY_set_millis(TS_ACCURACY *a, const ASN1_INTEGER *millis); +const ASN1_INTEGER *TS_ACCURACY_get_millis(const TS_ACCURACY *a); + +int TS_ACCURACY_set_micros(TS_ACCURACY *a, const ASN1_INTEGER *micros); +const ASN1_INTEGER *TS_ACCURACY_get_micros(const TS_ACCURACY *a); + +int TS_TST_INFO_set_ordering(TS_TST_INFO *a, int ordering); +int TS_TST_INFO_get_ordering(const TS_TST_INFO *a); + +int TS_TST_INFO_set_nonce(TS_TST_INFO *a, const ASN1_INTEGER *nonce); +const ASN1_INTEGER *TS_TST_INFO_get_nonce(const TS_TST_INFO *a); + +int TS_TST_INFO_set_tsa(TS_TST_INFO *a, GENERAL_NAME *tsa); +GENERAL_NAME *TS_TST_INFO_get_tsa(TS_TST_INFO *a); + +STACK_OF(X509_EXTENSION) *TS_TST_INFO_get_exts(TS_TST_INFO *a); +void TS_TST_INFO_ext_free(TS_TST_INFO *a); +int TS_TST_INFO_get_ext_count(TS_TST_INFO *a); +int TS_TST_INFO_get_ext_by_NID(TS_TST_INFO *a, int nid, int lastpos); +int TS_TST_INFO_get_ext_by_OBJ(TS_TST_INFO *a, const ASN1_OBJECT *obj, + int lastpos); +int TS_TST_INFO_get_ext_by_critical(TS_TST_INFO *a, int crit, int lastpos); +X509_EXTENSION *TS_TST_INFO_get_ext(TS_TST_INFO *a, int loc); +X509_EXTENSION *TS_TST_INFO_delete_ext(TS_TST_INFO *a, int loc); +int TS_TST_INFO_add_ext(TS_TST_INFO *a, X509_EXTENSION *ex, int loc); +void *TS_TST_INFO_get_ext_d2i(TS_TST_INFO *a, int nid, int *crit, int *idx); + +/* + * Declarations related to response generation, defined in ts/ts_resp_sign.c. + */ + +/* Optional flags for response generation. */ + +/* Don't include the TSA name in response. */ +# define TS_TSA_NAME 0x01 + +/* Set ordering to true in response. */ +# define TS_ORDERING 0x02 + +/* + * Include the signer certificate and the other specified certificates in + * the ESS signing certificate attribute beside the PKCS7 signed data. + * Only the signer certificates is included by default. + */ +# define TS_ESS_CERT_ID_CHAIN 0x04 + +/* Forward declaration. */ +struct TS_resp_ctx; + +/* This must return a unique number less than 160 bits long. */ +typedef ASN1_INTEGER *(*TS_serial_cb) (struct TS_resp_ctx *, void *); + +/* + * This must return the seconds and microseconds since Jan 1, 1970 in the sec + * and usec variables allocated by the caller. Return non-zero for success + * and zero for failure. + */ +typedef int (*TS_time_cb) (struct TS_resp_ctx *, void *, long *sec, + long *usec); + +/* + * This must process the given extension. 
It can modify the TS_TST_INFO + * object of the context. Return values: !0 (processed), 0 (error, it must + * set the status info/failure info of the response). + */ +typedef int (*TS_extension_cb) (struct TS_resp_ctx *, X509_EXTENSION *, + void *); + +typedef struct TS_resp_ctx TS_RESP_CTX; + +DEFINE_STACK_OF_CONST(EVP_MD) + +/* Creates a response context that can be used for generating responses. */ +TS_RESP_CTX *TS_RESP_CTX_new(void); +void TS_RESP_CTX_free(TS_RESP_CTX *ctx); + +/* This parameter must be set. */ +int TS_RESP_CTX_set_signer_cert(TS_RESP_CTX *ctx, X509 *signer); + +/* This parameter must be set. */ +int TS_RESP_CTX_set_signer_key(TS_RESP_CTX *ctx, EVP_PKEY *key); + +int TS_RESP_CTX_set_signer_digest(TS_RESP_CTX *ctx, + const EVP_MD *signer_digest); +int TS_RESP_CTX_set_ess_cert_id_digest(TS_RESP_CTX *ctx, const EVP_MD *md); + +/* This parameter must be set. */ +int TS_RESP_CTX_set_def_policy(TS_RESP_CTX *ctx, const ASN1_OBJECT *def_policy); + +/* No additional certs are included in the response by default. */ +int TS_RESP_CTX_set_certs(TS_RESP_CTX *ctx, STACK_OF(X509) *certs); + +/* + * Adds a new acceptable policy, only the default policy is accepted by + * default. + */ +int TS_RESP_CTX_add_policy(TS_RESP_CTX *ctx, const ASN1_OBJECT *policy); + +/* + * Adds a new acceptable message digest. Note that no message digests are + * accepted by default. The md argument is shared with the caller. + */ +int TS_RESP_CTX_add_md(TS_RESP_CTX *ctx, const EVP_MD *md); + +/* Accuracy is not included by default. */ +int TS_RESP_CTX_set_accuracy(TS_RESP_CTX *ctx, + int secs, int millis, int micros); + +/* + * Clock precision digits, i.e. the number of decimal digits: '0' means sec, + * '3' msec, '6' usec, and so on. Default is 0. + */ +int TS_RESP_CTX_set_clock_precision_digits(TS_RESP_CTX *ctx, + unsigned clock_precision_digits); +/* At most we accept usec precision. */ +# define TS_MAX_CLOCK_PRECISION_DIGITS 6 + +/* Maximum status message length */ +# define TS_MAX_STATUS_LENGTH (1024 * 1024) + +/* No flags are set by default. */ +void TS_RESP_CTX_add_flags(TS_RESP_CTX *ctx, int flags); + +/* Default callback always returns a constant. */ +void TS_RESP_CTX_set_serial_cb(TS_RESP_CTX *ctx, TS_serial_cb cb, void *data); + +/* Default callback uses the gettimeofday() and gmtime() system calls. */ +void TS_RESP_CTX_set_time_cb(TS_RESP_CTX *ctx, TS_time_cb cb, void *data); + +/* + * Default callback rejects all extensions. The extension callback is called + * when the TS_TST_INFO object is already set up and not signed yet. + */ +/* FIXME: extension handling is not tested yet. */ +void TS_RESP_CTX_set_extension_cb(TS_RESP_CTX *ctx, + TS_extension_cb cb, void *data); + +/* The following methods can be used in the callbacks. */ +int TS_RESP_CTX_set_status_info(TS_RESP_CTX *ctx, + int status, const char *text); + +/* Sets the status info only if it is still TS_STATUS_GRANTED. */ +int TS_RESP_CTX_set_status_info_cond(TS_RESP_CTX *ctx, + int status, const char *text); + +int TS_RESP_CTX_add_failure_info(TS_RESP_CTX *ctx, int failure); + +/* The get methods below can be used in the extension callback. */ +TS_REQ *TS_RESP_CTX_get_request(TS_RESP_CTX *ctx); + +TS_TST_INFO *TS_RESP_CTX_get_tst_info(TS_RESP_CTX *ctx); + +/* + * Creates the signed TS_TST_INFO and puts it in TS_RESP. + * In case of errors it sets the status info properly. + * Returns NULL only in case of memory allocation/fatal error. 
+ */ +TS_RESP *TS_RESP_create_response(TS_RESP_CTX *ctx, BIO *req_bio); + +/* + * Declarations related to response verification, + * they are defined in ts/ts_resp_verify.c. + */ + +int TS_RESP_verify_signature(PKCS7 *token, STACK_OF(X509) *certs, + X509_STORE *store, X509 **signer_out); + +/* Context structure for the generic verify method. */ + +/* Verify the signer's certificate and the signature of the response. */ +# define TS_VFY_SIGNATURE (1u << 0) +/* Verify the version number of the response. */ +# define TS_VFY_VERSION (1u << 1) +/* Verify if the policy supplied by the user matches the policy of the TSA. */ +# define TS_VFY_POLICY (1u << 2) +/* + * Verify the message imprint provided by the user. This flag should not be + * specified with TS_VFY_DATA. + */ +# define TS_VFY_IMPRINT (1u << 3) +/* + * Verify the message imprint computed by the verify method from the user + * provided data and the MD algorithm of the response. This flag should not + * be specified with TS_VFY_IMPRINT. + */ +# define TS_VFY_DATA (1u << 4) +/* Verify the nonce value. */ +# define TS_VFY_NONCE (1u << 5) +/* Verify if the TSA name field matches the signer certificate. */ +# define TS_VFY_SIGNER (1u << 6) +/* Verify if the TSA name field equals to the user provided name. */ +# define TS_VFY_TSA_NAME (1u << 7) + +/* You can use the following convenience constants. */ +# define TS_VFY_ALL_IMPRINT (TS_VFY_SIGNATURE \ + | TS_VFY_VERSION \ + | TS_VFY_POLICY \ + | TS_VFY_IMPRINT \ + | TS_VFY_NONCE \ + | TS_VFY_SIGNER \ + | TS_VFY_TSA_NAME) +# define TS_VFY_ALL_DATA (TS_VFY_SIGNATURE \ + | TS_VFY_VERSION \ + | TS_VFY_POLICY \ + | TS_VFY_DATA \ + | TS_VFY_NONCE \ + | TS_VFY_SIGNER \ + | TS_VFY_TSA_NAME) + +typedef struct TS_verify_ctx TS_VERIFY_CTX; + +int TS_RESP_verify_response(TS_VERIFY_CTX *ctx, TS_RESP *response); +int TS_RESP_verify_token(TS_VERIFY_CTX *ctx, PKCS7 *token); + +/* + * Declarations related to response verification context, + */ +TS_VERIFY_CTX *TS_VERIFY_CTX_new(void); +void TS_VERIFY_CTX_init(TS_VERIFY_CTX *ctx); +void TS_VERIFY_CTX_free(TS_VERIFY_CTX *ctx); +void TS_VERIFY_CTX_cleanup(TS_VERIFY_CTX *ctx); +int TS_VERIFY_CTX_set_flags(TS_VERIFY_CTX *ctx, int f); +int TS_VERIFY_CTX_add_flags(TS_VERIFY_CTX *ctx, int f); +BIO *TS_VERIFY_CTX_set_data(TS_VERIFY_CTX *ctx, BIO *b); +unsigned char *TS_VERIFY_CTX_set_imprint(TS_VERIFY_CTX *ctx, + unsigned char *hexstr, long len); +X509_STORE *TS_VERIFY_CTX_set_store(TS_VERIFY_CTX *ctx, X509_STORE *s); +STACK_OF(X509) *TS_VERIFY_CTS_set_certs(TS_VERIFY_CTX *ctx, STACK_OF(X509) *certs); + +/*- + * If ctx is NULL, it allocates and returns a new object, otherwise + * it returns ctx. It initialises all the members as follows: + * flags = TS_VFY_ALL_IMPRINT & ~(TS_VFY_TSA_NAME | TS_VFY_SIGNATURE) + * certs = NULL + * store = NULL + * policy = policy from the request or NULL if absent (in this case + * TS_VFY_POLICY is cleared from flags as well) + * md_alg = MD algorithm from request + * imprint, imprint_len = imprint from request + * data = NULL + * nonce, nonce_len = nonce from the request or NULL if absent (in this case + * TS_VFY_NONCE is cleared from flags as well) + * tsa_name = NULL + * Important: after calling this method TS_VFY_SIGNATURE should be added! 
+ */ +TS_VERIFY_CTX *TS_REQ_to_TS_VERIFY_CTX(TS_REQ *req, TS_VERIFY_CTX *ctx); + +/* Function declarations for TS_RESP defined in ts/ts_resp_print.c */ + +int TS_RESP_print_bio(BIO *bio, TS_RESP *a); +int TS_STATUS_INFO_print_bio(BIO *bio, TS_STATUS_INFO *a); +int TS_TST_INFO_print_bio(BIO *bio, TS_TST_INFO *a); + +/* Common utility functions defined in ts/ts_lib.c */ + +int TS_ASN1_INTEGER_print_bio(BIO *bio, const ASN1_INTEGER *num); +int TS_OBJ_print_bio(BIO *bio, const ASN1_OBJECT *obj); +int TS_ext_print_bio(BIO *bio, const STACK_OF(X509_EXTENSION) *extensions); +int TS_X509_ALGOR_print_bio(BIO *bio, const X509_ALGOR *alg); +int TS_MSG_IMPRINT_print_bio(BIO *bio, TS_MSG_IMPRINT *msg); + +/* + * Function declarations for handling configuration options, defined in + * ts/ts_conf.c + */ + +X509 *TS_CONF_load_cert(const char *file); +STACK_OF(X509) *TS_CONF_load_certs(const char *file); +EVP_PKEY *TS_CONF_load_key(const char *file, const char *pass); +const char *TS_CONF_get_tsa_section(CONF *conf, const char *section); +int TS_CONF_set_serial(CONF *conf, const char *section, TS_serial_cb cb, + TS_RESP_CTX *ctx); +#ifndef OPENSSL_NO_ENGINE +int TS_CONF_set_crypto_device(CONF *conf, const char *section, + const char *device); +int TS_CONF_set_default_engine(const char *name); +#endif +int TS_CONF_set_signer_cert(CONF *conf, const char *section, + const char *cert, TS_RESP_CTX *ctx); +int TS_CONF_set_certs(CONF *conf, const char *section, const char *certs, + TS_RESP_CTX *ctx); +int TS_CONF_set_signer_key(CONF *conf, const char *section, + const char *key, const char *pass, + TS_RESP_CTX *ctx); +int TS_CONF_set_signer_digest(CONF *conf, const char *section, + const char *md, TS_RESP_CTX *ctx); +int TS_CONF_set_def_policy(CONF *conf, const char *section, + const char *policy, TS_RESP_CTX *ctx); +int TS_CONF_set_policies(CONF *conf, const char *section, TS_RESP_CTX *ctx); +int TS_CONF_set_digests(CONF *conf, const char *section, TS_RESP_CTX *ctx); +int TS_CONF_set_accuracy(CONF *conf, const char *section, TS_RESP_CTX *ctx); +int TS_CONF_set_clock_precision_digits(CONF *conf, const char *section, + TS_RESP_CTX *ctx); +int TS_CONF_set_ordering(CONF *conf, const char *section, TS_RESP_CTX *ctx); +int TS_CONF_set_tsa_name(CONF *conf, const char *section, TS_RESP_CTX *ctx); +int TS_CONF_set_ess_cert_id_chain(CONF *conf, const char *section, + TS_RESP_CTX *ctx); +int TS_CONF_set_ess_cert_id_digest(CONF *conf, const char *section, + TS_RESP_CTX *ctx); + +# ifdef __cplusplus +} +# endif +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/tserr.h b/third_party/openssl/uos/sw_64/include/openssl/tserr.h new file mode 100644 index 00000000..07f23339 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/tserr.h @@ -0,0 +1,132 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_TSERR_H +# define HEADER_TSERR_H + +# ifndef HEADER_SYMHACKS_H +# include +# endif + +# include + +# ifndef OPENSSL_NO_TS + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_TS_strings(void); + +/* + * TS function codes. 
+ */ +# define TS_F_DEF_SERIAL_CB 110 +# define TS_F_DEF_TIME_CB 111 +# define TS_F_ESS_ADD_SIGNING_CERT 112 +# define TS_F_ESS_ADD_SIGNING_CERT_V2 147 +# define TS_F_ESS_CERT_ID_NEW_INIT 113 +# define TS_F_ESS_CERT_ID_V2_NEW_INIT 156 +# define TS_F_ESS_SIGNING_CERT_NEW_INIT 114 +# define TS_F_ESS_SIGNING_CERT_V2_NEW_INIT 157 +# define TS_F_INT_TS_RESP_VERIFY_TOKEN 149 +# define TS_F_PKCS7_TO_TS_TST_INFO 148 +# define TS_F_TS_ACCURACY_SET_MICROS 115 +# define TS_F_TS_ACCURACY_SET_MILLIS 116 +# define TS_F_TS_ACCURACY_SET_SECONDS 117 +# define TS_F_TS_CHECK_IMPRINTS 100 +# define TS_F_TS_CHECK_NONCES 101 +# define TS_F_TS_CHECK_POLICY 102 +# define TS_F_TS_CHECK_SIGNING_CERTS 103 +# define TS_F_TS_CHECK_STATUS_INFO 104 +# define TS_F_TS_COMPUTE_IMPRINT 145 +# define TS_F_TS_CONF_INVALID 151 +# define TS_F_TS_CONF_LOAD_CERT 153 +# define TS_F_TS_CONF_LOAD_CERTS 154 +# define TS_F_TS_CONF_LOAD_KEY 155 +# define TS_F_TS_CONF_LOOKUP_FAIL 152 +# define TS_F_TS_CONF_SET_DEFAULT_ENGINE 146 +# define TS_F_TS_GET_STATUS_TEXT 105 +# define TS_F_TS_MSG_IMPRINT_SET_ALGO 118 +# define TS_F_TS_REQ_SET_MSG_IMPRINT 119 +# define TS_F_TS_REQ_SET_NONCE 120 +# define TS_F_TS_REQ_SET_POLICY_ID 121 +# define TS_F_TS_RESP_CREATE_RESPONSE 122 +# define TS_F_TS_RESP_CREATE_TST_INFO 123 +# define TS_F_TS_RESP_CTX_ADD_FAILURE_INFO 124 +# define TS_F_TS_RESP_CTX_ADD_MD 125 +# define TS_F_TS_RESP_CTX_ADD_POLICY 126 +# define TS_F_TS_RESP_CTX_NEW 127 +# define TS_F_TS_RESP_CTX_SET_ACCURACY 128 +# define TS_F_TS_RESP_CTX_SET_CERTS 129 +# define TS_F_TS_RESP_CTX_SET_DEF_POLICY 130 +# define TS_F_TS_RESP_CTX_SET_SIGNER_CERT 131 +# define TS_F_TS_RESP_CTX_SET_STATUS_INFO 132 +# define TS_F_TS_RESP_GET_POLICY 133 +# define TS_F_TS_RESP_SET_GENTIME_WITH_PRECISION 134 +# define TS_F_TS_RESP_SET_STATUS_INFO 135 +# define TS_F_TS_RESP_SET_TST_INFO 150 +# define TS_F_TS_RESP_SIGN 136 +# define TS_F_TS_RESP_VERIFY_SIGNATURE 106 +# define TS_F_TS_TST_INFO_SET_ACCURACY 137 +# define TS_F_TS_TST_INFO_SET_MSG_IMPRINT 138 +# define TS_F_TS_TST_INFO_SET_NONCE 139 +# define TS_F_TS_TST_INFO_SET_POLICY_ID 140 +# define TS_F_TS_TST_INFO_SET_SERIAL 141 +# define TS_F_TS_TST_INFO_SET_TIME 142 +# define TS_F_TS_TST_INFO_SET_TSA 143 +# define TS_F_TS_VERIFY 108 +# define TS_F_TS_VERIFY_CERT 109 +# define TS_F_TS_VERIFY_CTX_NEW 144 + +/* + * TS reason codes. 
+ */ +# define TS_R_BAD_PKCS7_TYPE 132 +# define TS_R_BAD_TYPE 133 +# define TS_R_CANNOT_LOAD_CERT 137 +# define TS_R_CANNOT_LOAD_KEY 138 +# define TS_R_CERTIFICATE_VERIFY_ERROR 100 +# define TS_R_COULD_NOT_SET_ENGINE 127 +# define TS_R_COULD_NOT_SET_TIME 115 +# define TS_R_DETACHED_CONTENT 134 +# define TS_R_ESS_ADD_SIGNING_CERT_ERROR 116 +# define TS_R_ESS_ADD_SIGNING_CERT_V2_ERROR 139 +# define TS_R_ESS_SIGNING_CERTIFICATE_ERROR 101 +# define TS_R_INVALID_NULL_POINTER 102 +# define TS_R_INVALID_SIGNER_CERTIFICATE_PURPOSE 117 +# define TS_R_MESSAGE_IMPRINT_MISMATCH 103 +# define TS_R_NONCE_MISMATCH 104 +# define TS_R_NONCE_NOT_RETURNED 105 +# define TS_R_NO_CONTENT 106 +# define TS_R_NO_TIME_STAMP_TOKEN 107 +# define TS_R_PKCS7_ADD_SIGNATURE_ERROR 118 +# define TS_R_PKCS7_ADD_SIGNED_ATTR_ERROR 119 +# define TS_R_PKCS7_TO_TS_TST_INFO_FAILED 129 +# define TS_R_POLICY_MISMATCH 108 +# define TS_R_PRIVATE_KEY_DOES_NOT_MATCH_CERTIFICATE 120 +# define TS_R_RESPONSE_SETUP_ERROR 121 +# define TS_R_SIGNATURE_FAILURE 109 +# define TS_R_THERE_MUST_BE_ONE_SIGNER 110 +# define TS_R_TIME_SYSCALL_ERROR 122 +# define TS_R_TOKEN_NOT_PRESENT 130 +# define TS_R_TOKEN_PRESENT 131 +# define TS_R_TSA_NAME_MISMATCH 111 +# define TS_R_TSA_UNTRUSTED 112 +# define TS_R_TST_INFO_SETUP_ERROR 123 +# define TS_R_TS_DATASIGN 124 +# define TS_R_UNACCEPTABLE_POLICY 125 +# define TS_R_UNSUPPORTED_MD_ALGORITHM 126 +# define TS_R_UNSUPPORTED_VERSION 113 +# define TS_R_VAR_BAD_VALUE 135 +# define TS_R_VAR_LOOKUP_FAILURE 136 +# define TS_R_WRONG_CONTENT_TYPE 114 + +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/txt_db.h b/third_party/openssl/uos/sw_64/include/openssl/txt_db.h new file mode 100644 index 00000000..ec981a43 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/txt_db.h @@ -0,0 +1,57 @@ +/* + * Copyright 1995-2017 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_TXT_DB_H +# define HEADER_TXT_DB_H + +# include +# include +# include +# include + +# define DB_ERROR_OK 0 +# define DB_ERROR_MALLOC 1 +# define DB_ERROR_INDEX_CLASH 2 +# define DB_ERROR_INDEX_OUT_OF_RANGE 3 +# define DB_ERROR_NO_INDEX 4 +# define DB_ERROR_INSERT_INDEX_CLASH 5 +# define DB_ERROR_WRONG_NUM_FIELDS 6 + +#ifdef __cplusplus +extern "C" { +#endif + +typedef OPENSSL_STRING *OPENSSL_PSTRING; +DEFINE_SPECIAL_STACK_OF(OPENSSL_PSTRING, OPENSSL_STRING) + +typedef struct txt_db_st { + int num_fields; + STACK_OF(OPENSSL_PSTRING) *data; + LHASH_OF(OPENSSL_STRING) **index; + int (**qual) (OPENSSL_STRING *); + long error; + long arg1; + long arg2; + OPENSSL_STRING *arg_row; +} TXT_DB; + +TXT_DB *TXT_DB_read(BIO *in, int num); +long TXT_DB_write(BIO *out, TXT_DB *db); +int TXT_DB_create_index(TXT_DB *db, int field, int (*qual) (OPENSSL_STRING *), + OPENSSL_LH_HASHFUNC hash, OPENSSL_LH_COMPFUNC cmp); +void TXT_DB_free(TXT_DB *db); +OPENSSL_STRING *TXT_DB_get_by_index(TXT_DB *db, int idx, + OPENSSL_STRING *value); +int TXT_DB_insert(TXT_DB *db, OPENSSL_STRING *value); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/ui.h b/third_party/openssl/uos/sw_64/include/openssl/ui.h new file mode 100644 index 00000000..7c721ec8 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/ui.h @@ -0,0 +1,368 @@ +/* + * Copyright 2001-2018 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_UI_H +# define HEADER_UI_H + +# include + +# if OPENSSL_API_COMPAT < 0x10100000L +# include +# endif +# include +# include +# include +# include + +/* For compatibility reasons, the macro OPENSSL_NO_UI is currently retained */ +# if OPENSSL_API_COMPAT < 0x10200000L +# ifdef OPENSSL_NO_UI_CONSOLE +# define OPENSSL_NO_UI +# endif +# endif + +# ifdef __cplusplus +extern "C" { +# endif + +/* + * All the following functions return -1 or NULL on error and in some cases + * (UI_process()) -2 if interrupted or in some other way cancelled. When + * everything is fine, they return 0, a positive value or a non-NULL pointer, + * all depending on their purpose. + */ + +/* Creators and destructor. */ +UI *UI_new(void); +UI *UI_new_method(const UI_METHOD *method); +void UI_free(UI *ui); + +/*- + The following functions are used to add strings to be printed and prompt + strings to prompt for data. The names are UI_{add,dup}__string + and UI_{add,dup}_input_boolean. + + UI_{add,dup}__string have the following meanings: + add add a text or prompt string. The pointers given to these + functions are used verbatim, no copying is done. + dup make a copy of the text or prompt string, then add the copy + to the collection of strings in the user interface. + + The function is a name for the functionality that the given + string shall be used for. It can be one of: + input use the string as data prompt. + verify use the string as verification prompt. This + is used to verify a previous input. + info use the string for informational output. + error use the string for error output. + Honestly, there's currently no difference between info and error for the + moment. 
+ + UI_{add,dup}_input_boolean have the same semantics for "add" and "dup", + and are typically used when one wants to prompt for a yes/no response. + + All of the functions in this group take a UI and a prompt string. + The string input and verify addition functions also take a flag argument, + a buffer for the result to end up with, a minimum input size and a maximum + input size (the result buffer MUST be large enough to be able to contain + the maximum number of characters). Additionally, the verify addition + functions takes another buffer to compare the result against. + The boolean input functions take an action description string (which should + be safe to ignore if the expected user action is obvious, for example with + a dialog box with an OK button and a Cancel button), a string of acceptable + characters to mean OK and to mean Cancel. The two last strings are checked + to make sure they don't have common characters. Additionally, the same + flag argument as for the string input is taken, as well as a result buffer. + The result buffer is required to be at least one byte long. Depending on + the answer, the first character from the OK or the Cancel character strings + will be stored in the first byte of the result buffer. No NUL will be + added, so the result is *not* a string. + + On success, the all return an index of the added information. That index + is useful when retrieving results with UI_get0_result(). */ +int UI_add_input_string(UI *ui, const char *prompt, int flags, + char *result_buf, int minsize, int maxsize); +int UI_dup_input_string(UI *ui, const char *prompt, int flags, + char *result_buf, int minsize, int maxsize); +int UI_add_verify_string(UI *ui, const char *prompt, int flags, + char *result_buf, int minsize, int maxsize, + const char *test_buf); +int UI_dup_verify_string(UI *ui, const char *prompt, int flags, + char *result_buf, int minsize, int maxsize, + const char *test_buf); +int UI_add_input_boolean(UI *ui, const char *prompt, const char *action_desc, + const char *ok_chars, const char *cancel_chars, + int flags, char *result_buf); +int UI_dup_input_boolean(UI *ui, const char *prompt, const char *action_desc, + const char *ok_chars, const char *cancel_chars, + int flags, char *result_buf); +int UI_add_info_string(UI *ui, const char *text); +int UI_dup_info_string(UI *ui, const char *text); +int UI_add_error_string(UI *ui, const char *text); +int UI_dup_error_string(UI *ui, const char *text); + +/* These are the possible flags. They can be or'ed together. */ +/* Use to have echoing of input */ +# define UI_INPUT_FLAG_ECHO 0x01 +/* + * Use a default password. Where that password is found is completely up to + * the application, it might for example be in the user data set with + * UI_add_user_data(). It is not recommended to have more than one input in + * each UI being marked with this flag, or the application might get + * confused. + */ +# define UI_INPUT_FLAG_DEFAULT_PWD 0x02 + +/*- + * The user of these routines may want to define flags of their own. The core + * UI won't look at those, but will pass them on to the method routines. They + * must use higher bits so they don't get confused with the UI bits above. + * UI_INPUT_FLAG_USER_BASE tells which is the lowest bit to use. A good + * example of use is this: + * + * #define MY_UI_FLAG1 (0x01 << UI_INPUT_FLAG_USER_BASE) + * +*/ +# define UI_INPUT_FLAG_USER_BASE 16 + +/*- + * The following function helps construct a prompt. 
object_desc is a + * textual short description of the object, for example "pass phrase", + * and object_name is the name of the object (might be a card name or + * a file name. + * The returned string shall always be allocated on the heap with + * OPENSSL_malloc(), and need to be free'd with OPENSSL_free(). + * + * If the ui_method doesn't contain a pointer to a user-defined prompt + * constructor, a default string is built, looking like this: + * + * "Enter {object_desc} for {object_name}:" + * + * So, if object_desc has the value "pass phrase" and object_name has + * the value "foo.key", the resulting string is: + * + * "Enter pass phrase for foo.key:" +*/ +char *UI_construct_prompt(UI *ui_method, + const char *object_desc, const char *object_name); + +/* + * The following function is used to store a pointer to user-specific data. + * Any previous such pointer will be returned and replaced. + * + * For callback purposes, this function makes a lot more sense than using + * ex_data, since the latter requires that different parts of OpenSSL or + * applications share the same ex_data index. + * + * Note that the UI_OpenSSL() method completely ignores the user data. Other + * methods may not, however. + */ +void *UI_add_user_data(UI *ui, void *user_data); +/* + * Alternatively, this function is used to duplicate the user data. + * This uses the duplicator method function. The destroy function will + * be used to free the user data in this case. + */ +int UI_dup_user_data(UI *ui, void *user_data); +/* We need a user data retrieving function as well. */ +void *UI_get0_user_data(UI *ui); + +/* Return the result associated with a prompt given with the index i. */ +const char *UI_get0_result(UI *ui, int i); +int UI_get_result_length(UI *ui, int i); + +/* When all strings have been added, process the whole thing. */ +int UI_process(UI *ui); + +/* + * Give a user interface parameterised control commands. This can be used to + * send down an integer, a data pointer or a function pointer, as well as be + * used to get information from a UI. + */ +int UI_ctrl(UI *ui, int cmd, long i, void *p, void (*f) (void)); + +/* The commands */ +/* + * Use UI_CONTROL_PRINT_ERRORS with the value 1 to have UI_process print the + * OpenSSL error stack before printing any info or added error messages and + * before any prompting. + */ +# define UI_CTRL_PRINT_ERRORS 1 +/* + * Check if a UI_process() is possible to do again with the same instance of + * a user interface. This makes UI_ctrl() return 1 if it is redoable, and 0 + * if not. + */ +# define UI_CTRL_IS_REDOABLE 2 + +/* Some methods may use extra data */ +# define UI_set_app_data(s,arg) UI_set_ex_data(s,0,arg) +# define UI_get_app_data(s) UI_get_ex_data(s,0) + +# define UI_get_ex_new_index(l, p, newf, dupf, freef) \ + CRYPTO_get_ex_new_index(CRYPTO_EX_INDEX_UI, l, p, newf, dupf, freef) +int UI_set_ex_data(UI *r, int idx, void *arg); +void *UI_get_ex_data(UI *r, int idx); + +/* Use specific methods instead of the built-in one */ +void UI_set_default_method(const UI_METHOD *meth); +const UI_METHOD *UI_get_default_method(void); +const UI_METHOD *UI_get_method(UI *ui); +const UI_METHOD *UI_set_method(UI *ui, const UI_METHOD *meth); + +# ifndef OPENSSL_NO_UI_CONSOLE + +/* The method with all the built-in thingies */ +UI_METHOD *UI_OpenSSL(void); + +# endif + +/* + * NULL method. Literally does nothing, but may serve as a placeholder + * to avoid internal default. 
+ */ +const UI_METHOD *UI_null(void); + +/* ---------- For method writers ---------- */ +/*- + A method contains a number of functions that implement the low level + of the User Interface. The functions are: + + an opener This function starts a session, maybe by opening + a channel to a tty, or by opening a window. + a writer This function is called to write a given string, + maybe to the tty, maybe as a field label in a + window. + a flusher This function is called to flush everything that + has been output so far. It can be used to actually + display a dialog box after it has been built. + a reader This function is called to read a given prompt, + maybe from the tty, maybe from a field in a + window. Note that it's called with all string + structures, not only the prompt ones, so it must + check such things itself. + a closer This function closes the session, maybe by closing + the channel to the tty, or closing the window. + + All these functions are expected to return: + + 0 on error. + 1 on success. + -1 on out-of-band events, for example if some prompting has + been canceled (by pressing Ctrl-C, for example). This is + only checked when returned by the flusher or the reader. + + The way this is used, the opener is first called, then the writer for all + strings, then the flusher, then the reader for all strings and finally the + closer. Note that if you want to prompt from a terminal or other command + line interface, the best is to have the reader also write the prompts + instead of having the writer do it. If you want to prompt from a dialog + box, the writer can be used to build up the contents of the box, and the + flusher to actually display the box and run the event loop until all data + has been given, after which the reader only grabs the given data and puts + them back into the UI strings. + + All method functions take a UI as argument. Additionally, the writer and + the reader take a UI_STRING. +*/ + +/* + * The UI_STRING type is the data structure that contains all the needed info + * about a string or a prompt, including test data for a verification prompt. + */ +typedef struct ui_string_st UI_STRING; +DEFINE_STACK_OF(UI_STRING) + +/* + * The different types of strings that are currently supported. This is only + * needed by method authors. 
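+ *
+ * As a rough sketch, a method author would typically assemble and use a
+ * custom method like this (my_reader and my_writer are hypothetical
+ * callbacks):
+ *
+ *     UI_METHOD *m = UI_create_method("my ui");
+ *     UI_method_set_reader(m, my_reader);
+ *     UI_method_set_writer(m, my_writer);
+ *     UI *ui = UI_new_method(m);
+ *     ...add strings, call UI_process(ui), read the results...
+ *     UI_free(ui);
+ *     UI_destroy_method(m);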
+ */ +enum UI_string_types { + UIT_NONE = 0, + UIT_PROMPT, /* Prompt for a string */ + UIT_VERIFY, /* Prompt for a string and verify */ + UIT_BOOLEAN, /* Prompt for a yes/no response */ + UIT_INFO, /* Send info to the user */ + UIT_ERROR /* Send an error message to the user */ +}; + +/* Create and manipulate methods */ +UI_METHOD *UI_create_method(const char *name); +void UI_destroy_method(UI_METHOD *ui_method); +int UI_method_set_opener(UI_METHOD *method, int (*opener) (UI *ui)); +int UI_method_set_writer(UI_METHOD *method, + int (*writer) (UI *ui, UI_STRING *uis)); +int UI_method_set_flusher(UI_METHOD *method, int (*flusher) (UI *ui)); +int UI_method_set_reader(UI_METHOD *method, + int (*reader) (UI *ui, UI_STRING *uis)); +int UI_method_set_closer(UI_METHOD *method, int (*closer) (UI *ui)); +int UI_method_set_data_duplicator(UI_METHOD *method, + void *(*duplicator) (UI *ui, void *ui_data), + void (*destructor)(UI *ui, void *ui_data)); +int UI_method_set_prompt_constructor(UI_METHOD *method, + char *(*prompt_constructor) (UI *ui, + const char + *object_desc, + const char + *object_name)); +int UI_method_set_ex_data(UI_METHOD *method, int idx, void *data); +int (*UI_method_get_opener(const UI_METHOD *method)) (UI *); +int (*UI_method_get_writer(const UI_METHOD *method)) (UI *, UI_STRING *); +int (*UI_method_get_flusher(const UI_METHOD *method)) (UI *); +int (*UI_method_get_reader(const UI_METHOD *method)) (UI *, UI_STRING *); +int (*UI_method_get_closer(const UI_METHOD *method)) (UI *); +char *(*UI_method_get_prompt_constructor(const UI_METHOD *method)) + (UI *, const char *, const char *); +void *(*UI_method_get_data_duplicator(const UI_METHOD *method)) (UI *, void *); +void (*UI_method_get_data_destructor(const UI_METHOD *method)) (UI *, void *); +const void *UI_method_get_ex_data(const UI_METHOD *method, int idx); + +/* + * The following functions are helpers for method writers to access relevant + * data from a UI_STRING. + */ + +/* Return type of the UI_STRING */ +enum UI_string_types UI_get_string_type(UI_STRING *uis); +/* Return input flags of the UI_STRING */ +int UI_get_input_flags(UI_STRING *uis); +/* Return the actual string to output (the prompt, info or error) */ +const char *UI_get0_output_string(UI_STRING *uis); +/* + * Return the optional action string to output (the boolean prompt + * instruction) + */ +const char *UI_get0_action_string(UI_STRING *uis); +/* Return the result of a prompt */ +const char *UI_get0_result_string(UI_STRING *uis); +int UI_get_result_string_length(UI_STRING *uis); +/* + * Return the string to test the result against. Only useful with verifies. + */ +const char *UI_get0_test_string(UI_STRING *uis); +/* Return the required minimum size of the result */ +int UI_get_result_minsize(UI_STRING *uis); +/* Return the required maximum size of the result */ +int UI_get_result_maxsize(UI_STRING *uis); +/* Set the result of a UI_STRING. 
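+ * A reader callback would typically call it once per prompt string, for
+ * example (a sketch; my_fetch_input is a hypothetical helper that fills
+ * buf with the user's answer):
+ *
+ *     static int my_reader(UI *ui, UI_STRING *uis) {
+ *         char buf[512];
+ *         if (UI_get_string_type(uis) != UIT_PROMPT)
+ *             return 1;
+ *         if (!my_fetch_input(buf, sizeof(buf)))
+ *             return 0;
+ *         return UI_set_result(ui, uis, buf) == 0;
+ *     }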
*/ +int UI_set_result(UI *ui, UI_STRING *uis, const char *result); +int UI_set_result_ex(UI *ui, UI_STRING *uis, const char *result, int len); + +/* A couple of popular utility functions */ +int UI_UTIL_read_pw_string(char *buf, int length, const char *prompt, + int verify); +int UI_UTIL_read_pw(char *buf, char *buff, int size, const char *prompt, + int verify); +UI_METHOD *UI_UTIL_wrap_read_pem_callback(pem_password_cb *cb, int rwflag); + + +# ifdef __cplusplus +} +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/uierr.h b/third_party/openssl/uos/sw_64/include/openssl/uierr.h new file mode 100644 index 00000000..bd68864d --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/uierr.h @@ -0,0 +1,65 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_UIERR_H +# define HEADER_UIERR_H + +# ifndef HEADER_SYMHACKS_H +# include +# endif + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_UI_strings(void); + +/* + * UI function codes. + */ +# define UI_F_CLOSE_CONSOLE 115 +# define UI_F_ECHO_CONSOLE 116 +# define UI_F_GENERAL_ALLOCATE_BOOLEAN 108 +# define UI_F_GENERAL_ALLOCATE_PROMPT 109 +# define UI_F_NOECHO_CONSOLE 117 +# define UI_F_OPEN_CONSOLE 114 +# define UI_F_UI_CONSTRUCT_PROMPT 121 +# define UI_F_UI_CREATE_METHOD 112 +# define UI_F_UI_CTRL 111 +# define UI_F_UI_DUP_ERROR_STRING 101 +# define UI_F_UI_DUP_INFO_STRING 102 +# define UI_F_UI_DUP_INPUT_BOOLEAN 110 +# define UI_F_UI_DUP_INPUT_STRING 103 +# define UI_F_UI_DUP_USER_DATA 118 +# define UI_F_UI_DUP_VERIFY_STRING 106 +# define UI_F_UI_GET0_RESULT 107 +# define UI_F_UI_GET_RESULT_LENGTH 119 +# define UI_F_UI_NEW_METHOD 104 +# define UI_F_UI_PROCESS 113 +# define UI_F_UI_SET_RESULT 105 +# define UI_F_UI_SET_RESULT_EX 120 + +/* + * UI reason codes. + */ +# define UI_R_COMMON_OK_AND_CANCEL_CHARACTERS 104 +# define UI_R_INDEX_TOO_LARGE 102 +# define UI_R_INDEX_TOO_SMALL 103 +# define UI_R_NO_RESULT_BUFFER 105 +# define UI_R_PROCESSING_ERROR 107 +# define UI_R_RESULT_TOO_LARGE 100 +# define UI_R_RESULT_TOO_SMALL 101 +# define UI_R_SYSASSIGN_ERROR 109 +# define UI_R_SYSDASSGN_ERROR 110 +# define UI_R_SYSQIOW_ERROR 111 +# define UI_R_UNKNOWN_CONTROL_COMMAND 106 +# define UI_R_UNKNOWN_TTYGET_ERRNO_VALUE 108 +# define UI_R_USER_DATA_DUPLICATION_UNSUPPORTED 112 + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/whrlpool.h b/third_party/openssl/uos/sw_64/include/openssl/whrlpool.h new file mode 100644 index 00000000..20ea3503 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/whrlpool.h @@ -0,0 +1,48 @@ +/* + * Copyright 2005-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_WHRLPOOL_H +# define HEADER_WHRLPOOL_H + +#include + +# ifndef OPENSSL_NO_WHIRLPOOL +# include +# include +# ifdef __cplusplus +extern "C" { +# endif + +# define WHIRLPOOL_DIGEST_LENGTH (512/8) +# define WHIRLPOOL_BBLOCK 512 +# define WHIRLPOOL_COUNTER (256/8) + +typedef struct { + union { + unsigned char c[WHIRLPOOL_DIGEST_LENGTH]; + /* double q is here to ensure 64-bit alignment */ + double q[WHIRLPOOL_DIGEST_LENGTH / sizeof(double)]; + } H; + unsigned char data[WHIRLPOOL_BBLOCK / 8]; + unsigned int bitoff; + size_t bitlen[WHIRLPOOL_COUNTER / sizeof(size_t)]; +} WHIRLPOOL_CTX; + +int WHIRLPOOL_Init(WHIRLPOOL_CTX *c); +int WHIRLPOOL_Update(WHIRLPOOL_CTX *c, const void *inp, size_t bytes); +void WHIRLPOOL_BitUpdate(WHIRLPOOL_CTX *c, const void *inp, size_t bits); +int WHIRLPOOL_Final(unsigned char *md, WHIRLPOOL_CTX *c); +unsigned char *WHIRLPOOL(const void *inp, size_t bytes, unsigned char *md); + +# ifdef __cplusplus +} +# endif +# endif + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/x509.h b/third_party/openssl/uos/sw_64/include/openssl/x509.h new file mode 100644 index 00000000..3ff86ec7 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/x509.h @@ -0,0 +1,1050 @@ +/* + * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved. + * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_X509_H +# define HEADER_X509_H + +# include +# include +# include +# include +# include +# include +# include +# include +# include + +# if OPENSSL_API_COMPAT < 0x10100000L +# include +# include +# include +# endif + +# include +# include + +#ifdef __cplusplus +extern "C" { +#endif + + +/* Flags for X509_get_signature_info() */ +/* Signature info is valid */ +# define X509_SIG_INFO_VALID 0x1 +/* Signature is suitable for TLS use */ +# define X509_SIG_INFO_TLS 0x2 + +# define X509_FILETYPE_PEM 1 +# define X509_FILETYPE_ASN1 2 +# define X509_FILETYPE_DEFAULT 3 + +# define X509v3_KU_DIGITAL_SIGNATURE 0x0080 +# define X509v3_KU_NON_REPUDIATION 0x0040 +# define X509v3_KU_KEY_ENCIPHERMENT 0x0020 +# define X509v3_KU_DATA_ENCIPHERMENT 0x0010 +# define X509v3_KU_KEY_AGREEMENT 0x0008 +# define X509v3_KU_KEY_CERT_SIGN 0x0004 +# define X509v3_KU_CRL_SIGN 0x0002 +# define X509v3_KU_ENCIPHER_ONLY 0x0001 +# define X509v3_KU_DECIPHER_ONLY 0x8000 +# define X509v3_KU_UNDEF 0xffff + +struct X509_algor_st { + ASN1_OBJECT *algorithm; + ASN1_TYPE *parameter; +} /* X509_ALGOR */ ; + +typedef STACK_OF(X509_ALGOR) X509_ALGORS; + +typedef struct X509_val_st { + ASN1_TIME *notBefore; + ASN1_TIME *notAfter; +} X509_VAL; + +typedef struct X509_sig_st X509_SIG; + +typedef struct X509_name_entry_st X509_NAME_ENTRY; + +DEFINE_STACK_OF(X509_NAME_ENTRY) + +DEFINE_STACK_OF(X509_NAME) + +# define X509_EX_V_NETSCAPE_HACK 0x8000 +# define X509_EX_V_INIT 0x0001 +typedef struct X509_extension_st X509_EXTENSION; + +typedef STACK_OF(X509_EXTENSION) X509_EXTENSIONS; + +DEFINE_STACK_OF(X509_EXTENSION) + +typedef struct x509_attributes_st X509_ATTRIBUTE; + +DEFINE_STACK_OF(X509_ATTRIBUTE) + +typedef struct X509_req_info_st X509_REQ_INFO; + +typedef struct 
X509_req_st X509_REQ; + +typedef struct x509_cert_aux_st X509_CERT_AUX; + +typedef struct x509_cinf_st X509_CINF; + +DEFINE_STACK_OF(X509) + +/* This is used for a table of trust checking functions */ + +typedef struct x509_trust_st { + int trust; + int flags; + int (*check_trust) (struct x509_trust_st *, X509 *, int); + char *name; + int arg1; + void *arg2; +} X509_TRUST; + +DEFINE_STACK_OF(X509_TRUST) + +/* standard trust ids */ + +# define X509_TRUST_DEFAULT 0 /* Only valid in purpose settings */ + +# define X509_TRUST_COMPAT 1 +# define X509_TRUST_SSL_CLIENT 2 +# define X509_TRUST_SSL_SERVER 3 +# define X509_TRUST_EMAIL 4 +# define X509_TRUST_OBJECT_SIGN 5 +# define X509_TRUST_OCSP_SIGN 6 +# define X509_TRUST_OCSP_REQUEST 7 +# define X509_TRUST_TSA 8 + +/* Keep these up to date! */ +# define X509_TRUST_MIN 1 +# define X509_TRUST_MAX 8 + +/* trust_flags values */ +# define X509_TRUST_DYNAMIC (1U << 0) +# define X509_TRUST_DYNAMIC_NAME (1U << 1) +/* No compat trust if self-signed, preempts "DO_SS" */ +# define X509_TRUST_NO_SS_COMPAT (1U << 2) +/* Compat trust if no explicit accepted trust EKUs */ +# define X509_TRUST_DO_SS_COMPAT (1U << 3) +/* Accept "anyEKU" as a wildcard trust OID */ +# define X509_TRUST_OK_ANY_EKU (1U << 4) + +/* check_trust return codes */ + +# define X509_TRUST_TRUSTED 1 +# define X509_TRUST_REJECTED 2 +# define X509_TRUST_UNTRUSTED 3 + +/* Flags for X509_print_ex() */ + +# define X509_FLAG_COMPAT 0 +# define X509_FLAG_NO_HEADER 1L +# define X509_FLAG_NO_VERSION (1L << 1) +# define X509_FLAG_NO_SERIAL (1L << 2) +# define X509_FLAG_NO_SIGNAME (1L << 3) +# define X509_FLAG_NO_ISSUER (1L << 4) +# define X509_FLAG_NO_VALIDITY (1L << 5) +# define X509_FLAG_NO_SUBJECT (1L << 6) +# define X509_FLAG_NO_PUBKEY (1L << 7) +# define X509_FLAG_NO_EXTENSIONS (1L << 8) +# define X509_FLAG_NO_SIGDUMP (1L << 9) +# define X509_FLAG_NO_AUX (1L << 10) +# define X509_FLAG_NO_ATTRIBUTES (1L << 11) +# define X509_FLAG_NO_IDS (1L << 12) + +/* Flags specific to X509_NAME_print_ex() */ + +/* The field separator information */ + +# define XN_FLAG_SEP_MASK (0xf << 16) + +# define XN_FLAG_COMPAT 0/* Traditional; use old X509_NAME_print */ +# define XN_FLAG_SEP_COMMA_PLUS (1 << 16)/* RFC2253 ,+ */ +# define XN_FLAG_SEP_CPLUS_SPC (2 << 16)/* ,+ spaced: more readable */ +# define XN_FLAG_SEP_SPLUS_SPC (3 << 16)/* ;+ spaced */ +# define XN_FLAG_SEP_MULTILINE (4 << 16)/* One line per field */ + +# define XN_FLAG_DN_REV (1 << 20)/* Reverse DN order */ + +/* How the field name is shown */ + +# define XN_FLAG_FN_MASK (0x3 << 21) + +# define XN_FLAG_FN_SN 0/* Object short name */ +# define XN_FLAG_FN_LN (1 << 21)/* Object long name */ +# define XN_FLAG_FN_OID (2 << 21)/* Always use OIDs */ +# define XN_FLAG_FN_NONE (3 << 21)/* No field names */ + +# define XN_FLAG_SPC_EQ (1 << 23)/* Put spaces round '=' */ + +/* + * This determines if we dump fields we don't recognise: RFC2253 requires + * this. 
+ */ + +# define XN_FLAG_DUMP_UNKNOWN_FIELDS (1 << 24) + +# define XN_FLAG_FN_ALIGN (1 << 25)/* Align field names to 20 + * characters */ + +/* Complete set of RFC2253 flags */ + +# define XN_FLAG_RFC2253 (ASN1_STRFLGS_RFC2253 | \ + XN_FLAG_SEP_COMMA_PLUS | \ + XN_FLAG_DN_REV | \ + XN_FLAG_FN_SN | \ + XN_FLAG_DUMP_UNKNOWN_FIELDS) + +/* readable oneline form */ + +# define XN_FLAG_ONELINE (ASN1_STRFLGS_RFC2253 | \ + ASN1_STRFLGS_ESC_QUOTE | \ + XN_FLAG_SEP_CPLUS_SPC | \ + XN_FLAG_SPC_EQ | \ + XN_FLAG_FN_SN) + +/* readable multiline form */ + +# define XN_FLAG_MULTILINE (ASN1_STRFLGS_ESC_CTRL | \ + ASN1_STRFLGS_ESC_MSB | \ + XN_FLAG_SEP_MULTILINE | \ + XN_FLAG_SPC_EQ | \ + XN_FLAG_FN_LN | \ + XN_FLAG_FN_ALIGN) + +DEFINE_STACK_OF(X509_REVOKED) + +typedef struct X509_crl_info_st X509_CRL_INFO; + +DEFINE_STACK_OF(X509_CRL) + +typedef struct private_key_st { + int version; + /* The PKCS#8 data types */ + X509_ALGOR *enc_algor; + ASN1_OCTET_STRING *enc_pkey; /* encrypted pub key */ + /* When decrypted, the following will not be NULL */ + EVP_PKEY *dec_pkey; + /* used to encrypt and decrypt */ + int key_length; + char *key_data; + int key_free; /* true if we should auto free key_data */ + /* expanded version of 'enc_algor' */ + EVP_CIPHER_INFO cipher; +} X509_PKEY; + +typedef struct X509_info_st { + X509 *x509; + X509_CRL *crl; + X509_PKEY *x_pkey; + EVP_CIPHER_INFO enc_cipher; + int enc_len; + char *enc_data; +} X509_INFO; + +DEFINE_STACK_OF(X509_INFO) + +/* + * The next 2 structures and their 8 routines are used to manipulate Netscape's + * spki structures - useful if you are writing a CA web page + */ +typedef struct Netscape_spkac_st { + X509_PUBKEY *pubkey; + ASN1_IA5STRING *challenge; /* challenge sent in atlas >= PR2 */ +} NETSCAPE_SPKAC; + +typedef struct Netscape_spki_st { + NETSCAPE_SPKAC *spkac; /* signed public key and challenge */ + X509_ALGOR sig_algor; + ASN1_BIT_STRING *signature; +} NETSCAPE_SPKI; + +/* Netscape certificate sequence structure */ +typedef struct Netscape_certificate_sequence { + ASN1_OBJECT *type; + STACK_OF(X509) *certs; +} NETSCAPE_CERT_SEQUENCE; + +/*- Unused (and iv length is wrong) +typedef struct CBCParameter_st + { + unsigned char iv[8]; + } CBC_PARAM; +*/ + +/* Password based encryption structure */ + +typedef struct PBEPARAM_st { + ASN1_OCTET_STRING *salt; + ASN1_INTEGER *iter; +} PBEPARAM; + +/* Password based encryption V2 structures */ + +typedef struct PBE2PARAM_st { + X509_ALGOR *keyfunc; + X509_ALGOR *encryption; +} PBE2PARAM; + +typedef struct PBKDF2PARAM_st { +/* Usually OCTET STRING but could be anything */ + ASN1_TYPE *salt; + ASN1_INTEGER *iter; + ASN1_INTEGER *keylength; + X509_ALGOR *prf; +} PBKDF2PARAM; + +#ifndef OPENSSL_NO_SCRYPT +typedef struct SCRYPT_PARAMS_st { + ASN1_OCTET_STRING *salt; + ASN1_INTEGER *costParameter; + ASN1_INTEGER *blockSize; + ASN1_INTEGER *parallelizationParameter; + ASN1_INTEGER *keyLength; +} SCRYPT_PARAMS; +#endif + +#ifdef __cplusplus +} +#endif + +# include +# include + +#ifdef __cplusplus +extern "C" { +#endif + +# define X509_EXT_PACK_UNKNOWN 1 +# define X509_EXT_PACK_STRING 2 + +# define X509_extract_key(x) X509_get_pubkey(x)/*****/ +# define X509_REQ_extract_key(a) X509_REQ_get_pubkey(a) +# define X509_name_cmp(a,b) X509_NAME_cmp((a),(b)) + +void X509_CRL_set_default_method(const X509_CRL_METHOD *meth); +X509_CRL_METHOD *X509_CRL_METHOD_new(int (*crl_init) (X509_CRL *crl), + int (*crl_free) (X509_CRL *crl), + int (*crl_lookup) (X509_CRL *crl, + X509_REVOKED **ret, + ASN1_INTEGER *ser, + X509_NAME *issuer), + 
int (*crl_verify) (X509_CRL *crl, + EVP_PKEY *pk)); +void X509_CRL_METHOD_free(X509_CRL_METHOD *m); + +void X509_CRL_set_meth_data(X509_CRL *crl, void *dat); +void *X509_CRL_get_meth_data(X509_CRL *crl); + +const char *X509_verify_cert_error_string(long n); + +int X509_verify(X509 *a, EVP_PKEY *r); + +int X509_REQ_verify(X509_REQ *a, EVP_PKEY *r); +int X509_CRL_verify(X509_CRL *a, EVP_PKEY *r); +int NETSCAPE_SPKI_verify(NETSCAPE_SPKI *a, EVP_PKEY *r); + +NETSCAPE_SPKI *NETSCAPE_SPKI_b64_decode(const char *str, int len); +char *NETSCAPE_SPKI_b64_encode(NETSCAPE_SPKI *x); +EVP_PKEY *NETSCAPE_SPKI_get_pubkey(NETSCAPE_SPKI *x); +int NETSCAPE_SPKI_set_pubkey(NETSCAPE_SPKI *x, EVP_PKEY *pkey); + +int NETSCAPE_SPKI_print(BIO *out, NETSCAPE_SPKI *spki); + +int X509_signature_dump(BIO *bp, const ASN1_STRING *sig, int indent); +int X509_signature_print(BIO *bp, const X509_ALGOR *alg, + const ASN1_STRING *sig); + +int X509_sign(X509 *x, EVP_PKEY *pkey, const EVP_MD *md); +int X509_sign_ctx(X509 *x, EVP_MD_CTX *ctx); +# ifndef OPENSSL_NO_OCSP +int X509_http_nbio(OCSP_REQ_CTX *rctx, X509 **pcert); +# endif +int X509_REQ_sign(X509_REQ *x, EVP_PKEY *pkey, const EVP_MD *md); +int X509_REQ_sign_ctx(X509_REQ *x, EVP_MD_CTX *ctx); +int X509_CRL_sign(X509_CRL *x, EVP_PKEY *pkey, const EVP_MD *md); +int X509_CRL_sign_ctx(X509_CRL *x, EVP_MD_CTX *ctx); +# ifndef OPENSSL_NO_OCSP +int X509_CRL_http_nbio(OCSP_REQ_CTX *rctx, X509_CRL **pcrl); +# endif +int NETSCAPE_SPKI_sign(NETSCAPE_SPKI *x, EVP_PKEY *pkey, const EVP_MD *md); + +int X509_pubkey_digest(const X509 *data, const EVP_MD *type, + unsigned char *md, unsigned int *len); +int X509_digest(const X509 *data, const EVP_MD *type, + unsigned char *md, unsigned int *len); +int X509_CRL_digest(const X509_CRL *data, const EVP_MD *type, + unsigned char *md, unsigned int *len); +int X509_REQ_digest(const X509_REQ *data, const EVP_MD *type, + unsigned char *md, unsigned int *len); +int X509_NAME_digest(const X509_NAME *data, const EVP_MD *type, + unsigned char *md, unsigned int *len); + +# ifndef OPENSSL_NO_STDIO +X509 *d2i_X509_fp(FILE *fp, X509 **x509); +int i2d_X509_fp(FILE *fp, X509 *x509); +X509_CRL *d2i_X509_CRL_fp(FILE *fp, X509_CRL **crl); +int i2d_X509_CRL_fp(FILE *fp, X509_CRL *crl); +X509_REQ *d2i_X509_REQ_fp(FILE *fp, X509_REQ **req); +int i2d_X509_REQ_fp(FILE *fp, X509_REQ *req); +# ifndef OPENSSL_NO_RSA +RSA *d2i_RSAPrivateKey_fp(FILE *fp, RSA **rsa); +int i2d_RSAPrivateKey_fp(FILE *fp, RSA *rsa); +RSA *d2i_RSAPublicKey_fp(FILE *fp, RSA **rsa); +int i2d_RSAPublicKey_fp(FILE *fp, RSA *rsa); +RSA *d2i_RSA_PUBKEY_fp(FILE *fp, RSA **rsa); +int i2d_RSA_PUBKEY_fp(FILE *fp, RSA *rsa); +# endif +# ifndef OPENSSL_NO_DSA +DSA *d2i_DSA_PUBKEY_fp(FILE *fp, DSA **dsa); +int i2d_DSA_PUBKEY_fp(FILE *fp, DSA *dsa); +DSA *d2i_DSAPrivateKey_fp(FILE *fp, DSA **dsa); +int i2d_DSAPrivateKey_fp(FILE *fp, DSA *dsa); +# endif +# ifndef OPENSSL_NO_EC +EC_KEY *d2i_EC_PUBKEY_fp(FILE *fp, EC_KEY **eckey); +int i2d_EC_PUBKEY_fp(FILE *fp, EC_KEY *eckey); +EC_KEY *d2i_ECPrivateKey_fp(FILE *fp, EC_KEY **eckey); +int i2d_ECPrivateKey_fp(FILE *fp, EC_KEY *eckey); +# endif +X509_SIG *d2i_PKCS8_fp(FILE *fp, X509_SIG **p8); +int i2d_PKCS8_fp(FILE *fp, X509_SIG *p8); +PKCS8_PRIV_KEY_INFO *d2i_PKCS8_PRIV_KEY_INFO_fp(FILE *fp, + PKCS8_PRIV_KEY_INFO **p8inf); +int i2d_PKCS8_PRIV_KEY_INFO_fp(FILE *fp, PKCS8_PRIV_KEY_INFO *p8inf); +int i2d_PKCS8PrivateKeyInfo_fp(FILE *fp, EVP_PKEY *key); +int i2d_PrivateKey_fp(FILE *fp, EVP_PKEY *pkey); +EVP_PKEY *d2i_PrivateKey_fp(FILE *fp, EVP_PKEY **a); +int 
i2d_PUBKEY_fp(FILE *fp, EVP_PKEY *pkey); +EVP_PKEY *d2i_PUBKEY_fp(FILE *fp, EVP_PKEY **a); +# endif + +X509 *d2i_X509_bio(BIO *bp, X509 **x509); +int i2d_X509_bio(BIO *bp, X509 *x509); +X509_CRL *d2i_X509_CRL_bio(BIO *bp, X509_CRL **crl); +int i2d_X509_CRL_bio(BIO *bp, X509_CRL *crl); +X509_REQ *d2i_X509_REQ_bio(BIO *bp, X509_REQ **req); +int i2d_X509_REQ_bio(BIO *bp, X509_REQ *req); +# ifndef OPENSSL_NO_RSA +RSA *d2i_RSAPrivateKey_bio(BIO *bp, RSA **rsa); +int i2d_RSAPrivateKey_bio(BIO *bp, RSA *rsa); +RSA *d2i_RSAPublicKey_bio(BIO *bp, RSA **rsa); +int i2d_RSAPublicKey_bio(BIO *bp, RSA *rsa); +RSA *d2i_RSA_PUBKEY_bio(BIO *bp, RSA **rsa); +int i2d_RSA_PUBKEY_bio(BIO *bp, RSA *rsa); +# endif +# ifndef OPENSSL_NO_DSA +DSA *d2i_DSA_PUBKEY_bio(BIO *bp, DSA **dsa); +int i2d_DSA_PUBKEY_bio(BIO *bp, DSA *dsa); +DSA *d2i_DSAPrivateKey_bio(BIO *bp, DSA **dsa); +int i2d_DSAPrivateKey_bio(BIO *bp, DSA *dsa); +# endif +# ifndef OPENSSL_NO_EC +EC_KEY *d2i_EC_PUBKEY_bio(BIO *bp, EC_KEY **eckey); +int i2d_EC_PUBKEY_bio(BIO *bp, EC_KEY *eckey); +EC_KEY *d2i_ECPrivateKey_bio(BIO *bp, EC_KEY **eckey); +int i2d_ECPrivateKey_bio(BIO *bp, EC_KEY *eckey); +# endif +X509_SIG *d2i_PKCS8_bio(BIO *bp, X509_SIG **p8); +int i2d_PKCS8_bio(BIO *bp, X509_SIG *p8); +PKCS8_PRIV_KEY_INFO *d2i_PKCS8_PRIV_KEY_INFO_bio(BIO *bp, + PKCS8_PRIV_KEY_INFO **p8inf); +int i2d_PKCS8_PRIV_KEY_INFO_bio(BIO *bp, PKCS8_PRIV_KEY_INFO *p8inf); +int i2d_PKCS8PrivateKeyInfo_bio(BIO *bp, EVP_PKEY *key); +int i2d_PrivateKey_bio(BIO *bp, EVP_PKEY *pkey); +EVP_PKEY *d2i_PrivateKey_bio(BIO *bp, EVP_PKEY **a); +int i2d_PUBKEY_bio(BIO *bp, EVP_PKEY *pkey); +EVP_PKEY *d2i_PUBKEY_bio(BIO *bp, EVP_PKEY **a); + +X509 *X509_dup(X509 *x509); +X509_ATTRIBUTE *X509_ATTRIBUTE_dup(X509_ATTRIBUTE *xa); +X509_EXTENSION *X509_EXTENSION_dup(X509_EXTENSION *ex); +X509_CRL *X509_CRL_dup(X509_CRL *crl); +X509_REVOKED *X509_REVOKED_dup(X509_REVOKED *rev); +X509_REQ *X509_REQ_dup(X509_REQ *req); +X509_ALGOR *X509_ALGOR_dup(X509_ALGOR *xn); +int X509_ALGOR_set0(X509_ALGOR *alg, ASN1_OBJECT *aobj, int ptype, + void *pval); +void X509_ALGOR_get0(const ASN1_OBJECT **paobj, int *pptype, + const void **ppval, const X509_ALGOR *algor); +void X509_ALGOR_set_md(X509_ALGOR *alg, const EVP_MD *md); +int X509_ALGOR_cmp(const X509_ALGOR *a, const X509_ALGOR *b); +int X509_ALGOR_copy(X509_ALGOR *dest, const X509_ALGOR *src); + +X509_NAME *X509_NAME_dup(X509_NAME *xn); +X509_NAME_ENTRY *X509_NAME_ENTRY_dup(X509_NAME_ENTRY *ne); + +int X509_cmp_time(const ASN1_TIME *s, time_t *t); +int X509_cmp_current_time(const ASN1_TIME *s); +ASN1_TIME *X509_time_adj(ASN1_TIME *s, long adj, time_t *t); +ASN1_TIME *X509_time_adj_ex(ASN1_TIME *s, + int offset_day, long offset_sec, time_t *t); +ASN1_TIME *X509_gmtime_adj(ASN1_TIME *s, long adj); + +const char *X509_get_default_cert_area(void); +const char *X509_get_default_cert_dir(void); +const char *X509_get_default_cert_file(void); +const char *X509_get_default_cert_dir_env(void); +const char *X509_get_default_cert_file_env(void); +const char *X509_get_default_private_dir(void); + +X509_REQ *X509_to_X509_REQ(X509 *x, EVP_PKEY *pkey, const EVP_MD *md); +X509 *X509_REQ_to_X509(X509_REQ *r, int days, EVP_PKEY *pkey); + +DECLARE_ASN1_FUNCTIONS(X509_ALGOR) +DECLARE_ASN1_ENCODE_FUNCTIONS(X509_ALGORS, X509_ALGORS, X509_ALGORS) +DECLARE_ASN1_FUNCTIONS(X509_VAL) + +DECLARE_ASN1_FUNCTIONS(X509_PUBKEY) + +int X509_PUBKEY_set(X509_PUBKEY **x, EVP_PKEY *pkey); +EVP_PKEY *X509_PUBKEY_get0(X509_PUBKEY *key); +EVP_PKEY *X509_PUBKEY_get(X509_PUBKEY *key); +int 
X509_get_pubkey_parameters(EVP_PKEY *pkey, STACK_OF(X509) *chain); +long X509_get_pathlen(X509 *x); +int i2d_PUBKEY(EVP_PKEY *a, unsigned char **pp); +EVP_PKEY *d2i_PUBKEY(EVP_PKEY **a, const unsigned char **pp, long length); +# ifndef OPENSSL_NO_RSA +int i2d_RSA_PUBKEY(RSA *a, unsigned char **pp); +RSA *d2i_RSA_PUBKEY(RSA **a, const unsigned char **pp, long length); +# endif +# ifndef OPENSSL_NO_DSA +int i2d_DSA_PUBKEY(DSA *a, unsigned char **pp); +DSA *d2i_DSA_PUBKEY(DSA **a, const unsigned char **pp, long length); +# endif +# ifndef OPENSSL_NO_EC +int i2d_EC_PUBKEY(EC_KEY *a, unsigned char **pp); +EC_KEY *d2i_EC_PUBKEY(EC_KEY **a, const unsigned char **pp, long length); +# endif + +DECLARE_ASN1_FUNCTIONS(X509_SIG) +void X509_SIG_get0(const X509_SIG *sig, const X509_ALGOR **palg, + const ASN1_OCTET_STRING **pdigest); +void X509_SIG_getm(X509_SIG *sig, X509_ALGOR **palg, + ASN1_OCTET_STRING **pdigest); + +DECLARE_ASN1_FUNCTIONS(X509_REQ_INFO) +DECLARE_ASN1_FUNCTIONS(X509_REQ) + +DECLARE_ASN1_FUNCTIONS(X509_ATTRIBUTE) +X509_ATTRIBUTE *X509_ATTRIBUTE_create(int nid, int atrtype, void *value); + +DECLARE_ASN1_FUNCTIONS(X509_EXTENSION) +DECLARE_ASN1_ENCODE_FUNCTIONS(X509_EXTENSIONS, X509_EXTENSIONS, X509_EXTENSIONS) + +DECLARE_ASN1_FUNCTIONS(X509_NAME_ENTRY) + +DECLARE_ASN1_FUNCTIONS(X509_NAME) + +int X509_NAME_set(X509_NAME **xn, X509_NAME *name); + +DECLARE_ASN1_FUNCTIONS(X509_CINF) + +DECLARE_ASN1_FUNCTIONS(X509) +DECLARE_ASN1_FUNCTIONS(X509_CERT_AUX) + +#define X509_get_ex_new_index(l, p, newf, dupf, freef) \ + CRYPTO_get_ex_new_index(CRYPTO_EX_INDEX_X509, l, p, newf, dupf, freef) +int X509_set_ex_data(X509 *r, int idx, void *arg); +void *X509_get_ex_data(X509 *r, int idx); +int i2d_X509_AUX(X509 *a, unsigned char **pp); +X509 *d2i_X509_AUX(X509 **a, const unsigned char **pp, long length); + +int i2d_re_X509_tbs(X509 *x, unsigned char **pp); + +int X509_SIG_INFO_get(const X509_SIG_INFO *siginf, int *mdnid, int *pknid, + int *secbits, uint32_t *flags); +void X509_SIG_INFO_set(X509_SIG_INFO *siginf, int mdnid, int pknid, + int secbits, uint32_t flags); + +int X509_get_signature_info(X509 *x, int *mdnid, int *pknid, int *secbits, + uint32_t *flags); + +void X509_get0_signature(const ASN1_BIT_STRING **psig, + const X509_ALGOR **palg, const X509 *x); +int X509_get_signature_nid(const X509 *x); + +int X509_trusted(const X509 *x); +int X509_alias_set1(X509 *x, const unsigned char *name, int len); +int X509_keyid_set1(X509 *x, const unsigned char *id, int len); +unsigned char *X509_alias_get0(X509 *x, int *len); +unsigned char *X509_keyid_get0(X509 *x, int *len); +int (*X509_TRUST_set_default(int (*trust) (int, X509 *, int))) (int, X509 *, + int); +int X509_TRUST_set(int *t, int trust); +int X509_add1_trust_object(X509 *x, const ASN1_OBJECT *obj); +int X509_add1_reject_object(X509 *x, const ASN1_OBJECT *obj); +void X509_trust_clear(X509 *x); +void X509_reject_clear(X509 *x); + +STACK_OF(ASN1_OBJECT) *X509_get0_trust_objects(X509 *x); +STACK_OF(ASN1_OBJECT) *X509_get0_reject_objects(X509 *x); + +DECLARE_ASN1_FUNCTIONS(X509_REVOKED) +DECLARE_ASN1_FUNCTIONS(X509_CRL_INFO) +DECLARE_ASN1_FUNCTIONS(X509_CRL) + +int X509_CRL_add0_revoked(X509_CRL *crl, X509_REVOKED *rev); +int X509_CRL_get0_by_serial(X509_CRL *crl, + X509_REVOKED **ret, ASN1_INTEGER *serial); +int X509_CRL_get0_by_cert(X509_CRL *crl, X509_REVOKED **ret, X509 *x); + +X509_PKEY *X509_PKEY_new(void); +void X509_PKEY_free(X509_PKEY *a); + +DECLARE_ASN1_FUNCTIONS(NETSCAPE_SPKI) +DECLARE_ASN1_FUNCTIONS(NETSCAPE_SPKAC) 
+DECLARE_ASN1_FUNCTIONS(NETSCAPE_CERT_SEQUENCE) + +X509_INFO *X509_INFO_new(void); +void X509_INFO_free(X509_INFO *a); +char *X509_NAME_oneline(const X509_NAME *a, char *buf, int size); + +int ASN1_verify(i2d_of_void *i2d, X509_ALGOR *algor1, + ASN1_BIT_STRING *signature, char *data, EVP_PKEY *pkey); + +int ASN1_digest(i2d_of_void *i2d, const EVP_MD *type, char *data, + unsigned char *md, unsigned int *len); + +int ASN1_sign(i2d_of_void *i2d, X509_ALGOR *algor1, + X509_ALGOR *algor2, ASN1_BIT_STRING *signature, + char *data, EVP_PKEY *pkey, const EVP_MD *type); + +int ASN1_item_digest(const ASN1_ITEM *it, const EVP_MD *type, void *data, + unsigned char *md, unsigned int *len); + +int ASN1_item_verify(const ASN1_ITEM *it, X509_ALGOR *algor1, + ASN1_BIT_STRING *signature, void *data, EVP_PKEY *pkey); + +int ASN1_item_sign(const ASN1_ITEM *it, X509_ALGOR *algor1, + X509_ALGOR *algor2, ASN1_BIT_STRING *signature, void *data, + EVP_PKEY *pkey, const EVP_MD *type); +int ASN1_item_sign_ctx(const ASN1_ITEM *it, X509_ALGOR *algor1, + X509_ALGOR *algor2, ASN1_BIT_STRING *signature, + void *asn, EVP_MD_CTX *ctx); + +long X509_get_version(const X509 *x); +int X509_set_version(X509 *x, long version); +int X509_set_serialNumber(X509 *x, ASN1_INTEGER *serial); +ASN1_INTEGER *X509_get_serialNumber(X509 *x); +const ASN1_INTEGER *X509_get0_serialNumber(const X509 *x); +int X509_set_issuer_name(X509 *x, X509_NAME *name); +X509_NAME *X509_get_issuer_name(const X509 *a); +int X509_set_subject_name(X509 *x, X509_NAME *name); +X509_NAME *X509_get_subject_name(const X509 *a); +const ASN1_TIME * X509_get0_notBefore(const X509 *x); +ASN1_TIME *X509_getm_notBefore(const X509 *x); +int X509_set1_notBefore(X509 *x, const ASN1_TIME *tm); +const ASN1_TIME *X509_get0_notAfter(const X509 *x); +ASN1_TIME *X509_getm_notAfter(const X509 *x); +int X509_set1_notAfter(X509 *x, const ASN1_TIME *tm); +int X509_set_pubkey(X509 *x, EVP_PKEY *pkey); +int X509_up_ref(X509 *x); +int X509_get_signature_type(const X509 *x); + +# if OPENSSL_API_COMPAT < 0x10100000L +# define X509_get_notBefore X509_getm_notBefore +# define X509_get_notAfter X509_getm_notAfter +# define X509_set_notBefore X509_set1_notBefore +# define X509_set_notAfter X509_set1_notAfter +#endif + + +/* + * This one is only used so that a binary form can output, as in + * i2d_X509_PUBKEY(X509_get_X509_PUBKEY(x), &buf) + */ +X509_PUBKEY *X509_get_X509_PUBKEY(const X509 *x); +const STACK_OF(X509_EXTENSION) *X509_get0_extensions(const X509 *x); +void X509_get0_uids(const X509 *x, const ASN1_BIT_STRING **piuid, + const ASN1_BIT_STRING **psuid); +const X509_ALGOR *X509_get0_tbs_sigalg(const X509 *x); + +EVP_PKEY *X509_get0_pubkey(const X509 *x); +EVP_PKEY *X509_get_pubkey(X509 *x); +ASN1_BIT_STRING *X509_get0_pubkey_bitstr(const X509 *x); +int X509_certificate_type(const X509 *x, const EVP_PKEY *pubkey); + +long X509_REQ_get_version(const X509_REQ *req); +int X509_REQ_set_version(X509_REQ *x, long version); +X509_NAME *X509_REQ_get_subject_name(const X509_REQ *req); +int X509_REQ_set_subject_name(X509_REQ *req, X509_NAME *name); +void X509_REQ_get0_signature(const X509_REQ *req, const ASN1_BIT_STRING **psig, + const X509_ALGOR **palg); +void X509_REQ_set0_signature(X509_REQ *req, ASN1_BIT_STRING *psig); +int X509_REQ_set1_signature_algo(X509_REQ *req, X509_ALGOR *palg); +int X509_REQ_get_signature_nid(const X509_REQ *req); +int i2d_re_X509_REQ_tbs(X509_REQ *req, unsigned char **pp); +int X509_REQ_set_pubkey(X509_REQ *x, EVP_PKEY *pkey); +EVP_PKEY *X509_REQ_get_pubkey(X509_REQ 
*req); +EVP_PKEY *X509_REQ_get0_pubkey(X509_REQ *req); +X509_PUBKEY *X509_REQ_get_X509_PUBKEY(X509_REQ *req); +int X509_REQ_extension_nid(int nid); +int *X509_REQ_get_extension_nids(void); +void X509_REQ_set_extension_nids(int *nids); +STACK_OF(X509_EXTENSION) *X509_REQ_get_extensions(X509_REQ *req); +int X509_REQ_add_extensions_nid(X509_REQ *req, STACK_OF(X509_EXTENSION) *exts, + int nid); +int X509_REQ_add_extensions(X509_REQ *req, STACK_OF(X509_EXTENSION) *exts); +int X509_REQ_get_attr_count(const X509_REQ *req); +int X509_REQ_get_attr_by_NID(const X509_REQ *req, int nid, int lastpos); +int X509_REQ_get_attr_by_OBJ(const X509_REQ *req, const ASN1_OBJECT *obj, + int lastpos); +X509_ATTRIBUTE *X509_REQ_get_attr(const X509_REQ *req, int loc); +X509_ATTRIBUTE *X509_REQ_delete_attr(X509_REQ *req, int loc); +int X509_REQ_add1_attr(X509_REQ *req, X509_ATTRIBUTE *attr); +int X509_REQ_add1_attr_by_OBJ(X509_REQ *req, + const ASN1_OBJECT *obj, int type, + const unsigned char *bytes, int len); +int X509_REQ_add1_attr_by_NID(X509_REQ *req, + int nid, int type, + const unsigned char *bytes, int len); +int X509_REQ_add1_attr_by_txt(X509_REQ *req, + const char *attrname, int type, + const unsigned char *bytes, int len); + +int X509_CRL_set_version(X509_CRL *x, long version); +int X509_CRL_set_issuer_name(X509_CRL *x, X509_NAME *name); +int X509_CRL_set1_lastUpdate(X509_CRL *x, const ASN1_TIME *tm); +int X509_CRL_set1_nextUpdate(X509_CRL *x, const ASN1_TIME *tm); +int X509_CRL_sort(X509_CRL *crl); +int X509_CRL_up_ref(X509_CRL *crl); + +# if OPENSSL_API_COMPAT < 0x10100000L +# define X509_CRL_set_lastUpdate X509_CRL_set1_lastUpdate +# define X509_CRL_set_nextUpdate X509_CRL_set1_nextUpdate +#endif + +long X509_CRL_get_version(const X509_CRL *crl); +const ASN1_TIME *X509_CRL_get0_lastUpdate(const X509_CRL *crl); +const ASN1_TIME *X509_CRL_get0_nextUpdate(const X509_CRL *crl); +DEPRECATEDIN_1_1_0(ASN1_TIME *X509_CRL_get_lastUpdate(X509_CRL *crl)) +DEPRECATEDIN_1_1_0(ASN1_TIME *X509_CRL_get_nextUpdate(X509_CRL *crl)) +X509_NAME *X509_CRL_get_issuer(const X509_CRL *crl); +const STACK_OF(X509_EXTENSION) *X509_CRL_get0_extensions(const X509_CRL *crl); +STACK_OF(X509_REVOKED) *X509_CRL_get_REVOKED(X509_CRL *crl); +void X509_CRL_get0_signature(const X509_CRL *crl, const ASN1_BIT_STRING **psig, + const X509_ALGOR **palg); +int X509_CRL_get_signature_nid(const X509_CRL *crl); +int i2d_re_X509_CRL_tbs(X509_CRL *req, unsigned char **pp); + +const ASN1_INTEGER *X509_REVOKED_get0_serialNumber(const X509_REVOKED *x); +int X509_REVOKED_set_serialNumber(X509_REVOKED *x, ASN1_INTEGER *serial); +const ASN1_TIME *X509_REVOKED_get0_revocationDate(const X509_REVOKED *x); +int X509_REVOKED_set_revocationDate(X509_REVOKED *r, ASN1_TIME *tm); +const STACK_OF(X509_EXTENSION) * +X509_REVOKED_get0_extensions(const X509_REVOKED *r); + +X509_CRL *X509_CRL_diff(X509_CRL *base, X509_CRL *newer, + EVP_PKEY *skey, const EVP_MD *md, unsigned int flags); + +int X509_REQ_check_private_key(X509_REQ *x509, EVP_PKEY *pkey); + +int X509_check_private_key(const X509 *x509, const EVP_PKEY *pkey); +int X509_chain_check_suiteb(int *perror_depth, + X509 *x, STACK_OF(X509) *chain, + unsigned long flags); +int X509_CRL_check_suiteb(X509_CRL *crl, EVP_PKEY *pk, unsigned long flags); +STACK_OF(X509) *X509_chain_up_ref(STACK_OF(X509) *chain); + +int X509_issuer_and_serial_cmp(const X509 *a, const X509 *b); +unsigned long X509_issuer_and_serial_hash(X509 *a); + +int X509_issuer_name_cmp(const X509 *a, const X509 *b); +unsigned long 
X509_issuer_name_hash(X509 *a); + +int X509_subject_name_cmp(const X509 *a, const X509 *b); +unsigned long X509_subject_name_hash(X509 *x); + +# ifndef OPENSSL_NO_MD5 +unsigned long X509_issuer_name_hash_old(X509 *a); +unsigned long X509_subject_name_hash_old(X509 *x); +# endif + +int X509_cmp(const X509 *a, const X509 *b); +int X509_NAME_cmp(const X509_NAME *a, const X509_NAME *b); +unsigned long X509_NAME_hash(X509_NAME *x); +unsigned long X509_NAME_hash_old(X509_NAME *x); + +int X509_CRL_cmp(const X509_CRL *a, const X509_CRL *b); +int X509_CRL_match(const X509_CRL *a, const X509_CRL *b); +int X509_aux_print(BIO *out, X509 *x, int indent); +# ifndef OPENSSL_NO_STDIO +int X509_print_ex_fp(FILE *bp, X509 *x, unsigned long nmflag, + unsigned long cflag); +int X509_print_fp(FILE *bp, X509 *x); +int X509_CRL_print_fp(FILE *bp, X509_CRL *x); +int X509_REQ_print_fp(FILE *bp, X509_REQ *req); +int X509_NAME_print_ex_fp(FILE *fp, const X509_NAME *nm, int indent, + unsigned long flags); +# endif + +int X509_NAME_print(BIO *bp, const X509_NAME *name, int obase); +int X509_NAME_print_ex(BIO *out, const X509_NAME *nm, int indent, + unsigned long flags); +int X509_print_ex(BIO *bp, X509 *x, unsigned long nmflag, + unsigned long cflag); +int X509_print(BIO *bp, X509 *x); +int X509_ocspid_print(BIO *bp, X509 *x); +int X509_CRL_print_ex(BIO *out, X509_CRL *x, unsigned long nmflag); +int X509_CRL_print(BIO *bp, X509_CRL *x); +int X509_REQ_print_ex(BIO *bp, X509_REQ *x, unsigned long nmflag, + unsigned long cflag); +int X509_REQ_print(BIO *bp, X509_REQ *req); + +int X509_NAME_entry_count(const X509_NAME *name); +int X509_NAME_get_text_by_NID(X509_NAME *name, int nid, char *buf, int len); +int X509_NAME_get_text_by_OBJ(X509_NAME *name, const ASN1_OBJECT *obj, + char *buf, int len); + +/* + * NOTE: you should be passing -1, not 0 as lastpos. The functions that use + * lastpos, search after that position on. 
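+ *
+ * For example, to visit every commonName entry in a name (sketch):
+ *
+ *     int idx = -1;
+ *     while ((idx = X509_NAME_get_index_by_NID(name, NID_commonName, idx)) >= 0) {
+ *         X509_NAME_ENTRY *e = X509_NAME_get_entry(name, idx);
+ *         ...use X509_NAME_ENTRY_get_data(e)...
+ *     }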
+ */ +int X509_NAME_get_index_by_NID(X509_NAME *name, int nid, int lastpos); +int X509_NAME_get_index_by_OBJ(X509_NAME *name, const ASN1_OBJECT *obj, + int lastpos); +X509_NAME_ENTRY *X509_NAME_get_entry(const X509_NAME *name, int loc); +X509_NAME_ENTRY *X509_NAME_delete_entry(X509_NAME *name, int loc); +int X509_NAME_add_entry(X509_NAME *name, const X509_NAME_ENTRY *ne, + int loc, int set); +int X509_NAME_add_entry_by_OBJ(X509_NAME *name, const ASN1_OBJECT *obj, int type, + const unsigned char *bytes, int len, int loc, + int set); +int X509_NAME_add_entry_by_NID(X509_NAME *name, int nid, int type, + const unsigned char *bytes, int len, int loc, + int set); +X509_NAME_ENTRY *X509_NAME_ENTRY_create_by_txt(X509_NAME_ENTRY **ne, + const char *field, int type, + const unsigned char *bytes, + int len); +X509_NAME_ENTRY *X509_NAME_ENTRY_create_by_NID(X509_NAME_ENTRY **ne, int nid, + int type, + const unsigned char *bytes, + int len); +int X509_NAME_add_entry_by_txt(X509_NAME *name, const char *field, int type, + const unsigned char *bytes, int len, int loc, + int set); +X509_NAME_ENTRY *X509_NAME_ENTRY_create_by_OBJ(X509_NAME_ENTRY **ne, + const ASN1_OBJECT *obj, int type, + const unsigned char *bytes, + int len); +int X509_NAME_ENTRY_set_object(X509_NAME_ENTRY *ne, const ASN1_OBJECT *obj); +int X509_NAME_ENTRY_set_data(X509_NAME_ENTRY *ne, int type, + const unsigned char *bytes, int len); +ASN1_OBJECT *X509_NAME_ENTRY_get_object(const X509_NAME_ENTRY *ne); +ASN1_STRING * X509_NAME_ENTRY_get_data(const X509_NAME_ENTRY *ne); +int X509_NAME_ENTRY_set(const X509_NAME_ENTRY *ne); + +int X509_NAME_get0_der(X509_NAME *nm, const unsigned char **pder, + size_t *pderlen); + +int X509v3_get_ext_count(const STACK_OF(X509_EXTENSION) *x); +int X509v3_get_ext_by_NID(const STACK_OF(X509_EXTENSION) *x, + int nid, int lastpos); +int X509v3_get_ext_by_OBJ(const STACK_OF(X509_EXTENSION) *x, + const ASN1_OBJECT *obj, int lastpos); +int X509v3_get_ext_by_critical(const STACK_OF(X509_EXTENSION) *x, + int crit, int lastpos); +X509_EXTENSION *X509v3_get_ext(const STACK_OF(X509_EXTENSION) *x, int loc); +X509_EXTENSION *X509v3_delete_ext(STACK_OF(X509_EXTENSION) *x, int loc); +STACK_OF(X509_EXTENSION) *X509v3_add_ext(STACK_OF(X509_EXTENSION) **x, + X509_EXTENSION *ex, int loc); + +int X509_get_ext_count(const X509 *x); +int X509_get_ext_by_NID(const X509 *x, int nid, int lastpos); +int X509_get_ext_by_OBJ(const X509 *x, const ASN1_OBJECT *obj, int lastpos); +int X509_get_ext_by_critical(const X509 *x, int crit, int lastpos); +X509_EXTENSION *X509_get_ext(const X509 *x, int loc); +X509_EXTENSION *X509_delete_ext(X509 *x, int loc); +int X509_add_ext(X509 *x, X509_EXTENSION *ex, int loc); +void *X509_get_ext_d2i(const X509 *x, int nid, int *crit, int *idx); +int X509_add1_ext_i2d(X509 *x, int nid, void *value, int crit, + unsigned long flags); + +int X509_CRL_get_ext_count(const X509_CRL *x); +int X509_CRL_get_ext_by_NID(const X509_CRL *x, int nid, int lastpos); +int X509_CRL_get_ext_by_OBJ(const X509_CRL *x, const ASN1_OBJECT *obj, + int lastpos); +int X509_CRL_get_ext_by_critical(const X509_CRL *x, int crit, int lastpos); +X509_EXTENSION *X509_CRL_get_ext(const X509_CRL *x, int loc); +X509_EXTENSION *X509_CRL_delete_ext(X509_CRL *x, int loc); +int X509_CRL_add_ext(X509_CRL *x, X509_EXTENSION *ex, int loc); +void *X509_CRL_get_ext_d2i(const X509_CRL *x, int nid, int *crit, int *idx); +int X509_CRL_add1_ext_i2d(X509_CRL *x, int nid, void *value, int crit, + unsigned long flags); + +int X509_REVOKED_get_ext_count(const 
X509_REVOKED *x); +int X509_REVOKED_get_ext_by_NID(const X509_REVOKED *x, int nid, int lastpos); +int X509_REVOKED_get_ext_by_OBJ(const X509_REVOKED *x, const ASN1_OBJECT *obj, + int lastpos); +int X509_REVOKED_get_ext_by_critical(const X509_REVOKED *x, int crit, + int lastpos); +X509_EXTENSION *X509_REVOKED_get_ext(const X509_REVOKED *x, int loc); +X509_EXTENSION *X509_REVOKED_delete_ext(X509_REVOKED *x, int loc); +int X509_REVOKED_add_ext(X509_REVOKED *x, X509_EXTENSION *ex, int loc); +void *X509_REVOKED_get_ext_d2i(const X509_REVOKED *x, int nid, int *crit, + int *idx); +int X509_REVOKED_add1_ext_i2d(X509_REVOKED *x, int nid, void *value, int crit, + unsigned long flags); + +X509_EXTENSION *X509_EXTENSION_create_by_NID(X509_EXTENSION **ex, + int nid, int crit, + ASN1_OCTET_STRING *data); +X509_EXTENSION *X509_EXTENSION_create_by_OBJ(X509_EXTENSION **ex, + const ASN1_OBJECT *obj, int crit, + ASN1_OCTET_STRING *data); +int X509_EXTENSION_set_object(X509_EXTENSION *ex, const ASN1_OBJECT *obj); +int X509_EXTENSION_set_critical(X509_EXTENSION *ex, int crit); +int X509_EXTENSION_set_data(X509_EXTENSION *ex, ASN1_OCTET_STRING *data); +ASN1_OBJECT *X509_EXTENSION_get_object(X509_EXTENSION *ex); +ASN1_OCTET_STRING *X509_EXTENSION_get_data(X509_EXTENSION *ne); +int X509_EXTENSION_get_critical(const X509_EXTENSION *ex); + +int X509at_get_attr_count(const STACK_OF(X509_ATTRIBUTE) *x); +int X509at_get_attr_by_NID(const STACK_OF(X509_ATTRIBUTE) *x, int nid, + int lastpos); +int X509at_get_attr_by_OBJ(const STACK_OF(X509_ATTRIBUTE) *sk, + const ASN1_OBJECT *obj, int lastpos); +X509_ATTRIBUTE *X509at_get_attr(const STACK_OF(X509_ATTRIBUTE) *x, int loc); +X509_ATTRIBUTE *X509at_delete_attr(STACK_OF(X509_ATTRIBUTE) *x, int loc); +STACK_OF(X509_ATTRIBUTE) *X509at_add1_attr(STACK_OF(X509_ATTRIBUTE) **x, + X509_ATTRIBUTE *attr); +STACK_OF(X509_ATTRIBUTE) *X509at_add1_attr_by_OBJ(STACK_OF(X509_ATTRIBUTE) + **x, const ASN1_OBJECT *obj, + int type, + const unsigned char *bytes, + int len); +STACK_OF(X509_ATTRIBUTE) *X509at_add1_attr_by_NID(STACK_OF(X509_ATTRIBUTE) + **x, int nid, int type, + const unsigned char *bytes, + int len); +STACK_OF(X509_ATTRIBUTE) *X509at_add1_attr_by_txt(STACK_OF(X509_ATTRIBUTE) + **x, const char *attrname, + int type, + const unsigned char *bytes, + int len); +void *X509at_get0_data_by_OBJ(const STACK_OF(X509_ATTRIBUTE) *x, + const ASN1_OBJECT *obj, int lastpos, int type); +X509_ATTRIBUTE *X509_ATTRIBUTE_create_by_NID(X509_ATTRIBUTE **attr, int nid, + int atrtype, const void *data, + int len); +X509_ATTRIBUTE *X509_ATTRIBUTE_create_by_OBJ(X509_ATTRIBUTE **attr, + const ASN1_OBJECT *obj, + int atrtype, const void *data, + int len); +X509_ATTRIBUTE *X509_ATTRIBUTE_create_by_txt(X509_ATTRIBUTE **attr, + const char *atrname, int type, + const unsigned char *bytes, + int len); +int X509_ATTRIBUTE_set1_object(X509_ATTRIBUTE *attr, const ASN1_OBJECT *obj); +int X509_ATTRIBUTE_set1_data(X509_ATTRIBUTE *attr, int attrtype, + const void *data, int len); +void *X509_ATTRIBUTE_get0_data(X509_ATTRIBUTE *attr, int idx, int atrtype, + void *data); +int X509_ATTRIBUTE_count(const X509_ATTRIBUTE *attr); +ASN1_OBJECT *X509_ATTRIBUTE_get0_object(X509_ATTRIBUTE *attr); +ASN1_TYPE *X509_ATTRIBUTE_get0_type(X509_ATTRIBUTE *attr, int idx); + +int EVP_PKEY_get_attr_count(const EVP_PKEY *key); +int EVP_PKEY_get_attr_by_NID(const EVP_PKEY *key, int nid, int lastpos); +int EVP_PKEY_get_attr_by_OBJ(const EVP_PKEY *key, const ASN1_OBJECT *obj, + int lastpos); +X509_ATTRIBUTE *EVP_PKEY_get_attr(const EVP_PKEY 
*key, int loc); +X509_ATTRIBUTE *EVP_PKEY_delete_attr(EVP_PKEY *key, int loc); +int EVP_PKEY_add1_attr(EVP_PKEY *key, X509_ATTRIBUTE *attr); +int EVP_PKEY_add1_attr_by_OBJ(EVP_PKEY *key, + const ASN1_OBJECT *obj, int type, + const unsigned char *bytes, int len); +int EVP_PKEY_add1_attr_by_NID(EVP_PKEY *key, + int nid, int type, + const unsigned char *bytes, int len); +int EVP_PKEY_add1_attr_by_txt(EVP_PKEY *key, + const char *attrname, int type, + const unsigned char *bytes, int len); + +int X509_verify_cert(X509_STORE_CTX *ctx); + +/* lookup a cert from a X509 STACK */ +X509 *X509_find_by_issuer_and_serial(STACK_OF(X509) *sk, X509_NAME *name, + ASN1_INTEGER *serial); +X509 *X509_find_by_subject(STACK_OF(X509) *sk, X509_NAME *name); + +DECLARE_ASN1_FUNCTIONS(PBEPARAM) +DECLARE_ASN1_FUNCTIONS(PBE2PARAM) +DECLARE_ASN1_FUNCTIONS(PBKDF2PARAM) +#ifndef OPENSSL_NO_SCRYPT +DECLARE_ASN1_FUNCTIONS(SCRYPT_PARAMS) +#endif + +int PKCS5_pbe_set0_algor(X509_ALGOR *algor, int alg, int iter, + const unsigned char *salt, int saltlen); + +X509_ALGOR *PKCS5_pbe_set(int alg, int iter, + const unsigned char *salt, int saltlen); +X509_ALGOR *PKCS5_pbe2_set(const EVP_CIPHER *cipher, int iter, + unsigned char *salt, int saltlen); +X509_ALGOR *PKCS5_pbe2_set_iv(const EVP_CIPHER *cipher, int iter, + unsigned char *salt, int saltlen, + unsigned char *aiv, int prf_nid); + +#ifndef OPENSSL_NO_SCRYPT +X509_ALGOR *PKCS5_pbe2_set_scrypt(const EVP_CIPHER *cipher, + const unsigned char *salt, int saltlen, + unsigned char *aiv, uint64_t N, uint64_t r, + uint64_t p); +#endif + +X509_ALGOR *PKCS5_pbkdf2_set(int iter, unsigned char *salt, int saltlen, + int prf_nid, int keylen); + +/* PKCS#8 utilities */ + +DECLARE_ASN1_FUNCTIONS(PKCS8_PRIV_KEY_INFO) + +EVP_PKEY *EVP_PKCS82PKEY(const PKCS8_PRIV_KEY_INFO *p8); +PKCS8_PRIV_KEY_INFO *EVP_PKEY2PKCS8(EVP_PKEY *pkey); + +int PKCS8_pkey_set0(PKCS8_PRIV_KEY_INFO *priv, ASN1_OBJECT *aobj, + int version, int ptype, void *pval, + unsigned char *penc, int penclen); +int PKCS8_pkey_get0(const ASN1_OBJECT **ppkalg, + const unsigned char **pk, int *ppklen, + const X509_ALGOR **pa, const PKCS8_PRIV_KEY_INFO *p8); + +const STACK_OF(X509_ATTRIBUTE) * +PKCS8_pkey_get0_attrs(const PKCS8_PRIV_KEY_INFO *p8); +int PKCS8_pkey_add1_attr_by_NID(PKCS8_PRIV_KEY_INFO *p8, int nid, int type, + const unsigned char *bytes, int len); + +int X509_PUBKEY_set0_param(X509_PUBKEY *pub, ASN1_OBJECT *aobj, + int ptype, void *pval, + unsigned char *penc, int penclen); +int X509_PUBKEY_get0_param(ASN1_OBJECT **ppkalg, + const unsigned char **pk, int *ppklen, + X509_ALGOR **pa, X509_PUBKEY *pub); + +int X509_check_trust(X509 *x, int id, int flags); +int X509_TRUST_get_count(void); +X509_TRUST *X509_TRUST_get0(int idx); +int X509_TRUST_get_by_id(int id); +int X509_TRUST_add(int id, int flags, int (*ck) (X509_TRUST *, X509 *, int), + const char *name, int arg1, void *arg2); +void X509_TRUST_cleanup(void); +int X509_TRUST_get_flags(const X509_TRUST *xp); +char *X509_TRUST_get0_name(const X509_TRUST *xp); +int X509_TRUST_get_trust(const X509_TRUST *xp); + +# ifdef __cplusplus +} +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/x509_vfy.h b/third_party/openssl/uos/sw_64/include/openssl/x509_vfy.h new file mode 100644 index 00000000..25c79f1b --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/x509_vfy.h @@ -0,0 +1,632 @@ +/* + * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). 
You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_X509_VFY_H +# define HEADER_X509_VFY_H + +/* + * Protect against recursion, x509.h and x509_vfy.h each include the other. + */ +# ifndef HEADER_X509_H +# include +# endif + +# include +# include +# include +# include +# include + +#ifdef __cplusplus +extern "C" { +#endif + +/*- +SSL_CTX -> X509_STORE + -> X509_LOOKUP + ->X509_LOOKUP_METHOD + -> X509_LOOKUP + ->X509_LOOKUP_METHOD + +SSL -> X509_STORE_CTX + ->X509_STORE + +The X509_STORE holds the tables etc for verification stuff. +A X509_STORE_CTX is used while validating a single certificate. +The X509_STORE has X509_LOOKUPs for looking up certs. +The X509_STORE then calls a function to actually verify the +certificate chain. +*/ + +typedef enum { + X509_LU_NONE = 0, + X509_LU_X509, X509_LU_CRL +} X509_LOOKUP_TYPE; + +#if OPENSSL_API_COMPAT < 0x10100000L +#define X509_LU_RETRY -1 +#define X509_LU_FAIL 0 +#endif + +DEFINE_STACK_OF(X509_LOOKUP) +DEFINE_STACK_OF(X509_OBJECT) +DEFINE_STACK_OF(X509_VERIFY_PARAM) + +int X509_STORE_set_depth(X509_STORE *store, int depth); + +typedef int (*X509_STORE_CTX_verify_cb)(int, X509_STORE_CTX *); +typedef int (*X509_STORE_CTX_verify_fn)(X509_STORE_CTX *); +typedef int (*X509_STORE_CTX_get_issuer_fn)(X509 **issuer, + X509_STORE_CTX *ctx, X509 *x); +typedef int (*X509_STORE_CTX_check_issued_fn)(X509_STORE_CTX *ctx, + X509 *x, X509 *issuer); +typedef int (*X509_STORE_CTX_check_revocation_fn)(X509_STORE_CTX *ctx); +typedef int (*X509_STORE_CTX_get_crl_fn)(X509_STORE_CTX *ctx, + X509_CRL **crl, X509 *x); +typedef int (*X509_STORE_CTX_check_crl_fn)(X509_STORE_CTX *ctx, X509_CRL *crl); +typedef int (*X509_STORE_CTX_cert_crl_fn)(X509_STORE_CTX *ctx, + X509_CRL *crl, X509 *x); +typedef int (*X509_STORE_CTX_check_policy_fn)(X509_STORE_CTX *ctx); +typedef STACK_OF(X509) *(*X509_STORE_CTX_lookup_certs_fn)(X509_STORE_CTX *ctx, + X509_NAME *nm); +typedef STACK_OF(X509_CRL) *(*X509_STORE_CTX_lookup_crls_fn)(X509_STORE_CTX *ctx, + X509_NAME *nm); +typedef int (*X509_STORE_CTX_cleanup_fn)(X509_STORE_CTX *ctx); + + +void X509_STORE_CTX_set_depth(X509_STORE_CTX *ctx, int depth); + +# define X509_STORE_CTX_set_app_data(ctx,data) \ + X509_STORE_CTX_set_ex_data(ctx,0,data) +# define X509_STORE_CTX_get_app_data(ctx) \ + X509_STORE_CTX_get_ex_data(ctx,0) + +# define X509_L_FILE_LOAD 1 +# define X509_L_ADD_DIR 2 + +# define X509_LOOKUP_load_file(x,name,type) \ + X509_LOOKUP_ctrl((x),X509_L_FILE_LOAD,(name),(long)(type),NULL) + +# define X509_LOOKUP_add_dir(x,name,type) \ + X509_LOOKUP_ctrl((x),X509_L_ADD_DIR,(name),(long)(type),NULL) + +# define X509_V_OK 0 +# define X509_V_ERR_UNSPECIFIED 1 +# define X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT 2 +# define X509_V_ERR_UNABLE_TO_GET_CRL 3 +# define X509_V_ERR_UNABLE_TO_DECRYPT_CERT_SIGNATURE 4 +# define X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE 5 +# define X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY 6 +# define X509_V_ERR_CERT_SIGNATURE_FAILURE 7 +# define X509_V_ERR_CRL_SIGNATURE_FAILURE 8 +# define X509_V_ERR_CERT_NOT_YET_VALID 9 +# define X509_V_ERR_CERT_HAS_EXPIRED 10 +# define X509_V_ERR_CRL_NOT_YET_VALID 11 +# define X509_V_ERR_CRL_HAS_EXPIRED 12 +# define X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD 13 +# define X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD 14 +# define X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD 15 +# define X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD 16 +# 
define X509_V_ERR_OUT_OF_MEM 17 +# define X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT 18 +# define X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN 19 +# define X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY 20 +# define X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE 21 +# define X509_V_ERR_CERT_CHAIN_TOO_LONG 22 +# define X509_V_ERR_CERT_REVOKED 23 +# define X509_V_ERR_INVALID_CA 24 +# define X509_V_ERR_PATH_LENGTH_EXCEEDED 25 +# define X509_V_ERR_INVALID_PURPOSE 26 +# define X509_V_ERR_CERT_UNTRUSTED 27 +# define X509_V_ERR_CERT_REJECTED 28 +/* These are 'informational' when looking for issuer cert */ +# define X509_V_ERR_SUBJECT_ISSUER_MISMATCH 29 +# define X509_V_ERR_AKID_SKID_MISMATCH 30 +# define X509_V_ERR_AKID_ISSUER_SERIAL_MISMATCH 31 +# define X509_V_ERR_KEYUSAGE_NO_CERTSIGN 32 +# define X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER 33 +# define X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION 34 +# define X509_V_ERR_KEYUSAGE_NO_CRL_SIGN 35 +# define X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION 36 +# define X509_V_ERR_INVALID_NON_CA 37 +# define X509_V_ERR_PROXY_PATH_LENGTH_EXCEEDED 38 +# define X509_V_ERR_KEYUSAGE_NO_DIGITAL_SIGNATURE 39 +# define X509_V_ERR_PROXY_CERTIFICATES_NOT_ALLOWED 40 +# define X509_V_ERR_INVALID_EXTENSION 41 +# define X509_V_ERR_INVALID_POLICY_EXTENSION 42 +# define X509_V_ERR_NO_EXPLICIT_POLICY 43 +# define X509_V_ERR_DIFFERENT_CRL_SCOPE 44 +# define X509_V_ERR_UNSUPPORTED_EXTENSION_FEATURE 45 +# define X509_V_ERR_UNNESTED_RESOURCE 46 +# define X509_V_ERR_PERMITTED_VIOLATION 47 +# define X509_V_ERR_EXCLUDED_VIOLATION 48 +# define X509_V_ERR_SUBTREE_MINMAX 49 +/* The application is not happy */ +# define X509_V_ERR_APPLICATION_VERIFICATION 50 +# define X509_V_ERR_UNSUPPORTED_CONSTRAINT_TYPE 51 +# define X509_V_ERR_UNSUPPORTED_CONSTRAINT_SYNTAX 52 +# define X509_V_ERR_UNSUPPORTED_NAME_SYNTAX 53 +# define X509_V_ERR_CRL_PATH_VALIDATION_ERROR 54 +/* Another issuer check debug option */ +# define X509_V_ERR_PATH_LOOP 55 +/* Suite B mode algorithm violation */ +# define X509_V_ERR_SUITE_B_INVALID_VERSION 56 +# define X509_V_ERR_SUITE_B_INVALID_ALGORITHM 57 +# define X509_V_ERR_SUITE_B_INVALID_CURVE 58 +# define X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM 59 +# define X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED 60 +# define X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256 61 +/* Host, email and IP check errors */ +# define X509_V_ERR_HOSTNAME_MISMATCH 62 +# define X509_V_ERR_EMAIL_MISMATCH 63 +# define X509_V_ERR_IP_ADDRESS_MISMATCH 64 +/* DANE TLSA errors */ +# define X509_V_ERR_DANE_NO_MATCH 65 +/* security level errors */ +# define X509_V_ERR_EE_KEY_TOO_SMALL 66 +# define X509_V_ERR_CA_KEY_TOO_SMALL 67 +# define X509_V_ERR_CA_MD_TOO_WEAK 68 +/* Caller error */ +# define X509_V_ERR_INVALID_CALL 69 +/* Issuer lookup error */ +# define X509_V_ERR_STORE_LOOKUP 70 +/* Certificate transparency */ +# define X509_V_ERR_NO_VALID_SCTS 71 + +# define X509_V_ERR_PROXY_SUBJECT_NAME_VIOLATION 72 +/* OCSP status errors */ +# define X509_V_ERR_OCSP_VERIFY_NEEDED 73 /* Need OCSP verification */ +# define X509_V_ERR_OCSP_VERIFY_FAILED 74 /* Couldn't verify cert through OCSP */ +# define X509_V_ERR_OCSP_CERT_UNKNOWN 75 /* Certificate wasn't recognized by the OCSP responder */ +# define X509_V_ERR_SIGNATURE_ALGORITHM_MISMATCH 76 +# define X509_V_ERR_NO_ISSUER_PUBLIC_KEY 77 +# define X509_V_ERR_UNSUPPORTED_SIGNATURE_ALGORITHM 78 +# define X509_V_ERR_EC_KEY_EXPLICIT_PARAMS 79 + +/* Certificate verify flags */ + +# if OPENSSL_API_COMPAT < 0x10100000L +# define X509_V_FLAG_CB_ISSUER_CHECK 0x0 /* Deprecated */ +# endif +/* Use 
check time instead of current time */ +# define X509_V_FLAG_USE_CHECK_TIME 0x2 +/* Lookup CRLs */ +# define X509_V_FLAG_CRL_CHECK 0x4 +/* Lookup CRLs for whole chain */ +# define X509_V_FLAG_CRL_CHECK_ALL 0x8 +/* Ignore unhandled critical extensions */ +# define X509_V_FLAG_IGNORE_CRITICAL 0x10 +/* Disable workarounds for broken certificates */ +# define X509_V_FLAG_X509_STRICT 0x20 +/* Enable proxy certificate validation */ +# define X509_V_FLAG_ALLOW_PROXY_CERTS 0x40 +/* Enable policy checking */ +# define X509_V_FLAG_POLICY_CHECK 0x80 +/* Policy variable require-explicit-policy */ +# define X509_V_FLAG_EXPLICIT_POLICY 0x100 +/* Policy variable inhibit-any-policy */ +# define X509_V_FLAG_INHIBIT_ANY 0x200 +/* Policy variable inhibit-policy-mapping */ +# define X509_V_FLAG_INHIBIT_MAP 0x400 +/* Notify callback that policy is OK */ +# define X509_V_FLAG_NOTIFY_POLICY 0x800 +/* Extended CRL features such as indirect CRLs, alternate CRL signing keys */ +# define X509_V_FLAG_EXTENDED_CRL_SUPPORT 0x1000 +/* Delta CRL support */ +# define X509_V_FLAG_USE_DELTAS 0x2000 +/* Check self-signed CA signature */ +# define X509_V_FLAG_CHECK_SS_SIGNATURE 0x4000 +/* Use trusted store first */ +# define X509_V_FLAG_TRUSTED_FIRST 0x8000 +/* Suite B 128 bit only mode: not normally used */ +# define X509_V_FLAG_SUITEB_128_LOS_ONLY 0x10000 +/* Suite B 192 bit only mode */ +# define X509_V_FLAG_SUITEB_192_LOS 0x20000 +/* Suite B 128 bit mode allowing 192 bit algorithms */ +# define X509_V_FLAG_SUITEB_128_LOS 0x30000 +/* Allow partial chains if at least one certificate is in trusted store */ +# define X509_V_FLAG_PARTIAL_CHAIN 0x80000 +/* + * If the initial chain is not trusted, do not attempt to build an alternative + * chain. Alternate chain checking was introduced in 1.1.0. Setting this flag + * will force the behaviour to match that of previous versions. 
+ */ +# define X509_V_FLAG_NO_ALT_CHAINS 0x100000 +/* Do not check certificate/CRL validity against current time */ +# define X509_V_FLAG_NO_CHECK_TIME 0x200000 + +# define X509_VP_FLAG_DEFAULT 0x1 +# define X509_VP_FLAG_OVERWRITE 0x2 +# define X509_VP_FLAG_RESET_FLAGS 0x4 +# define X509_VP_FLAG_LOCKED 0x8 +# define X509_VP_FLAG_ONCE 0x10 + +/* Internal use: mask of policy related options */ +# define X509_V_FLAG_POLICY_MASK (X509_V_FLAG_POLICY_CHECK \ + | X509_V_FLAG_EXPLICIT_POLICY \ + | X509_V_FLAG_INHIBIT_ANY \ + | X509_V_FLAG_INHIBIT_MAP) + +int X509_OBJECT_idx_by_subject(STACK_OF(X509_OBJECT) *h, X509_LOOKUP_TYPE type, + X509_NAME *name); +X509_OBJECT *X509_OBJECT_retrieve_by_subject(STACK_OF(X509_OBJECT) *h, + X509_LOOKUP_TYPE type, + X509_NAME *name); +X509_OBJECT *X509_OBJECT_retrieve_match(STACK_OF(X509_OBJECT) *h, + X509_OBJECT *x); +int X509_OBJECT_up_ref_count(X509_OBJECT *a); +X509_OBJECT *X509_OBJECT_new(void); +void X509_OBJECT_free(X509_OBJECT *a); +X509_LOOKUP_TYPE X509_OBJECT_get_type(const X509_OBJECT *a); +X509 *X509_OBJECT_get0_X509(const X509_OBJECT *a); +int X509_OBJECT_set1_X509(X509_OBJECT *a, X509 *obj); +X509_CRL *X509_OBJECT_get0_X509_CRL(X509_OBJECT *a); +int X509_OBJECT_set1_X509_CRL(X509_OBJECT *a, X509_CRL *obj); +X509_STORE *X509_STORE_new(void); +void X509_STORE_free(X509_STORE *v); +int X509_STORE_lock(X509_STORE *ctx); +int X509_STORE_unlock(X509_STORE *ctx); +int X509_STORE_up_ref(X509_STORE *v); +STACK_OF(X509_OBJECT) *X509_STORE_get0_objects(X509_STORE *v); + +STACK_OF(X509) *X509_STORE_CTX_get1_certs(X509_STORE_CTX *st, X509_NAME *nm); +STACK_OF(X509_CRL) *X509_STORE_CTX_get1_crls(X509_STORE_CTX *st, X509_NAME *nm); +int X509_STORE_set_flags(X509_STORE *ctx, unsigned long flags); +int X509_STORE_set_purpose(X509_STORE *ctx, int purpose); +int X509_STORE_set_trust(X509_STORE *ctx, int trust); +int X509_STORE_set1_param(X509_STORE *ctx, X509_VERIFY_PARAM *pm); +X509_VERIFY_PARAM *X509_STORE_get0_param(X509_STORE *ctx); + +void X509_STORE_set_verify(X509_STORE *ctx, X509_STORE_CTX_verify_fn verify); +#define X509_STORE_set_verify_func(ctx, func) \ + X509_STORE_set_verify((ctx),(func)) +void X509_STORE_CTX_set_verify(X509_STORE_CTX *ctx, + X509_STORE_CTX_verify_fn verify); +X509_STORE_CTX_verify_fn X509_STORE_get_verify(X509_STORE *ctx); +void X509_STORE_set_verify_cb(X509_STORE *ctx, + X509_STORE_CTX_verify_cb verify_cb); +# define X509_STORE_set_verify_cb_func(ctx,func) \ + X509_STORE_set_verify_cb((ctx),(func)) +X509_STORE_CTX_verify_cb X509_STORE_get_verify_cb(X509_STORE *ctx); +void X509_STORE_set_get_issuer(X509_STORE *ctx, + X509_STORE_CTX_get_issuer_fn get_issuer); +X509_STORE_CTX_get_issuer_fn X509_STORE_get_get_issuer(X509_STORE *ctx); +void X509_STORE_set_check_issued(X509_STORE *ctx, + X509_STORE_CTX_check_issued_fn check_issued); +X509_STORE_CTX_check_issued_fn X509_STORE_get_check_issued(X509_STORE *ctx); +void X509_STORE_set_check_revocation(X509_STORE *ctx, + X509_STORE_CTX_check_revocation_fn check_revocation); +X509_STORE_CTX_check_revocation_fn X509_STORE_get_check_revocation(X509_STORE *ctx); +void X509_STORE_set_get_crl(X509_STORE *ctx, + X509_STORE_CTX_get_crl_fn get_crl); +X509_STORE_CTX_get_crl_fn X509_STORE_get_get_crl(X509_STORE *ctx); +void X509_STORE_set_check_crl(X509_STORE *ctx, + X509_STORE_CTX_check_crl_fn check_crl); +X509_STORE_CTX_check_crl_fn X509_STORE_get_check_crl(X509_STORE *ctx); +void X509_STORE_set_cert_crl(X509_STORE *ctx, + X509_STORE_CTX_cert_crl_fn cert_crl); +X509_STORE_CTX_cert_crl_fn 
X509_STORE_get_cert_crl(X509_STORE *ctx); +void X509_STORE_set_check_policy(X509_STORE *ctx, + X509_STORE_CTX_check_policy_fn check_policy); +X509_STORE_CTX_check_policy_fn X509_STORE_get_check_policy(X509_STORE *ctx); +void X509_STORE_set_lookup_certs(X509_STORE *ctx, + X509_STORE_CTX_lookup_certs_fn lookup_certs); +X509_STORE_CTX_lookup_certs_fn X509_STORE_get_lookup_certs(X509_STORE *ctx); +void X509_STORE_set_lookup_crls(X509_STORE *ctx, + X509_STORE_CTX_lookup_crls_fn lookup_crls); +#define X509_STORE_set_lookup_crls_cb(ctx, func) \ + X509_STORE_set_lookup_crls((ctx), (func)) +X509_STORE_CTX_lookup_crls_fn X509_STORE_get_lookup_crls(X509_STORE *ctx); +void X509_STORE_set_cleanup(X509_STORE *ctx, + X509_STORE_CTX_cleanup_fn cleanup); +X509_STORE_CTX_cleanup_fn X509_STORE_get_cleanup(X509_STORE *ctx); + +#define X509_STORE_get_ex_new_index(l, p, newf, dupf, freef) \ + CRYPTO_get_ex_new_index(CRYPTO_EX_INDEX_X509_STORE, l, p, newf, dupf, freef) +int X509_STORE_set_ex_data(X509_STORE *ctx, int idx, void *data); +void *X509_STORE_get_ex_data(X509_STORE *ctx, int idx); + +X509_STORE_CTX *X509_STORE_CTX_new(void); + +int X509_STORE_CTX_get1_issuer(X509 **issuer, X509_STORE_CTX *ctx, X509 *x); + +void X509_STORE_CTX_free(X509_STORE_CTX *ctx); +int X509_STORE_CTX_init(X509_STORE_CTX *ctx, X509_STORE *store, + X509 *x509, STACK_OF(X509) *chain); +void X509_STORE_CTX_set0_trusted_stack(X509_STORE_CTX *ctx, STACK_OF(X509) *sk); +void X509_STORE_CTX_cleanup(X509_STORE_CTX *ctx); + +X509_STORE *X509_STORE_CTX_get0_store(X509_STORE_CTX *ctx); +X509 *X509_STORE_CTX_get0_cert(X509_STORE_CTX *ctx); +STACK_OF(X509)* X509_STORE_CTX_get0_untrusted(X509_STORE_CTX *ctx); +void X509_STORE_CTX_set0_untrusted(X509_STORE_CTX *ctx, STACK_OF(X509) *sk); +void X509_STORE_CTX_set_verify_cb(X509_STORE_CTX *ctx, + X509_STORE_CTX_verify_cb verify); +X509_STORE_CTX_verify_cb X509_STORE_CTX_get_verify_cb(X509_STORE_CTX *ctx); +X509_STORE_CTX_verify_fn X509_STORE_CTX_get_verify(X509_STORE_CTX *ctx); +X509_STORE_CTX_get_issuer_fn X509_STORE_CTX_get_get_issuer(X509_STORE_CTX *ctx); +X509_STORE_CTX_check_issued_fn X509_STORE_CTX_get_check_issued(X509_STORE_CTX *ctx); +X509_STORE_CTX_check_revocation_fn X509_STORE_CTX_get_check_revocation(X509_STORE_CTX *ctx); +X509_STORE_CTX_get_crl_fn X509_STORE_CTX_get_get_crl(X509_STORE_CTX *ctx); +X509_STORE_CTX_check_crl_fn X509_STORE_CTX_get_check_crl(X509_STORE_CTX *ctx); +X509_STORE_CTX_cert_crl_fn X509_STORE_CTX_get_cert_crl(X509_STORE_CTX *ctx); +X509_STORE_CTX_check_policy_fn X509_STORE_CTX_get_check_policy(X509_STORE_CTX *ctx); +X509_STORE_CTX_lookup_certs_fn X509_STORE_CTX_get_lookup_certs(X509_STORE_CTX *ctx); +X509_STORE_CTX_lookup_crls_fn X509_STORE_CTX_get_lookup_crls(X509_STORE_CTX *ctx); +X509_STORE_CTX_cleanup_fn X509_STORE_CTX_get_cleanup(X509_STORE_CTX *ctx); + +#if OPENSSL_API_COMPAT < 0x10100000L +# define X509_STORE_CTX_get_chain X509_STORE_CTX_get0_chain +# define X509_STORE_CTX_set_chain X509_STORE_CTX_set0_untrusted +# define X509_STORE_CTX_trusted_stack X509_STORE_CTX_set0_trusted_stack +# define X509_STORE_get_by_subject X509_STORE_CTX_get_by_subject +# define X509_STORE_get1_certs X509_STORE_CTX_get1_certs +# define X509_STORE_get1_crls X509_STORE_CTX_get1_crls +/* the following macro is misspelled; use X509_STORE_get1_certs instead */ +# define X509_STORE_get1_cert X509_STORE_CTX_get1_certs +/* the following macro is misspelled; use X509_STORE_get1_crls instead */ +# define X509_STORE_get1_crl X509_STORE_CTX_get1_crls +#endif + +X509_LOOKUP 
*X509_STORE_add_lookup(X509_STORE *v, X509_LOOKUP_METHOD *m); +X509_LOOKUP_METHOD *X509_LOOKUP_hash_dir(void); +X509_LOOKUP_METHOD *X509_LOOKUP_file(void); + +typedef int (*X509_LOOKUP_ctrl_fn)(X509_LOOKUP *ctx, int cmd, const char *argc, + long argl, char **ret); +typedef int (*X509_LOOKUP_get_by_subject_fn)(X509_LOOKUP *ctx, + X509_LOOKUP_TYPE type, + X509_NAME *name, + X509_OBJECT *ret); +typedef int (*X509_LOOKUP_get_by_issuer_serial_fn)(X509_LOOKUP *ctx, + X509_LOOKUP_TYPE type, + X509_NAME *name, + ASN1_INTEGER *serial, + X509_OBJECT *ret); +typedef int (*X509_LOOKUP_get_by_fingerprint_fn)(X509_LOOKUP *ctx, + X509_LOOKUP_TYPE type, + const unsigned char* bytes, + int len, + X509_OBJECT *ret); +typedef int (*X509_LOOKUP_get_by_alias_fn)(X509_LOOKUP *ctx, + X509_LOOKUP_TYPE type, + const char *str, + int len, + X509_OBJECT *ret); + +X509_LOOKUP_METHOD *X509_LOOKUP_meth_new(const char *name); +void X509_LOOKUP_meth_free(X509_LOOKUP_METHOD *method); + +int X509_LOOKUP_meth_set_new_item(X509_LOOKUP_METHOD *method, + int (*new_item) (X509_LOOKUP *ctx)); +int (*X509_LOOKUP_meth_get_new_item(const X509_LOOKUP_METHOD* method)) + (X509_LOOKUP *ctx); + +int X509_LOOKUP_meth_set_free(X509_LOOKUP_METHOD *method, + void (*free_fn) (X509_LOOKUP *ctx)); +void (*X509_LOOKUP_meth_get_free(const X509_LOOKUP_METHOD* method)) + (X509_LOOKUP *ctx); + +int X509_LOOKUP_meth_set_init(X509_LOOKUP_METHOD *method, + int (*init) (X509_LOOKUP *ctx)); +int (*X509_LOOKUP_meth_get_init(const X509_LOOKUP_METHOD* method)) + (X509_LOOKUP *ctx); + +int X509_LOOKUP_meth_set_shutdown(X509_LOOKUP_METHOD *method, + int (*shutdown) (X509_LOOKUP *ctx)); +int (*X509_LOOKUP_meth_get_shutdown(const X509_LOOKUP_METHOD* method)) + (X509_LOOKUP *ctx); + +int X509_LOOKUP_meth_set_ctrl(X509_LOOKUP_METHOD *method, + X509_LOOKUP_ctrl_fn ctrl_fn); +X509_LOOKUP_ctrl_fn X509_LOOKUP_meth_get_ctrl(const X509_LOOKUP_METHOD *method); + +int X509_LOOKUP_meth_set_get_by_subject(X509_LOOKUP_METHOD *method, + X509_LOOKUP_get_by_subject_fn fn); +X509_LOOKUP_get_by_subject_fn X509_LOOKUP_meth_get_get_by_subject( + const X509_LOOKUP_METHOD *method); + +int X509_LOOKUP_meth_set_get_by_issuer_serial(X509_LOOKUP_METHOD *method, + X509_LOOKUP_get_by_issuer_serial_fn fn); +X509_LOOKUP_get_by_issuer_serial_fn X509_LOOKUP_meth_get_get_by_issuer_serial( + const X509_LOOKUP_METHOD *method); + +int X509_LOOKUP_meth_set_get_by_fingerprint(X509_LOOKUP_METHOD *method, + X509_LOOKUP_get_by_fingerprint_fn fn); +X509_LOOKUP_get_by_fingerprint_fn X509_LOOKUP_meth_get_get_by_fingerprint( + const X509_LOOKUP_METHOD *method); + +int X509_LOOKUP_meth_set_get_by_alias(X509_LOOKUP_METHOD *method, + X509_LOOKUP_get_by_alias_fn fn); +X509_LOOKUP_get_by_alias_fn X509_LOOKUP_meth_get_get_by_alias( + const X509_LOOKUP_METHOD *method); + + +int X509_STORE_add_cert(X509_STORE *ctx, X509 *x); +int X509_STORE_add_crl(X509_STORE *ctx, X509_CRL *x); + +int X509_STORE_CTX_get_by_subject(X509_STORE_CTX *vs, X509_LOOKUP_TYPE type, + X509_NAME *name, X509_OBJECT *ret); +X509_OBJECT *X509_STORE_CTX_get_obj_by_subject(X509_STORE_CTX *vs, + X509_LOOKUP_TYPE type, + X509_NAME *name); + +int X509_LOOKUP_ctrl(X509_LOOKUP *ctx, int cmd, const char *argc, + long argl, char **ret); + +int X509_load_cert_file(X509_LOOKUP *ctx, const char *file, int type); +int X509_load_crl_file(X509_LOOKUP *ctx, const char *file, int type); +int X509_load_cert_crl_file(X509_LOOKUP *ctx, const char *file, int type); + +X509_LOOKUP *X509_LOOKUP_new(X509_LOOKUP_METHOD *method); +void X509_LOOKUP_free(X509_LOOKUP 
*ctx); +int X509_LOOKUP_init(X509_LOOKUP *ctx); +int X509_LOOKUP_by_subject(X509_LOOKUP *ctx, X509_LOOKUP_TYPE type, + X509_NAME *name, X509_OBJECT *ret); +int X509_LOOKUP_by_issuer_serial(X509_LOOKUP *ctx, X509_LOOKUP_TYPE type, + X509_NAME *name, ASN1_INTEGER *serial, + X509_OBJECT *ret); +int X509_LOOKUP_by_fingerprint(X509_LOOKUP *ctx, X509_LOOKUP_TYPE type, + const unsigned char *bytes, int len, + X509_OBJECT *ret); +int X509_LOOKUP_by_alias(X509_LOOKUP *ctx, X509_LOOKUP_TYPE type, + const char *str, int len, X509_OBJECT *ret); +int X509_LOOKUP_set_method_data(X509_LOOKUP *ctx, void *data); +void *X509_LOOKUP_get_method_data(const X509_LOOKUP *ctx); +X509_STORE *X509_LOOKUP_get_store(const X509_LOOKUP *ctx); +int X509_LOOKUP_shutdown(X509_LOOKUP *ctx); + +int X509_STORE_load_locations(X509_STORE *ctx, + const char *file, const char *dir); +int X509_STORE_set_default_paths(X509_STORE *ctx); + +#define X509_STORE_CTX_get_ex_new_index(l, p, newf, dupf, freef) \ + CRYPTO_get_ex_new_index(CRYPTO_EX_INDEX_X509_STORE_CTX, l, p, newf, dupf, freef) +int X509_STORE_CTX_set_ex_data(X509_STORE_CTX *ctx, int idx, void *data); +void *X509_STORE_CTX_get_ex_data(X509_STORE_CTX *ctx, int idx); +int X509_STORE_CTX_get_error(X509_STORE_CTX *ctx); +void X509_STORE_CTX_set_error(X509_STORE_CTX *ctx, int s); +int X509_STORE_CTX_get_error_depth(X509_STORE_CTX *ctx); +void X509_STORE_CTX_set_error_depth(X509_STORE_CTX *ctx, int depth); +X509 *X509_STORE_CTX_get_current_cert(X509_STORE_CTX *ctx); +void X509_STORE_CTX_set_current_cert(X509_STORE_CTX *ctx, X509 *x); +X509 *X509_STORE_CTX_get0_current_issuer(X509_STORE_CTX *ctx); +X509_CRL *X509_STORE_CTX_get0_current_crl(X509_STORE_CTX *ctx); +X509_STORE_CTX *X509_STORE_CTX_get0_parent_ctx(X509_STORE_CTX *ctx); +STACK_OF(X509) *X509_STORE_CTX_get0_chain(X509_STORE_CTX *ctx); +STACK_OF(X509) *X509_STORE_CTX_get1_chain(X509_STORE_CTX *ctx); +void X509_STORE_CTX_set_cert(X509_STORE_CTX *c, X509 *x); +void X509_STORE_CTX_set0_verified_chain(X509_STORE_CTX *c, STACK_OF(X509) *sk); +void X509_STORE_CTX_set0_crls(X509_STORE_CTX *c, STACK_OF(X509_CRL) *sk); +int X509_STORE_CTX_set_purpose(X509_STORE_CTX *ctx, int purpose); +int X509_STORE_CTX_set_trust(X509_STORE_CTX *ctx, int trust); +int X509_STORE_CTX_purpose_inherit(X509_STORE_CTX *ctx, int def_purpose, + int purpose, int trust); +void X509_STORE_CTX_set_flags(X509_STORE_CTX *ctx, unsigned long flags); +void X509_STORE_CTX_set_time(X509_STORE_CTX *ctx, unsigned long flags, + time_t t); + +X509_POLICY_TREE *X509_STORE_CTX_get0_policy_tree(X509_STORE_CTX *ctx); +int X509_STORE_CTX_get_explicit_policy(X509_STORE_CTX *ctx); +int X509_STORE_CTX_get_num_untrusted(X509_STORE_CTX *ctx); + +X509_VERIFY_PARAM *X509_STORE_CTX_get0_param(X509_STORE_CTX *ctx); +void X509_STORE_CTX_set0_param(X509_STORE_CTX *ctx, X509_VERIFY_PARAM *param); +int X509_STORE_CTX_set_default(X509_STORE_CTX *ctx, const char *name); + +/* + * Bridge opacity barrier between libcrypt and libssl, also needed to support + * offline testing in test/danetest.c + */ +void X509_STORE_CTX_set0_dane(X509_STORE_CTX *ctx, SSL_DANE *dane); +#define DANE_FLAG_NO_DANE_EE_NAMECHECKS (1L << 0) + +/* X509_VERIFY_PARAM functions */ + +X509_VERIFY_PARAM *X509_VERIFY_PARAM_new(void); +void X509_VERIFY_PARAM_free(X509_VERIFY_PARAM *param); +int X509_VERIFY_PARAM_inherit(X509_VERIFY_PARAM *to, + const X509_VERIFY_PARAM *from); +int X509_VERIFY_PARAM_set1(X509_VERIFY_PARAM *to, + const X509_VERIFY_PARAM *from); +int X509_VERIFY_PARAM_set1_name(X509_VERIFY_PARAM *param, const 
char *name); +int X509_VERIFY_PARAM_set_flags(X509_VERIFY_PARAM *param, + unsigned long flags); +int X509_VERIFY_PARAM_clear_flags(X509_VERIFY_PARAM *param, + unsigned long flags); +unsigned long X509_VERIFY_PARAM_get_flags(X509_VERIFY_PARAM *param); +int X509_VERIFY_PARAM_set_purpose(X509_VERIFY_PARAM *param, int purpose); +int X509_VERIFY_PARAM_set_trust(X509_VERIFY_PARAM *param, int trust); +void X509_VERIFY_PARAM_set_depth(X509_VERIFY_PARAM *param, int depth); +void X509_VERIFY_PARAM_set_auth_level(X509_VERIFY_PARAM *param, int auth_level); +time_t X509_VERIFY_PARAM_get_time(const X509_VERIFY_PARAM *param); +void X509_VERIFY_PARAM_set_time(X509_VERIFY_PARAM *param, time_t t); +int X509_VERIFY_PARAM_add0_policy(X509_VERIFY_PARAM *param, + ASN1_OBJECT *policy); +int X509_VERIFY_PARAM_set1_policies(X509_VERIFY_PARAM *param, + STACK_OF(ASN1_OBJECT) *policies); + +int X509_VERIFY_PARAM_set_inh_flags(X509_VERIFY_PARAM *param, + uint32_t flags); +uint32_t X509_VERIFY_PARAM_get_inh_flags(const X509_VERIFY_PARAM *param); + +int X509_VERIFY_PARAM_set1_host(X509_VERIFY_PARAM *param, + const char *name, size_t namelen); +int X509_VERIFY_PARAM_add1_host(X509_VERIFY_PARAM *param, + const char *name, size_t namelen); +void X509_VERIFY_PARAM_set_hostflags(X509_VERIFY_PARAM *param, + unsigned int flags); +unsigned int X509_VERIFY_PARAM_get_hostflags(const X509_VERIFY_PARAM *param); +char *X509_VERIFY_PARAM_get0_peername(X509_VERIFY_PARAM *); +void X509_VERIFY_PARAM_move_peername(X509_VERIFY_PARAM *, X509_VERIFY_PARAM *); +int X509_VERIFY_PARAM_set1_email(X509_VERIFY_PARAM *param, + const char *email, size_t emaillen); +int X509_VERIFY_PARAM_set1_ip(X509_VERIFY_PARAM *param, + const unsigned char *ip, size_t iplen); +int X509_VERIFY_PARAM_set1_ip_asc(X509_VERIFY_PARAM *param, + const char *ipasc); + +int X509_VERIFY_PARAM_get_depth(const X509_VERIFY_PARAM *param); +int X509_VERIFY_PARAM_get_auth_level(const X509_VERIFY_PARAM *param); +const char *X509_VERIFY_PARAM_get0_name(const X509_VERIFY_PARAM *param); + +int X509_VERIFY_PARAM_add0_table(X509_VERIFY_PARAM *param); +int X509_VERIFY_PARAM_get_count(void); +const X509_VERIFY_PARAM *X509_VERIFY_PARAM_get0(int id); +const X509_VERIFY_PARAM *X509_VERIFY_PARAM_lookup(const char *name); +void X509_VERIFY_PARAM_table_cleanup(void); + +/* Non positive return values are errors */ +#define X509_PCY_TREE_FAILURE -2 /* Failure to satisfy explicit policy */ +#define X509_PCY_TREE_INVALID -1 /* Inconsistent or invalid extensions */ +#define X509_PCY_TREE_INTERNAL 0 /* Internal error, most likely malloc */ + +/* + * Positive return values form a bit mask, all but the first are internal to + * the library and don't appear in results from X509_policy_check(). 
+ */ +#define X509_PCY_TREE_VALID 1 /* The policy tree is valid */ +#define X509_PCY_TREE_EMPTY 2 /* The policy tree is empty */ +#define X509_PCY_TREE_EXPLICIT 4 /* Explicit policy required */ + +int X509_policy_check(X509_POLICY_TREE **ptree, int *pexplicit_policy, + STACK_OF(X509) *certs, + STACK_OF(ASN1_OBJECT) *policy_oids, unsigned int flags); + +void X509_policy_tree_free(X509_POLICY_TREE *tree); + +int X509_policy_tree_level_count(const X509_POLICY_TREE *tree); +X509_POLICY_LEVEL *X509_policy_tree_get0_level(const X509_POLICY_TREE *tree, + int i); + +STACK_OF(X509_POLICY_NODE) *X509_policy_tree_get0_policies(const + X509_POLICY_TREE + *tree); + +STACK_OF(X509_POLICY_NODE) *X509_policy_tree_get0_user_policies(const + X509_POLICY_TREE + *tree); + +int X509_policy_level_node_count(X509_POLICY_LEVEL *level); + +X509_POLICY_NODE *X509_policy_level_get0_node(X509_POLICY_LEVEL *level, + int i); + +const ASN1_OBJECT *X509_policy_node_get0_policy(const X509_POLICY_NODE *node); + +STACK_OF(POLICYQUALINFO) *X509_policy_node_get0_qualifiers(const + X509_POLICY_NODE + *node); +const X509_POLICY_NODE *X509_policy_node_get0_parent(const X509_POLICY_NODE + *node); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/x509err.h b/third_party/openssl/uos/sw_64/include/openssl/x509err.h new file mode 100644 index 00000000..cd08673f --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/x509err.h @@ -0,0 +1,129 @@ +/* + * Generated by util/mkerr.pl DO NOT EDIT + * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_X509ERR_H +# define HEADER_X509ERR_H + +# include + +# ifdef __cplusplus +extern "C" +# endif +int ERR_load_X509_strings(void); + +/* + * X509 function codes. 
+ */ +# define X509_F_ADD_CERT_DIR 100 +# define X509_F_BUILD_CHAIN 106 +# define X509_F_BY_FILE_CTRL 101 +# define X509_F_CHECK_NAME_CONSTRAINTS 149 +# define X509_F_CHECK_POLICY 145 +# define X509_F_DANE_I2D 107 +# define X509_F_DIR_CTRL 102 +# define X509_F_GET_CERT_BY_SUBJECT 103 +# define X509_F_I2D_X509_AUX 151 +# define X509_F_LOOKUP_CERTS_SK 152 +# define X509_F_NETSCAPE_SPKI_B64_DECODE 129 +# define X509_F_NETSCAPE_SPKI_B64_ENCODE 130 +# define X509_F_NEW_DIR 153 +# define X509_F_X509AT_ADD1_ATTR 135 +# define X509_F_X509V3_ADD_EXT 104 +# define X509_F_X509_ATTRIBUTE_CREATE_BY_NID 136 +# define X509_F_X509_ATTRIBUTE_CREATE_BY_OBJ 137 +# define X509_F_X509_ATTRIBUTE_CREATE_BY_TXT 140 +# define X509_F_X509_ATTRIBUTE_GET0_DATA 139 +# define X509_F_X509_ATTRIBUTE_SET1_DATA 138 +# define X509_F_X509_CHECK_PRIVATE_KEY 128 +# define X509_F_X509_CRL_DIFF 105 +# define X509_F_X509_CRL_METHOD_NEW 154 +# define X509_F_X509_CRL_PRINT_FP 147 +# define X509_F_X509_EXTENSION_CREATE_BY_NID 108 +# define X509_F_X509_EXTENSION_CREATE_BY_OBJ 109 +# define X509_F_X509_GET_PUBKEY_PARAMETERS 110 +# define X509_F_X509_LOAD_CERT_CRL_FILE 132 +# define X509_F_X509_LOAD_CERT_FILE 111 +# define X509_F_X509_LOAD_CRL_FILE 112 +# define X509_F_X509_LOOKUP_METH_NEW 160 +# define X509_F_X509_LOOKUP_NEW 155 +# define X509_F_X509_NAME_ADD_ENTRY 113 +# define X509_F_X509_NAME_CANON 156 +# define X509_F_X509_NAME_ENTRY_CREATE_BY_NID 114 +# define X509_F_X509_NAME_ENTRY_CREATE_BY_TXT 131 +# define X509_F_X509_NAME_ENTRY_SET_OBJECT 115 +# define X509_F_X509_NAME_ONELINE 116 +# define X509_F_X509_NAME_PRINT 117 +# define X509_F_X509_OBJECT_NEW 150 +# define X509_F_X509_PRINT_EX_FP 118 +# define X509_F_X509_PUBKEY_DECODE 148 +# define X509_F_X509_PUBKEY_GET 161 +# define X509_F_X509_PUBKEY_GET0 119 +# define X509_F_X509_PUBKEY_SET 120 +# define X509_F_X509_REQ_CHECK_PRIVATE_KEY 144 +# define X509_F_X509_REQ_PRINT_EX 121 +# define X509_F_X509_REQ_PRINT_FP 122 +# define X509_F_X509_REQ_TO_X509 123 +# define X509_F_X509_STORE_ADD_CERT 124 +# define X509_F_X509_STORE_ADD_CRL 125 +# define X509_F_X509_STORE_ADD_LOOKUP 157 +# define X509_F_X509_STORE_CTX_GET1_ISSUER 146 +# define X509_F_X509_STORE_CTX_INIT 143 +# define X509_F_X509_STORE_CTX_NEW 142 +# define X509_F_X509_STORE_CTX_PURPOSE_INHERIT 134 +# define X509_F_X509_STORE_NEW 158 +# define X509_F_X509_TO_X509_REQ 126 +# define X509_F_X509_TRUST_ADD 133 +# define X509_F_X509_TRUST_SET 141 +# define X509_F_X509_VERIFY_CERT 127 +# define X509_F_X509_VERIFY_PARAM_NEW 159 + +/* + * X509 reason codes. 
+ */ +# define X509_R_AKID_MISMATCH 110 +# define X509_R_BAD_SELECTOR 133 +# define X509_R_BAD_X509_FILETYPE 100 +# define X509_R_BASE64_DECODE_ERROR 118 +# define X509_R_CANT_CHECK_DH_KEY 114 +# define X509_R_CERT_ALREADY_IN_HASH_TABLE 101 +# define X509_R_CRL_ALREADY_DELTA 127 +# define X509_R_CRL_VERIFY_FAILURE 131 +# define X509_R_IDP_MISMATCH 128 +# define X509_R_INVALID_ATTRIBUTES 138 +# define X509_R_INVALID_DIRECTORY 113 +# define X509_R_INVALID_FIELD_NAME 119 +# define X509_R_INVALID_TRUST 123 +# define X509_R_ISSUER_MISMATCH 129 +# define X509_R_KEY_TYPE_MISMATCH 115 +# define X509_R_KEY_VALUES_MISMATCH 116 +# define X509_R_LOADING_CERT_DIR 103 +# define X509_R_LOADING_DEFAULTS 104 +# define X509_R_METHOD_NOT_SUPPORTED 124 +# define X509_R_NAME_TOO_LONG 134 +# define X509_R_NEWER_CRL_NOT_NEWER 132 +# define X509_R_NO_CERTIFICATE_FOUND 135 +# define X509_R_NO_CERTIFICATE_OR_CRL_FOUND 136 +# define X509_R_NO_CERT_SET_FOR_US_TO_VERIFY 105 +# define X509_R_NO_CRL_FOUND 137 +# define X509_R_NO_CRL_NUMBER 130 +# define X509_R_PUBLIC_KEY_DECODE_ERROR 125 +# define X509_R_PUBLIC_KEY_ENCODE_ERROR 126 +# define X509_R_SHOULD_RETRY 106 +# define X509_R_UNABLE_TO_FIND_PARAMETERS_IN_CHAIN 107 +# define X509_R_UNABLE_TO_GET_CERTS_PUBLIC_KEY 108 +# define X509_R_UNKNOWN_KEY_TYPE 117 +# define X509_R_UNKNOWN_NID 109 +# define X509_R_UNKNOWN_PURPOSE_ID 121 +# define X509_R_UNKNOWN_TRUST_ID 120 +# define X509_R_UNSUPPORTED_ALGORITHM 111 +# define X509_R_WRONG_LOOKUP_TYPE 112 +# define X509_R_WRONG_TYPE 122 + +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/x509v3.h b/third_party/openssl/uos/sw_64/include/openssl/x509v3.h new file mode 100644 index 00000000..3a4f04c1 --- /dev/null +++ b/third_party/openssl/uos/sw_64/include/openssl/x509v3.h @@ -0,0 +1,938 @@ +/* + * Copyright 1999-2023 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#ifndef HEADER_X509V3_H +# define HEADER_X509V3_H + +# include +# include +# include +# include + +#ifdef __cplusplus +extern "C" { +#endif + +/* Forward reference */ +struct v3_ext_method; +struct v3_ext_ctx; + +/* Useful typedefs */ + +typedef void *(*X509V3_EXT_NEW)(void); +typedef void (*X509V3_EXT_FREE) (void *); +typedef void *(*X509V3_EXT_D2I)(void *, const unsigned char **, long); +typedef int (*X509V3_EXT_I2D) (void *, unsigned char **); +typedef STACK_OF(CONF_VALUE) * + (*X509V3_EXT_I2V) (const struct v3_ext_method *method, void *ext, + STACK_OF(CONF_VALUE) *extlist); +typedef void *(*X509V3_EXT_V2I)(const struct v3_ext_method *method, + struct v3_ext_ctx *ctx, + STACK_OF(CONF_VALUE) *values); +typedef char *(*X509V3_EXT_I2S)(const struct v3_ext_method *method, + void *ext); +typedef void *(*X509V3_EXT_S2I)(const struct v3_ext_method *method, + struct v3_ext_ctx *ctx, const char *str); +typedef int (*X509V3_EXT_I2R) (const struct v3_ext_method *method, void *ext, + BIO *out, int indent); +typedef void *(*X509V3_EXT_R2I)(const struct v3_ext_method *method, + struct v3_ext_ctx *ctx, const char *str); + +/* V3 extension structure */ + +struct v3_ext_method { + int ext_nid; + int ext_flags; +/* If this is set the following four fields are ignored */ + ASN1_ITEM_EXP *it; +/* Old style ASN1 calls */ + X509V3_EXT_NEW ext_new; + X509V3_EXT_FREE ext_free; + X509V3_EXT_D2I d2i; + X509V3_EXT_I2D i2d; +/* The following pair is used for string extensions */ + X509V3_EXT_I2S i2s; + X509V3_EXT_S2I s2i; +/* The following pair is used for multi-valued extensions */ + X509V3_EXT_I2V i2v; + X509V3_EXT_V2I v2i; +/* The following are used for raw extensions */ + X509V3_EXT_I2R i2r; + X509V3_EXT_R2I r2i; + void *usr_data; /* Any extension specific data */ +}; + +typedef struct X509V3_CONF_METHOD_st { + char *(*get_string) (void *db, const char *section, const char *value); + STACK_OF(CONF_VALUE) *(*get_section) (void *db, const char *section); + void (*free_string) (void *db, char *string); + void (*free_section) (void *db, STACK_OF(CONF_VALUE) *section); +} X509V3_CONF_METHOD; + +/* Context specific info */ +struct v3_ext_ctx { +# define CTX_TEST 0x1 +# define X509V3_CTX_REPLACE 0x2 + int flags; + X509 *issuer_cert; + X509 *subject_cert; + X509_REQ *subject_req; + X509_CRL *crl; + X509V3_CONF_METHOD *db_meth; + void *db; +/* Maybe more here */ +}; + +typedef struct v3_ext_method X509V3_EXT_METHOD; + +DEFINE_STACK_OF(X509V3_EXT_METHOD) + +/* ext_flags values */ +# define X509V3_EXT_DYNAMIC 0x1 +# define X509V3_EXT_CTX_DEP 0x2 +# define X509V3_EXT_MULTILINE 0x4 + +typedef BIT_STRING_BITNAME ENUMERATED_NAMES; + +typedef struct BASIC_CONSTRAINTS_st { + int ca; + ASN1_INTEGER *pathlen; +} BASIC_CONSTRAINTS; + +typedef struct PKEY_USAGE_PERIOD_st { + ASN1_GENERALIZEDTIME *notBefore; + ASN1_GENERALIZEDTIME *notAfter; +} PKEY_USAGE_PERIOD; + +typedef struct otherName_st { + ASN1_OBJECT *type_id; + ASN1_TYPE *value; +} OTHERNAME; + +typedef struct EDIPartyName_st { + ASN1_STRING *nameAssigner; + ASN1_STRING *partyName; +} EDIPARTYNAME; + +typedef struct GENERAL_NAME_st { +# define GEN_OTHERNAME 0 +# define GEN_EMAIL 1 +# define GEN_DNS 2 +# define GEN_X400 3 +# define GEN_DIRNAME 4 +# define GEN_EDIPARTY 5 +# define GEN_URI 6 +# define GEN_IPADD 7 +# define GEN_RID 8 + int type; + union { + char *ptr; + OTHERNAME *otherName; /* otherName */ + ASN1_IA5STRING *rfc822Name; + 
ASN1_IA5STRING *dNSName; + ASN1_STRING *x400Address; + X509_NAME *directoryName; + EDIPARTYNAME *ediPartyName; + ASN1_IA5STRING *uniformResourceIdentifier; + ASN1_OCTET_STRING *iPAddress; + ASN1_OBJECT *registeredID; + /* Old names */ + ASN1_OCTET_STRING *ip; /* iPAddress */ + X509_NAME *dirn; /* dirn */ + ASN1_IA5STRING *ia5; /* rfc822Name, dNSName, + * uniformResourceIdentifier */ + ASN1_OBJECT *rid; /* registeredID */ + ASN1_TYPE *other; /* x400Address */ + } d; +} GENERAL_NAME; + +typedef struct ACCESS_DESCRIPTION_st { + ASN1_OBJECT *method; + GENERAL_NAME *location; +} ACCESS_DESCRIPTION; + +typedef STACK_OF(ACCESS_DESCRIPTION) AUTHORITY_INFO_ACCESS; + +typedef STACK_OF(ASN1_OBJECT) EXTENDED_KEY_USAGE; + +typedef STACK_OF(ASN1_INTEGER) TLS_FEATURE; + +DEFINE_STACK_OF(GENERAL_NAME) +typedef STACK_OF(GENERAL_NAME) GENERAL_NAMES; +DEFINE_STACK_OF(GENERAL_NAMES) + +DEFINE_STACK_OF(ACCESS_DESCRIPTION) + +typedef struct DIST_POINT_NAME_st { + int type; + union { + GENERAL_NAMES *fullname; + STACK_OF(X509_NAME_ENTRY) *relativename; + } name; +/* If relativename then this contains the full distribution point name */ + X509_NAME *dpname; +} DIST_POINT_NAME; +/* All existing reasons */ +# define CRLDP_ALL_REASONS 0x807f + +# define CRL_REASON_NONE -1 +# define CRL_REASON_UNSPECIFIED 0 +# define CRL_REASON_KEY_COMPROMISE 1 +# define CRL_REASON_CA_COMPROMISE 2 +# define CRL_REASON_AFFILIATION_CHANGED 3 +# define CRL_REASON_SUPERSEDED 4 +# define CRL_REASON_CESSATION_OF_OPERATION 5 +# define CRL_REASON_CERTIFICATE_HOLD 6 +# define CRL_REASON_REMOVE_FROM_CRL 8 +# define CRL_REASON_PRIVILEGE_WITHDRAWN 9 +# define CRL_REASON_AA_COMPROMISE 10 + +struct DIST_POINT_st { + DIST_POINT_NAME *distpoint; + ASN1_BIT_STRING *reasons; + GENERAL_NAMES *CRLissuer; + int dp_reasons; +}; + +typedef STACK_OF(DIST_POINT) CRL_DIST_POINTS; + +DEFINE_STACK_OF(DIST_POINT) + +struct AUTHORITY_KEYID_st { + ASN1_OCTET_STRING *keyid; + GENERAL_NAMES *issuer; + ASN1_INTEGER *serial; +}; + +/* Strong extranet structures */ + +typedef struct SXNET_ID_st { + ASN1_INTEGER *zone; + ASN1_OCTET_STRING *user; +} SXNETID; + +DEFINE_STACK_OF(SXNETID) + +typedef struct SXNET_st { + ASN1_INTEGER *version; + STACK_OF(SXNETID) *ids; +} SXNET; + +typedef struct NOTICEREF_st { + ASN1_STRING *organization; + STACK_OF(ASN1_INTEGER) *noticenos; +} NOTICEREF; + +typedef struct USERNOTICE_st { + NOTICEREF *noticeref; + ASN1_STRING *exptext; +} USERNOTICE; + +typedef struct POLICYQUALINFO_st { + ASN1_OBJECT *pqualid; + union { + ASN1_IA5STRING *cpsuri; + USERNOTICE *usernotice; + ASN1_TYPE *other; + } d; +} POLICYQUALINFO; + +DEFINE_STACK_OF(POLICYQUALINFO) + +typedef struct POLICYINFO_st { + ASN1_OBJECT *policyid; + STACK_OF(POLICYQUALINFO) *qualifiers; +} POLICYINFO; + +typedef STACK_OF(POLICYINFO) CERTIFICATEPOLICIES; + +DEFINE_STACK_OF(POLICYINFO) + +typedef struct POLICY_MAPPING_st { + ASN1_OBJECT *issuerDomainPolicy; + ASN1_OBJECT *subjectDomainPolicy; +} POLICY_MAPPING; + +DEFINE_STACK_OF(POLICY_MAPPING) + +typedef STACK_OF(POLICY_MAPPING) POLICY_MAPPINGS; + +typedef struct GENERAL_SUBTREE_st { + GENERAL_NAME *base; + ASN1_INTEGER *minimum; + ASN1_INTEGER *maximum; +} GENERAL_SUBTREE; + +DEFINE_STACK_OF(GENERAL_SUBTREE) + +struct NAME_CONSTRAINTS_st { + STACK_OF(GENERAL_SUBTREE) *permittedSubtrees; + STACK_OF(GENERAL_SUBTREE) *excludedSubtrees; +}; + +typedef struct POLICY_CONSTRAINTS_st { + ASN1_INTEGER *requireExplicitPolicy; + ASN1_INTEGER *inhibitPolicyMapping; +} POLICY_CONSTRAINTS; + +/* Proxy certificate structures, see RFC 3820 */ 
+typedef struct PROXY_POLICY_st { + ASN1_OBJECT *policyLanguage; + ASN1_OCTET_STRING *policy; +} PROXY_POLICY; + +typedef struct PROXY_CERT_INFO_EXTENSION_st { + ASN1_INTEGER *pcPathLengthConstraint; + PROXY_POLICY *proxyPolicy; +} PROXY_CERT_INFO_EXTENSION; + +DECLARE_ASN1_FUNCTIONS(PROXY_POLICY) +DECLARE_ASN1_FUNCTIONS(PROXY_CERT_INFO_EXTENSION) + +struct ISSUING_DIST_POINT_st { + DIST_POINT_NAME *distpoint; + int onlyuser; + int onlyCA; + ASN1_BIT_STRING *onlysomereasons; + int indirectCRL; + int onlyattr; +}; + +/* Values in idp_flags field */ +/* IDP present */ +# define IDP_PRESENT 0x1 +/* IDP values inconsistent */ +# define IDP_INVALID 0x2 +/* onlyuser true */ +# define IDP_ONLYUSER 0x4 +/* onlyCA true */ +# define IDP_ONLYCA 0x8 +/* onlyattr true */ +# define IDP_ONLYATTR 0x10 +/* indirectCRL true */ +# define IDP_INDIRECT 0x20 +/* onlysomereasons present */ +# define IDP_REASONS 0x40 + +# define X509V3_conf_err(val) ERR_add_error_data(6, \ + "section:", (val)->section, \ + ",name:", (val)->name, ",value:", (val)->value) + +# define X509V3_set_ctx_test(ctx) \ + X509V3_set_ctx(ctx, NULL, NULL, NULL, NULL, CTX_TEST) +# define X509V3_set_ctx_nodb(ctx) (ctx)->db = NULL; + +# define EXT_BITSTRING(nid, table) { nid, 0, ASN1_ITEM_ref(ASN1_BIT_STRING), \ + 0,0,0,0, \ + 0,0, \ + (X509V3_EXT_I2V)i2v_ASN1_BIT_STRING, \ + (X509V3_EXT_V2I)v2i_ASN1_BIT_STRING, \ + NULL, NULL, \ + table} + +# define EXT_IA5STRING(nid) { nid, 0, ASN1_ITEM_ref(ASN1_IA5STRING), \ + 0,0,0,0, \ + (X509V3_EXT_I2S)i2s_ASN1_IA5STRING, \ + (X509V3_EXT_S2I)s2i_ASN1_IA5STRING, \ + 0,0,0,0, \ + NULL} + +# define EXT_END { -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + +/* X509_PURPOSE stuff */ + +# define EXFLAG_BCONS 0x1 +# define EXFLAG_KUSAGE 0x2 +# define EXFLAG_XKUSAGE 0x4 +# define EXFLAG_NSCERT 0x8 + +# define EXFLAG_CA 0x10 +/* Really self issued not necessarily self signed */ +# define EXFLAG_SI 0x20 +# define EXFLAG_V1 0x40 +# define EXFLAG_INVALID 0x80 +/* EXFLAG_SET is set to indicate that some values have been precomputed */ +# define EXFLAG_SET 0x100 +# define EXFLAG_CRITICAL 0x200 +# define EXFLAG_PROXY 0x400 + +# define EXFLAG_INVALID_POLICY 0x800 +# define EXFLAG_FRESHEST 0x1000 +# define EXFLAG_SS 0x2000 /* cert is apparently self-signed */ + +# define EXFLAG_NO_FINGERPRINT 0x100000 + +# define KU_DIGITAL_SIGNATURE 0x0080 +# define KU_NON_REPUDIATION 0x0040 +# define KU_KEY_ENCIPHERMENT 0x0020 +# define KU_DATA_ENCIPHERMENT 0x0010 +# define KU_KEY_AGREEMENT 0x0008 +# define KU_KEY_CERT_SIGN 0x0004 +# define KU_CRL_SIGN 0x0002 +# define KU_ENCIPHER_ONLY 0x0001 +# define KU_DECIPHER_ONLY 0x8000 + +# define NS_SSL_CLIENT 0x80 +# define NS_SSL_SERVER 0x40 +# define NS_SMIME 0x20 +# define NS_OBJSIGN 0x10 +# define NS_SSL_CA 0x04 +# define NS_SMIME_CA 0x02 +# define NS_OBJSIGN_CA 0x01 +# define NS_ANY_CA (NS_SSL_CA|NS_SMIME_CA|NS_OBJSIGN_CA) + +# define XKU_SSL_SERVER 0x1 +# define XKU_SSL_CLIENT 0x2 +# define XKU_SMIME 0x4 +# define XKU_CODE_SIGN 0x8 +# define XKU_SGC 0x10 +# define XKU_OCSP_SIGN 0x20 +# define XKU_TIMESTAMP 0x40 +# define XKU_DVCS 0x80 +# define XKU_ANYEKU 0x100 + +# define X509_PURPOSE_DYNAMIC 0x1 +# define X509_PURPOSE_DYNAMIC_NAME 0x2 + +typedef struct x509_purpose_st { + int purpose; + int trust; /* Default trust ID */ + int flags; + int (*check_purpose) (const struct x509_purpose_st *, const X509 *, int); + char *name; + char *sname; + void *usr_data; +} X509_PURPOSE; + +# define X509_PURPOSE_SSL_CLIENT 1 +# define X509_PURPOSE_SSL_SERVER 2 +# define X509_PURPOSE_NS_SSL_SERVER 3 +# define 
X509_PURPOSE_SMIME_SIGN 4 +# define X509_PURPOSE_SMIME_ENCRYPT 5 +# define X509_PURPOSE_CRL_SIGN 6 +# define X509_PURPOSE_ANY 7 +# define X509_PURPOSE_OCSP_HELPER 8 +# define X509_PURPOSE_TIMESTAMP_SIGN 9 + +# define X509_PURPOSE_MIN 1 +# define X509_PURPOSE_MAX 9 + +/* Flags for X509V3_EXT_print() */ + +# define X509V3_EXT_UNKNOWN_MASK (0xfL << 16) +/* Return error for unknown extensions */ +# define X509V3_EXT_DEFAULT 0 +/* Print error for unknown extensions */ +# define X509V3_EXT_ERROR_UNKNOWN (1L << 16) +/* ASN1 parse unknown extensions */ +# define X509V3_EXT_PARSE_UNKNOWN (2L << 16) +/* BIO_dump unknown extensions */ +# define X509V3_EXT_DUMP_UNKNOWN (3L << 16) + +/* Flags for X509V3_add1_i2d */ + +# define X509V3_ADD_OP_MASK 0xfL +# define X509V3_ADD_DEFAULT 0L +# define X509V3_ADD_APPEND 1L +# define X509V3_ADD_REPLACE 2L +# define X509V3_ADD_REPLACE_EXISTING 3L +# define X509V3_ADD_KEEP_EXISTING 4L +# define X509V3_ADD_DELETE 5L +# define X509V3_ADD_SILENT 0x10 + +DEFINE_STACK_OF(X509_PURPOSE) + +DECLARE_ASN1_FUNCTIONS(BASIC_CONSTRAINTS) + +DECLARE_ASN1_FUNCTIONS(SXNET) +DECLARE_ASN1_FUNCTIONS(SXNETID) + +int SXNET_add_id_asc(SXNET **psx, const char *zone, const char *user, int userlen); +int SXNET_add_id_ulong(SXNET **psx, unsigned long lzone, const char *user, + int userlen); +int SXNET_add_id_INTEGER(SXNET **psx, ASN1_INTEGER *izone, const char *user, + int userlen); + +ASN1_OCTET_STRING *SXNET_get_id_asc(SXNET *sx, const char *zone); +ASN1_OCTET_STRING *SXNET_get_id_ulong(SXNET *sx, unsigned long lzone); +ASN1_OCTET_STRING *SXNET_get_id_INTEGER(SXNET *sx, ASN1_INTEGER *zone); + +DECLARE_ASN1_FUNCTIONS(AUTHORITY_KEYID) + +DECLARE_ASN1_FUNCTIONS(PKEY_USAGE_PERIOD) + +DECLARE_ASN1_FUNCTIONS(GENERAL_NAME) +GENERAL_NAME *GENERAL_NAME_dup(GENERAL_NAME *a); +int GENERAL_NAME_cmp(GENERAL_NAME *a, GENERAL_NAME *b); + +ASN1_BIT_STRING *v2i_ASN1_BIT_STRING(X509V3_EXT_METHOD *method, + X509V3_CTX *ctx, + STACK_OF(CONF_VALUE) *nval); +STACK_OF(CONF_VALUE) *i2v_ASN1_BIT_STRING(X509V3_EXT_METHOD *method, + ASN1_BIT_STRING *bits, + STACK_OF(CONF_VALUE) *extlist); +char *i2s_ASN1_IA5STRING(X509V3_EXT_METHOD *method, ASN1_IA5STRING *ia5); +ASN1_IA5STRING *s2i_ASN1_IA5STRING(X509V3_EXT_METHOD *method, + X509V3_CTX *ctx, const char *str); + +STACK_OF(CONF_VALUE) *i2v_GENERAL_NAME(X509V3_EXT_METHOD *method, + GENERAL_NAME *gen, + STACK_OF(CONF_VALUE) *ret); +int GENERAL_NAME_print(BIO *out, GENERAL_NAME *gen); + +DECLARE_ASN1_FUNCTIONS(GENERAL_NAMES) + +STACK_OF(CONF_VALUE) *i2v_GENERAL_NAMES(X509V3_EXT_METHOD *method, + GENERAL_NAMES *gen, + STACK_OF(CONF_VALUE) *extlist); +GENERAL_NAMES *v2i_GENERAL_NAMES(const X509V3_EXT_METHOD *method, + X509V3_CTX *ctx, STACK_OF(CONF_VALUE) *nval); + +DECLARE_ASN1_FUNCTIONS(OTHERNAME) +DECLARE_ASN1_FUNCTIONS(EDIPARTYNAME) +int OTHERNAME_cmp(OTHERNAME *a, OTHERNAME *b); +void GENERAL_NAME_set0_value(GENERAL_NAME *a, int type, void *value); +void *GENERAL_NAME_get0_value(const GENERAL_NAME *a, int *ptype); +int GENERAL_NAME_set0_othername(GENERAL_NAME *gen, + ASN1_OBJECT *oid, ASN1_TYPE *value); +int GENERAL_NAME_get0_otherName(const GENERAL_NAME *gen, + ASN1_OBJECT **poid, ASN1_TYPE **pvalue); + +char *i2s_ASN1_OCTET_STRING(X509V3_EXT_METHOD *method, + const ASN1_OCTET_STRING *ia5); +ASN1_OCTET_STRING *s2i_ASN1_OCTET_STRING(X509V3_EXT_METHOD *method, + X509V3_CTX *ctx, const char *str); + +DECLARE_ASN1_FUNCTIONS(EXTENDED_KEY_USAGE) +int i2a_ACCESS_DESCRIPTION(BIO *bp, const ACCESS_DESCRIPTION *a); + +DECLARE_ASN1_ALLOC_FUNCTIONS(TLS_FEATURE) + 
+DECLARE_ASN1_FUNCTIONS(CERTIFICATEPOLICIES) +DECLARE_ASN1_FUNCTIONS(POLICYINFO) +DECLARE_ASN1_FUNCTIONS(POLICYQUALINFO) +DECLARE_ASN1_FUNCTIONS(USERNOTICE) +DECLARE_ASN1_FUNCTIONS(NOTICEREF) + +DECLARE_ASN1_FUNCTIONS(CRL_DIST_POINTS) +DECLARE_ASN1_FUNCTIONS(DIST_POINT) +DECLARE_ASN1_FUNCTIONS(DIST_POINT_NAME) +DECLARE_ASN1_FUNCTIONS(ISSUING_DIST_POINT) + +int DIST_POINT_set_dpname(DIST_POINT_NAME *dpn, X509_NAME *iname); + +int NAME_CONSTRAINTS_check(X509 *x, NAME_CONSTRAINTS *nc); +int NAME_CONSTRAINTS_check_CN(X509 *x, NAME_CONSTRAINTS *nc); + +DECLARE_ASN1_FUNCTIONS(ACCESS_DESCRIPTION) +DECLARE_ASN1_FUNCTIONS(AUTHORITY_INFO_ACCESS) + +DECLARE_ASN1_ITEM(POLICY_MAPPING) +DECLARE_ASN1_ALLOC_FUNCTIONS(POLICY_MAPPING) +DECLARE_ASN1_ITEM(POLICY_MAPPINGS) + +DECLARE_ASN1_ITEM(GENERAL_SUBTREE) +DECLARE_ASN1_ALLOC_FUNCTIONS(GENERAL_SUBTREE) + +DECLARE_ASN1_ITEM(NAME_CONSTRAINTS) +DECLARE_ASN1_ALLOC_FUNCTIONS(NAME_CONSTRAINTS) + +DECLARE_ASN1_ALLOC_FUNCTIONS(POLICY_CONSTRAINTS) +DECLARE_ASN1_ITEM(POLICY_CONSTRAINTS) + +GENERAL_NAME *a2i_GENERAL_NAME(GENERAL_NAME *out, + const X509V3_EXT_METHOD *method, + X509V3_CTX *ctx, int gen_type, + const char *value, int is_nc); + +# ifdef HEADER_CONF_H +GENERAL_NAME *v2i_GENERAL_NAME(const X509V3_EXT_METHOD *method, + X509V3_CTX *ctx, CONF_VALUE *cnf); +GENERAL_NAME *v2i_GENERAL_NAME_ex(GENERAL_NAME *out, + const X509V3_EXT_METHOD *method, + X509V3_CTX *ctx, CONF_VALUE *cnf, + int is_nc); +void X509V3_conf_free(CONF_VALUE *val); + +X509_EXTENSION *X509V3_EXT_nconf_nid(CONF *conf, X509V3_CTX *ctx, int ext_nid, + const char *value); +X509_EXTENSION *X509V3_EXT_nconf(CONF *conf, X509V3_CTX *ctx, const char *name, + const char *value); +int X509V3_EXT_add_nconf_sk(CONF *conf, X509V3_CTX *ctx, const char *section, + STACK_OF(X509_EXTENSION) **sk); +int X509V3_EXT_add_nconf(CONF *conf, X509V3_CTX *ctx, const char *section, + X509 *cert); +int X509V3_EXT_REQ_add_nconf(CONF *conf, X509V3_CTX *ctx, const char *section, + X509_REQ *req); +int X509V3_EXT_CRL_add_nconf(CONF *conf, X509V3_CTX *ctx, const char *section, + X509_CRL *crl); + +X509_EXTENSION *X509V3_EXT_conf_nid(LHASH_OF(CONF_VALUE) *conf, + X509V3_CTX *ctx, int ext_nid, + const char *value); +X509_EXTENSION *X509V3_EXT_conf(LHASH_OF(CONF_VALUE) *conf, X509V3_CTX *ctx, + const char *name, const char *value); +int X509V3_EXT_add_conf(LHASH_OF(CONF_VALUE) *conf, X509V3_CTX *ctx, + const char *section, X509 *cert); +int X509V3_EXT_REQ_add_conf(LHASH_OF(CONF_VALUE) *conf, X509V3_CTX *ctx, + const char *section, X509_REQ *req); +int X509V3_EXT_CRL_add_conf(LHASH_OF(CONF_VALUE) *conf, X509V3_CTX *ctx, + const char *section, X509_CRL *crl); + +int X509V3_add_value_bool_nf(const char *name, int asn1_bool, + STACK_OF(CONF_VALUE) **extlist); +int X509V3_get_value_bool(const CONF_VALUE *value, int *asn1_bool); +int X509V3_get_value_int(const CONF_VALUE *value, ASN1_INTEGER **aint); +void X509V3_set_nconf(X509V3_CTX *ctx, CONF *conf); +void X509V3_set_conf_lhash(X509V3_CTX *ctx, LHASH_OF(CONF_VALUE) *lhash); +# endif + +char *X509V3_get_string(X509V3_CTX *ctx, const char *name, const char *section); +STACK_OF(CONF_VALUE) *X509V3_get_section(X509V3_CTX *ctx, const char *section); +void X509V3_string_free(X509V3_CTX *ctx, char *str); +void X509V3_section_free(X509V3_CTX *ctx, STACK_OF(CONF_VALUE) *section); +void X509V3_set_ctx(X509V3_CTX *ctx, X509 *issuer, X509 *subject, + X509_REQ *req, X509_CRL *crl, int flags); + +int X509V3_add_value(const char *name, const char *value, + STACK_OF(CONF_VALUE) **extlist); +int 
X509V3_add_value_uchar(const char *name, const unsigned char *value, + STACK_OF(CONF_VALUE) **extlist); +int X509V3_add_value_bool(const char *name, int asn1_bool, + STACK_OF(CONF_VALUE) **extlist); +int X509V3_add_value_int(const char *name, const ASN1_INTEGER *aint, + STACK_OF(CONF_VALUE) **extlist); +char *i2s_ASN1_INTEGER(X509V3_EXT_METHOD *meth, const ASN1_INTEGER *aint); +ASN1_INTEGER *s2i_ASN1_INTEGER(X509V3_EXT_METHOD *meth, const char *value); +char *i2s_ASN1_ENUMERATED(X509V3_EXT_METHOD *meth, const ASN1_ENUMERATED *aint); +char *i2s_ASN1_ENUMERATED_TABLE(X509V3_EXT_METHOD *meth, + const ASN1_ENUMERATED *aint); +int X509V3_EXT_add(X509V3_EXT_METHOD *ext); +int X509V3_EXT_add_list(X509V3_EXT_METHOD *extlist); +int X509V3_EXT_add_alias(int nid_to, int nid_from); +void X509V3_EXT_cleanup(void); + +const X509V3_EXT_METHOD *X509V3_EXT_get(X509_EXTENSION *ext); +const X509V3_EXT_METHOD *X509V3_EXT_get_nid(int nid); +int X509V3_add_standard_extensions(void); +STACK_OF(CONF_VALUE) *X509V3_parse_list(const char *line); +void *X509V3_EXT_d2i(X509_EXTENSION *ext); +void *X509V3_get_d2i(const STACK_OF(X509_EXTENSION) *x, int nid, int *crit, + int *idx); + +X509_EXTENSION *X509V3_EXT_i2d(int ext_nid, int crit, void *ext_struc); +int X509V3_add1_i2d(STACK_OF(X509_EXTENSION) **x, int nid, void *value, + int crit, unsigned long flags); + +#if OPENSSL_API_COMPAT < 0x10100000L +/* The new declarations are in crypto.h, but the old ones were here. */ +# define hex_to_string OPENSSL_buf2hexstr +# define string_to_hex OPENSSL_hexstr2buf +#endif + +void X509V3_EXT_val_prn(BIO *out, STACK_OF(CONF_VALUE) *val, int indent, + int ml); +int X509V3_EXT_print(BIO *out, X509_EXTENSION *ext, unsigned long flag, + int indent); +#ifndef OPENSSL_NO_STDIO +int X509V3_EXT_print_fp(FILE *out, X509_EXTENSION *ext, int flag, int indent); +#endif +int X509V3_extensions_print(BIO *out, const char *title, + const STACK_OF(X509_EXTENSION) *exts, + unsigned long flag, int indent); + +int X509_check_ca(X509 *x); +int X509_check_purpose(X509 *x, int id, int ca); +int X509_supported_extension(X509_EXTENSION *ex); +int X509_PURPOSE_set(int *p, int purpose); +int X509_check_issued(X509 *issuer, X509 *subject); +int X509_check_akid(X509 *issuer, AUTHORITY_KEYID *akid); +void X509_set_proxy_flag(X509 *x); +void X509_set_proxy_pathlen(X509 *x, long l); +long X509_get_proxy_pathlen(X509 *x); + +uint32_t X509_get_extension_flags(X509 *x); +uint32_t X509_get_key_usage(X509 *x); +uint32_t X509_get_extended_key_usage(X509 *x); +const ASN1_OCTET_STRING *X509_get0_subject_key_id(X509 *x); +const ASN1_OCTET_STRING *X509_get0_authority_key_id(X509 *x); +const GENERAL_NAMES *X509_get0_authority_issuer(X509 *x); +const ASN1_INTEGER *X509_get0_authority_serial(X509 *x); + +int X509_PURPOSE_get_count(void); +X509_PURPOSE *X509_PURPOSE_get0(int idx); +int X509_PURPOSE_get_by_sname(const char *sname); +int X509_PURPOSE_get_by_id(int id); +int X509_PURPOSE_add(int id, int trust, int flags, + int (*ck) (const X509_PURPOSE *, const X509 *, int), + const char *name, const char *sname, void *arg); +char *X509_PURPOSE_get0_name(const X509_PURPOSE *xp); +char *X509_PURPOSE_get0_sname(const X509_PURPOSE *xp); +int X509_PURPOSE_get_trust(const X509_PURPOSE *xp); +void X509_PURPOSE_cleanup(void); +int X509_PURPOSE_get_id(const X509_PURPOSE *); + +STACK_OF(OPENSSL_STRING) *X509_get1_email(X509 *x); +STACK_OF(OPENSSL_STRING) *X509_REQ_get1_email(X509_REQ *x); +void X509_email_free(STACK_OF(OPENSSL_STRING) *sk); +STACK_OF(OPENSSL_STRING) *X509_get1_ocsp(X509 
*x); +/* Flags for X509_check_* functions */ + +/* + * Always check subject name for host match even if subject alt names present + */ +# define X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT 0x1 +/* Disable wildcard matching for dnsName fields and common name. */ +# define X509_CHECK_FLAG_NO_WILDCARDS 0x2 +/* Wildcards must not match a partial label. */ +# define X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS 0x4 +/* Allow (non-partial) wildcards to match multiple labels. */ +# define X509_CHECK_FLAG_MULTI_LABEL_WILDCARDS 0x8 +/* Constraint verifier subdomain patterns to match a single labels. */ +# define X509_CHECK_FLAG_SINGLE_LABEL_SUBDOMAINS 0x10 +/* Never check the subject CN */ +# define X509_CHECK_FLAG_NEVER_CHECK_SUBJECT 0x20 +/* + * Match reference identifiers starting with "." to any sub-domain. + * This is a non-public flag, turned on implicitly when the subject + * reference identity is a DNS name. + */ +# define _X509_CHECK_FLAG_DOT_SUBDOMAINS 0x8000 + +int X509_check_host(X509 *x, const char *chk, size_t chklen, + unsigned int flags, char **peername); +int X509_check_email(X509 *x, const char *chk, size_t chklen, + unsigned int flags); +int X509_check_ip(X509 *x, const unsigned char *chk, size_t chklen, + unsigned int flags); +int X509_check_ip_asc(X509 *x, const char *ipasc, unsigned int flags); + +ASN1_OCTET_STRING *a2i_IPADDRESS(const char *ipasc); +ASN1_OCTET_STRING *a2i_IPADDRESS_NC(const char *ipasc); +int X509V3_NAME_from_section(X509_NAME *nm, STACK_OF(CONF_VALUE) *dn_sk, + unsigned long chtype); + +void X509_POLICY_NODE_print(BIO *out, X509_POLICY_NODE *node, int indent); +DEFINE_STACK_OF(X509_POLICY_NODE) + +#ifndef OPENSSL_NO_RFC3779 +typedef struct ASRange_st { + ASN1_INTEGER *min, *max; +} ASRange; + +# define ASIdOrRange_id 0 +# define ASIdOrRange_range 1 + +typedef struct ASIdOrRange_st { + int type; + union { + ASN1_INTEGER *id; + ASRange *range; + } u; +} ASIdOrRange; + +typedef STACK_OF(ASIdOrRange) ASIdOrRanges; +DEFINE_STACK_OF(ASIdOrRange) + +# define ASIdentifierChoice_inherit 0 +# define ASIdentifierChoice_asIdsOrRanges 1 + +typedef struct ASIdentifierChoice_st { + int type; + union { + ASN1_NULL *inherit; + ASIdOrRanges *asIdsOrRanges; + } u; +} ASIdentifierChoice; + +typedef struct ASIdentifiers_st { + ASIdentifierChoice *asnum, *rdi; +} ASIdentifiers; + +DECLARE_ASN1_FUNCTIONS(ASRange) +DECLARE_ASN1_FUNCTIONS(ASIdOrRange) +DECLARE_ASN1_FUNCTIONS(ASIdentifierChoice) +DECLARE_ASN1_FUNCTIONS(ASIdentifiers) + +typedef struct IPAddressRange_st { + ASN1_BIT_STRING *min, *max; +} IPAddressRange; + +# define IPAddressOrRange_addressPrefix 0 +# define IPAddressOrRange_addressRange 1 + +typedef struct IPAddressOrRange_st { + int type; + union { + ASN1_BIT_STRING *addressPrefix; + IPAddressRange *addressRange; + } u; +} IPAddressOrRange; + +typedef STACK_OF(IPAddressOrRange) IPAddressOrRanges; +DEFINE_STACK_OF(IPAddressOrRange) + +# define IPAddressChoice_inherit 0 +# define IPAddressChoice_addressesOrRanges 1 + +typedef struct IPAddressChoice_st { + int type; + union { + ASN1_NULL *inherit; + IPAddressOrRanges *addressesOrRanges; + } u; +} IPAddressChoice; + +typedef struct IPAddressFamily_st { + ASN1_OCTET_STRING *addressFamily; + IPAddressChoice *ipAddressChoice; +} IPAddressFamily; + +typedef STACK_OF(IPAddressFamily) IPAddrBlocks; +DEFINE_STACK_OF(IPAddressFamily) + +DECLARE_ASN1_FUNCTIONS(IPAddressRange) +DECLARE_ASN1_FUNCTIONS(IPAddressOrRange) +DECLARE_ASN1_FUNCTIONS(IPAddressChoice) +DECLARE_ASN1_FUNCTIONS(IPAddressFamily) + +/* + * API tag for elements of the 
ASIdentifer SEQUENCE. + */ +# define V3_ASID_ASNUM 0 +# define V3_ASID_RDI 1 + +/* + * AFI values, assigned by IANA. It'd be nice to make the AFI + * handling code totally generic, but there are too many little things + * that would need to be defined for other address families for it to + * be worth the trouble. + */ +# define IANA_AFI_IPV4 1 +# define IANA_AFI_IPV6 2 + +/* + * Utilities to construct and extract values from RFC3779 extensions, + * since some of the encodings (particularly for IP address prefixes + * and ranges) are a bit tedious to work with directly. + */ +int X509v3_asid_add_inherit(ASIdentifiers *asid, int which); +int X509v3_asid_add_id_or_range(ASIdentifiers *asid, int which, + ASN1_INTEGER *min, ASN1_INTEGER *max); +int X509v3_addr_add_inherit(IPAddrBlocks *addr, + const unsigned afi, const unsigned *safi); +int X509v3_addr_add_prefix(IPAddrBlocks *addr, + const unsigned afi, const unsigned *safi, + unsigned char *a, const int prefixlen); +int X509v3_addr_add_range(IPAddrBlocks *addr, + const unsigned afi, const unsigned *safi, + unsigned char *min, unsigned char *max); +unsigned X509v3_addr_get_afi(const IPAddressFamily *f); +int X509v3_addr_get_range(IPAddressOrRange *aor, const unsigned afi, + unsigned char *min, unsigned char *max, + const int length); + +/* + * Canonical forms. + */ +int X509v3_asid_is_canonical(ASIdentifiers *asid); +int X509v3_addr_is_canonical(IPAddrBlocks *addr); +int X509v3_asid_canonize(ASIdentifiers *asid); +int X509v3_addr_canonize(IPAddrBlocks *addr); + +/* + * Tests for inheritance and containment. + */ +int X509v3_asid_inherits(ASIdentifiers *asid); +int X509v3_addr_inherits(IPAddrBlocks *addr); +int X509v3_asid_subset(ASIdentifiers *a, ASIdentifiers *b); +int X509v3_addr_subset(IPAddrBlocks *a, IPAddrBlocks *b); + +/* + * Check whether RFC 3779 extensions nest properly in chains. 
+ */ +int X509v3_asid_validate_path(X509_STORE_CTX *); +int X509v3_addr_validate_path(X509_STORE_CTX *); +int X509v3_asid_validate_resource_set(STACK_OF(X509) *chain, + ASIdentifiers *ext, + int allow_inheritance); +int X509v3_addr_validate_resource_set(STACK_OF(X509) *chain, + IPAddrBlocks *ext, int allow_inheritance); + +#endif /* OPENSSL_NO_RFC3779 */ + +DEFINE_STACK_OF(ASN1_STRING) + +/* + * Admission Syntax + */ +typedef struct NamingAuthority_st NAMING_AUTHORITY; +typedef struct ProfessionInfo_st PROFESSION_INFO; +typedef struct Admissions_st ADMISSIONS; +typedef struct AdmissionSyntax_st ADMISSION_SYNTAX; +DECLARE_ASN1_FUNCTIONS(NAMING_AUTHORITY) +DECLARE_ASN1_FUNCTIONS(PROFESSION_INFO) +DECLARE_ASN1_FUNCTIONS(ADMISSIONS) +DECLARE_ASN1_FUNCTIONS(ADMISSION_SYNTAX) +DEFINE_STACK_OF(ADMISSIONS) +DEFINE_STACK_OF(PROFESSION_INFO) +typedef STACK_OF(PROFESSION_INFO) PROFESSION_INFOS; + +const ASN1_OBJECT *NAMING_AUTHORITY_get0_authorityId( + const NAMING_AUTHORITY *n); +const ASN1_IA5STRING *NAMING_AUTHORITY_get0_authorityURL( + const NAMING_AUTHORITY *n); +const ASN1_STRING *NAMING_AUTHORITY_get0_authorityText( + const NAMING_AUTHORITY *n); +void NAMING_AUTHORITY_set0_authorityId(NAMING_AUTHORITY *n, + ASN1_OBJECT* namingAuthorityId); +void NAMING_AUTHORITY_set0_authorityURL(NAMING_AUTHORITY *n, + ASN1_IA5STRING* namingAuthorityUrl); +void NAMING_AUTHORITY_set0_authorityText(NAMING_AUTHORITY *n, + ASN1_STRING* namingAuthorityText); + +const GENERAL_NAME *ADMISSION_SYNTAX_get0_admissionAuthority( + const ADMISSION_SYNTAX *as); +void ADMISSION_SYNTAX_set0_admissionAuthority( + ADMISSION_SYNTAX *as, GENERAL_NAME *aa); +const STACK_OF(ADMISSIONS) *ADMISSION_SYNTAX_get0_contentsOfAdmissions( + const ADMISSION_SYNTAX *as); +void ADMISSION_SYNTAX_set0_contentsOfAdmissions( + ADMISSION_SYNTAX *as, STACK_OF(ADMISSIONS) *a); +const GENERAL_NAME *ADMISSIONS_get0_admissionAuthority(const ADMISSIONS *a); +void ADMISSIONS_set0_admissionAuthority(ADMISSIONS *a, GENERAL_NAME *aa); +const NAMING_AUTHORITY *ADMISSIONS_get0_namingAuthority(const ADMISSIONS *a); +void ADMISSIONS_set0_namingAuthority(ADMISSIONS *a, NAMING_AUTHORITY *na); +const PROFESSION_INFOS *ADMISSIONS_get0_professionInfos(const ADMISSIONS *a); +void ADMISSIONS_set0_professionInfos(ADMISSIONS *a, PROFESSION_INFOS *pi); +const ASN1_OCTET_STRING *PROFESSION_INFO_get0_addProfessionInfo( + const PROFESSION_INFO *pi); +void PROFESSION_INFO_set0_addProfessionInfo( + PROFESSION_INFO *pi, ASN1_OCTET_STRING *aos); +const NAMING_AUTHORITY *PROFESSION_INFO_get0_namingAuthority( + const PROFESSION_INFO *pi); +void PROFESSION_INFO_set0_namingAuthority( + PROFESSION_INFO *pi, NAMING_AUTHORITY *na); +const STACK_OF(ASN1_STRING) *PROFESSION_INFO_get0_professionItems( + const PROFESSION_INFO *pi); +void PROFESSION_INFO_set0_professionItems( + PROFESSION_INFO *pi, STACK_OF(ASN1_STRING) *as); +const STACK_OF(ASN1_OBJECT) *PROFESSION_INFO_get0_professionOIDs( + const PROFESSION_INFO *pi); +void PROFESSION_INFO_set0_professionOIDs( + PROFESSION_INFO *pi, STACK_OF(ASN1_OBJECT) *po); +const ASN1_PRINTABLESTRING *PROFESSION_INFO_get0_registrationNumber( + const PROFESSION_INFO *pi); +void PROFESSION_INFO_set0_registrationNumber( + PROFESSION_INFO *pi, ASN1_PRINTABLESTRING *rn); + +# ifdef __cplusplus +} +# endif +#endif diff --git a/third_party/openssl/uos/sw_64/include/openssl/x509v3err.h b/third_party/openssl/uos/sw_64/include/openssl/x509v3err.h new file mode 100644 index 00000000..3b9f7139 --- /dev/null +++ 
b/third_party/openssl/uos/sw_64/include/openssl/x509v3err.h
@@ -0,0 +1,164 @@
+/*
+ * Generated by util/mkerr.pl DO NOT EDIT
+ * Copyright 1995-2021 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the OpenSSL license (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#ifndef HEADER_X509V3ERR_H
+# define HEADER_X509V3ERR_H
+
+# ifndef HEADER_SYMHACKS_H
+#  include <openssl/symhacks.h>
+# endif
+
+# ifdef __cplusplus
+extern "C"
+# endif
+int ERR_load_X509V3_strings(void);
+
+/*
+ * X509V3 function codes.
+ */
+# define X509V3_F_A2I_GENERAL_NAME 164
+# define X509V3_F_ADDR_VALIDATE_PATH_INTERNAL 166
+# define X509V3_F_ASIDENTIFIERCHOICE_CANONIZE 161
+# define X509V3_F_ASIDENTIFIERCHOICE_IS_CANONICAL 162
+# define X509V3_F_BIGNUM_TO_STRING 167
+# define X509V3_F_COPY_EMAIL 122
+# define X509V3_F_COPY_ISSUER 123
+# define X509V3_F_DO_DIRNAME 144
+# define X509V3_F_DO_EXT_I2D 135
+# define X509V3_F_DO_EXT_NCONF 151
+# define X509V3_F_GNAMES_FROM_SECTNAME 156
+# define X509V3_F_I2S_ASN1_ENUMERATED 121
+# define X509V3_F_I2S_ASN1_IA5STRING 149
+# define X509V3_F_I2S_ASN1_INTEGER 120
+# define X509V3_F_I2V_AUTHORITY_INFO_ACCESS 138
+# define X509V3_F_I2V_AUTHORITY_KEYID 173
+# define X509V3_F_LEVEL_ADD_NODE 168
+# define X509V3_F_NOTICE_SECTION 132
+# define X509V3_F_NREF_NOS 133
+# define X509V3_F_POLICY_CACHE_CREATE 169
+# define X509V3_F_POLICY_CACHE_NEW 170
+# define X509V3_F_POLICY_DATA_NEW 171
+# define X509V3_F_POLICY_SECTION 131
+# define X509V3_F_PROCESS_PCI_VALUE 150
+# define X509V3_F_R2I_CERTPOL 130
+# define X509V3_F_R2I_PCI 155
+# define X509V3_F_S2I_ASN1_IA5STRING 100
+# define X509V3_F_S2I_ASN1_INTEGER 108
+# define X509V3_F_S2I_ASN1_OCTET_STRING 112
+# define X509V3_F_S2I_SKEY_ID 115
+# define X509V3_F_SET_DIST_POINT_NAME 158
+# define X509V3_F_SXNET_ADD_ID_ASC 125
+# define X509V3_F_SXNET_ADD_ID_INTEGER 126
+# define X509V3_F_SXNET_ADD_ID_ULONG 127
+# define X509V3_F_SXNET_GET_ID_ASC 128
+# define X509V3_F_SXNET_GET_ID_ULONG 129
+# define X509V3_F_TREE_INIT 172
+# define X509V3_F_V2I_ASIDENTIFIERS 163
+# define X509V3_F_V2I_ASN1_BIT_STRING 101
+# define X509V3_F_V2I_AUTHORITY_INFO_ACCESS 139
+# define X509V3_F_V2I_AUTHORITY_KEYID 119
+# define X509V3_F_V2I_BASIC_CONSTRAINTS 102
+# define X509V3_F_V2I_CRLD 134
+# define X509V3_F_V2I_EXTENDED_KEY_USAGE 103
+# define X509V3_F_V2I_GENERAL_NAMES 118
+# define X509V3_F_V2I_GENERAL_NAME_EX 117
+# define X509V3_F_V2I_IDP 157
+# define X509V3_F_V2I_IPADDRBLOCKS 159
+# define X509V3_F_V2I_ISSUER_ALT 153
+# define X509V3_F_V2I_NAME_CONSTRAINTS 147
+# define X509V3_F_V2I_POLICY_CONSTRAINTS 146
+# define X509V3_F_V2I_POLICY_MAPPINGS 145
+# define X509V3_F_V2I_SUBJECT_ALT 154
+# define X509V3_F_V2I_TLS_FEATURE 165
+# define X509V3_F_V3_GENERIC_EXTENSION 116
+# define X509V3_F_X509V3_ADD1_I2D 140
+# define X509V3_F_X509V3_ADD_LEN_VALUE 174
+# define X509V3_F_X509V3_ADD_VALUE 105
+# define X509V3_F_X509V3_EXT_ADD 104
+# define X509V3_F_X509V3_EXT_ADD_ALIAS 106
+# define X509V3_F_X509V3_EXT_I2D 136
+# define X509V3_F_X509V3_EXT_NCONF 152
+# define X509V3_F_X509V3_GET_SECTION 142
+# define X509V3_F_X509V3_GET_STRING 143
+# define X509V3_F_X509V3_GET_VALUE_BOOL 110
+# define X509V3_F_X509V3_PARSE_LIST 109
+# define X509V3_F_X509_PURPOSE_ADD 137
+# define X509V3_F_X509_PURPOSE_SET 141
+
+/*
+ * X509V3 reason codes.
+ */
+# define X509V3_R_BAD_IP_ADDRESS 118
+# define X509V3_R_BAD_OBJECT 119
+# define X509V3_R_BN_DEC2BN_ERROR 100
+# define X509V3_R_BN_TO_ASN1_INTEGER_ERROR 101
+# define X509V3_R_DIRNAME_ERROR 149
+# define X509V3_R_DISTPOINT_ALREADY_SET 160
+# define X509V3_R_DUPLICATE_ZONE_ID 133
+# define X509V3_R_ERROR_CONVERTING_ZONE 131
+# define X509V3_R_ERROR_CREATING_EXTENSION 144
+# define X509V3_R_ERROR_IN_EXTENSION 128
+# define X509V3_R_EXPECTED_A_SECTION_NAME 137
+# define X509V3_R_EXTENSION_EXISTS 145
+# define X509V3_R_EXTENSION_NAME_ERROR 115
+# define X509V3_R_EXTENSION_NOT_FOUND 102
+# define X509V3_R_EXTENSION_SETTING_NOT_SUPPORTED 103
+# define X509V3_R_EXTENSION_VALUE_ERROR 116
+# define X509V3_R_ILLEGAL_EMPTY_EXTENSION 151
+# define X509V3_R_INCORRECT_POLICY_SYNTAX_TAG 152
+# define X509V3_R_INVALID_ASNUMBER 162
+# define X509V3_R_INVALID_ASRANGE 163
+# define X509V3_R_INVALID_BOOLEAN_STRING 104
+# define X509V3_R_INVALID_EXTENSION_STRING 105
+# define X509V3_R_INVALID_INHERITANCE 165
+# define X509V3_R_INVALID_IPADDRESS 166
+# define X509V3_R_INVALID_MULTIPLE_RDNS 161
+# define X509V3_R_INVALID_NAME 106
+# define X509V3_R_INVALID_NULL_ARGUMENT 107
+# define X509V3_R_INVALID_NULL_NAME 108
+# define X509V3_R_INVALID_NULL_VALUE 109
+# define X509V3_R_INVALID_NUMBER 140
+# define X509V3_R_INVALID_NUMBERS 141
+# define X509V3_R_INVALID_OBJECT_IDENTIFIER 110
+# define X509V3_R_INVALID_OPTION 138
+# define X509V3_R_INVALID_POLICY_IDENTIFIER 134
+# define X509V3_R_INVALID_PROXY_POLICY_SETTING 153
+# define X509V3_R_INVALID_PURPOSE 146
+# define X509V3_R_INVALID_SAFI 164
+# define X509V3_R_INVALID_SECTION 135
+# define X509V3_R_INVALID_SYNTAX 143
+# define X509V3_R_ISSUER_DECODE_ERROR 126
+# define X509V3_R_MISSING_VALUE 124
+# define X509V3_R_NEED_ORGANIZATION_AND_NUMBERS 142
+# define X509V3_R_NO_CONFIG_DATABASE 136
+# define X509V3_R_NO_ISSUER_CERTIFICATE 121
+# define X509V3_R_NO_ISSUER_DETAILS 127
+# define X509V3_R_NO_POLICY_IDENTIFIER 139
+# define X509V3_R_NO_PROXY_CERT_POLICY_LANGUAGE_DEFINED 154
+# define X509V3_R_NO_PUBLIC_KEY 114
+# define X509V3_R_NO_SUBJECT_DETAILS 125
+# define X509V3_R_OPERATION_NOT_DEFINED 148
+# define X509V3_R_OTHERNAME_ERROR 147
+# define X509V3_R_POLICY_LANGUAGE_ALREADY_DEFINED 155
+# define X509V3_R_POLICY_PATH_LENGTH 156
+# define X509V3_R_POLICY_PATH_LENGTH_ALREADY_DEFINED 157
+# define X509V3_R_POLICY_WHEN_PROXY_LANGUAGE_REQUIRES_NO_POLICY 159
+# define X509V3_R_SECTION_NOT_FOUND 150
+# define X509V3_R_UNABLE_TO_GET_ISSUER_DETAILS 122
+# define X509V3_R_UNABLE_TO_GET_ISSUER_KEYID 123
+# define X509V3_R_UNKNOWN_BIT_STRING_ARGUMENT 111
+# define X509V3_R_UNKNOWN_EXTENSION 129
+# define X509V3_R_UNKNOWN_EXTENSION_NAME 130
+# define X509V3_R_UNKNOWN_OPTION 120
+# define X509V3_R_UNSUPPORTED_OPTION 117
+# define X509V3_R_UNSUPPORTED_TYPE 167
+# define X509V3_R_USER_TOO_LONG 132
+
+#endif
diff --git a/third_party/openssl/uos/sw_64/lib/libcrypto.a b/third_party/openssl/uos/sw_64/lib/libcrypto.a
new file mode 100644
index 00000000..8d663cbc
Binary files /dev/null and b/third_party/openssl/uos/sw_64/lib/libcrypto.a differ
diff --git a/third_party/openssl/uos/sw_64/lib/libssl.a b/third_party/openssl/uos/sw_64/lib/libssl.a
new file mode 100644
index 00000000..41c06937
Binary files /dev/null and b/third_party/openssl/uos/sw_64/lib/libssl.a differ
diff --git a/third_party/openssl/uos/sw_64/lib/pkgconfig/libcrypto.pc b/third_party/openssl/uos/sw_64/lib/pkgconfig/libcrypto.pc
new file mode 100644
index 00000000..d20ae4a5
--- /dev/null
+++ b/third_party/openssl/uos/sw_64/lib/pkgconfig/libcrypto.pc
@@ -0,0 +1,12 @@
+prefix=/home/SW2023/sane/code_app/third_party/openssl/openssl-1.1.1v/openssl-1.1.1v/release
+exec_prefix=${prefix}
+libdir=${exec_prefix}/lib
+includedir=${prefix}/include
+enginesdir=${libdir}/engines-1.1
+
+Name: OpenSSL-libcrypto
+Description: OpenSSL cryptography library
+Version: 1.1.1v
+Libs: -L${libdir} -lcrypto
+Libs.private: -ldl -pthread
+Cflags: -I${includedir}
diff --git a/third_party/openssl/uos/sw_64/lib/pkgconfig/libssl.pc b/third_party/openssl/uos/sw_64/lib/pkgconfig/libssl.pc
new file mode 100644
index 00000000..4f3e048c
--- /dev/null
+++ b/third_party/openssl/uos/sw_64/lib/pkgconfig/libssl.pc
@@ -0,0 +1,11 @@
+prefix=/home/SW2023/sane/code_app/third_party/openssl/openssl-1.1.1v/openssl-1.1.1v/release
+exec_prefix=${prefix}
+libdir=${exec_prefix}/lib
+includedir=${prefix}/include
+
+Name: OpenSSL-libssl
+Description: Secure Sockets Layer and cryptography libraries
+Version: 1.1.1v
+Requires.private: libcrypto
+Libs: -L${libdir} -lssl
+Cflags: -I${includedir}
diff --git a/third_party/openssl/uos/sw_64/lib/pkgconfig/openssl.pc b/third_party/openssl/uos/sw_64/lib/pkgconfig/openssl.pc
new file mode 100644
index 00000000..07af1bfd
--- /dev/null
+++ b/third_party/openssl/uos/sw_64/lib/pkgconfig/openssl.pc
@@ -0,0 +1,9 @@
+prefix=/home/SW2023/sane/code_app/third_party/openssl/openssl-1.1.1v/openssl-1.1.1v/release
+exec_prefix=${prefix}
+libdir=${exec_prefix}/lib
+includedir=${prefix}/include
+
+Name: OpenSSL
+Description: Secure Sockets Layer and cryptography libraries and tools
+Version: 1.1.1v
+Requires: libssl libcrypto
diff --git a/third_party/pdflib/uos/sw_64/lib/libpdf.so b/third_party/pdflib/uos/sw_64/lib/libpdf.so
new file mode 100755
index 00000000..657cae4d
Binary files /dev/null and b/third_party/pdflib/uos/sw_64/lib/libpdf.so differ
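Illustrative sketch, not part of the diff above: it assumes the vendored sw_64 OpenSSL 1.1.1v headers and static archives are on the include and link paths, and the certificate file name ("server.pem") and host name ("example.com") are hypothetical inputs. It exercises X509_check_host(), one of the X509_CHECK_FLAG_* values, and X509V3_get_d2i() as declared in the x509v3.h added by this change.

/*
 * Sketch only: load a PEM certificate, check a host name against it,
 * and decode its basicConstraints extension.
 */
#include <stdio.h>
#include <openssl/pem.h>
#include <openssl/x509v3.h>

int main(void)
{
    FILE *fp = fopen("server.pem", "r");   /* hypothetical PEM certificate */
    X509 *cert;
    BASIC_CONSTRAINTS *bc;
    char *peername = NULL;
    int rc, crit = 0;

    if (fp == NULL)
        return 1;
    cert = PEM_read_X509(fp, NULL, NULL, NULL);
    fclose(fp);
    if (cert == NULL)
        return 1;

    /* Host-name check with a flag from the vendored x509v3.h;
     * returns 1 on match, 0 on mismatch, negative on internal error. */
    rc = X509_check_host(cert, "example.com", 0,
                         X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS, &peername);
    printf("host check: %d (%s)\n", rc, peername != NULL ? peername : "-");
    OPENSSL_free(peername);

    /* Decode the basicConstraints extension via X509V3_get_d2i(). */
    bc = X509V3_get_d2i(X509_get0_extensions(cert),
                        NID_basic_constraints, &crit, NULL);
    if (bc != NULL) {
        printf("CA: %s (critical=%d)\n", bc->ca ? "yes" : "no", crit);
        BASIC_CONSTRAINTS_free(bc);
    }

    X509_free(cert);
    return 0;
}

Linking a program like this against the static libcrypto.a added here would also need -ldl and -pthread, matching Libs.private in the libcrypto.pc above.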