# In PhoneGap, it seems the default is Build Active Architecture Only = No, but ...
http://cuby5566.pixnet.net/blog/post/44131382-%5B-opencv-%5D-affine-%26-perspective
First, detect corners in frame iplT0gray and track them into iplT1gray with pyramidal Lucas-Kanade optical flow:

```cpp
IplImage* eig_image  = cvCreateImage( cvGetSize( iplT0gray ), IPL_DEPTH_32F, 1 );
IplImage* temp_image = cvCreateImage( cvGetSize( iplT0gray ), IPL_DEPTH_32F, 1 );

const int MAX_CORNERS = INT_POINT_SIZE;

CvPoint2D32f corners[MAX_CORNERS]  = {0},
             corners2[MAX_CORNERS] = {0};

double quality_level = 0.05, min_distance = 2, k = 0.04;
int eig_block_size = 3, use_harris = false, corner_count = MAX_CORNERS;

IplImage* pyramid1 = cvCreateImage( cvGetSize( iplT0gray ), IPL_DEPTH_8U, 1 );
IplImage* pyramid2 = cvCreateImage( cvGetSize( iplT0gray ), IPL_DEPTH_8U, 1 );

char  OF_FoundFeature[MAX_CORNERS];
float OF_FeatureError[MAX_CORNERS];
CvTermCriteria OF_TerminationCriteria =
    cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 );

// Find strong corners in the first frame...
cvGoodFeaturesToTrack( iplT0gray, eig_image, temp_image, corners, &corner_count,
                       quality_level, min_distance, NULL,
                       eig_block_size, use_harris, k );
// ...and track them into the second frame with pyramidal LK.
cvCalcOpticalFlowPyrLK( iplT0gray, iplT1gray, pyramid1, pyramid2,
                        corners, corners2, corner_count,
                        cvSize(3,3), 5, OF_FoundFeature, OF_FeatureError,
                        OF_TerminationCriteria, 0 );
```
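For reference, the same detect-and-track pipeline in the OpenCV C++ API might look like the sketch below; the function name trackCorners and the grayscale frames t0 and t1 are illustrative stand-ins for iplT0gray and iplT1gray, and the parameter values simply mirror the C code above.

```cpp
#include <opencv2/opencv.hpp>
#include <vector>

// Sketch: find corners in frame t0 and track them into frame t1.
// t0 and t1 are assumed to be single-channel (CV_8UC1) grayscale frames.
void trackCorners(const cv::Mat& t0, const cv::Mat& t1,
                  std::vector<cv::Point2f>& corners,   // corners found in t0
                  std::vector<cv::Point2f>& corners2)  // tracked positions in t1
{
    // Same role as cvGoodFeaturesToTrack above (Shi-Tomasi by default).
    cv::goodFeaturesToTrack(t0, corners,
                            /*maxCorners=*/500,
                            /*qualityLevel=*/0.05,
                            /*minDistance=*/2);

    // Pyramidal Lucas-Kanade flow, same role as cvCalcOpticalFlowPyrLK.
    std::vector<uchar> status;   // 1 where the corresponding corner was tracked
    std::vector<float> error;    // per-corner tracking error
    cv::calcOpticalFlowPyrLK(
        t0, t1, corners, corners2, status, error,
        cv::Size(3, 3), /*maxLevel=*/5,
        cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 20, 0.3));
}
```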
Using these tracked corner pairs, we can estimate the transformation matrix between the two frames and warp one frame back onto the other.
* Note: when the two frames differ greatly (large scaling, rotation, etc.), you need to use SIFT or another feature-matching method to obtain corresponding feature points between them.
1) Affine Transformation:

```cpp
CvMat* H = cvCreateMat( 2, 3, CV_32FC1 );
cvGetAffineTransform( corners2, corners, H );   // uses exactly 3 point pairs
cvWarpAffine( iplSrc, iplObj, H );
```
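In the C++ API the same affine warp might look like this sketch (the function name and the image names imgSrc/imgDst are illustrative); like cvGetAffineTransform, cv::getAffineTransform uses exactly three point pairs.

```cpp
#include <opencv2/opencv.hpp>
#include <vector>

// Sketch: estimate a 2x3 affine matrix from the first three corner pairs
// and warp imgSrc into imgDst.
void warpAffineFromCorners(const cv::Mat& imgSrc, cv::Mat& imgDst,
                           const std::vector<cv::Point2f>& corners,
                           const std::vector<cv::Point2f>& corners2)
{
    cv::Point2f src[3] = { corners2[0], corners2[1], corners2[2] };
    cv::Point2f dst[3] = { corners[0],  corners[1],  corners[2]  };
    cv::Mat H = cv::getAffineTransform(src, dst);   // 2x3, CV_64F
    cv::warpAffine(imgSrc, imgDst, H, imgSrc.size());
}
```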
2) Perspective Transformation:

```cpp
CvMat* H = cvCreateMat( 3, 3, CV_32FC1 );
cvGetPerspectiveTransform( corners2, corners, H );   // uses exactly 4 point pairs
cvWarpPerspective( iplSrc, iplObj, H,
                   CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, CV_RGB(255,0,0) );
```
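The C++ counterpart is analogous (again an illustrative sketch); cv::getPerspectiveTransform takes exactly four point pairs.

```cpp
#include <opencv2/opencv.hpp>
#include <vector>

// Sketch: exact perspective transform from the first four corner pairs.
void warpPerspectiveFromCorners(const cv::Mat& imgSrc, cv::Mat& imgDst,
                                const std::vector<cv::Point2f>& corners,
                                const std::vector<cv::Point2f>& corners2)
{
    cv::Point2f src[4] = { corners2[0], corners2[1], corners2[2], corners2[3] };
    cv::Point2f dst[4] = { corners[0],  corners[1],  corners[2],  corners[3]  };
    cv::Mat H = cv::getPerspectiveTransform(src, dst);   // 3x3, CV_64F
    cv::warpPerspective(imgSrc, imgDst, H, imgSrc.size());
}
```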
3) Perspective Transformation (with RANSAC):

```cpp
CvMat* H = cvCreateMat( 3, 3, CV_32FC1 );
// Wrap the corner arrays as 1x100 two-channel matrices
// (this assumes MAX_CORNERS >= 100).
CvMat c  = cvMat( 1, 100, CV_32FC2, corners );
CvMat c2 = cvMat( 1, 100, CV_32FC2, corners2 );
cvFindHomography( &c2, &c, H, CV_RANSAC, 30 );   // reprojection threshold = 30
cvWarpPerspective( iplSrc, iplObj, H,
                   CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, CV_RGB(255,0,0) );
```
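And the robust variant in the C++ API: unlike getPerspectiveTransform, cv::findHomography accepts any number of correspondences (at least four) and rejects outliers via RANSAC. The wrapper function below is an illustrative sketch.

```cpp
#include <opencv2/opencv.hpp>
#include <vector>

// Sketch: robust homography from all corner pairs with RANSAC.
void warpWithRansacHomography(const cv::Mat& imgSrc, cv::Mat& imgDst,
                              const std::vector<cv::Point2f>& corners,
                              const std::vector<cv::Point2f>& corners2)
{
    std::vector<uchar> inlierMask;   // 1 per inlier correspondence
    cv::Mat H = cv::findHomography(corners2, corners, cv::RANSAC,
                                   /*ransacReprojThreshold=*/30.0, inlierMask);
    if (!H.empty())
        cv::warpPerspective(imgSrc, imgDst, H, imgSrc.size());
}
```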
Chapter 3 of Mastering OpenCV with Practical Computer Vision Projects has an example of homography refinement. There are many possible approaches, but the book's example should give you a rough feel for it.
```cpp
bool PatternDetector::findPattern(const cv::Mat& image, PatternTrackingInfo& info)
{
    // Convert input image to gray
    getGray(image, m_grayImg);

    // Extract feature points from input gray image
    extractFeatures(m_grayImg, m_queryKeypoints, m_queryDescriptors);

    // Get matches with current pattern
    getMatches(m_queryDescriptors, m_matches);

    // Find homography transformation and detect good matches
    bool homographyFound = refineMatchesWithHomography(
        m_queryKeypoints,
        m_pattern.keypoints,
        homographyReprojectionThreshold,
        m_matches,
        m_roughHomography);

    if (homographyFound) // a homography fitting the current matches was found
    {
        // If homography refinement is enabled, improve the found transformation
        if (enableHomographyRefinement)
        {
            // Warp image using found homography
            // Using m_roughHomography, warp the input into the shape of the
            // reference image (a perfect rectangle)
            cv::warpPerspective(m_grayImg, m_warpedImg, m_roughHomography,
                                m_pattern.size, cv::WARP_INVERSE_MAP | cv::INTER_CUBIC);

            // Get refined matches:
            std::vector<cv::KeyPoint> warpedKeypoints;
            std::vector<cv::DMatch> refinedMatches;

            // Extract features and match again, this time on the warped image
            // Detect features on warped image
            extractFeatures(m_warpedImg, warpedKeypoints, m_queryDescriptors);

            // Match with pattern
            getMatches(m_queryDescriptors, refinedMatches);

            // Estimate new refinement homography
            homographyFound = refineMatchesWithHomography(
                warpedKeypoints,
                m_pattern.keypoints,
                homographyReprojectionThreshold,
                refinedMatches,
                m_refinedHomography); // compute the corrected (refined) homography

            // Get a result homography as the matrix product of the refined and rough homographies:
            // At first this multiplication looks confusing - why multiply to get the
            // corrected homography? Because m_refinedHomography maps pattern points into
            // the rectified (warped) image, and m_roughHomography maps that rectified
            // image back into the input frame, so their product is the full
            // pattern-to-input-image homography.
            info.homography = m_roughHomography * m_refinedHomography;

            // Transform contour with precise homography
            cv::perspectiveTransform(m_pattern.points2d, info.points2d, info.homography);
        }
        else
        {
            // Otherwise just keep the rough homography as-is
            info.homography = m_roughHomography;

            // Transform contour with rough homography
            cv::perspectiveTransform(m_pattern.points2d, info.points2d, m_roughHomography);
        }
    }

    return homographyFound;
}
```
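To make the multiplication above concrete, here is a minimal self-contained sketch; the two matrix values are made-up toy numbers. Transforming pattern points in one step with the product Hrough * Hrefined gives the same points as first applying Hrefined (pattern to rectified image) and then Hrough (rectified image to input frame).

```cpp
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

int main()
{
    // Toy stand-ins: Hrough (rectified image -> input frame) and
    // Hrefined (pattern -> rectified image). Values are made up.
    cv::Matx33d Hrough(1.2,  0.1, 30.0,
                       0.05, 1.1, 20.0,
                       1e-4, 0.0,  1.0);
    cv::Matx33d Hrefined(1.01, 0.00,  1.5,
                         0.00, 0.99, -0.8,
                         0.00, 0.00,  1.0);

    // Four pattern corners, playing the role of m_pattern.points2d.
    std::vector<cv::Point2f> patternPts = { {0,0}, {100,0}, {100,100}, {0,100} };

    // One step: the composed homography, as in info.homography above.
    std::vector<cv::Point2f> oneStep;
    cv::perspectiveTransform(patternPts, oneStep, cv::Mat(Hrough * Hrefined));

    // Two steps: refine first, then the rough mapping.
    std::vector<cv::Point2f> mid, twoStep;
    cv::perspectiveTransform(patternPts, mid, cv::Mat(Hrefined));
    cv::perspectiveTransform(mid, twoStep, cv::Mat(Hrough));

    // Both paths land on the same image points (up to floating-point error).
    for (size_t i = 0; i < patternPts.size(); ++i)
        std::cout << oneStep[i] << "  vs  " << twoStep[i] << std::endl;
    return 0;
}
```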