This program uses a Kalman filter to track a moving object in a video. It processes a pre-recorded clip, and the object's initial coordinates are known in advance. If you know how to modify it, you could overlay it on the video coming back from our FPV downlink; I think the result would be even better that way!
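The full listing follows. If you later want to point it at a live FPV downlink instead of the recorded clip, a minimal change (assuming the downlink reaches the PC as an ordinary capture device, for example a USB frame grabber at index 0) is to open a camera instead of the file:

// open capture device 0 (e.g. a USB grabber carrying the FPV downlink)
// instead of the pre-recorded file; everything else in the loop stays the same
pCapture = cvCaptureFromCAM(0);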
#include <stdio.h>
#include <string.h> // memcpy
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
#include <iostream>
using namespace std;
int main( int argc, char** argv )
{
IplImage* pFrame = NULL;
IplImage* pFrImg = NULL;
//template image used for matching
IplImage* MBImg = NULL;
int MBSIZE=20;
//initial coordinates: center of the template
int state_ptxy[]={230,70};
CvCapture* pCapture = NULL;
int nFrmNum = 0;
cvNamedWindow("video", 1);
//Kalman filter setup
CvKalman* kalman = cvCreateKalman( 2, 2, 0 );
CvMat* measurement = cvCreateMat( 2, 1, CV_32FC1 );
const float A[] = { 1, 0, 0, 1};
memcpy( kalman->transition_matrix->data.fl, A, sizeof(A));
cvSetIdentity( kalman->measurement_matrix, cvRealScalar(1) );
cvSetIdentity( kalman->process_noise_cov, cvRealScalar(1e-5) );
cvSetIdentity( kalman->measurement_noise_cov, cvRealScalar(0.00001) );
cvSetIdentity( kalman->error_cov_post, cvRealScalar(1));
//initialize the state x(0)
kalman->state_post->data.fl[0]=state_ptxy[0];
kalman->state_post->data.fl[1]=state_ptxy[1];
pCapture=cvCaptureFromFile("test.avi");
if(pCapture==0)
{
cout<<"Cannot open the video file"<<endl;
return -1;
}
//read the video frame by frame
while((pFrame = cvQueryFrame( pCapture )) != NULL)
{
nFrmNum++; //on the first frame, allocate the images and build the template
if(nFrmNum == 1)
{
pFrImg = cvCreateImage(cvSize(pFrame->width, pFrame->height), IPL_DEPTH_8U,1);
MBImg = cvCreateImage(cvSize(MBSIZE,MBSIZE), IPL_DEPTH_8U,1);
//keep the same origin so the image is not displayed upside down
pFrImg->origin = pFrame->origin;
MBImg->origin = pFrame->origin;
cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY); //convert the frame to grayscale into pFrImg
//copy the template patch centered on the initial coordinates
for(int row=0;row<MBSIZE;row++)
for(int col=0;col<MBSIZE;col++)
CV_IMAGE_ELEM( MBImg,unsigned char, row, col )=
CV_IMAGE_ELEM(pFrImg,unsigned char, state_ptxy[1]-MBSIZE/2+row,state_ptxy[0]-MBSIZE/2+col);
//save the template and the first frame
cvSaveImage("mb.jpg",MBImg);
cvSaveImage("first.bmp",pFrImg);
}
else
{
cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);
//Kalman prediction
const CvMat* prediction = cvKalmanPredict( kalman, 0 );
CvPoint predict_pt = cvPoint(cvRound(prediction->data.fl[0]),cvRound(prediction->data.fl[1]));
//measurement: template matching, take the best-matching location
CvPoint measurement_pt = cvPoint(0,0);
CvSize sizeMBImg = cvGetSize(MBImg);
CvSize sizepFrImg = cvGetSize(pFrImg);
double min_val,max_val;
CvPoint min_loc;
CvPoint max_loc;
CvSize sizeResult = cvSize(sizepFrImg.width-sizeMBImg.width+1,sizepFrImg.height-sizeMBImg.height+1);
IplImage* imgResult = cvCreateImage(sizeResult,IPL_DEPTH_32F,1);
cvMatchTemplate(pFrImg,MBImg,imgResult,CV_TM_CCORR_NORMED);
cvMinMaxLoc(imgResult, &min_val, &max_val, &min_loc, &max_loc, 0 );
cvReleaseImage(&imgResult); //release the match result each frame to avoid a memory leak
//measured point = best-match corner + half the template size
measurement_pt.x=max_loc.x+MBSIZE/2;
measurement_pt.y=max_loc.y+MBSIZE/2;
measurement->data.fl[0]=measurement_pt.x;
measurement->data.fl[1]=measurement_pt.y;
//correct the Kalman state with the measurement
cvKalmanCorrect( kalman, measurement);
CvPoint post_pt=cvPoint(cvRound(kalman->state_post->data.fl[0]),cvRound(kalman->state_post->data.fl[1]));
//draw the points
#define draw_cross( center, color, d ) \
cvLine( pFrame, cvPoint( center.x - d, center.y - d ), \
cvPoint( center.x + d, center.y + d ), color, 1, CV_AA, 0); \
cvLine( pFrame, cvPoint( center.x + d, center.y - d ), \
cvPoint( center.x - d, center.y + d ), color, 1, CV_AA, 0 )
// draw_cross( predict_pt, CV_RGB(255,255,0), 5 ); //predicted point (yellow)
draw_cross(measurement_pt, CV_RGB(255,0,0), 5 ); //measured point (red)
draw_cross(post_pt, CV_RGB(0,255,0), 5 ); //corrected point (green)
printf("测量点坐标: %d,%d\n", measurement_pt.x,measurement_pt.y);
printf("预测点坐标:%d,%d\n", predict_pt.x, predict_pt.y);
printf("矫正点坐标:%d,%d\n ",post_pt.x,post_pt.y);
printf("相关系数最大值:%f\n\n",max_val);
cvShowImage("video", pFrame);
if( cvWaitKey(2) >= 0 )
break;
}
}
//destroy the window
cvDestroyWindow("video");
//release images and matrices
cvReleaseImage(&pFrImg);
cvReleaseImage(&MBImg);
cvReleaseMat(&measurement);
cvReleaseCapture(&pCapture);
cvReleaseKalman(&kalman);
return 0;
}
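The listing above uses the legacy OpenCV 1.x C API (IplImage, cvCreateKalman, ...). As a rough sketch of the same predict / template-match / correct loop with the newer C++ API (assuming OpenCV 3 or 4; the file name, template size, and initial coordinates are just the same example values as above):

#include <opencv2/opencv.hpp>

int main()
{
    cv::VideoCapture cap("test.avi");              // same pre-recorded clip as above
    if (!cap.isOpened()) return -1;

    const int MBSIZE = 20;                         // template size
    cv::Point init(230, 70);                       // known initial center of the object

    // 2-state (x, y) constant-position model, same as the C version above
    cv::KalmanFilter kf(2, 2, 0);
    cv::setIdentity(kf.transitionMatrix);
    cv::setIdentity(kf.measurementMatrix);
    cv::setIdentity(kf.processNoiseCov, cv::Scalar::all(1e-5));
    cv::setIdentity(kf.measurementNoiseCov, cv::Scalar::all(1e-5));
    cv::setIdentity(kf.errorCovPost, cv::Scalar::all(1));
    kf.statePost.at<float>(0) = (float)init.x;
    kf.statePost.at<float>(1) = (float)init.y;

    cv::Mat frame, gray, tmpl, result;
    while (cap.read(frame))
    {
        cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
        if (tmpl.empty())                          // first frame: cut out the template
        {
            tmpl = gray(cv::Rect(init.x - MBSIZE/2, init.y - MBSIZE/2, MBSIZE, MBSIZE)).clone();
            continue;
        }
        cv::Mat prediction = kf.predict();         // predicted position (not drawn in this sketch)

        // measurement: best normalized cross-correlation match of the template
        cv::matchTemplate(gray, tmpl, result, cv::TM_CCORR_NORMED);
        double maxVal; cv::Point maxLoc;
        cv::minMaxLoc(result, 0, &maxVal, 0, &maxLoc);
        cv::Mat measurement = (cv::Mat_<float>(2, 1) << maxLoc.x + MBSIZE / 2.0f,
                                                        maxLoc.y + MBSIZE / 2.0f);

        // correction: fuse prediction and measurement, draw the corrected point in green
        cv::Mat corrected = kf.correct(measurement);
        cv::circle(frame, cv::Point(cvRound(corrected.at<float>(0)),
                                    cvRound(corrected.at<float>(1))), 5, CV_RGB(0, 255, 0), 2);
        cv::imshow("video", frame);
        if (cv::waitKey(2) >= 0) break;
    }
    return 0;
}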