Chapter 8  Object Tracking


8.1 Detecting Moving Objects

Basic motion detection

Compute the difference between successive frames, or the difference between a "background" frame and each subsequent frame.

Example:

 

import cv2
import numpy as np

camera = cv2.VideoCapture(0)
es = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9, 4))
kernel = np.ones((5, 5), np.uint8)

# Read frames from the camera; the first frame becomes the background
background = None
while True:
    ret, frame = camera.read()
    if background is None:
        background = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        background = cv2.GaussianBlur(background, (21, 21), 0)
        continue
    # Simple preprocessing: convert to grayscale and blur (reduces the effect of noise)
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray_frame = cv2.GaussianBlur(gray_frame, (21, 21), 0)
    # Difference map between the background and the current frame
    diff = cv2.absdiff(background, gray_frame)
    diff = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)[1]
    diff = cv2.dilate(diff, es, iterations=2)
    # Draw a rectangle around each sufficiently large contour
    # (OpenCV 3.x returns three values here; in OpenCV 4.x drop the first one)
    image, cnts, hierarchy = cv2.findContours(diff.copy(), cv2.RETR_EXTERNAL,
                                              cv2.CHAIN_APPROX_SIMPLE)
    for c in cnts:
        if cv2.contourArea(c) < 1500:
            continue
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow("contours", frame)
    cv2.imshow("dif", diff)
    if cv2.waitKey(82) & 0xff == ord("q"):
        break

camera.release()
cv2.destroyAllWindows()

 

8.2 Background Subtractors: KNN, MOG2, and GMG

OpenCV provides BackgroundSubtractor classes that make separating foreground from background convenient. These subtractors can also detect shadows when shadow detection is enabled.

Example: BackgroundSubtractorMOG2

import cv2
import numpy as np

cap = cv2.VideoCapture(0)
mog = cv2.createBackgroundSubtractorMOG2()

while True:
    ret, frame = cap.read()
    fgmask = mog.apply(frame)
    cv2.imshow('frame', fgmask)
    if cv2.waitKey(30) & 0xff == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
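The MOG2 and KNN subtractors ship with the main OpenCV package. The GMG subtractor named in the section title is typically only available through the contrib build (the cv2.bgsegm module). A minimal sketch, assuming opencv-contrib-python is installed; GMG uses the first frames of the stream to initialize its background model, so its output starts out noisy:

import cv2

cap = cv2.VideoCapture(0)
# GMG lives in the contrib module cv2.bgsegm (assumption: opencv-contrib-python is installed)
gmg = cv2.bgsegm.createBackgroundSubtractorGMG()
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

while True:
    ret, frame = cap.read()
    if not ret:
        break
    fgmask = gmg.apply(frame)
    # An opening pass removes the speckle noise that GMG tends to produce
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
    cv2.imshow('gmg', fgmask)
    if cv2.waitKey(30) & 0xff == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()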

 

Example: motion detection with BackgroundSubtractorKNN

import cv2
import numpy as np

bs = cv2.createBackgroundSubtractorKNN(detectShadows=True)
camera = cv2.VideoCapture('traffic.flv')  # traffic.flv: footage of traffic on a road

while True:
    ret, frame = camera.read()
    fgmask = bs.apply(frame)
    # Shadows are marked with gray value 127; thresholding at 244 keeps only solid foreground
    th = cv2.threshold(fgmask.copy(), 244, 255, cv2.THRESH_BINARY)[1]
    dilated = cv2.dilate(th, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)),
                         iterations=2)
    # OpenCV 3.x returns three values here; in OpenCV 4.x drop the first one
    image, contours, hier = cv2.findContours(dilated, cv2.RETR_EXTERNAL,
                                             cv2.CHAIN_APPROX_SIMPLE)
    for c in contours:
        if cv2.contourArea(c) > 1600:
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 2)
    cv2.imshow("mog", fgmask)
    cv2.imshow("thresh", th)
    cv2.imshow("detection", frame)
    if cv2.waitKey(30) & 0xff == 27:
        break

camera.release()
cv2.destroyAllWindows()

 

 

8.2.1 Mean Shift and CAMShift

Mean shift (Meanshift) is an object-tracking algorithm. It finds the point of maximum density in a discrete sample of a probability function and recomputes that maximum for the next frame, which indicates the direction in which the target has moved.

Mean shift is very useful for tracking a region of interest in a video.

Example: mark a region of interest and track it:

import cv2
import numpy as np

cap = cv2.VideoCapture(0)
ret, frame = cap.read()
r, h, c, w = 10, 200, 10, 200
track_window = (c, r, w, h)
roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((100., 30., 32.)),
                   np.array((180., 120., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
    ret, track_window = cv2.meanShift(dst, track_window, term_crit)
    x, y, w, h = track_window
    img2 = cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
    cv2.imshow('img2', img2)
    if cv2.waitKey(60) & 0xff == ord("q"):
        break

cv2.destroyAllWindows()
cap.release()

If an object whose colors fall within the specified range enters the window, it will be tracked.

8.2.2 Color Histograms

The calcHist function computes an image's color histogram. A color histogram describes the image's color distribution: the x-axis holds the color values and the y-axis the number of pixels.

The calcBackProject function is essential to the mean shift algorithm. Known as histogram back projection, it takes a histogram and projects it onto an image; the result gives, for each pixel, the probability that the pixel belongs to the image from which the original histogram was generated.
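A minimal standalone sketch of these two calls; 'sample.jpg' and the ROI coordinates are hypothetical placeholders, and only the hue channel is used:

import cv2
import numpy as np

img = cv2.imread('sample.jpg')   # hypothetical input image
roi = img[10:210, 10:210]        # region whose colors we want to locate elsewhere

hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)

# calcHist: hue histogram of the ROI (x-axis: hue 0-179, y-axis: pixel count)
roi_hist = cv2.calcHist([hsv_roi], [0], None, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

# calcBackProject: per-pixel probability of belonging to the ROI's hue distribution
dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)

cv2.imshow('back projection', dst)
cv2.waitKey(0)
cv2.destroyAllWindows()

The bright regions of dst mark where the ROI's colors occur in the image; this probability map is exactly what meanShift climbs in the full example below.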

The mean shift example from 8.2.1 again, this time with each step commented:

import cv2
import numpy as np

cap = cv2.VideoCapture(0)
ret, frame = cap.read()

# Mark the region of interest
r, h, c, w = 10, 200, 10, 200
track_window = (c, r, w, h)

# Extract the ROI and convert it to HSV space
roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((100., 30., 32.)),
                   np.array((180., 120., 255.)))

# Histogram of the ROI
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

# Mean shift stopping criteria: 10 iterations, or the window centre moves by less than 1 pixel
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Back projection
    dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
    ret, track_window = cv2.meanShift(dst, track_window, term_crit)
    x, y, w, h = track_window
    img2 = cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
    cv2.imshow('img2', img2)
    if cv2.waitKey(60) & 0xff == ord("q"):
        break

cv2.destroyAllWindows()
cap.release()

8.3 CAMShift

CAMShift (Continuously Adaptive Mean Shift) extends mean shift by adapting the size and orientation of the search window to the tracked object on each frame; it returns a rotated rectangle that can be drawn with cv2.boxPoints and cv2.polylines.

import cv2
import numpy as np

cap = cv2.VideoCapture(0)
ret, frame = cap.read()

# Mark the region of interest
r, h, c, w = 10, 200, 10, 200
track_window = (c, r, w, h)

# Extract the ROI and convert it to HSV space
roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((100., 30., 32.)),
                   np.array((180., 120., 255.)))

# Histogram of the ROI
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

# Stopping criteria: 10 iterations, or the window centre moves by less than 1 pixel
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Back projection
    dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
    # CamShift returns a rotated rectangle plus the updated track window
    ret, track_window = cv2.CamShift(dst, track_window, term_crit)
    pts = cv2.boxPoints(ret)
    pts = np.int32(pts)
    img2 = cv2.polylines(frame, [pts], True, 255, 2)
    cv2.imshow('img2', img2)
    if cv2.waitKey(60) & 0xff == ord("q"):
        break

cv2.destroyAllWindows()
cap.release()

 

 

8.4 The Kalman Filter

The Kalman filter operates recursively on a stream of noisy input data to produce a statistically optimal estimate of the underlying system state.

8.4.1 Predict and Update

The Kalman filter algorithm can be divided into two phases:

Predict: use the covariance estimated at the current point to predict the target's new position.

Update: record the target's measured position and use it to correct the covariance for the next cycle (a minimal sketch of this loop follows).
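A minimal sketch of this predict/correct loop on synthetic data, assuming a constant-velocity model where the state is [x, y, vx, vy] and only [x, y] is measured (the noise settings and the diagonal trajectory are illustrative only):

import cv2
import numpy as np

# Constant-velocity model: 4 state variables, 2 measured variables
kf = cv2.KalmanFilter(4, 2)
kf.measurementMatrix = np.array([[1, 0, 0, 0],
                                 [0, 1, 0, 0]], np.float32)
kf.transitionMatrix = np.array([[1, 0, 1, 0],
                                [0, 1, 0, 1],
                                [0, 0, 1, 0],
                                [0, 0, 0, 1]], np.float32)
kf.processNoiseCov = np.eye(4, dtype=np.float32) * 1e-2
kf.measurementNoiseCov = np.eye(2, dtype=np.float32) * 1e-1

rng = np.random.default_rng(0)
for t in range(20):
    # Phase 1 (predict): estimate the new position from the current state
    predicted = kf.predict()
    # Synthetic noisy measurement of a point moving along the diagonal
    measurement = np.array([[t + rng.normal(0, 0.3)],
                            [t + rng.normal(0, 0.3)]], np.float32)
    # Phase 2 (update/correct): fold the measurement back into the state estimate
    corrected = kf.correct(measurement)
    print(t, predicted[:2].ravel(), corrected[:2].ravel())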

Example: using a Kalman filter to predict the trajectory of the mouse pointer

import cv2
import numpy as np

# Canvas for drawing; the measured path is drawn in green, the predicted path in red
frame = np.zeros((800, 800, 3), np.uint8)
last_measurement = current_measurement = np.zeros((2, 1), np.float32)
last_prediction = current_prediction = np.zeros((2, 1), np.float32)

def mousemove(event, x, y, s, p):
    global frame, current_measurement, last_measurement, current_prediction, last_prediction
    last_prediction = current_prediction
    last_measurement = current_measurement
    current_measurement = np.array([[np.float32(x)], [np.float32(y)]])
    kalman.correct(current_measurement)
    current_prediction = kalman.predict()
    lmx, lmy = int(last_measurement[0, 0]), int(last_measurement[1, 0])
    cmx, cmy = int(current_measurement[0, 0]), int(current_measurement[1, 0])
    lpx, lpy = int(last_prediction[0, 0]), int(last_prediction[1, 0])
    cpx, cpy = int(current_prediction[0, 0]), int(current_prediction[1, 0])
    cv2.line(frame, (lmx, lmy), (cmx, cmy), (0, 100, 0))
    cv2.line(frame, (lpx, lpy), (cpx, cpy), (0, 0, 200))

cv2.namedWindow("kalman_tracker")
cv2.setMouseCallback("kalman_tracker", mousemove)

# State: [x, y, vx, vy]; measurement: [x, y]
kalman = cv2.KalmanFilter(4, 2)
kalman.measurementMatrix = np.array([[1, 0, 0, 0],
                                     [0, 1, 0, 0]], np.float32)
kalman.transitionMatrix = np.array([[1, 0, 1, 0],
                                    [0, 1, 0, 1],
                                    [0, 0, 1, 0],
                                    [0, 0, 0, 1]], np.float32)
kalman.processNoiseCov = np.eye(4, dtype=np.float32) * 0.03

while True:
    cv2.imshow("kalman_tracker", frame)
    key = cv2.waitKey(30) & 0xFF
    if key == 27:
        break
    if key == ord('q'):
        cv2.imwrite('kalman.jpg', frame)
        break

cv2.destroyAllWindows()

 

 

 
