function opticalflow_hornschunk()
% OPTICALFLOW_HORNSCHUNK  Crowd tracking on 'Crowd3.avi' using Horn-Schunck
% optical flow, blob analysis, and Kalman-filter track management.
%
% Requires the Computer Vision System Toolbox. NOTE: vision.OpticalFlow was
% removed in newer MATLAB releases; if it is undefined on your release, use
% opticalFlowHS / estimateFlow instead. The guard below turns the cryptic
% "Undefined variable 'vision'" failure into an actionable error message.
if ~license('test', 'Video_and_Image_Blockset')
    error(['This function requires the Computer Vision System Toolbox ', ...
           '(vision.OpticalFlow et al.). On R2020a+ use opticalFlowHS instead.']);
end

Object    = VideoReader('Crowd3.avi');
obj       = setupSystemObjects();
NumFrames = Object.NumberOfFrames;

% Camera-geometry constants for the ground-distance estimate.
h    = 8;       % camera height (assumed metres) - TODO confirm units
theta = 60;     % tilt angle (degrees)
dphi = 0.628;   % per-pixel angular increment

nextId = 1;     % id for the next new track

% Running-mean pipeline used to derive the motion-velocity threshold.
hMean1 = vision.Mean;
hMean2 = vision.Mean('RunningMean', true);

tracks = initializeTracks();

blobAnalyser = vision.BlobAnalysis('BoundingBoxOutputPort', true, ...
    'AreaOutputPort', true, 'CentroidOutputPort', true, ...
    'MinimumBlobArea', 600);

videoPlayer = vision.VideoPlayer('Position', [20, 400, 700, 400]);
maskPlayer  = vision.VideoPlayer('Position', [740, 400, 700, 400]);
OrVideo  = vision.VideoPlayer('Name', 'Original video', 'Position', [10, 450, 340, 260]);
BwVideo1 = vision.VideoPlayer('Name', 'Segmentation done use filter', 'Position', [790, 450, 340, 260]);
BwVideo  = vision.VideoPlayer('Name', 'Segmentation After Using Medium Filter', 'Position', [10, 100, 340, 260]);
ReVideo  = vision.VideoPlayer('Name', 'Result', 'Position', [400, 100, 340, 260]);

OpticalFlow = vision.OpticalFlow('ReferenceFrameDelay', 3);
OpticalFlow.OutputValue = 'Horizontal and vertical components in complex form';

blobAnalysis = vision.BlobAnalysis('BoundingBoxOutputPort', true, ...
    'AreaOutputPort', false, 'CentroidOutputPort', false, ...
    'MinimumBlobArea', 600);

totaldistB = 0;
v  = 0;
nf = 1;   % frame parity counter; never incremented below (kept as original)

videor  = vision.VideoFileReader('Crowd3.mov', 'ImageColorSpace', 'RGB', ...
    'VideoOutputDataType', 'uint8');
OfVideo = vision.VideoPlayer('Name', 'motion vector', 'Position', [400, 450, 340, 260]);

shapeInserter  = vision.ShapeInserter('Shape', 'lines', 'BorderColor', 'Custom', ...
    'CustomBorderColor', uint8([0 0 255]));
markerInserter = vision.MarkerInserter('Shape', 'X-mark', 'BorderColor', 'Custom', ...
    'CustomBorderColor', uint8([0 0 255]));
converter   = vision.ImageDataTypeConverter;
hMedianFilt = vision.MedianFilter;
hClose  = vision.MorphologicalClose('Neighborhood', strel('rectangle', [25 16]));
hOpen   = vision.MorphologicalOpen('Neighborhood', strel('square', 3));
hErode  = vision.MorphologicalErode('Neighborhood', strel('square', 3));
hDilate = vision.MorphologicalDilate('Neighborhood', strel('square', 3));

% FIX: original source had "ticfor", fusing tic with the for keyword
% (a syntax error); they must be separate statements.
tic
for k = 2:1:NumFrames
    time  = toc;
    frame = readFrame();

    % Hand-rolled Sobel-like gradient magnitude over the raw frame.
    % NOTE(review): C is uint8 RGB here, so the arithmetic can saturate;
    % consider double(rgb2gray(frame)) - kept as original behavior.
    C = frame;
    for i = 1:size(C, 1) - 2
        for j = 1:size(C, 2) - 2
            Gx = ((2*C(i+2,j+1) + C(i+2,j) + C(i+2,j+2)) - ...
                  (2*C(i,  j+1) + C(i,  j) + C(i,  j+2)));
            Gy = ((2*C(i+1,j+2) + C(i,j+2) + C(i+2,j+2)) - ...
                  (2*C(i+1,j)   + C(i,j)   + C(i+2,j)));
            B(i, j) = sqrt(Gx.^2 + Gy.^2);
        end
    end

    Frame1 = rgb2gray(frame);
    im = step(converter, B);
    I  = rgb2gray(frame);

    % Horn-Schunck flow as complex (u + iv); y1 is squared magnitude.
    of = step(OpticalFlow, im);
    y1 = of .* conj(of);

    % Adaptive velocity threshold: running mean of per-frame mean magnitude.
    vel_th = step(hMean2, step(hMean1, y1));

    mask  = step(hMedianFilt, y1 > vel_th);
    mask1 = step(hClose, step(hDilate, step(hOpen, mask)));

    [~, centroids, bboxes] = blobAnalyser.step(mask);
    bbox = step(blobAnalysis, mask);
    [a, b] = size(centroids);
    time = toc;

    % ---- Predict new locations of existing tracks ----------------------
    for i = 1:length(tracks)
        bbox      = tracks(i).bbox;
        centroidd = tracks(i).centroid;
        predictedCentroid = predict(tracks(i).kalmanFilter);
        predictedCentroid = int32(predictedCentroid) - bbox(3:4) / 2;
        tracks(i).bbox = [predictedCentroid, bbox(3:4)];

        % Per-axis displacement scaled from pixels to (assumed) metres.
        tracks(i).speed(1) = abs(double(predictedCentroid(1)*(10/240)) - ...
                                 double(tracks(i).centroid(1)*(10/240)));
        tracks(i).speed(2) = abs(double(predictedCentroid(2)*(30/320)) - ...
                                 double(tracks(i).centroid(2)*(30/320)));
        tracks(i).centroid = predictedCentroid;

        % FIX: original used pow2(x), which is 2.^x in MATLAB (not x.^2),
        % and summed speed(1) twice instead of speed(1) and speed(2).
        tracks(i).distance = sqrt(tracks(i).speed(1)^2 + tracks(i).speed(2)^2);

        t = 25; scale = 1/10;
        dt = 1 / Object.FrameRate;
        % Convert displacement/frame to km/h (x3.6 from m/s).
        tracks(i).distance = (tracks(i).distance / dt) * 3.6;
    end

    % ---- Assign detections to tracks (Munkres via toolbox helper) ------
    nTracks     = length(tracks);
    nDetections = size(centroids, 1);
    cost = zeros(nTracks, nDetections);
    for i = 1:nTracks
        cost(i, :) = distance(tracks(i).kalmanFilter, centroids);
    end
    costOfNonAssignment = 20;
    [assignments, unassignedTracks, unassignedDetections] = ...
        assignDetectionsToTracks(cost, costOfNonAssignment);

    % Update assigned tracks with corrected Kalman state.
    numAssignedTracks = size(assignments, 1);
    for i = 1:numAssignedTracks
        trackIdx     = assignments(i, 1);
        detectionIdx = assignments(i, 2);
        centroid = centroids(detectionIdx, :);
        bbox     = bboxes(detectionIdx, :);
        correct(tracks(trackIdx).kalmanFilter, centroid);
        tracks(trackIdx).bbox = bbox;
        tracks(trackIdx).age  = tracks(trackIdx).age + 1;
        tracks(trackIdx).totalVisibleCount = ...
            tracks(trackIdx).totalVisibleCount + 1;
        tracks(trackIdx).consecutiveInvisibleCount = 0;
    end

    % Age unassigned tracks.
    for i = 1:length(unassignedTracks)
        ind = unassignedTracks(i);
        tracks(ind).age = tracks(ind).age + 1;
        tracks(ind).consecutiveInvisibleCount = ...
            tracks(ind).consecutiveInvisibleCount + 1;
    end

    % Drop young low-visibility tracks and long-invisible tracks.
    if isempty(tracks)
        ab = 5;
    else
        invisibleForTooLong = 20;
        ageThreshold = 8;
        ages = [tracks(:).age];
        totalVisibleCounts = [tracks(:).totalVisibleCount];
        visibility = totalVisibleCounts ./ ages;
        lostInds = (ages < ageThreshold & visibility < 0.6) | ...
            [tracks(:).consecutiveInvisibleCount] >= invisibleForTooLong;
        tracks = tracks(~lostInds);
    end

    createNewTracks();
    displayTrackingResults();

    % ---- Ground-distance / velocity estimate from camera geometry ------
    if size(centroids) ~= [0 0]
        if rem(nf, 2) == 1
            distA = 0; totaldistA = 0;
            for i = 1:a
                distA = h * (tan((theta + (240 - centroids(1, 2)) * dphi)) * pi / 180);
                totaldistA = (totaldistA + distA) / a;
            end
        end
        S = abs(totaldistB - totaldistA);
        v = S / NumFrames;

        % Motion-vector overlay lines derived from the flow field.
        lines = videooptflowlines(of, 15);
        [rw, cl] = size(lines);
        bw = 1;
        for mn = 1:rw - 1
            if lines(mn, 1) == lines(mn + 1, 1)
                a = a + 1;
            else
                new(bw, :) = lines(mn, :);
                bw = bw + 1;
            end
        end
        [rows, cols] = size(new);
        for ab = 1:rw
            for j = 1:2
                vec(ab, j) = lines(ab, j);
            end
        end
        if ~isempty(lines)
            ofVector = step(shapeInserter, frame, uint8(lines));
        end
    end
end

    function frame = readFrame()
        % Pull the next frame from the shared reader (nested: shares obj).
        frame = obj.reader.step();
    end

    function obj = setupSystemObjects()
        % Create the reader, display players, detector and blob analyser
        % bundled into one struct shared by the nested functions.
        obj.reader      = vision.VideoFileReader('Crowd3.avi');
        obj.videoPlayer = vision.VideoPlayer('Position', [20, 400, 700, 400]);
        obj.maskPlayer  = vision.VideoPlayer('Position', [740, 400, 700, 400]);
        obj.detector = vision.ForegroundDetector('NumGaussians', 3, ...
            'NumTrainingFrames', 40, 'MinimumBackgroundRatio', 0.7);
        obj.blobAnalyser = vision.BlobAnalysis('BoundingBoxOutputPort', true, ...
            'AreaOutputPort', true, 'CentroidOutputPort', true, ...
            'MinimumBlobArea', 10);
    end

    function tracks = initializeTracks()
        % Empty struct array defining the per-track schema.
        tracks = struct(...
            'id', {}, ...
            'bbox', {}, ...
            'kalmanFilter', {}, ...
            'age', {}, ...
            'totalVisibleCount', {}, ...
            'consecutiveInvisibleCount', {}, ...
            'centroid', {}, ...
            'speed', {}, ...
            'distance', {});
    end

    function createNewTracks()
        % Start a constant-velocity Kalman track per unassigned detection.
        centroids = centroids(unassignedDetections, :);
        bboxes    = bboxes(unassignedDetections, :);
        for i = 1:size(centroids, 1)
            centroid = centroids(i, :);
            bbox     = bboxes(i, :);
            kalmanFilter = configureKalmanFilter('ConstantVelocity', ...
                centroid, [200, 50], [100, 25], 100);
            newTrack = struct(...
                'id', nextId, ...
                'bbox', bbox, ...
                'kalmanFilter', kalmanFilter, ...
                'age', 1, ...
                'totalVisibleCount', 1, ...
                'consecutiveInvisibleCount', 0, ...
                'centroid', centroid, ...
                'speed', [0 0], ...
                'distance', 0);
            tracks(end + 1) = newTrack;
            nextId = nextId + 1;
        end
    end

    function displayTrackingResults()
        % Annotate frame and mask with boxes + speed labels and display.
        frame = im2uint8(frame);
        mask  = uint8(repmat(mask, [1, 1, 3])) .* 255;
        minVisibleCount = 8;
        if ~isempty(tracks)
            reliableTrackInds = ...
                [tracks(:).totalVisibleCount] > minVisibleCount;
            reliableTracks = tracks(reliableTrackInds);
            if ~isempty(reliableTracks)
                bboxes = cat(1, reliableTracks.bbox);
                % NOTE(review): this local shadows the Kalman distance()
                % method used in the assignment loop above.
                distance = [reliableTracks(:).distance];
                disp('Calculated Speed');
                disp(distance);
                bx  = bboxes;
                dis = distance;
                ids = int32([reliableTracks(:).id]);
                labels = cat(1, cellstr(num2str(dis', '%2.2f')));
                [m, n] = size(bx);
                frame = insertObjectAnnotation(frame, 'rectangle', ...
                    bboxes, labels);
                mask = insertObjectAnnotation(mask, 'rectangle', ...
                    bboxes, labels);
                numpeople = size(bboxes, 1);
                result = insertText(mask, [10 10], numpeople, ...
                    'BoxOpacity', 1, 'FontSize', 18);
                imshow(result);
            end
        end
        maskPlayer.step(mask);
        videoPlayer.step(frame);
    end
end
MATLAB error: Undefined variable "vision" or class "vision.OpticalFlow". Error in opticalflow_hornschunk (line 21): OpticalFlow = vision.OpticalFlow('ReferenceFrameDelay', 3). This is the error message I am getting — please help me resolve it. Thanks in advance. :)
Tags: horn-schunck, optical flow, video processing
Best Answer