[英]How to write the position (x,y) of the object being tracked into text file?
I am trying to record the position (x,y) of the object being tracked into a text file. 我正在尝试记录被跟踪到文本文件中的对象的位置(x,y)。 I am using OpenCV and C++ with Visual Studio 2010. So far I can save data, but the data is just the initial position repeated over and over. 我正在使用opencv和c ++ visual2010。到目前为止，我可以保存数据，但该数据是初始位置，但重复出现。 I want it to save the actual position at every frame. 我希望它保存每一帧的实际位置。
In short, how can I write the exact data written on the screen by putText() to a file instead? 简而言之，我该如何将PutText()在屏幕上写入的确切数据写入文件？ See what putText() writes to the screen: 看看puttext()写入屏幕的内容
//write the position of the object to the screen
putText(framein,"Tracking object at (" + intToString(x)+","+intToString(y)+")",Point(x,y),1,1,Scalar(255,0,0),2);
I think the problem is in this portion: 我认为问题出在这部分:
//save position
ofstream file_;
file_.open("position.txt");
file_ <<"these are the position pattern made by the foreground object \n";
for( int count = -1; count < 10000; count++ )
{
file_ <<"X:"<<intToString(x)<<" "<<"Y:"<<intToString(y)<<"\n";
}
file_.close();
the full code is this: 完整的代码是这样的:
#include < opencv2/opencv.hpp>
#include < opencv2/core/core.hpp>
#include < opencv2/highgui/highgui.hpp>
#include < opencv2/video/background_segm.hpp>
#include < opencv2/imgproc/imgproc.hpp>
#include < opencv2/video/video.hpp>
//#include < opencv2/videoio.hpp>
//#include < opencv2/imgcodecs.hpp>
//C
#include <stdio.h>
//C++
#include <iostream>
#include <sstream>
#include <fstream>
#include <string>
using namespace cv;
using namespace std;
// NOTE(review): file-scope `using namespace` directives are discouraged in
// general, but are kept here to preserve the listing as posted.
//global variables
cv::Mat fg_mask;     // foreground mask produced by the MOG background subtractor
cv::Mat frame;       // current video frame (resized and blurred in main)
cv::Mat binaryImage; // thresholded / morphologically cleaned foreground mask
cv::Mat ContourImg;  // scratch copy of the mask used for contour extraction
Ptr<BackgroundSubtractor> pMOG; //MOG Background subtractor
int keyboard; //input from keyboard (not used anywhere in this listing)
//we'll have just one object to search for
//and keep track of its position.
int theObject[2] = {0,0}; // last known (x, y) centre of the tracked object
//bounding rectangle of the object, we will use the center of this as its position.
Rect objectBoundingRectangle = Rect(0,0,0,0);
//our sensitivity value to be used
const static int SENSITIVITY_VALUE = 50; // binarisation threshold for the mask
string intToString(int number){
// Convert an integer to its decimal string representation.
// Uses std::to_string instead of a stringstream round-trip; the
// long long cast matches the VS2010 overload set and mirrors the
// std::to_string(static_cast<long long>(...)) call already used at
// the mask-saving site in main().
return std::to_string(static_cast<long long>(number));
}
void searchForMovement(Mat binaryImage, Mat &framein){
// Find the foreground object in the thresholded mask `binaryImage`,
// update its estimated centre in the global theObject[], draw a
// crosshair and a text label onto `framein` (passed by reference so the
// caller sees the drawing), and append ONE position sample per call to
// "position.txt".
bool objectDetected = false;
Mat temp;
binaryImage.copyTo(temp); // findContours modifies its input, so work on a copy
//these two vectors needed for output of findContours
vector< vector<Point> > contours;
vector<Vec4i> hierarchy;
//find contours of filtered image using openCV findContours function
findContours(temp,contours,hierarchy,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE );// retrieves external contours
//if contours vector is not empty, we have found some objects
if(contours.size()>0)objectDetected=true;
else objectDetected = false;
if(objectDetected){
// Assume the last contour is the object. NOTE(review): findContours does
// not sort by size, so "largest is last" is an assumption to confirm.
vector< vector<Point> > largestContourVec;
largestContourVec.push_back(contours.at(contours.size()-1));
// bounding rectangle of that contour; its centre is the position estimate
objectBoundingRectangle = boundingRect(largestContourVec.at(0));
int xpos = objectBoundingRectangle.x+objectBoundingRectangle.width/2;
int ypos = objectBoundingRectangle.y+objectBoundingRectangle.height/2;
//update the objects positions by changing the 'theObject' array values
theObject[0] = xpos;
theObject[1] = ypos;
}
//make some temp x and y variables so we dont have to type out so much
int x = theObject[0];
int y = theObject[1];
//draw some crosshairs around the object
circle(framein,Point(x,y),20,Scalar(0,255,0),2);
line(framein,Point(x,y),Point(x,y-25),Scalar(0,255,0),2);
line(framein,Point(x,y),Point(x,y+25),Scalar(0,255,0),2);
line(framein,Point(x,y),Point(x-25,y),Scalar(0,255,0),2);
line(framein,Point(x,y),Point(x+25,y),Scalar(0,255,0),2);
//write the position of the object to the screen
putText(framein,"Tracking object at (" + intToString(x)+","+intToString(y)+")",Point(x,y),1,1,Scalar(255,0,0),2);
// BUG FIX: the original re-opened "position.txt" in truncate mode on every
// frame and wrote the SAME coordinate pair 10000 times, so the file only
// ever contained one repeated position. Open in append mode and log exactly
// one sample per frame instead. (The per-frame open/close is kept so the
// function stays self-contained; hoist the stream into main() if the extra
// open/close per frame matters.)
ofstream file_;
file_.open("position.txt", ios::app);
file_ <<"X:"<<intToString(x)<<" "<<"Y:"<<intToString(y)<<"\n";
file_.close();
}
void morphOps(Mat &thresh){
// In-place morphological clean-up of the binary mask `thresh`:
// two erosions (remove speckle noise) followed by two dilations.
// NOTE(review): the old comments claimed 3x3 and 8x8 kernels, but the code
// actually builds a 2x2 erode element and a 1x1 dilate element — and a 1x1
// rectangular dilation is effectively a no-op. Confirm the intended sizes.
Mat erodeElement = getStructuringElement( MORPH_RECT,Size(2,2)); //2x2 erode kernel
//dilate with larger element so make sure object is nicely visible
Mat dilateElement = getStructuringElement( MORPH_RECT,Size(1,1)); //1x1 dilate kernel (no-op)
erode(thresh,thresh,erodeElement);
erode(thresh,thresh,erodeElement);
dilate(thresh,thresh,dilateElement);
dilate(thresh,thresh,dilateElement);
}
int main(int, char**)
{
// Entry point: replays the input video forever; per frame it runs MOG
// background subtraction, morphological clean-up, blob tracking
// (searchForMovement), draws bounding boxes, and saves each foreground
// mask to disk. Keys: ESC = quit, 't' = toggle tracking, 'd' = toggle
// debug windows, 'p' = pause/resume.
//some boolean variables for added functionality
bool objectDetected = false;
//these two can be toggled by pressing 'd' or 't'
bool debugMode = true;
bool trackingEnabled = true;
//pause and resume code
bool pause = false;
//video capture object.
VideoCapture capture;
while(1){
//we can loop the video by re-opening the capture every time the video reaches its last frame
capture.open("Video_003.avi");
//capture.open(0);
if(!capture.isOpened()){
cout<<"ERROR ACQUIRING VIDEO FEED\n";
getchar();
return -1;
}
double fps = capture.get(CV_CAP_PROP_FPS); //get the frames per seconds of the video
cout << "Frame per seconds : " << fps << endl;
// A fresh background model is allocated on every replay of the video;
// the Ptr<> reference count releases the previous one.
pMOG = new BackgroundSubtractorMOG();
//morphology element (7x7 rect) used by the CLOSE operation below
Mat element = getStructuringElement(MORPH_RECT, Size(7, 7), Point(3,3) );
int count = -1; // frame counter, used only to name the saved mask images
//check if the video has reach its last frame.
//we add '-1' because we are reading two frames from the video at a time.
//if this is not included, we get a memory error!
while(capture.get(CV_CAP_PROP_POS_FRAMES) <capture.get(CV_CAP_PROP_FRAME_COUNT)-1){
// Get frame from camera
// NOTE(review): the return value of read() is not checked; a failed
// grab would pass an empty Mat to resize() below.
capture.read(frame);
// Update counter
++count;
//Resize to half size in each dimension
resize(frame, frame, Size(frame.size().width/2, frame.size().height/2) );
//Blur to suppress pixel noise before background subtraction
blur(frame, frame, Size(5,5) );
// Background subtraction (learning rate 0.05)
pMOG->operator()(frame, fg_mask,0.05);
////////
//pre procesing
//1 point delete: morphological CLOSE fills small holes in the mask
morphologyEx(fg_mask, binaryImage, CV_MOP_CLOSE, element);
// threshold
//threshold intensity image at a given sensitivity value
cv::threshold(binaryImage,binaryImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
morphOps(binaryImage);
if(debugMode==true){
imshow("frame", frame);
imshow("fg_mask", fg_mask);
imshow("final", binaryImage);
}else{
//if not in debug mode, destroy the windows so we don't see them anymore
cv::destroyWindow("frame");
cv::destroyWindow("fg_mask");
cv::destroyWindow("final");
}
//if tracking enabled, search for contours in our thresholded image
if(trackingEnabled){
searchForMovement(binaryImage,frame);
//Find contour
ContourImg = binaryImage.clone();
//less blob delete
vector< vector< Point> > contours;
findContours(ContourImg,
contours, // a vector of contours
CV_RETR_EXTERNAL, // retrieve the external contours
CV_CHAIN_APPROX_NONE); // all pixels of each contours
vector< Rect > output;
vector< vector< Point> >::iterator itc= contours.begin();
while (itc!=contours.end()) {
//Create bounding rect of object
//rect draw on origin image
Rect mr= boundingRect(Mat(*itc));
rectangle(frame, mr, CV_RGB(255,0,0));
++itc;
}
}
imshow("frame", frame);
// Save foreground mask
// NOTE(review): the output directory is hard-coded; imwrite fails
// silently here if D:\SO\temp does not exist.
string name = "mask_" + std::to_string(static_cast<long long>(count)) + ".png";
imwrite("D:\\SO\\temp\\" + name, fg_mask);
switch(waitKey(10)){
case 27: //'esc' key has been pressed, exit program.
return 0;
case 116: //'t' has been pressed. this will toggle tracking
trackingEnabled = !trackingEnabled;
if(trackingEnabled == false) cout<<"Tracking disabled."<<endl;
else cout<<"Tracking enabled."<<endl;
break;
case 100: //'d' has been pressed. this will debug mode
debugMode = !debugMode;
if(debugMode == true) cout<<"Debug mode enabled."<<endl;
else cout<<"Debug mode disabled."<<endl;
break;
case 112: //'p' has been pressed. this will pause/resume the code.
pause = !pause;
if(pause == true){ cout<<"Code paused, press 'p' again to resume"<<endl;
while (pause == true){
//stay in this loop until 'p' is pressed again
switch (waitKey()){
//a switch statement inside a switch statement? Mind blown.
case 112:
//change pause back to false
pause = false;
cout<<"Code Resumed"<<endl;
break;
}
}
// the camera will be deinitialized automatically in VideoCapture destructor
}
}
}
//release the capture before re-opening and looping again.
capture.release();
}
return 0;
}
OK I see several strange things in your code. 好的,我在您的代码中看到了一些奇怪的事情。 But to answer your question: 但是要回答你的问题:
In your code, you are opening the file, outputting the same values of x and y 10000 times, and closing the file on every frame. 在您的代码中，您正在打开文件，为x和y输出10000次相同的值，并为每个frame关闭文件。 Instead, you should open the file once at the start, output only one pair of coordinates per frame, and close the file at the end. 相反，您应该做的是在开始时打开文件，每帧仅输出一对坐标 ，然后在结束时关闭文件。
Example code: 示例代码:
Before main loop starts 在主循环开始之前
ofstream file_;
file_.open("position.txt");
file_ <<"these are the position pattern made by the foreground object \n";
In main loop 在主循环中
file_ <<"X:"<<intToString(x)<<" "<<"Y:"<<intToString(y)<<"\n";
After main loop ends 主循环结束后
file_.close();
EDIT: Added full code the way I meant for it to be: 编辑:添加完整的代码,我的意思是:
#include < opencv2/opencv.hpp>
#include < opencv2/core/core.hpp>
#include < opencv2/highgui/highgui.hpp>
#include < opencv2/video/background_segm.hpp>
#include < opencv2/imgproc/imgproc.hpp>
#include < opencv2/video/video.hpp>
//#include < opencv2/videoio.hpp>
//#include < opencv2/imgcodecs.hpp>
//C
#include <stdio.h>
//C++
#include <iostream>
#include <sstream>
#include <fstream>
using namespace cv;
using namespace std;
// Position log stream: opened once in main(), written once per frame by
// searchForMovement(), global so both functions can reach it.
ofstream file_;
//global variables
cv::Mat fg_mask;     // foreground mask produced by the MOG background subtractor
cv::Mat frame;       // current video frame (resized and blurred in main)
cv::Mat binaryImage; // thresholded / morphologically cleaned foreground mask
cv::Mat ContourImg;  // scratch copy of the mask used for contour extraction
Ptr<BackgroundSubtractor> pMOG; //MOG Background subtractor
int keyboard; //input from keyboard (not used anywhere in this listing)
//we'll have just one object to search for
//and keep track of its position.
int theObject[2] = {0,0}; // last known (x, y) centre of the tracked object
//bounding rectangle of the object, we will use the center of this as its position.
Rect objectBoundingRectangle = Rect(0,0,0,0);
//our sensitivity value to be used
const static int SENSITIVITY_VALUE = 50; // binarisation threshold for the mask
string intToString(int number){
// Render an int as its decimal string form via an output string stream.
std::ostringstream formatter;
formatter << number;
return formatter.str();
}
void searchForMovement(Mat binaryImage, Mat &framein){
// Locate the foreground object in the thresholded mask `binaryImage`,
// update the global position estimate in theObject[], draw a crosshair
// and label onto `framein` (by reference, so the caller sees it), and
// append one "X:.. Y:.." line to the already-open global log `file_`.
Mat maskCopy;
binaryImage.copyTo(maskCopy); // findContours modifies its input
vector< vector<Point> > foundContours;
vector<Vec4i> contourHierarchy;
// external contours only
findContours(maskCopy,foundContours,contourHierarchy,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE );
if (!foundContours.empty()) {
// take the last contour (assumed to be the biggest, i.e. the object);
// the centre of its bounding rectangle is the new position estimate
objectBoundingRectangle = boundingRect(foundContours.back());
theObject[0] = objectBoundingRectangle.x + objectBoundingRectangle.width/2;
theObject[1] = objectBoundingRectangle.y + objectBoundingRectangle.height/2;
}
// short aliases for the current estimate
const int x = theObject[0];
const int y = theObject[1];
// crosshair around the estimated centre
circle(framein,Point(x,y),20,Scalar(0,255,0),2);
line(framein,Point(x,y),Point(x,y-25),Scalar(0,255,0),2);
line(framein,Point(x,y),Point(x,y+25),Scalar(0,255,0),2);
line(framein,Point(x,y),Point(x-25,y),Scalar(0,255,0),2);
line(framein,Point(x,y),Point(x+25,y),Scalar(0,255,0),2);
// on-screen label showing the same coordinates that get logged
putText(framein,"Tracking object at (" + intToString(x)+","+intToString(y)+")",Point(x,y),1,1,Scalar(255,0,0),2);
// exactly one log line per frame
file_ <<"X:"<<intToString(x)<<" "<<"Y:"<<intToString(y)<<"\n";
}
void morphOps(Mat &thresh){
// In-place morphological clean-up of the binary mask `thresh`:
// two erosions (remove speckle noise) followed by two dilations.
// NOTE(review): the old comments claimed 3x3 and 8x8 kernels, but the code
// actually builds a 2x2 erode element and a 1x1 dilate element — and a 1x1
// rectangular dilation is effectively a no-op. Confirm the intended sizes.
Mat erodeElement = getStructuringElement( MORPH_RECT,Size(2,2)); //2x2 erode kernel
//dilate with larger element so make sure object is nicely visible
Mat dilateElement = getStructuringElement( MORPH_RECT,Size(1,1)); //1x1 dilate kernel (no-op)
erode(thresh,thresh,erodeElement);
erode(thresh,thresh,erodeElement);
dilate(thresh,thresh,dilateElement);
dilate(thresh,thresh,dilateElement);
}
int main(int, char**)
{
// Entry point: opens the position log once, then replays the input video
// forever; per frame it runs MOG background subtraction, morphological
// clean-up, blob tracking (searchForMovement, which appends one line to
// the log), draws bounding boxes, and saves each foreground mask to disk.
// Keys: ESC = quit, 't' = toggle tracking, 'd' = toggle debug windows,
// 'p' = pause/resume.
file_.open("position.txt");
file_ <<"these are the position pattern made by the foreground object \n";
//some boolean variables for added functionality
bool objectDetected = false;
//these two can be toggled by pressing 'd' or 't'
bool debugMode = true;
bool trackingEnabled = true;
//pause and resume code
bool pause = false;
//video capture object.
VideoCapture capture;
while(1){
//we can loop the video by re-opening the capture every time the video reaches its last frame
capture.open("Video_003.avi");
//capture.open(0);
if(!capture.isOpened()){
cout<<"ERROR ACQUIRING VIDEO FEED\n";
getchar();
return -1;
}
double fps = capture.get(CV_CAP_PROP_FPS); //get the frames per seconds of the video
cout << "Frame per seconds : " << fps << endl;
// A fresh background model is allocated on every replay of the video;
// the Ptr<> reference count releases the previous one.
pMOG = new BackgroundSubtractorMOG();
//morphology element (7x7 rect) used by the CLOSE operation below
Mat element = getStructuringElement(MORPH_RECT, Size(7, 7), Point(3,3) );
int count = -1; // frame counter, used only to name the saved mask images
//check if the video has reach its last frame.
//we add '-1' because we are reading two frames from the video at a time.
//if this is not included, we get a memory error!
while(capture.get(CV_CAP_PROP_POS_FRAMES) <capture.get(CV_CAP_PROP_FRAME_COUNT)-1){
// Get frame from camera
capture.read(frame);
// Update counter
++count;
//Resize to half size in each dimension
resize(frame, frame, Size(frame.size().width/2, frame.size().height/2) );
//Blur to suppress pixel noise before background subtraction
blur(frame, frame, Size(5,5) );
// Background subtraction (learning rate 0.05)
pMOG->operator()(frame, fg_mask,0.05);
////////
//pre procesing
//1 point delete: morphological CLOSE fills small holes in the mask
morphologyEx(fg_mask, binaryImage, CV_MOP_CLOSE, element);
// threshold
//threshold intensity image at a given sensitivity value
cv::threshold(binaryImage,binaryImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
morphOps(binaryImage);
if(debugMode==true){
imshow("frame", frame);
imshow("fg_mask", fg_mask);
imshow("final", binaryImage);
}else{
//if not in debug mode, destroy the windows so we don't see them anymore
cv::destroyWindow("frame");
cv::destroyWindow("fg_mask");
cv::destroyWindow("final");
}
//if tracking enabled, search for contours in our thresholded image
if(trackingEnabled){
searchForMovement(binaryImage,frame);
//Find contour
ContourImg = binaryImage.clone();
//less blob delete
vector< vector< Point> > contours;
findContours(ContourImg,
contours, // a vector of contours
CV_RETR_EXTERNAL, // retrieve the external contours
CV_CHAIN_APPROX_NONE); // all pixels of each contours
vector< Rect > output;
vector< vector< Point> >::iterator itc= contours.begin();
while (itc!=contours.end()) {
//Create bounding rect of object
//rect draw on origin image
Rect mr= boundingRect(Mat(*itc));
rectangle(frame, mr, CV_RGB(255,0,0));
++itc;
}
}
imshow("frame", frame);
// Save foreground mask
string name = "mask_" + std::to_string(static_cast<long long>(count)) + ".png";
imwrite("D:\\SO\\temp\\" + name, fg_mask);
switch(waitKey(10)){
case 27: //'esc' key has been pressed, exit program.
file_.close(); // flush and close the position log before exiting
return 0;
case 116: //'t' has been pressed. this will toggle tracking
trackingEnabled = !trackingEnabled;
if(trackingEnabled == false) cout<<"Tracking disabled."<<endl;
else cout<<"Tracking enabled."<<endl;
break;
case 100: //'d' has been pressed. this will debug mode
debugMode = !debugMode;
if(debugMode == true) cout<<"Debug mode enabled."<<endl;
else cout<<"Debug mode disabled."<<endl;
break;
case 112: //'p' has been pressed. this will pause/resume the code.
pause = !pause;
if(pause == true){ cout<<"Code paused, press 'p' again to resume"<<endl;
while (pause == true){
//stay in this loop until 'p' is pressed again
switch (waitKey()){
//a switch statement inside a switch statement? Mind blown.
case 112:
//change pause back to false
pause = false;
cout<<"Code Resumed"<<endl;
break;
}
}
// the camera will be deinitialized automatically in VideoCapture destructor
}
}
}
//release the capture before re-opening and looping again.
capture.release();
// BUG FIX: the original called file_.close() here, INSIDE the endless
// while(1) replay loop, so every replay after the first wrote to a
// closed stream and all further positions were silently lost. The log
// now stays open for the whole run and is closed on the ESC exit path
// above (the global ofstream destructor would also flush it at exit).
}
return 0;
}
声明:本站的技术帖子网页,遵循CC BY-SA 4.0协议,如果您需要转载,请注明本站网址或者原文地址。任何问题请咨询:yoyou2525@163.com.