How to write the position (x, y) of the tracked object to a text file?


Question


I am trying to record the position (x, y) of the object being tracked into a text file. I am using OpenCV and C++ with Visual Studio 2010. So far I can save data, but it is just the initial position repeated over and over; I want it to save the actual position at every frame.

In short, how can I write the exact text that putText() draws on the screen to a file instead? See what putText() writes to the screen:

 //write the position of the object to the screen
 putText(framein,"Tracking object at (" +   intToString(x)+","+intToString(y)+")",Point(x,y),1,1,Scalar(255,0,0),2);

I think the problem is in this portion:

//save position
ofstream file_;
file_.open("position.txt");
file_ <<"these are the position pattern made by the foreground object \n";
for( int count = -1; count < 10000; count++ )
{
    file_ <<"X:"<<intToString(x)<<"  "<<"Y:"<<intToString(y)<<"\n";


}
file_.close();  

The full code is this:

#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/background_segm.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/video/video.hpp>
//#include <opencv2/videoio.hpp>
//#include <opencv2/imgcodecs.hpp>
//C
#include <stdio.h>
//C++
#include <iostream>
#include <sstream>
#include <fstream>

using namespace cv;
using namespace std;

//global variables
cv::Mat fg_mask;
cv::Mat frame;
cv::Mat binaryImage;
cv::Mat ContourImg;
Ptr<BackgroundSubtractor> pMOG; //MOG Background subtractor
int keyboard; //input from keyboard
//we'll have just one object to search for
//and keep track of its position.
int theObject[2] = {0,0};
//bounding rectangle of the object, we will use the center of this as its position.
Rect objectBoundingRectangle = Rect(0,0,0,0);
//our sensitivity value to be used 
const static int SENSITIVITY_VALUE = 50;

string intToString(int number){

//this function has a number input and string output
std::stringstream ss;
ss << number;
return ss.str();
 }
 void searchForMovement(Mat binaryImage, Mat &framein){
//notice how we use the '&' operator for framein. This is because we wish
//to take the value passed into the function and manipulate it, rather than just working with a copy.
//eg. we draw to framein so it can be displayed in the main() function.
bool objectDetected = false;
Mat temp;
binaryImage.copyTo(temp);
//these two vectors needed for output of findContours
vector< vector<Point> > contours;
vector<Vec4i> hierarchy;
//find contours of filtered image using openCV findContours function
//findContours(temp,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE );// retrieves all contours
findContours(temp,contours,hierarchy,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE );// retrieves external contours

//if contours vector is not empty, we have found some objects
if(contours.size()>0)objectDetected=true;
else objectDetected = false;

 if(objectDetected){
    //the largest contour is found at the end of the contours vector
    //we will simply assume that the biggest contour is the object we are looking for.
    vector< vector<Point> > largestContourVec;
    largestContourVec.push_back(contours.at(contours.size()-1));
    //make a bounding rectangle around the largest contour then find its centroid
    //this will be the object's final estimated position.
    objectBoundingRectangle = boundingRect(largestContourVec.at(0));
    int xpos = objectBoundingRectangle.x+objectBoundingRectangle.width/2;
    int ypos = objectBoundingRectangle.y+objectBoundingRectangle.height/2;

    //update the objects positions by changing the 'theObject' array values
    theObject[0] = xpos , theObject[1] = ypos;
}
//make some temp x and y variables so we don't have to type out so much
int x = theObject[0];
int y = theObject[1];

//draw some crosshairs around the object
circle(framein,Point(x,y),20,Scalar(0,255,0),2);
line(framein,Point(x,y),Point(x,y-25),Scalar(0,255,0),2);
line(framein,Point(x,y),Point(x,y+25),Scalar(0,255,0),2);
line(framein,Point(x,y),Point(x-25,y),Scalar(0,255,0),2);
line(framein,Point(x,y),Point(x+25,y),Scalar(0,255,0),2);

//write the position of the object to the screen
putText(framein,"Tracking object at (" + intToString(x)+","+intToString(y)+")",Point(x,y),1,1,Scalar(255,0,0),2);

//save position
ofstream file_;
file_.open("position.txt");
file_ <<"these are the position pattern made by the foreground object \n";
for( int count = -1; count < 10000; count++ )
{
    file_ <<"X:"<<intToString(x)<<"  "<<"Y:"<<intToString(y)<<"\n";


}
file_.close();
//std::cin.get();


  }

   void morphOps(Mat &thresh){

    //create structuring elements that will be used to "dilate" and "erode" the image.
    //the elements chosen here are a 2px by 2px and a 1px by 1px rectangle

    Mat erodeElement = getStructuringElement( MORPH_RECT,Size(2,2));
    //dilate to make sure the object is nicely visible
    Mat dilateElement = getStructuringElement( MORPH_RECT,Size(1,1));

    erode(thresh,thresh,erodeElement);
   erode(thresh,thresh,erodeElement);


   dilate(thresh,thresh,dilateElement);
   dilate(thresh,thresh,dilateElement);

   }
 int main(int, char**)
 {
//some boolean variables for added functionality
bool objectDetected = false;
//these two can be toggled by pressing 'd' or 't'
bool debugMode = true;
bool trackingEnabled = true;
//pause and resume code
bool pause = false;
//video capture object.
VideoCapture capture;
while(1){

    //we can loop the video by re-opening the capture every time the video reaches its last frame

    capture.open("Video_003.avi");
    //capture.open(0);

    if(!capture.isOpened()){
        cout<<"ERROR ACQUIRING VIDEO FEED\n";   
        getchar();
        return -1;
    }
    double fps = capture.get(CV_CAP_PROP_FPS); //get the frames per seconds of the video
     cout << "Frame per seconds : " << fps << endl;

pMOG = new BackgroundSubtractorMOG();

 //morphology element  
 Mat element = getStructuringElement(MORPH_RECT, Size(7, 7), Point(3,3) );

int count = -1;

 //check if the video has reached its last frame.
    //we add '-1' because we are reading two frames from the video at a time.
    //if this is not included, we get a memory error!
    while(capture.get(CV_CAP_PROP_POS_FRAMES) <capture.get(CV_CAP_PROP_FRAME_COUNT)-1){
    // Get frame from camera
        capture.read(frame);
    // Update counter
    ++count;
    //Resize  
 resize(frame, frame, Size(frame.size().width/2, frame.size().height/2) );
    //Blur  
    blur(frame, frame, Size(5,5) );

    // Background subtraction
    pMOG->operator()(frame, fg_mask,0.05);

    ////////
    //pre processing
    //1 point delete    
     morphologyEx(fg_mask, binaryImage, CV_MOP_CLOSE, element);  

     // threshold
     //threshold intensity image at a given sensitivity value
             cv::threshold(binaryImage,binaryImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
        morphOps(binaryImage);

     if(debugMode==true){

    imshow("frame", frame);
    imshow("fg_mask", fg_mask);
    imshow("final", binaryImage);
    }else{
            //if not in debug mode, destroy the windows so we don't see them anymore
            cv::destroyWindow("frame");
            cv::destroyWindow("fg_mask");
            cv::destroyWindow("final");
     }

     //if tracking enabled, search for contours in our thresholded image
        if(trackingEnabled){

            searchForMovement(binaryImage,frame);

     //Find contour  
  ContourImg = binaryImage.clone();  
  //less blob delete  
  vector< vector< Point> > contours;  

   findContours(ContourImg,  
        contours, // a vector of contours  
       CV_RETR_EXTERNAL, // retrieve the external contours  
       CV_CHAIN_APPROX_NONE); // all pixels of each contours  


 vector< Rect > output;  
  vector< vector< Point> >::iterator itc= contours.begin();  
  while (itc!=contours.end()) {  

 //Create bounding rect of object  
 //rect draw on origin image  
 Rect mr= boundingRect(Mat(*itc));  
 rectangle(frame, mr, CV_RGB(255,0,0));  
 ++itc;  
  }  
        }
        imshow("frame", frame);
    // Save foreground mask
    string name = "mask_" + std::to_string(static_cast<long long>(count)) + ".png";
    imwrite("D:\\SO\\temp\\" + name, fg_mask);

    switch(waitKey(10)){

        case 27: //'esc' key has been pressed, exit program.
            return 0;
        case 116: //'t' has been pressed. this will toggle tracking
            trackingEnabled = !trackingEnabled;
            if(trackingEnabled == false) cout<<"Tracking disabled."<<endl;
            else cout<<"Tracking enabled."<<endl;
            break;
        case 100: //'d' has been pressed. this will toggle debug mode
            debugMode = !debugMode;
            if(debugMode == true) cout<<"Debug mode enabled."<<endl;
            else cout<<"Debug mode disabled."<<endl;
            break;
        case 112: //'p' has been pressed. this will pause/resume the code.
            pause = !pause;
            if(pause == true){ cout<<"Code paused, press 'p' again to resume"<<endl;
            while (pause == true){
                //stay in this loop until 
                switch (waitKey()){
                    //a switch statement inside a switch statement? Mind blown.
                case 112: 
                    //change pause back to false
                    pause = false;
                    cout<<"Code Resumed"<<endl;
                    break;
                }

}
// the camera will be deinitialized automatically in VideoCapture destructor

            }
    }
    }
    //release the capture before re-opening and looping again.
    capture.release();
    }
     return 0;
    }

Answer 1:


OK I see several strange things in your code. But to answer your question:

In your code, you open the file, write the same x and y values 10,000 times, and close the file again for every single frame. Instead, open the file once at the start, write only one pair of coordinates per frame, and close the file at the end.

Example code:

Before the main loop starts

ofstream file_;
file_.open("position.txt");
file_ <<"these are the position pattern made by the foreground object \n";

In the main loop

file_ <<"X:"<<intToString(x)<<"  "<<"Y:"<<intToString(y)<<"\n";

After the main loop ends

file_.close();
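
Here is a minimal, self-contained sketch of that pattern. The loop and getTrackedPosition() below are just hypothetical stand-ins for your real per-frame tracking code; the point is only where the file is opened, written, and closed:

#include <fstream>
#include <iostream>
#include <utility>

// Hypothetical stand-in for whatever produces the tracked position each frame.
std::pair<int, int> getTrackedPosition(int frameIndex) {
    return std::make_pair(frameIndex * 2, frameIndex * 3); // dummy values
}

int main() {
    std::ofstream file_("position.txt");          // open once, before the loop
    if (!file_) {
        std::cerr << "Could not open position.txt\n";
        return -1;
    }
    file_ << "these are the position pattern made by the foreground object\n";

    for (int frame = 0; frame < 100; ++frame) {   // stands in for the video loop
        std::pair<int, int> pos = getTrackedPosition(frame);
        file_ << "X:" << pos.first << "  Y:" << pos.second << "\n";  // one line per frame
    }

    file_.close();                                // close once, after the loop
    return 0;
}

Note that std::ofstream can format integers directly, so the intToString() helper is not strictly needed when writing to the file.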

EDIT: Added full code the way I meant for it to be:

#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/background_segm.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/video/video.hpp>
//#include <opencv2/videoio.hpp>
//#include <opencv2/imgcodecs.hpp>
//C
#include <stdio.h>
//C++
#include <iostream>
#include <sstream>
#include <fstream>

using namespace cv;
using namespace std;

ofstream file_;

//global variables
cv::Mat fg_mask;
cv::Mat frame;
cv::Mat binaryImage;
cv::Mat ContourImg;
Ptr<BackgroundSubtractor> pMOG; //MOG Background subtractor
int keyboard; //input from keyboard
//we'll have just one object to search for
//and keep track of its position.
int theObject[2] = {0,0};
//bounding rectangle of the object, we will use the center of this as its position.
Rect objectBoundingRectangle = Rect(0,0,0,0);
//our sensitivity value to be used
const static int SENSITIVITY_VALUE = 50;

string intToString(int number){

    //this function has a number input and string output
    std::stringstream ss;
    ss << number;
    return ss.str();
}
void searchForMovement(Mat binaryImage, Mat &framein){
    //notice how we use the '&' operator for framein. This is because we wish
    //to take the value passed into the function and manipulate it, rather than just working with a copy.
    //eg. we draw to framein so it can be displayed in the main() function.
    bool objectDetected = false;
    Mat temp;
    binaryImage.copyTo(temp);
    //these two vectors needed for output of findContours
    vector< vector<Point> > contours;
    vector<Vec4i> hierarchy;
    //find contours of filtered image using openCV findContours function
    //findContours(temp,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE );// retrieves all contours
    findContours(temp,contours,hierarchy,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE );// retrieves external contours

    //if contours vector is not empty, we have found some objects
    if(contours.size()>0)objectDetected=true;
    else objectDetected = false;

    if(objectDetected){
        //the largest contour is found at the end of the contours vector
        //we will simply assume that the biggest contour is the object we are looking for.
        vector< vector<Point> > largestContourVec;
        largestContourVec.push_back(contours.at(contours.size()-1));
        //make a bounding rectangle around the largest contour then find its centroid
        //this will be the object's final estimated position.
        objectBoundingRectangle = boundingRect(largestContourVec.at(0));
        int xpos = objectBoundingRectangle.x+objectBoundingRectangle.width/2;
        int ypos = objectBoundingRectangle.y+objectBoundingRectangle.height/2;

        //update the objects positions by changing the 'theObject' array values
        theObject[0] = xpos , theObject[1] = ypos;
    }
    //make some temp x and y variables so we don't have to type out so much
    int x = theObject[0];
    int y = theObject[1];

    //draw some crosshairs around the object
    circle(framein,Point(x,y),20,Scalar(0,255,0),2);
    line(framein,Point(x,y),Point(x,y-25),Scalar(0,255,0),2);
    line(framein,Point(x,y),Point(x,y+25),Scalar(0,255,0),2);
    line(framein,Point(x,y),Point(x-25,y),Scalar(0,255,0),2);
    line(framein,Point(x,y),Point(x+25,y),Scalar(0,255,0),2);

    //write the position of the object to the screen
    putText(framein,"Tracking object at (" + intToString(x)+","+intToString(y)+")",Point(x,y),1,1,Scalar(255,0,0),2);

    //save position
    file_ <<"X:"<<intToString(x)<<"  "<<"Y:"<<intToString(y)<<"\n";

    //std::cin.get();


}

void morphOps(Mat &thresh){

    //create structuring elements that will be used to "dilate" and "erode" the image.
    //the elements chosen here are a 2px by 2px and a 1px by 1px rectangle

    Mat erodeElement = getStructuringElement( MORPH_RECT,Size(2,2));
    //dilate to make sure the object is nicely visible
    Mat dilateElement = getStructuringElement( MORPH_RECT,Size(1,1));

    erode(thresh,thresh,erodeElement);
    erode(thresh,thresh,erodeElement);


    dilate(thresh,thresh,dilateElement);
    dilate(thresh,thresh,dilateElement);

}
int main(int, char**)
{

    file_.open("position.txt");
    file_ <<"these are the position pattern made by the foreground object \n";

    //some boolean variables for added functionality
    bool objectDetected = false;
    //these two can be toggled by pressing 'd' or 't'
    bool debugMode = true;
    bool trackingEnabled = true;
    //pause and resume code
    bool pause = false;
    //video capture object.
    VideoCapture capture;

    while(1){

        //we can loop the video by re-opening the capture every time the video reaches its last frame

        capture.open("Video_003.avi");
        //capture.open(0);

        if(!capture.isOpened()){
            cout<<"ERROR ACQUIRING VIDEO FEED\n";
            getchar();
            return -1;
        }
        double fps = capture.get(CV_CAP_PROP_FPS); //get the frames per seconds of the video
        cout << "Frame per seconds : " << fps << endl;

        pMOG = new BackgroundSubtractorMOG();

        //morphology element
        Mat element = getStructuringElement(MORPH_RECT, Size(7, 7), Point(3,3) );

        int count = -1;

        //check if the video has reached its last frame.
        //we add '-1' because we are reading two frames from the video at a time.
        //if this is not included, we get a memory error!
        while(capture.get(CV_CAP_PROP_POS_FRAMES) <capture.get(CV_CAP_PROP_FRAME_COUNT)-1){
            // Get frame from camera
            capture.read(frame);
            // Update counter
            ++count;
            //Resize
            resize(frame, frame, Size(frame.size().width/2, frame.size().height/2) );
            //Blur
            blur(frame, frame, Size(5,5) );

            // Background subtraction
            pMOG->operator()(frame, fg_mask,0.05);

            ////////
            //pre processing
            //1 point delete
            morphologyEx(fg_mask, binaryImage, CV_MOP_CLOSE, element);

            // threshold
            //threshold intensity image at a given sensitivity value
            cv::threshold(binaryImage,binaryImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
            morphOps(binaryImage);

            if(debugMode==true){

                imshow("frame", frame);
                imshow("fg_mask", fg_mask);
                imshow("final", binaryImage);
            }else{
                //if not in debug mode, destroy the windows so we don't see them anymore
                cv::destroyWindow("frame");
                cv::destroyWindow("fg_mask");
                cv::destroyWindow("final");
            }

            //if tracking enabled, search for contours in our thresholded image
            if(trackingEnabled){

                searchForMovement(binaryImage,frame);

                //Find contour
                ContourImg = binaryImage.clone();
                //less blob delete
                vector< vector< Point> > contours;

                findContours(ContourImg,
                             contours, // a vector of contours
                             CV_RETR_EXTERNAL, // retrieve the external contours
                             CV_CHAIN_APPROX_NONE); // all pixels of each contours


                vector< Rect > output;
                vector< vector< Point> >::iterator itc= contours.begin();
                while (itc!=contours.end()) {

                    //Create bounding rect of object
                    //rect draw on origin image
                    Rect mr= boundingRect(Mat(*itc));
                    rectangle(frame, mr, CV_RGB(255,0,0));
                    ++itc;
                }
            }
            imshow("frame", frame);
            // Save foreground mask
            string name = "mask_" + std::to_string(static_cast<long long>(count)) + ".png";
            imwrite("D:\\SO\\temp\\" + name, fg_mask);

            switch(waitKey(10)){

                case 27: //'esc' key has been pressed, exit program.
                    return 0;
                case 116: //'t' has been pressed. this will toggle tracking
                    trackingEnabled = !trackingEnabled;
                    if(trackingEnabled == false) cout<<"Tracking disabled."<<endl;
                    else cout<<"Tracking enabled."<<endl;
                    break;
                case 100: //'d' has been pressed. this will toggle debug mode
                    debugMode = !debugMode;
                    if(debugMode == true) cout<<"Debug mode enabled."<<endl;
                    else cout<<"Debug mode disabled."<<endl;
                    break;
                case 112: //'p' has been pressed. this will pause/resume the code.
                    pause = !pause;
                    if(pause == true){ cout<<"Code paused, press 'p' again to resume"<<endl;
                        while (pause == true){
                            //stay in this loop until
                            switch (waitKey()){
                                //a switch statement inside a switch statement? Mind blown.
                                case 112:
                                    //change pause back to false
                                    pause = false;
                                    cout<<"Code Resumed"<<endl;
                                    break;
                            }

                        }
                        // the camera will be deinitialized automatically in VideoCapture destructor

                    }
            }
        }
        //release the capture before re-opening and looping again.
        capture.release();
    }
    //Close the position log once we are completely done
    //(closing it inside the while(1) loop would leave later iterations writing to a closed stream).
    file_.close();
    return 0;
}
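
As a quick sanity check (not part of the original answer), you can read position.txt back after a run and count the lines to confirm that exactly one position was logged per processed frame; this assumes the file format produced above:

#include <fstream>
#include <iostream>
#include <string>

int main() {
    std::ifstream in("position.txt");
    if (!in) {
        std::cerr << "position.txt not found\n";
        return -1;
    }

    std::string line;
    std::getline(in, line);              // skip the header line
    int frames = 0;
    while (std::getline(in, line)) {     // each remaining line is one frame's "X:...  Y:..." entry
        ++frames;
    }
    std::cout << "Logged positions for " << frames << " frames" << std::endl;
    return 0;
}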


Source: https://stackoverflow.com/questions/37095624/how-to-write-the-position-x-y-of-the-object-being-tracked-into-text-file
