Proper inter process communication method between Opencv C++ webcam program and C server

早过忘川 提交于 2019-12-13 18:37:03

问题


Consider a simple OpenCV program that captures webcam frames:

#ifndef __OPENCV__
#define __OPENCV__
#include "opencv2/opencv.hpp"
#endif
#include <iostream>
#include "utils.hpp"
#include "constants.hpp"
#include <unistd.h>
#include <vector>
#include "InputStateContext.hpp"
#include <SDL.h>
#include <SDL_events.h>
#include "MiddlewareConnector.hpp"

using namespace cv;
using namespace std;

void onTrackbar_changed(int, void* data);
//void onThreshold_changed(int, void* data);
void onMouse(int evt, int x, int y, int flags, void* param);
int keyboardCallback(SDL_KeyboardEvent ev);
int mouseCallback(SDL_MouseButtonEvent ev, float scaleX, float scaleY, Mat frame);

InputStateContext context;

int main(int argc, char* argv[])
{
    printVersion();

    /* Fix: the camera index (argv[1]) is mandatory — check argc before
     * dereferencing argv[1], otherwise running with no arguments crashes. */
    if (argc < 2)
    {
        cout << "stopping execution: too few arguments." << endl;
        return -1;
    }

    /* Initialise SDL */
    if( SDL_Init( SDL_INIT_VIDEO ) < 0)
    {
        fprintf( stderr, "Could not initialise SDL: %s\n", SDL_GetError() );
        exit( -1 );
    }

    string host;
    unsigned int port;
    const String sourceReference = argv[1];
    int camNum;
    string sensorName;
    try
    {
        camNum = stoi(sourceReference); // throws std::invalid_argument / std::out_of_range
    }
    catch (const std::exception& e)
    {
        std::cout << "Exception: " << e.what() << endl;
        return -1;
    }
    if (argc > 4)
    {
        /* NOTE: atoi() never throws, so the original try/catch here was dead
         * code; a malformed port simply parses to 0. */
        host = argv[2];
        port = atoi(argv[3]);
        sensorName = argv[4];
    }
    else if (argc > 2)
    {
        cout << "arguments less than 4" << endl;
        /* Fall back to a local middleware endpoint. */
        host = "http://localhost";
        port = 3000;

        sensorName = argv[2];
        cout << argc << endl;
        cout << "sensor name set from arguments: " << sensorName << endl;
    }
    else
    {
        cout << "stopping execution: too few arguments." << endl;
        return -1;
    }

    /* Register this sensor on the middleware before starting the capture. */
    MiddlewareConnector middleware(host, port, sensorName, &context);
    context.Attach(&middleware);
    context.Notify(); //register on middleware

    cout << "camera initializing\n";
    VideoSettings cam(camNum + CAP_V4L); // force the V4L capture backend
    cout << "camera initialized\n";
    cout << "Ch3ck c4m3ra is 0p3n3d\n";
    if (!cam.isOpened())
    {
        cout << "Could not open reference " << sourceReference << endl;
        return -1;
    }

    cout<<"===================================="<<endl<<endl;
    cout<<"Default Brightness %-------> "<<cam.getBrightness()<<endl;
    cout<<"Default Contrast %---------> "<<cam.getContrast()<<endl;
    cout<<"Default Saturation %-------> "<<cam.getSaturation()<<endl;
    cout<<"Default Gain %-------------> "<<cam.getGain()<<endl;
    cout<<"Default hue %--------------> "<<cam.getHue()<<endl<<endl;
    cout<<"====================================\n\n"<<endl;

    /* Grab one frame first so the window can be sized to the camera's
     * native resolution. */
    Mat frame;
    cam >> frame;
    SDL_Window* win = SDL_CreateWindow("Camera", 100, 100, frame.cols, frame.rows,
        SDL_WINDOW_SHOWN | SDL_WINDOW_RESIZABLE);
    int oldWidth = frame.cols, oldHeight = frame.rows;
    int width, height;
    float scaleX = 1, scaleY = 1;
    SDL_SetWindowTitle(win, "Camera");
    SDL_Renderer* renderer = SDL_CreateRenderer(win, -1, 0);

    SDL_Event genericEvent;
    SDL_Surface* frameSurface = NULL;  // recreated and freed every frame
    SDL_Texture* frameTexture = NULL;  // recreated and freed every frame
    cv::Size blur_kernel = cv::Size(5, 5);
    bool running = true;
    while (running && cam.isOpened())
    {
        while (SDL_PollEvent(&genericEvent))
        {
            switch (genericEvent.type)
            {
                case SDL_KEYDOWN:
                    break;
                case SDL_KEYUP:
                    keyboardCallback(genericEvent.key);
                    break;
                case SDL_MOUSEBUTTONDOWN:
                    mouseCallback(genericEvent.button, scaleX, scaleY, frame);
                    break;
                case SDL_WINDOWEVENT:
                    if (genericEvent.window.event == SDL_WINDOWEVENT_RESIZED)
                    {
                        /* Track how far the window diverged from the native
                         * frame size so clicks can be mapped back to frame
                         * coordinates in mouseCallback(). */
                        SDL_GetWindowSize(win, &width, &height);
                        scaleX = (float)(width) / (float)(oldWidth);
                        scaleY = (float)(height) / (float)(oldHeight);
                    }
                    break;
                case SDL_QUIT:
                    /* Fix: leave the main loop instead of returning here, so
                     * the SDL renderer/window are destroyed below (the
                     * original `return 0;` leaked them). */
                    running = false;
                    break;
                default:
                    break;
            }
        }
        if (!running)
            break;

        Mat blurred_frame, frame_out;
        frame_out = frame.clone();
        cv::cvtColor(frame, blurred_frame, cv::COLOR_BGR2GRAY);
        cv::GaussianBlur(blurred_frame, blurred_frame, blur_kernel, 3, 3);

        /* Classify each configured parking polygon: the mean absolute
         * Laplacian inside the polygon mask is compared against its stored
         * threshold (0.35 tolerance — TODO confirm the intended semantics). */
        Mat roi, laplacian;
        Scalar delta;
        for (int ii = 0; ii < context.getPolysNumber(); ii++)
        {
            roi = blurred_frame(context.getParks().at(ii).getBoundingRect());
            cv::Laplacian(roi, laplacian, CV_64F);
            delta = cv::mean(cv::abs(laplacian), context.getParks().at(ii).getMask());
            context.setParkingStatus(ii, abs(delta[0] - context.getParks().at(ii).getThreshold()) < 0.35);
        }
        cam >> frame;
        context.draw(frame_out);

        /* Wrap the BGR pixel buffer in an SDL surface (no pixel copy). */
        frameSurface = SDL_CreateRGBSurfaceFrom((void*)frame_out.data,
            frame_out.size().width, frame_out.size().height,
            24, frame_out.cols * 3,
            0xff0000, 0x00ff00, 0x0000ff, 0);
        if (frameSurface == NULL)
        {
            SDL_Log("Couldn't convert Mat to Surface.");
            SDL_DestroyRenderer(renderer);
            SDL_DestroyWindow(win);
            return -2;
        }

        frameTexture = SDL_CreateTextureFromSurface(renderer, frameSurface);
        if (frameTexture == NULL)
        {
            SDL_Log("Couldn't convert Mat(converted to surface) to Texture."); //<- ERROR!!
            SDL_FreeSurface(frameSurface);
            SDL_DestroyRenderer(renderer);
            SDL_DestroyWindow(win);
            return -1;
        }

        SDL_RenderCopy(renderer, frameTexture, NULL, NULL);
        SDL_RenderPresent(renderer);

        /* Fix: release the per-frame surface and texture. The original
         * recreated both every iteration without ever freeing them, leaking
         * memory steadily for as long as the program ran. */
        SDL_DestroyTexture(frameTexture);
        frameTexture = NULL;
        SDL_FreeSurface(frameSurface);
        frameSurface = NULL;

        /* waitKey doubles as the inter-frame delay; `delay` comes from
         * constants.hpp — presumably milliseconds, confirm there. */
        if (waitKey(delay) > delay) { ; }
    }

    SDL_DestroyRenderer(renderer);
    SDL_DestroyWindow(win);
    SDL_Quit();
    return 0;
}

void onTrackbar_changed(int, void* data)
{
 VideoSettings cam = *((VideoSettings*)data);
 cam.update();
}

void onMouse(int evt, int x, int y, int flags, void* param)
{

    if(evt == EVENT_LBUTTONDOWN)
    {
      context.mouseLeft( x, y);

    }
    else if(evt == EVENT_RBUTTONDOWN)
    {
        try
        {
            Mat* fr = (Mat* ) param;
            context.setFrame(*fr);
        }
        catch (const std::exception& e)
        {
            cout<<"onMouse frame not converted"<<endl;
        }
        context.mouseRight();
    }
}

/// Dispatches an SDL key-release to the matching handler on the global
/// InputStateContext. Arrow keys map to +1 (up/right) or -1 (down/left).
/// Always returns 0.
int keyboardCallback(SDL_KeyboardEvent ev)
{
    const SDL_Keycode key = ev.keysym.sym;

    if (key == SDLK_a)
    {
        cout<<"calling context keyboardA"<<endl;
        context.keyboardA();
    }
    else if (key == SDLK_e)
    {
        cout<<"calling context keyboardE"<<endl;
        context.keyboardE();
    }
    else if (key == SDLK_m)
    {
        cout<<"calling context keyboardM"<<endl;
        context.keyboardM();
    }
    else if (key == SDLK_UP || key == SDLK_RIGHT)
    {
        cout<<"calling context RIGHT ARROW"<<endl;
        context.keyboardArrows(1);
    }
    else if (key == SDLK_DOWN || key == SDLK_LEFT)
    {
        cout<<"calling context LEFT ARROW"<<endl;
        context.keyboardArrows(-1);
    }
    else if (key == SDLK_RETURN)
    {
        cout<<"calling context RETURN ARROW"<<endl;
        context.keyboardReturn();
    }
    return 0;
}


/// SDL mouse-button handler.
/// Left click: maps window coordinates back to native frame coordinates
/// (the window may have been resized — see the SDL_WINDOWEVENT_RESIZED
/// handler in main) and forwards them to the state machine.
/// Right click: hands the current frame to the context, then signals the
/// right-click event.
/// @return always 0.
int mouseCallback(SDL_MouseButtonEvent ev, float scaleX, float scaleY, Mat frame)
{
    if(ev.button == SDL_BUTTON_LEFT)
    {
        cout<<scaleX<<" "<<scaleY<<endl;
        int scaled_x =  static_cast<int> ((float)(ev.x)/scaleX);
        int scaled_y = static_cast<int> ((float)(ev.y)/ scaleY);
        std::cout<<"scaled x: "<<scaled_x<<", scaled y: "<<scaled_y<<endl;
        context.mouseLeft( scaled_x,scaled_y);
    }
    else if(ev.button == SDL_BUTTON_RIGHT)
    {
        try
        {
            context.setFrame(frame);
        }
        catch (const std::exception& e)
        {
            cout<<"onMouse frame not converted"<<endl;
        }
        context.mouseRight();
    }
    /* Fix: the original declared `int` but control fell off the end of the
     * function — undefined behaviour. Return 0 explicitly. */
    return 0;
}

I have a separate server/client application (in C) (currently using TCP, but I will change it to UDP) where the server will run on the same host as the program streaming from the webcam. I would like, when the client connects, the server to take the webcam frame (for example, every n seconds) and send it to the client(s) through websockets.

I would prefer to keep the two parts separate for simplicity, and because one might exist without the other.

However, I do not know exactly how to make the webcam program and the server program communicate (possibly bi-directionally) using the standard POSIX methods.

At first I thought calling fork() from the Opencv program and then run exec to launch the server program. However the server program is programmed to become a daemon process so I cannot use typical parent-child process communication (pipe).

I might call fork() from the server program and then exec the OpenCV program so that they have a parent-child relationship, which would allow the use of pipes. But I do not think this is correct.

Other solutions can be:

  • FIFO (named pipes)
  • message queues
  • shared memory (might memory be a problem in this way?)

回答1:


Following on from what I said on my comment, I would suggest you consider Redis which is a very fast, in-memory, data-structure server. It can serve strings, hashes, list, sets, queues and so on. It is simple to install and has a small footprint. It can be accessed from C/C++, bash, Perl, Python, PHP, Java, Ruby and others.

Also, it is networked so you can dump data into it from one, or more hosts, and collect it from any other host with any other language.

So, this is a sample generator that generates 1000 images and stuffs them into Redis as fast as possible, naming the frames f-0, f-1, f-2 . Each frame is given a "Time-to-live" so that it automatically gets deleted after 10 seconds and doesn't sit around in memory too long.

#!/bin/bash
################################################################################
# generate - generate "video" frames and dump into Redis
################################################################################

# Redis host
host=127.0.0.1

# Clear out Redis of data from any previous run
redis-cli -h "$host" flushall > /dev/null

# Generate 1000 frames, with Redis keys "f-0", "f-1", "f-2"... each expiring
# after $ttl seconds so stale frames never accumulate in memory.
ttl=10

for ((i=0;i<1000;i++)) ; do
   frameName="f-$i"
   echo "Generating frame: $frameName"
   # ImageMagick renders a labelled placeholder JPEG to stdout; `redis-cli -x`
   # reads the binary payload from stdin and SETEXes it under the frame key.
   # (Fix: quote all expansions — shellcheck SC2086.)
   convert -size 640x480 -background magenta -fill white -gravity center label:"Frame: $i" JPG: | redis-cli -h "$host" -x setex "$frameName" "$ttl" > /dev/null
   # Publish the index of the newest frame so readers know which key to fetch.
   redis-cli -h "$host" set currFrame "$i" > /dev/null
done

The convert command in the middle is ImageMagick and it just generates a frame of "video" like this:


Then there is a monitor script that checks how much memory Redis is using, like this:

#!/bin/bash
################################################################################
# monitor
################################################################################
host=127.0.0.1

# Report Redis memory usage: each redis-cli invocation repeats INFO 100 times
# (-r 100) at one-second intervals (-i 1); grep keeps only the used_memory:
# line. The outer loop restarts the sampling indefinitely.
while :; do
   redis-cli -h $host -r 100 -i 1 info | grep used_memory:
done

And then there is a latest script that gets the latest frame from Redis and displays it:

#!/bin/bash
################################################################################
# latest - display latest video frame from Redis cache
################################################################################

# Redis host
host=127.0.0.1

# Get currFrame from Redis (--raw keeps the value unquoted so it can be used
# directly in the key name below). Fix: quote expansions — shellcheck SC2086.
currFrame=$(redis-cli --raw -h "$host" get currFrame)

# Fetch the frame bytes and open them with the default viewer
# ("open" is macOS; use xdg-open on Linux).
redis-cli -h "$host" get "f-$currFrame" > current.jpg
open current.jpg

Here is a video of it in action:



来源:https://stackoverflow.com/questions/54030359/proper-inter-process-communication-method-between-opencv-c-webcam-program-and

标签
易学教程内所有资源均来自网络或用户发布的内容,如有违反法律规定的内容欢迎反馈
该文章没有解决你所遇到的问题?点击提问,说说你的问题,让更多的人一起探讨吧!