ubuntu环境下opencv学习+踩坑

橙三吉。 提交于 2019-11-27 12:10:24

opencv学习+踩坑

环境

  • ubuntu 19.04
  • vscode 1.37.0
  • opencv 3.4.7
  • cmake 3.13.4

拜一下julao的数字图像处理提纲
https://bitlecture.github.io/notes/数字图像处理/

然后开始跟着毛星云的blog跑demo来学opencv
实际上如果论实用性的话,以下的系列blog可能还会更好一些?
https://blog.csdn.net/morewindows/article/category/1291764

文件读取和输出
https://blog.csdn.net/poem_qianmo/article/details/20537737

定义图像

Mat image = imread("Filename");   // load an image from disk into a Mat
namedWindow("Windowname");        // create a named display window
imshow("Windowname",image);       // show the image in that window

需要注意的是图片要放到build的文件夹里面,如果没能成功imread的话,会报错——

error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'imshow'

视频读取

VideoCapture cap;
cap.open("Filename");
打开摄像头
cap.open(0);

检测是否读取到的方法:

//Method 1: the data pointer is null when imread failed
if(!image.data){printf("未能读取")};
//Method 2: empty() is the idiomatic check
if(image.empty()){printf("未能读取")};

划定特定区域(ROI)
https://blog.csdn.net/poem_qianmo/article/details/20911629

 Mat imageROI;
//Method 1: Rect(x, y, width, height) — the ROI shares storage with srcImage4
imageROI= srcImage4(Rect(200,250,logoImage.cols,logoImage.rows));
//Method 2: Range(rowStart, rowEnd), Range(colStart, colEnd) — same ROI as above
imageROI= srcImage4(Range(250,250+logoImage.rows),Range(200,200+logoImage.cols));

图像变换应该也挺重要的
https://blog.csdn.net/xiaowei_cqu/article/details/7616044

图像线性混合
使用addWeighted可以直接混合两张图片,

int main()
{
    // Blend weights: result = alpha*ROI + beta*name + gamma.
    double alphavalue = 0.5;
    double betavalue;

    Mat satori = imread("satori.jpg");
    Mat name = imread("name.png");

    // BUG FIX: the original wrote `exit;`, which merely names the exit()
    // function and discards the expression — it never terminates the
    // program. Return an error code instead.
    if(satori.empty()){cout << "未能成功读取图片satori" << endl;return -1;}
    if(name.empty()){cout << "未能成功读取图片satori2" << endl;return -1;}

    betavalue = 1 - alphavalue;
    // Carve an ROI the size of `name` out of satori's top-left corner.
    // The ROI shares pixel storage with satori, so writing into it
    // modifies satori itself.
    Mat ROI = satori(Rect(0,0,name.cols,name.rows));
    // Blend `name` into the ROI in place (0. is the scalar gamma term).
    addWeighted(ROI,alphavalue,name,betavalue,0.,ROI);

    namedWindow("混合效果");
    imshow("混合效果",satori);

    waitKey();
    return 0;
}

分离/合并颜色通道
split()/merge()

#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "iostream"

using namespace cv;
using namespace std;

int main()
{    
    Mat satori = imread("satori.jpg");
    // Flag 0 loads the image as single-channel grayscale.
    Mat name = imread("name.png",0);
    vector<Mat>channels;
    Mat blue_channel;

    // BUG FIX: `exit;` only names the exit() function without calling it,
    // so the original never aborted on a failed load. Return instead.
    if(satori.empty()){cout << "未能成功读取图片satori" << endl;return -1;}
    if(name.empty()){cout << "未能成功读取图片satori2" << endl;return -1;}

    // Split the image into its single-channel planes.
    split(satori,channels);
    // imread stores pixels in BGR order, so index 0 is the blue plane.
    blue_channel = channels.at(0);

    // Blend the grayscale logo into the top-left corner of the blue plane
    // (src and dst are the same ROI, so the blend happens in place).
    addWeighted(blue_channel(Rect(0,0,name.cols,name.rows)),1.0,name,0.5,0,blue_channel(Rect(0,0,name.cols,name.rows)));
    // Recombine the planes; channels[0] still aliases blue_channel,
    // so the edit above ends up in the merged image.
    merge(channels,satori);

    namedWindow("混合效果");
    imshow("混合效果",satori);

    waitKey();
    return 0;
}

从颜色通道的角度来说,可以扒掉另外两个通道,只留一个通道做合成来形成单色图片
opencv里面可以设置图片类型,比如CV_8UC1,就是unsigned int8+channel_1,所以这里的操作还是挺简单的,就是用black来取代掉另外两个通道(black意味着灰度值为0),把它给另外两个通道即可

#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "iostream"

using namespace cv;
using namespace std;

int main()
{    
    Mat satori = imread("satori.jpg");

    // BUG FIX: `exit;` is a no-op expression (it names exit() without
    // calling it) — return an error code instead. The check is also moved
    // before the vector construction so we never size containers from an
    // empty Mat.
    if(satori.empty()){cout << "未能成功读取图片satori" << endl;return -1;}

    vector<Mat> channels(satori.channels());
    vector<Mat> channels_mix(satori.channels());
    Mat mixed;

    int w = satori.cols;
    int h = satori.rows;

    split(satori,channels);

    // All-zero single-channel plane used to blank out two channels.
    Mat black;
    black.create(h,w,CV_8UC1);
    black = Scalar(0);

    // Keep only the blue plane (index 0 in BGR order); green and red are
    // replaced by black, yielding a blue-tinted monochrome image.
    channels_mix[0] = channels[0];
    channels_mix[1] = black;
    channels_mix[2] = black;

    merge(channels_mix,mixed);

    imshow("mixed",mixed);

    waitKey();
    return 0;
}

颜色通道和ROI,以及线性混合的内容再补充一个画矩形?
https://blog.csdn.net/wc781708249/article/details/78518447

会用rectangle就行了

这个是边缘查找,感觉也是个有意思的demo
https://www.cnblogs.com/skyfsm/p/6890863.html

还是继续跑demo,拿小圆当看板是有点东西的,tracebar的话,应该相当于提供了类似嵌入式开发中的在线debug一样的功能?
https://blog.csdn.net/poem_qianmo/article/details/21479533

关于向量这个数据类型
https://www.cnblogs.com/mr-wid/archive/2013/01/22/2871105.html

#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"

using namespace cv;
using namespace std;

// Grayscale source image and threshold shared with the trackbar callback.
Mat satori;
int threval = 160;

// Trackbar callback: binarize the image, find contours, and draw each
// top-level contour filled with a random color.
static void trace_bar(int,void*)
{
    // Flip the comparison direction at 128 so the binary mask always has
    // content: high threval keeps dark pixels, low threval keeps bright ones.
    Mat image = threval > 128? (satori < threval) : (satori > threval);
    vector<vector<Point>> contours;
    vector<Vec4i> hierarchy;

    // CV_RETR_CCOMP builds a two-level hierarchy: outer boundaries and holes.
    findContours(image,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE);

    Mat dst = Mat::zeros(satori.size(),CV_8UC3);

    if(!contours.empty() && !hierarchy.empty())
    {
        // hierarchy[i][0] is the index of the next contour at the same
        // level (-1 at the end), so this walks all top-level contours.
        for (int i = 0; i >=0; i=hierarchy[i][0])
        {
            // Random BGR color per contour; rand()&255 clamps to 0..255.
            Scalar color((rand()&255),(rand()&255),(rand()&255));
            drawContours(dst,contours,i,color,CV_FILLED,8,hierarchy);
        }
    }

    imshow("satori",dst);
}

int main()
{    
    // Load as grayscale (flag 0); the thresholding in the callback needs
    // a single-channel image.
    satori = imread("satori.jpg",0);

    // BUG FIX: `exit;` never terminates the program (it only names the
    // exit() function) — return an error code instead.
    if(satori.empty()){cout << "未能成功读取图片satori" << endl;return -1;}

    namedWindow("satori");
    // Slider drives the binarization threshold; the callback redraws.
    createTrackbar("treashould","satori",&threval,255,trace_bar);

    // Render once so the window has content before any slider interaction.
    trace_bar(threval,0);

    waitKey();
    return 0;
}

所以实际上主要就是找轮廓+填色,有点意思

#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"

using namespace cv;
using namespace std;

int Contrast,Bright;
Mat srcImage,dstImage;

// Trackbar callback applying a per-pixel linear transform:
//   dst = (Contrast * 0.01) * src + Bright
// for each of the three channels.
static void trace_bar(int,void *)
{
    // Iterate rows in the outer loop: Mat data is stored row-major, so
    // this order is cache-friendly (the original iterated columns first).
    for (int j = 0; j < srcImage.rows; j++)
    {
        for (int i = 0; i < srcImage.cols; i++)
        {
            for (int k = 0; k < 3; k++)
            {
                // saturate_cast clamps the result into the uchar range.
                dstImage.at<Vec3b>(j,i)[k] = saturate_cast<uchar>((Contrast*0.01)*srcImage.at<Vec3b>(j,i)[k] + Bright);
            }
        }
    }

    imshow("satori",dstImage);
}

int main()
{
    srcImage = imread("satori.jpg");
    if (srcImage.empty())
    {
        cout << "未能成功读取图片satori" << endl;
        return -1;
    }

    // Output buffer with the same geometry and type as the source.
    dstImage = Mat::zeros(srcImage.size(), srcImage.type());

    // Starting slider positions.
    Contrast = 80;
    Bright = 80;

    namedWindow("satori");
    createTrackbar("contrast", "satori", &Contrast, 255, trace_bar);
    createTrackbar("bright", "satori", &Bright, 255, trace_bar);

    // Render once up front so the window is not blank before the first
    // slider event.
    trace_bar(Contrast, 0);
    trace_bar(Bright, 0);

    waitKey();
    return 0;
}

这个demo主要是试了一下针对像素调bright和contrast,我没想到居然就是这么简单的线性运算关系,另外就是对单独的像素操作
其实我们已经看出来了,图片的一种表现方式就是每个Image.at(width,height)[channel]的集合,这个值的大小包含了像素的位置,色度这两个关键信息

到滤波了
https://blog.csdn.net/poem_qianmo/article/details/22745559
https://blog.csdn.net/xiaowei_cqu/article/details/7785365

方框滤波——boxblur函数
均值滤波(邻域平均滤波)——blur函数
高斯滤波——GaussianBlur函数
中值滤波——medianBlur函数
双边滤波——bilateralFilter函数

https://wenku.baidu.com/view/f55e1bc6f90f76c661371ac5.html
二维卷积挺有用的,包括之后做边沿检测用的sobel算子等

https://blog.csdn.net/dang_boy/article/details/76150067

#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"

using namespace cv;
using namespace std;


// Demo of the five classic smoothing filters, each shown in its own window.
int main()
{    
    Mat srcImage = imread("satori.jpg");
    if(srcImage.empty()){cout << "未能成功读取图片satori" << endl;return -1;}

    // Each filter call allocates/overwrites its destination in full, so
    // the original's five srcImage.clone() calls were redundant copies —
    // default-constructed Mats suffice.
    Mat dstImage1,dstImage2,dstImage3,dstImage4,dstImage5;

    imshow("原图",srcImage);

    // Box filter, 5x5 kernel; ddepth = -1 keeps the source depth.
    boxFilter(srcImage,dstImage1,-1,Size(5,5));
    imshow("方框滤波",dstImage1);

    // Mean (normalized box) filter over a 5x5 neighborhood.
    blur(srcImage,dstImage2,Size(5,5));
    imshow("均值滤波",dstImage2);

    // Gaussian blur, 3x3 kernel; sigma 0 lets OpenCV derive it from the size.
    GaussianBlur(srcImage,dstImage3,Size(3,3),0,0);
    imshow("高斯滤波",dstImage3);

    // Median filter with a 5x5 aperture — strong against salt-and-pepper noise.
    medianBlur(srcImage,dstImage4,5);
    imshow("中值滤波",dstImage4);

    // Bilateral filter: d=25, sigmaColor=50, sigmaSpace=12 — edge-preserving.
    bilateralFilter(srcImage,dstImage5,25,25*2,25/2);
    imshow("双边滤波",dstImage5);

    waitKey();
    destroyAllWindows();

    return 0;
}

简单的滤波跑了一下而已,Size(w,h)规定了卷积核的大小,卷积核的大小会影响模糊的效果

然后是非线性滤波
中值滤波和双边滤波

双边滤波的效果非常神奇,把原图上一些类似于陈旧的纹理一样的效果给修没了,非常6p(磨皮?)

膨胀腐蚀

#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"

using namespace cv;
using namespace std;

int Elem_Size = 3;
int value1;

Mat srcImage,dstImage;

static void tracebar(int,void*)
{
    Mat element = getStructuringElement(MORPH_RECT,Size(2*Elem_Size+1,2*Elem_Size+1),Point(Elem_Size,Elem_Size));
    
    if(value1 == 0)
    {
        erode(srcImage,dstImage,element);
    }
   else
   {
       dilate(srcImage,dstImage,element);
   }
   
    
    imshow("satori",dstImage);
}

int main()
{
    srcImage = imread("satori.jpg");
    if (srcImage.empty())
    {
        cout << "未能成功读取图片satori" << endl;
        return -1;
    }

    // Initial render: one erosion with the default kernel size, so the
    // window exists before the trackbars are attached to it.
    const int side = 2 * Elem_Size + 1;
    Mat element = getStructuringElement(MORPH_RECT, Size(side, side), Point(Elem_Size, Elem_Size));
    erode(srcImage, dstImage, element);
    imshow("satori", dstImage);

    // Sliders: operation selector (0/1) and kernel radius (0..21).
    createTrackbar("腐蚀/膨胀", "satori", &value1, 1, tracebar);
    createTrackbar("内核尺寸", "satori", &Elem_Size, 21, tracebar);

    tracebar(value1, 0);
    tracebar(Elem_Size, 0);

    // Spin until 'q' is pressed.
    while (char(waitKey(1)) != 'q')
        ;

    return 0;
}

腐蚀是将暗的像素扩大,膨胀是将亮的像素扩大

在这个基础上还有开运算,闭运算,黑帽运算…
开运算其实就是分开细微链接的像素,闭运算是填平小的裂痕
https://blog.csdn.net/hanshanbuleng/article/details/80657148

// Rectangular structuring element, side 2*Elem_Size+1, anchored at center.
Mat element = getStructuringElement(MORPH_RECT,Size(2*Elem_Size+1,2*Elem_Size+1),Point(Elem_Size,Elem_Size));

    // MORPH_OPEN = erode then dilate; other ops (MORPH_CLOSE, MORPH_BLACKHAT,
    // ...) are selected via this third parameter.
    morphologyEx(srcImage,dstImage,MORPH_OPEN,element);

更改第三个参数即可

终于到快乐的算子环节了

在具体介绍之前,先来一起看看边缘检测的一般步骤吧。

  • 1)滤波:边缘检测的算法主要是基于图像强度的一阶和二阶导数,但导数通常对噪声很敏感,因此必须采用滤波器来改善与噪声有关的边缘检测器的性能。常见的滤波方法主要有高斯滤波,即采用离散化的高斯函数产生一组归一化的高斯核(具体见“高斯滤波原理及其编程离散化实现方法”一文),然后基于高斯核函数对图像灰度矩阵的每一点进行加权求和(具体程序实现见下文)。

  • 2)增强:增强边缘的基础是确定图像各点邻域强度的变化值。增强算法可以将图像灰度点邻域强度值有显著变化的点凸显出来。在具体编程实现时,可通过计算梯度幅值来确定。

  • 3)检测:经过增强的图像,往往邻域中有很多点的梯度值比较大,而在特定的应用中,这些点并不是我们要找的边缘点,所以应该采用某种方法来对这些点进行取舍。实际工程中,常用的方法是通过阈值化方法来检测。

边缘检测应该是RM里面非常常用的算法了,识别装甲板应该主要就用了这个,识别到边缘之后solvepnp

#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"

using namespace cv;
using namespace std;

// NOTE(review): Elem_Size and value1 are unused in this demo — they look
// carried over from the morphology example above.
int Elem_Size = 1;
int value1;

Mat srcImage,dstImage;

int main()
{    
    srcImage = imread("satori.jpg");
    if(srcImage.empty()){cout << "未能成功读取图片satori" << endl;return -1;}

    // Canny edge detection. The thresholds are given high-then-low here;
    // per the OpenCV docs the smaller of the two is used for edge linking
    // and the larger for strong edges, so the order does not matter.
    Canny(srcImage,dstImage,300,100);

    imshow("satori",dstImage);
    
    // Poll until 'q' is pressed.
    while (char(waitKey(1)) != 'q');
    
    return 0;
}

用canny很容易就可以看到效果,调节一下两个阈值则可以起到抑制噪声的作用

sobel算子可以计算x方向和y方向各自的梯度方向,相比canny而言,可以在一些相对比较特定(特征在x/y方向)的场景起到作用

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/opencv.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "iostream"

    using namespace cv;
    using namespace std;

    int Elem_Size = 1;
    int value1;

    Mat srcImage,dstImage,dstImage2,dstImage3;

    int main()
    {    
        Mat satori;

        satori = imread("satori.jpg");
        if(satori.empty()){cout << "未能成功读取图片satori" << endl;return -1;}

        imshow("image",satori);

        bilateralFilter(satori,srcImage,25,25*2,25/2);

        cvtColor(srcImage,srcImage,CV_RGB2GRAY);


        Sobel(srcImage,dstImage,srcImage.depth(),1,0,3,1,0,BORDER_DEFAULT);

        Sobel(srcImage,dstImage2,srcImage.depth(),0,1,3,1,0,BORDER_DEFAULT);

        imshow("satori",dstImage);
        imshow("satori2",dstImage2);
        
        addWeighted(dstImage,1,dstImage2,1,1,dstImage3);

        imshow("satori3",dstImage3);

        while (char(waitKey(1)) != 'q');
        
        return 0;
    }

结合了双边滤波后在x,y方向做sobel检测,然后合成,效果还行

结果试了一下双边滤波后做laplace检测,效果更好,啧啧

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/opencv.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "iostream"

    using namespace cv;
    using namespace std;

    int Elem_Size = 1;
    int value1;

    Mat srcImage,dstImage,dstImage2,dstImage3;

    int main()
    {    
        Mat satori;

        satori = imread("satori.jpg");
        if(satori.empty()){cout << "未能成功读取图片satori" << endl;return -1;}

        imshow("image",satori);

        bilateralFilter(satori,srcImage,25,25*2,25/2);

        cvtColor(srcImage,srcImage,CV_RGB2GRAY);

        Laplacian(srcImage,dstImage,srcImage.depth());

        imshow("satori",dstImage);
        
        while (char(waitKey(1)) != 'q');
        
        return 0;
    }

但是还有Scharr,可以看成对sobel的进一步优化?试试看效果

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/opencv.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "iostream"

    using namespace cv;
    using namespace std;

    int Elem_Size = 1;
    int value1;

    Mat srcImage,dstImage,dstImage2,dstImage3;

    int main()
    {    
        Mat satori;

        satori = imread("satori.jpg");
        if(satori.empty()){cout << "未能成功读取图片satori" << endl;return -1;}

        bilateralFilter(satori,srcImage,25,25*2,25/2);
       imshow("image",srcImage);

        cvtColor(srcImage,srcImage,CV_RGB2GRAY);


        Scharr(srcImage,dstImage,srcImage.depth(),1,0,1,0,BORDER_DEFAULT);

        Scharr(srcImage,dstImage2,srcImage.depth(),0,1,1,0,BORDER_DEFAULT);

        imshow("satori",dstImage);
        imshow("satori2",dstImage2);
        
        addWeighted(dstImage,1,dstImage2,1,1,dstImage3);

        imshow("satori3",dstImage3);

        while (char(waitKey(1)) != 'q');
        
        return 0;
    }

结果没看出什么优化,反而引入了更多的噪声…可能是我参数没继续调吧(另一层面上来说更加敏锐?

好了,我们到了快乐的resize阶段,还有pyrUp,pyrDown这两个金字塔放大缩小函数
https://blog.csdn.net/poem_qianmo/article/details/26157633

我觉得没啥特别好说的,就是研究怎么样尽可能合理的采样或者插值,然后高斯函数的优势又一次被体现出来了。不得不说这是个非常伟大的函数(我刚学到大数定律时就被这个函数的神奇性给吓到了)

    #include "opencv2/core/core.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/opencv.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "iostream"

    using namespace cv;
    using namespace std;

    // Interactive resize demo: 'w' doubles the current image, 's' halves it,
    // 'q' quits. Each resize starts from the previous result, so repeated
    // shrink-then-grow accumulates interpolation loss.
    int main()
    {    
        Mat tmpImage,dstImage;

        tmpImage = imread("satori.jpg");
        if(tmpImage.empty()){cout << "未能成功读取图片satori" << endl;return -1;}

        dstImage = tmpImage;
        while (1)
        {
            // waitKey(1) also pumps the GUI event loop for imshow below.
            char key = waitKey(1);

            switch (key)
            {
            case 'q':
                return 0;
                break;
            
            case 'w':
                // Double both dimensions (default bilinear interpolation).
                resize(tmpImage,dstImage,Size(tmpImage.cols*2,tmpImage.rows*2));
                break;

            case 's':
                // Halve both dimensions.
                resize(tmpImage,dstImage,Size(tmpImage.cols/2,tmpImage.rows/2));
                break;

            default:
                break;
            }

            // Make the result the new source for the next key press.
            tmpImage = dstImage;
            imshow("satori",dstImage);
        }
    }

不出意外的,缩小之后再放大之后会上天

霍夫线/圆检测算法

#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"

using namespace cv;
using namespace std;

int main()
{    
    Mat srcImage,dstImage,midImage;

    srcImage = imread("test.jpg");
    if(srcImage.empty()){cout << "未能成功读取图片" << endl;return -1;}

    // Build the edge map first: HoughLines expects a binary (edge) input.
    Canny(srcImage,midImage,400,100,3);

    // Convert the single-channel edge map to BGR so colored lines can be drawn.
    cvtColor(midImage,dstImage,CV_GRAY2BGR);

    // Each detected line is returned in polar form (rho, theta);
    // resolution 1 px / 1 degree, accumulator threshold 150 votes.
    vector<Vec2f> lines;
    HoughLines(midImage,lines,1,CV_PI/180,150,0,0);    

    for (size_t i = 0; i < lines.size(); i++)
    {
        float rho = lines[i][0] , theta = lines[i][1];
        Point pt1,pt2;
        // (x0, y0) is the point on the line closest to the origin; stepping
        // +/-1000 px along the line's direction vector (-b, a) gives two
        // endpoints far enough apart to span the whole image.
        double a = cos(theta),b = sin(theta);
        double x0 = a*rho,y0 = b*rho;
        pt1.x = cvRound(x0 + 1000 * (-b));
        pt1.y = cvRound(y0 + 1000 * (a));
        pt2.x = cvRound(x0 - 1000 * (-b));
        pt2.y = cvRound(y0 - 1000 * (a));

        // Draw the line in BGR(55,100,95), 1 px wide, anti-aliased.
        line(dstImage,pt1,pt2,Scalar(55,100,95),1,CV_AA);
    }    

    imshow("dst",dstImage);

    while(char(waitKey(1)) != 'q');
    return 0;
}

这个检测看得我头大
另外还有HoughLinesP这个检测方法,有点意思

to be continued

易学教程内所有资源均来自网络或用户发布的内容,如有违反法律规定的内容欢迎反馈
该文章没有解决你所遇到的问题?点击提问,说说你的问题,让更多的人一起探讨吧!