Question
I am working with the OpenCV color blob detection sample and reviewing the code. I need some help understanding what the code is doing so that I can work on it further and integrate it into my project. Could someone help me understand/comment the code so that it is easier to interpret?
public boolean onTouch(View v, MotionEvent event) {
int cols = mRgba.cols(); //get resolution of display
int rows = mRgba.rows(); // get resolution of display
int xOffset = (mOpenCvCameraView.getWidth() - cols) / 2; //get resolution of display
int yOffset = (mOpenCvCameraView.getHeight() - rows) / 2; // get resolution of display
int x = (int)event.getX() - xOffset; // get resolution of display
int y = (int)event.getY() - yOffset; //get resolution of display
Log.i(TAG, "Touch image coordinates: (" + x + ", " + y + ")");
if ((x < 0) || (y < 0) || (x > cols) || (y > rows)) return false;
Rect touchedRect = new Rect();
touchedRect.x = (x>4) ? x-4 : 0;
touchedRect.y = (y>4) ? y-4 : 0;
touchedRect.width = (x+4 < cols) ? x + 4 - touchedRect.x : cols - touchedRect.x;
touchedRect.height = (y+4 < rows) ? y + 4 - touchedRect.y : rows - touchedRect.y;
Mat touchedRegionRgba = mRgba.submat(touchedRect);
Mat touchedRegionHsv = new Mat();
Imgproc.cvtColor(touchedRegionRgba, touchedRegionHsv, Imgproc.COLOR_RGB2HSV_FULL);
// Calculate average color of touched region
mBlobColorHsv = Core.sumElems(touchedRegionHsv);
int pointCount = touchedRect.width*touchedRect.height;
for (int i = 0; i < mBlobColorHsv.val.length; i++)
mBlobColorHsv.val[i] /= pointCount;
//converts scalar to hsv to RGB
mBlobColorRgba = converScalarHsv2Rgba(mBlobColorHsv);
Log.i(TAG, "Touched rgba color: (" + mBlobColorRgba.val[0] + ", " + mBlobColorRgba.val[1] +
", " + mBlobColorRgba.val[2] + ", " + mBlobColorRgba.val[3] + ")");
mDetector.setHsvColor(mBlobColorHsv);
Imgproc.resize(mDetector.getSpectrum(), mSpectrum, SPECTRUM_SIZE);
mIsColorSelected = true;
touchedRegionRgba.release();
touchedRegionHsv.release();
return false; // don't need subsequent touch events
}
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
mRgba = inputFrame.rgba();
if (mIsColorSelected) {
mDetector.process(mRgba);
List<MatOfPoint> contours = mDetector.getContours();
Log.e(TAG, "Contours count: " + contours.size());
Imgproc.drawContours(mRgba, contours, -1, CONTOUR_COLOR);
Mat colorLabel = mRgba.submat(4, 68, 4, 68);
colorLabel.setTo(mBlobColorRgba);
Mat spectrumLabel = mRgba.submat(4, 4 + mSpectrum.rows(), 70, 70 + mSpectrum.cols());
mSpectrum.copyTo(spectrumLabel);
}
return mRgba;
}
//final conversion
private Scalar converScalarHsv2Rgba(Scalar hsvColor) {
Mat pointMatRgba = new Mat();
Mat pointMatHsv = new Mat(1, 1, CvType.CV_8UC3, hsvColor);
Imgproc.cvtColor(pointMatHsv, pointMatRgba, Imgproc.COLOR_HSV2RGB_FULL, 4);
return new Scalar(pointMatRgba.get(0, 0));
}
Answer 1:
I have been through and commented the code as best I can, although I think it is fairly self-documenting. In general, the code takes a small region of the image at the coordinates of the user's touch, converts its color space, averages its color, and resizes the detector's color spectrum for display.
// When a motion event happens (someone touches the device)
public boolean onTouch(View v, MotionEvent event) {
int cols = mRgba.cols(); // width of the camera frame in pixels
int rows = mRgba.rows(); // height of the camera frame in pixels
int xOffset = (mOpenCvCameraView.getWidth() - cols) / 2; // horizontal offset of the frame within the camera view
int yOffset = (mOpenCvCameraView.getHeight() - rows) / 2; // vertical offset of the frame within the camera view
int x = (int)event.getX() - xOffset; // convert the touch position from view coordinates to frame coordinates
int y = (int)event.getY() - yOffset;
//The place where the screen was touched
Log.i(TAG, "Touch image coordinates: (" + x + ", " + y + ")");
// ensure it is within the screen.
if ((x < 0) || (y < 0) || (x > cols) || (y > rows)) return false;
Rect touchedRect = new Rect();
//Build a small rectangle (at most 8x8 pixels) centered on the touch point,
//clamped so that it stays inside the frame
touchedRect.x = (x>4) ? x-4 : 0;
touchedRect.y = (y>4) ? y-4 : 0;
touchedRect.width = (x+4 < cols) ? x + 4 - touchedRect.x : cols - touchedRect.x;
touchedRect.height = (y+4 < rows) ? y + 4 - touchedRect.y : rows - touchedRect.y;
//Create a submat (region of interest) of the frame covering the touched rectangle
Mat touchedRegionRgba = mRgba.submat(touchedRect);
//Convert the new mat to HSV colour space
Mat touchedRegionHsv = new Mat();
Imgproc.cvtColor(touchedRegionRgba, touchedRegionHsv, Imgproc.COLOR_RGB2HSV_FULL);
// Calculate average color of touched region
mBlobColorHsv = Core.sumElems(touchedRegionHsv);
int pointCount = touchedRect.width*touchedRect.height;
for (int i = 0; i < mBlobColorHsv.val.length; i++)
mBlobColorHsv.val[i] /= pointCount;
//Convert the average HSV color to an RGBA scalar
mBlobColorRgba = converScalarHsv2Rgba(mBlobColorHsv);
Log.i(TAG, "Touched rgba color: (" + mBlobColorRgba.val[0] + ", " + mBlobColorRgba.val[1] +
", " + mBlobColorRgba.val[2] + ", " + mBlobColorRgba.val[3] + ")");
mDetector.setHsvColor(mBlobColorHsv);
// Resize the detector's color spectrum to SPECTRUM_SIZE so it can be drawn on screen
Imgproc.resize(mDetector.getSpectrum(), mSpectrum, SPECTRUM_SIZE);
mIsColorSelected = true;
// Release all mats
touchedRegionRgba.release();
touchedRegionHsv.release();
return false; // don't need subsequent touch events
}
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
mRgba = inputFrame.rgba();
if (mIsColorSelected) {
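// Run the blob detector on the current frame; it finds the contours of all regions matching the selected color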
mDetector.process(mRgba);
List<MatOfPoint> contours = mDetector.getContours();
Log.e(TAG, "Contours count: " + contours.size());
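// Draw every detected contour (index -1 means all contours) onto the frame in CONTOUR_COLOR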
Imgproc.drawContours(mRgba, contours, -1, CONTOUR_COLOR);
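// Fill a small 64x64 square in the top-left corner of the frame with the selected color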
Mat colorLabel = mRgba.submat(4, 68, 4, 68);
colorLabel.setTo(mBlobColorRgba);
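// Copy the resized spectrum swatch next to the color label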
Mat spectrumLabel = mRgba.submat(4, 4 + mSpectrum.rows(), 70, 70 + mSpectrum.cols());
mSpectrum.copyTo(spectrumLabel);
}
return mRgba;
}
//final conversion
private Scalar converScalarHsv2Rgba(Scalar hsvColor) {
Mat pointMatRgba = new Mat();
Mat pointMatHsv = new Mat(1, 1, CvType.CV_8UC3, hsvColor);
Imgproc.cvtColor(pointMatHsv, pointMatRgba, Imgproc.COLOR_HSV2RGB_FULL, 4);
return new Scalar(pointMatRgba.get(0, 0));
}
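As a side note, the sum-then-divide loop in onTouch is just computing the per-channel mean of the touched region, so the same average could also be obtained in a single call with OpenCV's Core.mean (using the same variables as above):
// Average HSV color of the touched region, equivalent to summing each channel and dividing by the pixel count
mBlobColorHsv = Core.mean(touchedRegionHsv);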
Answer 2:
For anyone who is also interested in what the method 'mDetector.process(mRgba)' is doing, the following may be useful.
As a recap from the other answer: when the user touches an area, the program detects the color of the area they touched.
Every time a new camera frame arrives, the program checks whether a color has been selected and, if so, calls 'mDetector.process(mRgba)' to process the frame.
This method is commented below, but the high-level steps are:
- reduce the size of the image
- filter out everything that is not the color we are looking for
- enhance the remaining parts of the image to make it easier to detect edges or contours
- find the contours (the outline or the edge) of the remaining 'blobs' of color
- filter out any that are too small to be of interest
- return the list of remaining blobs, or more accurately the list of contours which are the outline of the remaining blobs
The commented code (Android):
public void process(Mat rgbaImage) {
//Pyramid Down - this downsizes the image (halving it each time) and loses some resolution
//See: http://docs.opencv.org/2.4/doc/tutorials/imgproc/pyramids/pyramids.html
Imgproc.pyrDown(rgbaImage, mPyrDownMat);
Imgproc.pyrDown(mPyrDownMat, mPyrDownMat);
//Convert color scheme to HSV - this means that a color can be
//identified with a single value, the hue, instead of three values
Imgproc.cvtColor(mPyrDownMat, mHsvMat, Imgproc.COLOR_RGB2HSV_FULL);
//This creates a new image with only the color values that are within
//the lower and upper thresholds set in mLowerBound and mUpperBound. These
//values were calculated when the method 'setHsvColor' was called with the
//color of the object that the user touched on the screen.
//So you effectively get an image with just the red or just the blue or whatever
//the color of the blob that the user selected was. Note that if there are multiple
//blobs or objects with this color you will get them all. You can see this quite easily
//with a simple test of the app with a couple of similar colored objects.
Core.inRange(mHsvMat, mLowerBound, mUpperBound, mMask);
//After inRange the mask is binary: white where the color matched, black elsewhere.
//dilate grows the white regions, filling small gaps and making the blobs slightly
//larger, which makes their contours easier to find against the black background.
//See: http://docs.opencv.org/2.4/doc/tutorials/imgproc/erosion_dilatation/erosion_dilatation.html
Imgproc.dilate(mMask, mDilatedMask, new Mat());
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
//Finds the contours which in this case means the edge of the color blobs
Imgproc.findContours(mDilatedMask, contours, mHierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
// Find max contour area
//This is actually referring to the area enclosed by a contour. For this to work it is important
//that the contour be closed, so if this is not the case some objects may be missed here.
double maxArea = 0;
Iterator<MatOfPoint> each = contours.iterator();
while (each.hasNext()) {
MatOfPoint wrapper = each.next();
double area = Imgproc.contourArea(wrapper);
if (area > maxArea)
maxArea = area;
}
// Filter contours by area and resize to fit the original image size
//Here we simply discard any contour whose area is below mMinContourArea (a fraction,
//set via the method 'setMinContourArea' or left at its default) times the largest
//contour's area. In other words, any small objects detected are discarded.
mContours.clear();
each = contours.iterator();
while (each.hasNext()) {
MatOfPoint contour = each.next();
if (Imgproc.contourArea(contour) > mMinContourArea*maxArea) {
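//The contours were found on an image that was pyrDown'd twice (so a quarter of the
//original size); scale the contour coordinates back up by 4 to match the full frame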
Core.multiply(contour, new Scalar(4,4), contour);
mContours.add(contour);
}
}
}
//Now we return the list of contours - each contour is the closed outline of a region
//whose color matches the one the user selected when they touched the object.
//That color, as a reminder, was set by the earlier call to 'setHsvColor'.
public List<MatOfPoint> getContours() {
return mContours;
}
}
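For completeness, the answers above refer to 'setHsvColor', which is where mLowerBound and mUpperBound come from, but that method is not shown in the question. Roughly speaking it builds an HSV range around the touched color. The following is only a sketch of that idea - the field name mColorRadius and the tolerance values are assumptions for illustration, not the sample's exact code:
// Assumed tolerance around the touched HSV color (hypothetical values)
private Scalar mColorRadius = new Scalar(25, 50, 50, 0);
private Scalar mLowerBound = new Scalar(0);
private Scalar mUpperBound = new Scalar(0);
public void setHsvColor(Scalar hsvColor) {
    // Hue range: touched hue +/- radius, clamped to 0..255 (COLOR_RGB2HSV_FULL uses the full 8-bit hue range)
    mLowerBound.val[0] = Math.max(hsvColor.val[0] - mColorRadius.val[0], 0);
    mUpperBound.val[0] = Math.min(hsvColor.val[0] + mColorRadius.val[0], 255);
    // Saturation and value get a similar tolerance around the touched color
    mLowerBound.val[1] = hsvColor.val[1] - mColorRadius.val[1];
    mUpperBound.val[1] = hsvColor.val[1] + mColorRadius.val[1];
    mLowerBound.val[2] = hsvColor.val[2] - mColorRadius.val[2];
    mUpperBound.val[2] = hsvColor.val[2] + mColorRadius.val[2];
    // Alpha channel covers the full range
    mLowerBound.val[3] = 0;
    mUpperBound.val[3] = 255;
    // Core.inRange(mHsvMat, mLowerBound, mUpperBound, mMask) then keeps only pixels inside this range
}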
Source: https://stackoverflow.com/questions/29770832/open-cv-code-can-someone-help-me-understand-what-the-code-is-doing