Explain this code:

```csharp
Mat gray = img;
Cv2.CvtColor(img, gray, ColorConversionCodes.BGR2GRAY);
Mat equalImg = img;
Cv2.EqualizeHist(gray, equalImg);
hist = img;
int[] channels = { 0 };
Rangef[] inRanges = new Rangef[] { new Rangef(0, 256) };
int[] histSize = { 256 };
Mat[] equals = new Mat[] { equalImg };
Cv2.CalcHist(equals, channels, new Mat(), hist, 1, histSize, inRanges);
Mat histImage = Mat.Zeros(400, 512, MatType.CV_8UC3);
for (int i = 1; i <= hist.Rows; i++)
{
    Point p1 = new Point(2 * (i - 1), 400 - 1);
    Point p2 = new Point(2 * i - 1, 400 - hist.At<float>(i - 1) / 15);
    Scalar scalar = new Scalar(255, 255, 255);
    Cv2.Rectangle(histImage, p1, p2, scalar, -1);
}
result = histImage;
Bitmap bitmap1 = BitmapConverter.ToBitmap(result);
pictureBox1.Image = bitmap1;
```
This code performs histogram equalization on an image and displays the histogram of the equalized result in a PictureBox control of a WinForms application.

First, the variables gray and equalImg stand for the grayscale image and the equalized image; both are initially assigned a reference to the original image img. Cv2.CvtColor then converts the original image to grayscale, and Cv2.EqualizeHist equalizes the histogram of the grayscale image to produce the equalized image.

Next, Cv2.CalcHist computes the histogram of the equalized image. The channels array lists the indices of the channels to use; there is only one channel here, so it is { 0 }. The histSize array gives the number of bins per channel, here 256. The inRanges array gives the value range covered by those bins, here [0, 256), i.e. pixel values 0 to 255. The equals array holds the input images whose histogram is computed, here just the single equalized image.

Then, Cv2.Rectangle draws each histogram bin as a filled white bar on a 512x400 black canvas: each bar occupies a 2-pixel-wide column, and its height is proportional to the bin count (scaled down by a factor of 15 so it fits the 400-pixel-high canvas). Finally, BitmapConverter.ToBitmap converts the rendered histogram image to a Bitmap, which is assigned to the PictureBox control for display.
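For comparison, here is a minimal OpenCvSharp sketch of the same pipeline that uses separate Mat objects for gray, equalImg, and hist instead of aliasing them all to img; the variables img and pictureBox1 are assumed to come from the surrounding form code, as in the original snippet:

```csharp
// A minimal sketch; img (a BGR Mat) and pictureBox1 are assumed to exist in the surrounding form code.
Mat gray = new Mat();
Cv2.CvtColor(img, gray, ColorConversionCodes.BGR2GRAY);      // BGR -> single-channel grayscale

Mat equalImg = new Mat();
Cv2.EqualizeHist(gray, equalImg);                            // spread intensities over the 0-255 range

// Histogram of the equalized image: one channel, 256 bins covering [0, 256).
Mat hist = new Mat();
Cv2.CalcHist(new[] { equalImg }, new[] { 0 }, new Mat(), hist, 1,
             new[] { 256 }, new[] { new Rangef(0, 256) });

// Draw each bin as a filled 2-pixel-wide white bar on a 512x400 black canvas.
Mat histImage = Mat.Zeros(400, 512, MatType.CV_8UC3);
for (int i = 0; i < hist.Rows; i++)
{
    int barHeight = (int)(hist.At<float>(i) / 15);           // same 1/15 vertical scaling as the original
    Cv2.Rectangle(histImage,
                  new Point(2 * i, 399),
                  new Point(2 * i + 1, 400 - barHeight),
                  new Scalar(255, 255, 255), -1);
}

pictureBox1.Image = BitmapConverter.ToBitmap(histImage);
```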
Related Questions
Modify the code to eliminate the following error:

```
OpenCV Error: Assertion failed (scn == 3 || scn == 4) in cv::cvtColor, file C:\projects\bytedeco\javacpp-presets\opencv\cppbuild\windows-x86_64\opencv-3.1.0\modules\imgproc\src\color.cpp, line 8000
Exception in thread "main" java.lang.RuntimeException: C:\projects\bytedeco\javacpp-presets\opencv\cppbuild\windows-x86_64\opencv-3.1.0\modules\imgproc\src\color.cpp:8000: error: (-215) scn == 3 || scn == 4 in function cv::cvtColor
```

The code is as follows:

```java
cvtColor(src_blur, src_gray, CV_RGB2GRAY);
if (debug) {
    opencv_imgcodecs.imwrite("D:\\PlateLocate\\" + "gray" + ".jpg", src_gray);
    System.out.println("灰度" + "D:\\PlateLocate\\" + "gray" + ".jpg");
}

public int plateDetect(final Mat src, Vector<Mat> resultVec) {
    // plate localization
    Vector<Mat> matVec = plateLocate.plateLocate(src);
    if (0 == matVec.size()) {
        return -1;
    }
    // plate judgement
    if (0 != plateJudge.plateJudge(matVec, resultVec)) {
        return -2;
    }
    if (getPDDebug()) {
        int size = (int) resultVec.size();
        for (int i = 0; i < size; i++) {
            Mat img = resultVec.get(i); // localized plate image
            String str = "D:\\PlateLocate\\carPlateLocation.jpg";
            System.out.println("车牌定位图片" + str);
            opencv_imgcodecs.imwrite(str, img);
        }
    }
    return 0;
}

public static String[] multiPlateRecognise(opencv_core.Mat mat) {
    PlateDetect plateDetect = new PlateDetect();
    plateDetect.setPDLifemode(true);
    Vector<opencv_core.Mat> matVector = new Vector<opencv_core.Mat>(10);
    if (0 == plateDetect.plateDetect(mat, matVector)) {
        CharsRecognise cr = new CharsRecognise();
        String[] results = new String[matVector.size()];
        for (int i = 0; i < matVector.size(); ++i) {
            String result = cr.charsRecognise(matVector.get(i));
            results[i] = result;
        }
        return results;
    }
    return null;
}

public static String[] multiPlateRecognise(String imgPath) {
    opencv_core.Mat src = opencv_imgcodecs.imread(imgPath);
    return multiPlateRecognise(src);
}

public static void main(String[] args) {
    // paths of the plate images to process
    String[] imgPaths = {"res/image/test_image/plate_locate.jpg", "res/image/test_image/test.jpg", "res/image/test_image/plate_detect.jpg", "res/general_test/京A88731.jpg"};
    int sum = imgPaths.length;  // total number of images processed
    int errNum = 0;             // number of recognition errors
    int sumTime = 0;            // total elapsed time
    long longTime = 0;          // longest single-image processing time
    for (int i = 0; i < sum; i++) {
        opencv_cor
```
The error "OpenCV Error: Assertion failed (scn == 3 || scn == 4) in cv::cvtColor" indicates that the input image to the cvtColor function is not in the correct format. The function requires an image with 3 or 4 channels, but the input image has a different number of channels. To fix this error, you can check the number of channels in the input image before calling cvtColor function, and convert the image to the correct format if necessary. Here is an example code snippet:
```java
// Uses OpenCV's official Java bindings (org.opencv.core, org.opencv.imgproc).
Mat src_blur = new Mat();
Mat src_gray = new Mat();
Imgproc.GaussianBlur(src, src_blur, new Size(3, 3), 0);

if (src.channels() == 3) {
    Imgproc.cvtColor(src_blur, src_gray, Imgproc.COLOR_BGR2GRAY);   // BGR -> gray
} else if (src.channels() == 4) {
    Imgproc.cvtColor(src_blur, src_gray, Imgproc.COLOR_BGRA2GRAY);  // BGRA -> gray
} else if (src.channels() == 1) {
    src_gray = src_blur;                                            // already grayscale
} else {
    // handle the error case, e.g. print an error message or throw an exception
    throw new IllegalArgumentException("Unsupported number of channels: " + src.channels());
}
```
In this code, we first check how many channels the input image (src) has. If it has 3 channels we convert it to grayscale with the COLOR_BGR2GRAY constant, if it has 4 channels we use COLOR_BGRA2GRAY instead, and if it is already single-channel no conversion is needed at all. Any other channel count is treated as an error, for example by throwing an exception. If the image comes from imread, also make sure the file path is correct: a failed read returns an empty Mat, which triggers exactly this assertion.
```cpp
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui_c.h>
#include <opencv2/imgproc/imgproc.hpp>
using namespace std;
using namespace cv;

int main() {
    //Mat img = imread("cells.jpg", CV_LOAD_IMAGE_GRAYSCALE); // read in a grayscale image
    Mat img;
    Mat img1 = imread("G:/图像处理/实验课设/实验课设5.31/cell3.png");
    //cvtColor(img1, img, CV_BGR2GRAY);
    Mat img_blur;
    cvtColor(img1, img_blur, CV_BGR2GRAY);
    Mat img_thresh1;
    threshold(img_blur, img_thresh1, 0, 255, THRESH_BINARY_INV + THRESH_OTSU); // binarize
    Mat img_thresh;
    Mat element = getStructuringElement(MORPH_RECT, Size(35, 35), Point(-1, -1));
    erode(img_thresh1, img_thresh, element);
    //morphologyEx(img_thresh1, img_thresh, MORPH_OPEN, kernel);
    imshow("FUSHI", img_thresh);
    vector<vector<Point>> contours;
    findContours(img_thresh, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE); // find contours
    //findContours(canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
    Mat img_contours = Mat::zeros(img_thresh.size(), CV_8UC3);
    Scalar color(0, 0, 255);
    drawContours(img_contours, contours, -1, color);
    cout << "细胞个数:" << contours.size() << endl;
    imshow("去噪后的图像", img_blur);
    imshow("二值化后的图像", img_thresh);
    imshow("轮廓", img_contours);
    waitKey(0);
    return 0;
}
```

Using the 2015 version of OpenCV (x64 build), how can I use createTrackbar to create a slider control for the Size() used in the element line above?
Based on the code you provided, I understand your question to be how to use the createTrackbar function to adjust the size of element.

First, element is a structuring-element matrix used for the morphological operation. In your code it is created like this:
```cpp
Mat element = getStructuringElement(MORPH_RECT, Size(35, 35), Point(-1, -1));
```
To adjust the size of element with a slider, you need to create a trackbar with createTrackbar and rebuild element whenever the trackbar value changes, then re-apply the erosion so the displayed result reflects the new size.

Here is a simple example that uses createTrackbar to adjust the size of element:
```cpp
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace std;
using namespace cv;

int element_size = 35;   // initial size of the structuring element
Mat img_thresh1;         // binarized image (input to the erosion)
Mat img_thresh;          // eroded result that gets displayed
Mat element;

// Called whenever the trackbar moves: rebuild the structuring element
// and re-apply the erosion so the displayed image actually updates.
void on_trackbar(int, void*) {
    if (element_size < 1) element_size = 1;  // getStructuringElement needs a size of at least 1
    element = getStructuringElement(MORPH_RECT, Size(element_size, element_size), Point(-1, -1));
    erode(img_thresh1, img_thresh, element);
}

int main() {
    Mat img1 = imread("cell3.png");
    if (img1.empty()) {
        cerr << "Could not read cell3.png" << endl;
        return -1;
    }
    Mat img_blur;
    cvtColor(img1, img_blur, COLOR_BGR2GRAY);
    threshold(img_blur, img_thresh1, 0, 255, THRESH_BINARY_INV | THRESH_OTSU);

    namedWindow("FUSHI", WINDOW_NORMAL);
    namedWindow("Trackbar", WINDOW_NORMAL);
    createTrackbar("Element Size", "Trackbar", &element_size, 100, on_trackbar);
    on_trackbar(element_size, 0);   // compute the initial result

    while (true) {
        imshow("FUSHI", img_thresh);
        char c = (char)waitKey(10);
        if (c == 27) break;         // exit on ESC
    }
    return 0;
}
```
In this example we create a separate window named "Trackbar" and attach a slider named "Element Size" to it, with a maximum value of 100; inside the callback the value is clamped to at least 1, because getStructuringElement needs a positive kernel size. We also register a callback function, on_trackbar, which is invoked every time the slider value changes.

Inside the callback we rebuild element from the current slider value and re-run the erosion, so img_thresh always reflects the chosen kernel size. The main loop keeps showing the current result in the "FUSHI" window and uses waitKey to poll for the ESC key, which exits the program.

I hope this helps you solve the problem.