```
#include <iostream>
#include <opencv2/highgui.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;

int main()
{
    Mat img = imread("F:\\图像处理\\图片\\待修复图像.png");
    if (img.empty())
    {
        cout << "Please check that the file name is correct!" << endl;
        return -1;
    }
    imshow("img", img);
    // Convert to grayscale
    Mat gray;
    cvtColor(img, gray, COLOR_BGR2GRAY);
    // Generate the mask by thresholding
    Mat imgMask;
    threshold(gray, imgMask, 245, 255, THRESH_BINARY);
    // Dilate the mask to enlarge its area
    Mat Kernel = getStructuringElement(MORPH_RECT, Size(3, 3));
    dilate(imgMask, imgMask, Kernel);
    // Inpaint the image
    Mat imgInpaint;
    inpaint(img, imgMask, imgInpaint, 5, INPAINT_NS);
    // Show the results
    imshow("imgMask", imgMask);
    imshow("img inpainted", imgInpaint);
    waitKey(0);
    return 0;
}
```
The same inpainting example written in Python with cv2:
```
import cv2

img = cv2.imread("F:\\图像处理\\图片\\待修复图像.png")
if img is None:
    print("Please check that the file name is correct!")
else:
    cv2.imshow("img", img)
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Generate the mask by thresholding
    ret, imgMask = cv2.threshold(gray, 245, 255, cv2.THRESH_BINARY)
    # Dilate the mask to enlarge its area
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    imgMask = cv2.dilate(imgMask, kernel)
    # Inpaint the image
    imgInpaint = cv2.inpaint(img, imgMask, 5, cv2.INPAINT_NS)
    # Show the results
    cv2.imshow("imgMask", imgMask)
    cv2.imshow("img inpainted", imgInpaint)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
```
Related questions
```
#include <iostream>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/video.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui_c.h>

using namespace cv;
using namespace std;

int main(int argc, char** argv)
{
    VideoCapture capture("D:/dvp/sample/dataset/traffic.mp4");
    if (!capture.isOpened()) {
        // error in opening the video input
        cerr << "Unable to open file!" << endl;
        return 0;
    }
    Mat frame, roi, hsv_roi, mask;
    // take first frame of the video
    capture >> frame;
    // setup initial location of window
    Rect track_window(300, 200, 100, 50); // simply hardcoded the values
    // set up the ROI for tracking
    roi = frame(track_window);
    cvtColor(roi, hsv_roi, COLOR_BGR2HSV);
    inRange(hsv_roi, Scalar(0, 60, 32), Scalar(180, 255, 255), mask);
    float range_[] = { 0, 180 };
    const float* range[] = { range_ };
    Mat roi_hist;
    int histSize[] = { 180 };
    int channels[] = { 0 };
    calcHist(&hsv_roi, 1, channels, mask, roi_hist, 1, histSize, range);
    normalize(roi_hist, roi_hist, 0, 255, NORM_MINMAX);
    // Setup the termination criteria, either 10 iterations or move by at least 1 pt
    TermCriteria term_crit(TermCriteria::EPS | TermCriteria::COUNT, 10, 1);
    while (true) {
        Mat hsv, dst;
        capture >> frame;
        if (frame.empty())
            break;
        cvtColor(frame, hsv, COLOR_BGR2HSV);
        calcBackProject(&hsv, 1, channels, roi_hist, dst, range);
        // apply meanshift to get the new location
        meanShift(dst, track_window, term_crit);
        // Draw it on image
        rectangle(frame, track_window, 255, 2);
        imshow("img2", frame);
        setMouseCallback("img2", onMouse, 0);
        int keyboard = waitKey(30);
        if (keyboard == 'q' || keyboard == 27)
            break;
    }
}
```
Please modify this code so that the tracked region can be selected with the mouse through a GUI.
Here is the modified code, which lets you select the region with the mouse through the GUI:
```
#include <iostream>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/video.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui_c.h>

using namespace cv;
using namespace std;

Rect box;                  // rectangle recording the region selected with the mouse
bool drawing_box = false;  // true while the rectangle is being dragged

void draw_box(Mat& img, Rect box)
{
    // draw the selection rectangle
    rectangle(img, box.tl(), box.br(), Scalar(0, 0, 255), 2);
}

void onMouse(int event, int x, int y, int flags, void* param)
{
    Mat& image = *(Mat*)param;
    switch (event)
    {
    case EVENT_MOUSEMOVE:
        if (drawing_box)
        {
            // update the rectangle while dragging
            box.width = x - box.x;
            box.height = y - box.y;
        }
        break;
    case EVENT_LBUTTONDOWN:
        // start drawing the rectangle
        drawing_box = true;
        box = Rect(x, y, 0, 0);
        break;
    case EVENT_LBUTTONUP:
        // finish drawing; normalize a negative width/height
        drawing_box = false;
        if (box.width < 0)
        {
            box.x += box.width;
            box.width *= -1;
        }
        if (box.height < 0)
        {
            box.y += box.height;
            box.height *= -1;
        }
        draw_box(image, box);
        break;
    }
}

int main(int argc, char** argv)
{
    VideoCapture capture("D:/dvp/sample/dataset/traffic.mp4");
    if (!capture.isOpened()) {
        // error in opening the video input
        cerr << "Unable to open file!" << endl;
        return 0;
    }
    Mat frame, roi, hsv_roi, mask;
    // take first frame of the video
    capture >> frame;
    namedWindow("img2");
    setMouseCallback("img2", onMouse, &frame);
    while (true) {
        capture >> frame;
        if (frame.empty()) break;
        // track only once the selection has been completed
        if (!drawing_box && !box.empty()) {
            // set up the ROI for tracking (note: the histogram is recomputed
            // from the current box position on every frame)
            roi = frame(box);
            cvtColor(roi, hsv_roi, COLOR_BGR2HSV);
            inRange(hsv_roi, Scalar(0, 60, 32), Scalar(180, 255, 255), mask);
            float range_[] = { 0, 180 };
            const float* range[] = { range_ };
            Mat roi_hist;
            int histSize[] = { 180 };
            int channels[] = { 0 };
            calcHist(&hsv_roi, 1, channels, mask, roi_hist, 1, histSize, range);
            normalize(roi_hist, roi_hist, 0, 255, NORM_MINMAX);
            // Setup the termination criteria, either 10 iterations or move by at least 1 pt
            TermCriteria term_crit(TermCriteria::EPS | TermCriteria::COUNT, 10, 1);
            Mat hsv, dst;
            cvtColor(frame, hsv, COLOR_BGR2HSV);
            calcBackProject(&hsv, 1, channels, roi_hist, dst, range);
            // apply meanshift to get the new location
            meanShift(dst, box, term_crit);
            // Draw it on the image
            rectangle(frame, box, 255, 2);
        }
        imshow("img2", frame);
        int keyboard = waitKey(30);
        if (keyboard == 'q' || keyboard == 27) break;
    }
    return 0;
}
```
In main we create a window named "img2" and register the mouse callback on it, passing the current frame as user data. In the main loop we check whether a rectangle has been drawn; if so, we set up the ROI from it and run the meanShift tracking step. On every iteration the frame is displayed, and the program exits when the user presses "q" or ESC.
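As an aside, if your OpenCV build is 3.2 or newer, the manual mouse-callback bookkeeping can also be replaced by cv::selectROI, which handles the dragging and returns the chosen rectangle directly. A minimal sketch, reusing the same video path from the question (the window name "select ROI" is arbitrary):

```
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/videoio.hpp>

using namespace cv;
using namespace std;

int main()
{
    // same video path as in the question above; adjust to your own file
    VideoCapture capture("D:/dvp/sample/dataset/traffic.mp4");
    if (!capture.isOpened()) {
        cerr << "Unable to open file!" << endl;
        return -1;
    }
    Mat frame;
    capture >> frame;  // grab the first frame
    if (frame.empty()) return -1;

    // selectROI opens a window, lets the user drag a rectangle,
    // and returns it after ENTER/SPACE is pressed (c cancels)
    Rect track_window = selectROI("select ROI", frame);
    destroyWindow("select ROI");
    cout << "selected: " << track_window.x << "," << track_window.y
         << " " << track_window.width << "x" << track_window.height << endl;

    // track_window can now be used in place of the hardcoded
    // Rect(300, 200, 100, 50) before entering the meanShift loop
    return 0;
}
```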
```
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

using namespace cv;
using namespace std;

int main()
{
    Mat img = imread("C:\\Users\\Lenovo\\Pictures\\Saved Pictures\\QQ图片20230712174241.jpg", 0);
    Mat dst1, dst2;
    Sobel(img, dst1, CV_16S, 1, 0);  // fifth parameter is 0: no derivative in the y direction
    Sobel(img, dst2, CV_16S, 0, 1);  // fourth parameter is 0: no derivative in the x direction
    convertScaleAbs(dst1, dst1);
    convertScaleAbs(dst2, dst2);
    namedWindow("1", WINDOW_NORMAL);
    namedWindow("2", WINDOW_NORMAL);
    imshow("1", dst1);
    imshow("2", dst2);
    Mat dst;
    dst = Mat(dst1.size(), dst1.type());
    int cols = dst1.cols, rows = dst1.rows;
    for (int i = 0; i < cols; i++)
    {
        for (int j = 0; j < rows; j++)
        {
            int x = dst1.at<uchar>(j, i);
            int y = dst2.at<uchar>(j, i);
            int xy = x + y;
            dst.at<uchar>(j, i) = saturate_cast<uchar>(xy);
        }
    }
    namedWindow("3", WINDOW_NORMAL);
    imshow("3", dst);
    waitKey(0);
}
```
This code is an image-processing example using the OpenCV library. It applies the Sobel operator to a grayscale input image to detect edges, adds the horizontal and vertical edge images together, and displays the result.
First, it includes the necessary headers and uses the cv and std namespaces.
Then, in main, it reads a grayscale image as input. Adjust the file path as needed.
Next, it defines two Mat objects, dst1 and dst2, to hold the horizontal and vertical edge images.
It then calls Sobel twice to compute the horizontal and vertical edge images. The third parameter of Sobel is the output depth; CV_16S means the result is stored as 16-bit signed integers. The fourth and fifth parameters are the derivative orders in the x and y directions.
Next, convertScaleAbs converts dst1 and dst2 to 8-bit unsigned images.
Then it creates three windows and displays the images with imshow.
It then creates a Mat object dst with the same size and type as dst1.
Next, two nested for loops walk over every pixel of dst1 and dst2, add the corresponding values, and store the saturated sum in dst (a more concise alternative using addWeighted is sketched below).
Finally, imshow displays the combined image and waitKey waits for a key press before the program exits.
Note that this is only a simple example; you can modify and extend it to suit your needs.
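For comparison, the per-pixel addition loop can be written as a single call to cv::addWeighted, which performs the same saturated sum of the two 8-bit edge maps. A minimal sketch, assuming a placeholder image path "input.jpg":

```
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

using namespace cv;

int main()
{
    // placeholder path: substitute your own image
    Mat img = imread("input.jpg", IMREAD_GRAYSCALE);
    if (img.empty()) return -1;

    Mat dx, dy;
    Sobel(img, dx, CV_16S, 1, 0);   // horizontal gradient
    Sobel(img, dy, CV_16S, 0, 1);   // vertical gradient
    convertScaleAbs(dx, dx);        // back to 8-bit
    convertScaleAbs(dy, dy);

    // addWeighted adds the two 8-bit edge maps with saturation,
    // which is what the manual per-pixel loop does
    Mat edges;
    addWeighted(dx, 1.0, dy, 1.0, 0, edges);

    imshow("edges", edges);
    waitKey(0);
    return 0;
}
```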