How do I display text_str = "{}|{}".format("限速", "50") correctly with cv2.putText?
To draw text with cv2.putText, note the following:
1. cv2.putText draws straight onto the array returned by cv2.imread; no colour-space conversion is required, because the function only writes pixel values and does not care whether the image is BGR or RGB.
2. Pass the text, the position of its bottom-left corner, the font, the font scale, the colour (in B, G, R order for a BGR image) and the line thickness.
3. The built-in Hershey fonts only cover ASCII, so the Chinese part of text_str ("限速") will come out as "??" and only "|50" will be readable; to render the Chinese characters you have to draw the text through PIL instead (see the sketch after the example below).
A minimal cv2.putText example:
```python
import cv2

# Read the image (OpenCV loads it in BGR order)
img = cv2.imread('image.jpg')

# Build the text; the Chinese part will be rendered as "??" by the Hershey fonts
text_str = "{}|{}".format("限速", "50")

# Draw the text: position, font, scale, colour (B, G, R), thickness
cv2.putText(img, text_str, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)

# Show the result
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
```
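Because the Hershey fonts cannot draw "限速", the usual workaround is to hand the frame to Pillow, render the text with a TrueType font that contains CJK glyphs, and convert back to an OpenCV image. Below is a minimal sketch of that approach, assuming a CJK-capable font file is available at the path used ("simhei.ttf" is an assumption; point font_path at any suitable font on your system):
```python
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont

def put_chinese_text(img_bgr, text, org, font_path="simhei.ttf", font_size=32, color=(0, 0, 255)):
    """Draw text (possibly containing Chinese) on a BGR image and return a new BGR image.

    font_path is assumed to point at a TrueType font with CJK glyphs;
    color is given in (B, G, R) order to match the OpenCV convention.
    """
    rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)      # Pillow works in RGB
    pil_img = Image.fromarray(rgb)
    draw = ImageDraw.Draw(pil_img)
    font = ImageFont.truetype(font_path, font_size)
    draw.text(org, text, font=font, fill=(color[2], color[1], color[0]))  # Pillow expects RGB
    return cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)

img = cv2.imread('image.jpg')
text_str = "{}|{}".format("限速", "50")
img = put_chinese_text(img, text_str, (50, 50))
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
```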
Related questions
```python
from IPython.display import clear_output, Image
import time
import cv2
from PIL import Image as PILImage
import io

current_time = 0

def processImg(img):
    # Draw a box
    cv2.rectangle(img, (500, 300), (800, 400), (0, 0, 255), 5)
    # Show the FPS
    global current_time
    if current_time == 0:
        current_time = time.time()
    else:
        last_time = current_time
        current_time = time.time()
        fps = 1. / (current_time - last_time)
        text = "FPS: %d" % int(fps)
        cv2.putText(img, text, (0, 100), cv2.FONT_HERSHEY_TRIPLEX, 3.65, (255, 0, 0), 2)
    return img

def arrayShow(imageArray):
    pil_image = PILImage.fromarray(imageArray)
    # Convert the image to byte data
    io.BytesIO()
    pil_image.save(stream, format='PNG')
    display(Image(stream.getvalue()))

video_url = "http://192.168.50.180/mjpeg/1"
video = cv2.VideoCapture(video_url)
while True:
    try:
        clear=True
        ret, frame = video.read()
        if not ret:
            break
        lines, columns, _ = frame.shape
        frame = processImg(frame)
        frame = cv2.resize(frame, (int(columns / 4), int(lines / 4)))
        arrayShow(frame)
        time.sleep(0.02)
    except KeyboardInterrupt:
        video.release()
```
Please help me fix this code.
Sure. I see you want to display a live video stream inside a Jupyter Notebook and draw a box plus the FPS on each frame. Here is the modified code:
```python
from IPython.display import clear_output, Image, display
import time
import cv2
from PIL import Image as PILImage
import io

current_time = 0

def processImg(img):
    # Draw a box
    cv2.rectangle(img, (500, 300), (800, 400), (0, 0, 255), 5)
    # Overlay the FPS
    global current_time
    if current_time == 0:
        current_time = time.time()
    else:
        last_time = current_time
        current_time = time.time()
        fps = 1. / (current_time - last_time)
        text = "FPS: %d" % int(fps)
        cv2.putText(img, text, (0, 100), cv2.FONT_HERSHEY_TRIPLEX, 3.65, (255, 0, 0), 2)
    return img

def arrayShow(imageArray):
    # OpenCV frames are BGR; convert to RGB so PIL renders the colours correctly
    rgb = cv2.cvtColor(imageArray, cv2.COLOR_BGR2RGB)
    pil_image = PILImage.fromarray(rgb)
    # Encode the image as PNG bytes and display it inline
    stream = io.BytesIO()
    pil_image.save(stream, format='PNG')
    display(Image(stream.getvalue()))

video_url = "http://192.168.50.180/mjpeg/1"
video = cv2.VideoCapture(video_url)
try:
    while True:
        ret, frame = video.read()
        if not ret:
            break
        lines, columns, _ = frame.shape
        frame = processImg(frame)
        frame = cv2.resize(frame, (int(columns / 4), int(lines / 4)))
        clear_output(wait=True)  # replace the previous frame instead of stacking images
        arrayShow(frame)
        time.sleep(0.02)
except KeyboardInterrupt:
    pass
finally:
    video.release()
    clear_output()
```
The modified code addresses several issues:
1. In `arrayShow()`, the byte buffer was never assigned: `io.BytesIO()` is now stored in a `stream` variable, the PNG is written into it, and its bytes are passed to `Image()`. The frame is also converted from BGR to RGB before handing it to PIL, so the colours are no longer swapped.
2. `display` is explicitly imported from `IPython.display`, and `clear_output(wait=True)` is called before each frame, so the new image replaces the previous one instead of piling up in the notebook output.
3. The `try`/`except` now wraps the whole loop, so a keyboard interrupt releases the video capture cleanly (the release happens in `finally`) and clears the output.
Try running the modified code and check that the video stream shows up with the box and FPS overlay. If anything else goes wrong, share the details and I'll take another look.
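For reference, the in-place refresh that makes this work in a notebook is just `clear_output(wait=True)` followed by `display()` of the PNG bytes. Here is a minimal, self-contained sketch of that pattern, using a random array as a stand-in for a video frame:
```python
import io
import time

import numpy as np
from IPython.display import Image, clear_output, display
from PIL import Image as PILImage

# Redraw a few synthetic "frames" in the same output cell.
for _ in range(5):
    frame = (np.random.rand(120, 160, 3) * 255).astype(np.uint8)  # stand-in frame (RGB)
    buf = io.BytesIO()
    PILImage.fromarray(frame).save(buf, format='PNG')  # encode the frame as PNG bytes
    clear_output(wait=True)           # clear the previous frame just before drawing the new one
    display(Image(buf.getvalue()))    # show the new frame inline
    time.sleep(0.1)
```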
```python
import cv2
import mediapipe as mp

# Initialise mediapipe
mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose

# Open the camera
cap = cv2.VideoCapture(0)
with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:
    while cap.isOpened():
        success, image = cap.read()
        if not success:
            print("Ignoring empty camera frame.")
            continue
        # Convert the colour space
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # Run pose estimation
        results = pose.process(image)
        # Draw the pose estimation result
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,
                                  mp_drawing.DrawingSpec(color=(0, 255, 0), thickness=2, circle_radius=2),
                                  mp_drawing.DrawingSpec(color=(0, 0, 255), thickness=2, circle_radius=2))
        # Label the keypoint indices
        for index, landmark in enumerate(results.pose_landmarks.landmark):
            height, width, _ = image.shape
            cx, cy = int(landmark.x * width), int(landmark.y * height)
            cv2.putText(image, str(index), (cx, cy), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)
        # Show the result
        cv2.imshow('MediaPipe Pose', image)
        if cv2.waitKey(5) & 0xFF == 27:
            break

# Release the camera and close the windows
cap.release()
cv2.destroyAllWindows()
```
On top of this code, please add the angle between each keypoint and its adjacent keypoints.
To add the angle between each keypoint and its neighbouring keypoints on top of the code above, you can add the following inside the main loop:
```python
# Compute an angle for each keypoint (using consecutive indices as "adjacent" points)
angles = []
for i in range(len(results.pose_landmarks.landmark) - 2):
    p1 = results.pose_landmarks.landmark[i]
    p2 = results.pose_landmarks.landmark[i + 1]
    p3 = results.pose_landmarks.landmark[i + 2]
    # calculate_angle is a small helper (defined below); MediaPipe itself
    # does not provide an angle function on PoseLandmark
    angle = calculate_angle(p1, p2, p3)
    angles.append(angle)

# Overlay the angle values on the image
for i, angle in enumerate(angles):
    cv2.putText(image, f"Angle {i+1}: {angle:.2f}", (20, (i + 1) * 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
```
This loop walks over the landmarks, treats each run of three consecutive indices as a keypoint and its two "adjacent" keypoints, computes the angle at the middle point, stores it in a list, and then overlays every angle on the image. Note that MediaPipe does not ship an angle function (`mp_pose.PoseLandmark` has no `get_angle`), so the angle is computed from the landmarks' x/y coordinates with a small `calculate_angle` helper, sketched below and included in the complete code.
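A minimal sketch of that helper (the name `calculate_angle` is introduced here for illustration; it is not part of the MediaPipe API), computing the angle at the middle point from the normalised x/y coordinates with `atan2`:
```python
import math
from types import SimpleNamespace

def calculate_angle(p1, p2, p3):
    """Angle in degrees (0-180) at p2, formed by the segments p2->p1 and p2->p3.

    Each argument only needs .x and .y attributes, which MediaPipe landmarks provide.
    """
    a1 = math.atan2(p1.y - p2.y, p1.x - p2.x)
    a2 = math.atan2(p3.y - p2.y, p3.x - p2.x)
    angle = abs(math.degrees(a1 - a2))
    return 360.0 - angle if angle > 180.0 else angle

# Quick sanity check: three perpendicular points give a 90-degree angle at the vertex.
print(calculate_angle(SimpleNamespace(x=1.0, y=0.0),
                      SimpleNamespace(x=0.0, y=0.0),
                      SimpleNamespace(x=0.0, y=1.0)))  # 90.0
```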
The complete code is as follows:
```python
import cv2
import math
import mediapipe as mp

# Initialise mediapipe
mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose

def calculate_angle(p1, p2, p3):
    """Angle in degrees (0-180) at p2, formed by the segments p2->p1 and p2->p3."""
    a1 = math.atan2(p1.y - p2.y, p1.x - p2.x)
    a2 = math.atan2(p3.y - p2.y, p3.x - p2.x)
    angle = abs(math.degrees(a1 - a2))
    return 360.0 - angle if angle > 180.0 else angle

# Open the camera
cap = cv2.VideoCapture(0)
with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:
    while cap.isOpened():
        success, image = cap.read()
        if not success:
            print("Ignoring empty camera frame.")
            continue
        # Convert to RGB for MediaPipe
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # Run pose estimation
        results = pose.process(image)
        # Convert back to BGR for OpenCV drawing
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        if results.pose_landmarks:  # skip drawing when no person is detected
            mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,
                                      mp_drawing.DrawingSpec(color=(0, 255, 0), thickness=2, circle_radius=2),
                                      mp_drawing.DrawingSpec(color=(0, 0, 255), thickness=2, circle_radius=2))
            # Label the keypoint indices
            for index, landmark in enumerate(results.pose_landmarks.landmark):
                height, width, _ = image.shape
                cx, cy = int(landmark.x * width), int(landmark.y * height)
                cv2.putText(image, str(index), (cx, cy), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)
            # Compute the angle at each keypoint (consecutive indices used as "adjacent" points)
            angles = []
            for i in range(len(results.pose_landmarks.landmark) - 2):
                p1 = results.pose_landmarks.landmark[i]
                p2 = results.pose_landmarks.landmark[i + 1]
                p3 = results.pose_landmarks.landmark[i + 2]
                angles.append(calculate_angle(p1, p2, p3))
            # Overlay the angle values
            for i, angle in enumerate(angles):
                cv2.putText(image, f"Angle {i+1}: {angle:.2f}", (20, (i + 1) * 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
        # Show the result
        cv2.imshow('MediaPipe Pose', image)
        if cv2.waitKey(5) & 0xFF == 27:
            break

# Release the camera and close the windows
cap.release()
cv2.destroyAllWindows()
```