FFmpeg 4.4: decoding H.264 to YUV
You can use the following command to decode an H.264 video file into raw YUV:
```
ffmpeg -i input.h264 -c:v rawvideo -pix_fmt yuv420p output.yuv
```
This command decodes the input file `input.h264` into raw video and saves the output as `output.yuv`. The output pixel format is YUV420P, a very common YUV layout: every pixel has its own luma (Y) sample, while each 2×2 block of pixels shares one pair of chroma (U, V) samples, so one frame occupies width × height × 3/2 bytes (for 640×480, that is 460,800 bytes per frame).
Note that decoding H.264 requires an FFmpeg build that includes an H.264 decoder; you can verify this with `ffmpeg -decoders | grep h264`. If your FFmpeg build lacks H.264 support, install a build with the decoder first.
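Because a raw `.yuv` file carries no header, a player has to be told the frame size and pixel format explicitly. As a quick sanity check of the decoded output, an ffplay invocation along the following lines should work (640x480 is only an assumed example resolution; substitute your video's actual dimensions):
```
ffplay -f rawvideo -pixel_format yuv420p -video_size 640x480 output.yuv
```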
Related questions
Using the existing ffmpeg codecs, H.264-encode a raw video and decode it back to obtain the reconstructed video, with OpenCV under Visual Studio. (The file to process is a YUV file named "YUV.yuv".)
Below is Visual Studio-oriented OpenCV code that uses FFmpeg for H.264 encoding and decoding: it reads raw video from the YUV file, encodes it, and then decodes the result to reconstruct the video:
```cpp
#include <iostream>
#include <cstring>
#include <opencv2/opencv.hpp>
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
#include <libavformat/avformat.h>
}
using namespace std;
using namespace cv;
int main()
{
    // Open the input YUV file
    string filename = "YUV.yuv";
    FILE* fp = fopen(filename.c_str(), "rb");
    if (fp == NULL)
    {
        cout << "Failed to open file: " << filename << endl;
        return -1;
    }
    // Video width and height
    int width = 640;
    int height = 480;
    // Frame rate
    int fps = 30;
    // Create and open the encoder context
    AVCodec* encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
    AVCodecContext* codecContext = avcodec_alloc_context3(encoder);
    codecContext->width = width;
    codecContext->height = height;
    codecContext->bit_rate = 400000;
    codecContext->time_base = { 1, fps };
    codecContext->gop_size = 10;
    codecContext->max_b_frames = 1;
    codecContext->pix_fmt = AV_PIX_FMT_YUV420P;
    if (avcodec_open2(codecContext, encoder, NULL) < 0)
    {
        cout << "Failed to open H.264 encoder (is libx264 enabled?)." << endl;
        return -1;
    }
    // Create and open the decoder context (this must use the H.264 decoder, not the encoder)
    AVCodec* decoder = avcodec_find_decoder(AV_CODEC_ID_H264);
    AVCodecContext* decodeContext = avcodec_alloc_context3(decoder);
    decodeContext->width = width;
    decodeContext->height = height;
    decodeContext->pix_fmt = AV_PIX_FMT_YUV420P;
    if (avcodec_open2(decodeContext, decoder, NULL) < 0)
    {
        cout << "Failed to open H.264 decoder." << endl;
        return -1;
    }
    // Allocate the frame that carries raw input to the encoder
    AVFrame* frame = av_frame_alloc();
    frame->width = width;
    frame->height = height;
    frame->format = codecContext->pix_fmt;
    int ret = av_frame_get_buffer(frame, 32);
    if (ret < 0)
    {
        cout << "Failed to allocate frame buffer." << endl;
        return -1;
    }
    // Frame that receives decoded pictures; avcodec_receive_frame() supplies its buffers,
    // so no av_frame_get_buffer() call is needed here
    AVFrame* decodedFrame = av_frame_alloc();
    // Packet that carries the encoded data; av_packet_alloc() already initializes it
    // (av_init_packet() is deprecated as of FFmpeg 4.4)
    AVPacket* packet = av_packet_alloc();
    // SwsContext for converting decoded YUV420P frames to BGR24 for display
    SwsContext* swsContext = sws_getContext(width, height, AV_PIX_FMT_YUV420P, width, height, AV_PIX_FMT_BGR24, SWS_BILINEAR, NULL, NULL, NULL);
    // Window for the reconstructed video
    namedWindow("Reconstructed Video", WINDOW_NORMAL);
    // Read raw frames in a loop, encode each one, then decode the resulting packets
    int frameIndex = 0;
    uint8_t* yuvData = new uint8_t[width * height * 3 / 2];
    while (true)
    {
        // One YUV420p frame = width*height luma bytes + two (width/2)*(height/2) chroma planes
        int bytesRead = fread(yuvData, 1, width * height * 3 / 2, fp);
        if (bytesRead != width * height * 3 / 2)
        {
            break;
        }
        // Copy the planar YUV data into the AVFrame, honoring the frame's linesize
        // (the YUV->BGR swsContext must not be used here: the encoder expects YUV420P input)
        av_frame_make_writable(frame);
        for (int y = 0; y < height; y++)
            memcpy(frame->data[0] + y * frame->linesize[0], yuvData + y * width, width);
        for (int y = 0; y < height / 2; y++)
        {
            memcpy(frame->data[1] + y * frame->linesize[1], yuvData + width * height + y * (width / 2), width / 2);
            memcpy(frame->data[2] + y * frame->linesize[2], yuvData + width * height * 5 / 4 + y * (width / 2), width / 2);
        }
        // Set the frame timestamp
        frame->pts = frameIndex;
        // Encode the frame
        ret = avcodec_send_frame(codecContext, frame);
        if (ret < 0)
        {
            cout << "Failed to send frame for encoding." << endl;
            return -1;
        }
        while (ret >= 0)
        {
            ret = avcodec_receive_packet(codecContext, packet);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            {
                break;
            }
            else if (ret < 0)
            {
                cout << "Failed to encode packet." << endl;
                return -1;
            }
            // Feed the encoded packet straight to the decoder
            int decRet = avcodec_send_packet(decodeContext, packet);
            av_packet_unref(packet); // the packet can be released once it has been sent
            if (decRet < 0)
            {
                cout << "Failed to send packet for decoding." << endl;
                return -1;
            }
            while (decRet >= 0)
            {
                decRet = avcodec_receive_frame(decodeContext, decodedFrame);
                if (decRet == AVERROR(EAGAIN) || decRet == AVERROR_EOF)
                {
                    break;
                }
                else if (decRet < 0)
                {
                    cout << "Failed to decode frame." << endl;
                    return -1;
                }
                // Convert the decoded frame to a BGR cv::Mat
                Mat decodedImg(height, width, CV_8UC3);
                uint8_t* decodedData[AV_NUM_DATA_POINTERS] = { 0 };
                decodedData[0] = decodedImg.data;
                int decodedLinesize[AV_NUM_DATA_POINTERS] = { 0 };
                decodedLinesize[0] = width * 3;
                sws_scale(swsContext, decodedFrame->data, decodedFrame->linesize, 0, height, decodedData, decodedLinesize);
                // Show the reconstructed video
                imshow("Reconstructed Video", decodedImg);
                waitKey(1000 / fps);
            }
        }
        frameIndex++;
    }
    // Release resources
    delete[] yuvData;
    av_packet_free(&packet);
    av_frame_free(&decodedFrame);
    av_frame_free(&frame);
    avcodec_free_context(&decodeContext);
    avcodec_free_context(&codecContext);
    sws_freeContext(swsContext);
    fclose(fp);
    return 0;
}
```
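One caveat about the listing: it never drains the codecs at end of file, so frames still buffered inside the encoder (B-frames in particular, given `max_b_frames = 1`) are silently dropped. A minimal draining sketch, reusing the variables from the listing, would look like this:
```cpp
// Sending NULL switches each codec into flush mode; then receive until exhaustion
// (error handling omitted for brevity)
avcodec_send_frame(codecContext, NULL);
while (avcodec_receive_packet(codecContext, packet) >= 0)
{
    avcodec_send_packet(decodeContext, packet);
    av_packet_unref(packet);
    while (avcodec_receive_frame(decodeContext, decodedFrame) >= 0)
    {
        // ...convert and display exactly as in the main loop...
    }
}
avcodec_send_packet(decodeContext, NULL); // flush the decoder as well
while (avcodec_receive_frame(decodeContext, decodedFrame) >= 0)
{
    // ...convert and display...
}
```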
Note: this code is for reference only; read it carefully and make sure it matches your needs and environment before use. The versions used here are:
- FFmpeg 4.4
- OpenCV 4.5.3
Other FFmpeg or OpenCV versions may require corresponding changes.
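The answer above assumes a Visual Studio project with the FFmpeg and OpenCV include and library paths already configured. For readers building elsewhere, a rough Linux equivalent (assuming pkg-config entries for OpenCV 4 and the FFmpeg libraries exist on the system; package names vary by distribution) might be:
```
g++ main.cpp -o recon $(pkg-config --cflags --libs opencv4 libavcodec libavformat libavutil libswscale)
```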
Opening an RTSP stream with FFmpeg 6.0 and handing it to OpenCV
FFmpeg 6.0 does exist (it was released in early 2023); note that it removes some legacy APIs such as `av_register_all()`. With that in mind, the general steps for opening an RTSP stream with FFmpeg and passing the frames to OpenCV for processing are as follows:
1. Include the FFmpeg and OpenCV headers (in C++, the FFmpeg headers must be wrapped in `extern "C"` because FFmpeg is a C library):
```c++
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
}
#include <opencv2/opencv.hpp>
```
2. Initialize FFmpeg's network layer (`av_register_all()` was deprecated in FFmpeg 4.0 and removed in 5.0, so it is no longer called):
```c++
avformat_network_init();
```
3. Open the RTSP stream and retrieve the stream information:
```c++
AVFormatContext* formatContext = avformat_alloc_context();
// On failure, avformat_open_input() itself frees the context and sets it to NULL
if (avformat_open_input(&formatContext, "rtsp://xxx.xxx.xxx.xxx:xxxx/", NULL, NULL) != 0) {
    return -1;
}
if (avformat_find_stream_info(formatContext, NULL) < 0) {
    avformat_close_input(&formatContext); // also frees the context
    return -1;
}
// Locate the first video stream
int videoStreamIndex = -1;
for (unsigned int i = 0; i < formatContext->nb_streams; i++) {
    if (formatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        videoStreamIndex = i;
        break;
    }
}
if (videoStreamIndex == -1) {
    avformat_close_input(&formatContext);
    return -1;
}
AVStream* videoStream = formatContext->streams[videoStreamIndex];
AVCodecParameters* videoCodecParameters = videoStream->codecpar;
// avcodec_find_decoder() returns const AVCodec* as of FFmpeg 5.0
const AVCodec* videoCodec = avcodec_find_decoder(videoCodecParameters->codec_id);
if (!videoCodec) {
    avformat_close_input(&formatContext);
    return -1;
}
AVCodecContext* videoCodecContext = avcodec_alloc_context3(videoCodec);
if (!videoCodecContext) {
    avformat_close_input(&formatContext);
    return -1;
}
if (avcodec_parameters_to_context(videoCodecContext, videoCodecParameters) < 0) {
    avcodec_free_context(&videoCodecContext);
    avformat_close_input(&formatContext);
    return -1;
}
if (avcodec_open2(videoCodecContext, videoCodec, NULL) < 0) {
    avcodec_free_context(&videoCodecContext);
    avformat_close_input(&formatContext);
    return -1;
}
```
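A practical note on step 3: RTSP defaults to RTP over UDP, which can drop packets on lossy networks. A common remedy is to force TCP transport through an options dictionary when opening the input; `rtsp_transport` is a standard FFmpeg RTSP demuxer option, and the sketch below simply replaces the `avformat_open_input` call shown above:
```c++
AVDictionary* options = NULL;
av_dict_set(&options, "rtsp_transport", "tcp", 0); // carry RTP over TCP instead of UDP
if (avformat_open_input(&formatContext, "rtsp://xxx.xxx.xxx.xxx:xxxx/", NULL, &options) != 0) {
    av_dict_free(&options);
    return -1;
}
av_dict_free(&options); // any options the demuxer did not consume remain in the dictionary
```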
4. Create the OpenCV window and an image buffer:
```c++
cv::namedWindow("Video", cv::WINDOW_NORMAL);
cv::Mat frame;
```
5. Decode the RTSP stream and hand the frames to OpenCV for processing:
```c++
AVPacket packet;
AVFrame* frameYUV = av_frame_alloc();
AVFrame* frameRGB = av_frame_alloc();
// frameRGB needs its own pixel buffer to receive the sws_scale output
frameRGB->width = videoCodecContext->width;
frameRGB->height = videoCodecContext->height;
frameRGB->format = AV_PIX_FMT_BGR24;
av_frame_get_buffer(frameRGB, 0);
struct SwsContext* swsContext = sws_getContext(videoCodecContext->width, videoCodecContext->height, videoCodecContext->pix_fmt, videoCodecContext->width, videoCodecContext->height, AV_PIX_FMT_BGR24, SWS_BILINEAR, NULL, NULL, NULL);
while (av_read_frame(formatContext, &packet) >= 0) {
    if (packet.stream_index == videoStreamIndex) {
        avcodec_send_packet(videoCodecContext, &packet);
        while (avcodec_receive_frame(videoCodecContext, frameYUV) == 0) {
            sws_scale(swsContext, frameYUV->data, frameYUV->linesize, 0, videoCodecContext->height, frameRGB->data, frameRGB->linesize);
            // sws_scale already produced BGR24, which is what cv::imshow() expects,
            // so no cv::cvtColor() round trip is needed
            frame = cv::Mat(videoCodecContext->height, videoCodecContext->width, CV_8UC3, frameRGB->data[0], frameRGB->linesize[0]);
            cv::imshow("Video", frame);
            cv::waitKey(1);
        }
    }
    av_packet_unref(&packet);
}
```
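When `av_read_frame()` stops returning data, a few frames may still be buffered inside the decoder. Draining them follows the usual send/receive pattern (a minimal sketch reusing the names above):
```c++
avcodec_send_packet(videoCodecContext, NULL); // a NULL packet puts the decoder into flush mode
while (avcodec_receive_frame(videoCodecContext, frameYUV) == 0) {
    // convert and display exactly as in the loop above
}
```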
6. Release the FFmpeg and OpenCV resources:
```c++
sws_freeContext(swsContext);
av_frame_free(&frameYUV);
av_frame_free(&frameRGB);
avcodec_free_context(&videoCodecContext); // also closes the codec, so avcodec_close() is unnecessary
avformat_close_input(&formatContext);     // also frees the format context
avformat_network_deinit();
cv::destroyAllWindows();
```
The steps above are a basic skeleton; the concrete implementation will vary with requirements and environment, so adjust and extend it as needed.