使用现有编解码器ffmpeg,将原始视频进行H264编码、解码得到重建视频,基于visual studio的opencv。(待处理文件为YUV文件,名称为“YUV.yuv”)
时间: 2024-06-09 21:06:51 浏览: 170
使用ffmpeg将h.264转为yuv格式
4星 · 用户满意度95%
以下是基于Visual Studio的OpenCV代码,使用FFmpeg进行H264编码和解码,从YUV文件中读取原始视频,编码后解码得到重建视频:
```cpp
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>
#include <opencv2/opencv.hpp>
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
}
using namespace std;
using namespace cv;
int main()
{
// 读取YUV文件
string filename = "YUV.yuv";
FILE* fp = fopen(filename.c_str(), "rb");
if (fp == NULL)
{
cout << "Failed to open file: " << filename << endl;
return -1;
}
// 视频宽度和高度
int width = 640;
int height = 480;
// 帧率
int fps = 30;
// 创建编码器上下文
AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_H264);
AVCodecContext* codecContext = avcodec_alloc_context3(codec);
codecContext->width = width;
codecContext->height = height;
codecContext->bit_rate = 400000;
codecContext->time_base = { 1,fps };
codecContext->gop_size = 10;
codecContext->max_b_frames = 1;
codecContext->pix_fmt = AV_PIX_FMT_YUV420P;
avcodec_open2(codecContext, codec, NULL);
// 创建解码器上下文
AVCodecContext* decodeContext = avcodec_alloc_context3(codec);
decodeContext->width = width;
decodeContext->height = height;
decodeContext->pix_fmt = AV_PIX_FMT_YUV420P;
avcodec_open2(decodeContext, codec, NULL);
// 创建存储编码数据的结构体
AVFrame* frame = av_frame_alloc();
frame->width = width;
frame->height = height;
frame->format = codecContext->pix_fmt;
int ret = av_frame_get_buffer(frame, 32);
if (ret < 0)
{
cout << "Failed to allocate frame buffer." << endl;
return -1;
}
// 创建存储解码数据的结构体
AVFrame* decodedFrame = av_frame_alloc();
decodedFrame->width = width;
decodedFrame->height = height;
decodedFrame->format = decodeContext->pix_fmt;
ret = av_frame_get_buffer(decodedFrame, 32);
if (ret < 0)
{
cout << "Failed to allocate frame buffer." << endl;
return -1;
}
// 创建存储编码数据的结构体
AVPacket* packet = av_packet_alloc();
av_init_packet(packet);
packet->data = NULL;
packet->size = 0;
// 创建SwsContext,用于YUV420P到RGB的转换
SwsContext* swsContext = sws_getContext(width, height, AV_PIX_FMT_YUV420P, width, height, AV_PIX_FMT_BGR24, 0, NULL, NULL, NULL);
// 创建重建视频窗口
namedWindow("Reconstructed Video", WINDOW_NORMAL);
// 循环读取原始视频帧并编码
int frameIndex = 0;
Mat img(height, width, CV_8UC3);
while (true)
{
// 读取YUV帧
uint8_t* yuvData = new uint8_t[width * height * 3 / 2];
int bytesRead = fread(yuvData, 1, width * height * 3 / 2, fp);
if (bytesRead != width * height * 3 / 2)
{
break;
}
// 将YUV数据复制到AVFrame结构体中
uint8_t* data[AV_NUM_DATA_POINTERS] = { 0 };
data[0] = yuvData;
data[1] = yuvData + width * height;
data[2] = yuvData + width * height * 5 / 4;
int linesize[AV_NUM_DATA_POINTERS] = { 0 };
linesize[0] = width;
linesize[1] = width / 2;
linesize[2] = width / 2;
sws_scale(swsContext, data, linesize, 0, height, frame->data, frame->linesize);
// 设置AVFrame时间戳
frame->pts = frameIndex;
// 编码AVFrame
ret = avcodec_send_frame(codecContext, frame);
if (ret < 0)
{
cout << "Failed to send frame for encoding." << endl;
return -1;
}
while (ret >= 0)
{
ret = avcodec_receive_packet(codecContext, packet);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
{
break;
}
else if (ret < 0)
{
cout << "Failed to encode packet." << endl;
return -1;
}
// 解码AVPacket
ret = avcodec_send_packet(decodeContext, packet);
if (ret < 0)
{
cout << "Failed to send packet for decoding." << endl;
return -1;
}
while (ret >= 0)
{
ret = avcodec_receive_frame(decodeContext, decodedFrame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
{
break;
}
else if (ret < 0)
{
cout << "Failed to decode frame." << endl;
return -1;
}
// 将解码后的数据转换成Mat
Mat decodedImg(height, width, CV_8UC3);
uint8_t* decodedData[AV_NUM_DATA_POINTERS] = { 0 };
decodedData[0] = decodedImg.data;
int decodedLinesize[AV_NUM_DATA_POINTERS] = { 0 };
decodedLinesize[0] = width * 3;
sws_scale(swsContext, decodedFrame->data, decodedFrame->linesize, 0, height, decodedData, decodedLinesize);
// 显示重建视频
imshow("Reconstructed Video", decodedImg);
waitKey(1000 / fps);
av_packet_unref(packet);
}
}
delete[] yuvData;
frameIndex++;
}
// 释放资源
av_packet_free(&packet);
av_frame_free(&decodedFrame);
av_frame_free(&frame);
avcodec_free_context(&decodeContext);
avcodec_free_context(&codecContext);
sws_freeContext(swsContext);
fclose(fp);
return 0;
}
```
注意:本代码仅供参考,使用前需要仔细阅读并理解代码,确保代码符合自己的需求和环境。此外,本代码中使用的FFmpeg和OpenCV版本为:
- FFmpeg 4.4
- OpenCV 4.5.3
如果使用不同版本的FFmpeg和OpenCV,可能需要进行相应的修改。
阅读全文