ffmpeg c++ rgb 转h264
时间: 2023-10-05 10:11:07 浏览: 303
利用ffmpeg,将rgb编码为h264
要将 RGB 图像转换为 H.264 视频,您可以使用 FFmpeg 库。下面是一个简单的 C++ 代码示例,演示如何使用 FFmpeg 库将 RGB 图像编码为 H.264 视频:
```cpp
#include <iostream>
#include <fstream>
#include <cstring>
#include <cmath>
#include <stdint.h>
extern "C" {
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
}
using namespace std;
int main(int argc, char** argv) {
// 1. Initialize FFmpeg
av_register_all();
// 2. Open output file
AVFormatContext* formatContext = nullptr;
avformat_alloc_output_context2(&formatContext, nullptr, nullptr, "output.mp4");
if (!formatContext) {
cout << "Failed to open output file" << endl;
return -1;
}
// 3. Find video encoder
AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (!codec) {
cout << "Failed to find video encoder" << endl;
return -1;
}
// 4. Create new video stream
AVStream* stream = avformat_new_stream(formatContext, codec);
if (!stream) {
cout << "Failed to create new video stream" << endl;
return -1;
}
// 5. Set video stream parameters
stream->codecpar->codec_id = codec->id;
stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
stream->codecpar->width = 640;
stream->codecpar->height = 480;
stream->codecpar->format = AV_PIX_FMT_YUV420P;
stream->time_base = { 1, 25 }; // 25 fps
// 6. Open video encoder
AVCodecContext* codecContext = avcodec_alloc_context3(codec);
avcodec_parameters_to_context(codecContext, stream->codecpar);
if (avcodec_open2(codecContext, codec, nullptr) < 0) {
cout << "Failed to open video encoder" << endl;
return -1;
}
// 7. Allocate frame buffers
AVFrame* frame = av_frame_alloc();
frame->format = AV_PIX_FMT_RGB24;
frame->width = 640;
frame->height = 480;
av_image_alloc(frame->data, frame->linesize, frame->width, frame->height, AV_PIX_FMT_RGB24, 1);
AVFrame* frameYUV = av_frame_alloc();
frameYUV->format = AV_PIX_FMT_YUV420P;
frameYUV->width = 640;
frameYUV->height = 480;
av_image_alloc(frameYUV->data, frameYUV->linesize, frameYUV->width, frameYUV->height, AV_PIX_FMT_YUV420P, 1);
// 8. Convert RGB to YUV
SwsContext* swsContext = sws_getContext(frame->width, frame->height, AV_PIX_FMT_RGB24,
frameYUV->width, frameYUV->height, AV_PIX_FMT_YUV420P,
SWS_BILINEAR, nullptr, nullptr, nullptr);
if (!swsContext) {
cout << "Failed to create SwsContext" << endl;
return -1;
}
// 9. Write header to output file
avformat_write_header(formatContext, nullptr);
// 10. Encode and write video frames
uint8_t* buffer = new uint8_t[640 * 480 * 3];
for (int i = 0; i < 100; i++) {
// Generate RGB image
for (int y = 0; y < 480; y++) {
for (int x = 0; x < 640; x++) {
buffer[y * 640 * 3 + x * 3 + 0] = (uint8_t)(sin(x / 10.0 + i / 10.0) * 128 + 128);
buffer[y * 640 * 3 + x * 3 + 1] = (uint8_t)(sin(y / 10.0 + i / 7.0) * 128 + 128);
buffer[y * 640 * 3 + x * 3 + 2] = (uint8_t)(sin(x / 7.0 + y / 10.0 + i / 5.0) * 128 + 128);
}
}
// Convert RGB to YUV
memcpy(frame->data[0], buffer, 640 * 480 * 3);
sws_scale(swsContext, frame->data, frame->linesize, 0, frame->height, frameYUV->data, frameYUV->linesize);
// Encode and write video frame
AVPacket packet;
av_init_packet(&packet);
packet.data = nullptr;
packet.size = 0;
frameYUV->pts = i;
avcodec_send_frame(codecContext, frameYUV);
while (avcodec_receive_packet(codecContext, &packet) == 0) {
av_interleaved_write_frame(formatContext, &packet);
av_packet_unref(&packet);
}
}
// 11. Write trailer to output file
av_write_trailer(formatContext);
// 12. Cleanup
avformat_free_context(formatContext);
avcodec_free_context(&codecContext);
av_frame_free(&frame);
av_frame_free(&frameYUV);
sws_freeContext(swsContext);
delete[] buffer;
return 0;
}
```
在上面的代码中,我们首先初始化 FFmpeg 库,然后创建输出文件的封装上下文,查找 H.264 视频编码器,并创建一个新的视频流。接下来,我们设置编码参数,包括视频分辨率、帧率和像素格式等,再打开视频编码器,并分配 RGB 和 YUV 帧缓冲区。随后打开输出文件并写入容器头部;在编码循环中,我们使用 SwsContext 将 RGB 帧转换为 YUV 帧,将其送入编码器得到 H.264 数据包后写入输出文件。编码循环结束后需要冲刷(flush)编码器以取出缓冲中的剩余帧,最后写入文件尾部,并清理、关闭所有资源。
请注意,这只是一个简单的示例,实际使用中还需要进行更多的错误检查和异常处理。
阅读全文