c语言函数实现h264帧转yuv帧
时间: 2023-09-28 21:08:06 浏览: 377
实现 H264 帧转 YUV 帧的过程,可以分为如下几个步骤:
1. 读取 H264 数据,将其解码成原始的 YUV 数据。
2. 将 YUV 数据存储到文件中。
下面是一个示例代码:
```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
#define INBUF_SIZE 4096
int main(int argc, char *argv[]) {
AVFormatContext *pFormatCtx = NULL;
AVCodecContext *pCodecCtx = NULL;
AVCodec *pCodec = NULL;
AVPacket packet;
AVFrame *pFrame = NULL;
AVFrame *pFrameYUV = NULL;
uint8_t *buffer = NULL;
int videoStream = -1;
int frameFinished = 0;
int numBytes = 0;
struct SwsContext *img_convert_ctx = NULL;
if (argc < 3) {
printf("Usage: %s <input_file> <output_file>\n", argv[0]);
return -1;
}
av_register_all();
if (avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0) {
printf("Error: Couldn't open input file.\n");
return -1;
}
if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
printf("Error: Couldn't find stream information.\n");
return -1;
}
for (int i = 0; i < pFormatCtx->nb_streams; i++) {
if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
videoStream = i;
break;
}
}
if (videoStream == -1) {
printf("Error: Couldn't find video stream.\n");
return -1;
}
pCodecCtx = avcodec_alloc_context3(NULL);
if (!pCodecCtx) {
printf("Error: Couldn't allocate codec context.\n");
return -1;
}
avcodec_parameters_to_context(pCodecCtx, pFormatCtx->streams[videoStream]->codecpar);
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (pCodec == NULL) {
printf("Error: Couldn't find codec.\n");
return -1;
}
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
printf("Error: Couldn't open codec.\n");
return -1;
}
pFrame = av_frame_alloc();
pFrameYUV = av_frame_alloc();
if (!pFrame || !pFrameYUV) {
printf("Error: Couldn't allocate frames.\n");
return -1;
}
numBytes = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);
buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
if (!buffer) {
printf("Error: Couldn't allocate buffer.\n");
return -1;
}
av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);
img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, 0, NULL, NULL, NULL);
if (!img_convert_ctx) {
printf("Error: Couldn't initialize conversion context.\n");
return -1;
}
FILE *fp_out = fopen(argv[2], "wb");
if (!fp_out) {
printf("Error: Couldn't open output file.\n");
return -1;
}
while (av_read_frame(pFormatCtx, &packet) >= 0) {
if (packet.stream_index == videoStream) {
avcodec_send_packet(pCodecCtx, &packet);
while (avcodec_receive_frame(pCodecCtx, pFrame) == 0) {
sws_scale(img_convert_ctx, (const uint8_t *const *)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
fwrite(pFrameYUV->data[0], 1, pCodecCtx->width * pCodecCtx->height, fp_out);
fwrite(pFrameYUV->data[1], 1, pCodecCtx->width * pCodecCtx->height / 4, fp_out);
fwrite(pFrameYUV->data[2], 1, pCodecCtx->width * pCodecCtx->height / 4, fp_out);
}
av_packet_unref(&packet);
}
}
fclose(fp_out);
av_free(buffer);
av_frame_free(&pFrameYUV);
av_frame_free(&pFrame);
avcodec_close(pCodecCtx);
avformat_close_input(&pFormatCtx);
return 0;
}
```
其中,`AVFormatContext` 用于存储输入文件的格式信息,`AVCodecContext` 用于存储编解码器的上下文信息,`AVCodec` 用于存储编解码器的信息,`AVPacket` 用于存储压缩的(编码后的)数据包,`AVFrame` 用于存储解码后的原始图像帧,`SwsContext` 用于存储图像转换上下文信息。
阅读全文