av_frame_get_buffer(outputFrame, 32) < 0)
时间: 2024-06-05 20:09:26 浏览: 10
这段代码是用来获取输出帧的缓冲区的,其中的参数32是缓冲区对齐的大小,函数返回值小于0则表示获取失败。
具体来说,av_frame_get_buffer()函数会根据输出帧的宽度、高度、像素格式等信息来计算所需的缓冲区大小,并分配一块大小合适的内存作为输出帧的缓冲区。参数32是为了保证缓冲区的对齐,这可以提高内存读写的效率。
如果函数返回值小于0,则说明获取缓冲区失败,可能是因为内存不足或者其他原因导致的。在这种情况下,需要检查代码中的内存分配和释放是否正确,以及其他相关的错误。
相关问题
ffmpeg MP4转h264 cpp代码
以下是使用FFmpeg将MP4视频转换为H.264视频的C++代码示例:
```
#include <iostream>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
}
using namespace std;
int main(int argc, char* argv[]) {
AVFormatContext* inputFormatContext = nullptr;
AVCodecContext* inputCodecContext = nullptr;
AVCodec* inputCodec = nullptr;
AVPacket inputPacket;
AVFrame* inputFrame = nullptr;
AVFormatContext* outputFormatContext = nullptr;
AVCodecContext* outputCodecContext = nullptr;
AVCodec* outputCodec = nullptr;
AVPacket outputPacket;
AVFrame* outputFrame = nullptr;
const char* inputFilePath = "input.mp4";
const char* outputFilePath = "output.h264";
// Open input file
if (avformat_open_input(&inputFormatContext, inputFilePath, nullptr, nullptr) != 0) {
cerr << "Failed to open input file: " << inputFilePath << endl;
return -1;
}
// Retrieve stream information
if (avformat_find_stream_info(inputFormatContext, nullptr) < 0) {
cerr << "Failed to retrieve input stream information" << endl;
return -1;
}
// Find video stream index
int videoStreamIndex = -1;
for (int i = 0; i < inputFormatContext->nb_streams; i++) {
if (inputFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
videoStreamIndex = i;
break;
}
}
if (videoStreamIndex == -1) {
cerr << "Failed to find video stream" << endl;
return -1;
}
// Get video codec parameters
AVCodecParameters* inputCodecParameters = inputFormatContext->streams[videoStreamIndex]->codecpar;
// Find video codec
inputCodec = avcodec_find_decoder(inputCodecParameters->codec_id);
if (inputCodec == nullptr) {
cerr << "Failed to find input video codec" << endl;
return -1;
}
// Allocate input codec context
inputCodecContext = avcodec_alloc_context3(inputCodec);
if (inputCodecContext == nullptr) {
cerr << "Failed to allocate input codec context" << endl;
return -1;
}
// Copy codec parameters to input codec context
if (avcodec_parameters_to_context(inputCodecContext, inputCodecParameters) < 0) {
cerr << "Failed to copy input codec parameters to context" << endl;
return -1;
}
// Open input codec
if (avcodec_open2(inputCodecContext, inputCodec, nullptr) < 0) {
cerr << "Failed to open input codec" << endl;
return -1;
}
// Allocate input frame
inputFrame = av_frame_alloc();
if (inputFrame == nullptr) {
cerr << "Failed to allocate input frame" << endl;
return -1;
}
// Open output file
if (avformat_alloc_output_context2(&outputFormatContext, nullptr, nullptr, outputFilePath) < 0) {
cerr << "Failed to allocate output format context" << endl;
return -1;
}
// Find output video codec
outputCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (outputCodec == nullptr) {
cerr << "Failed to find output video codec" << endl;
return -1;
}
// Allocate output codec context
outputCodecContext = avcodec_alloc_context3(outputCodec);
if (outputCodecContext == nullptr) {
cerr << "Failed to allocate output codec context" << endl;
return -1;
}
// Set output codec parameters
outputCodecContext->bit_rate = inputCodecContext->bit_rate;
outputCodecContext->width = inputCodecContext->width;
outputCodecContext->height = inputCodecContext->height;
outputCodecContext->time_base = inputCodecContext->time_base;
outputCodecContext->framerate = inputCodecContext->framerate;
outputCodecContext->gop_size = inputCodecContext->gop_size;
outputCodecContext->max_b_frames = inputCodecContext->max_b_frames;
outputCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
// Open output codec
if (avcodec_open2(outputCodecContext, outputCodec, nullptr) < 0) {
cerr << "Failed to open output codec" << endl;
return -1;
}
// Allocate output frame
outputFrame = av_frame_alloc();
if (outputFrame == nullptr) {
cerr << "Failed to allocate output frame" << endl;
return -1;
}
outputFrame->format = outputCodecContext->pix_fmt;
outputFrame->width = outputCodecContext->width;
outputFrame->height = outputCodecContext->height;
if (av_frame_get_buffer(outputFrame, 0) < 0) {
cerr << "Failed to allocate output frame buffer" << endl;
return -1;
}
// Write output file header
if (avformat_write_header(outputFormatContext, nullptr) < 0) {
cerr << "Failed to write output file header" << endl;
return -1;
}
// Read input file frames
while (av_read_frame(inputFormatContext, &inputPacket) == 0) {
if (inputPacket.stream_index == videoStreamIndex) {
// Decode input packet
if (avcodec_send_packet(inputCodecContext, &inputPacket) < 0) {
cerr << "Failed to send input packet for decoding" << endl;
return -1;
}
while (avcodec_receive_frame(inputCodecContext, inputFrame) == 0) {
// Convert input frame to output frame
if (sws_scale(
sws_getContext(inputCodecContext->width, inputCodecContext->height, inputCodecContext->pix_fmt,
outputCodecContext->width, outputCodecContext->height, outputCodecContext->pix_fmt,
SWS_BILINEAR, nullptr, nullptr, nullptr),
inputFrame->data, inputFrame->linesize, 0, inputCodecContext->height,
outputFrame->data, outputFrame->linesize) <= 0) {
cerr << "Failed to convert input frame to output frame" << endl;
return -1;
}
// Encode output frame
if (avcodec_send_frame(outputCodecContext, outputFrame) < 0) {
cerr << "Failed to send output frame for encoding" << endl;
return -1;
}
while (avcodec_receive_packet(outputCodecContext, &outputPacket) == 0) {
// Write output packet to file
if (av_write_frame(outputFormatContext, &outputPacket) < 0) {
cerr << "Failed to write output packet to file" << endl;
return -1;
}
av_packet_unref(&outputPacket);
}
}
}
av_packet_unref(&inputPacket);
}
// Flush output codec
if (avcodec_send_frame(outputCodecContext, nullptr) < 0) {
cerr << "Failed to send NULL frame for flushing output codec" << endl;
return -1;
}
while (avcodec_receive_packet(outputCodecContext, &outputPacket) == 0) {
// Write output packet to file
if (av_write_frame(outputFormatContext, &outputPacket) < 0) {
cerr << "Failed to write output packet to file" << endl;
return -1;
}
av_packet_unref(&outputPacket);
}
// Write output file trailer
if (av_write_trailer(outputFormatContext) < 0) {
cerr << "Failed to write output file trailer" << endl;
return -1;
}
// Clean up
av_frame_free(&inputFrame);
avcodec_free_context(&inputCodecContext);
avformat_close_input(&inputFormatContext);
av_frame_free(&outputFrame);
avcodec_free_context(&outputCodecContext);
avformat_free_context(outputFormatContext);
return 0;
}
```
此示例假设输入的 MP4 文件中包含一路视频流,并将其重新编码为 H.264 裸流(.h264)输出。如果输入视频为其他编码格式,示例会通过 `avcodec_find_decoder` 自动查找对应的解码器,但可能需要相应调整编码参数。此外,编译时需要包含 FFmpeg 的头文件并链接相应的库(avformat、avcodec、swscale 等)。
C++ffmpeg视频编码
以下是使用C++和FFmpeg进行视频编码的基本步骤:
1.导入FFmpeg库并初始化:
```c++
#include <iostream>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
}
int main(int argc, char* argv[]) {
av_register_all();
avcodec_register_all();
avformat_network_init();
return 0;
}
```
2.打开输入文件并获取输入格式:
```c++
AVFormatContext* inputFormatContext = NULL;
if (avformat_open_input(&inputFormatContext, inputFileName, NULL, NULL) != 0) {
std::cout << "Could not open input file " << inputFileName << std::endl;
return -1;
}
if (avformat_find_stream_info(inputFormatContext, NULL) < 0) {
std::cout << "Could not find stream information" << std::endl;
return -1;
}
```
3.查找视频流并获取编解码器:
```c++
AVCodec* codec = NULL;
int videoStreamIndex = -1;
for (int i = 0; i < inputFormatContext->nb_streams; i++) {
if (inputFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
videoStreamIndex = i;
codec = avcodec_find_decoder(inputFormatContext->streams[i]->codecpar->codec_id);
if (codec == NULL) {
std::cout << "Unsupported codec!" << std::endl;
return -1;
}
break;
}
}
if (videoStreamIndex == -1) {
std::cout << "Could not find video stream!" << std::endl;
return -1;
}
AVCodecContext* codecContext = avcodec_alloc_context3(codec);
if (codecContext == NULL) {
std::cout << "Could not allocate codec context!" << std::endl;
return -1;
}
if (avcodec_parameters_to_context(codecContext, inputFormatContext->streams[videoStreamIndex]->codecpar) < 0) {
std::cout << "Could not copy codec parameters to codec context!" << std::endl;
return -1;
}
if (avcodec_open2(codecContext, codec, NULL) < 0) {
std::cout << "Could not open codec!" << std::endl;
return -1;
}
```
4.创建输出文件并获取输出格式:
```c++
AVFormatContext* outputFormatContext = NULL;
if (avformat_alloc_output_context2(&outputFormatContext, NULL, NULL, outputFileName) < 0) {
std::cout << "Could not allocate output format context!" << std::endl;
return -1;
}
AVOutputFormat* outputFormat = outputFormatContext->oformat;
if (outputFormat->video_codec == AV_CODEC_ID_NONE) {
std::cout << "Could not find suitable video codec!" << std::endl;
return -1;
}
AVStream* outputStream = avformat_new_stream(outputFormatContext, NULL);
if (outputStream == NULL) {
std::cout << "Could not allocate output stream!" << std::endl;
return -1;
}
AVCodecContext* outputCodecContext = avcodec_alloc_context3(NULL);
if (outputCodecContext == NULL) {
std::cout << "Could not allocate output codec context!" << std::endl;
return -1;
}
outputCodecContext->codec_id = outputFormat->video_codec;
outputCodecContext->codec_type = AVMEDIA_TYPE_VIDEO;
outputCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
outputCodecContext->width = codecContext->width;
outputCodecContext->height = codecContext->height;
outputCodecContext->time_base = codecContext->time_base;
outputCodecContext->bit_rate = 400000;
outputCodecContext->gop_size = 10;
outputCodecContext->max_b_frames = 1;
if (outputFormatContext->oformat->flags & AVFMT_GLOBALHEADER) {
outputCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
if (avcodec_open2(outputCodecContext, NULL, NULL) < 0) {
std::cout << "Could not open output codec!" << std::endl;
return -1;
}
if (avcodec_parameters_from_context(outputStream->codecpar, outputCodecContext) < 0) {
std::cout << "Could not copy codec parameters to output stream!" << std::endl;
return -1;
}
```
5.创建SwsContext并分配缓冲区:
```c++
// Step 5: build the pixel-format/size converter plus the two frame buffers.
SwsContext* swsContext = sws_getContext(
    codecContext->width, codecContext->height, codecContext->pix_fmt,
    outputCodecContext->width, outputCodecContext->height, outputCodecContext->pix_fmt,
    SWS_BICUBIC, NULL, NULL, NULL);
if (!swsContext) {
    std::cout << "Could not create SwsContext!" << std::endl;
    return -1;
}
// The decoder fills inputFrame; the converter writes into outputFrame.
AVFrame* inputFrame = av_frame_alloc();
if (!inputFrame) {
    std::cout << "Could not allocate input frame!" << std::endl;
    return -1;
}
AVFrame* outputFrame = av_frame_alloc();
if (!outputFrame) {
    std::cout << "Could not allocate output frame!" << std::endl;
    return -1;
}
// format/width/height must be set before av_frame_get_buffer() can size
// the buffers; 32 requests a 32-byte alignment for the data planes.
outputFrame->format = outputCodecContext->pix_fmt;
outputFrame->width = outputCodecContext->width;
outputFrame->height = outputCodecContext->height;
if (av_frame_get_buffer(outputFrame, 32) < 0) {
    std::cout << "Could not allocate output frame buffer!" << std::endl;
    return -1;
}
```
6.读取输入文件并编码输出文件:
```c++
AVPacket packet;
av_init_packet(&packet);
packet.data = NULL;
packet.size = 0;
while (av_read_frame(inputFormatContext, &packet) >= 0) {
if (packet.stream_index == videoStreamIndex) {
if (avcodec_send_packet(codecContext, &packet) < 0) {
std::cout << "Error sending a packet for decoding!" << std::endl;
break;
}
while (avcodec_receive_frame(codecContext, inputFrame) == 0) {
sws_scale(swsContext, inputFrame->data, inputFrame->linesize, 0, codecContext->height, outputFrame->data, outputFrame->linesize);
outputFrame->pts = av_rescale_q(inputFrame->pts, codecContext->time_base, outputCodecContext->time_base);
if (avcodec_send_frame(outputCodecContext, outputFrame) < 0) {
std::cout << "Error sending a frame for encoding!" << std::endl;
break;
}
while (avcodec_receive_packet(outputCodecContext, &packet) == 0) {
av_packet_rescale_ts(&packet, outputCodecContext->time_base, outputStream->time_base);
packet.stream_index = outputStream->index;
if (av_interleaved_write_frame(outputFormatContext, &packet) < 0) {
std::cout << "Error while writing output packet!" << std::endl;
break;
}
av_packet_unref(&packet);
}
}
}
av_packet_unref(&packet);
}
```
7.释放资源:
```c++
// Step 7: release everything acquired in the previous steps.
// NOTE(review): av_write_trailer(outputFormatContext) should be called before
// this cleanup if a header was written — confirm against the muxing flow.
// BUG FIX: the original also called avformat_free_context(inputFormatContext)
// after avformat_close_input(); close_input already frees the context and
// NULLs the pointer, so that extra call was redundant.
avformat_close_input(&inputFormatContext);
avcodec_free_context(&codecContext);
avcodec_free_context(&outputCodecContext);
avformat_free_context(outputFormatContext);
av_frame_free(&inputFrame);
av_frame_free(&outputFrame);
sws_freeContext(swsContext);
```
相关推荐
![zip](https://img-home.csdnimg.cn/images/20210720083736.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![rar](https://img-home.csdnimg.cn/images/20210720083606.png)