ffmpeg4.2 cuda 硬解码 多路并发 代码
时间: 2023-07-25 22:10:56 浏览: 59
以下是使用FFmpeg和CUDA实现多路并发硬解码的示例代码:
```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <cuda.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libavutil/hwcontext.h>
/* Hard cap on how many inputs are decoded concurrently. */
#define MAX_STREAMS 4
/* Per-stream state, indexed by slot 0..num_streams-1. */
static AVCodecContext *codec_ctx[MAX_STREAMS];      /* decoder context per stream */
static AVCodecParameters *codec_params[MAX_STREAMS]; /* borrowed from the demuxer; do not free */
static AVFormatContext *format_ctx[MAX_STREAMS];    /* demuxer context per input file */
static int stream_index[MAX_STREAMS];               /* index of the video stream inside its container */
static pthread_t threads[MAX_STREAMS];              /* one decoding thread per stream */
static int num_streams = 0;                         /* number of successfully opened streams */
/* Shared stop flag read by the worker threads.
 * NOTE(review): plain int is not a synchronization primitive — this should
 * be atomic (or volatile sig_atomic_t for signal use); confirm intent. */
static int quit = 0;
/* Raw CUDA driver / NVDEC handles.
 * NOTE(review): with FFmpeg's hw_device_ctx doing the CUDA decode, these
 * manual cuvid handles appear redundant — verify they are actually needed. */
static CUcontext cuda_ctx;
static CUstream cuda_stream[MAX_STREAMS];
static CUvideodecoder decoder[MAX_STREAMS];
/* CUDA hardware device context per stream (AV_HWDEVICE_TYPE_CUDA). */
static AVBufferRef *hw_device_ctx[MAX_STREAMS];
static void *decode_thread(void *arg)
{
int stream_index = *(int *)arg;
AVPacket packet;
AVFrame *frame = NULL;
int ret;
while (!quit)
{
ret = av_read_frame(format_ctx[stream_index], &packet);
if (ret < 0)
break;
if (packet.stream_index == stream_index)
{
ret = avcodec_send_packet(codec_ctx[stream_index], &packet);
if (ret < 0)
break;
while (ret >= 0)
{
frame = av_frame_alloc();
ret = avcodec_receive_frame(codec_ctx[stream_index], frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
{
av_frame_free(&frame);
break;
}
// Send frame to decoder
CUVIDSOURCEDATAPACKET pkt = { 0 };
pkt.payload_size = frame->pkt_size;
pkt.payload = frame->data[0];
pkt.flags = CUVID_PKT_TIMESTAMP;
pkt.timestamp = frame->pts;
cuvidParseVideoData(decoder[stream_index], &pkt);
av_frame_free(&frame);
}
}
av_packet_unref(&packet);
}
return NULL;
}
int main(int argc, char *argv[])
{
int ret, i;
if (argc < 2)
{
fprintf(stderr, "Usage: %s <input file(s)>\n", argv[0]);
return 1;
}
av_log_set_level(AV_LOG_INFO);
// Initialize CUDA
cuInit(0);
cuDeviceGet(NULL, 0);
cuCtxCreate(&cuda_ctx, CU_CTX_SCHED_BLOCKING_SYNC, 0);
// Initialize FFmpeg
av_register_all();
avcodec_register_all();
avformat_network_init();
// Open input files and extract streams
for (i = 1; i < argc && num_streams < MAX_STREAMS; i++)
{
ret = avformat_open_input(&format_ctx[num_streams], argv[i], NULL, NULL);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Failed to open input file %s: %s\n", argv[i], av_err2str(ret));
continue;
}
ret = avformat_find_stream_info(format_ctx[num_streams], NULL);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Failed to find stream info for input file %s: %s\n", argv[i], av_err2str(ret));
avformat_close_input(&format_ctx[num_streams]);
continue;
}
stream_index[num_streams] = av_find_best_stream(format_ctx[num_streams], AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
if (stream_index[num_streams] < 0)
{
av_log(NULL, AV_LOG_ERROR, "Failed to find video stream for input file %s\n", argv[i]);
avformat_close_input(&format_ctx[num_streams]);
continue;
}
codec_params[num_streams] = format_ctx[num_streams]->streams[stream_index[num_streams]]->codecpar;
codec_ctx[num_streams] = avcodec_alloc_context3(NULL);
ret = avcodec_parameters_to_context(codec_ctx[num_streams], codec_params[num_streams]);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Failed to copy codec parameters for input file %s: %s\n", argv[i], av_err2str(ret));
avcodec_free_context(&codec_ctx[num_streams]);
avformat_close_input(&format_ctx[num_streams]);
continue;
}
// Initialize hardware decoding
codec_ctx[num_streams]->hw_device_ctx = av_buffer_ref(hw_device_ctx[num_streams]);
codec_ctx[num_streams]->get_format = av_hwdevice_get_hwframe_constraints;
codec_ctx[num_streams]->opaque = codec_params[num_streams];
ret = av_hwframe_ctx_init(codec_ctx[num_streams]->hw_frames_ctx);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Failed to initialize hardware decoding for input file %s: %s\n", argv[i], av_err2str(ret));
avcodec_free_context(&codec_ctx[num_streams]);
avformat_close_input(&format_ctx[num_streams]);
continue;
}
// Initialize CUDA decoder
CUVIDDECODECREATEINFO create_info = { 0 };
create_info.CodecType = codec_params[num_streams]->codec_id;
create_info.ulWidth = codec_params[num_streams]->width;
create_info.ulHeight = codec_params[num_streams]->height;
create_info.ulMaxWidth = codec_params[num_streams]->width;
create_info.ulMaxHeight = codec_params[num_streams]->height;
create_info.ulNumDecodeSurfaces = 8;
create_info.ChromaFormat = codec_params[num_streams]->format;
create_info.OutputFormat = cudaVideoSurfaceFormat_NV12;
create_info.DeinterlaceMode = cudaVideoDeinterlaceMode_Adaptive;
cuvidCreateDecoder(&decoder[num_streams], &create_info);
// Initialize CUDA context and stream
cuCtxSetCurrent(cuda_ctx);
cuStreamCreate(&cuda_stream[num_streams], CU_STREAM_NON_BLOCKING);
// Start decoding thread
pthread_create(&threads[num_streams], NULL, decode_thread, &num_streams);
num_streams++;
}
// Wait for decoding threads to finish
for (i = 0; i < num_streams; i++)
{
pthread_join(threads[i], NULL);
}
// Clean up
for (i = 0; i < num_streams; i++)
{
avcodec_free_context(&codec_ctx[i]);
avformat_close_input(&format_ctx[i]);
cuvidDestroyDecoder(decoder[i]);
cuStreamDestroy(cuda_stream[i]);
}
cuCtxDestroy(cuda_ctx);
return 0;
}
```
请注意,这只是一个示例代码,您需要根据您的实际需求进行修改和定制化。同时,如果您没有足够的CUDA和FFmpeg编程经验,建议您参考官方文档和其他教程,避免出现错误和安全问题。