c/c++使用ffmpeg库实现推流本地视频文件,播放过程中可控制跳转到视频指定位置的完整代码
时间: 2023-11-22 08:53:30 浏览: 674
c++调用ffmpeg api将视频文件内容进行udp组播推流
以下是使用FFmpeg库实现推流本地视频文件,并且允许跳转到指定位置的完整代码。需要注意的是,本代码只实现了视频流的推流,如果需要同时推送音频流,还需要在代码中添加相应的音频处理代码。
```c++
#include <iostream>
#include <fstream>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cmath>
#include <ctime>
extern "C"{
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
}
using namespace std;
// Output frame rate (fps) and pixel format for the encoded H.264 stream.
#define STREAM_FRAME_RATE 25
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P
// Timestamp-rebasing state: first observed pts/dts, used to shift output
// timestamps; reset to 0 after a successful seek (see process_seek).
int64_t last_pts = 0;
int64_t last_dts = 0;
// Pending seek request; -1 in target/stream_index means "no seek requested".
int64_t seek_target = -1;
int64_t seek_stream_index = -1;
int64_t seek_flags = AVSEEK_FLAG_ANY;
// Demuxer and video decoder for the input file (set by open_input_file).
AVFormatContext *input_fmt_ctx = NULL;
AVCodecContext *input_codec_ctx = NULL;
// Start time and duration of the input video stream, in that stream's
// time_base units; AV_NOPTS_VALUE until open_input_file succeeds.
int64_t input_start_time = AV_NOPTS_VALUE;
int64_t input_duration = AV_NOPTS_VALUE;
// Output muxer context (set by open_output_file).
AVFormatContext *output_fmt_ctx = NULL;
int open_input_file(const char *filename)
{
int ret = 0;
AVCodec *input_codec = NULL;
if ((ret = avformat_open_input(&input_fmt_ctx, filename, NULL, NULL)) < 0) {
fprintf(stderr, "Could not open input file '%s'", filename);
goto end;
}
if ((ret = avformat_find_stream_info(input_fmt_ctx, NULL)) < 0) {
fprintf(stderr, "Failed to retrieve input stream information");
goto end;
}
av_dump_format(input_fmt_ctx, 0, filename, 0);
for (int i = 0; i < input_fmt_ctx->nb_streams; i++) {
AVStream *stream = input_fmt_ctx->streams[i];
if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
input_codec_ctx = avcodec_alloc_context3(NULL);
if (!input_codec_ctx) {
fprintf(stderr, "Failed to allocate codec context");
ret = AVERROR(ENOMEM);
goto end;
}
if ((ret = avcodec_parameters_to_context(input_codec_ctx, stream->codecpar)) < 0) {
fprintf(stderr, "Failed to copy codec parameters to codec context");
goto end;
}
input_codec = avcodec_find_decoder(input_codec_ctx->codec_id);
if (!input_codec) {
fprintf(stderr, "Failed to find decoder for codec ID %d", input_codec_ctx->codec_id);
ret = AVERROR_DECODER_NOT_FOUND;
goto end;
}
if ((ret = avcodec_open2(input_codec_ctx, input_codec, NULL)) < 0) {
fprintf(stderr, "Failed to open codec");
goto end;
}
input_start_time = stream->start_time != AV_NOPTS_VALUE ? stream->start_time : 0;
input_duration = stream->duration;
break;
}
}
if (!input_codec_ctx) {
fprintf(stderr, "Failed to find video stream");
ret = AVERROR_INVALIDDATA;
goto end;
}
end:
if (ret < 0) {
avcodec_free_context(&input_codec_ctx);
avformat_close_input(&input_fmt_ctx);
}
return ret;
}
int open_output_file(const char *filename)
{
int ret = 0;
if ((ret = avformat_alloc_output_context2(&output_fmt_ctx, NULL, NULL, filename)) < 0) {
fprintf(stderr, "Could not create output context");
goto end;
}
AVCodec *output_codec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (!output_codec) {
fprintf(stderr, "Failed to find encoder");
ret = AVERROR_ENCODER_NOT_FOUND;
goto end;
}
AVStream *stream = avformat_new_stream(output_fmt_ctx, output_codec);
if (!stream) {
fprintf(stderr, "Failed to create new stream");
ret = AVERROR(ENOMEM);
goto end;
}
AVCodecContext *output_codec_ctx = avcodec_alloc_context3(output_codec);
if (!output_codec_ctx) {
fprintf(stderr, "Failed to allocate codec context");
ret = AVERROR(ENOMEM);
goto end;
}
output_codec_ctx->codec_id = AV_CODEC_ID_H264;
output_codec_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
output_codec_ctx->pix_fmt = STREAM_PIX_FMT;
output_codec_ctx->width = input_codec_ctx->width;
output_codec_ctx->height = input_codec_ctx->height;
output_codec_ctx->time_base = av_make_q(1, STREAM_FRAME_RATE);
stream->time_base = output_codec_ctx->time_base;
if ((ret = avcodec_open2(output_codec_ctx, output_codec, NULL)) < 0) {
fprintf(stderr, "Failed to open codec");
goto end;
}
if ((ret = avcodec_parameters_from_context(stream->codecpar, output_codec_ctx)) < 0) {
fprintf(stderr, "Failed to copy codec context to codec parameters");
goto end;
}
av_dump_format(output_fmt_ctx, 0, filename, 1);
if (!(output_fmt_ctx->oformat->flags & AVFMT_NOFILE)) {
if ((ret = avio_open(&output_fmt_ctx->pb, filename, AVIO_FLAG_WRITE)) < 0) {
fprintf(stderr, "Could not open output file '%s'", filename);
goto end;
}
}
if ((ret = avformat_write_header(output_fmt_ctx, NULL)) < 0) {
fprintf(stderr, "Failed to write header");
goto end;
}
end:
if (ret < 0) {
avcodec_free_context(&output_codec_ctx);
avformat_free_context(output_fmt_ctx);
}
return ret;
}
// Decodes one demuxed packet, rebases the decoded frames' timestamps to the
// STREAM_FRAME_RATE time base, re-encodes them and writes the resulting
// packets to the output muxer. Always consumes (unrefs) `pkt`.
//
// Returns 0 on success, a negative AVERROR code on a real failure.
//
// NOTE(review): encoding goes through output_fmt_ctx->streams[0]->codec, the
// long-deprecated AVStream::codec field (removed in FFmpeg >= 5). Even on
// older FFmpeg this is not the encoder context that open_output_file()
// opened, so the encode path needs the opened context wired through -- verify.
// NOTE(review): the rescales below use the decoder's time_base, but frame
// timestamps coming from the demuxer are in the *stream's* time_base --
// confirm these match for the inputs used.
int process_packet(AVPacket *pkt)
{
    int ret = 0;
    AVFrame *frame = NULL;
    AVPacket output_pkt = { 0 };

    if ((ret = avcodec_send_packet(input_codec_ctx, pkt)) < 0) {
        fprintf(stderr, "Error sending a packet for decoding\n");
        goto end;
    }
    // One reusable frame: avcodec_receive_frame() unrefs its destination
    // before filling it, so the original's per-iteration allocation (which
    // also leaked the frame on every error path) is unnecessary.
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Failed to allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }
    while (ret >= 0) {
        ret = avcodec_receive_frame(input_codec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            // Decoder needs more input / is drained: not an error. The
            // original returned this negative code to the caller, which made
            // main() abort after the very first packet.
            ret = 0;
            break;
        } else if (ret < 0) {
            fprintf(stderr, "Error during decoding\n");
            goto end;
        }
        // Shift timestamps so the stream starts at zero, then rescale to the
        // output frame-rate time base.
        frame->pts = av_rescale_q(frame->pts - input_start_time, input_codec_ctx->time_base, av_make_q(1, STREAM_FRAME_RATE));
        frame->pkt_dts = av_rescale_q(frame->pkt_dts - input_start_time, input_codec_ctx->time_base, av_make_q(1, STREAM_FRAME_RATE));
        frame->pkt_duration = av_rescale_q(frame->pkt_duration, input_codec_ctx->time_base, av_make_q(1, STREAM_FRAME_RATE));
        // NOTE(review): last_pts doubles as an "initialized" flag, so a
        // legitimate first frame with pts == 0 is treated as uninitialized --
        // confirm intent.
        if (last_pts && frame->pts < last_pts) {
            fprintf(stderr, "Error: input file timestamps are not monotonic\n");
            ret = AVERROR_INVALIDDATA;
            goto end;
        }
        if (!last_pts) {
            last_pts = frame->pts;
            last_dts = frame->pkt_dts;
        }
        av_init_packet(&output_pkt);
        output_pkt.data = NULL;
        output_pkt.size = 0;
        frame->pts -= last_pts;
        frame->pkt_dts -= last_dts;
        if ((ret = avcodec_send_frame(output_fmt_ctx->streams[0]->codec, frame)) < 0) {
            fprintf(stderr, "Error sending a frame for encoding\n");
            goto end;
        }
        while (ret >= 0) {
            ret = avcodec_receive_packet(output_fmt_ctx->streams[0]->codec, &output_pkt);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                // Encoder wants more frames / is drained: not an error.
                ret = 0;
                break;
            } else if (ret < 0) {
                fprintf(stderr, "Error during encoding\n");
                goto end;
            }
            output_pkt.stream_index = 0;
            av_packet_rescale_ts(&output_pkt, output_fmt_ctx->streams[0]->codec->time_base, output_fmt_ctx->streams[0]->time_base);
            output_pkt.pts += last_pts;
            output_pkt.dts += last_dts;
            if ((ret = av_interleaved_write_frame(output_fmt_ctx, &output_pkt)) < 0) {
                fprintf(stderr, "Error writing packet\n");
                goto end;
            }
            av_packet_unref(&output_pkt);
        }
    }
end:
    av_frame_free(&frame);        // no-op on NULL; fixes the error-path leak
    av_packet_unref(&output_pkt); // release any packet left behind by an error
    av_packet_unref(pkt);
    return ret;
}
// Performs a pending seek request, if one is set.
//
// A request is "pending" when both seek_target and seek_stream_index are
// non-negative. On a successful seek the decoder is flushed and the
// timestamp-rebasing state (last_pts/last_dts) is reset so output timestamps
// restart cleanly. Whatever the outcome, the request globals are cleared
// back to their idle values.
//
// Returns 0 when nothing was pending or the seek succeeded, otherwise the
// negative AVERROR code from avformat_seek_file().
int process_seek()
{
    // No request pending -> nothing to do.
    if (seek_target < 0 || seek_stream_index < 0) {
        return 0;
    }

    int rc = avformat_seek_file(input_fmt_ctx, seek_stream_index, INT64_MIN,
                                seek_target, seek_target, seek_flags);
    if (rc < 0) {
        fprintf(stderr, "Error seeking to timestamp %" PRId64 "\n", seek_target);
    } else {
        avcodec_flush_buffers(input_codec_ctx);
        last_pts = 0;
        last_dts = 0;
    }

    // Clear the request regardless of success, matching the original's
    // fall-through cleanup at its `end:` label.
    seek_target = -1;
    seek_stream_index = -1;
    seek_flags = AVSEEK_FLAG_ANY;
    return rc;
}
// Entry point: reads packets from the input file (argv[1]), hands video
// packets to process_packet() for decode/re-encode, writes the result to
// the output (argv[2]) and finalizes the container with a trailer.
//
// NOTE(review): seek_target is never assigned anywhere in this file, so the
// seek branch below can never fire -- presumably an external control channel
// was meant to set it; verify.
// NOTE(review): neither the decoder nor the encoder is drained (flushed with
// a NULL packet/frame) at EOF, so trailing buffered frames may be lost.
int main(int argc, char *argv[])
{
int ret = 0;
// Zero-initialized so the av_packet_unref() at `end` is safe even when a
// goto is taken before any packet was read.
AVPacket pkt = { 0 };
if (argc != 3) {
fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
return 1;
}
if ((ret = open_input_file(argv[1])) < 0) {
goto end;
}
if ((ret = open_output_file(argv[2])) < 0) {
goto end;
}
// Main demux loop: read until EOF, forwarding only stream 0's packets.
while (true) {
if ((ret = av_read_frame(input_fmt_ctx, &pkt)) < 0) {
if (ret == AVERROR_EOF) {
break;
}
else {
fprintf(stderr, "Error reading packet");
goto end;
}
}
// Only the first stream (assumed to be the video stream opened by
// open_input_file) is processed; everything else is dropped.
if (pkt.stream_index != 0) {
av_packet_unref(&pkt);
continue;
}
// Execute a pending seek once playback has reached the requested point.
if (seek_target >= 0 && pkt.dts >= seek_target) {
process_seek();
}
// process_packet() consumes (unrefs) pkt in all cases.
if ((ret = process_packet(&pkt)) < 0) {
goto end;
}
}
if ((ret = av_write_trailer(output_fmt_ctx)) < 0) {
fprintf(stderr, "Failed to write trailer");
goto end;
}
end:
av_packet_unref(&pkt);
avcodec_free_context(&input_codec_ctx);
avformat_close_input(&input_fmt_ctx);
avformat_free_context(output_fmt_ctx);
return ret;
}
```
该代码的主要思路是:先打开输入文件、找到视频流并打开解码器,然后创建输出文件并初始化 H.264 编码器,最后循环读取输入文件的数据包,解码后重新编码并写入输出。在循环过程中,可以根据需要跳转到指定的时间戳。
阅读全文