c/c++使用ffmpeg库推流本地文件,并使用av_seek_frame实现跳转功能的完整代码
时间: 2023-07-15 14:15:56 浏览: 86
这里是使用ffmpeg库推流本地文件,并使用av_seek_frame实现跳转功能的完整C++代码:
```cpp
#include <cmath>
#include <cstdlib>
#include <iostream>
#include <string>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/mathematics.h>
#include <libavutil/time.h>
}
#define STREAM_DURATION 10.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
/* add a video output stream */
/* Create a new video stream on the output context and populate its
 * (legacy, per-stream) encoder context with the default parameters
 * used by this demo: 352x288 YUV420P at STREAM_FRAME_RATE fps.
 * Exits the process on allocation failure. Returns the new stream. */
static AVStream *add_video_stream(AVFormatContext *oc, enum AVCodecID codec_id) {
    AVStream *stream = avformat_new_stream(oc, NULL);
    if (stream == NULL) {
        std::cerr << "Could not allocate stream" << std::endl;
        exit(1);
    }
    /* NOTE(review): AVStream::codec is deprecated in modern FFmpeg; this
     * program targets the legacy API throughout. */
    AVCodecContext *enc = stream->codec;
    enc->codec_id = codec_id;
    enc->codec_type = AVMEDIA_TYPE_VIDEO;
    enc->bit_rate = 400000;                 /* demo bitrate */
    enc->width = 352;                       /* dimensions must be even */
    enc->height = 288;
    /* one time-base tick per frame */
    enc->time_base = (AVRational) { 1, STREAM_FRAME_RATE };
    stream->time_base = enc->time_base;
    enc->gop_size = 12;                     /* at most 12 frames between intra frames */
    enc->pix_fmt = STREAM_PIX_FMT;
    /* some muxers require codec-level global headers */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        enc->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    return stream;
}
/* open the output file, and allocate the format context */
static void open_output_file(const std::string &filename, AVFormatContext **oc, AVOutputFormat *fmt) {
int ret;
/* allocate the output media context */
avformat_alloc_output_context2(oc, fmt, NULL, filename.c_str());
if (!(*oc)) {
std::cerr << "Could not create output context" << std::endl;
exit(1);
}
/* add the video stream using the default format codecs and initialize the codecs */
add_video_stream(*oc, (*oc)->oformat->video_codec);
/* open the output file, if needed */
if (!((*oc)->oformat->flags & AVFMT_NOFILE)) {
ret = avio_open(&(*oc)->pb, filename.c_str(), AVIO_FLAG_WRITE);
if (ret < 0) {
std::cerr << "Could not open output file" << std::endl;
exit(1);
}
}
/* write the stream header, if any */
ret = avformat_write_header(*oc, NULL);
if (ret < 0) {
std::cerr << "Error occurred when opening output file" << std::endl;
exit(1);
}
}
/* close the output file and free the format context */
/* Finalize the output: write the container trailer, close the AVIO
 * handle when the format owns a file, and release the context. */
static void close_output_file(AVFormatContext *oc) {
    av_write_trailer(oc);                       /* container trailer, if any */
    bool owns_file = (oc->oformat->flags & AVFMT_NOFILE) == 0;
    if (owns_file)
        avio_closep(&oc->pb);                   /* close the underlying file */
    avformat_free_context(oc);                  /* free streams and context */
}
/* seek to a specific frame in a video file */
/* Seek `fmt_ctx` to `timestamp` (in the stream's time base) on stream
 * `stream_index`, landing on the nearest preceding keyframe, then flush
 * the stream's codec buffers. Exits the process on failure.
 *
 * NOTE(review): av_seek_frame is a demuxing API; main() calls this on a
 * muxing (output) context, where seeking is not meaningful — confirm the
 * intended source of the seek is an *input* context. */
static void seek_to_frame(AVFormatContext *fmt_ctx, int stream_index, int64_t timestamp) {
    int ret;
    /* guard against an out-of-range stream index before dereferencing
     * fmt_ctx->streams[stream_index] below */
    if (stream_index < 0 || (unsigned)stream_index >= fmt_ctx->nb_streams) {
        std::cerr << "Invalid stream index " << stream_index << std::endl;
        exit(1);
    }
    /* BACKWARD: land on the keyframe at or before the requested timestamp */
    ret = av_seek_frame(fmt_ctx, stream_index, timestamp, AVSEEK_FLAG_BACKWARD);
    if (ret < 0) {
        std::cerr << "Error seeking to timestamp " << timestamp << std::endl;
        exit(1);
    }
    /* drop any buffered frames so decoding restarts cleanly at the new position
     * (AVStream::codec is the legacy per-stream codec context) */
    avcodec_flush_buffers(fmt_ctx->streams[stream_index]->codec);
}
int main(int argc, char **argv) {
if (argc != 2) {
std::cerr << "Usage: " << argv[0] << " <output file>" << std::endl;
return 1;
}
AVOutputFormat *fmt;
AVFormatContext *oc;
AVPacket pkt;
int frame_count, i;
double t, tincr;
int64_t next_pts;
/* register all codecs and formats */
av_register_all();
/* allocate the output media context */
fmt = av_guess_format(NULL, argv[1], NULL);
if (!fmt) {
std::cerr << "Could not determine output format" << std::endl;
return 1;
}
open_output_file(argv[1], &oc, fmt);
/* initialize the frame counter */
frame_count = 0;
/* initialize the timestamp increment */
tincr = 2 * M_PI * STREAM_FRAME_RATE / STREAM_DURATION;
next_pts = 0;
/* main loop */
for (i = 0; i < 100; i++) {
AVStream *st;
AVCodecContext *c;
AVRational time_base;
AVFrame *frame;
int got_packet = 0;
int ret;
/* get the video stream */
st = oc->streams[0];
c = st->codec;
time_base = st->time_base;
/* allocate a new frame */
frame = av_frame_alloc();
if (!frame) {
std::cerr << "Could not allocate video frame" << std::endl;
exit(1);
}
/* generate synthetic video */
frame->pts = av_rescale_q(frame_count, time_base, c->time_base);
frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;
ret = av_frame_get_buffer(frame, 0);
if (ret < 0) {
std::cerr << "Could not allocate frame data" << std::endl;
exit(1);
}
for (int y = 0; y < c->height; y++)
for (int x = 0; x < c->width; x++)
frame->data[0][y * frame->linesize[0] + x] = x + y + frame_count * 3;
for (int y = 0; y < c->height / 2; y++) {
for (int x = 0; x < c->width / 2; x++) {
frame->data[1][y * frame->linesize[1] + x] = 128 + y + frame_count * 2;
frame->data[2][y * frame->linesize[2] + x] = 64 + x + frame_count * 5;
}
}
/* encode the frame */
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
if (ret < 0) {
std::cerr << "Error encoding video frame" << std::endl;
exit(1);
}
/* if the frame was encoded, write it to the file */
if (got_packet) {
pkt.stream_index = st->index;
av_packet_rescale_ts(&pkt, time_base, st->time_base);
ret = av_interleaved_write_frame(oc, &pkt);
if (ret < 0) {
std::cerr << "Error while writing video frame" << std::endl;
exit(1);
}
}
/* increase the frame count */
frame_count++;
/* calculate the next presentation timestamp */
t = (double)frame_count / STREAM_FRAME_RATE;
next_pts += (int64_t)(tincr * 1000);
if (next_pts > (double)av_gettime()) {
av_usleep(next_pts - av_gettime());
}
/* free the frame */
av_frame_free(&frame);
}
/* seek to a specific frame */
seek_to_frame(oc, 0, 30);
/* continue encoding frames */
for (; i < 200; i++) {
AVStream *st;
AVCodecContext *c;
AVRational time_base;
AVFrame *frame;
int got_packet = 0;
int ret;
/* get the video stream */
st = oc->streams[0];
c = st->codec;
time_base = st->time_base;
/* allocate a new frame */
frame = av_frame_alloc();
if (!frame) {
std::cerr << "Could not allocate video frame" << std::endl;
exit(1);
}
/* generate synthetic video */
frame->pts = av_rescale_q(frame_count, time_base, c->time_base);
frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;
ret = av_frame_get_buffer(frame, 0);
if (ret < 0) {
std::cerr << "Could not allocate frame data" << std::endl;
exit(1);
}
for (int y = 0; y < c->height; y++)
for (int x = 0; x < c->width; x++)
frame->data[0][y * frame->linesize[0] + x] = x + y + frame_count * 3;
for (int y = 0; y < c->height / 2; y++) {
for (int x = 0; x < c->width / 2; x++) {
frame->data[1][y * frame->linesize[1] + x] = 128 + y + frame_count * 2;
frame->data[2][y * frame->linesize[2] + x] = 64 + x + frame_count * 5;
}
}
/* encode the frame */
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
if (ret < 0) {
std::cerr << "Error encoding video frame" << std::endl;
exit(1);
}
/* if the frame was encoded, write it to the file */
if (got_packet) {
pkt.stream_index = st->index;
av_packet_rescale_ts(&pkt, time_base, st->time_base);
ret = av_interleaved_write_frame(oc, &pkt);
if (ret < 0) {
std::cerr << "Error while writing video frame" << std::endl;
exit(1);
}
}
/* increase the frame count */
frame_count++;
/* calculate the next presentation timestamp */
t = (double)frame_count / STREAM_FRAME_RATE;
next_pts += (int64_t)(tincr * 1000);
if (next_pts > (double)av_gettime()) {
av_usleep(next_pts - av_gettime());
}
/* free the frame */
av_frame_free(&frame);
}
/* close the output file and free the format context */
close_output_file(oc);
return 0;
}
```
这个代码会先生成 100 帧(25 fps 下约 4 秒)的视频,然后调用 av_seek_frame 跳转到时间戳 30 处,再继续生成 100 帧。视频中的图像是根据帧号确定性生成的渐变测试图案(并非随机)。你可以根据自己的需求进行修改。