Write a C++ example based on ffmpeg that reads an MP3 file and outputs an RTSP audio stream
Sure, here is a C example based on FFmpeg that reads an MP3 file and outputs an RTSP audio stream:
```c
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
#include <libavutil/opt.h>
#include <libavutil/time.h>
#include <libavutil/audio_fifo.h>
#define AUDIO_CODEC_ID AV_CODEC_ID_AAC
int main(int argc, char **argv)
{
    const char *input_file_name = "input.mp3";
    /* The RTSP muxer expects an rtsp:// URL, not a local file name. This is
     * just an example address; an RTSP server (e.g. mediamtx) must already be
     * listening there. */
    const char *output_url = "rtsp://127.0.0.1:8554/live";
    const char *output_format_name = "rtsp";

    AVFormatContext *inctx = NULL, *outctx = NULL;
    AVCodecContext *dec_ctx = NULL, *enc_ctx = NULL;
    AVFrame *frame = NULL, *enc_frame = NULL;
    AVAudioFifo *fifo = NULL;
    AVPacket pkt;
    int ret, i;
    int64_t next_pts = 0; /* timestamp of the next encoded frame, in samples */

    /* Only needed for FFmpeg < 4.0; both calls were removed in FFmpeg 5.0. */
    av_register_all();
    avcodec_register_all();
    avformat_network_init();

    /* ---- open the MP3 input and locate its audio stream ---- */
    ret = avformat_open_input(&inctx, input_file_name, NULL, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open input file %s\n", input_file_name);
        return 1;
    }
    ret = avformat_find_stream_info(inctx, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not find stream information\n");
        return 1;
    }
    av_dump_format(inctx, 0, input_file_name, 0);

    AVStream *in_audio_stream = NULL;
    for (i = 0; i < inctx->nb_streams; i++) {
        if (inctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            in_audio_stream = inctx->streams[i];
            break;
        }
    }
    if (!in_audio_stream) {
        fprintf(stderr, "Could not find input audio stream\n");
        return 1;
    }

    /* ---- set up the MP3 decoder ---- */
    AVCodec *decoder = avcodec_find_decoder(in_audio_stream->codecpar->codec_id);
    if (!decoder) {
        fprintf(stderr, "Decoder not found\n");
        return 1;
    }
    dec_ctx = avcodec_alloc_context3(decoder);
    if (!dec_ctx) {
        fprintf(stderr, "Could not allocate decoder context\n");
        return 1;
    }
    ret = avcodec_parameters_to_context(dec_ctx, in_audio_stream->codecpar);
    if (ret < 0) {
        fprintf(stderr, "Could not copy codec parameters to decoder context\n");
        return 1;
    }
    ret = avcodec_open2(dec_ctx, decoder, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open decoder\n");
        return 1;
    }
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate audio frame\n");
        return 1;
    }

    /* ---- create the RTSP output context and its audio stream ---- */
    avformat_alloc_output_context2(&outctx, NULL, output_format_name, output_url);
    if (!outctx) {
        fprintf(stderr, "Could not create output context\n");
        return 1;
    }
    AVStream *out_audio_stream = avformat_new_stream(outctx, NULL);
    if (!out_audio_stream) {
        fprintf(stderr, "Could not create output audio stream\n");
        return 1;
    }

    /* ---- set up the AAC encoder ---- */
    AVCodec *encoder = avcodec_find_encoder(AUDIO_CODEC_ID);
    if (!encoder) {
        fprintf(stderr, "Encoder not found\n");
        return 1;
    }
    enc_ctx = avcodec_alloc_context3(encoder);
    if (!enc_ctx) {
        fprintf(stderr, "Could not allocate encoder context\n");
        return 1;
    }
    enc_ctx->sample_rate    = dec_ctx->sample_rate;
    enc_ctx->channel_layout = dec_ctx->channel_layout ?
                              dec_ctx->channel_layout :
                              av_get_default_channel_layout(dec_ctx->channels);
    enc_ctx->channels       = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
    /* The native AAC encoder takes planar float, which is also what the MP3
     * decoder outputs; if the two formats ever differ, libswresample has to be
     * inserted between decoder and encoder. */
    enc_ctx->sample_fmt     = encoder->sample_fmts ? encoder->sample_fmts[0]
                                                   : AV_SAMPLE_FMT_FLTP;
    enc_ctx->bit_rate       = 128000; /* fixed AAC bitrate, adjust as needed */
    enc_ctx->time_base      = (AVRational){1, enc_ctx->sample_rate};
    if (outctx->oformat->flags & AVFMT_GLOBALHEADER)
        enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

    ret = avcodec_open2(enc_ctx, encoder, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open encoder\n");
        return 1;
    }
    ret = avcodec_parameters_from_context(out_audio_stream->codecpar, enc_ctx);
    if (ret < 0) {
        fprintf(stderr, "Could not copy encoder parameters to output stream\n");
        return 1;
    }
    out_audio_stream->time_base = enc_ctx->time_base;

    /* The RTSP muxer has AVFMT_NOFILE set and handles its own network I/O, so
     * avio_open() is only required for file-based output formats. */
    if (!(outctx->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&outctx->pb, output_url, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open output %s\n", output_url);
            return 1;
        }
    }
    ret = avformat_write_header(outctx, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not write header\n");
        return 1;
    }

    /* MP3 frames carry 1152 samples while the AAC encoder expects frame_size
     * (usually 1024) samples per frame, so decoded samples are buffered in an
     * audio FIFO and re-chunked before encoding. */
    fifo = av_audio_fifo_alloc(enc_ctx->sample_fmt, enc_ctx->channels, 1);
    if (!fifo) {
        fprintf(stderr, "Could not allocate audio FIFO\n");
        return 1;
    }

    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    while (av_read_frame(inctx, &pkt) >= 0) {
        if (pkt.stream_index == in_audio_stream->index) {
            ret = avcodec_send_packet(dec_ctx, &pkt);
            if (ret < 0) {
                fprintf(stderr, "Error sending a packet to the decoder\n");
                return 1;
            }
            /* Pull every frame the decoder can produce from this packet. */
            for (;;) {
                ret = avcodec_receive_frame(dec_ctx, frame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                    break;
                if (ret < 0) {
                    fprintf(stderr, "Error during decoding\n");
                    return 1;
                }
                ret = av_audio_fifo_write(fifo, (void **)frame->extended_data,
                                          frame->nb_samples);
                if (ret < frame->nb_samples) {
                    fprintf(stderr, "Could not buffer decoded samples\n");
                    return 1;
                }
                /* Encode full AAC frames while enough samples are queued. */
                while (av_audio_fifo_size(fifo) >= enc_ctx->frame_size) {
                    enc_frame = av_frame_alloc();
                    if (!enc_frame) {
                        fprintf(stderr, "Could not allocate encoder frame\n");
                        return 1;
                    }
                    enc_frame->nb_samples     = enc_ctx->frame_size;
                    enc_frame->format         = enc_ctx->sample_fmt;
                    enc_frame->channel_layout = enc_ctx->channel_layout;
                    enc_frame->sample_rate    = enc_ctx->sample_rate;
                    if (av_frame_get_buffer(enc_frame, 0) < 0 ||
                        av_audio_fifo_read(fifo, (void **)enc_frame->extended_data,
                                           enc_ctx->frame_size) < enc_ctx->frame_size) {
                        fprintf(stderr, "Could not prepare a frame for encoding\n");
                        return 1;
                    }
                    /* pts counted in samples, matching enc_ctx->time_base (1/sample_rate) */
                    enc_frame->pts = next_pts;
                    next_pts += enc_frame->nb_samples;

                    ret = avcodec_send_frame(enc_ctx, enc_frame);
                    av_frame_free(&enc_frame);
                    if (ret < 0) {
                        fprintf(stderr, "Error sending a frame to the encoder\n");
                        return 1;
                    }
                    /* Pull every packet the encoder has ready. */
                    for (;;) {
                        AVPacket enc_pkt;
                        av_init_packet(&enc_pkt);
                        enc_pkt.data = NULL;
                        enc_pkt.size = 0;
                        ret = avcodec_receive_packet(enc_ctx, &enc_pkt);
                        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                            break;
                        if (ret < 0) {
                            fprintf(stderr, "Error encoding audio frame\n");
                            return 1;
                        }
                        enc_pkt.stream_index = out_audio_stream->index;
                        /* Convert timestamps from the encoder time base to the
                         * muxer's stream time base. */
                        av_packet_rescale_ts(&enc_pkt, enc_ctx->time_base,
                                             out_audio_stream->time_base);
                        ret = av_interleaved_write_frame(outctx, &enc_pkt);
                        if (ret < 0) {
                            fprintf(stderr, "Error writing audio frame to output\n");
                            return 1;
                        }
                        av_packet_unref(&enc_pkt);
                    }
                }
            }
        }
        av_packet_unref(&pkt);
    }
    /* For brevity, draining the decoder and the encoder at end of file is left
     * out here; a sketch of the flush step is shown further below. */

    av_write_trailer(outctx);

    av_audio_fifo_free(fifo);
    av_frame_free(&frame);
    avcodec_free_context(&dec_ctx);
    avcodec_free_context(&enc_ctx);
    avformat_close_input(&inctx);
    if (!(outctx->oformat->flags & AVFMT_NOFILE) && outctx->pb)
        avio_closep(&outctx->pb);
    avformat_free_context(outctx);
    avformat_network_deinit();
    return 0;
}
```
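Note that libavformat's RTSP muxer pushes the stream to an existing RTSP server rather than acting as a server itself, so something like mediamtx has to be listening at the output URL before the program starts. If the default UDP transport drops packets on your network, the muxer can be asked to interleave RTP over TCP by passing private options to `avformat_write_header()`. A minimal sketch, replacing the plain `avformat_write_header(outctx, NULL)` call in the example (the `rtsp_transport` option belongs to FFmpeg's RTSP muxer/demuxer):
```c
/* Request RTP-over-TCP instead of the default UDP transport. */
AVDictionary *opts = NULL;
av_dict_set(&opts, "rtsp_transport", "tcp", 0);
ret = avformat_write_header(outctx, &opts);
av_dict_free(&opts);   /* any entries still left were not consumed by the muxer */
if (ret < 0) {
    fprintf(stderr, "Could not write header\n");
    return 1;
}
```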
This example uses the FFmpeg libraries to read an MP3 file and convert it into an RTSP audio stream. The code first opens the input file with avformat_open_input and retrieves the stream information with avformat_find_stream_info. It then iterates over the streams to locate the first audio stream, looks up the decoder that stream uses, allocates a decoder context for it, and opens the decoder.
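Instead of looping over `inctx->nb_streams` by hand, libavformat can pick the most suitable audio stream and its decoder in a single call. A minimal alternative sketch (on FFmpeg 5 and later the decoder pointer must be declared `const AVCodec *`):
```c
AVCodec *decoder = NULL;
/* Returns the index of the best audio stream and fills in its decoder,
 * or a negative AVERROR code if the file has no audio stream. */
int stream_index = av_find_best_stream(inctx, AVMEDIA_TYPE_AUDIO, -1, -1,
                                       &decoder, 0);
if (stream_index < 0) {
    fprintf(stderr, "Could not find an audio stream in %s\n", input_file_name);
    return 1;
}
AVStream *in_audio_stream = inctx->streams[stream_index];
```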
Next, the code creates an output context for the RTSP URL and adds a new audio stream to it. It looks up the requested encoder with avcodec_find_encoder, creates an encoder context for it, and opens the encoder. The code then reads AVPackets from the input in a loop; every packet belonging to the audio stream is decoded, the decoded samples are buffered and re-chunked to the encoder's frame size, encoded to AAC, and written to the RTSP output.
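One step the loop above leaves out is draining: at end of file both the decoder and the encoder may still hold buffered data, which is released by sending a NULL packet or frame and then reading until AVERROR_EOF. A minimal sketch of flushing the encoder, reusing `enc_ctx`, `outctx` and `out_audio_stream` from the example; it belongs just before `av_write_trailer()`:
```c
/* Signal end of stream to the encoder, then collect its buffered packets. */
avcodec_send_frame(enc_ctx, NULL);
for (;;) {
    AVPacket flush_pkt;
    av_init_packet(&flush_pkt);
    flush_pkt.data = NULL;
    flush_pkt.size = 0;
    int ret = avcodec_receive_packet(enc_ctx, &flush_pkt);
    if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
        break;                     /* nothing left in the encoder */
    if (ret < 0) {
        fprintf(stderr, "Error draining the encoder\n");
        break;
    }
    flush_pkt.stream_index = out_audio_stream->index;
    av_packet_rescale_ts(&flush_pkt, enc_ctx->time_base, out_audio_stream->time_base);
    av_interleaved_write_frame(outctx, &flush_pkt);
    av_packet_unref(&flush_pkt);
}
```
The same pattern with `avcodec_send_packet(dec_ctx, NULL)` and `avcodec_receive_frame()` drains the decoder, and any samples still sitting in the FIFO can then be encoded as a final, shorter frame.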
Finally, the code writes the trailer and frees all of the resources, completing the conversion.
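Because the example reads from a file, it encodes and pushes packets much faster than real time, which most RTSP servers and players handle poorly. A common fix is to pace writes against the wall clock, similar to ffmpeg's `-re` flag. A minimal sketch with a hypothetical helper `pace_to_realtime()`, where `stream_start` is the value of `av_gettime()` taken right after `avformat_write_header()`:
```c
/* av_gettime() and av_usleep() come from <libavutil/time.h>,
 * which the example already includes. */
static void pace_to_realtime(int64_t stream_start, const AVPacket *enc_pkt,
                             AVRational stream_time_base)
{
    /* Packet timestamp converted to microseconds since the stream started. */
    int64_t pts_us     = av_rescale_q(enc_pkt->dts, stream_time_base, AV_TIME_BASE_Q);
    int64_t elapsed_us = av_gettime() - stream_start;
    if (pts_us > elapsed_us)
        av_usleep((unsigned)(pts_us - elapsed_us));
}
```
Called as `pace_to_realtime(stream_start, &enc_pkt, out_audio_stream->time_base);` right before each `av_interleaved_write_frame()`, this keeps roughly one second of audio flowing out per second of wall-clock time.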