A simple FFmpeg-based audio/video player demo in C
A quick code demo for learning FFmpeg.
Below is the demo code for a simple FFmpeg-based player, written in C. Note that despite the title it only demuxes, decodes, and plays the audio stream of the input file:
```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>
#include <SDL2/SDL.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

#define SDL_AUDIO_BUFFER_SIZE 1024
#define MAX_AUDIO_FRAME_SIZE 192000

/* Thread-safe FIFO of compressed packets, shared between the demuxing loop
 * in main() and the audio decoder running inside the SDL audio callback. */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
} PacketQueue;

PacketQueue audioq;
int quit = 0;

void packet_queue_init(PacketQueue *q) {
    memset(q, 0, sizeof(PacketQueue));
    pthread_mutex_init(&q->mutex, NULL);
    pthread_cond_init(&q->cond, NULL);
}

int packet_queue_put(PacketQueue *q, AVPacket *pkt) {
    AVPacketList *pkt1;
    /* make sure the packet owns reference-counted data; the queue then takes ownership */
    if (av_packet_make_refcounted(pkt) < 0) {
        return -1;
    }
    pkt1 = (AVPacketList *) av_malloc(sizeof(AVPacketList));
    if (!pkt1) {
        return -1;
    }
    pkt1->pkt = *pkt;
    pkt1->next = NULL;
    pthread_mutex_lock(&q->mutex);
    if (!q->last_pkt) {
        q->first_pkt = pkt1;
    } else {
        q->last_pkt->next = pkt1;
    }
    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size;
    pthread_cond_signal(&q->cond);      /* wake a consumer waiting on an empty queue */
    pthread_mutex_unlock(&q->mutex);
    return 0;
}

int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) {
    AVPacketList *pkt1;
    int ret;
    pthread_mutex_lock(&q->mutex);
    for (;;) {
        if (quit) {
            ret = -1;
            break;
        }
        pkt1 = q->first_pkt;
        if (pkt1) {
            q->first_pkt = pkt1->next;
            if (!q->first_pkt) {
                q->last_pkt = NULL;
            }
            q->nb_packets--;
            q->size -= pkt1->pkt.size;
            *pkt = pkt1->pkt;
            av_free(pkt1);
            ret = 1;
            break;
        } else if (!block) {
            ret = 0;
            break;
        } else {
            /* queue is empty: wait until packet_queue_put() signals new data */
            pthread_cond_wait(&q->cond, &q->mutex);
        }
    }
    pthread_mutex_unlock(&q->mutex);
    return ret;
}

/* Decode packets from audioq until one audio frame is produced; copy its raw
 * samples into audio_buf and return the number of bytes written. */
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) {
    static AVPacket pkt;
    static uint8_t *audio_pkt_data = NULL;
    static int audio_pkt_size = 0;
    static AVFrame *aFrame = NULL;
    int len1, data_size = 0;
    if (!aFrame) {
        aFrame = av_frame_alloc();      /* reused across calls */
    }
    for (;;) {
        while (audio_pkt_size > 0) {
            int got_frame = 0;
            len1 = avcodec_decode_audio4(aCodecCtx, aFrame, &got_frame, &pkt);
            if (len1 < 0) {
                /* decoding error: skip the rest of this packet */
                audio_pkt_size = 0;
                break;
            }
            audio_pkt_data += len1;
            audio_pkt_size -= len1;
            if (got_frame) {
                data_size = av_samples_get_buffer_size(NULL, aCodecCtx->channels,
                                                       aFrame->nb_samples,
                                                       aCodecCtx->sample_fmt, 1);
                if (data_size > buf_size) {
                    data_size = buf_size;
                }
                memcpy(audio_buf, aFrame->data[0], data_size);
            }
            if (data_size <= 0) {
                /* no output yet, keep decoding the same packet */
                continue;
            }
            return data_size;
        }
        if (pkt.data) {
            av_packet_unref(&pkt);
        }
        if (quit) {
            return -1;
        }
        /* block until the demuxer delivers the next audio packet */
        if (packet_queue_get(&audioq, &pkt, 1) < 0) {
            return -1;
        }
        audio_pkt_data = pkt.data;
        audio_pkt_size = pkt.size;
    }
}

void silence_buf(Uint8 *stream, int len) {
    int i;
    for (i = 0; i < len; i++) {
        stream[i] = 0;
    }
}

/* SDL audio callback: fill `stream` with `len` bytes of decoded PCM. */
void audio_callback(void *userdata, Uint8 *stream, int len) {
    AVCodecContext *aCodecCtx = (AVCodecContext *) userdata;
    static uint8_t audio_buf[(MAX_AUDIO_FRAME_SIZE * 3) / 2];
    static unsigned int audio_buf_size = 0;
    static unsigned int audio_buf_index = 0;
    int len1, audio_size;
    while (len > 0) {
        if (audio_buf_index >= audio_buf_size) {
            /* local buffer is drained: decode the next frame */
            audio_size = audio_decode_frame(aCodecCtx, audio_buf, sizeof(audio_buf));
            if (audio_size < 0) {
                /* decoding failed or we are quitting: output silence */
                silence_buf(stream, len);
                return;
            }
            audio_buf_size = audio_size;
            audio_buf_index = 0;
        }
        len1 = audio_buf_size - audio_buf_index;
        if (len1 > len) {
            len1 = len;
        }
        memcpy(stream, (uint8_t *) audio_buf + audio_buf_index, len1);
        len -= len1;
        stream += len1;
        audio_buf_index += len1;
    }
}

/* Worker thread: open the SDL audio device and keep playback running until quit is set. */
void *audio_thread(void *arg) {
    AVCodecContext *aCodecCtx = (AVCodecContext *) arg;
    SDL_AudioSpec wanted_spec, spec;
    wanted_spec.freq = aCodecCtx->sample_rate;
    wanted_spec.format = AUDIO_S16SYS;
    wanted_spec.channels = aCodecCtx->channels;
    wanted_spec.silence = 0;
    wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
    wanted_spec.callback = audio_callback;
    wanted_spec.userdata = aCodecCtx;
    if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
        fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
        return NULL;
    }
    if (spec.format != AUDIO_S16SYS) {
        fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
        return NULL;
    }
    SDL_PauseAudio(0);                  /* start the audio callback */
    while (!quit) {
        SDL_Delay(100);
    }
    SDL_CloseAudio();
    return NULL;
}

int main(int argc, char *argv[]) {
    AVFormatContext *pFormatCtx = NULL;
    AVCodecContext *aCodecCtx = NULL;
    AVCodec *aCodec = NULL;
    pthread_t audio_tid;
    AVPacket pkt;
    int audioStream = -1, i;
    if (argc < 2) {
        fprintf(stderr, "usage: %s <media file>\n", argv[0]);
        return -1;
    }
    av_register_all();                  /* unnecessary since FFmpeg 4.0, removed in 5.0 */
    if (SDL_Init(SDL_INIT_AUDIO) < 0) {
        fprintf(stderr, "SDL_Init: %s\n", SDL_GetError());
        return -1;
    }
    /* open the input file and read its stream information */
    if (avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0) {
        return -1;
    }
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        return -1;
    }
    av_dump_format(pFormatCtx, 0, argv[1], 0);
    /* find the first audio stream */
    for (i = 0; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audioStream < 0) {
            audioStream = i;
        }
    }
    if (audioStream == -1) {
        return -1;
    }
    /* set up the audio decoder from the stream's codec parameters */
    aCodec = avcodec_find_decoder(pFormatCtx->streams[audioStream]->codecpar->codec_id);
    if (!aCodec) {
        return -1;
    }
    aCodecCtx = avcodec_alloc_context3(aCodec);
    if (avcodec_parameters_to_context(aCodecCtx, pFormatCtx->streams[audioStream]->codecpar) < 0) {
        return -1;
    }
    if (avcodec_open2(aCodecCtx, aCodec, NULL) < 0) {
        return -1;
    }
    packet_queue_init(&audioq);
    av_init_packet(&pkt);
    pthread_create(&audio_tid, NULL, audio_thread, aCodecCtx);
    /* demux: push audio packets into the queue, drop everything else */
    while (av_read_frame(pFormatCtx, &pkt) >= 0) {
        if (pkt.stream_index == audioStream) {
            packet_queue_put(&audioq, &pkt);
        } else {
            av_packet_unref(&pkt);
        }
    }
    /* wait for the queued packets to be consumed, then shut down */
    while (!quit && audioq.nb_packets > 0) {
        SDL_Delay(100);
    }
    pthread_mutex_lock(&audioq.mutex);
    quit = 1;
    pthread_cond_broadcast(&audioq.cond);   /* wake the decoder if it is blocked on the queue */
    pthread_mutex_unlock(&audioq.mutex);
    pthread_join(audio_tid, NULL);
    avcodec_free_context(&aCodecCtx);
    avformat_close_input(&pFormatCtx);
    return 0;
}
```
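Note that the demo targets the FFmpeg 3.x/4.x API: avcodec_decode_audio4() and av_register_all() were dropped in FFmpeg 5, and the channels field of AVCodecContext has since been superseded by ch_layout. On a current FFmpeg, the inner decode step would use the avcodec_send_packet()/avcodec_receive_frame() pair instead. The following is only a rough sketch of what that step could look like: the decode_one_frame name is made up here, it assumes FFmpeg 5.1 or later (for ch_layout), and it pulls at most one frame per packet.
```c
#include <libavcodec/avcodec.h>
#include <libavutil/samplefmt.h>

/* Hypothetical replacement for the decode step, using the send/receive API.
 * Returns bytes written to audio_buf, 0 if the decoder needs more input,
 * or a negative AVERROR code. */
static int decode_one_frame(AVCodecContext *ctx, const AVPacket *pkt,
                            uint8_t *audio_buf, int buf_size) {
    AVFrame *frame = av_frame_alloc();
    int data_size = 0;
    int ret = avcodec_send_packet(ctx, pkt);        /* feed one compressed packet */
    if (ret < 0) {
        av_frame_free(&frame);
        return ret;
    }
    ret = avcodec_receive_frame(ctx, frame);        /* pull one decoded frame */
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
        av_frame_free(&frame);
        return 0;                                   /* nothing decodable yet */
    }
    if (ret < 0) {
        av_frame_free(&frame);
        return ret;
    }
    data_size = av_samples_get_buffer_size(NULL, frame->ch_layout.nb_channels,
                                           frame->nb_samples,
                                           (enum AVSampleFormat) frame->format, 1);
    if (data_size > 0 && data_size <= buf_size) {
        /* note: for planar sample formats data[0] holds only the first channel */
        memcpy(audio_buf, frame->data[0], data_size);
    }
    av_frame_free(&frame);
    return data_size;
}
```
A real player would keep calling avcodec_receive_frame() until it returns AVERROR(EAGAIN), and would usually run the samples through libswresample before handing them to SDL, because many decoders output planar float rather than the interleaved S16 that the demo requests from SDL.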
This code uses the FFmpeg libraries to decode the audio data and SDL to play it back. The PacketQueue structure stores the demuxed AVPacket data, audio_decode_frame() decodes audio packets into raw samples, audio_callback() feeds the decoded samples to SDL, and audio_thread() opens the SDL audio device and keeps playback running. In the main function, the program first opens the media file with avformat_open_input(), then iterates over all streams in the file to find the audio stream and record its index. It looks up a suitable decoder with avcodec_find_decoder(), allocates a decoder context with avcodec_alloc_context3(), copies the stream's AVCodecParameters into that AVCodecContext with avcodec_parameters_to_context(), and opens the decoder with avcodec_open2(). It then starts the audio thread, reads packets and stores the audio ones in the PacketQueue, and finally waits for the audio thread to finish and releases the resources.
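One resource the demo never releases is any packet still sitting in the queue when playback stops. A small cleanup helper along the following lines could plug that leak; this is only a sketch under the same assumptions as the demo above, and the packet_queue_flush name is invented here:
```c
/* Hypothetical cleanup helper: drop and unref every packet still queued.
 * Call it after the audio thread has been joined and quit has been set. */
void packet_queue_flush(PacketQueue *q) {
    AVPacketList *pkt1, *next;
    pthread_mutex_lock(&q->mutex);
    for (pkt1 = q->first_pkt; pkt1 != NULL; pkt1 = next) {
        next = pkt1->next;
        av_packet_unref(&pkt1->pkt);   /* release the compressed data */
        av_free(pkt1);                 /* release the list node itself */
    }
    q->first_pkt = NULL;
    q->last_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    pthread_mutex_unlock(&q->mutex);
}
```
Calling packet_queue_flush(&audioq); in main() after pthread_join() and before avcodec_free_context() would free both the queued packets and their list nodes.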