使用 C++ 和 FFmpeg实现RTSP 服务器示例
时间: 2024-06-09 19:07:19 浏览: 200
以下是一个简单的使用 C++ 和 FFmpeg 实现的 RTSP 服务器示例:
```cpp
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <cstdio>
#include <iostream>
#include <mutex>
#include <string>
#include <thread>

#include <opencv2/opencv.hpp>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
#include <libswscale/swscale.h>
}
class RtspServer {
public:
RtspServer(const std::string& url, int width, int height, int fps)
: m_url(url), m_width(width), m_height(height), m_fps(fps) {
av_register_all();
avformat_network_init();
m_fmt = av_guess_format("rtsp", m_url.c_str(), NULL);
if (!m_fmt) {
std::cout << "Failed to guess format for " << m_url << std::endl;
return;
}
m_fmt_ctx = avformat_alloc_context();
m_fmt_ctx->oformat = m_fmt;
snprintf(m_fmt_ctx->filename, sizeof(m_fmt_ctx->filename), "%s", m_url.c_str());
m_video_st = avformat_new_stream(m_fmt_ctx, NULL);
if (!m_video_st) {
std::cout << "Failed to create video stream" << std::endl;
return;
}
m_codec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (!m_codec) {
std::cout << "Failed to find H.264 codec" << std::endl;
return;
}
m_codec_ctx = avcodec_alloc_context3(m_codec);
m_codec_ctx->codec_id = AV_CODEC_ID_H264;
m_codec_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
m_codec_ctx->width = m_width;
m_codec_ctx->height = m_height;
m_codec_ctx->time_base.num = 1;
m_codec_ctx->time_base.den = m_fps;
m_codec_ctx->gop_size = m_fps;
m_codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
if (m_fmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) {
m_codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
int ret = avcodec_open2(m_codec_ctx, m_codec, NULL);
if (ret < 0) {
std::cout << "Failed to open codec" << std::endl;
return;
}
avcodec_parameters_from_context(m_video_st->codecpar, m_codec_ctx);
av_dump_format(m_fmt_ctx, 0, m_url.c_str(), 1);
ret = avio_open(&m_fmt_ctx->pb, m_url.c_str(), AVIO_FLAG_WRITE);
if (ret < 0) {
std::cout << "Failed to open output URL" << std::endl;
return;
}
ret = avformat_write_header(m_fmt_ctx, NULL);
if (ret < 0) {
std::cout << "Failed to write header" << std::endl;
return;
}
m_frame = av_frame_alloc();
m_frame->width = m_width;
m_frame->height = m_height;
m_frame->format = AV_PIX_FMT_YUV420P;
av_frame_get_buffer(m_frame, 32);
m_sws_ctx = sws_getContext(m_width, m_height, AV_PIX_FMT_BGR24,
m_width, m_height, AV_PIX_FMT_YUV420P,
SWS_BICUBIC, NULL, NULL, NULL);
if (!m_sws_ctx) {
std::cout << "Failed to create SWS context" << std::endl;
return;
}
}
~RtspServer() {
if (m_sws_ctx) {
sws_freeContext(m_sws_ctx);
}
if (m_frame) {
av_frame_free(&m_frame);
}
if (m_fmt_ctx) {
av_write_trailer(m_fmt_ctx);
if (m_fmt_ctx->pb) {
avio_closep(&m_fmt_ctx->pb);
}
avformat_free_context(m_fmt_ctx);
}
avcodec_free_context(&m_codec_ctx);
}
void run() {
std::thread thread([this]() {
while (!m_stop) {
auto start_time = std::chrono::high_resolution_clock::now();
// Generate synthetic video frames here
cv::Mat image(m_height, m_width, CV_8UC3, cv::Scalar(0, 255, 0));
cv::circle(image, cv::Point(m_width/2, m_height/2), m_height/4, cv::Scalar(0, 0, 255), -1);
cv::cvtColor(image, m_bgr_frame, cv::COLOR_RGB2BGR);
// Convert to YUV420P format
uint8_t* in_data[1] = { m_bgr_frame.data };
int in_linesize[1] = { 3 * m_width };
uint8_t* out_data[3] = { m_frame->data[0], m_frame->data[1], m_frame->data[2] };
int out_linesize[3] = { m_width, m_width / 2, m_width / 2 };
sws_scale(m_sws_ctx, in_data, in_linesize, 0, m_height, out_data, out_linesize);
// Encode and write to output
AVPacket pkt = { 0 };
av_init_packet(&pkt);
int ret = avcodec_send_frame(m_codec_ctx, m_frame);
if (ret < 0) {
std::cout << "Failed to send frame" << std::endl;
continue;
}
while (ret >= 0) {
ret = avcodec_receive_packet(m_codec_ctx, &pkt);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
break;
}
if (ret < 0) {
std::cout << "Failed to receive packet" << std::endl;
continue;
}
pkt.stream_index = m_video_st->index;
av_packet_rescale_ts(&pkt, m_codec_ctx->time_base, m_video_st->time_base);
ret = av_interleaved_write_frame(m_fmt_ctx, &pkt);
if (ret < 0) {
std::cout << "Failed to write frame" << std::endl;
continue;
}
av_packet_unref(&pkt);
}
// Sleep to maintain frame rate
auto end_time = std::chrono::high_resolution_clock::now();
auto elapsed_time = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time);
if (elapsed_time < std::chrono::milliseconds(1000 / m_fps)) {
std::this_thread::sleep_for(std::chrono::milliseconds(1000 / m_fps) - elapsed_time);
}
}
});
thread.detach();
}
void stop() {
m_stop = true;
}
private:
std::string m_url;
int m_width;
int m_height;
int m_fps;
AVFormatContext* m_fmt_ctx = NULL;
AVFormatContext* m_fmt = NULL;
AVCodec* m_codec = NULL;
AVCodecContext* m_codec_ctx = NULL;
AVStream* m_video_st = NULL;
AVFrame* m_frame = NULL;
SwsContext* m_sws_ctx = NULL;
cv::Mat m_bgr_frame;
bool m_stop = false;
};
/// Demo entry point: publish a synthetic feed for 30 seconds, then shut down.
int main(int argc, char* argv[]) {
    const std::string rtsp_url = "rtsp://localhost:8554/live";
    const int frame_width = 640;
    const int frame_height = 480;
    const int frame_rate = 30;

    RtspServer server(rtsp_url, frame_width, frame_height, frame_rate);
    server.run();

    // Let the stream run for half a minute before stopping.
    std::this_thread::sleep_for(std::chrono::seconds(30));
    server.stop();
    return 0;
}
```
该示例使用 OpenCV 生成合成视频帧(绿色背景上的红色圆),并使用 FFmpeg 编码为 H.264 格式,然后推送到 RTSP 输出。请注意:FFmpeg 的 RTSP 复用器以客户端方式推流,运行前需要有 RTSP 服务器(例如 mediamtx)在目标 URL 上监听;该示例仅用于演示目的。
阅读全文