FFmpeg + C++ + OpenCV 实现 RTSP 拉流并推流
时间: 2023-09-10 16:08:26 浏览: 308
可以使用FFmpeg和OpenCV来进行RTSP拉流和推流。
首先,需要使用FFmpeg进行RTSP拉流。可以使用以下代码来进行拉流:
```c++
#include <iostream>
#include <opencv2/opencv.hpp>
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
}
int main(int argc, char* argv[])
{
av_register_all();
AVFormatContext* pFormatCtx = nullptr;
if (avformat_open_input(&pFormatCtx, "rtsp://your_rtsp_url", nullptr, nullptr) != 0) {
std::cerr << "Failed to open input stream!" << std::endl;
return -1;
}
if (avformat_find_stream_info(pFormatCtx, nullptr) < 0) {
std::cerr << "Failed to retrieve stream information!" << std::endl;
return -1;
}
int videoStream = -1;
for (int i = 0; i < pFormatCtx->nb_streams; i++) {
if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
videoStream = i;
break;
}
}
if (videoStream == -1) {
std::cerr << "Failed to find video stream!" << std::endl;
return -1;
}
AVCodecParameters* pCodecParams = pFormatCtx->streams[videoStream]->codecpar;
AVCodec* pCodec = avcodec_find_decoder(pCodecParams->codec_id);
if (pCodec == nullptr) {
std::cerr << "Failed to find codec!" << std::endl;
return -1;
}
AVCodecContext* pCodecCtx = avcodec_alloc_context3(pCodec);
if (pCodecCtx == nullptr) {
std::cerr << "Failed to allocate codec context!" << std::endl;
return -1;
}
if (avcodec_parameters_to_context(pCodecCtx, pCodecParams) < 0) {
std::cerr << "Failed to copy codec parameters to codec context!" << std::endl;
return -1;
}
if (avcodec_open2(pCodecCtx, pCodec, nullptr) < 0) {
std::cerr << "Failed to open codec!" << std::endl;
return -1;
}
AVFrame* pFrame = av_frame_alloc();
if (pFrame == nullptr) {
std::cerr << "Failed to allocate frame!" << std::endl;
return -1;
}
AVPacket* pPacket = av_packet_alloc();
if (pPacket == nullptr) {
std::cerr << "Failed to allocate packet!" << std::endl;
return -1;
}
while (av_read_frame(pFormatCtx, pPacket) >= 0) {
if (pPacket->stream_index == videoStream) {
if (avcodec_send_packet(pCodecCtx, pPacket) < 0) {
std::cerr << "Error sending a packet for decoding!" << std::endl;
break;
}
while (avcodec_receive_frame(pCodecCtx, pFrame) == 0) {
// Use OpenCV to display the frame
cv::Mat matFrame(pFrame->height, pFrame->width, CV_8UC3, pFrame->data[0], pFrame->linesize[0]);
cv::imshow("Frame", matFrame);
cv::waitKey(1);
}
}
av_packet_unref(pPacket);
}
av_packet_free(&pPacket);
av_frame_free(&pFrame);
avcodec_free_context(&pCodecCtx);
avformat_close_input(&pFormatCtx);
return 0;
}
```
然后,可以使用FFmpeg进行推流。可以使用以下代码来进行推流:
```c++
#include <iostream>
#include <opencv2/opencv.hpp>
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
}
int main(int argc, char* argv[])
{
av_register_all();
AVFormatContext* pFormatCtx = nullptr;
if (avformat_alloc_output_context2(&pFormatCtx, nullptr, "flv", "rtmp://your_rtmp_url") < 0) {
std::cerr << "Failed to allocate output context!" << std::endl;
return -1;
}
AVOutputFormat* pOutputFormat = pFormatCtx->oformat;
if (avio_open(&pFormatCtx->pb, "rtmp://your_rtmp_url", AVIO_FLAG_WRITE) < 0) {
std::cerr << "Failed to open output URL!" << std::endl;
return -1;
}
AVCodec* pCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (pCodec == nullptr) {
std::cerr << "Failed to find encoder!" << std::endl;
return -1;
}
AVStream* pStream = avformat_new_stream(pFormatCtx, pCodec);
if (pStream == nullptr) {
std::cerr << "Failed to create new stream!" << std::endl;
return -1;
}
AVCodecContext* pCodecCtx = avcodec_alloc_context3(pCodec);
if (pCodecCtx == nullptr) {
std::cerr << "Failed to allocate codec context!" << std::endl;
return -1;
}
if (avcodec_parameters_to_context(pCodecCtx, pStream->codecpar) < 0) {
std::cerr << "Failed to copy codec parameters to codec context!" << std::endl;
return -1;
}
pCodecCtx->codec_id = AV_CODEC_ID_H264;
pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
pCodecCtx->width = 640;
pCodecCtx->height = 480;
pCodecCtx->time_base = { 1, 25 };
pCodecCtx->bit_rate = 400000;
pCodecCtx->gop_size = 10;
if (avcodec_open2(pCodecCtx, pCodec, nullptr) < 0) {
std::cerr << "Failed to open codec!" << std::endl;
return -1;
}
AVFrame* pFrame = av_frame_alloc();
if (pFrame == nullptr) {
std::cerr << "Failed to allocate frame!" << std::endl;
return -1;
}
pFrame->format = pCodecCtx->pix_fmt;
pFrame->width = pCodecCtx->width;
pFrame->height = pCodecCtx->height;
if (av_frame_get_buffer(pFrame, 0) < 0) {
std::cerr << "Failed to allocate picture!" << std::endl;
return -1;
}
AVPacket* pPacket = av_packet_alloc();
if (pPacket == nullptr) {
std::cerr << "Failed to allocate packet!" << std::endl;
return -1;
}
int gotOutput = 0;
int frameCount = 0;
while (frameCount < 10000) {
cv::Mat matFrame = cv::imread("your_image_path");
AVFrame* pFrameYUV = av_frame_alloc();
if (pFrameYUV == nullptr) {
std::cerr << "Failed to allocate YUV frame!" << std::endl;
return -1;
}
av_image_alloc(pFrameYUV->data, pFrameYUV->linesize, pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, 1);
SwsContext* pSwsCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, SWS_BICUBIC, nullptr, nullptr, nullptr);
sws_scale(pSwsCtx, (uint8_t const* const*)matFrame.data, matFrame.step, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
pFrameYUV->pts = frameCount * pStream->time_base.den / pStream->time_base.num / 25;
if (avcodec_send_frame(pCodecCtx, pFrameYUV) < 0) {
std::cerr << "Error sending a frame for encoding!" << std::endl;
break;
}
while (avcodec_receive_packet(pCodecCtx, pPacket) == 0) {
pPacket->stream_index = pStream->index;
av_interleaved_write_frame(pFormatCtx, pPacket);
av_packet_unref(pPacket);
}
av_frame_free(&pFrameYUV);
frameCount++;
}
av_write_trailer(pFormatCtx);
av_packet_free(&pPacket);
av_frame_free(&pFrame);
avcodec_free_context(&pCodecCtx);
avformat_free_context(pFormatCtx);
return 0;
}
```
需要注意的是,这里的代码只是提供了一个基本的框架,需要根据实际情况进行修改和适配。
阅读全文