ffmpeg duration and pts
FFmpeg is a powerful cross-platform multimedia toolkit that can convert, extract, and encode video, audio, and streams. Within FFmpeg, `duration` and `pts` are two important timing concepts:
1. `duration`: the total length of a file or of a media segment. Tools such as ffprobe report it as a floating-point number of seconds; internally, the container-level `AVFormatContext.duration` is stored in `AV_TIME_BASE` (microsecond) units, while per-stream and per-packet durations are counted in the corresponding stream's `time_base`. It describes the overall time span of the material from beginning to end.
2. `pts` (Presentation Time Stamp): the timestamp attached to each video frame or block of audio samples that says when it should be presented (displayed or played). It is expressed in `time_base` units rather than in seconds, and in decode order it is not necessarily monotonic: with B-frames, packets reach the decoder in an order where `pts` appears to jump around, which is why a separate decode timestamp (`dts`) exists.
In short, `duration` describes the overall length of the content, while `pts` places each frame or sample on the playback timeline. The two are used together constantly, especially in streaming and in audio/video synchronization.
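To make the two concepts concrete, here is a minimal libavformat sketch in the same style as the C++ example further down (the file name `input.mp4` is just a placeholder): it prints the container-level `duration`, which is stored in `AV_TIME_BASE` (microsecond) units, and converts each packet's `pts` to seconds via the stream's `time_base`.
```cpp
#include <cstdio>
extern "C" {
#include <libavformat/avformat.h>
}

int main() {
    AVFormatContext* fmt = nullptr;
    // "input.mp4" is a placeholder input file
    if (avformat_open_input(&fmt, "input.mp4", nullptr, nullptr) < 0) return 1;
    if (avformat_find_stream_info(fmt, nullptr) < 0) return 1;

    // AVFormatContext.duration is expressed in AV_TIME_BASE units (microseconds)
    printf("duration: %.3f s\n", fmt->duration / (double)AV_TIME_BASE);

    AVPacket* pkt = av_packet_alloc();
    while (av_read_frame(fmt, pkt) >= 0) {
        AVRational tb = fmt->streams[pkt->stream_index]->time_base;
        // pts is counted in the stream's time_base; av_q2d(tb) converts one tick to seconds
        if (pkt->pts != AV_NOPTS_VALUE)
            printf("stream %d  pts=%lld (%.3f s)\n",
                   pkt->stream_index, (long long)pkt->pts, pkt->pts * av_q2d(tb));
        av_packet_unref(pkt);
    }
    av_packet_free(&pkt);
    avformat_close_input(&fmt);
    return 0;
}
```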
Related questions
Using FFmpeg.AutoGen to call the FFmpeg library and build a Unity screen-recording tool
To build a screen-recording tool in Unity that calls into the FFmpeg library for video encoding, you can use the FFmpeg.AutoGen bindings. Below is a simplified example:
```csharp
using System;
using System.Runtime.InteropServices;
using FFmpeg.AutoGen;
using UnityEngine;

// Requires "Allow 'unsafe' Code" to be enabled in the Unity player settings.
public unsafe class ScreenRecorder : MonoBehaviour
{
    private const int FPS = 30;
    private const int BIT_RATE = 4000000;
    private const string OUTPUT_FILE = "output.mp4";

    private int frameCount = 0;
    private AVCodecContext* codecContext;
    private AVFormatContext* formatContext;
    private AVStream* stream;

    private void Start()
    {
        // Encoder options handed to avcodec_open2(); libx264 private options
        // such as "preset" and "tune" are passed through this dictionary.
        AVDictionary* options = null;
        ffmpeg.av_dict_set(&options, "preset", "ultrafast", 0);
        ffmpeg.av_dict_set(&options, "tune", "zerolatency", 0);

        // Find the H.264 encoder.
        AVCodec* codec = ffmpeg.avcodec_find_encoder(AVCodecID.AV_CODEC_ID_H264);
        if (codec == null)
        {
            Debug.LogError("Failed to find H.264 codec!");
            return;
        }

        // Configure the encoder: resolution, time base (1/FPS), pixel format and bit rate.
        codecContext = ffmpeg.avcodec_alloc_context3(codec);
        codecContext->width = Screen.width;
        codecContext->height = Screen.height;
        codecContext->time_base = new AVRational { num = 1, den = FPS };
        codecContext->framerate = new AVRational { num = FPS, den = 1 };
        codecContext->pix_fmt = AVPixelFormat.AV_PIX_FMT_YUV420P;
        codecContext->bit_rate = BIT_RATE;
        codecContext->flags |= ffmpeg.AV_CODEC_FLAG_GLOBAL_HEADER;

        int ret = ffmpeg.avcodec_open2(codecContext, codec, &options);
        if (ret < 0)
        {
            Debug.LogError($"Failed to open codec! Error code: {ret}");
            return;
        }

        // Create the output format context and guess the container from the file name.
        formatContext = ffmpeg.avformat_alloc_context();
        formatContext->oformat = ffmpeg.av_guess_format(null, OUTPUT_FILE, null);
        if (formatContext->oformat == null)
        {
            Debug.LogError("Failed to guess output format!");
            return;
        }

        ret = ffmpeg.avio_open(&formatContext->pb, OUTPUT_FILE, ffmpeg.AVIO_FLAG_WRITE);
        if (ret < 0)
        {
            Debug.LogError($"Failed to open file '{OUTPUT_FILE}'! Error code: {ret}");
            return;
        }

        // Add the video stream and copy the encoder parameters into it.
        stream = ffmpeg.avformat_new_stream(formatContext, codec);
        stream->time_base = codecContext->time_base;
        ret = ffmpeg.avcodec_parameters_from_context(stream->codecpar, codecContext);
        if (ret < 0)
        {
            Debug.LogError($"Failed to copy codec parameters! Error code: {ret}");
            return;
        }

        ret = ffmpeg.avformat_write_header(formatContext, &options);
        if (ret < 0)
        {
            Debug.LogError($"Failed to write format header! Error code: {ret}");
            return;
        }
        ffmpeg.av_dict_free(&options);
    }

    private void OnDestroy()
    {
        if (codecContext != null && formatContext != null)
        {
            // Flush delayed frames out of the encoder before finalizing the file.
            ffmpeg.avcodec_send_frame(codecContext, null);
            AVPacket* packet = ffmpeg.av_packet_alloc();
            while (ffmpeg.avcodec_receive_packet(codecContext, packet) >= 0)
            {
                packet->stream_index = stream->index;
                ffmpeg.av_packet_rescale_ts(packet, codecContext->time_base, stream->time_base);
                ffmpeg.av_interleaved_write_frame(formatContext, packet);
                ffmpeg.av_packet_unref(packet);
            }
            ffmpeg.av_packet_free(&packet);
            ffmpeg.av_write_trailer(formatContext);
        }

        if (codecContext != null)
        {
            ffmpeg.avcodec_free_context(&codecContext);
        }
        if (formatContext != null)
        {
            if ((formatContext->oformat->flags & ffmpeg.AVFMT_NOFILE) == 0 && formatContext->pb != null)
            {
                ffmpeg.avio_close(formatContext->pb);
            }
            ffmpeg.avformat_free_context(formatContext);
        }
    }

    private void LateUpdate()
    {
        if (codecContext == null || formatContext == null)
        {
            return;
        }

        // Allocate a YUV420P frame for the encoder.
        AVFrame* frame = ffmpeg.av_frame_alloc();
        if (frame == null)
        {
            Debug.LogError("Failed to allocate frame!");
            return;
        }
        frame->format = (int)codecContext->pix_fmt;
        frame->width = codecContext->width;
        frame->height = codecContext->height;
        ffmpeg.av_frame_get_buffer(frame, 0);

        // Read the back buffer into a CPU-side RGB24 buffer. Strictly speaking,
        // ReadPixels should run at end of frame (e.g. from a coroutine waiting on
        // WaitForEndOfFrame); it is called here for brevity.
        Texture2D tex = new Texture2D(Screen.width, Screen.height, TextureFormat.RGB24, false);
        tex.ReadPixels(new Rect(0, 0, Screen.width, Screen.height), 0, 0);
        tex.Apply();
        byte[] buffer = tex.GetRawTextureData();
        Destroy(tex);

        // Convert RGB24 (bottom-up) to YUV420P (BT.601 integer approximation) and flip vertically.
        int w = codecContext->width;
        int h = codecContext->height;
        for (int y = 0; y < h; y++)
        {
            int srcY = h - 1 - y; // Texture2D rows start at the bottom-left corner
            for (int x = 0; x < w; x++)
            {
                int i = (srcY * w + x) * 3;
                int r = buffer[i], g = buffer[i + 1], b = buffer[i + 2];
                frame->data[0][y * frame->linesize[0] + x] =
                    (byte)(((66 * r + 129 * g + 25 * b + 128) >> 8) + 16);
                if ((y & 1) == 0 && (x & 1) == 0)
                {
                    frame->data[1][(y / 2) * frame->linesize[1] + x / 2] =
                        (byte)(((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128);
                    frame->data[2][(y / 2) * frame->linesize[2] + x / 2] =
                        (byte)(((112 * r - 94 * g - 18 * b + 128) >> 8) + 128);
                }
            }
        }

        // pts is expressed in the codec time base (1/FPS), so one tick per frame.
        frame->pts = frameCount++;
        ffmpeg.avcodec_send_frame(codecContext, frame);

        AVPacket* packet = ffmpeg.av_packet_alloc();
        while (ffmpeg.avcodec_receive_packet(codecContext, packet) >= 0)
        {
            packet->stream_index = stream->index;
            // Rescale pts/dts from the codec time base to the muxer's stream time base.
            ffmpeg.av_packet_rescale_ts(packet, codecContext->time_base, stream->time_base);
            packet->duration = ffmpeg.av_rescale_q(1, codecContext->time_base, stream->time_base);
            packet->pos = -1;
            ffmpeg.av_interleaved_write_frame(formatContext, packet);
            ffmpeg.av_packet_unref(packet);
        }
        ffmpeg.av_packet_free(&packet);
        ffmpeg.av_frame_free(&frame);
    }
}
```
In the code above we first define a few constants such as the frame rate and the output file name. In Start() an AVDictionary carries the encoder options and the H.264 encoder is opened; an AVFormatContext is then created, the output container and video stream are set up, and the file header is written. LateUpdate() captures the screen every frame, converts it to YUV420P, and sends it to the encoder. Finally, OnDestroy() flushes the encoder, writes the trailer, and releases all resources.
Note that the code above targets the Windows platform, and the FFmpeg native libraries must be copied into the Unity project and referenced from it. Reading the screen back with Texture2D.ReadPixels should strictly happen at end of frame (for example from a coroutine that waits on WaitForEndOfFrame), and you should add exception handling so that the encoder resources are still released if the application exits abnormally.
Example: creating an RTSP server with C++ and FFmpeg
Creating an RTSP server involves the following steps:
1. Initialize the FFmpeg library
2. Open the input file or device
3. Set up the encoders and parameters for the video and audio streams
4. Create the AVFormatContext and AVCodecContext
5. Open the output network stream
6. Read the input in a loop and write it to the output
7. Close the input/output streams and the codecs
8. Release the FFmpeg library
Below is example code that uses C++ and FFmpeg to publish a stream over RTSP:
```cpp
#include <iostream>
#include <string>
#include <stdexcept>
#include <cstdio>
#include <cstdlib>
#include <cstring>
extern "C" {
#include <libavformat/avformat.h>
}

// av_err2str() is a C99 compound-literal macro that does not compile as C++,
// so provide a small replacement with the same name.
#ifdef av_err2str
#undef av_err2str
#endif
static std::string av_err2str(int errnum) {
    char buf[AV_ERROR_MAX_STRING_SIZE] = {0};
    av_strerror(errnum, buf, sizeof(buf));
    return buf;
}

#define RTSP_ADDRESS "rtsp://127.0.0.1:8554/test"

int main(int argc, char* argv[]) {
    AVFormatContext* input_format_ctx = nullptr;
    AVStream* input_stream = nullptr;
    AVFormatContext* output_format_ctx = nullptr;
    AVStream* output_stream = nullptr;
    AVPacket* packet = nullptr;
    int ret = 0, video_stream_index = -1, audio_stream_index = -1;

    // Initialize FFmpeg networking (av_register_all() is unnecessary since FFmpeg 4.0
    // and was removed in 5.0)
    avformat_network_init();

    // Open input file or device
    if ((ret = avformat_open_input(&input_format_ctx, "video.mp4", nullptr, nullptr)) < 0) {
        std::cerr << "Failed to open input file: " << av_err2str(ret) << std::endl;
        goto end;
    }

    // Retrieve stream information
    if ((ret = avformat_find_stream_info(input_format_ctx, nullptr)) < 0) {
        std::cerr << "Failed to retrieve stream information: " << av_err2str(ret) << std::endl;
        goto end;
    }

    // Find the first video and audio streams
    for (unsigned int i = 0; i < input_format_ctx->nb_streams; i++) {
        AVStream* stream = input_format_ctx->streams[i];
        if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_stream_index < 0) {
            video_stream_index = i;
        }
        if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_stream_index < 0) {
            audio_stream_index = i;
        }
    }
    if (video_stream_index < 0) {
        std::cerr << "No video stream found in input" << std::endl;
        goto end;
    }

    // Create output format context for the RTSP muxer
    if ((ret = avformat_alloc_output_context2(&output_format_ctx, nullptr, "rtsp", RTSP_ADDRESS)) < 0) {
        std::cerr << "Failed to create output format context: " << av_err2str(ret) << std::endl;
        goto end;
    }

    // Create the output video stream and copy the codec parameters
    // (stream copy / remux, no re-encoding takes place)
    input_stream = input_format_ctx->streams[video_stream_index];
    output_stream = avformat_new_stream(output_format_ctx, nullptr);
    if (!output_stream) {
        std::cerr << "Failed to create output stream" << std::endl;
        goto end;
    }
    if ((ret = avcodec_parameters_copy(output_stream->codecpar, input_stream->codecpar)) < 0) {
        std::cerr << "Failed to copy codec parameters: " << av_err2str(ret) << std::endl;
        goto end;
    }
    output_stream->codecpar->codec_tag = 0; // let the muxer choose the tag

    // Open the output I/O only for formats that need it (the rtsp muxer sets AVFMT_NOFILE
    // and handles its own network I/O)
    if (!(output_format_ctx->oformat->flags & AVFMT_NOFILE)) {
        if ((ret = avio_open(&output_format_ctx->pb, RTSP_ADDRESS, AVIO_FLAG_WRITE)) < 0) {
            std::cerr << "Failed to open output network stream: " << av_err2str(ret) << std::endl;
            goto end;
        }
    }

    // Write output stream header
    if ((ret = avformat_write_header(output_format_ctx, nullptr)) < 0) {
        std::cerr << "Failed to write output stream header: " << av_err2str(ret) << std::endl;
        goto end;
    }

    // Allocate a reusable packet
    packet = av_packet_alloc();
    if (!packet) {
        std::cerr << "Failed to allocate packet" << std::endl;
        goto end;
    }

    // Read packets from the input and write the video packets to the output
    while (true) {
        if ((ret = av_read_frame(input_format_ctx, packet)) < 0) {
            break; // AVERROR_EOF on normal end of input
        }
        if (packet->stream_index == video_stream_index) {
            AVStream* in_stream = input_format_ctx->streams[packet->stream_index];
            AVStream* out_stream = output_format_ctx->streams[0];
            // The output has a single stream; rescale timestamps from the input
            // stream's time_base to the output stream's time_base
            packet->stream_index = 0;
            packet->pts = av_rescale_q_rnd(packet->pts, in_stream->time_base, out_stream->time_base,
                                           (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
            packet->dts = av_rescale_q_rnd(packet->dts, in_stream->time_base, out_stream->time_base,
                                           (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
            packet->duration = av_rescale_q(packet->duration, in_stream->time_base, out_stream->time_base);
            packet->pos = -1;
            if ((ret = av_interleaved_write_frame(output_format_ctx, packet)) < 0) {
                std::cerr << "Failed to write video packet: " << av_err2str(ret) << std::endl;
                goto end;
            }
        }
        av_packet_unref(packet);
    }

    // Write output stream trailer
    if ((ret = av_write_trailer(output_format_ctx)) < 0) {
        std::cerr << "Failed to write output stream trailer: " << av_err2str(ret) << std::endl;
    }

end:
    // Close input and output and free resources
    if (input_format_ctx) {
        avformat_close_input(&input_format_ctx);
    }
    if (output_format_ctx) {
        if (!(output_format_ctx->oformat->flags & AVFMT_NOFILE)) {
            avio_closep(&output_format_ctx->pb);
        }
        avformat_free_context(output_format_ctx);
    }
    if (packet) {
        av_packet_free(&packet);
    }
    avformat_network_deinit();
    return ret < 0 ? 1 : 0;
}
```
This example uses a file named "video.mp4" as the input and copies (remuxes) its video stream to the RTSP output address. Note that FFmpeg's rtsp muxer normally acts as a client that announces and pushes the stream to the given URL, so in the typical setup an RTSP server must already be listening at that address; once it is, the stream can be played back with a client such as ffplay using the same URL. In practice you would change the input source and the output address to match your own needs.