
libav error message with H264 codec: "non-strictly-monotonic PTS"

I have almost zero experience with libav/FFmpeg. I wrote this code to capture the screen and write it to a file, and I've run into some problems along the way. At first I used the AV_CODEC_ID_MPEG4 codec and it worked fine, except that, oddly, the application started spamming messages like this:

[dshow @ 02da1c80] real-time buffer [screen-capture-recorder] [video input] too full or near too full (64% of size: 128000000 [rtbufsize parameter])! frame dropped!

So after some searching I found that the encoder was probably too slow and I needed to switch to a faster one, so I changed it to AV_CODEC_ID_H264. Suddenly the output file became unreadable, and the application started spamming:

[libx264 @ 0455ff40] non-strictly-monotonic PTS

I looked everywhere, and all the advice boiled down to adding these two lines:

if (outPacket.pts != AV_NOPTS_VALUE)
    outPacket.pts = av_rescale_q(outPacket.pts, videoStream->codec->time_base, videoStream->time_base);
if (outPacket.dts != AV_NOPTS_VALUE)
    outPacket.dts = av_rescale_q(outPacket.dts, videoStream->codec->time_base, videoStream->time_base);

So I added them, and the result was the same.

So what should I do? How do I configure the output correctly?

Here is my code:

#include "MainWindow.h"

#include <QGuiApplication>
#include <QLabel>
#include <QScreen>
#include <QTimer>
#include <QLayout>
#include <QImage>
#include <QtConcurrent/QtConcurrent>
#include <QThreadPool>
#include <QVideoFrame>

#include "ScreenCapture.h"

MainWindow::MainWindow(QWidget *parent) : QMainWindow(parent)
{
    resize(800, 600);

    label = new QLabel();
    label->setAlignment(Qt::AlignHCenter | Qt::AlignVCenter);

    auto layout = new QHBoxLayout();
    layout->addWidget(label);

    auto widget = new QWidget();
    widget->setLayout(layout);
    setCentralWidget(widget);

    connect(this, &MainWindow::imageReady, [=](QImage image) {label->setPixmap(QPixmap::fromImage(image).scaled(label->size(), Qt::KeepAspectRatio));});

    init();
    initOutFile();
    collectFrames();
}

MainWindow::~MainWindow()
{
    isRunning = false;

    QThreadPool::globalInstance()->waitForDone();

    avformat_close_input(&inputFormatContext);
    avformat_free_context(inputFormatContext);
}

void MainWindow::init()
{
    av_register_all();
    avcodec_register_all();
    avdevice_register_all();

    auto screen = QGuiApplication::screens()[0];
    QRect geometry = screen->geometry();

    inputFormatContext = avformat_alloc_context();

//    AVDictionary* options = NULL;
//    av_dict_set(&options, "framerate", "30", NULL);
//    av_dict_set(&options, "offset_x", QString::number(geometry.x()).toLatin1().data(), NULL);
//    av_dict_set(&options, "offset_y", QString::number(geometry.y()).toLatin1().data(), NULL);
//    av_dict_set(&options, "preset", "ultrafast", NULL);
//    av_dict_set(&options, "probesize", "10MB", NULL);
//    av_dict_set(&options, "pix_fmt", "yuv420p", NULL);
//    av_dict_set(&options, "video_size", QString(QString::number(geometry.width()) + "x" + QString::number(geometry.height())).toLatin1().data(), NULL);

//    AVInputFormat* inputFormat = av_find_input_format("gdigrab");
//    avformat_open_input(&inputFormatContext, "desktop", inputFormat, &options);

    QSettings settings("HKEY_CURRENT_USER\\Software\\screen-capture-recorder", QSettings::NativeFormat);
    settings.setValue("start_x", geometry.x());
    settings.setValue("start_y", geometry.y());
    settings.setValue("capture_width", geometry.width());
    settings.setValue("capture_height", geometry.height());

    AVDictionary* options = NULL;
    av_dict_set(&options, "preset", "ultrafast", NULL);
    av_dict_set(&options, "vcodec", "h264", NULL);
    av_dict_set(&options, "video_size", "1920x1080", NULL);
    av_dict_set(&options, "crf", "0", NULL);
    av_dict_set(&options, "tune", "zerolatency", NULL);
    av_dict_set(&options, "rtbufsize", "128M", NULL);

    AVInputFormat *format = av_find_input_format("dshow");
    avformat_open_input(&inputFormatContext, "video=screen-capture-recorder", format, &options);

    av_dict_free(&options);
    avformat_find_stream_info(inputFormatContext, NULL);

    videoStreamIndex = av_find_best_stream(inputFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);

    AVStream* inStream = inputFormatContext->streams[videoStreamIndex];

    inputCodec = avcodec_find_decoder(inStream->codecpar->codec_id);
    if(!inputCodec) qDebug() << "Can't find input codec!";

    inputCodecContext = avcodec_alloc_context3(inputCodec);

    qDebug() << "IN_FORMAT" << av_get_pix_fmt_name(inStream->codec->pix_fmt);

    avcodec_parameters_to_context(inputCodecContext, inStream->codecpar);

    if(avcodec_open2(inputCodecContext, inputCodec, NULL)) qDebug() << "Can't open input codec!";
}

void MainWindow::initOutFile()
{
    const char* filename = "C:/Temp/output.mp4";

    if(avformat_alloc_output_context2(&outFormatContext, NULL, NULL, filename) < 0) qDebug() << "Can't create out context!";

    outCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if(!outCodec) qDebug() << "Can't find codec!";

    videoStream = avformat_new_stream(outFormatContext, outCodec);
    videoStream->time_base = {1, 30};

    const AVPixelFormat* pixelFormat = outCodec->pix_fmts;
    while (*pixelFormat != AV_PIX_FMT_NONE)
    {
        qDebug() << "OUT_FORMAT" << av_get_pix_fmt_name(*pixelFormat);
        ++pixelFormat;
    }

    outCodecContext = videoStream->codec;
    outCodecContext->bit_rate = 16000000;
    outCodecContext->rc_max_rate = 0;
    outCodecContext->rc_buffer_size = 0;
    outCodecContext->qmin = 10;
    outCodecContext->qmax = 51;
    outCodecContext->qcompress = 0.6f;
    outCodecContext->width = inputCodecContext->width;
    outCodecContext->height = inputCodecContext->height;
    outCodecContext->time_base = videoStream->time_base;
    outCodecContext->gop_size = 10;
    outCodecContext->max_b_frames = 1;
    outCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;

    if (outFormatContext->oformat->flags & AVFMT_GLOBALHEADER) outCodecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;

    if(avcodec_open2(outCodecContext, outCodec, NULL)) qDebug() << "Can't open out codec!";

    swsContext = sws_getContext(inputCodecContext->width,
                                inputCodecContext->height,
                                inputCodecContext->pix_fmt,
                                outCodecContext->width,
                                outCodecContext->height,
                                outCodecContext->pix_fmt,
                                SWS_BICUBIC, NULL, NULL, NULL);

    if(avio_open(&outFormatContext->pb, filename, AVIO_FLAG_WRITE) < 0) qDebug() << "Can't open file!";
    if(avformat_write_header(outFormatContext, NULL) < 0) qDebug() << "Can't write header!";
}

void MainWindow::collectFrames()
{
    QtConcurrent::run([this](){

        AVFrame* inFrame = av_frame_alloc();
        inFrame->format = inputCodecContext->pix_fmt;
        inFrame->width = inputCodecContext->width;
        inFrame->height = inputCodecContext->height;

        int size = av_image_alloc(inFrame->data, inFrame->linesize, inFrame->width, inFrame->height, inputCodecContext->pix_fmt, 1);

        AVFrame* outFrame = av_frame_alloc();
        outFrame->format = outCodecContext->pix_fmt;
        outFrame->width = outCodecContext->width;
        outFrame->height = outCodecContext->height;

        av_image_alloc(outFrame->data, outFrame->linesize, outFrame->width, outFrame->height, outCodecContext->pix_fmt, 1);

        AVPacket packet;
        av_init_packet(&packet);

        while(isRunning && (av_read_frame(inputFormatContext, &packet) >= 0))
        {
            if(packet.stream_index == videoStream->index)
            {
                //for gdigrab
//                uint8_t* result = new uint8_t[inFrame->width * inFrame->height * 4];
//                for (int i = 0; i < inFrame->height * inFrame->width * 4; i += 4)
//                {
//                    result[i + 0] = packet.data[i + 2]; //B
//                    result[i + 1] = packet.data[i + 3]; //G
//                    result[i + 2] = packet.data[i + 0]; //R
//                    result[i + 3] = packet.data[i + 1]; //A
//                }

//                memcpy(inFrame->data[0], result, size);
//                delete result;

                QImage image(packet.data, inFrame->width, inFrame->height, QImage::Format_ARGB32);
                QImage mirrored = image.mirrored(false, true);
                emit imageReady(mirrored);

                memcpy(inFrame->data[0], mirrored.bits(), size);

                sws_scale(swsContext, inFrame->data, inFrame->linesize, 0, inputCodecContext->height, outFrame->data, outFrame->linesize);

                av_packet_unref(&packet);

                AVPacket outPacket;
                av_init_packet(&outPacket);

                int encodeResult = AVERROR(EAGAIN);
                while(encodeResult == AVERROR(EAGAIN))
                {
                    if(avcodec_send_frame(outCodecContext, outFrame)) qDebug() << "Send frame error!";

                    encodeResult = avcodec_receive_packet(outCodecContext, &outPacket);
                }
                if(encodeResult != 0) qDebug() << "Encoding error!" << encodeResult;

                if(outPacket.pts != AV_NOPTS_VALUE) outPacket.pts = av_rescale_q(outPacket.pts, videoStream->codec->time_base, videoStream->time_base);
                if(outPacket.dts != AV_NOPTS_VALUE) outPacket.dts = av_rescale_q(outPacket.dts, videoStream->codec->time_base, videoStream->time_base);

                av_interleaved_write_frame(outFormatContext, &outPacket);

                av_packet_unref(&outPacket);
            }
        }

        av_freep(inFrame->data);
        av_freep(outFrame->data);

        av_write_trailer(outFormatContext);
        avio_close(outFormatContext->pb);
    });

}

You need to set the pts of outFrame before calling send_frame.
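
The libx264 error is a direct symptom of this: the encoder derives each packet's timestamps from the frame's pts, and since outFrame->pts is never assigned, every frame reaches the encoder with the same unset value, so the rescaling lines have nothing valid to rescale. A minimal sketch of the fix, assuming the {1, 30} time base already set in initOutFile(), and using a hypothetical frameCounter variable that is not in the original code:

int64_t frameCounter = 0; // hypothetical; one tick per captured frame

// ... inside the capture loop, after sws_scale() and before encoding:
// stamp the frame in the encoder's time base so pts is strictly increasing
outFrame->pts = frameCounter++;

if (avcodec_send_frame(outCodecContext, outFrame)) qDebug() << "Send frame error!";
// ... receive and write packets as before

With pts set in the codec's time base, the existing av_rescale_q() calls then convert it to the stream's time base as intended (here both bases are {1, 30}, so the rescale is a no-op, but it keeps the code correct if the two ever diverge).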
