
Decoding MediaRecorder produced webm stream

I am trying to decode a video stream coming from the browser using the ffmpeg API. The stream is produced by the webcam and recorded by MediaRecorder as webm. What I need in the end is a vector of opencv cv::Mat objects for further processing.

I have written a C++ web server using the uWebSockets library. The video stream is sent from the browser to the server through the websocket once per second. On the server, I append the received data to my custom buffer and decode it with the ffmpeg API.

If I just save the data to disk and play it back later with a media player, it plays fine. So whatever the browser sends is valid video.

I don't think I correctly understand how custom IO is supposed to behave for network streaming, because nothing seems to work.

The custom buffer:

struct Buffer
{
    std::vector<uint8_t> data;
    int currentPos = 0;
};

The readAVBuffer method for the custom IO:


int MediaDecoder::readAVBuffer(void* opaque, uint8_t* buf, int buf_size)
{
    MediaDecoder::Buffer* mbuf = (MediaDecoder::Buffer*)opaque;
    int count = 0;
    for(int i=0;i<buf_size;i++)
    {
        int index = i + mbuf->currentPos;
        if(index >= (int)mbuf->data.size())
        {
            break;
        }
        count++;
        buf[i] = mbuf->data.at(index);
    }
    if(count > 0) mbuf->currentPos+=count;

    std::cout << "read : "<<count<<" "<<mbuf->currentPos<<", buff size:"<<mbuf->data.size() << std::endl;
    if(count <= 0) return AVERROR(EAGAIN); //is this the error that should be returned? It can't be EOF, since most likely we're not done yet
    return count;
}

The big decode method, which should return whatever frames it could read:


std::vector<cv::Mat> MediaDecoder::decode(const char* data, size_t length)
{
    std::vector<cv::Mat> frames;
    //add data to the buffer
    for(size_t i=0;i<length;i++) {
        buf.data.push_back(data[i]);
    }

    //do not invoke the decoders until we have 1MB of data
    if(((buf.data.size() - buf.currentPos) < 1*1024*1024) && !initializedCodecs) return frames;

    std::cout << "decoding data length "<<length<<std::endl;

    if(!initializedCodecs) //initialize ffmpeg objects. Custom I/O, format, decoder, etc.
    {       
        //these are just members of the class 
        avioCtxPtr = std::unique_ptr<AVIOContext,avio_context_deleter>(
                    avio_alloc_context((uint8_t*)av_malloc(4096),4096,0,&buf,&readAVBuffer,nullptr,nullptr),
                    avio_context_deleter());
        if(!avioCtxPtr)
        {
            std::cerr << "Could not create IO buffer" << std::endl;
            return frames;
        }                

        fmt_ctx = std::unique_ptr<AVFormatContext,avformat_context_deleter>(avformat_alloc_context(),
                                                                          avformat_context_deleter());
        fmt_ctx->pb = avioCtxPtr.get();
        fmt_ctx->flags |= AVFMT_FLAG_CUSTOM_IO ;
        //fmt_ctx->max_analyze_duration = 2 * AV_TIME_BASE; // read 2 seconds of data
        {
            AVFormatContext *fmtCtxRaw = fmt_ctx.get();            
            if (avformat_open_input(&fmtCtxRaw, "", nullptr, nullptr) < 0) {
                std::cerr << "Could not open movie" << std::endl;
                return frames;
            }
        }
        if (avformat_find_stream_info(fmt_ctx.get(), nullptr) < 0) {
            std::cerr << "Could not find stream information" << std::endl;
            return frames;
        }
        if((video_stream_idx = av_find_best_stream(fmt_ctx.get(), AVMEDIA_TYPE_VIDEO, -1, -1, nullptr, 0)) < 0)
        {
            std::cerr << "Could not find video stream" << std::endl;
            return frames;
        }
        AVStream *video_stream = fmt_ctx->streams[video_stream_idx];
        AVCodec *dec = avcodec_find_decoder(video_stream->codecpar->codec_id);

        video_dec_ctx = std::unique_ptr<AVCodecContext,avcodec_context_deleter> (avcodec_alloc_context3(dec),
                                                                              avcodec_context_deleter());
        if (!video_dec_ctx)
        {
            std::cerr << "Failed to allocate the video codec context" << std::endl;
            return frames;
        }
        avcodec_parameters_to_context(video_dec_ctx.get(),video_stream->codecpar);
        video_dec_ctx->thread_count = 1;
       /* video_dec_ctx->max_b_frames = 0;
        video_dec_ctx->frame_skip_threshold = 10;*/

        AVDictionary *opts = nullptr;
        av_dict_set(&opts, "refcounted_frames", "1", 0);
        av_dict_set(&opts, "deadline", "1", 0);
        av_dict_set(&opts, "auto-alt-ref", "0", 0);
        av_dict_set(&opts, "lag-in-frames", "1", 0);
        av_dict_set(&opts, "rc_lookahead", "1", 0);
        av_dict_set(&opts, "drop_frame", "1", 0);
        av_dict_set(&opts, "error-resilient", "1", 0);

        int width = video_dec_ctx->width;
        videoHeight = video_dec_ctx->height;

        if(avcodec_open2(video_dec_ctx.get(), dec, &opts) < 0)
        {
            std::cerr << "Failed to open the video codec context" << std::endl;
            return frames;
        }

        AVPixelFormat  pFormat = AV_PIX_FMT_BGR24;
        img_convert_ctx = std::unique_ptr<SwsContext,swscontext_deleter>(sws_getContext(width, videoHeight,
                                         video_dec_ctx->pix_fmt,   width, videoHeight, pFormat,
                                         SWS_BICUBIC, nullptr, nullptr,nullptr),swscontext_deleter());

        frame = std::unique_ptr<AVFrame,avframe_deleter>(av_frame_alloc(),avframe_deleter());
        frameRGB = std::unique_ptr<AVFrame,avframe_deleter>(av_frame_alloc(),avframe_deleter());


        int numBytes = av_image_get_buffer_size(pFormat, width, videoHeight,32 /*https://stackoverflow.com/questions/35678041/what-is-linesize-alignment-meaning*/);
        std::unique_ptr<uint8_t,avbuffer_deleter> imageBuffer((uint8_t *) av_malloc(numBytes*sizeof(uint8_t)),avbuffer_deleter());
        av_image_fill_arrays(frameRGB->data,frameRGB->linesize,imageBuffer.get(),pFormat,width,videoHeight,32);
        frameRGB->width = width;
        frameRGB->height = videoHeight;

        initializedCodecs = true;
    }    
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = nullptr;
    pkt.size = 0;

    int read_frame_return = 0;
    while ( (read_frame_return=av_read_frame(fmt_ctx.get(), &pkt)) >= 0)
    {
        readFrame(&frames,&pkt,video_dec_ctx.get(),frame.get(),img_convert_ctx.get(),
                  videoHeight,frameRGB.get());
        //if(cancelled) break;
    }
    avioCtxPtr->eof_reached = 0;
    avioCtxPtr->error = 0;


    //flush
    //readFrame(&frames,nullptr,video_dec_ctx.get(),frame.get(),
    //          img_convert_ctx.get(),videoHeight,frameRGB.get());

    avioCtxPtr->eof_reached = 0;
    avioCtxPtr->error = 0;

    if(frames.empty())
    {
        std::cout << "buffer pos: "<<buf.currentPos<<", buff size:"<<buf.data.size()
                  <<",read_frame_return:"<<read_frame_return<< std::endl;
    }

    return frames;
}


What I expected to happen was to keep getting cv::Mat frames as I kept feeding it data. What actually happens is that, after the buffer has been read through completely, I see:

[matroska,webm @ 0x507b450] Read error at pos. 1278266 (0x13813a)
[matroska,webm @ 0x507b450] Seek to desired resync point failed. Seeking to earliest point available instead.

然后,即使以后再增加它的大小,也不會從緩沖區讀取更多的字節。

There is something I'm doing wrong here, and I don't understand what.

What I ended up doing was to do the reading of the incoming data and the actual decoding in different threads. The read method, however, now blocks if there are no more bytes available, until new data shows up.

When new bytes arrive, they are appended to the buffer, and a std::condition_variable signals the waiting thread to wake up and start reading from the buffer again.

It works well enough.
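For reference, below is a minimal sketch of that producer/consumer arrangement. It is not the original code: the SharedBuffer type, the finished flag, and the function names are illustrative. It also assumes FFmpeg's documented convention for custom read callbacks, namely returning AVERROR_EOF only at the true end of the stream; blocking instead of returning an error mid-stream keeps the AVIOContext from latching an error state.

#include <algorithm>
#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <vector>

extern "C" {
#include <libavformat/avio.h>
#include <libavutil/error.h>
}

struct SharedBuffer
{
    std::vector<uint8_t> data;
    size_t currentPos = 0;
    bool finished = false;      // set once the websocket closes for good
    std::mutex mtx;
    std::condition_variable cv;
};

// Read callback passed to avio_alloc_context. Blocks until bytes are
// available instead of returning an error, so av_read_frame never sees
// a mid-stream failure.
int blockingReadAVBuffer(void* opaque, uint8_t* buf, int buf_size)
{
    auto* sb = static_cast<SharedBuffer*>(opaque);
    std::unique_lock<std::mutex> lock(sb->mtx);
    sb->cv.wait(lock, [&]{ return sb->currentPos < sb->data.size() || sb->finished; });
    if (sb->currentPos >= sb->data.size())
        return AVERROR_EOF;     // the stream really is over, nothing more will come
    size_t avail = sb->data.size() - sb->currentPos;
    size_t count = std::min(static_cast<size_t>(buf_size), avail);
    std::copy_n(sb->data.begin() + sb->currentPos, count, buf);
    sb->currentPos += count;
    return static_cast<int>(count);
}

// Producer side, called from the websocket handler when a chunk arrives.
void appendChunk(SharedBuffer& sb, const char* data, size_t length)
{
    {
        std::lock_guard<std::mutex> lock(sb.mtx);
        const uint8_t* bytes = reinterpret_cast<const uint8_t*>(data);
        sb.data.insert(sb.data.end(), bytes, bytes + length);
    }
    sb.cv.notify_one();         // wake the decoding thread
}

The same SharedBuffer is passed as the opaque pointer to avio_alloc_context, and the decoding thread simply loops on av_read_frame.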

