[英]How to write silent audio data to an audio stream?
我正在向AVStream寫一些圖像,然后我正在讀一個mp3文件並將其寫入不同的AVStream。 問題是音頻流比視頻流短一點,所以如果我添加更多圖像和另一個音頻文件,音頻就不再與視頻同步了。 所以我的想法是在將另一個音頻文件寫入音頻流之前將靜音音頻數據寫入音頻流。 但我無法弄清楚如何將靜音數據寫入音頻流。
我發現這篇文章,但我不知道如何計算數據包大小或如何將數據包寫入音頻流。
到目前為止,這是我最“成功”的方法,但結果(audioTest(0xff).mp4)遠非沉默。
/* set up the audio convert context */
libffmpeg::SwrContext* audioConvertContext = libffmpeg::swr_alloc();
libffmpeg::av_opt_set_int(audioConvertContext, "in_channel_count", data->audioCodecContext->channels, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "out_channel_count", data->audioCodecContext->channels, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "in_channel_layout", data->audioCodecContext->channel_layout, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "out_channel_layout", data->audioCodecContext->channel_layout, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "in_sample_rate", data->audioCodecContext->sample_rate, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "out_sample_rate", data->audioCodecContext->sample_rate, 0);
libffmpeg::av_opt_set_sample_fmt(audioConvertContext, "in_sample_fmt", libffmpeg::AV_SAMPLE_FMT_S16, 0);
libffmpeg::av_opt_set_sample_fmt(audioConvertContext, "out_sample_fmt", data->audioCodecContext->sample_fmt, 0);
int ret = libffmpeg::swr_init(audioConvertContext);
if (ret < 0)
{
Helper::ThrowError("Failed to allocate audio reformat context.", ret);
}
/* set up silent frame */
libffmpeg::AVFrame* silentFrame = libffmpeg::av_frame_alloc();
if (!silentFrame)
{
Helper::ThrowError("Failed to allocate audio encode frame.");
}
silentFrame->nb_samples = data->audioCodecContext->frame_size;
silentFrame->format = data->audioCodecContext->sample_fmt;
silentFrame->channel_layout = data->audioCodecContext->channel_layout;
silentFrame->channels = data->audioCodecContext->channels;
silentFrame->sample_rate = data->audioCodecContext->sample_rate;
/* alloc the frame buffer */
ret = libffmpeg::av_frame_get_buffer(silentFrame, 0);
if (ret < 0)
{
Helper::ThrowError("Could not allocate audio data buffers.");
}
int got_output;
int samples_count;
double duration = 4 * (double)data->audioStream->time_base.den / (double)data->audioStream->time_base.num;
while (av_stream_get_end_pts(data->audioStream) < duration)
{
libffmpeg::AVPacket pkt;
libffmpeg::av_init_packet(&pkt);
ret = libffmpeg::av_frame_make_writable(silentFrame);
if (ret < 0)
{
Helper::ThrowError("Could not make frame writable.");
}
for (int j = 0; j < data->audioCodecContext->frame_size; j++)
{
silentFrame->data[0][2 * j] = 0xff;
for (int k = 1; k < data->audioCodecContext->channels; k++)
{
silentFrame->data[0][2 * j + k] = silentFrame->data[0][2 * j];
}
}
int dst_nb_samples = libffmpeg::av_rescale_rnd(
libffmpeg::swr_get_delay(audioConvertContext, data->audioCodecContext->sample_rate) + silentFrame->nb_samples,
data->audioCodecContext->sample_rate, data->audioCodecContext->sample_rate,
libffmpeg::AV_ROUND_UP);
ret = libffmpeg::swr_convert(
audioConvertContext,
silentFrame->data, dst_nb_samples,
(const libffmpeg::uint8_t * *) & silentFrame->data,
silentFrame->nb_samples);
if (ret < 0)
{
Helper::ThrowError("Error while converting audio frame.", ret);
}
silentFrame->pts = libffmpeg::av_rescale_q(samples_count, libffmpeg::AVRational{ 1, data->audioCodecContext->sample_rate }, data->audioCodecContext->time_base);
samples_count += dst_nb_samples;
ret = libffmpeg::avcodec_encode_audio2(data->audioCodecContext, &pkt, silentFrame, &got_output);
if (ret < 0)
{
Helper::ThrowError("Error while encoding audio frame.", ret);
}
if (got_output)
{
pkt.stream_index = data->audioStream->index;
if (ret = av_write_frame(data->formatContext, &pkt))
{
Helper::ThrowError("Error while writing audio frame.", ret);
}
libffmpeg::av_packet_unref(&pkt);
}
}
libffmpeg::av_frame_free(&silentFrame);
錯誤在於我寫入數組的方式。我不太熟悉 C++,所以我的解決方案可能有點亂,但至少它現在能正常工作了。
/* Set up the audio convert context: interleaved S16 silence in,
 * the encoder's native sample format out (rates and layouts identical). */
libffmpeg::SwrContext* audioConvertContext = libffmpeg::swr_alloc();
libffmpeg::av_opt_set_int(audioConvertContext, "in_channel_count", data->audioCodecContext->channels, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "out_channel_count", data->audioCodecContext->channels, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "in_channel_layout", data->audioCodecContext->channel_layout, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "out_channel_layout", data->audioCodecContext->channel_layout, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "in_sample_rate", data->audioCodecContext->sample_rate, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "out_sample_rate", data->audioCodecContext->sample_rate, 0);
libffmpeg::av_opt_set_sample_fmt(audioConvertContext, "in_sample_fmt", libffmpeg::AV_SAMPLE_FMT_S16, 0);
libffmpeg::av_opt_set_sample_fmt(audioConvertContext, "out_sample_fmt", data->audioCodecContext->sample_fmt, 0);
int ret = libffmpeg::swr_init(audioConvertContext);
if (ret < 0)
{
    Helper::ThrowError("Failed to allocate audio reformat context.", ret);
}
/* Frame that receives one encoder-format frame of silence. */
libffmpeg::AVFrame* silentFrame = libffmpeg::av_frame_alloc();
if (!silentFrame)
{
    Helper::ThrowError("Failed to allocate audio encode frame.");
}
silentFrame->nb_samples = data->audioCodecContext->frame_size;
silentFrame->format = data->audioCodecContext->sample_fmt;
silentFrame->channel_layout = data->audioCodecContext->channel_layout;
silentFrame->channels = data->audioCodecContext->channels;
silentFrame->sample_rate = data->audioCodecContext->sample_rate;
/* Allocate the frame's own sample buffers (the conversion destination). */
ret = libffmpeg::av_frame_get_buffer(silentFrame, 0);
if (ret < 0)
{
    Helper::ThrowError("Could not allocate audio data buffers.");
}
/* Zero-filled interleaved S16 source buffer: 2 bytes per sample per channel.
 * All-zero PCM is digital silence.  BUG FIX: the old code allocated
 * frame_size*channels*16 bytes and zeroed part of it with a hand-written
 * int loop; calloc with the exact size does the same thing correctly.
 * (Also removed: an av_packet_alloc() whose result was shadowed by the
 * stack AVPacket below and leaked on every call.) */
const size_t silenceBytes =
    (size_t)data->audioCodecContext->frame_size * data->audioCodecContext->channels * 2;
void* buffer = calloc(1, silenceBytes);
if (!buffer)
{
    Helper::ThrowError("could not allocate the silence buffer.");
}
const libffmpeg::uint8_t* silenceData[1] = { (const libffmpeg::uint8_t*)buffer };
int got_output = 0;   /* BUG FIX: was read uninitialized if encode failed */
int samples_count = 0; /* BUG FIX: was uninitialized -> garbage first pts */
/* Target end position: 4 seconds expressed in stream time_base units. */
double duration = 4 * (double)data->audioStream->time_base.den / (double)data->audioStream->time_base.num;
while (av_stream_get_end_pts(data->audioStream) < duration)
{
    libffmpeg::AVPacket pkt;
    libffmpeg::av_init_packet(&pkt);
    pkt.data = NULL; /* let the encoder allocate the payload */
    pkt.size = 0;
    ret = libffmpeg::av_frame_make_writable(silentFrame);
    if (ret < 0)
    {
        Helper::ThrowError("Could not make frame writable.");
    }
    /* Convert the zeroed S16 samples into the frame's buffers.
     * BUG FIX: the old code repointed silentFrame->data[0] at the malloc'd
     * buffer and then converted the frame's plane array into itself;
     * source and destination are now distinct, and the frame keeps the
     * buffers av_frame_get_buffer() gave it. */
    ret = libffmpeg::swr_convert(
        audioConvertContext,
        silentFrame->data, silentFrame->nb_samples,
        silenceData, data->audioCodecContext->frame_size);
    if (ret < 0)
    {
        Helper::ThrowError("Error while converting audio frame.", ret);
    }
    /* pts counts samples, rescaled from 1/sample_rate to the codec time base. */
    silentFrame->pts = libffmpeg::av_rescale_q(samples_count, libffmpeg::AVRational{ 1, data->audioCodecContext->sample_rate }, data->audioCodecContext->time_base);
    samples_count += ret; /* swr_convert returns the samples actually written */
    ret = libffmpeg::avcodec_encode_audio2(data->audioCodecContext, &pkt, silentFrame, &got_output);
    if (ret < 0)
    {
        Helper::ThrowError("Error while encoding audio frame.", ret);
    }
    if (got_output)
    {
        pkt.stream_index = data->audioStream->index;
        /* BUG FIX: av_write_frame may legitimately return 1 (flushed);
         * only a negative return is an error. */
        if ((ret = av_write_frame(data->formatContext, &pkt)) < 0)
        {
            Helper::ThrowError("Error while writing audio frame.", ret);
        }
        libffmpeg::av_packet_unref(&pkt);
    }
}
free(buffer);
libffmpeg::av_frame_free(&silentFrame);
libffmpeg::swr_free(&audioConvertContext); /* BUG FIX: context was leaked */
聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.