FFmpeg: Use refcounted buffers for frame data

This commit is contained in:
Vicki Pfau 2021-08-21 18:27:06 -07:00
parent 2ade2f9217
commit c18bc1baa5
1 changed file with 5 additions and 8 deletions

View File

@@ -318,6 +318,7 @@ bool FFmpegEncoderOpen(struct FFmpegEncoder* encoder, const char* outfile) {
encoder->audioFrame->nb_samples = encoder->audio->frame_size; encoder->audioFrame->nb_samples = encoder->audio->frame_size;
encoder->audioFrame->format = encoder->audio->sample_fmt; encoder->audioFrame->format = encoder->audio->sample_fmt;
encoder->audioFrame->pts = 0; encoder->audioFrame->pts = 0;
encoder->audioFrame->channel_layout = AV_CH_LAYOUT_STEREO;
#ifdef USE_LIBAVRESAMPLE #ifdef USE_LIBAVRESAMPLE
encoder->resampleContext = avresample_alloc_context(); encoder->resampleContext = avresample_alloc_context();
av_opt_set_int(encoder->resampleContext, "in_channel_layout", AV_CH_LAYOUT_STEREO, 0); av_opt_set_int(encoder->resampleContext, "in_channel_layout", AV_CH_LAYOUT_STEREO, 0);
@@ -334,9 +335,7 @@ bool FFmpegEncoderOpen(struct FFmpegEncoder* encoder, const char* outfile) {
#endif #endif
encoder->audioBufferSize = (encoder->audioFrame->nb_samples * PREFERRED_SAMPLE_RATE / encoder->sampleRate) * 4; encoder->audioBufferSize = (encoder->audioFrame->nb_samples * PREFERRED_SAMPLE_RATE / encoder->sampleRate) * 4;
encoder->audioBuffer = av_malloc(encoder->audioBufferSize); encoder->audioBuffer = av_malloc(encoder->audioBufferSize);
encoder->postaudioBufferSize = av_samples_get_buffer_size(0, encoder->audio->channels, encoder->audio->frame_size, encoder->audio->sample_fmt, 0); av_frame_get_buffer(encoder->audioFrame, 0);
encoder->postaudioBuffer = av_malloc(encoder->postaudioBufferSize);
avcodec_fill_audio_frame(encoder->audioFrame, encoder->audio->channels, encoder->audio->sample_fmt, (const uint8_t*) encoder->postaudioBuffer, encoder->postaudioBufferSize, 0);
if (encoder->audio->codec->id == AV_CODEC_ID_AAC && if (encoder->audio->codec->id == AV_CODEC_ID_AAC &&
(strcasecmp(encoder->containerFormat, "mp4") == 0|| (strcasecmp(encoder->containerFormat, "mp4") == 0||
@@ -505,7 +504,7 @@ bool FFmpegEncoderOpen(struct FFmpegEncoder* encoder, const char* outfile) {
encoder->videoFrame->height = encoder->video->height; encoder->videoFrame->height = encoder->video->height;
encoder->videoFrame->pts = 0; encoder->videoFrame->pts = 0;
_ffmpegSetVideoDimensions(&encoder->d, encoder->iwidth, encoder->iheight); _ffmpegSetVideoDimensions(&encoder->d, encoder->iwidth, encoder->iheight);
av_image_alloc(encoder->videoFrame->data, encoder->videoFrame->linesize, encoder->videoFrame->width, encoder->videoFrame->height, encoder->videoFrame->format, 32); av_frame_get_buffer(encoder->videoFrame, 32);
#ifdef FFMPEG_USE_CODECPAR #ifdef FFMPEG_USE_CODECPAR
avcodec_parameters_from_context(encoder->videoStream->codecpar, encoder->video); avcodec_parameters_from_context(encoder->videoStream->codecpar, encoder->video);
#endif #endif
@@ -603,7 +602,6 @@ void FFmpegEncoderClose(struct FFmpegEncoder* encoder) {
} }
if (encoder->videoFrame) { if (encoder->videoFrame) {
av_freep(encoder->videoFrame->data);
av_frame_free(&encoder->videoFrame); av_frame_free(&encoder->videoFrame);
} }
@@ -668,7 +666,6 @@ void _ffmpegPostAudioFrame(struct mAVStream* stream, int16_t left, int16_t right
return; return;
} }
int channelSize = 2 * av_get_bytes_per_sample(encoder->audio->sample_fmt);
encoder->currentAudioSample = 0; encoder->currentAudioSample = 0;
#ifdef USE_LIBAVRESAMPLE #ifdef USE_LIBAVRESAMPLE
avresample_convert(encoder->resampleContext, 0, 0, 0, avresample_convert(encoder->resampleContext, 0, 0, 0,
@ -678,14 +675,14 @@ void _ffmpegPostAudioFrame(struct mAVStream* stream, int16_t left, int16_t right
return; return;
} }
av_frame_make_writable(encoder->audioFrame); av_frame_make_writable(encoder->audioFrame);
int samples = avresample_read(encoder->resampleContext, encoder->audioFrame->data, encoder->postaudioBufferSize / channelSize); int samples = avresample_read(encoder->resampleContext, encoder->audioFrame->data, encoder->audioFrame->nb_samples);
#else #else
av_frame_make_writable(encoder->audioFrame); av_frame_make_writable(encoder->audioFrame);
if (swr_get_out_samples(encoder->resampleContext, 1) < encoder->audioFrame->nb_samples) { if (swr_get_out_samples(encoder->resampleContext, 1) < encoder->audioFrame->nb_samples) {
swr_convert(encoder->resampleContext, NULL, 0, (const uint8_t**) &encoder->audioBuffer, encoder->audioBufferSize / 4); swr_convert(encoder->resampleContext, NULL, 0, (const uint8_t**) &encoder->audioBuffer, encoder->audioBufferSize / 4);
return; return;
} }
int samples = swr_convert(encoder->resampleContext, encoder->audioFrame->data, encoder->postaudioBufferSize / channelSize, int samples = swr_convert(encoder->resampleContext, encoder->audioFrame->data, encoder->audioFrame->nb_samples,
(const uint8_t**) &encoder->audioBuffer, encoder->audioBufferSize / 4); (const uint8_t**) &encoder->audioBuffer, encoder->audioBufferSize / 4);
#endif #endif