
Actualmente estoy usando ffmpeg 6.0 para encapsular transmisiones sin formato h264 en formatos mp4, flv o rtmp. Cuando la tasa de bits supera los 1024 kb/s, se produce un fenómeno de mosaico en la parte inferior de la imagen final. Sospecho que ffmpeg puede tener algunas configuraciones de parámetros de búfer que hacen que la imagen se trunque. El código que escribí es el siguiente:
// Set up an FLV/RTMP muxer for a raw H.264 elementary stream and write the header.
// Fragment: m_outputFormatContext, m_outputUrl, track, packet, ret come from the
// enclosing scope.
ret = avformat_alloc_output_context2(&m_outputFormatContext, NULL, "flv", m_outputUrl.c_str());
if (ret < 0 || !m_outputFormatContext) { /* allocation failed: report and bail out */ }
const AVOutputFormat* ofmt = m_outputFormatContext->oformat;

AVStream* outStream = avformat_new_stream(m_outputFormatContext, NULL);
if (!outStream) { /* out of memory: report and bail out */ }
outStream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
outStream->codecpar->codec_id = AV_CODEC_ID_H264;
outStream->codecpar->width = track.width;
outStream->codecpar->height = track.height;
outStream->codecpar->profile = track.profile;
outStream->codecpar->codec_tag = 0;  // let the muxer pick the container tag
outStream->codecpar->level = track.level;
// Profiles below High (100) are 4:2:0; High 4:4:4 Predictive carries 4:4:4.
if (track.profile < 100) {
    outStream->codecpar->format = AV_PIX_FMT_YUV420P;
} else {
    outStream->codecpar->format = AV_PIX_FMT_YUV444P;
}

// NOTE(review): for FLV/MP4 the extradata must be an AVCDecoderConfigurationRecord
// (avcC) — a bare SPS+PPS concatenation (Annex-B style) is not valid here, and the
// packet payloads must likewise be length-prefixed (AVCC), not start-code delimited.
// TODO: confirm the format of track.params1/params2 and of the incoming packets.
int spsPpsLen = (int)(track.params1.size() + track.params2.size());
outStream->codecpar->extradata =
    (uint8_t*)av_malloc(spsPpsLen + AV_INPUT_BUFFER_PADDING_SIZE);
if (!outStream->codecpar->extradata) { /* out of memory: report and bail out */ }
outStream->codecpar->extradata_size = spsPpsLen;
memcpy(outStream->codecpar->extradata, track.params1.data(), track.params1.size());
memcpy(outStream->codecpar->extradata + track.params1.size(),
       track.params2.data(), track.params2.size());
// FFmpeg requires the trailing padding to be zeroed; leaving it uninitialized is
// undefined behavior for the bitstream readers that over-read into it.
memset(outStream->codecpar->extradata + spsPpsLen, 0, AV_INPUT_BUFFER_PADDING_SIZE);

AVDictionary* formatOpts = NULL;
// 500 ms was too tight for high-bitrate streams: a blocking network write that
// exceeds rw_timeout is aborted mid-packet, which truncates the frame and shows
// up as mosaic at the bottom of the picture. 5 s is a safer default.
av_dict_set(&formatOpts, "rw_timeout", "5000000", 0);
ret = avio_open2(&m_outputFormatContext->pb, m_outputUrl.c_str(),
                 AVIO_FLAG_WRITE, NULL, &formatOpts);
av_dict_free(&formatOpts);  // was leaked before; avio_open2 does not take ownership
if (ret < 0) { /* cannot open output URL: report and bail out */ }

ret = avformat_write_header(m_outputFormatContext, NULL);
if (ret < 0) { /* header write failed: report and bail out */ }

ret = av_interleaved_write_frame(m_outputFormatContext, packet);  // write video packet
if (ret < 0) { /* packet write failed: report (stream may be broken) */ }