前言
使用 FFmpeg 对一个 Mp4 文件的音频流进行解码,然后使用 libswresample 将解码后的 PCM 音频数据转换为目标格式的数据,最终使用 OpenSL ES 进行播放。
FFmpeg 音频解码
前面我们已经对视频解码流程进行了详细的介绍,一个多媒体文件(Mp4)一般包括一个音频流和一个视频流,而 FFmpeg 对音频流和视频流的解码流程一致。因此,本节将不再对音频解码流程进行赘述。
类似于视频流的处理,音频流的处理流程为:(Mp4文件)解协议->解封装->音频解码->重采样->播放。
这里面重复提到了重采样,类似于视频图像的转码——因为显示器最终显示的是 RGB 数据,这一点比较好理解。那么为什么要对解码后的音频数据进行重采样呢?
一般采集音频时会有多种采样率可以选择,当该采样率与音频设备驱动的固定采样率不符时,就会导致变声或者音频出现快放慢放效果。
此时就需要用到重采样来保证音频采样率和设备驱动采样率一致,使音频正确播放。
使用 libswresample 库对音频进行重采样,有如下几个步骤:
//1. 生成 resample 上下文,设置输入和输出的通道数、采样率以及采样格局,初始化上下文
m_SwrContext = swr_alloc();
av_opt_set_int(m_SwrContext, "in_channel_layout", codeCtx->channel_layout, 0);
av_opt_set_int(m_SwrContext, "out_channel_layout", AUDIO_DST_CHANNEL_LAYOUT, 0);
av_opt_set_int(m_SwrContext, "in_sample_rate", codeCtx->sample_rate, 0);
av_opt_set_int(m_SwrContext, "out_sample_rate", AUDIO_DST_SAMPLE_RATE, 0);
av_opt_set_sample_fmt(m_SwrContext, "in_sample_fmt", codeCtx->sample_fmt, 0);
av_opt_set_sample_fmt(m_SwrContext, "out_sample_fmt", DST_SAMPLT_FORMAT, 0);
swr_init(m_SwrContext);
//2. 申请输出 Buffer
m_nbSamples = (int)av_rescale_rnd(NB_SAMPLES, AUDIO_DST_SAMPLE_RATE, codeCtx->sample_rate, AV_ROUND_UP);
m_BufferSize = av_samples_get_buffer_size(NULL, AUDIO_DST_CHANNEL_COUNTS,m_nbSamples, DST_SAMPLT_FORMAT, 1);
m_AudioOutBuffer = (uint8_t *) malloc(m_BufferSize);
//3. 重采样,frame 为解码帧
int result = swr_convert(m_SwrContext, &m_AudioOutBuffer, m_BufferSize / 2, (const uint8_t **) frame->data, frame->nb_samples);
if (result > 0 ) {
//play
}
//4. 释放资源
if(m_AudioOutBuffer) {
free(m_AudioOutBuffer);
m_AudioOutBuffer = nullptr;
}
if(m_SwrContext) {
swr_free(&m_SwrContext);
m_SwrContext = nullptr;
}
OpenSL ES 播放音频
OpenSL ES 全称为:Open Sound Library for Embedded Systems,是一个针对嵌入式系统的开放硬件音频加速库,支持音频的采集和播放,它提供了一套高性能、低延迟的音频功能实现方法。
并且实现了软硬件音频功能的跨平台部署,大大降低了上层音频应用的开发难度。
OpenSL ES 是基于 C 语言实现的,但其提供的接口是采用面向对象的方式实现的,OpenSL ES 的大多数 API 是通过对象来调用的。
Object 和 Interface 是 OpenSL ES 中的两大基本概念,可以类比为 Java 中的对象和接口。在 OpenSL ES 中,每个 Object 可以存在一系列的 Interface,并且为每个对象都提供了一系列的基本操作,如 Realize,GetState,Destroy 等。
重要的一点是,只有通过 GetInterface 方法拿到 Object 的 Interface,才能使用 Object 提供的功能。
Audio 引擎对象和接口
Audio 引擎对象和接口,即 Engine Object 和 SLEngineItf Interface。Engine Object 的主要功能是管理 Audio Engine 的生命周期,提供引擎对象的管理接口。引擎对象的使用方法如下:
SLresult result;
// 创立引擎目标
result = slCreateEngine(&engineObject, 0, NULL, 0, NULL, NULL);
assert(SL_RESULT_SUCCESS == result);
(void)result;
// 实例化
result = (*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE);
assert(SL_RESULT_SUCCESS == result);
(void)result;
// 获取引擎目标接口
result = (*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &engineEngine);
assert(SL_RESULT_SUCCESS == result);
(void)result;
// 释放引擎目标的资源
result = (*engineObject)->Destroy(engineObject, SL_BOOLEAN_FALSE);
assert(SL_RESULT_SUCCESS == result);
(void)result;
SLRecordItf 和 SLPlayItf
SLRecordItf 和 SLPlayItf 分别抽象多媒体功能 recorder 和 player,通过 SLEngineItf 的 CreateAudioPlayer 和 CreateAudioRecorder 方法分别创建 player 和 recorder 对象实例。
// Create the audio recorder object.
// NOTE(review): recSource, dataSink, iids and required are declared outside
// this snippet — presumably built like the SLDataSource/SLDataSink section
// below; verify against the full source.
result = (*engineEngine)->CreateAudioRecorder(engineEngine, &recorderObject , &recSource, &dataSink,
NUM_RECORDER_EXPLICIT_INTERFACES, iids, required);
// Create the audio player object (requests 1 explicit interface).
SLresult result = (*engineEngine)->CreateAudioPlayer(
engineEngine,
&audioPlayerObject,
&dataSource,
&dataSink,
1,
interfaceIDs,
requiredInterfaces
);
SLDataSource 和 SLDataSink
OpenSL ES 中的 SLDataSource 和 SLDataSink 结构体,主要用于构建 audio player 和 recorder 对象,其中 SLDataSource 表示音频数据来源的信息,SLDataSink 表示音频数据输出的信息。
// Buffer-queue locator for the data source.
// FIX: the original lines were truncated mid-identifier ("dataSou",
// "SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEU"); reconstructed from the
// `dataSourceLocator` usage in the SLDataSource initializer below.
SLDataLocator_AndroidSimpleBufferQueue dataSourceLocator = {
    SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, // locator type
    1                                        // number of buffers
};
// PCM format of the data source.
SLDataFormat_PCM dataSourceFormat = {
    SL_DATAFORMAT_PCM,        // format type
    wav_get_channels(wav),    // channel count
    wav_get_rate(wav) * 1000, // sample rate — OpenSL ES expects milliHertz
    wav_get_bits(wav),        // bits per sample
    wav_get_bits(wav),        // container size
    SL_SPEAKER_FRONT_CENTER,  // channel mask
    SL_BYTEORDER_LITTLEENDIAN // endianness (byte order)
};
// Data source: buffer-queue locator + PCM format.
SLDataSource dataSource = {
    &dataSourceLocator,
    &dataSourceFormat
};
// Output-mix locator for the data sink.
SLDataLocator_OutputMix dataSinkLocator = {
    SL_DATALOCATOR_OUTPUTMIX, // locator type
    outputMixObject           // output mix object
};
// Data sink: the format field is ignored for an output-mix sink.
SLDataSink dataSink = {
    &dataSinkLocator, // locator
    NULL,
};
OpenSL ES Recorder 和 Player 功能构建
音频录制流程图
音频播放流程图
Audio Player 的 Data Source 也可以是本地存储或缓存的音频数据,以上图片来自于 Jhuster 的博客。
由于本文只介绍音频的解码播放,下面的代码仅展示 OpenSL ES Audio Player 播放音频的过程。
// Initialize the OpenSL ES renderer: build the engine, output mix and audio
// player in order, then start playback and prime the buffer-queue callback.
// On any failure, log the step that failed and tear everything down.
void OpenSLRender::Init() {
    LOGCATE("OpenSLRender::Init");
    int result = CreateEngine();
    if (result != SL_RESULT_SUCCESS) {
        LOGCATE("OpenSLRender::Init CreateEngine fail. result=%d", result);
    } else if ((result = CreateOutputMixer()) != SL_RESULT_SUCCESS) {
        LOGCATE("OpenSLRender::Init CreateOutputMixer fail. result=%d", result);
    } else if ((result = CreateAudioPlayer()) != SL_RESULT_SUCCESS) {
        LOGCATE("OpenSLRender::Init CreateAudioPlayer fail. result=%d", result);
    } else {
        // Everything is built: switch the player into the playing state and
        // invoke the callback once by hand to enqueue the first buffer.
        (*m_AudioPlayerPlay)->SetPlayState(m_AudioPlayerPlay, SL_PLAYSTATE_PLAYING);
        AudioPlayerCallback(m_BufferQueue, this);
    }
    if (result != SL_RESULT_SUCCESS) {
        LOGCATE("OpenSLRender::Init fail. result=%d", result);
        UnInit();
    }
}
// Create, realize and query the OpenSL ES engine object, storing the engine
// interface in m_EngineEngine. Returns SL_RESULT_SUCCESS or the first
// failing step's error code (already logged).
int OpenSLRender::CreateEngine() {
    SLresult result = slCreateEngine(&m_EngineObj, 0, nullptr, 0, nullptr, nullptr);
    if (result != SL_RESULT_SUCCESS) {
        LOGCATE("OpenSLRender::CreateEngine slCreateEngine fail. result=%d", result);
        return result;
    }
    // Synchronous realization of the engine object.
    result = (*m_EngineObj)->Realize(m_EngineObj, SL_BOOLEAN_FALSE);
    if (result != SL_RESULT_SUCCESS) {
        LOGCATE("OpenSLRender::CreateEngine Realize fail. result=%d", result);
        return result;
    }
    // The engine interface is the factory for all other OpenSL ES objects.
    result = (*m_EngineObj)->GetInterface(m_EngineObj, SL_IID_ENGINE, &m_EngineEngine);
    if (result != SL_RESULT_SUCCESS) {
        LOGCATE("OpenSLRender::CreateEngine GetInterface fail. result=%d", result);
    }
    return result;
}
// Create and realize the output mix object (m_OutputMixObj). Environmental
// reverb is requested but not required (SL_BOOLEAN_FALSE). Returns
// SL_RESULT_SUCCESS or the first failing step's error code (already logged).
int OpenSLRender::CreateOutputMixer() {
    SLresult result = SL_RESULT_SUCCESS;
    do {
        const SLInterfaceID mids[1] = {SL_IID_ENVIRONMENTALREVERB};
        const SLboolean mreq[1] = {SL_BOOLEAN_FALSE};
        result = (*m_EngineEngine)->CreateOutputMix(m_EngineEngine, &m_OutputMixObj, 1, mids, mreq);
        if(result != SL_RESULT_SUCCESS)
        {
            LOGCATE("OpenSLRender::CreateOutputMixer CreateOutputMix fail. result=%d", result);
            break;
        }
        result = (*m_OutputMixObj)->Realize(m_OutputMixObj, SL_BOOLEAN_FALSE);
        if(result != SL_RESULT_SUCCESS)
        {
            // FIX: this log previously said "CreateOutputMix fail" (copy-paste
            // error), making Realize failures indistinguishable in the log.
            LOGCATE("OpenSLRender::CreateOutputMixer Realize fail. result=%d", result);
            break;
        }
    } while (false);
    return result;
}
// Create and realize the OpenSL ES audio player: a buffer-queue PCM source
// (44.1 kHz / 16-bit / stereo) feeding the output mix. Then fetch the play,
// buffer-queue and volume interfaces and register the buffer callback.
// Returns SL_RESULT_SUCCESS or the first failing step's error code.
int OpenSLRender::CreateAudioPlayer() {
    // Source locator: Android simple buffer queue with 2 buffers.
    SLDataLocator_AndroidSimpleBufferQueue android_queue = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2};
    SLDataFormat_PCM pcm = {
        SL_DATAFORMAT_PCM,           // format type
        (SLuint32)2,                 // channel count
        SL_SAMPLINGRATE_44_1,        // 44100 Hz (value is in milliHz)
        SL_PCMSAMPLEFORMAT_FIXED_16, // bits per sample
        SL_PCMSAMPLEFORMAT_FIXED_16, // container size
        SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT, // channel mask
        SL_BYTEORDER_LITTLEENDIAN    // endianness
    };
    SLDataSource slDataSource = {&android_queue, &pcm};
    // Sink: the output mix created by CreateOutputMixer(); format is ignored.
    SLDataLocator_OutputMix outputMix = {SL_DATALOCATOR_OUTPUTMIX, m_OutputMixObj};
    SLDataSink slDataSink = {&outputMix, nullptr};
    const SLInterfaceID ids[3] = {SL_IID_BUFFERQUEUE, SL_IID_EFFECTSEND, SL_IID_VOLUME};
    const SLboolean req[3] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
    SLresult result;
    do {
        result = (*m_EngineEngine)->CreateAudioPlayer(m_EngineEngine, &m_AudioPlayerObj, &slDataSource, &slDataSink, 3, ids, req);
        if(result != SL_RESULT_SUCCESS)
        {
            LOGCATE("OpenSLRender::CreateAudioPlayer CreateAudioPlayer fail. result=%d", result);
            break;
        }
        result = (*m_AudioPlayerObj)->Realize(m_AudioPlayerObj, SL_BOOLEAN_FALSE);
        if(result != SL_RESULT_SUCCESS)
        {
            LOGCATE("OpenSLRender::CreateAudioPlayer Realize fail. result=%d", result);
            break;
        }
        // FIX: the three GetInterface failure logs below were identical,
        // making it impossible to tell which interface lookup failed.
        result = (*m_AudioPlayerObj)->GetInterface(m_AudioPlayerObj, SL_IID_PLAY, &m_AudioPlayerPlay);
        if(result != SL_RESULT_SUCCESS)
        {
            LOGCATE("OpenSLRender::CreateAudioPlayer GetInterface SL_IID_PLAY fail. result=%d", result);
            break;
        }
        result = (*m_AudioPlayerObj)->GetInterface(m_AudioPlayerObj, SL_IID_BUFFERQUEUE, &m_BufferQueue);
        if(result != SL_RESULT_SUCCESS)
        {
            LOGCATE("OpenSLRender::CreateAudioPlayer GetInterface SL_IID_BUFFERQUEUE fail. result=%d", result);
            break;
        }
        // Called by OpenSL ES each time a queued buffer finishes playing.
        result = (*m_BufferQueue)->RegisterCallback(m_BufferQueue, AudioPlayerCallback, this);
        if(result != SL_RESULT_SUCCESS)
        {
            LOGCATE("OpenSLRender::CreateAudioPlayer RegisterCallback fail. result=%d", result);
            break;
        }
        result = (*m_AudioPlayerObj)->GetInterface(m_AudioPlayerObj, SL_IID_VOLUME, &m_AudioPlayerVolume);
        if(result != SL_RESULT_SUCCESS)
        {
            LOGCATE("OpenSLRender::CreateAudioPlayer GetInterface SL_IID_VOLUME fail. result=%d", result);
            break;
        }
    } while (false);
    return result;
}
// Buffer-queue callback: invoked by OpenSL ES when a buffer finishes playing
// (and once manually from Init() to prime the queue). `context` carries the
// OpenSLRender instance that registered the callback.
void OpenSLRender::AudioPlayerCallback(SLAndroidSimpleBufferQueueItf bufferQueue, void *context) {
    auto *render = static_cast<OpenSLRender *>(context);
    render->HandleAudioFrameQueue();
}
// Dequeue one pending audio frame and hand it to the OpenSL ES buffer queue.
// The frame is popped and freed only after a successful Enqueue, so a failed
// enqueue leaves it at the head of the queue to be retried next callback.
void OpenSLRender::HandleAudioFrameQueue() {
    LOGCATE("OpenSLRender::HandleAudioFrameQueue QueueSize=%d", m_AudioFrameQueue.size());
    if (m_AudioPlayerPlay == nullptr) return;
    // FIX: calling front() on an empty std::queue is undefined behavior, and
    // the original only checked the returned pointer afterwards.
    // NOTE(review): assumes m_AudioFrameQueue has std::queue-like,
    // non-blocking semantics — confirm against its declaration.
    if (m_AudioFrameQueue.empty()) return;
    AudioFrame *audioFrame = m_AudioFrameQueue.front();
    if (audioFrame != nullptr) {
        SLresult result = (*m_BufferQueue)->Enqueue(m_BufferQueue, audioFrame->data, (SLuint32) audioFrame->dataSize);
        if (result == SL_RESULT_SUCCESS) {
            m_AudioFrameQueue.pop();
            delete audioFrame;
        }
    }
}
编译好的FFmpeg下载地址
FFmpeg_4.3.2支持Android的音视频处理库-Android文档类资源-CSDN文库
关注公众号:Android老皮
解锁《Android十大板块文档》,让学习更贴近未来实战。已形成PDF版
内容如下:
1.Android车载应用开发系统学习指南(附项目实战)
2.Android Framework学习指南,助力成为系统级开发高手
3.2023最新Android中高级面试题汇总+解析,告别零offer
4.企业级Android音视频开发学习路线+项目实战(附源码)
5.Android Jetpack从入门到精通,构建高质量UI界面
6.Flutter技术解析与实战,跨平台首要之选
7.Kotlin从入门到实战,全方位提升架构基础
8.高级Android插件化与组件化(含实战教程和源码)
9.Android 性能优化实战+360全方位性能调优
10.Android零基础入门到精通,高手进阶之路
敲代码不易,关注一下吧。ღ( ´・ᴗ・` )