#include <unistd.h>
#include <string.h>
#include <assert.h>
#include <android/log.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>

#define LOG_TAG "FFMPEG_WZP_AUDIO"
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__)

/* Holds the FFmpeg demuxing/decoding state. */
struct ffmpeg_info_t {
    AVFormatContext *fmt_ctx;
    AVCodecContext  *dec_ctx;
    int audio_stream_index;
} ffmpeg_info;

struct openslgs_info_t {
    // engine interfaces
    SLObjectItf engineObject;
    SLEngineItf engineEngine;
    // output mix interfaces
    SLObjectItf outputMixObject;
    SLEnvironmentalReverbItf outputMixEnvironmentalReverb;
    // buffer queue player interfaces
    SLObjectItf bqPlayerObject;
    SLPlayItf bqPlayerPlay;
    SLAndroidSimpleBufferQueueItf bqPlayerBufferQueue;
    SLEffectSendItf bqPlayerEffectSend;
    SLMuteSoloItf bqPlayerMuteSolo;
    SLVolumeItf bqPlayerVolume;
} openslgs;

/* A simple FIFO of demuxed packets, in the style of the ffplay tutorial. */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
} PacketQueue;

PacketQueue audio_queue;

void packet_queue_init(PacketQueue *q) {
    memset(q, 0, sizeof(PacketQueue));
}

int packet_queue_put(PacketQueue *q, AVPacket *pkt) {
    AVPacketList *pkt1;
    if (av_dup_packet(pkt) < 0)
        return -1;
    pkt1 = av_malloc(sizeof(AVPacketList));
    if (!pkt1)
        return -1;
    pkt1->pkt = *pkt;
    pkt1->next = NULL;
    if (!q->last_pkt)
        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size;
    return 0;
}

int quit = 0;

static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) {
    AVPacketList *pkt1;
    if (quit)
        return -1;
    pkt1 = q->first_pkt;
    if (pkt1) {
        q->first_pkt = pkt1->next;
        if (!q->first_pkt)
            q->last_pkt = NULL;
        q->nb_packets--;
        q->size -= pkt1->pkt.size;
        *pkt = pkt1->pkt;
        av_free(pkt1);
        return 1;
    }
    /* Queue is empty. All packets are queued up front and there is no
       producer thread to wait on, so even for a blocking caller an empty
       queue simply means end of stream. */
    return 0;
}

AVPacket packet;
AVFrame *frame;

/* Decode packets from the queue until one complete frame of PCM has been
   copied into stream; returns the number of bytes written, 0 at end of
   stream, or a negative error code. */
int decode_frame(uint8_t *stream) {
    int len, data_size, got_frame;
    for (;;) {
        while (packet.size > 0) {
            if (!frame) {
                if (!(frame = avcodec_alloc_frame()))
                    return AVERROR(ENOMEM);
            } else {
                avcodec_get_frame_defaults(frame);
            }
            len = avcodec_decode_audio4(ffmpeg_info.dec_ctx, frame, &got_frame, &packet);
            if (len < 0) {
                /* if error, skip the rest of this packet */
                packet.size = 0;
                break;
            }
            packet.data += len;
            packet.size -= len;
            if (got_frame <= 0)
                continue; /* no output yet, feed more input */
            data_size = av_samples_get_buffer_size(NULL, ffmpeg_info.dec_ctx->channels,
                                                   frame->nb_samples,
                                                   ffmpeg_info.dec_ctx->sample_fmt, 1);
            /* Assumes a packed sample format such as S16; planar decoder
               output needs conversion first (see the libswresample sketch
               below). */
            memcpy(stream, frame->data[0], data_size);
            return data_size;
        }
        /* Current packet fully consumed; fetch the next one, and stop once
           the queue is drained. */
        if (packet_queue_get(&audio_queue, &packet, 1) <= 0)
            return 0;
    }
}

uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];

// this callback handler is called every time a buffer finishes playing
void bqPlayerCallback(SLAndroidSimpleBufferQueueItf bq, void *context) {
    int len = decode_frame(audio_buf);
    if (len > 0)
        (*openslgs.bqPlayerBufferQueue)->Enqueue(openslgs.bqPlayerBufferQueue, audio_buf, len);
}
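One caveat in decode_frame() above: memcpy(stream, frame->data[0], data_size) only works when the decoder emits packed samples, yet many FFmpeg audio decoders (AAC, for example) emit planar formats such as AV_SAMPLE_FMT_FLTP, while the OpenSL ES player below is configured for packed 16-bit PCM. Below is a minimal conversion sketch using libswresample; it assumes libswresample from the same FFmpeg era is linked in, and convert_to_s16() and the swr handle are names introduced here for illustration, not part of the original sample.

#include <libswresample/swresample.h>
#include <libavutil/channel_layout.h>

static SwrContext *swr; /* hypothetical helper state, lazily initialized on first use */

/* Convert one decoded frame to packed signed 16-bit PCM in out; returns the
   number of bytes written, or a negative value on error. This would replace
   the raw memcpy in decode_frame(). */
static int convert_to_s16(AVFrame *f, uint8_t *out)
{
    if (!swr) {
        int64_t layout = ffmpeg_info.dec_ctx->channel_layout;
        if (!layout) /* some decoders leave the layout unset */
            layout = av_get_default_channel_layout(ffmpeg_info.dec_ctx->channels);
        swr = swr_alloc_set_opts(NULL,
                                 layout, AV_SAMPLE_FMT_S16,
                                 ffmpeg_info.dec_ctx->sample_rate,
                                 layout, ffmpeg_info.dec_ctx->sample_fmt,
                                 ffmpeg_info.dec_ctx->sample_rate,
                                 0, NULL);
        if (!swr || swr_init(swr) < 0)
            return -1;
    }
    int n = swr_convert(swr, &out, f->nb_samples,
                        (const uint8_t **)f->extended_data, f->nb_samples);
    if (n < 0)
        return -1;
    return n * ffmpeg_info.dec_ctx->channels * 2; /* 2 bytes per S16 sample */
}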
/* Open the input file and set up the FFmpeg decoder. */
static int open_input_file(const char *filename) {
    int ret;
    AVCodec *dec;
    ffmpeg_info.audio_stream_index = -1;
    if ((ret = avformat_open_input(&ffmpeg_info.fmt_ctx, filename, NULL, NULL)) < 0) {
        LOGE("Cannot open input file\n");
        return ret;
    }
    if ((ret = avformat_find_stream_info(ffmpeg_info.fmt_ctx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
        return ret;
    }
    /* select the audio stream */
    ret = av_find_best_stream(ffmpeg_info.fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);
    if (ret < 0) {
        LOGE("Cannot find an audio stream in the input file\n");
        return ret;
    }
    ffmpeg_info.audio_stream_index = ret;
    ffmpeg_info.dec_ctx = ffmpeg_info.fmt_ctx->streams[ffmpeg_info.audio_stream_index]->codec;
    /* init the audio decoder */
    if ((ret = avcodec_open2(ffmpeg_info.dec_ctx, dec, NULL)) < 0) {
        LOGE("Cannot open audio decoder\n");
        return ret;
    }
    return 0;
}

void createEngine() {
    SLresult result;

    // create engine
    result = slCreateEngine(&openslgs.engineObject, 0, NULL, 0, NULL, NULL);
    assert(SL_RESULT_SUCCESS == result);

    // realize the engine
    result = (*openslgs.engineObject)->Realize(openslgs.engineObject, SL_BOOLEAN_FALSE);
    assert(SL_RESULT_SUCCESS == result);

    // get the engine interface
    result = (*openslgs.engineObject)->GetInterface(openslgs.engineObject, SL_IID_ENGINE,
                                                    &openslgs.engineEngine);
    assert(SL_RESULT_SUCCESS == result);

    // create the output mix, with environmental reverb requested but not required
    const SLInterfaceID ids[1] = {SL_IID_ENVIRONMENTALREVERB};
    const SLboolean req[1] = {SL_BOOLEAN_FALSE};
    result = (*openslgs.engineEngine)->CreateOutputMix(openslgs.engineEngine,
                                                       &openslgs.outputMixObject, 1, ids, req);
    assert(SL_RESULT_SUCCESS == result);

    // realize the output mix
    result = (*openslgs.outputMixObject)->Realize(openslgs.outputMixObject, SL_BOOLEAN_FALSE);
    assert(SL_RESULT_SUCCESS == result);

#if 0
    // get the environmental reverb interface; ignore unsuccessful result codes
    result = (*openslgs.outputMixObject)->GetInterface(openslgs.outputMixObject,
            SL_IID_ENVIRONMENTALREVERB, &openslgs.outputMixEnvironmentalReverb);
    if (SL_RESULT_SUCCESS == result) {
        result = (*openslgs.outputMixEnvironmentalReverb)->SetEnvironmentalReverbProperties(
                openslgs.outputMixEnvironmentalReverb, &reverbSettings);
    }
#endif
}
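Another caveat before the player setup: the buffer queue created below requests two buffers, but bqPlayerCallback() above always enqueues the same static audio_buf, so the decoder can overwrite memory the audio device is still reading. A ping-pong variant is sketched here under the same assumptions; bqPlayerCallbackDB, pingpong, and cur are illustrative names, not part of the original sample.

/* Alternate between two halves: while the device drains one, decode into the other. */
static uint8_t pingpong[2][(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
static int cur = 0;

void bqPlayerCallbackDB(SLAndroidSimpleBufferQueueItf bq, void *context)
{
    int len = decode_frame(pingpong[cur]);
    if (len > 0) {
        (*openslgs.bqPlayerBufferQueue)->Enqueue(openslgs.bqPlayerBufferQueue,
                                                 pingpong[cur], len);
        cur ^= 1; /* the next callback fills the other half */
    }
}

To use it, this function would be passed to RegisterCallback() below in place of bqPlayerCallback.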
void createBufferQueueAudioPlayer() {
    SLresult result;

    // configure the audio source: a two-buffer Android simple buffer queue
    SLDataLocator_AndroidSimpleBufferQueue loc_bufq =
            {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2};

    SLDataFormat_PCM pcm;
    pcm.formatType = SL_DATAFORMAT_PCM;
    // numChannels must agree with channelMask below, or player creation fails
    pcm.numChannels = ffmpeg_info.dec_ctx->channels;
    // OpenSL ES expresses the sample rate in milliHertz
    pcm.samplesPerSec = ffmpeg_info.dec_ctx->sample_rate * 1000;
    pcm.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
    pcm.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
    // stereo, mirroring AudioTrack's CHANNEL_OUT_STEREO =
    // (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT); this hard-coded
    // mask matches numChannels only for two-channel input
    pcm.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
    pcm.endianness = SL_BYTEORDER_LITTLEENDIAN;

    SLDataSource audioSrc = {&loc_bufq, &pcm};

    // configure the audio sink
    SLDataLocator_OutputMix loc_outmix = {SL_DATALOCATOR_OUTPUTMIX, openslgs.outputMixObject};
    SLDataSink audioSnk = {&loc_outmix, NULL};

    // create the audio player
    const SLInterfaceID ids[3] = {SL_IID_BUFFERQUEUE, SL_IID_EFFECTSEND,
                                  /*SL_IID_MUTESOLO,*/ SL_IID_VOLUME};
    const SLboolean req[3] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE,
                              /*SL_BOOLEAN_TRUE,*/ SL_BOOLEAN_TRUE};
    result = (*openslgs.engineEngine)->CreateAudioPlayer(openslgs.engineEngine,
            &openslgs.bqPlayerObject, &audioSrc, &audioSnk, 3, ids, req);
    assert(SL_RESULT_SUCCESS == result);

    // realize the player
    result = (*openslgs.bqPlayerObject)->Realize(openslgs.bqPlayerObject, SL_BOOLEAN_FALSE);
    assert(SL_RESULT_SUCCESS == result);

    // get the play interface
    result = (*openslgs.bqPlayerObject)->GetInterface(openslgs.bqPlayerObject, SL_IID_PLAY,
                                                      &openslgs.bqPlayerPlay);
    assert(SL_RESULT_SUCCESS == result);

    // get the buffer queue interface
    result = (*openslgs.bqPlayerObject)->GetInterface(openslgs.bqPlayerObject, SL_IID_BUFFERQUEUE,
                                                      &openslgs.bqPlayerBufferQueue);
    assert(SL_RESULT_SUCCESS == result);

    // register the callback on the buffer queue
    result = (*openslgs.bqPlayerBufferQueue)->RegisterCallback(openslgs.bqPlayerBufferQueue,
                                                               bqPlayerCallback, NULL);
    assert(SL_RESULT_SUCCESS == result);

#if 0
    // mute/solo is not supported for sources that are known to be mono
    // get the mute/solo interface
    result = (*openslgs.bqPlayerObject)->GetInterface(openslgs.bqPlayerObject, SL_IID_MUTESOLO,
                                                      &openslgs.bqPlayerMuteSolo);
    assert(SL_RESULT_SUCCESS == result);
#endif

    // get the volume interface
    result = (*openslgs.bqPlayerObject)->GetInterface(openslgs.bqPlayerObject, SL_IID_VOLUME,
                                                      &openslgs.bqPlayerVolume);
    assert(SL_RESULT_SUCCESS == result);

    // set the player's state to playing
    result = (*openslgs.bqPlayerPlay)->SetPlayState(openslgs.bqPlayerPlay, SL_PLAYSTATE_PLAYING);
    assert(SL_RESULT_SUCCESS == result);
}

/* Demux the whole file up front, queueing every audio packet. */
void put_packet_intoqueue() {
    AVPacket pkt;
    while (1) {
        if (av_read_frame(ffmpeg_info.fmt_ctx, &pkt) < 0)
            break; /* end of file or read error */
        if (pkt.stream_index == ffmpeg_info.audio_stream_index)
            packet_queue_put(&audio_queue, &pkt);
        else
            av_free_packet(&pkt);
    }
    LOGI("audio_queue.size=%d\n", audio_queue.size);
}

int main(int argc, char **argv) {
    if (argc < 2) {
        LOGE("usage: %s <input file>\n", argv[0]);
        return 1;
    }
    avcodec_register_all();
    av_register_all();
    if (open_input_file(argv[1]) < 0)
        return 1;
    packet_queue_init(&audio_queue);
    put_packet_intoqueue();
    createEngine();
    createBufferQueueAudioPlayer();
    // prime the queue with the first buffer; the callback keeps it fed from then on
    bqPlayerCallback(NULL, NULL);
    for (;;)
        getchar(); /* keep the process alive while audio plays */
}
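The sample never releases anything; it simply blocks in getchar(). For completeness, here is a hedged teardown sketch in the spirit of the NDK native-audio sample, destroying OpenSL ES objects in reverse order of creation and then closing the FFmpeg contexts; release_resources() is a name introduced here, not part of the original sample.

static void release_resources(void)
{
    /* Destroying an object also invalidates all interfaces obtained from it. */
    if (openslgs.bqPlayerObject) {
        (*openslgs.bqPlayerObject)->Destroy(openslgs.bqPlayerObject);
        openslgs.bqPlayerObject = NULL;
    }
    if (openslgs.outputMixObject) {
        (*openslgs.outputMixObject)->Destroy(openslgs.outputMixObject);
        openslgs.outputMixObject = NULL;
    }
    if (openslgs.engineObject) {
        (*openslgs.engineObject)->Destroy(openslgs.engineObject);
        openslgs.engineObject = NULL;
    }
    if (ffmpeg_info.dec_ctx)
        avcodec_close(ffmpeg_info.dec_ctx);
    if (ffmpeg_info.fmt_ctx)
        avformat_close_input(&ffmpeg_info.fmt_ctx);
}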