(5) Playing PCM audio with AudioTrack

java

public class AudioTrackActivity extends BaseActivity {

    public static void startAudioTrackActivity(Activity activity) {
        activity.startActivity(new Intent(activity, AudioTrackActivity.class));
    }

    @BindView(R.id.tv_audiotrack_status) TextView mStatusTv;
    private AudioTrack mAudioTrack;
    private boolean isLocalPlaying = true;

    @Override
    protected void onCreate(@Nullable Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_audiotrck);
        mUnbinder = ButterKnife.bind(this);

        mAudioTrack = createAudioTrack(44100, 2);
    }

    /*
        Key AudioTrack methods:

            Start playback
            public void play() throws IllegalStateException {}

            Stop playing audio data. In STREAM mode, playback only stops after the last data written to the buffer has been played. To stop immediately, call pause() and then flush(); flush() discards the data that has not been played yet.
            public void stop() throws IllegalStateException {}

            Pause playback; call play() again to resume.
            public void pause() throws IllegalStateException {}

            Only usable in STREAM mode. Flushes the audio data currently queued for playback; any written data that has not been committed is discarded, but there is no guarantee that all the buffer space used for that data becomes available for subsequent writes.
            public void flush() {}

            Release the native AudioTrack resources.
            public void release() {}

            Return the current playback state.
            public int getPlayState() {}
     */
    /**
     * Creates an AudioTrack instance for playback; this method is also called from JNI.
     * @param sampleRateInHz sample rate, e.g. 44100
     * @param nb_channels number of channels (2 = stereo)
     * @return the configured AudioTrack
     */
    public AudioTrack createAudioTrack(int sampleRateInHz, int nb_channels){
        // audio sample format, fixed to 16-bit PCM
        int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
        LogUtils.d("mydebug---", "createAudioTrack nb_channels : "+nb_channels);
        // channel layout
        int channelConfig;
        if(nb_channels == 1) {
            channelConfig = android.media.AudioFormat.CHANNEL_OUT_MONO;
        } else if (nb_channels == 2) {
            channelConfig = android.media.AudioFormat.CHANNEL_OUT_STEREO;
        } else {
            channelConfig = android.media.AudioFormat.CHANNEL_OUT_STEREO;
        }

        int bufferSizeInBytes = AudioTrack.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);

        AudioTrack audioTrack = new AudioTrack(
                AudioManager.STREAM_MUSIC,
                sampleRateInHz, channelConfig,
                audioFormat,
                bufferSizeInBytes, AudioTrack.MODE_STREAM);
        // start playback
        //audioTrack.play();
        // write PCM data
        //audioTrack.write(audioData, offsetInBytes, sizeInBytes);
        return audioTrack;
    }

    // called from JNI when decoding has finished; updates the status text
    public void setStatus() {
        LogUtils.d("mydebug---", "setStatus thread : "+Thread.currentThread()); // setStatus thread : Thread[Thread-2,5,main]
        runOnUiThread(new Runnable() {
            @Override
            public void run() {
                mStatusTv.setText("Done!");
            }
        });
    }

    @OnClick({R.id.btn_audiotrack_localplay, R.id.btn_audiotrack_jniplay, R.id.btn_audiotrack_play, R.id.btn_audiotrack_stop})
    void click(View view) {
        final String sdcrad = Environment.getExternalStorageDirectory().getAbsolutePath();
        switch (view.getId()) {
            case R.id.btn_audiotrack_localplay: // play a local PCM file directly
                isLocalPlaying = true;
                new Thread(){
                    @Override
                    public void run() {
                        mAudioTrack.play();
                        File file = new File(sdcrad+"/test.pcm");
                        FileInputStream fis = null;
                        try {
                            fis = new FileInputStream(file);
                            byte[] buf = new byte[8192]; // 8192 happens to be this file's per-frame size; another size works too, but then the sleep below must be removed, otherwise the audio stutters with noise
                            int len = -1;
                            while (isLocalPlaying && (len = fis.read(buf))!=-1) {
                                mAudioTrack.write(buf, 0, len);
//                                Thread.sleep(16); // sleep about 16 ms, the frame interval at 60 frames per second; it also works without the sleep, the speech just plays a bit faster
                            }
                        } catch (Exception e) {
                            e.printStackTrace();
                        } finally {
                            if (fis!=null) {
                                try {
                                    fis.close();
                                } catch (IOException e) {
                                    e.printStackTrace();
                                }
                            }
                        }
                    }
                }.start();
                break;
            case R.id.btn_audiotrack_jniplay: // play the local PCM file from JNI, without decoding
                new Thread(){
                    @Override
                    public void run() {
                        localplay(sdcrad+"/test.pcm");
                    }
                }.start();
                break;
            case R.id.btn_audiotrack_play: // decode an mp4 and play the audio with AudioTrack
                mStatusTv.setVisibility(View.VISIBLE);
                mStatusTv.setText("AudioTrack playing PCM, decoding..");
                new Thread(){
                    @Override
                    public void run() {
                        play(sdcrad+"/1080.mp4", sdcrad+"/audiotrack.pcm");
                    }
                }.start();
                break;
            case R.id.btn_audiotrack_stop:
                isLocalPlaying = false;
                mAudioTrack.pause();
                mAudioTrack.flush();
                stop();
                break;
        }
    }

    @Override
    protected void onDestroy() {
        super.onDestroy();
        mAudioTrack.release();
        stop();
    }

    private native void play(String path, String outPcm);
    private native void localplay(String path);
    private native void stop();

}
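
The AudioTrack constructor used above (the one taking AudioManager.STREAM_MUSIC) is deprecated on newer Android versions. Below is a minimal sketch, not part of the original code, of building the same track with AudioTrack.Builder (available since API 23); the method name createAudioTrackWithBuilder is illustrative and it assumes the same android.media imports as the Activity above.

java

    private AudioTrack createAudioTrackWithBuilder(int sampleRateInHz, int nbChannels) {
        int channelMask = (nbChannels == 1)
                ? AudioFormat.CHANNEL_OUT_MONO
                : AudioFormat.CHANNEL_OUT_STEREO;
        int bufferSize = AudioTrack.getMinBufferSize(sampleRateInHz, channelMask, AudioFormat.ENCODING_PCM_16BIT);
        return new AudioTrack.Builder()
                .setAudioAttributes(new AudioAttributes.Builder()
                        .setUsage(AudioAttributes.USAGE_MEDIA)
                        .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
                        .build())
                .setAudioFormat(new AudioFormat.Builder()
                        .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                        .setSampleRate(sampleRateInHz)
                        .setChannelMask(channelMask)
                        .build())
                .setBufferSizeInBytes(bufferSize)
                .setTransferMode(AudioTrack.MODE_STREAM)
                .build();
    }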

c++

#include <unistd.h>
#include "hjcommon.hpp"
extern "C" {
    #include "libavutil/imgutils.h"
}

static bool isRunning = true;
static jobject obj_audiotrack = 0; // AudioTrack object, held as a global reference
static jmethodID mid_write; // a jmethodID can be used across threads without turning it into a global reference
static jmethodID mid_release; // AudioTrack's release method
static void initAudioTrack(JNIEnv *env, jobject instance, int sample_rate, int nb_channels)
{
    if (obj_audiotrack) env->DeleteGlobalRef(obj_audiotrack); // drop any previous global reference before creating a new one

    jclass clz_mp6 = env->GetObjectClass(instance);
    jmethodID mid_create = env->GetMethodID(clz_mp6, "createAudioTrack", "(II)Landroid/media/AudioTrack;"); // look up the Java method that creates the AudioTrack instance
    LOGD("initAudioTrack sample_rate=%d, nb_channels=%d", sample_rate, nb_channels);
    jobject obj_aud = env->CallObjectMethod(instance, mid_create, sample_rate, nb_channels); // 44100 and 2 are passed because the sample rate and stereo layout are known in advance
    jclass clz_aud = env->GetObjectClass(obj_aud);

    jmethodID mid_play = env->GetMethodID(clz_aud, "play", "()V"); // look up AudioTrack.play
    env->CallVoidMethod(obj_aud, mid_play); // call play()
    mid_write = env->GetMethodID(clz_aud, "write", "([BII)I"); // look up AudioTrack.write
    mid_release = env->GetMethodID(clz_aud, "release", "()V");

    obj_audiotrack = env->NewGlobalRef(obj_aud);
}
static void audiotrack_write(JNIEnv *env, uint8_t *out_buffer, int out_buffer_size) // write PCM data into the AudioTrack to play it
{
    // wrap the out_buffer data in a Java byte array
    jbyteArray audio_sample_array = env->NewByteArray(out_buffer_size);
    jbyte* sample_bytep = env->GetByteArrayElements(audio_sample_array, NULL);
    // copy the out_buffer data into sample_bytep
    memcpy(sample_bytep, out_buffer, out_buffer_size);
    // commit the changes back to the Java array and release the elements
    env->ReleaseByteArrayElements(audio_sample_array, sample_bytep, 0);

    // AudioTrack.write: hand the PCM data to the track
    env->CallIntMethod(obj_audiotrack, mid_write, audio_sample_array, 0, out_buffer_size);
    // drop the local reference
    env->DeleteLocalRef(audio_sample_array);
}
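
// Side note (not in the original post): the same copy can also be done with SetByteArrayRegion,
// which avoids the GetByteArrayElements/ReleaseByteArrayElements pair. A minimal sketch that
// assumes the same obj_audiotrack / mid_write globals as above; the function name is illustrative.
static void audiotrack_write_region(JNIEnv *env, uint8_t *out_buffer, int out_buffer_size)
{
    jbyteArray audio_sample_array = env->NewByteArray(out_buffer_size);
    // copy the native buffer straight into the Java array
    env->SetByteArrayRegion(audio_sample_array, 0, out_buffer_size, (const jbyte *) out_buffer);
    env->CallIntMethod(obj_audiotrack, mid_write, audio_sample_array, 0, out_buffer_size);
    env->DeleteLocalRef(audio_sample_array);
}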

JNIEXPORT void JNICALL Java_hankin_hjmedia_ff_some_AudioTrackActivity_play(JNIEnv *env, jobject instance, jstring path_, jstring outPcm_)
{
    isRunning = true;
    char path[128];
    hjcpyJstr2char(env, path_, path);
    char outPcm[128];
    hjcpyJstr2char(env, outPcm_, outPcm);

    AVFormatContext * avFormatContext = 0;
    int ret = avformat_open_input(&avFormatContext, path, 0, 0);
    if (ret!=0)
    {
        LOGE("avformat_open_input error.");
        return;
    }
    ret = avformat_find_stream_info(avFormatContext, 0);
    if (ret!=0)
    {
        LOGE("avformat_find_stream_info error.");
        return;
    }
    int audioStream = av_find_best_stream(avFormatContext, AVMEDIA_TYPE_AUDIO, -1, -1, 0, 0);
    AVStream *avs = avFormatContext->streams[audioStream];
    // sample_rate=44100, channels=2, sample_format=8
    LOGD("sample_rate=%d, channels=%d, sample_format=%d", avs->codecpar->sample_rate, avs->codecpar->channels, avs->codecpar->format);

    AVCodecContext * audioCodecContext = avcodec_alloc_context3(0);
    int gaRet = hjgetAVDecoder6_1(audioCodecContext, avs->codecpar, false);
    if (gaRet!=0) return;
    LOGD("audioCodecContext->sample_rate=%d", audioCodecContext->sample_rate); // audioCodecContext->sample_rate=44100

    initAudioTrack(env, instance, audioCodecContext->sample_rate, av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO)); // initialize the AudioTrack with 44100 Hz, 2 channels

    int frameCount = 0;
    AVPacket * packet = av_packet_alloc();
    AVFrame * frame = av_frame_alloc();
    FILE * fp = fopen(outPcm, "wb");

    // decoded audio usually cannot be played directly and has to be resampled first
    SwrContext * swrContext = swr_alloc(); // create the audio resampling context
    // av_get_default_channel_layout returns the default channel layout for a given channel count; here the output is
    // fixed to 2 channels: AV_CH_LAYOUT_STEREO is the stereo layout and AV_SAMPLE_FMT_S16 is the output sample format
    swr_alloc_set_opts(swrContext, AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, audioCodecContext->sample_rate,
                       audioCodecContext->channel_layout, audioCodecContext->sample_fmt, audioCodecContext->sample_rate, 0, 0);
    int swrRet = swr_init(swrContext); // initialize; returns 0 on success
    if (swrRet!=0)
    {
        LOGE("swr_init is failed : %s", av_err2str(swrRet));
        return;
    }
    unsigned char * pcm = 0; //new unsigned char[48000*4*2]; // buffer for the resampled samples; making it a bit too large is harmless, but it must not be too small
    int out_buffer_size = 0;
    bool is = true;

    while (isRunning)
    {
        ret = av_read_frame(avFormatContext, packet);
        if (ret!=0)
        {
            LOGI("decode end : %s", av_err2str(ret));
            break;
        }
        if (packet->stream_index==audioStream)
        {
            int num = 0;
            while (true)
            {
                num++;
                ret = avcodec_send_packet(audioCodecContext, packet);
                if (ret!=0)
                {
                    LOGW("avcodec_send_packet audio error.");
                    usleep(1000);
                }
                if (ret==0 || num>=5) break;
            }

            while (true)
            {
                ret = avcodec_receive_frame(audioCodecContext, frame);
                if (ret!=0) break;
                frameCount++;

                uint8_t * out[2] = {0}; // the output was fixed to 2 channels above, but it is packed S16, so swr_convert only fills out[0]; the second entry just stays NULL
                out[0] = pcm;
                if (is)
                {
                    is = false;
                    // use the bytes-per-sample of the resampled output (AV_SAMPLE_FMT_S16), not of the decoded
                    // frame format, otherwise out_buffer_size is larger than the data swr_convert actually produces
                    int per_sample = av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
                    out_buffer_size = per_sample * frame->nb_samples * 2; // output size = bytes per sample * samples per channel * channel count (stereo)
                    LOGD("per_sample=%d, out_buffer_size=%d", per_sample, out_buffer_size);
                    pcm = new unsigned char[out_buffer_size];
                    out[0] = pcm;
                }
                int len = swr_convert(swrContext, out, frame->nb_samples, (const uint8_t **) frame->data, frame->nb_samples);
                // format=8, sample_rate=44100, channels=2, nb_samples=1024, len=1024
                LOGD("format=%d, sample_rate=%d, channels=%d, nb_samples=%d, len=%d", frame->format, frame->sample_rate, frame->channels, frame->nb_samples, len);

                // play the PCM audio through AudioTrack
//                int out_buffer_size = av_samples_get_buffer_size(NULL, 2, frame->nb_samples, (AVSampleFormat) frame->format, 1); // computes the per-frame audio size from the given parameters
                audiotrack_write(env, out[0], out_buffer_size); // with everything on a single thread the audio can stutter with noise (because resampling and playback share one thread?)

                fwrite(out[0], 1, out_buffer_size, fp); // also save the PCM to a local file
            }
        }

//        usleep(1000 * 16);

        av_packet_unref(packet);
    }

    delete [] pcm;
    swr_free(&swrContext); // free the resampling context

    av_packet_free(&packet);
    av_frame_free(&frame);
    avcodec_close(audioCodecContext);
    avcodec_free_context(&audioCodecContext);
    avformat_close_input(&avFormatContext);
    fclose(fp);

    env->CallVoidMethod(obj_audiotrack, mid_release); // release the AudioTrack
    env->DeleteGlobalRef(obj_audiotrack); // delete the global reference

    // call back into Java to update the status text
    jclass  clz = env->GetObjectClass(instance);
    jmethodID mid = env->GetMethodID(clz, "setStatus", "()V");
    env->CallVoidMethod(instance, mid);
}
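
// Side note (not in the original post): instead of fixing out_buffer_size from the first frame,
// the output size can be derived per frame. A minimal sketch under the same assumptions as above
// (packed S16 stereo output, output rate equal to the input rate); the helper name is illustrative.
static int resampled_buffer_size(SwrContext *swr, int sample_rate, int in_nb_samples)
{
    // upper bound on output samples, including samples still buffered inside the SwrContext
    int64_t out_samples = av_rescale_rnd(swr_get_delay(swr, sample_rate) + in_nb_samples,
                                         sample_rate, sample_rate, AV_ROUND_UP);
    // bytes = samples per channel * 2 channels * bytes per S16 sample
    return (int) (out_samples * 2 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16));
}
// The number of bytes actually produced by one swr_convert call is
// len * 2 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16), where len is swr_convert's return value.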

JNIEXPORT void JNICALL Java_hankin_hjmedia_ff_some_AudioTrackActivity_localplay(JNIEnv *env, jobject instance, jstring path_)
{
    isRunning = true;
    char path[128];
    hjcpyJstr2char(env, path_, path);

    initAudioTrack(env, instance, 44100, 2); // initialize the AudioTrack with 44100 Hz, 2 channels
    FILE * fp = fopen(path, "rb");
    int size = 8192; // 8192 happens to be this file's per-frame size; another size works too, but then the sleep below must be removed, otherwise the audio stutters with noise
    unsigned char * buf = new unsigned char[size];
    int len = -1;
    while (isRunning && feof(fp)==0)
    {
        len = fread(buf, 1, size, fp);
        if (len>0)
        {
            audiotrack_write(env, buf, len); // write only the bytes actually read
        }
        else break;
        usleep(1000*16); // sleep about 16 ms, the frame interval at 60 frames per second; it also works without the sleep, the speech just plays a bit faster
    }

    fclose(fp);
    env->CallVoidMethod(obj_audiotrack, mid_release); // release the AudioTrack
    env->DeleteGlobalRef(obj_audiotrack); // delete the global reference
}

JNIEXPORT void JNICALL Java_hankin_hjmedia_ff_some_AudioTrackActivity_stop(JNIEnv *env, jobject instance)
{
    isRunning = false;
}
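
// Side note (not in the original post): isRunning is set from the Java side's stop() while the
// decode loops read it on other threads. A stricter variant would use std::atomic<bool> (C++11)
// for well-defined cross-thread visibility; the name below is illustrative.
// #include <atomic>
// static std::atomic<bool> isRunningAtomic{true};
// decode loops:  while (isRunningAtomic.load()) { ... }
// stop():        isRunningAtomic.store(false);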