status_t AudioTrack::createTrack(
        int streamType,
        uint32_t sampleRate,
        int format,
        int channelCount,
        int frameCount,
        uint32_t flags,
        const sp<IMemory>& sharedBuffer,
        audio_io_handle_t output,
        bool enforceFrameCount)
{
    status_t status;
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
        LOGE("Could not get audioflinger");
        return NO_INIT;
    }

    // Query the output's hardware parameters: sample rate, buffer size (in frames) and latency.
    int afSampleRate;
    if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
        return NO_INIT;
    }
    int afFrameCount;
    if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
        return NO_INIT;
    }
    uint32_t afLatency;
    if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) {
        return NO_INIT;
    }

    mNotificationFramesAct = mNotificationFramesReq;
    if (!AudioSystem::isLinearPCM(format)) {
        if (sharedBuffer != 0) {
            frameCount = sharedBuffer->size();
        }
    } else {
        // Ensure that buffer depth covers at least audio hardware latency
        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
        if (minBufCount < 2) minBufCount = 2;

        int minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;

        if (sharedBuffer == 0) {
            if (frameCount == 0) {
                frameCount = minFrameCount;
            }
            if (mNotificationFramesAct == 0) {
                mNotificationFramesAct = frameCount/2;
            }
            // Make sure that application is notified with sufficient margin
            // before underrun
            if (mNotificationFramesAct > (uint32_t)frameCount/2) {
                mNotificationFramesAct = frameCount/2;
            }
            if (frameCount < minFrameCount) {
                if (enforceFrameCount) {
                    LOGE("Invalid buffer size: minFrameCount %d, frameCount %d", minFrameCount, frameCount);
                    return BAD_VALUE;
                } else {
                    frameCount = minFrameCount;
                }
            }
        } else {
            // Ensure that buffer alignment matches channelCount
            if (((uint32_t)sharedBuffer->pointer() & (channelCount | 1)) != 0) {
                LOGE("Invalid buffer alignment: address %p, channelCount %d", sharedBuffer->pointer(), channelCount);
                return BAD_VALUE;
            }
            frameCount = sharedBuffer->size()/channelCount/sizeof(int16_t);
        }
    }

    // Ask AudioFlinger (over Binder) to create the server-side track object.
    sp<IAudioTrack> track = audioFlinger->createTrack(getpid(),
                                                      streamType,
                                                      sampleRate,
                                                      format,
                                                      channelCount,
                                                      frameCount,
                                                      ((uint16_t)flags) << 16,
                                                      sharedBuffer,
                                                      output,
                                                      &mSessionId,
                                                      &status);

    if (track == 0) {
        LOGE("AudioFlinger could not create track, status: %d", status);
        return status;
    }
    sp<IMemory> cblk = track->getCblk();
    if (cblk == 0) {
        LOGE("Could not get control block");
        return NO_INIT;
    }
    mAudioTrack.clear();
    mAudioTrack = track;
    mCblkMemory.clear();
    mCblkMemory = cblk;
    // Map the shared-memory control block that the client and AudioFlinger use to coordinate.
    mCblk = static_cast<audio_track_cblk_t*>(cblk->pointer());
    mCblk->flags |= CBLK_DIRECTION_OUT;
    if (sharedBuffer == 0) {
        mCblk->buffers = (char*)mCblk + sizeof(audio_track_cblk_t);
    } else {
        mCblk->buffers = sharedBuffer->pointer();
        // Force buffer full condition as data is already present in shared memory
        mCblk->stepUser(mCblk->frameCount);
    }
    mCblk->volumeLR = (uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) | uint16_t(mVolume[LEFT] * 0x1000);
    mCblk->sendLevel = uint16_t(mSendLevel * 0x1000);
    mAudioTrack->attachAuxEffect(mAuxEffectId);
    mCblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS;
    mCblk->waitTimeMs = 0;
    mRemainingFrames = mNotificationFramesAct;
    mLatency = afLatency + (1000*mCblk->frameCount) / sampleRate;
    return NO_ERROR;
}

//Data path during playback:
//application --> client buffer
//(AudioFlinger moves data from the client buffer to the hardware buffer)
//--> hardware buffer
//(the hardware playback process extracts data from the hardware buffer)
//
//what is a "frame"
//-----------------------------------------------------------------------------------------------------
//A frame is a set of samples, one per channel, at a particular instant in time. For stereophonic audio,
//a frame consists of two samples. For Dolby 5.1 Surround Sound, a frame would consist of six samples
//(left channel, center channel, right channel, rear right, rear left, and the low frequency channel).
//For monophonic audio, a frame is equivalent to one sample.
//Retrieved from "http://alsa.opensrc.org/Frame"
//A single sample belongs to one channel, but there may be one or more channels, so a single sample
//cannot describe the amount of data produced by sampling every channel once; that is what the notion
//of a frame is for. The size of a frame is the byte size of one sample multiplied by the number of
//channels. Current sound-card drivers also allocate and manage their internal buffers in units of frames.
//
//what is "bufCount"
//-------------------------------------------------------------------------------------------------------
//bufCount describes the byte width occupied by one time-domain sample point across all channels,
//i.e. the number of bytes in one frame (note: this is not the same thing as the channel count).
//
//what is "frame count"
//-------------------------------------------------------------------------------------------------------
//frameCount is the number of frames a buffer can hold, i.e. the number of time-domain points.
//So:
//afFrameCount -- size of the hardware buffer, in frames.
//afSampleRate -- hardware sample rate.
//afFrameCount/afSampleRate -- how long the data held in the hardware buffer can play; in ms this is
//                             1000*(afFrameCount/afSampleRate)
//The size of the hardware buffer is fixed. If it alone cannot provide the latency the output requires,
//several software buffers are needed so that more data is buffered and a longer playback time is covered.
//If the required latency is afLatency (ms), the minimum number of software buffers is:
//minBufCount = afLatency / (1000*afFrameCount/afSampleRate)
//This does not yet fix the size of each software buffer. Denote the size of one software buffer
//(in frames) by bufSize, and the sample rate before sample-rate conversion by sampleRate.
//Since each software buffer covers the same playback duration as the hardware buffer:
//bufSize/sampleRate = afFrameCount/afSampleRate;
//bufSize = afFrameCount*sampleRate/afSampleRate;
//So the total size of all software buffers together is
//minFrameCount = minBufCount*afFrameCount*sampleRate/afSampleRate;
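
To make the buffer-sizing arithmetic above concrete, here is a small stand-alone sketch that repeats the same calculation with illustrative numbers. The values of afSampleRate, afFrameCount, afLatency and sampleRate below are assumptions chosen for the example, not figures taken from any particular device.

#include <cstdint>
#include <cstdio>

// Stand-alone sketch of the buffer sizing done in AudioTrack::createTrack().
// The hardware parameters below are illustrative assumptions, not real device values.
int main() {
    int      afSampleRate = 44100;   // hardware output sample rate, Hz
    int      afFrameCount = 1024;    // hardware buffer size, in frames
    uint32_t afLatency    = 100;     // latency reported for the output, in ms
    uint32_t sampleRate   = 22050;   // sample rate of the track being created, Hz

    // Playback time covered by one hardware buffer, in ms: 1024/44100 s -> 23 ms.
    uint32_t hwBufMs = (1000 * afFrameCount) / afSampleRate;

    // Minimum number of client-side buffers needed to span the reported latency,
    // clamped to at least 2 (double buffering), as in createTrack(): 100/23 -> 4.
    uint32_t minBufCount = afLatency / hwBufMs;
    if (minBufCount < 2) minBufCount = 2;

    // Minimum total client buffer size in frames, expressed at the track's own
    // sample rate (before resampling to afSampleRate): 1024*22050*4/44100 = 2048.
    int minFrameCount = (afFrameCount * sampleRate * minBufCount) / afSampleRate;

    printf("hwBufMs=%u minBufCount=%u minFrameCount=%d\n",
           hwBufMs, minBufCount, minFrameCount);
    return 0;
}

With these assumed numbers the track needs at least 2048 frames of client buffer; at 22050 Hz that is roughly 93 ms of audio, i.e. on the order of the 100 ms latency the output reported, which is exactly the margin the minFrameCount check in createTrack() is trying to guarantee.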
