A few days ago, while going through the stream type code, I stopped short of following the call into AudioSystem::getOutput.
Today let's take a deeper look.
*****************************************Source code*************************************************
status_t AudioTrack::set(
        int streamType,
        uint32_t sampleRate,
        int format,
        int channels,
        int frameCount,
        uint32_t flags,
        callback_t cbf,
        void* user,
        int notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        int sessionId)
{
    LOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(), sharedBuffer->size());

    if (mAudioTrack != 0) {
        LOGE("Track already in use");
        return INVALID_OPERATION;
    }

    int afSampleRate;
    if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
        return NO_INIT;
    }
    uint32_t afLatency;
    if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) {
        return NO_INIT;
    }

    // handle default values first.
    if (streamType == AudioSystem::DEFAULT) {
        streamType = AudioSystem::MUSIC;
    }
    if (sampleRate == 0) {
        sampleRate = afSampleRate;
    }
    // these below should probably come from the audioFlinger too...
    if (format == 0) {
        format = AudioSystem::PCM_16_BIT;
    }
    if (channels == 0) {
        channels = AudioSystem::CHANNEL_OUT_STEREO;
    }

    // validate parameters
    if (!AudioSystem::isValidFormat(format)) {
        LOGE("Invalid format");
        return BAD_VALUE;
    }

    // force direct flag if format is not linear PCM
    if (!AudioSystem::isLinearPCM(format)) {
        flags |= AudioSystem::OUTPUT_FLAG_DIRECT;
    }

    if (!AudioSystem::isOutputChannel(channels)) {
        LOGE("Invalid channel mask");
        return BAD_VALUE;
    }
    uint32_t channelCount = AudioSystem::popCount(channels);

    audio_io_handle_t output = AudioSystem::getOutput((AudioSystem::stream_type)streamType,
            sampleRate, format, channels, (AudioSystem::output_flags)flags);

    if (output == 0) {
        LOGE("Could not get audio output for stream type %d", streamType);
        return BAD_VALUE;
    }

    mVolume[LEFT] = 1.0f;
    mVolume[RIGHT] = 1.0f;
    mSendLevel = 0;
    mFrameCount = frameCount;
    mNotificationFramesReq = notificationFrames;
    mSessionId = sessionId;
    mAuxEffectId = 0;

    // create the IAudioTrack
    status_t status = createTrack(streamType, sampleRate, format, channelCount,
                                  frameCount, flags, sharedBuffer, output, true);

    if (status != NO_ERROR) {
        return status;
    }

    if (cbf != 0) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        if (mAudioTrackThread == 0) {
          LOGE("Could not create callback thread");
          return NO_INIT;
        }
    }

    mStatus = NO_ERROR;

    mStreamType = streamType;
    mFormat = format;
    mChannels = channels;
    mChannelCount = channelCount;
    mSharedBuffer = sharedBuffer;
    mMuted = false;
    mActive = 0;
    mCbf = cbf;
    mUserData = user;
    mLoopCount = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    mFlags = flags;

    return NO_ERROR;
}

**********************************************************************************************
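For orientation, here is a minimal usage sketch of how a native client normally reaches set(): through an AudioTrack constructor, which forwards its arguments to set(). This is illustrative only; the exact constructor overload varies across Android releases.

#include <media/AudioTrack.h>

using namespace android;

// Illustrative only: the AudioTrack constructor forwards these arguments to
// set(), which is where AudioSystem::getOutput() gets called.
void playSketch(const int16_t* pcm, size_t bytes)
{
    AudioTrack track(AudioSystem::MUSIC,              // streamType drives output selection
                     44100,                           // sampleRate; 0 would fall back to the mixer rate
                     AudioSystem::PCM_16_BIT,         // format; 0 also defaults to PCM_16_BIT
                     AudioSystem::CHANNEL_OUT_STEREO, // channels; 0 also defaults to stereo
                     0);                              // frameCount 0: let createTrack() choose
    if (track.initCheck() != NO_ERROR) {
        // set() failed, e.g. AudioSystem::getOutput() returned 0 for this stream type
        return;
    }
    track.start();
    track.write(pcm, bytes);   // blocking write into the shared control block
    track.stop();
}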


#######################Walkthrough################################
// +++++++++++++++++++++++++AudioSystem::getOutput+++++++++++++++++++++++++++++++++++++++
audio_io_handle_t AudioSystem::getOutput(stream_type stream,
                                    uint32_t samplingRate,
                                    uint32_t format,
                                    uint32_t channels,
                                    output_flags flags)
{
    audio_io_handle_t output = 0;
    // Do not use stream to output map cache if the direct output
    // flag is set or if we are likely to use a direct output
    // (e.g voice call stream @ 8kHz could use BT SCO device and be routed to
    // a direct output on some platforms).
    // TODO: the output cache and stream to output mapping implementation needs to
    // be reworked for proper operation with direct outputs. This code is too specific
    // to the first use case we want to cover (Voice Recognition and Voice Dialer over
    // Bluetooth SCO
    if ((flags & AudioSystem::OUTPUT_FLAG_DIRECT) == 0 &&
        ((stream != AudioSystem::VOICE_CALL && stream != AudioSystem::BLUETOOTH_SCO) ||
         channels != AudioSystem::CHANNEL_OUT_MONO ||
         (samplingRate != 8000 && samplingRate != 16000))) {
        Mutex::Autolock _l(gLock);
        output = AudioSystem::gStreamOutputMap.valueFor(stream);
        LOGV_IF((output != 0), "getOutput() read %d from cache for stream %d", output, stream);
    }
    if (output == 0) {
        const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
        if (aps == 0) return 0;
        output = aps->getOutput(stream, samplingRate, format, channels, flags);
        if ((flags & AudioSystem::OUTPUT_FLAG_DIRECT) == 0) {
            Mutex::Autolock _l(gLock);
            AudioSystem::gStreamOutputMap.add(stream, output);
        }

// +++++++++++++++++++++++++++++AudioPolicyService::getOutput+++++++++++++++++++++++++++++++++++
audio_io_handle_t AudioPolicyService::getOutput(AudioSystem::stream_type stream,
                                    uint32_t samplingRate,
                                    uint32_t format,
                                    uint32_t channels,
                                    AudioSystem::output_flags flags)
{
    if (mpPolicyManager == NULL) {
        return 0;
    }
    LOGV("getOutput() tid %d", gettid());
    Mutex::Autolock _l(mLock);
    return mpPolicyManager->getOutput(stream, samplingRate, format, channels, flags);

// +++++++++++++++++++++++++++++AudioPolicyManagerBase::getOutput+++++++++++++++++++++++++++++++++++
audio_io_handle_t AudioPolicyManagerBase::getOutput(AudioSystem::stream_type stream,
                                    uint32_t samplingRate,
                                    uint32_t format,
                                    uint32_t channels,
                                    AudioSystem::output_flags flags)
{
    audio_io_handle_t output = 0;
    uint32_t latency = 0;
// Map the stream type to a routing strategy
    routing_strategy strategy = getStrategy((AudioSystem::stream_type)stream);
// Then map the strategy to a device.
// Taken together, these two lines resolve the stream type to its output device.
    uint32_t device = getDeviceForStrategy(strategy);
    LOGV("getOutput() stream %d, samplingRate %d, format %d, channels %x, flags %x", stream, samplingRate, format, channels, flags);

#ifdef AUDIO_POLICY_TEST
// When AUDIO_POLICY_TEST is defined, a test output is opened here
    if (mCurOutput != 0) {
        LOGV("getOutput() test output mCurOutput %d, samplingRate %d, format %d, channels %x, mDirectOutput %d",
                mCurOutput, mTestSamplingRate, mTestFormat, mTestChannels, mDirectOutput);
        if (mTestOutputs[mCurOutput] == 0) {
            LOGV("getOutput() opening test output");
            AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor();
            outputDesc->mDevice = mTestDevice;
            outputDesc->mSamplingRate = mTestSamplingRate;
            outputDesc->mFormat = mTestFormat;
            outputDesc->mChannels = mTestChannels;
            outputDesc->mLatency = mTestLatencyMs;
            outputDesc->mFlags = (AudioSystem::output_flags)(mDirectOutput ? AudioSystem::OUTPUT_FLAG_DIRECT : 0);
            outputDesc->mRefCount[stream] = 0;
            mTestOutputs[mCurOutput] = mpClientInterface->openOutput(&outputDesc->mDevice,
                                            &outputDesc->mSamplingRate,
                                            &outputDesc->mFormat,
                                            &outputDesc->mChannels,
                                            &outputDesc->mLatency,
                                            outputDesc->mFlags);
            if (mTestOutputs[mCurOutput]) {
                AudioParameter outputCmd = AudioParameter();
                outputCmd.addInt(String8("set_id"),mCurOutput);
                mpClientInterface->setParameters(mTestOutputs[mCurOutput],outputCmd.toString());
                addOutput(mTestOutputs[mCurOutput], outputDesc);
            }
        }
        return mTestOutputs[mCurOutput];
    }
#endif //AUDIO_POLICY_TEST

// In direct mode, a dedicated output is opened
    // open a direct output if required by specified parameters
    if (needsDirectOuput(stream, samplingRate, format, channels, flags, device)) {
        LOGV("getOutput() opening direct output device %x", device);
        AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor();
        outputDesc->mDevice = device;
        outputDesc->mSamplingRate = samplingRate;
        outputDesc->mFormat = format;
        outputDesc->mChannels = channels;
        outputDesc->mLatency = 0;
        outputDesc->mFlags = (AudioSystem::output_flags)(flags | AudioSystem::OUTPUT_FLAG_DIRECT);
        outputDesc->mRefCount[stream] = 0;
        output = mpClientInterface->openOutput(&outputDesc->mDevice,
                                        &outputDesc->mSamplingRate,
                                        &outputDesc->mFormat,
                                        &outputDesc->mChannels,
                                        &outputDesc->mLatency,
                                        outputDesc->mFlags);

        // only accept an output with the requeted parameters
        if (output == 0 ||
            (samplingRate != 0 && samplingRate != outputDesc->mSamplingRate) ||
            (format != 0 && format != outputDesc->mFormat) ||
            (channels != 0 && channels != outputDesc->mChannels)) {
            LOGV("getOutput() failed opening direct output: samplingRate %d, format %d, channels %d",
                    samplingRate, format, channels);
            if (output != 0) {
                mpClientInterface->closeOutput(output);
            }
            delete outputDesc;
            return 0;
        }
        addOutput(output, outputDesc);
        return output;
    }

    if (channels != 0 && channels != AudioSystem::CHANNEL_OUT_MONO &&
        channels != AudioSystem::CHANNEL_OUT_STEREO) {
        return 0;
    }

    // open a non direct output
// In stream (mixed) mode, the output is taken straight from member variables.
// Below we look at where those member variables are assigned.
    // get which output is suitable for the specified stream. The actual routing change will happen
    // when startOutput() will be called
    uint32_t a2dpDevice = device & AudioSystem::DEVICE_OUT_ALL_A2DP;
    if (AudioSystem::popCount((AudioSystem::audio_devices)device) == 2) {
#ifdef WITH_A2DP
        if (a2dpUsedForSonification() && a2dpDevice != 0) {
            // if playing on 2 devices among which one is A2DP, use duplicated output
            LOGV("getOutput() using duplicated output");
            LOGW_IF((mA2dpOutput == 0), "getOutput() A2DP device in multiple %x selected but A2DP output not opened", device);
// Both mA2dpOutput and mDuplicatedOutput are assigned in AudioPolicyManagerBase::handleA2dpConnection
            output = mDuplicatedOutput;

// +++++++++++++++++++++++++++++AudioPolicyManagerBase::handleA2dpConnection+++++++++++++++++++++++++++++++++++
status_t AudioPolicyManagerBase::handleA2dpConnection(AudioSystem::audio_devices device,
                                                 const char *device_address)
{
    // when an A2DP device is connected, open an A2DP and a duplicated output
    LOGV("opening A2DP output for device %s", device_address);
    AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor();
    outputDesc->mDevice = device;
// openOutput() is examined further below
    mA2dpOutput = mpClientInterface->openOutput(&outputDesc->mDevice,
                                            &outputDesc->mSamplingRate,
                                            &outputDesc->mFormat,
                                            &outputDesc->mChannels,
                                            &outputDesc->mLatency,
                                            outputDesc->mFlags);
    if (mA2dpOutput) {
        // add A2DP output descriptor
        addOutput(mA2dpOutput, outputDesc);
        //TODO: configure audio effect output stage here
        // set initial stream volume for A2DP device
        applyStreamVolumes(mA2dpOutput, device);
        if (a2dpUsedForSonification()) {
// mA2dpOutput and mHardwareOutput are two outputs that are already open
            mDuplicatedOutput = mpClientInterface->openDuplicateOutput(mA2dpOutput, mHardwareOutput);

// +++++++++++++++++++++++++++++AudioPolicyService::openDuplicateOutput+++++++++++++++++++++++++++++++++++
audio_io_handle_t AudioPolicyService::openDuplicateOutput(audio_io_handle_t output1,
                                                          audio_io_handle_t output2)
{
    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
    if (af == 0) {
        LOGW("openDuplicateOutput() could not get AudioFlinger");
        return 0;
    }
    return af->openDuplicateOutput(output1, output2);

// +++++++++++++++++++++++++++++++AudioFlinger::openDuplicateOutput+++++++++++++++++++++++++++++++++
int AudioFlinger::openDuplicateOutput(int output1, int output2)
{
    Mutex::Autolock _l(mLock);
    MixerThread *thread1 = checkMixerThread_l(output1);
    MixerThread *thread2 = checkMixerThread_l(output2);

// ++++++++++++++++++++++++++++++AudioFlinger::checkMixerThread_l++++++++++++++++++++++++++++++++++
// checkMixerThread_l() must be called with AudioFlinger::mLock held
AudioFlinger::MixerThread *AudioFlinger::checkMixerThread_l(int output) const
{
    PlaybackThread *thread = checkPlaybackThread_l(output);
    if (thread != NULL) {
        if (thread->type() == PlaybackThread::DIRECT) {
            thread = NULL;
        }
    }

// +++++++++++++++++++++++++++++++AudioFlinger::checkPlaybackThread_l+++++++++++++++++++++++++++++++++
// checkPlaybackThread_l() must be called with AudioFlinger::mLock held
AudioFlinger::PlaybackThread *AudioFlinger::checkPlaybackThread_l(int output) const
{
    PlaybackThread *thread = NULL;
    if (mPlaybackThreads.indexOfKey(output) >= 0) {
// AudioFlinger::openOutput appends entries to mPlaybackThreads
        thread = (PlaybackThread *)mPlaybackThreads.valueFor(output).get();
    }
    return thread;
}
// -------------------------------AudioFlinger::checkPlaybackThread_l---------------------------------
    return (MixerThread *)thread;
}
// ------------------------------AudioFlinger::checkMixerThread_l----------------------------------
    if (thread1 == NULL || thread2 == NULL) {
        LOGW("openDuplicateOutput() wrong output mixer type for output %d or %d", output1, output2);
        return 0;
    }
    int id = nextUniqueId();
// Pick one thread as the main thread and use it to create the DuplicatingThread
    DuplicatingThread *thread = new DuplicatingThread(this, thread1, id);

// ++++++++++++++++++++++++++++AudioFlinger::DuplicatingThread::DuplicatingThread++++++++++++++++++++++++++++++++++++
AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& audioFlinger, AudioFlinger::MixerThread* mainThread, int id)
    :   MixerThread(audioFlinger, mainThread->getOutput(), id, mainThread->device()), mWaitTimeMs(UINT_MAX)
{
    mType = PlaybackThread::DUPLICATING;
    addOutputTrack(mainThread);

// +++++++++++++++++++++++++++++AudioFlinger::DuplicatingThread::addOutputTrack+++++++++++++++++++++++++++++++++++
void AudioFlinger::DuplicatingThread::addOutputTrack(MixerThread *thread)
{
    int frameCount = (3 * mFrameCount * mSampleRate) / thread->sampleRate();
    OutputTrack *outputTrack = new OutputTrack((ThreadBase *)thread,
                                            this,
                                            mSampleRate,
                                            mFormat,
                                            mChannelCount,
                                            frameCount);
    if (outputTrack->cblk() != NULL) {
        thread->setStreamVolume(AudioSystem::NUM_STREAM_TYPES, 1.0f);
        mOutputTracks.add(outputTrack);
        LOGV("addOutputTrack() track %p, on thread %p", outputTrack, thread);
        updateWaitTime();
    }

// +++++++++++++++++++++++++++++AudioFlinger::PlaybackThread::OutputTrack::OutputTrack+++++++++++++++++++++++++++++++++++
AudioFlinger::PlaybackThread::OutputTrack::OutputTrack(
            const wp<ThreadBase>& thread,
            DuplicatingThread *sourceThread,
            uint32_t sampleRate,
            int format,
            int channelCount,
            int frameCount)
    :   Track(thread, NULL, AudioSystem::NUM_STREAM_TYPES, sampleRate, format, channelCount, frameCount, NULL, 0),
    mActive(false), mSourceThread(sourceThread)
{
    PlaybackThread *playbackThread = (PlaybackThread *)thread.unsafe_get();
    if (mCblk != NULL) {
        mCblk->flags |= CBLK_DIRECTION_OUT;
        mCblk->buffers = (char*)mCblk + sizeof(audio_track_cblk_t);
        mCblk->volume[0] = mCblk->volume[1] = 0x1000;
        mOutBuffer.frameCount = 0;
        playbackThread->mTracks.add(this);
        LOGV("OutputTrack constructor mCblk %p, mBuffer %p, mCblk->buffers %p, mCblk->frameCount %d, mCblk->sampleRate %d, mCblk->channelCount %d mBufferEnd %p",
                mCblk, mBuffer, mCblk->buffers, mCblk->frameCount, mCblk->sampleRate, mCblk->channelCount, mBufferEnd);
    } else {
        LOGW("Error creating output track on thread %p", playbackThread);
    }
}
// -----------------------------AudioFlinger::PlaybackThread::OutputTrack::OutputTrack-----------------------------------
}
// -----------------------------AudioFlinger::DuplicatingThread::addOutputTrack-----------------------------------
}
// ----------------------------AudioFlinger::DuplicatingThread::DuplicatingThread------------------------------------
// Then add the other thread as well
    thread->addOutputTrack(thread2);
    mPlaybackThreads.add(id, thread);
    // notify client processes of the new output creation
    thread->audioConfigChanged_l(AudioSystem::OUTPUT_OPENED);
    return id;
}
// -------------------------------AudioFlinger::openDuplicateOutput---------------------------------
}
// -----------------------------AudioPolicyService::openDuplicateOutput-----------------------------------
        }
        if (mDuplicatedOutput != 0 ||
            !a2dpUsedForSonification()) {
            // If both A2DP and duplicated outputs are open, send device address to A2DP hardware
            // interface
            AudioParameter param;
            param.add(String8("a2dp_sink_address"), String8(device_address));
            mpClientInterface->setParameters(mA2dpOutput, param.toString());
            mA2dpDeviceAddress = String8(device_address, MAX_DEVICE_ADDRESS_LEN);
            if (a2dpUsedForSonification()) {
                // add duplicated output descriptor
                AudioOutputDescriptor *dupOutputDesc = new AudioOutputDescriptor();
                dupOutputDesc->mOutput1 = mOutputs.valueFor(mHardwareOutput);
                dupOutputDesc->mOutput2 = mOutputs.valueFor(mA2dpOutput);
                dupOutputDesc->mSamplingRate = outputDesc->mSamplingRate;
                dupOutputDesc->mFormat = outputDesc->mFormat;
                dupOutputDesc->mChannels = outputDesc->mChannels;
                dupOutputDesc->mLatency = outputDesc->mLatency;
                addOutput(mDuplicatedOutput, dupOutputDesc);
                applyStreamVolumes(mDuplicatedOutput, device);
            }
        } else {
            LOGW("getOutput() could not open duplicated output for %d and %d",
                    mHardwareOutput, mA2dpOutput);
            mpClientInterface->closeOutput(mA2dpOutput);
            mOutputs.removeItem(mA2dpOutput);
            mA2dpOutput = 0;
            delete outputDesc;
            return NO_INIT;
        }
    } else {
        LOGW("setDeviceConnectionState() could not open A2DP output for device %x", device);
        delete outputDesc;
        return NO_INIT;
    }
    AudioOutputDescriptor *hwOutputDesc = mOutputs.valueFor(mHardwareOutput);
    if (!a2dpUsedForSonification()) {
        // mute music on A2DP output if a notification or ringtone is playing
        uint32_t refCount = hwOutputDesc->strategyRefCount(STRATEGY_SONIFICATION);
        for (uint32_t i = 0; i < refCount; i++) {
            setStrategyMute(STRATEGY_MEDIA, true, mA2dpOutput);
        }
    }
    mA2dpSuspended = false;
    return NO_ERROR;
}
// -----------------------------AudioPolicyManagerBase::handleA2dpConnection-----------------------------------
        } else
#endif
        {
            // if playing on 2 devices among which none is A2DP, use hardware output
            output = mHardwareOutput;

// +++++++++++++++++++++++++++++++AudioPolicyManagerBase::AudioPolicyManagerBase+++++++++++++++++++++++++++++++++
mHardwareOutput is assigned in the AudioPolicyManagerBase constructor.
AudioPolicyManagerBase::AudioPolicyManagerBase(AudioPolicyClientInterface *clientInterface)
    :
#ifdef AUDIO_POLICY_TEST
    Thread(false),
#endif //AUDIO_POLICY_TEST
    mPhoneState(AudioSystem::MODE_NORMAL), mRingerMode(0),
    mMusicStopTime(0), mLimitRingtoneVolume(false), mLastVoiceVolume(-1.0f),
    mTotalEffectsCpuLoad(0), mTotalEffectsMemory(0),
    mA2dpSuspended(false)
{
    mpClientInterface = clientInterface;
    for (int i = 0; i < AudioSystem::NUM_FORCE_USE; i++) {
        mForceUse[i] = AudioSystem::FORCE_NONE;
    }

    // devices available by default are speaker, ear piece and microphone
    mAvailableOutputDevices = AudioSystem::DEVICE_OUT_EARPIECE |
                        AudioSystem::DEVICE_OUT_SPEAKER;
    mAvailableInputDevices = AudioSystem::DEVICE_IN_BUILTIN_MIC;
#ifdef WITH_A2DP
    mA2dpOutput = 0;
    mDuplicatedOutput = 0;
    mA2dpDeviceAddress = String8("");
#endif
    mScoDeviceAddress = String8("");

    // open hardware output
    AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor();
// The assignment below shows that, in the common case without A2DP, the device we use is effectively fixed: DEVICE_OUT_SPEAKER
    outputDesc->mDevice = (uint32_t)AudioSystem::DEVICE_OUT_SPEAKER;
// From the place where the AudioPolicyManagerBase object is created, we know mpClientInterface is actually the AudioPolicyService object.
    mHardwareOutput = mpClientInterface->openOutput(&outputDesc->mDevice,
                                    &outputDesc->mSamplingRate,
                                    &outputDesc->mFormat,
                                    &outputDesc->mChannels,
                                    &outputDesc->mLatency,
                                    outputDesc->mFlags);
    if (mHardwareOutput == 0) {
        LOGE("Failed to initialize hardware output stream, samplingRate: %d, format %d, channels %d",
                outputDesc->mSamplingRate, outputDesc->mFormat, outputDesc->mChannels);
    } else {
        addOutput(mHardwareOutput, outputDesc);
        setOutputDevice(mHardwareOutput, (uint32_t)AudioSystem::DEVICE_OUT_SPEAKER, true);
        //TODO: configure audio effect output stage here
    }

// +++++++++++++++++++++++++++++++AudioPolicyService::openOutput+++++++++++++++++++++++++++++++++
audio_io_handle_t AudioPolicyService::openOutput(uint32_t *pDevices,
                                uint32_t *pSamplingRate,
                                uint32_t *pFormat,
                                uint32_t *pChannels,
                                uint32_t *pLatencyMs,
                                AudioSystem::output_flags flags)
{
    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
    if (af == 0) {
        LOGW("openOutput() could not get AudioFlinger");
        return 0;
    }
    return af->openOutput(pDevices,
                          pSamplingRate,
                          (uint32_t *)pFormat,
                          pChannels,
                          pLatencyMs,
                          flags);

// ++++++++++++++++++++++++++++++AudioFlinger::openOutput++++++++++++++++++++++++++++++++++
int AudioFlinger::openOutput(uint32_t *pDevices,
                                uint32_t *pSamplingRate,
                                uint32_t *pFormat,
                                uint32_t *pChannels,
                                uint32_t *pLatencyMs,
                                uint32_t flags)
{
    status_t status;
    PlaybackThread *thread = NULL;
    mHardwareStatus = AUDIO_HW_OUTPUT_OPEN;
    uint32_t samplingRate = pSamplingRate ? *pSamplingRate : 0;
    uint32_t format = pFormat ? *pFormat : 0;
    uint32_t channels = pChannels ? *pChannels : 0;
    uint32_t latency = pLatencyMs ? *pLatencyMs : 0;

    LOGV("openOutput(), Device %x, SamplingRate %d, Format %d, Channels %x, flags %x",
            pDevices ? *pDevices : 0,
            samplingRate,
            format,
            channels,
            flags);

    if (pDevices == NULL || *pDevices == 0) {
        return 0;
    }
    Mutex::Autolock _l(mLock);

// Call the HAL interface to open an output stream.
// The device type is passed down to the HAL as a parameter.
// We have walked through this function before; here we only revisit the parts related to the device type.
// ++++++++++++++++++++++++++++AudioHardwareALSA::openOutputStream++++++++++++++++++++++++++++++++++++
AudioStreamOut *AudioHardwareALSA::openOutputStream(uint32_t devices,
                                    int *format,
                                    uint32_t *channels,
                                    uint32_t *sampleRate,
                                    status_t *status)
{
...
// From the definition of the device types we know each device type occupies one bit.
// From the implementation of getDeviceForStrategy we know the device resolved from the stream type is always a single one.
// This checks that the device mask has exactly one bit set.
    if (devices & (devices - 1)) {
        if (status) *status = err;
        LOGD("openOutputStream called with bad devices");
        return out;
    }

    // Find the appropriate alsa device
// The contents of ALSAHandleList are actually copied from an array.
// The array only lists some initial values; many useful fields are assigned later.
// One important field of the struct, module (alsa_device_t *), is assigned when s_open is called.
// +++++++++++++++++++++++++++++_defaults+++++++++++++++++++++++++++++++++++
static alsa_handle_t _defaults[] = {
    {
        module      : 0,
        devices     : IMX51_OUT_DEFAULT,
        curDev      : 0,
        curMode     : 0,
        handle      : 0,
        format      : SND_PCM_FORMAT_S16_LE, // AudioSystem::PCM_16_BIT
        channels    : 2,
        sampleRate  : DEFAULT_SAMPLE_RATE,
        latency     : 200000, // Desired Delay in usec
        bufferSize  : 6144, // Desired Number of samples
        modPrivate  : (void *)&setDefaultControls,
    },
    {
        module      : 0,
        devices     : IMX51_IN_DEFAULT,
        curDev      : 0,
        curMode     : 0,
        handle      : 0,
        format      : SND_PCM_FORMAT_S16_LE, // AudioSystem::PCM_16_BIT
        channels    : 2,
        sampleRate  : DEFAULT_SAMPLE_RATE,
        latency     : 250000, // Desired Delay in usec
        bufferSize  : 6144, // Desired Number of samples
        modPrivate  : (void *)&setDefaultControls,
    },
};
// -----------------------------_defaults-----------------------------------
    for(ALSAHandleList::iterator it = mDeviceList.begin();
        it != mDeviceList.end(); ++it)
// Find the slot that matches the device type
        if (it->devices & devices) {
// The device type is passed in again as a parameter when calling s_open
            err = mALSADevice->open(&(*it), devices, mode());
            if (err) break;

// +++++++++++++++++++++++++++++s_open+++++++++++++++++++++++++++++++++++
static status_t s_open(alsa_handle_t *handle, uint32_t devices, int mode)
{
...
// The device type is used when looking up the device name
    const char *devName = deviceName(handle, devices, mode, 1);

// +++++++++++++++++++++++++++++deviceName+++++++++++++++++++++++++++++++++++
//card_device =0, return the card name, card_device=1, return the card device name
const char *deviceName(alsa_handle_t *alsa_handle, uint32_t device, int mode, int card_device)
{
...
    property_get("ro.HDMI_AUDIO_OUTPUT", value, "");
// The device type is used here in the check.
// Obtaining the device name therefore depends, to a large extent, on the configuration.
    if((device & AudioSystem::DEVICE_OUT_WIRED_HDMI) && havespdifdevice && (strcmp(value, "1") == 0))
    {
        return spdifcardname;
    }else if(havesgtldevice)
    {
        return sgtlcardname;
    }
    return "default";
}
// -----------------------------deviceName-----------------------------------

    // The PCM stream is opened in blocking mode, per ALSA defaults.  The
    // AudioFlinger seems to assume blocking mode too, so asynchronous mode
    // should not be used.
// Open an ALSA device using the device name obtained above
    int err = snd_pcm_open(&handle->handle, devName, direction(handle), 0);
    if (err < 0) {
        LOGE("Failed to Initialize any ALSA %s device: %s", stream, strerror(err));
        return NO_INIT;
    }
...
// Save the device type into the slot found above
    handle->curDev = devices;
    handle->curMode = mode;
    return err;
}
// -----------------------------s_open-----------------------------------
            if (devices & AudioSystem::DEVICE_OUT_WIRED_HDMI){
                strcpy(mCurCard ,SPDIF);
                mMixer = mMixerSpdif;
            } else {
                strcpy(mCurCard,SGTL5000);
                mMixer = mMixerSgtl5000;
            }
            out = new AudioStreamOutALSA(this, &(*it));
            err = out->set(format, channels, sampleRate);
            break;
        }
    if (status) *status = err;
    return out;
}
// ----------------------------AudioHardwareALSA::openOutputStream------------------------------------
    AudioStreamOut *output = mAudioHardware->openOutputStream(*pDevices,
                                                             (int *)&format,
                                                             &channels,
                                                             &samplingRate,
                                                             &status);
    LOGV("openOutput() openOutputStream returned output %p, SamplingRate %d, Format %d, Channels %x, status %d",
            output,
            samplingRate,
            format,
            channels,
            status);
    mHardwareStatus = AUDIO_HW_IDLE;
    if (output != 0) {
        int id = nextUniqueId();
        if ((flags & AudioSystem::OUTPUT_FLAG_DIRECT) ||
            (format != AudioSystem::PCM_16_BIT) ||
            (channels != AudioSystem::CHANNEL_OUT_STEREO)) {
// In direct mode, create a DirectOutputThread
            thread = new DirectOutputThread(this, output, id, *pDevices);
            LOGV("openOutput() created direct output: ID %d thread %p", id, thread);
        } else {
// In stream (mixed) mode, create a MixerThread
            thread = new MixerThread(this, output, id, *pDevices);
            LOGV("openOutput() created mixer output: ID %d thread %p", id, thread);
#ifdef LVMX
            unsigned bitsPerSample =
                (format == AudioSystem::PCM_16_BIT) ? 16 :
                    ((format == AudioSystem::PCM_8_BIT) ? 8 : 0);
            unsigned channelCount = (channels == AudioSystem::CHANNEL_OUT_STEREO) ? 2 : 1;
            int audioOutputType = LifeVibes::threadIdToAudioOutputType(thread->id());
            LifeVibes::init_aot(audioOutputType, samplingRate, bitsPerSample, channelCount);
            LifeVibes::setDevice(audioOutputType, *pDevices);
#endif
        }
// Append the thread created above to the playback thread list and associate it with a unique id
        mPlaybackThreads.add(id, thread);
        if (pSamplingRate) *pSamplingRate = samplingRate;
        if (pFormat) *pFormat = format;
        if (pChannels) *pChannels = channels;
        if (pLatencyMs) *pLatencyMs = thread->latency();
        // notify client processes of the new output creation
        thread->audioConfigChanged_l(AudioSystem::OUTPUT_OPENED);
        return id;
    }
    return 0;
}
// ------------------------------AudioFlinger::openOutput----------------------------------
}
// -------------------------------AudioPolicyService::openOutput---------------------------------
    updateDeviceForStrategy();
#ifdef AUDIO_POLICY_TEST
    AudioParameter outputCmd = AudioParameter();
    outputCmd.addInt(String8("set_id"), 0);
    mpClientInterface->setParameters(mHardwareOutput, outputCmd.toString());
    mTestDevice = AudioSystem::DEVICE_OUT_SPEAKER;
    mTestSamplingRate = 44100;
    mTestFormat = AudioSystem::PCM_16_BIT;
    mTestChannels =  AudioSystem::CHANNEL_OUT_STEREO;
    mTestLatencyMs = 0;
    mCurOutput = 0;
    mDirectOutput = false;
    for (int i = 0; i < NUM_TEST_OUTPUTS; i++) {
        mTestOutputs[i] = 0;
    }
    const size_t SIZE = 256;
    char buffer[SIZE];
    snprintf(buffer, SIZE, "AudioPolicyManagerTest");
    run(buffer, ANDROID_PRIORITY_AUDIO);
#endif //AUDIO_POLICY_TEST
}

The AudioPolicyManagerBase object is created in the AudioPolicyService constructor.
// ++++++++++++++++++++++++++++++AudioPolicyService::AudioPolicyService++++++++++++++++++++++++++++++++++
AudioPolicyService::AudioPolicyService()
    : BnAudioPolicyService() , mpPolicyManager(NULL)
{
    char value[PROPERTY_VALUE_MAX];

    // start tone playback thread
    mTonePlaybackThread = new AudioCommandThread(String8(""));
    // start audio commands thread
    mAudioCommandThread = new AudioCommandThread(String8("ApmCommandThread"));

#if (defined GENERIC_AUDIO) || (defined AUDIO_POLICY_TEST)
    mpPolicyManager = new AudioPolicyManagerBase(this);
    LOGV("build for GENERIC_AUDIO - using generic audio policy");
#else
    // if running in emulation - use the emulator driver
    if (property_get("ro.kernel.qemu", value, 0)) {
        LOGV("Running in emulation - using generic audio policy");
        mpPolicyManager = new AudioPolicyManagerBase(this);
    }
    else {
        LOGV("Using hardware specific audio policy");
// Normally execution reaches this branch.
// We looked at this function before; what is actually used should be the AudioPolicyManager
// from the ALSA module, which inherits from AudioPolicyManagerBase and changes almost nothing.
        mpPolicyManager = createAudioPolicyManager(this);
    }
#endif

    // load properties
    property_get("ro.camera.sound.forced", value, "0");
    mpPolicyManager->setSystemProperty("ro.camera.sound.forced", value);
}
// ------------------------------AudioPolicyService::AudioPolicyService----------------------------------
// -------------------------------AudioPolicyManagerBase::AudioPolicyManagerBase---------------------------------
        }
        LOGV("getOutput() using output %d for 2 devices %x", output, device);
    } else {
#ifdef WITH_A2DP
        if (a2dpDevice != 0) {
            // if playing on A2DP device, use a2dp output
            LOGW_IF((mA2dpOutput == 0), "getOutput() A2DP device %x selected but A2DP output not opened", device);
            output = mA2dpOutput;
        } else
#endif
        {
            // if playing on not A2DP device, use hardware output
            output = mHardwareOutput;
        }
    }
    LOGW_IF((output ==0), "getOutput() could not find output for stream %d, samplingRate %d, format %d, channels %x, flags %x",
                stream, samplingRate, format, channels, flags);
    return output;
}
// -----------------------------AudioPolicyManagerBase::getOutput-----------------------------------
}
// -----------------------------AudioPolicyService::getOutput-----------------------------------
    }
    return output;
}
// -------------------------AudioSystem::getOutput---------------------------------------
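Before moving on, it is worth restating what getStrategy() and getDeviceForStrategy() accomplish, since the whole walkthrough hangs off them. Below is a self-contained, deliberately simplified model of that two-step resolution; all enum values, names and tables here are invented for illustration and are much coarser than the real policy code.

#include <cstdint>
#include <cstdio>

enum stream_type      { MUSIC, RING, VOICE_CALL };
enum routing_strategy { STRATEGY_MEDIA, STRATEGY_SONIFICATION, STRATEGY_PHONE };

// Each device occupies one bit, mirroring AudioSystem::audio_devices.
enum : uint32_t {
    DEVICE_OUT_EARPIECE       = 0x01,
    DEVICE_OUT_SPEAKER        = 0x02,
    DEVICE_OUT_BLUETOOTH_A2DP = 0x80,
};

// Step 1: stream type -> routing strategy.
static routing_strategy getStrategySketch(stream_type stream) {
    switch (stream) {
    case VOICE_CALL: return STRATEGY_PHONE;
    case RING:       return STRATEGY_SONIFICATION;
    default:         return STRATEGY_MEDIA;   // MUSIC and most other streams
    }
}

// Step 2: strategy -> device bitmask. The real function also consults
// force-use settings and the set of connected devices; with only the default
// devices available it degenerates to the speaker, matching the constructor's
// fixed DEVICE_OUT_SPEAKER assignment seen above.
static uint32_t getDeviceForStrategySketch(routing_strategy strategy, bool a2dpConnected) {
    if (a2dpConnected) return DEVICE_OUT_BLUETOOTH_A2DP;
    if (strategy == STRATEGY_PHONE) return DEVICE_OUT_EARPIECE;
    return DEVICE_OUT_SPEAKER;
}

int main() {
    uint32_t device = getDeviceForStrategySketch(getStrategySketch(MUSIC), false);
    printf("MUSIC routed to device mask 0x%x\n", (unsigned)device);  // prints 0x2 (speaker)
    return 0;
}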



###########################################################


&&&&&&&&&&&&&&&&&&&&&&&Summary&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
Map the stream type to its routing strategy.
Map the strategy to a device type.
Use the device type to find the corresponding alsa_handle_t object.
From the configuration and the device type, determine the device name, then open that device to obtain a snd_pcm_t object.
Store the snd_pcm_t object back in the alsa_handle_t object (the last three steps are sketched below).
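To make the tail of that chain concrete, here is a condensed, hypothetical model of the last three steps. The slot struct and card names are invented for the sketch; only the snd_pcm_open() call is the real ALSA API.

#include <alsa/asoundlib.h>
#include <cstddef>
#include <cstdint>

// Hypothetical slot type mirroring the alsa_handle_t fields used above.
struct AlsaSlot {
    uint32_t   devices;   // device-type bits this slot can serve
    uint32_t   curDev;    // device actually opened (stored back here)
    snd_pcm_t *handle;    // the snd_pcm_t obtained from snd_pcm_open()
};

// The card name depends on configuration (cf. the ro.HDMI_AUDIO_OUTPUT
// property checked in deviceName() above); the names here are invented.
static const char *pickDeviceName(uint32_t device, bool hdmiRouted) {
    (void)device;
    return hdmiRouted ? "spdif" : "default";
}

// Find the slot whose device mask covers the requested device, resolve a
// name, open the PCM, and store the handle and device type back in the slot.
static int openForDevice(AlsaSlot *slots, size_t count, uint32_t device) {
    for (size_t i = 0; i < count; i++) {
        if (!(slots[i].devices & device)) continue;          // find the slot
        const char *name = pickDeviceName(device, false);    // resolve a name
        int err = snd_pcm_open(&slots[i].handle, name,
                               SND_PCM_STREAM_PLAYBACK, 0);  // obtain snd_pcm_t
        if (err == 0) slots[i].curDev = device;
        return err;
    }
    return -1;  // no slot serves this device
}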


We already walked through the write operation earlier:
it essentially copies data into the stopped_areas or running_areas members of the snd_pcm_t object,
or writes the data directly through the writei function in the snd_pcm_t object's fast_ops.
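As a minimal sketch of that write path, assuming an already-configured PCM handle: snd_pcm_writei() is the public entry point that dispatches to fast_ops->writei on the snd_pcm_t, and snd_pcm_prepare() recovers from an underrun; everything else below is illustrative.

#include <alsa/asoundlib.h>
#include <cerrno>
#include <cstdint>
#include <vector>

// Writes one period of stereo S16_LE silence through snd_pcm_writei().
// -EPIPE signals an underrun, from which snd_pcm_prepare() recovers.
static void writeSilence(snd_pcm_t *pcm) {
    const snd_pcm_uframes_t frames = 1024;
    std::vector<int16_t> buf(frames * 2, 0);  // 2 samples per frame (stereo)
    snd_pcm_sframes_t n = snd_pcm_writei(pcm, buf.data(), frames);
    if (n == -EPIPE) {
        snd_pcm_prepare(pcm);
        snd_pcm_writei(pcm, buf.data(), frames);
    }
}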
&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
