Continuing to chew through the AudioTrack test code.


*****************************************Source code*************************************************
//Test case 2: getPlaybackHeadPosition() increases after play()
@LargeTest
public void testPlaybackHeadPositionIncrease() throws Exception {
// constants for test
final String TEST_NAME = "testPlaybackHeadPositionIncrease";
final int TEST_SR = 22050;
final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

//-------- initialization --------------
int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
minBuffSize, TEST_MODE);
byte data[] = new byte[minBuffSize/2];
//-------- test --------------
assumeTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
track.write(data, 0, data.length);
track.write(data, 0, data.length);
track.play();
Thread.sleep(100);
log(TEST_NAME, "position ="+ track.getPlaybackHeadPosition());
assertTrue(TEST_NAME, track.getPlaybackHeadPosition() > 0);
//-------- tear down --------------
track.release();
}
**********************************************************************************************
Source path:
frameworks\base\media\tests\mediaframeworktest\src\com\android\mediaframeworktest\functional\MediaAudioTrackTest.java


#######################Explanation################################
//Test case 2: getPlaybackHeadPosition() increases after play()
// In the previous article on getPlaybackHeadPosition, getPlaybackHeadPosition() was called without play() ever being called, so the position obtained should be 0.
// In this test, play() is called right after the write() calls, and the test then sleeps for 100 ms before calling getPlaybackHeadPosition().
// In other words, the track has been playing for about 100 ms when the position is read, so it should of course be greater than 0 (at 22050 Hz, 100 ms corresponds to at most roughly 2205 frames; the assertion only requires the position to be positive).
@LargeTest
public void testPlaybackHeadPositionIncrease() throws Exception {
// constants for test
final String TEST_NAME = "testPlaybackHeadPositionIncrease";
final int TEST_SR = 22050;
final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
final int TEST_MODE = AudioTrack.MODE_STREAM;
final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

//-------- initialization --------------
int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
minBuffSize, TEST_MODE);
byte data[] = new byte[minBuffSize/2];
//-------- test --------------
assumeTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
track.write(data, 0, data.length);
track.write(data, 0, data.length);
// The thread to pull on this time is how the position gets changed.
// Since the position read after playback has started is positive, the call to play() must be what causes the position to change.
// Let's look at how play() leads to the position being updated.
track.play();
++++++++++++++++++++++++++++++play++++++++++++++++++++++++++++++++++
/**
* Starts playing an AudioTrack.
* @throws IllegalStateException
*/
public void play()
throws IllegalStateException {
if (mState != STATE_INITIALIZED) {
throw(new IllegalStateException("play() called on uninitialized AudioTrack."));
}


synchronized(mPlayStateLock) {
native_start();
++++++++++++++++++++++++++++++android_media_AudioTrack_start++++++++++++++++++++++++++++++++++
static void
android_media_AudioTrack_start(JNIEnv *env, jobject thiz)
{
AudioTrack *lpTrack = (AudioTrack *)env->GetIntField(
thiz, javaAudioTrackFields.nativeTrackInJavaObj);
if (lpTrack == NULL ) {
jniThrowException(env, "java/lang/IllegalStateException",
"Unable to retrieve AudioTrack pointer for start()");
return;
}


lpTrack->start();
++++++++++++++++++++++++++++++AudioTrack::start++++++++++++++++++++++++++++++++++
void AudioTrack::start()
{
// mAudioTrackThread is assigned in AudioTrack::set.
sp<AudioTrackThread> t = mAudioTrackThread;
status_t status;


LOGV("start %p", this);
if (t != 0) {
// exitPending() returns the member variable mExitPending.
if (t->exitPending()) {
if (t->requestExitAndWait() == WOULD_BLOCK) {
LOGE("AudioTrack::start called from thread");
return;
}
}
t->mLock.lock();
}


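// android_atomic_or() returns the previous value of mActive, so this block only runs on the inactive -> active transition; calling start() again on a track that is already playing skips it.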
if (android_atomic_or(1, &mActive) == 0) {
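// mCblk->server is the play head counter advanced by the mixer as frames are consumed; as far as I can tell from this source tree, getPlaybackHeadPosition() ultimately returns this value via AudioTrack::getPosition(). mNewPosition is the next position at which an EVENT_NEW_POS callback should fire.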
mNewPosition = mCblk->server + mUpdatePeriod;
mCblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS;
mCblk->waitTimeMs = 0;
mCblk->flags &= ~CBLK_DISABLED_ON;
if (t != 0) {
// This calls Thread::run:
t->run("AudioTrackThread", THREAD_PRIORITY_AUDIO_CLIENT);
+++++++++++++++++++++++++++++++Thread::run+++++++++++++++++++++++++++++++++
status_t Thread::run(const char* name, int32_t priority, size_t stack)
{
Mutex::Autolock _l(mLock);


if (mRunning) {
// thread already started
return INVALID_OPERATION;
}


// reset status and exitPending to their default value, so we can
// try again after an error happened (either below, or in readyToRun())
mStatus = NO_ERROR;
// This is the member variable that exitPending() returns.
mExitPending = false;
mThread = thread_id_t(-1);

// hold a strong reference on ourself
mHoldSelf = this;


mRunning = true;


bool res;
// When AudioTrack::set was called, threadCanCallJava was specified as false.
if (mCanCallJava) {
res = createThreadEtc(_threadLoop,
this, name, priority, stack, &mThread);
+++++++++++++++++++++++++++++++createThreadEtc+++++++++++++++++++++++++++++++++
// Create thread with lots of parameters
inline bool createThreadEtc(thread_func_t entryFunction,
void *userData,
const char* threadName = "android:unnamed_thread",
int32_t threadPriority = PRIORITY_DEFAULT,
size_t threadStackSize = 0,
thread_id_t *threadId = 0)
{
return androidCreateThreadEtc(entryFunction, userData, threadName,
threadPriority, threadStackSize, threadId) ? true : false;
+++++++++++++++++++++++++++++++++androidCreateThreadEtc+++++++++++++++++++++++++++++++
int androidCreateThreadEtc(android_thread_func_t entryFunction,
void *userData,
const char* threadName,
int32_t threadPriority,
size_t threadStackSize,
android_thread_id_t *threadId)
{
// gCreateThreadFn is initialized to androidCreateRawThreadEtc.
// androidSetCreateThreadFunc can reassign it:
return gCreateThreadFn(entryFunction, userData, threadName,
threadPriority, threadStackSize, threadId);
++++++++++++++++++++++++++++++androidSetCreateThreadFunc++++++++++++++++++++++++++++++++++
void androidSetCreateThreadFunc(android_create_thread_fn func)
{
gCreateThreadFn = func;
}


AndroidRuntime::startReg calls androidSetCreateThreadFunc:
+++++++++++++++++++++++++++++AndroidRuntime::startReg+++++++++++++++++++++++++++++++++++
/*
* Register android native functions with the VM.
*/
/*static*/ int AndroidRuntime::startReg(JNIEnv* env)
{
/*
* This hook causes all future threads created in this process to be
* attached to the JavaVM. (This needs to go away in favor of JNI
* Attach calls.)
*/
androidSetCreateThreadFunc((android_create_thread_fn) javaCreateThreadEtc);
+++++++++++++++++++++++++++javaCreateThreadEtc+++++++++++++++++++++++++++++++++++++
/*
* This is invoked from androidCreateThreadEtc() via the callback
* set with androidSetCreateThreadFunc().
*
* We need to create the new thread in such a way that it gets hooked
* into the VM before it really starts executing.
*/
/*static*/ int AndroidRuntime::javaCreateThreadEtc(
android_thread_func_t entryFunction,
void* userData,
const char* threadName,
int32_t threadPriority,
size_t threadStackSize,
android_thread_id_t* threadId)
{
void** args = (void**) malloc(3 * sizeof(void*)); // javaThreadShell must free
int result;


assert(threadName != NULL);


args[0] = (void*) entryFunction;
args[1] = userData;
args[2] = (void*) strdup(threadName); // javaThreadShell must free


// Since the threads may call into Java, the thread entry is wrapped in a shell (javaThreadShell) before the raw thread is created.
result = androidCreateRawThreadEtc(AndroidRuntime::javaThreadShell, args,
threadName, threadPriority, threadStackSize, threadId);
return result;
}
---------------------------javaCreateThreadEtc-------------------------------------


LOGV("--- registering native functions ---\n");


/*
* Every "register" function calls one or more things that return
* a local reference (e.g. FindClass). Because we haven't really
* started the VM yet, they're all getting stored in the base frame
* and never released. Use Push/Pop to manage the storage.
*/
env->PushLocalFrame(200);


if (register_jni_procs(gRegJNI, NELEM(gRegJNI), env) < 0) {
env->PopLocalFrame(NULL);
return -1;
}
env->PopLocalFrame(NULL);


//createJavaThread("fubar", quickTest, (void*) "hello");


return 0;
}


AndroidRuntime::start calls AndroidRuntime::startReg:
++++++++++++++++++++++++++++++AndroidRuntime::start++++++++++++++++++++++++++++++++++
/*
* Start the Android runtime. This involves starting the virtual machine
* and calling the "static void main(String[] args)" method in the class
* named by "className".
*/
void AndroidRuntime::start(const char* className, const bool startSystemServer)
{
LOGD("\n>>>>>> AndroidRuntime START %s <<<<<<\n",
className != NULL ? className : "(unknown)");


char* slashClassName = NULL;
char* cp;
JNIEnv* env;


blockSigpipe();


/*
* 'startSystemServer == true' means runtime is obslete and not run from
* init.rc anymore, so we print out the boot start event here.
*/
if (startSystemServer) {
/* track our progress through the boot sequence */
const int LOG_BOOT_PROGRESS_START = 3000;
LOG_EVENT_LONG(LOG_BOOT_PROGRESS_START,
ns2ms(systemTime(SYSTEM_TIME_MONOTONIC)));
}


const char* rootDir = getenv("ANDROID_ROOT");
if (rootDir == NULL) {
rootDir = "/system";
if (!hasDir("/system")) {
LOG_FATAL("No root directory specified, and /android does not exist.");
goto bail;
}
setenv("ANDROID_ROOT", rootDir, 1);
}


//const char* kernelHack = getenv("LD_ASSUME_KERNEL");
//LOGD("Found LD_ASSUME_KERNEL='%s'\n", kernelHack);


/* start the virtual machine */
if (startVm(&mJavaVM, &env) != 0)
goto bail;


/*
* Register android functions.
*/
if (startReg(env) < 0) {
LOGE("Unable to register all android natives\n");
goto bail;
}


/*
* We want to call main() with a String array with arguments in it.
* At present we only have one argument, the class name. Create an
* array to hold it.
*/
jclass stringClass;
jobjectArray strArray;
jstring classNameStr;
jstring startSystemServerStr;


stringClass = env->FindClass("java/lang/String");
assert(stringClass != NULL);
strArray = env->NewObjectArray(2, stringClass, NULL);
assert(strArray != NULL);
classNameStr = env->NewStringUTF(className);
assert(classNameStr != NULL);
env->SetObjectArrayElement(strArray, 0, classNameStr);
startSystemServerStr = env->NewStringUTF(startSystemServer ?
"true" : "false");
env->SetObjectArrayElement(strArray, 1, startSystemServerStr);


/*
* Start VM. This thread becomes the main thread of the VM, and will
* not return until the VM exits.
*/
jclass startClass;
jmethodID startMeth;


slashClassName = strdup(className);
for (cp = slashClassName; *cp != '\0'; cp++)
if (*cp == '.')
*cp = '/';


startClass = env->FindClass(slashClassName);
if (startClass == NULL) {
LOGE("JavaVM unable to locate class '%s'\n", slashClassName);
/* keep going */
} else {
startMeth = env->GetStaticMethodID(startClass, "main",
"([Ljava/lang/String;)V");
if (startMeth == NULL) {
LOGE("JavaVM unable to find main() in '%s'\n", className);
/* keep going */
} else {
env->CallStaticVoidMethod(startClass, startMeth, strArray);


#if 0
if (env->ExceptionCheck())
threadExitUncaughtException(env);
#endif
}
}


LOGD("Shutting down VM\n");
if (mJavaVM->DetachCurrentThread() != JNI_OK)
LOGW("Warning: unable to detach main thread\n");
if (mJavaVM->DestroyJavaVM() != 0)
LOGW("Warning: VM did not shut down cleanly\n");


bail:
free(slashClassName);
}
------------------------------AndroidRuntime::start----------------------------------
-----------------------------AndroidRuntime::startReg-----------------------------------
------------------------------androidSetCreateThreadFunc----------------------------------
}
---------------------------------androidCreateThreadEtc-------------------------------
}
-------------------------------createThreadEtc---------------------------------
} else {
res = androidCreateRawThreadEtc(_threadLoop,
this, name, priority, stack, &mThread);
+++++++++++++++++++++++++++++++androidCreateRawThreadEtc+++++++++++++++++++++++++++++++++
androidCreateRawThreadEtc has two implementations:


#if defined(HAVE_PTHREADS)


int androidCreateRawThreadEtc(android_thread_func_t entryFunction,
void *userData,
const char* threadName,
int32_t threadPriority,
size_t threadStackSize,
android_thread_id_t *threadId)
{
pthread_attr_t attr;
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);


#ifdef HAVE_ANDROID_OS /* valgrind is rejecting RT-priority create reqs */
if (threadPriority != PRIORITY_DEFAULT || threadName != NULL) {
// We could avoid the trampoline if there was a way to get to the
// android_thread_id_t (pid) from pthread_t
thread_data_t* t = new thread_data_t;
t->priority = threadPriority;
t->threadName = threadName ? strdup(threadName) : NULL;
t->entryFunction = entryFunction;
t->userData = userData;
entryFunction = (android_thread_func_t)&thread_data_t::trampoline;
userData = t;
}
#endif


if (threadStackSize) {
pthread_attr_setstacksize(&attr, threadStackSize);
}

errno = 0;
pthread_t thread;
int result = pthread_create(&thread, &attr,
(android_pthread_entry)entryFunction, userData);
if (result != 0) {
LOGE("androidCreateRawThreadEtc failed (entry=%p, res=%d, errno=%d)\n"
"(android threadPriority=%d)",
entryFunction, result, errno, threadPriority);
return 0;
}


if (threadId != NULL) {
*threadId = (android_thread_id_t)thread; // XXX: this is not portable
}
return 1;
}


#elif defined(HAVE_WIN32_THREADS)


int androidCreateRawThreadEtc(android_thread_func_t fn,
void *userData,
const char* threadName,
int32_t threadPriority,
size_t threadStackSize,
android_thread_id_t *threadId)
{
return doCreateThread( fn, userData, threadId);
++++++++++++++++++++++++++++++doCreateThread++++++++++++++++++++++++++++++++++
/*
* Create and run a new thread.
*/
static bool doCreateThread(android_thread_func_t fn, void* arg, android_thread_id_t *id)
{
HANDLE hThread;
struct threadDetails* pDetails = new threadDetails; // must be on heap
unsigned int thrdaddr;


pDetails->func = fn;
pDetails->arg = arg;


#if defined(HAVE__BEGINTHREADEX)
hThread = (HANDLE) _beginthreadex(NULL, 0, threadIntermediary, pDetails, 0,
&thrdaddr);
if (hThread == 0)
#elif defined(HAVE_CREATETHREAD)
hThread = CreateThread(NULL, 0,
(LPTHREAD_START_ROUTINE) threadIntermediary,
(void*) pDetails, 0, (DWORD*) &thrdaddr);
if (hThread == NULL)
#endif
{
LOG(LOG_WARN, "thread", "WARNING: thread create failed\n");
return false;
}


#if defined(HAVE_CREATETHREAD)
/* close the management handle */
CloseHandle(hThread);
#endif


if (id != NULL) {
*id = (android_thread_id_t)thrdaddr;
}


return true;
}
------------------------------doCreateThread----------------------------------
}


#endif
-------------------------------androidCreateRawThreadEtc---------------------------------
}

if (res == false) {
mStatus = UNKNOWN_ERROR; // something happened!
mRunning = false;
mThread = thread_id_t(-1);
mHoldSelf.clear(); // "this" may have gone away after this.


return UNKNOWN_ERROR;
}

// Do not refer to mStatus here: The thread is already running (may, in fact
// already have exited with a valid mStatus result). The NO_ERROR indication
// here merely indicates successfully starting the thread and does not
// imply successful termination/execution.
return NO_ERROR;
}


// When the thread was created above, the entry function we passed in was _threadLoop:
++++++++++++++++++++++++++++Thread::_threadLoop++++++++++++++++++++++++++++++++++++
int Thread::_threadLoop(void* user)
{
Thread* const self = static_cast<Thread*>(user);
sp<Thread> strong(self->mHoldSelf);
wp<Thread> weak(strong);
self->mHoldSelf.clear();


#if HAVE_ANDROID_OS
// this is very useful for debugging with gdb
self->mTid = gettid();
#endif


bool first = true;


do {
bool result;
if (first) {
first = false;
self->mStatus = self->readyToRun();
result = (self->mStatus == NO_ERROR);


if (result && !self->mExitPending) {
// Binder threads (and maybe others) rely on threadLoop
// running at least once after a successful ::readyToRun()
// (unless, of course, the thread has already been asked to exit
// at that point).
// This is because threads are essentially used like this:
// (new ThreadSubclass())->run();
// The caller therefore does not retain a strong reference to
// the thread and the thread would simply disappear after the
// successful ::readyToRun() call instead of entering the
// threadLoop at least once.
// As you can see, what is ultimately invoked is each thread subclass's own threadLoop():
result = self->threadLoop();
+++++++++++++++++++++++++++AudioTrack::AudioTrackThread::threadLoop+++++++++++++++++++++++++++++++++++++
bool AudioTrack::AudioTrackThread::threadLoop()
{
// What actually gets called is AudioTrack::processAudioBuffer:
return mReceiver.processAudioBuffer(this);
++++++++++++++++++++++++++++AudioTrack::processAudioBuffer++++++++++++++++++++++++++++++++++++
bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
{
Buffer audioBuffer;
uint32_t frames;
size_t writtenSize;


// Manage underrun callback
if (mActive && (mCblk->framesReady() == 0)) {
LOGV("Underrun user: %x, server: %x, flags %04x", mCblk->user, mCblk->server, mCblk->flags);
if ((mCblk->flags & CBLK_UNDERRUN_MSK) == CBLK_UNDERRUN_OFF) {
mCbf(EVENT_UNDERRUN, mUserData, 0);
if (mCblk->server == mCblk->frameCount) {
// The function pointer mCbf points to audioCallback:
mCbf(EVENT_BUFFER_END, mUserData, 0);
++++++++++++++++++++++++++++audioCallback++++++++++++++++++++++++++++++++++++
static void audioCallback(int event, void* user, void *info) {
if (event == AudioTrack::EVENT_MORE_DATA) {
// set size to 0 to signal we're not using the callback to write more data
AudioTrack::Buffer* pBuff = (AudioTrack::Buffer*)info;
pBuff->size = 0;

} else if (event == AudioTrack::EVENT_MARKER) {
audiotrack_callback_cookie *callbackInfo = (audiotrack_callback_cookie *)user;
JNIEnv *env = AndroidRuntime::getJNIEnv();
if (user && env) {
env->CallStaticVoidMethod(
callbackInfo->audioTrack_class,
javaAudioTrackFields.postNativeEventInJava,
callbackInfo->audioTrack_ref, event, 0,0, NULL);
if (env->ExceptionCheck()) {
env->ExceptionDescribe();
env->ExceptionClear();
}
}


} else if (event == AudioTrack::EVENT_NEW_POS) {
audiotrack_callback_cookie *callbackInfo = (audiotrack_callback_cookie *)user;
JNIEnv *env = AndroidRuntime::getJNIEnv();
if (user && env) {
env->CallStaticVoidMethod(
callbackInfo->audioTrack_class,
javaAudioTrackFields.postNativeEventInJava,
callbackInfo->audioTrack_ref, event, 0,0, NULL);
if (env->ExceptionCheck()) {
env->ExceptionDescribe();
env->ExceptionClear();
}
}
}
}
----------------------------audioCallback------------------------------------
}
mCblk->flags |= CBLK_UNDERRUN_ON;
if (mSharedBuffer != 0) return false;
}
}


// Manage loop end callback
while (mLoopCount > mCblk->loopCount) {
int loopCount = -1;
mLoopCount--;
if (mLoopCount >= 0) loopCount = mLoopCount;


mCbf(EVENT_LOOP_END, mUserData, (void *)&loopCount);
}


// Manage marker callback
if (!mMarkerReached && (mMarkerPosition > 0)) {
if (mCblk->server >= mMarkerPosition) {
mCbf(EVENT_MARKER, mUserData, (void *)&mMarkerPosition);
mMarkerReached = true;
}
}


// Manage new position callback
if (mUpdatePeriod > 0) {
while (mCblk->server >= mNewPosition) {
mCbf(EVENT_NEW_POS, mUserData, (void *)&mNewPosition);
mNewPosition += mUpdatePeriod;
}
}


// If Shared buffer is used, no data is requested from client.
if (mSharedBuffer != 0) {
frames = 0;
} else {
frames = mRemainingFrames;
}


do {


audioBuffer.frameCount = frames;


// Calling obtainBuffer() with a wait count of 1
// limits wait time to WAIT_PERIOD_MS. This prevents from being
// stuck here not being able to handle timed events (position, markers, loops).
status_t err = obtainBuffer(&audioBuffer, 1);
if (err < NO_ERROR) {
if (err != TIMED_OUT) {
LOGE_IF(err != status_t(NO_MORE_BUFFERS), "Error obtaining an audio buffer, giving up.");
return false;
}
break;
}
if (err == status_t(STOPPED)) return false;


// Divide buffer size by 2 to take into account the expansion
// due to 8 to 16 bit conversion: the callback must fill only half
// of the destination buffer
if (mFormat == AudioSystem::PCM_8_BIT && !(mFlags & AudioSystem::OUTPUT_FLAG_DIRECT)) {
audioBuffer.size >>= 1;
}


size_t reqSize = audioBuffer.size;
mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
writtenSize = audioBuffer.size;


// Sanity check on returned size
if (ssize_t(writtenSize) <= 0) {
// The callback is done filling buffers
// Keep this thread going to handle timed events and
// still try to get more data in intervals of WAIT_PERIOD_MS
// but don't just loop and block the CPU, so wait
usleep(WAIT_PERIOD_MS*1000);
break;
}
if (writtenSize > reqSize) writtenSize = reqSize;


if (mFormat == AudioSystem::PCM_8_BIT && !(mFlags & AudioSystem::OUTPUT_FLAG_DIRECT)) {
// 8 to 16 bit conversion
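// Each unsigned 8-bit sample is converted to a signed 16-bit sample by XOR-ing with 0x80 (offset binary to two's complement) and shifting left by 8; the loop walks backwards so the expansion can be done in place without overwriting source bytes that have not been read yet.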
const int8_t *src = audioBuffer.i8 + writtenSize-1;
int count = writtenSize;
int16_t *dst = audioBuffer.i16 + writtenSize-1;
while(count--) {
*dst-- = (int16_t)(*src--^0x80) << 8;
}
writtenSize <<= 1;
}


audioBuffer.size = writtenSize;
// NOTE: mCblk->frameSize is not equal to AudioTrack::frameSize() for
// 8 bit PCM data: in this case, mCblk->frameSize is based on a sampel size of
// 16 bit.
audioBuffer.frameCount = writtenSize/mCblk->frameSize;


frames -= audioBuffer.frameCount;


releaseBuffer(&audioBuffer);
}
while (frames);


if (frames == 0) {
mRemainingFrames = mNotificationFramesAct;
} else {
mRemainingFrames = frames;
}
return true;
}
----------------------------AudioTrack::processAudioBuffer------------------------------------
}
---------------------------AudioTrack::AudioTrackThread::threadLoop-------------------------------------
}
} else {
result = self->threadLoop();
}


if (result == false || self->mExitPending) {
self->mExitPending = true;
self->mLock.lock();
self->mRunning = false;
// clear thread ID so that requestExitAndWait() does not exit if
// called by a new thread using the same thread ID as this one.
self->mThread = thread_id_t(-1);
self->mThreadExitedCondition.broadcast();
self->mThread = thread_id_t(-1); // thread id could be reused
self->mLock.unlock();
break;
}

// Release our strong reference, to let a chance to the thread
// to die a peaceful death.
strong.clear();
// And immediately, re-acquire a strong reference for the next loop
strong = weak.promote();
} while(strong != 0);

return 0;
}
----------------------------Thread::_threadLoop------------------------------------
-------------------------------Thread::run---------------------------------
} else {
setpriority(PRIO_PROCESS, 0, THREAD_PRIORITY_AUDIO_CLIENT);
}


if (mCblk->flags & CBLK_INVALID_MSK) {
LOGW("start() track %p invalidated, creating a new one", this);
// no need to clear the invalid flag as this cblk will not be used anymore
// force new track creation
status = DEAD_OBJECT;
} else {
// mAudioTrack is assigned in AudioTrack::createTrack; what it ultimately points to is a TrackHandle object.
status = mAudioTrack->start();
+++++++++++++++++++++++++++AudioFlinger::TrackHandle::start+++++++++++++++++++++++++++++++++++++
status_t AudioFlinger::TrackHandle::start() {
return mTrack->start();
+++++++++++++++++++++++++++++++AudioFlinger::PlaybackThread::Track::start+++++++++++++++++++++++++++++++++
status_t AudioFlinger::PlaybackThread::Track::start()
{
status_t status = NO_ERROR;
LOGV("start(%d), calling thread %d session %d",
mName, IPCThreadState::self()->getCallingPid(), mSessionId);
sp<ThreadBase> thread = mThread.promote();
if (thread != 0) {
Mutex::Autolock _l(thread->mLock);
int state = mState;
// here the track could be either new, or restarted
// in both cases "unstop" the track
if (mState == PAUSED) {
mState = TrackBase::RESUMING;
LOGV("PAUSED => RESUMING (%d) on thread %p", mName, this);
} else {
mState = TrackBase::ACTIVE;
LOGV("? => ACTIVE (%d) on thread %p", mName, this);
}


if (!isOutputTrack() && state != ACTIVE && state != RESUMING) {
thread->mLock.unlock();
status = AudioSystem::startOutput(thread->id(),
(AudioSystem::stream_type)mStreamType,
mSessionId);
+++++++++++++++++++++++AudioSystem::startOutput+++++++++++++++++++++++++++++++++++++++++
status_t AudioSystem::startOutput(audio_io_handle_t output,
AudioSystem::stream_type stream,
int session)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
return aps->startOutput(output, stream, session);
++++++++++++++++++++++++AudioPolicyService::startOutput++++++++++++++++++++++++++++++++++++++++
status_t AudioPolicyService::startOutput(audio_io_handle_t output,
AudioSystem::stream_type stream,
int session)
{
if (mpPolicyManager == NULL) {
return NO_INIT;
}
LOGV("startOutput() tid %d", gettid());
Mutex::Autolock _l(mLock);
return mpPolicyManager->startOutput(output, stream, session);
+++++++++++++++++++++++++++AudioPolicyManagerBase::startOutput+++++++++++++++++++++++++++++++++++++
status_t AudioPolicyManagerBase::startOutput(audio_io_handle_t output,
AudioSystem::stream_type stream,
int session)
{
LOGV("startOutput() output %d, stream %d, session %d", output, stream, session);
// AudioPolicyManagerBase::addOutput calls mOutputs.add() to add entries to mOutputs:
ssize_t index = mOutputs.indexOfKey(output);
+++++++++++++++++++++++++++++AudioPolicyManagerBase::addOutput+++++++++++++++++++++++++++++++++++
void AudioPolicyManagerBase::addOutput(audio_io_handle_t id, AudioOutputDescriptor *outputDesc)
{
outputDesc->mId = id;
mOutputs.add(id, outputDesc);
}


Several places in AudioPolicyManagerBase call AudioPolicyManagerBase::addOutput.
Typically, after mpClientInterface->openOutput is called, AudioPolicyManagerBase::addOutput is called to add the new output to mOutputs.
(mpClientInterface is in fact an AudioPolicyService object.)
For example: AudioPolicyManagerBase::getOutput and the AudioPolicyManagerBase constructor.
When an AudioTrack object is created, AudioTrack::set ends up calling AudioPolicyManagerBase::getOutput.
The AudioPolicyManagerBase object itself is created in the AudioPolicyService constructor.
-----------------------------AudioPolicyManagerBase::addOutput-----------------------------------
if (index < 0) {
LOGW("startOutput() unknow output %d", output);
return BAD_VALUE;
}


// In mOutputs, output is the key and outputDesc is the value.
AudioOutputDescriptor *outputDesc = mOutputs.valueAt(index);
// getStrategy() simply returns the routing strategy for the given stream type.
routing_strategy strategy = getStrategy((AudioSystem::stream_type)stream);


#ifdef WITH_A2DP
if (mA2dpOutput != 0 && !a2dpUsedForSonification() && strategy == STRATEGY_SONIFICATION) {
setStrategyMute(STRATEGY_MEDIA, true, mA2dpOutput);
++++++++++++++++++++++++++++AudioPolicyManagerBase::setStrategyMute++++++++++++++++++++++++++++++++++++
void AudioPolicyManagerBase::setStrategyMute(routing_strategy strategy, bool on, audio_io_handle_t output, int delayMs)
{
LOGV("setStrategyMute() strategy %d, mute %d, output %d", strategy, on, output);
// Mute every stream that maps to this strategy.
for (int stream = 0; stream < AudioSystem::NUM_STREAM_TYPES; stream++) {
if (getStrategy((AudioSystem::stream_type)stream) == strategy) {
setStreamMute(stream, on, output, delayMs);
+++++++++++++++++++++++++++++AudioPolicyManagerBase::setStreamMute+++++++++++++++++++++++++++++++++++
void AudioPolicyManagerBase::setStreamMute(int stream, bool on, audio_io_handle_t output, int delayMs)
{
StreamDescriptor &streamDesc = mStreams[stream];
++++++++++++++++++++++++++++++StreamDescriptor++++++++++++++++++++++++++++++++++
// stream descriptor used for volume control
class StreamDescriptor
{
public:
StreamDescriptor()
: mIndexMin(0), mIndexMax(1), mIndexCur(1), mCanBeMuted(true) {}


void dump(char* buffer, size_t size);


int mIndexMin; // min volume index
int mIndexMax; // max volume index
int mIndexCur; // current volume index
bool mCanBeMuted; // true is the stream can be muted
};
------------------------------StreamDescriptor----------------------------------
AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output);
++++++++++++++++++++++++++++++AudioOutputDescriptor++++++++++++++++++++++++++++++++++
// descriptor for audio outputs. Used to maintain current configuration of each opened audio output
// and keep track of the usage of this output by each audio stream type.
class AudioOutputDescriptor
{
public:
AudioOutputDescriptor();


status_t dump(int fd);


uint32_t device();
void changeRefCount(AudioSystem::stream_type, int delta);
uint32_t refCount();
uint32_t strategyRefCount(routing_strategy strategy);
bool isUsedByStrategy(routing_strategy strategy) { return (strategyRefCount(strategy) != 0);}
bool isDuplicated() { return (mOutput1 != NULL && mOutput2 != NULL); }


audio_io_handle_t mId; // output handle
uint32_t mSamplingRate; //
uint32_t mFormat; //
uint32_t mChannels; // output configuration
uint32_t mLatency; //
AudioSystem::output_flags mFlags; //
uint32_t mDevice; // current device this output is routed to
uint32_t mRefCount[AudioSystem::NUM_STREAM_TYPES]; // number of streams of each type using this output
AudioOutputDescriptor *mOutput1; // used by duplicated outputs: first output
AudioOutputDescriptor *mOutput2; // used by duplicated outputs: second output
float mCurVolume[AudioSystem::NUM_STREAM_TYPES]; // current stream volume
int mMuteCount[AudioSystem::NUM_STREAM_TYPES]; // mute request counter
};
------------------------------AudioOutputDescriptor----------------------------------


LOGV("setStreamMute() stream %d, mute %d, output %d, mMuteCount %d", stream, on, output, outputDesc->mMuteCount[stream]);


if (on) {
if (outputDesc->mMuteCount[stream] == 0) {
if (streamDesc.mCanBeMuted) {
checkAndSetVolume(stream, 0, output, outputDesc->device(), delayMs);
++++++++++++++++++++++++++++AudioPolicyManagerBase::checkAndSetVolume++++++++++++++++++++++++++++++++++++
status_t AudioPolicyManagerBase::checkAndSetVolume(int stream, int index, audio_io_handle_t output, uint32_t device, int delayMs, bool force)
{


// do not change actual stream volume if the stream is muted
if (mOutputs.valueFor(output)->mMuteCount[stream] != 0) {
LOGV("checkAndSetVolume() stream %d muted count %d", stream, mOutputs.valueFor(output)->mMuteCount[stream]);
return NO_ERROR;
}


// do not change in call volume if bluetooth is connected and vice versa
if ((stream == AudioSystem::VOICE_CALL && mForceUse[AudioSystem::FOR_COMMUNICATION] == AudioSystem::FORCE_BT_SCO) ||
(stream == AudioSystem::BLUETOOTH_SCO && mForceUse[AudioSystem::FOR_COMMUNICATION] != AudioSystem::FORCE_BT_SCO)) {
LOGV("checkAndSetVolume() cannot set stream %d volume with force use = %d for comm",
stream, mForceUse[AudioSystem::FOR_COMMUNICATION]);
return INVALID_OPERATION;
}


float volume = computeVolume(stream, index, output, device);
+++++++++++++++++++++++++++++++AudioPolicyManagerBase::computeVolume+++++++++++++++++++++++++++++++++
float AudioPolicyManagerBase::computeVolume(int stream, int index, audio_io_handle_t output, uint32_t device)
{
float volume = 1.0;
AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output);
StreamDescriptor &streamDesc = mStreams[stream];


if (device == 0) {
device = outputDesc->device();
}


int volInt = (100 * (index - streamDesc.mIndexMin)) / (streamDesc.mIndexMax - streamDesc.mIndexMin);
volume = AudioSystem::linearToLog(volInt);
+++++++++++++++++++++++++++++++AudioSystem::linearToLog+++++++++++++++++++++++++++++++++
float AudioSystem::linearToLog(int volume)
{
// float v = volume ? exp(float(100 - volume) * dBConvert) : 0;
// LOGD("linearToLog(%d)=%f", volume, v);
// return v;
return volume ? exp(float(100 - volume) * dBConvert) : 0;
}
-------------------------------AudioSystem::linearToLog---------------------------------
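To get a feel for this index-to-gain mapping, here is a minimal standalone sketch. The dBPerStep value of 0.5f (and hence dBConvert) is my assumption based on AudioSystem.cpp, and the 0..15 index range with a current index of 10 is only a hypothetical, STREAM_MUSIC-like example, not something taken from this code path:

#include <cmath>
#include <cstdio>

int main() {
    // assumed constants mirroring AudioSystem::dBPerStep / dBConvert
    const float dBPerStep = 0.5f;
    const float dBConvert = -dBPerStep * 2.302585093f / 20.0f;

    // hypothetical stream volume indices (illustration only)
    int indexMin = 0, indexMax = 15, indexCur = 10;

    // same arithmetic as computeVolume() followed by linearToLog()
    int volInt = (100 * (indexCur - indexMin)) / (indexMax - indexMin); // 66
    float volume = volInt ? expf(float(100 - volInt) * dBConvert) : 0;  // ~0.14, i.e. about -17 dB

    printf("volInt=%d volume=%f\n", volInt, volume);
    return 0;
}

Under this assumption, every step below 100 on the internal 0..100 scale attenuates the gain by 0.5 dB, with index 0 mapping to silence and the maximum index to unity gain.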


// if a headset is connected, apply the following rules to ring tones and notifications
// to avoid sound level bursts in user's ears:
// - always attenuate ring tones and notifications volume by 6dB
// - if music is playing, always limit the volume to current music volume,
// with a minimum threshold at -36dB so that notification is always perceived.
if ((device &
(AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP |
AudioSystem::DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
AudioSystem::DEVICE_OUT_WIRED_HEADSET |
AudioSystem::DEVICE_OUT_WIRED_HEADPHONE)) &&
((getStrategy((AudioSystem::stream_type)stream) == STRATEGY_SONIFICATION) ||
(stream == AudioSystem::SYSTEM)) &&
streamDesc.mCanBeMuted) {
// Attenuation applied to STRATEGY_SONIFICATION streams when a headset is connected: 6dB
// #define SONIFICATION_HEADSET_VOLUME_FACTOR 0.5
volume *= SONIFICATION_HEADSET_VOLUME_FACTOR;
// when the phone is ringing we must consider that music could have been paused just before
// by the music application and behave as if music was active if the last music track was
// just stopped
// mLimitRingtoneVolume is assigned in AudioPolicyManagerBase::setPhoneState:
++++++++++++++++++++++++++++++mLimitRingtoneVolume++++++++++++++++++++++++++++++++++
// Flag that ringtone volume must be limited to music volume until we exit MODE_RINGTONE
if (state == AudioSystem::MODE_RINGTONE &&
(hwOutputDesc->mRefCount[AudioSystem::MUSIC] ||
(systemTime() - mMusicStopTime) < seconds(SONIFICATION_HEADSET_MUSIC_DELAY))) {
mLimitRingtoneVolume = true;
} else {
mLimitRingtoneVolume = false;
}
------------------------------mLimitRingtoneVolume----------------------------------
if (outputDesc->mRefCount[AudioSystem::MUSIC] || mLimitRingtoneVolume) {
float musicVol = computeVolume(AudioSystem::MUSIC, mStreams[AudioSystem::MUSIC].mIndexCur, output, device);
float minVol = (musicVol > SONIFICATION_HEADSET_VOLUME_MIN) ? musicVol : SONIFICATION_HEADSET_VOLUME_MIN;
if (volume > minVol) {
volume = minVol;
LOGV("computeVolume limiting volume to %f musicVol %f", minVol, musicVol);
}
}
}


return volume;
}
-------------------------------AudioPolicyManagerBase::computeVolume---------------------------------
// We actually change the volume if:
// - the float value returned by computeVolume() changed
// - the force flag is set
if (volume != mOutputs.valueFor(output)->mCurVolume[stream] ||
force) {
mOutputs.valueFor(output)->mCurVolume[stream] = volume;
LOGV("setStreamVolume() for output %d stream %d, volume %f, delay %d", output, stream, volume, delayMs);
if (stream == AudioSystem::VOICE_CALL ||
stream == AudioSystem::DTMF ||
stream == AudioSystem::BLUETOOTH_SCO) {
// offset value to reflect actual hardware volume that never reaches 0
// 1% corresponds roughly to first step in VOICE_CALL stream volume setting (see AudioService.java)
volume = 0.01 + 0.99 * volume;
}
mpClientInterface->setStreamVolume((AudioSystem::stream_type)stream, volume, output, delayMs);
+++++++++++++++++++++++++++++AudioPolicyService::setStreamVolume+++++++++++++++++++++++++++++++++++
status_t AudioPolicyService::setStreamVolume(AudioSystem::stream_type stream,
float volume,
audio_io_handle_t output,
int delayMs)
{
// mAudioCommandThread is created in the AudioPolicyService constructor.
return mAudioCommandThread->volumeCommand((int)stream, volume, (int)output, delayMs);
++++++++++++++++++++++++++++++AudioPolicyService::AudioCommandThread::volumeCommand++++++++++++++++++++++++++++++++++
status_t AudioPolicyService::AudioCommandThread::volumeCommand(int stream,
float volume,
int output,
int delayMs)
{
status_t status = NO_ERROR;


AudioCommand *command = new AudioCommand();
command->mCommand = SET_VOLUME;
VolumeData *data = new VolumeData();
data->mStream = stream;
data->mVolume = volume;
data->mIO = output;
command->mParam = data;
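// When delayMs == 0 the caller waits synchronously: mWaitStatus makes volumeCommand() block on command->mCond (further down) until threadLoop() has executed the command and signalled the status back.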
if (delayMs == 0) {
command->mWaitStatus = true;
} else {
command->mWaitStatus = false;
}
Mutex::Autolock _l(mLock);
insertCommand_l(command, delayMs);
+++++++++++++++++++++++++++++++++AudioPolicyService::AudioCommandThread::insertCommand_l+++++++++++++++++++++++++++++++
// insertCommand_l() must be called with mLock held
void AudioPolicyService::AudioCommandThread::insertCommand_l(AudioCommand *command, int delayMs)
{
ssize_t i;
Vector <AudioCommand *> removedCommands;


command->mTime = systemTime() + milliseconds(delayMs);


// acquire wake lock to make sure delayed commands are processed
if (mName != "" && mAudioCommands.isEmpty()) {
acquire_wake_lock(PARTIAL_WAKE_LOCK, mName.string());
}


// check same pending commands with later time stamps and eliminate them
// If the same kind of command is already pending in the queue with a later time stamp than the new one, those pending commands are removed.
// The queue is scanned from back to front.
for (i = mAudioCommands.size()-1; i >= 0; i--) {
AudioCommand *command2 = mAudioCommands[i];
// commands are sorted by increasing time stamp: no need to scan the rest of mAudioCommands
// Commands in the queue are sorted by time stamp, with the earliest at the head.
// Scanning from the back, once we hit a time stamp no later than the new command's, every command before it is earlier as well, so there is no need to compare any further.
if (command2->mTime <= command->mTime) break;
// Only compare commands of the same type.
if (command2->mCommand != command->mCommand) continue;


switch (command->mCommand) {
// For SET_PARAMETERS, a command may carry several parameters, so only the keys that are also set by the new command are filtered out of the older pending command.
// If all of the older command's keys are filtered out this way, the older command is removed entirely; otherwise its key/value pairs are updated.
case SET_PARAMETERS: {
ParametersData *data = (ParametersData *)command->mParam;
ParametersData *data2 = (ParametersData *)command2->mParam;
if (data->mIO != data2->mIO) break;
LOGV("Comparing parameter command %s to new command %s",
data2->mKeyValuePairs.string(), data->mKeyValuePairs.string());
AudioParameter param = AudioParameter(data->mKeyValuePairs);
AudioParameter param2 = AudioParameter(data2->mKeyValuePairs);
for (size_t j = 0; j < param.size(); j++) {
String8 key;
String8 value;
param.getAt(j, key, value);
for (size_t k = 0; k < param2.size(); k++) {
String8 key2;
String8 value2;
param2.getAt(k, key2, value2);
if (key2 == key) {
param2.remove(key2);
LOGV("Filtering out parameter %s", key2.string());
break;
}
}
}
// if all keys have been filtered out, remove the command.
// otherwise, update the key value pairs
if (param2.size() == 0) {
removedCommands.add(command2);
} else {
data2->mKeyValuePairs = param2.toString();
}
} break;


// For SET_VOLUME, check whether the pending command sets the volume of the same stream on the same output as the new one.
// If so, the pending command is removed (e.g. an older SET_VOLUME for STREAM_MUSIC on output 1 is superseded by a newer one for the same stream and output).
case SET_VOLUME: {
VolumeData *data = (VolumeData *)command->mParam;
VolumeData *data2 = (VolumeData *)command2->mParam;
if (data->mIO != data2->mIO) break;
if (data->mStream != data2->mStream) break;
LOGV("Filtering out volume command on output %d for stream %d",
data->mIO, data->mStream);
removedCommands.add(command2);
} break;
case START_TONE:
case STOP_TONE:
default:
break;
}
}


// remove filtered commands
for (size_t j = 0; j < removedCommands.size(); j++) {
// removed commands always have time stamps greater than current command
for (size_t k = i + 1; k < mAudioCommands.size(); k++) {
if (mAudioCommands[k] == removedCommands[j]) {
LOGV("suppressing command: %d", mAudioCommands[k]->mCommand);
mAudioCommands.removeAt(k);
break;
}
}
}
removedCommands.clear();


// insert command at the right place according to its time stamp
LOGV("inserting command: %d at index %d, num commands %d",
command->mCommand, (int)i+1, mAudioCommands.size());
// Insert the command into the mAudioCommands queue; position i + 1 follows from the time-stamp scan above.
// The commands in mAudioCommands are processed in AudioPolicyService::AudioCommandThread::threadLoop:
mAudioCommands.insertAt(command, i + 1);
+++++++++++++++++++++++++++++++++AudioPolicyService::AudioCommandThread::threadLoop+++++++++++++++++++++++++++++++
bool AudioPolicyService::AudioCommandThread::threadLoop()
{
nsecs_t waitTime = INT64_MAX;


mLock.lock();
while (!exitPending())
{
while(!mAudioCommands.isEmpty()) {
nsecs_t curTime = systemTime();
// commands are sorted by increasing time stamp: execute them from index 0 and up
if (mAudioCommands[0]->mTime <= curTime) {
// Always take the first command for processing, since the first command's time stamp is always the earliest.
AudioCommand *command = mAudioCommands[0];
mAudioCommands.removeAt(0);
mLastCommand = *command;


switch (command->mCommand) {
case START_TONE: {
mLock.unlock();
ToneData *data = (ToneData *)command->mParam;
LOGV("AudioCommandThread() processing start tone %d on stream %d",
data->mType, data->mStream);
if (mpToneGenerator != NULL)
delete mpToneGenerator;
mpToneGenerator = new ToneGenerator(data->mStream, 1.0);
mpToneGenerator->startTone(data->mType);
delete data;
mLock.lock();
}break;
case STOP_TONE: {
mLock.unlock();
LOGV("AudioCommandThread() processing stop tone");
if (mpToneGenerator != NULL) {
mpToneGenerator->stopTone();
delete mpToneGenerator;
mpToneGenerator = NULL;
}
mLock.lock();
}break;
case SET_VOLUME: {
VolumeData *data = (VolumeData *)command->mParam;
LOGV("AudioCommandThread() processing set volume stream %d, \
volume %f, output %d", data->mStream, data->mVolume, data->mIO);
command->mStatus = AudioSystem::setStreamVolume(data->mStream,
data->mVolume,
data->mIO);
++++++++++++++++++++++++++++++AudioSystem::setStreamVolume++++++++++++++++++++++++++++++++++
status_t AudioSystem::setStreamVolume(int stream, float value, int output)
{
if (uint32_t(stream) >= NUM_STREAM_TYPES) return BAD_VALUE;
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
af->setStreamVolume(stream, value, output);
++++++++++++++++++++++++++++++AudioFlinger::setStreamVolume++++++++++++++++++++++++++++++++++
status_t AudioFlinger::setStreamVolume(int stream, float value, int output)
{
// check calling permissions
if (!settingsAllowed()) {
return PERMISSION_DENIED;
}


if (stream < 0 || uint32_t(stream) >= AudioSystem::NUM_STREAM_TYPES) {
return BAD_VALUE;
}


AutoMutex lock(mLock);
PlaybackThread *thread = NULL;
if (output) {
thread = checkPlaybackThread_l(output);
if (thread == NULL) {
return BAD_VALUE;
}
}


mStreamTypes[stream].volume = value;


// If no playback thread corresponds to the output, change the volume of this stream on every playback thread.
// Otherwise, change it only on the matching playback thread.
if (thread == NULL) {
for (uint32_t i = 0; i < mPlaybackThreads.size(); i++) {
mPlaybackThreads.valueAt(i)->setStreamVolume(stream, value);
}
} else {
thread->setStreamVolume(stream, value);
++++++++++++++++++++++++++++++AudioFlinger::PlaybackThread::setStreamVolume++++++++++++++++++++++++++++++++++
status_t AudioFlinger::PlaybackThread::setStreamVolume(int stream, float value)
{
#ifdef LVMX
int audioOutputType = LifeVibes::getMixerType(mId, mType);
if (LifeVibes::audioOutputTypeIsLifeVibes(audioOutputType)) {
LifeVibes::setStreamVolume(audioOutputType, stream, value);
}
#endif
// Only the software volume is changed here.
// The volume stored in mStreamTypes is used in AudioFlinger::MixerThread::prepareTracks_l:
mStreamTypes[stream].volume = value;
+++++++++++++++++++++++++++++mStreamTypes+++++++++++++++++++++++++++++++++++
// read original volumes with volume control
float typeVolume = mStreamTypes[track->type()].volume;
#ifdef LVMX
bool streamMute=false;
// read the volume from the LivesVibes audio engine.
if (LifeVibes::audioOutputTypeIsLifeVibes(audioOutputType))
{
LifeVibes::getStreamVolumes(audioOutputType, track->type(), &typeVolume, &streamMute);
if (streamMute) {
typeVolume = 0;
}
}
#endif
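// v is the combined float gain (master volume * stream-type volume); multiplying it by the per-track fixed-point volume in cblk->volume[] and shifting left another 12 bits yields the 8.24 values vl/vr, which the code further down converts back to 4.12.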
float v = masterVolume * typeVolume;
vl = (uint32_t)(v * cblk->volume[0]) << 12;
vr = (uint32_t)(v * cblk->volume[1]) << 12;


va = (uint32_t)(v * cblk->sendLevel);
}
// Delegate volume control to effect in track effect chain if needed
// The converted volume is consumed here.
// chain->setVolume_l() not only uses the volume, it may also modify it:
if (chain != 0 && chain->setVolume_l(&vl, &vr)) {
++++++++++++++++++++++++++++++AudioFlinger::EffectChain::setVolume_l++++++++++++++++++++++++++++++++++
// setVolume_l() must be called with PlaybackThread::mLock held
bool AudioFlinger::EffectChain::setVolume_l(uint32_t *left, uint32_t *right)
{
uint32_t newLeft = *left;
uint32_t newRight = *right;
bool hasControl = false;
int ctrlIdx = -1;
size_t size = mEffects.size();


// first update volume controller
for (size_t i = size; i > 0; i--) {
if (mEffects[i - 1]->isProcessEnabled() &&
(mEffects[i - 1]->desc().flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_CTRL) {
ctrlIdx = i - 1;
hasControl = true;
break;
}
}


if (ctrlIdx == mVolumeCtrlIdx && *left == mLeftVolume && *right == mRightVolume) {
if (hasControl) {
*left = mNewLeftVolume;
*right = mNewRightVolume;
}
return hasControl;
}


mVolumeCtrlIdx = ctrlIdx;
mLeftVolume = newLeft;
mRightVolume = newRight;


// second get volume update from volume controller
if (ctrlIdx >= 0) {
mEffects[ctrlIdx]->setVolume(&newLeft, &newRight, true);
+++++++++++++++++++++++++++++AudioFlinger::EffectModule::setVolume+++++++++++++++++++++++++++++++++++
status_t AudioFlinger::EffectModule::setVolume(uint32_t *left, uint32_t *right, bool controller)
{
Mutex::Autolock _l(mLock);
status_t status = NO_ERROR;


// Send volume indication if EFFECT_FLAG_VOLUME_IND is set and read back altered volume
// if controller flag is set (Note that controller == TRUE => EFFECT_FLAG_VOLUME_CTRL set)
if (isProcessEnabled() &&
((mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_CTRL ||
(mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_IND)) {
status_t cmdStatus;
uint32_t volume[2];
uint32_t *pVolume = NULL;
uint32_t size = sizeof(volume);
volume[0] = *left;
volume[1] = *right;
if (controller) {
pVolume = volume;
}
status = (*mEffectInterface)->command(mEffectInterface,
EFFECT_CMD_SET_VOLUME,
size,
volume,
&size,
pVolume);
if (controller && status == NO_ERROR && size == sizeof(volume)) {
*left = volume[0];
*right = volume[1];
}
}
return status;
}
-----------------------------AudioFlinger::EffectModule::setVolume-----------------------------------
mNewLeftVolume = newLeft;
mNewRightVolume = newRight;
}
// then indicate volume to all other effects in chain.
// Pass altered volume to effects before volume controller
// and requested volume to effects after controller
uint32_t lVol = newLeft;
uint32_t rVol = newRight;


for (size_t i = 0; i < size; i++) {
if ((int)i == ctrlIdx) continue;
// this also works for ctrlIdx == -1 when there is no volume controller
if ((int)i > ctrlIdx) {
lVol = *left;
rVol = *right;
}
mEffects[i]->setVolume(&lVol, &rVol, false);
}
*left = newLeft;
*right = newRight;


return hasControl;
}
------------------------------AudioFlinger::EffectChain::setVolume_l----------------------------------
// Do not ramp volume if volume is controlled by effect
param = AudioMixer::VOLUME;
track->mHasVolumeController = true;
} else {
// force no volume ramp when volume controller was just disabled or removed
// from effect chain to avoid volume spike
if (track->mHasVolumeController) {
param = AudioMixer::VOLUME;
}
track->mHasVolumeController = false;
}


// Convert volumes from 8.24 to 4.12 format
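// Adding (1 << 11), half of the 12 fractional bits being discarded, rounds to the nearest 4.12 value instead of truncating.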
int16_t left, right, aux;
uint32_t v_clamped = (vl + (1 << 11)) >> 12;
if (v_clamped > MAX_GAIN_INT) v_clamped = MAX_GAIN_INT;
left = int16_t(v_clamped);
v_clamped = (vr + (1 << 11)) >> 12;
if (v_clamped > MAX_GAIN_INT) v_clamped = MAX_GAIN_INT;
right = int16_t(v_clamped);


if (va > MAX_GAIN_INT) va = MAX_GAIN_INT;
aux = int16_t(va);


#ifdef LVMX
if ( tracksConnectedChanged || stateChanged )
{
// only do the ramp when the volume is changed by the user / application
param = AudioMixer::VOLUME;
}
#endif


// XXX: these things DON'T need to be done each time
mAudioMixer->setBufferProvider(track);
mAudioMixer->enable(AudioMixer::MIXING);


// setParameter() stores the volume values into the corresponding track inside the mixer.
// process__OneTrack16BitsStereoNoResampling later applies these volumes to the audio data.
mAudioMixer->setParameter(param, AudioMixer::VOLUME0, (void *)left);
mAudioMixer->setParameter(param, AudioMixer::VOLUME1, (void *)right);
mAudioMixer->setParameter(param, AudioMixer::AUXLEVEL, (void *)aux);
-----------------------------mStreamTypes-----------------------------------
return NO_ERROR;
}
------------------------------AudioFlinger::PlaybackThread::setStreamVolume----------------------------------
}


return NO_ERROR;
}
------------------------------AudioFlinger::setStreamVolume----------------------------------
return NO_ERROR;
}
------------------------------AudioSystem::setStreamVolume----------------------------------
if (command->mWaitStatus) {
command->mCond.signal();
mWaitWorkCV.wait(mLock);
}
delete data;
}break;
case SET_PARAMETERS: {
ParametersData *data = (ParametersData *)command->mParam;
LOGV("AudioCommandThread() processing set parameters string %s, io %d",
data->mKeyValuePairs.string(), data->mIO);
command->mStatus = AudioSystem::setParameters(data->mIO, data->mKeyValuePairs);
if (command->mWaitStatus) {
command->mCond.signal();
mWaitWorkCV.wait(mLock);
}
delete data;
}break;
case SET_VOICE_VOLUME: {
VoiceVolumeData *data = (VoiceVolumeData *)command->mParam;
LOGV("AudioCommandThread() processing set voice volume volume %f",
data->mVolume);
// This command is issued when AudioPolicyManagerBase::checkAndSetVolume calls mpClientInterface->setVoiceVolume.
// Let's expand that path here as well.
command->mStatus = AudioSystem::setVoiceVolume(data->mVolume);
++++++++++++++++++++++++++++++AudioSystem::setVoiceVolume++++++++++++++++++++++++++++++++++
status_t AudioSystem::setVoiceVolume(float value)
{
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
return af->setVoiceVolume(value);
++++++++++++++++++++++++++++++AudioFlinger::setVoiceVolume++++++++++++++++++++++++++++++++++
status_t AudioFlinger::setVoiceVolume(float value)
{
// check calling permissions
if (!settingsAllowed()) {
return PERMISSION_DENIED;
}


AutoMutex lock(mHardwareLock);
mHardwareStatus = AUDIO_SET_VOICE_VOLUME;
status_t ret = mAudioHardware->setVoiceVolume(value);
+++++++++++++++++++++++++++++AudioHardwareALSA::setVoiceVolume+++++++++++++++++++++++++++++++++++
status_t AudioHardwareALSA::setVoiceVolume(float volume)
{
/* could not set this params on SPDIF card, will return directly */
if (!strcmp(mCurCard,SPDIF))
return INVALID_OPERATION;
// The voice volume is used by the VOICE_CALL audio stream.


if (mMixer)
return mMixer->setVolume(AudioSystem::DEVICE_OUT_EARPIECE, volume, volume);
+++++++++++++++++++++++++++++ALSAMixer::setVolume+++++++++++++++++++++++++++++++++++
status_t ALSAMixer::setVolume(uint32_t device, float left, float right)
{
for (int j = 0; mixerProp[j][SND_PCM_STREAM_PLAYBACK].device; j++)
if (mixerProp[j][SND_PCM_STREAM_PLAYBACK].device & device) {


mixer_info_t *info = mixerProp[j][SND_PCM_STREAM_PLAYBACK].mInfo;
if (!info || !info->elem) return INVALID_OPERATION;


long minVol = info->min;
long maxVol = info->max;


// Make sure volume is between bounds.
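// left is a gain in [0.0, 1.0]; it is mapped linearly onto the ALSA control's [minVol, maxVol] range and then clamped.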
long vol = minVol + left * (maxVol - minVol);
if (vol > maxVol) vol = maxVol;
if (vol < minVol) vol = minVol;


info->volume = vol;
snd_mixer_selem_set_playback_volume_all (info->elem, vol);
+++++++++++++++++++++++++++++snd_mixer_selem_set_playback_volume_all+++++++++++++++++++++++++++++++++++
/**
* \brief Set value of playback volume control for all channels of a mixer simple element
* \param elem Mixer simple element handle
* \param value control value
* \return 0 on success otherwise a negative error code
*/
int snd_mixer_selem_set_playback_volume_all(snd_mixer_elem_t *elem, long value)
{
snd_mixer_selem_channel_id_t chn;
int err;


for (chn = 0; chn < 32; chn++) {
if (!snd_mixer_selem_has_playback_channel(elem, chn))
+++++++++++++++++++++++++++++++snd_mixer_selem_has_playback_channel+++++++++++++++++++++++++++++++++
/**
* \brief Get info about channels of playback stream of a mixer simple element
* \param elem Mixer simple element handle
* \param channel Mixer simple element channel identifier
* \return 0 if channel is not present, 1 if present
*/
int snd_mixer_selem_has_playback_channel(snd_mixer_elem_t *elem, snd_mixer_selem_channel_id_t channel)
{
CHECK_BASIC(elem);
return sm_selem_ops(elem)->is(elem, SM_PLAY, SM_OPS_IS_CHANNEL, (int)channel);
}
-------------------------------snd_mixer_selem_has_playback_channel---------------------------------
continue;
err = snd_mixer_selem_set_playback_volume(elem, chn, value);
+++++++++++++++++++++++++++++snd_mixer_selem_set_playback_volume+++++++++++++++++++++++++++++++++++
/**
* \brief Set value of playback volume control of a mixer simple element
* \param elem Mixer simple element handle
* \param channel mixer simple element channel identifier
* \param value control value
* \return 0 on success otherwise a negative error code
*/
int snd_mixer_selem_set_playback_volume(snd_mixer_elem_t *elem, snd_mixer_selem_channel_id_t channel, long value)
{
CHECK_BASIC(elem);
CHECK_DIR_CHN(elem, SM_CAP_PVOLUME, SM_CAP_PVOLUME_JOIN, channel);
// #define sm_selem_ops(x) ((sm_selem_t *)((x)->private_data))->ops
return sm_selem_ops(elem)->set_volume(elem, SM_PLAY, channel, value);
+++++++++++++++++++++++++++++sm_selem_t+++++++++++++++++++++++++++++++++++
typedef struct _sm_selem {
snd_mixer_selem_id_t *id;
struct sm_elem_ops *ops;
unsigned int caps;
unsigned int capture_group;
} sm_selem_t;
-----------------------------sm_selem_t-----------------------------------
}
-----------------------------snd_mixer_selem_set_playback_volume-----------------------------------
if (err < 0)
return err;
if (chn == 0 && snd_mixer_selem_has_playback_volume_joined(elem))
+++++++++++++++++++++++++++++++++snd_mixer_selem_has_playback_volume_joined+++++++++++++++++++++++++++++++
/**
* \brief Return info about playback volume control of a mixer simple element
* \param elem Mixer simple element handle
* \return 0 if control is separated per channel, 1 if control acts on all channels together
*/
int snd_mixer_selem_has_playback_volume_joined(snd_mixer_elem_t *elem)
{
CHECK_BASIC(elem);
return COND_CAPS(elem, SM_CAP_PVOLUME_JOIN);
}
---------------------------------snd_mixer_selem_has_playback_volume_joined-------------------------------
return 0;
}
return 0;
}
-----------------------------snd_mixer_selem_set_playback_volume_all-----------------------------------
}


return NO_ERROR;
}
-----------------------------ALSAMixer::setVolume-----------------------------------
else
return INVALID_OPERATION;
}
-----------------------------AudioHardwareALSA::setVoiceVolume-----------------------------------
mHardwareStatus = AUDIO_HW_IDLE;


return ret;
}
------------------------------AudioFlinger::setVoiceVolume----------------------------------
}
------------------------------AudioSystem::setVoiceVolume----------------------------------
if (command->mWaitStatus) {
command->mCond.signal();
mWaitWorkCV.wait(mLock);
}
delete data;
}break;
default:
LOGW("AudioCommandThread() unknown command %d", command->mCommand);
}
delete command;
waitTime = INT64_MAX;
} else {
waitTime = mAudioCommands[0]->mTime - curTime;
break;
}
}
// release delayed commands wake lock
if (mName != "" && mAudioCommands.isEmpty()) {
release_wake_lock(mName.string());
}
LOGV("AudioCommandThread() going to sleep");
mWaitWorkCV.waitRelative(mLock, waitTime);
LOGV("AudioCommandThread() waking up");
}
mLock.unlock();
return false;
}
---------------------------------AudioPolicyService::AudioCommandThread::threadLoop-------------------------------
}
---------------------------------AudioPolicyService::AudioCommandThread::insertCommand_l-------------------------------
LOGV("AudioCommandThread() adding set volume stream %d, volume %f, output %d",
stream, volume, output);
mWaitWorkCV.signal();
if (command->mWaitStatus) {
command->mCond.wait(mLock);
status = command->mStatus;
mWaitWorkCV.signal();
}
return status;
}
------------------------------AudioPolicyService::AudioCommandThread::volumeCommand----------------------------------
}
-----------------------------AudioPolicyService::setStreamVolume-----------------------------------
}


if (stream == AudioSystem::VOICE_CALL ||
stream == AudioSystem::BLUETOOTH_SCO) {
float voiceVolume;
// Force voice volume to max for bluetooth SCO as volume is managed by the headset
if (stream == AudioSystem::VOICE_CALL) {
voiceVolume = (float)index/(float)mStreams[stream].mIndexMax;
} else {
voiceVolume = 1.0;
}
if (voiceVolume != mLastVoiceVolume && output == mHardwareOutput) {
mpClientInterface->setVoiceVolume(voiceVolume, delayMs);
mLastVoiceVolume = voiceVolume;
}
}


return NO_ERROR;
}
----------------------------AudioPolicyManagerBase::checkAndSetVolume------------------------------------
}
}
// increment mMuteCount after calling checkAndSetVolume() so that volume change is not ignored
outputDesc->mMuteCount[stream]++;
} else {
if (outputDesc->mMuteCount[stream] == 0) {
LOGW("setStreamMute() unmuting non muted stream!");
return;
}
if (--outputDesc->mMuteCount[stream] == 0) {
checkAndSetVolume(stream, streamDesc.mIndexCur, output, outputDesc->device(), delayMs);
}
}
}
-----------------------------AudioPolicyManagerBase::setStreamMute-----------------------------------
}
}
}
----------------------------AudioPolicyManagerBase::setStrategyMute------------------------------------
}
#endif


// incremenent usage count for this stream on the requested output:
// NOTE that the usage count is the same for duplicated output and hardware output which is
// necassary for a correct control of hardware output routing by startOutput() and stopOutput()
outputDesc->changeRefCount(stream, 1);


setOutputDevice(output, getNewDevice(output));
++++++++++++++++++++++++++++AudioPolicyManagerBase::setOutputDevice++++++++++++++++++++++++++++++++++++
void AudioPolicyManagerBase::setOutputDevice(audio_io_handle_t output, uint32_t device, bool force, int delayMs)
{
LOGV("setOutputDevice() output %d device %x delayMs %d", output, device, delayMs);
AudioOutputDescriptor *outputDesc = mOutputs.valueFor(output);




if (outputDesc->isDuplicated()) {
setOutputDevice(outputDesc->mOutput1->mId, device, force, delayMs);
setOutputDevice(outputDesc->mOutput2->mId, device, force, delayMs);
return;
}
#ifdef WITH_A2DP
// filter devices according to output selected
if (output == mA2dpOutput) {
device &= AudioSystem::DEVICE_OUT_ALL_A2DP;
} else {
device &= ~AudioSystem::DEVICE_OUT_ALL_A2DP;
}
#endif


uint32_t prevDevice = (uint32_t)outputDesc->device();
// Do not change the routing if:
// - the requestede device is 0
// - the requested device is the same as current device and force is not specified.
// Doing this check here allows the caller to call setOutputDevice() without conditions
if ((device == 0 || device == prevDevice) && !force) {
LOGV("setOutputDevice() setting same device %x or null device for output %d", device, output);
return;
}


outputDesc->mDevice = device;
// mute media streams if both speaker and headset are selected
if (output == mHardwareOutput && AudioSystem::popCount(device) == 2) {
setStrategyMute(STRATEGY_MEDIA, true, output);
// wait for the PCM output buffers to empty before proceeding with the rest of the command
usleep(outputDesc->mLatency*2*1000);
}


// do the routing
AudioParameter param = AudioParameter();
param.addInt(String8(AudioParameter::keyRouting), (int)device);
mpClientInterface->setParameters(mHardwareOutput, param.toString(), delayMs);
+++++++++++++++++++++++++++++AudioPolicyService::setParameters+++++++++++++++++++++++++++++++++++
void AudioPolicyService::setParameters(audio_io_handle_t ioHandle,
const String8& keyValuePairs,
int delayMs)
{
mAudioCommandThread->parametersCommand((int)ioHandle, keyValuePairs, delayMs);
+++++++++++++++++++++++++++++AudioPolicyService::AudioCommandThread::parametersCommand+++++++++++++++++++++++++++++++++++
status_t AudioPolicyService::AudioCommandThread::parametersCommand(int ioHandle,
const String8& keyValuePairs,
int delayMs)
{
status_t status = NO_ERROR;


AudioCommand *command = new AudioCommand();
command->mCommand = SET_PARAMETERS;
ParametersData *data = new ParametersData();
data->mIO = ioHandle;
data->mKeyValuePairs = keyValuePairs;
command->mParam = data;
if (delayMs == 0) {
command->mWaitStatus = true;
} else {
command->mWaitStatus = false;
}
Mutex::Autolock _l(mLock);
// Add the command to the queue.
// The command is still processed in AudioPolicyService::AudioCommandThread::threadLoop();
// only the SET_PARAMETERS fragment of that loop is excerpted below.
insertCommand_l(command, delayMs);
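insertCommand_l() was already expanded in the volume path earlier; as a reminder, its core is roughly the following simplified sketch (command coalescing and other details are omitted, so treat this as an outline rather than the exact source):

// Simplified outline of insertCommand_l(); must be called with mLock held.
void AudioPolicyService::AudioCommandThread::insertCommand_l(AudioCommand *command, int delayMs)
{
    // schedule the command delayMs milliseconds from now
    command->mTime = systemTime() + milliseconds(delayMs);

    // hold a wake lock while commands are pending; threadLoop() releases it once the queue drains
    if (mName != "" && mAudioCommands.isEmpty()) {
        acquire_wake_lock(PARTIAL_WAKE_LOCK, mName.string());
    }

    // keep mAudioCommands ordered by mTime so threadLoop() always pops the earliest command
    ssize_t i;
    for (i = (ssize_t)mAudioCommands.size() - 1; i >= 0; i--) {
        if (mAudioCommands[i]->mTime <= command->mTime) break;
    }
    mAudioCommands.insertAt(command, i + 1);
}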
++++++++++++++++++++++++++++++++case SET_PARAMETERS++++++++++++++++++++++++++++++++
case SET_PARAMETERS: {
ParametersData *data = (ParametersData *)command->mParam;
LOGV("AudioCommandThread() processing set parameters string %s, io %d",
data->mKeyValuePairs.string(), data->mIO);
command->mStatus = AudioSystem::setParameters(data->mIO, data->mKeyValuePairs);
++++++++++++++++++++++++++++++++AudioSystem::setParameters++++++++++++++++++++++++++++++++
status_t AudioSystem::setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs) {
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
return af->setParameters(ioHandle, keyValuePairs);
++++++++++++++++++++++++++++++++AudioFlinger::setParameters++++++++++++++++++++++++++++++++
status_t AudioFlinger::setParameters(int ioHandle, const String8& keyValuePairs)
{
status_t result;


LOGV("setParameters(): io %d, keyvalue %s, tid %d, calling tid %d",
ioHandle, keyValuePairs.string(), gettid(), IPCThreadState::self()->getCallingPid());
// check calling permissions
if (!settingsAllowed()) {
return PERMISSION_DENIED;
}


#ifdef LVMX
AudioParameter param = AudioParameter(keyValuePairs);
LifeVibes::setParameters(ioHandle,keyValuePairs);
String8 key = String8(AudioParameter::keyRouting);
int device;
if (NO_ERROR != param.getInt(key, device)) {
device = -1;
}


key = String8(LifevibesTag);
String8 value;
int musicEnabled = -1;
if (NO_ERROR == param.get(key, value)) {
if (value == LifevibesEnable) {
mLifeVibesClientPid = IPCThreadState::self()->getCallingPid();
musicEnabled = 1;
} else if (value == LifevibesDisable) {
mLifeVibesClientPid = -1;
musicEnabled = 0;
}
}
#endif


// ioHandle == 0 means the parameters are global to the audio hardware interface
if (ioHandle == 0) {
AutoMutex lock(mHardwareLock);
mHardwareStatus = AUDIO_SET_PARAMETER;
// This setParameters() call is effectively a no-op: AudioHardwareALSA does not override it,
// and the default in the base class AudioHardwareBase is empty (a minimal sketch of that stub
// follows right after this if-block).
result = mAudioHardware->setParameters(keyValuePairs);
#ifdef LVMX
if (musicEnabled != -1) {
LifeVibes::enableMusic((bool) musicEnabled);
}
#endif
mHardwareStatus = AUDIO_HW_IDLE;
return result;
}
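For reference, the stub mentioned in the comment above amounts to the following minimal sketch (assumption: the default AudioHardwareBase::setParameters() simply accepts the string and applies nothing, as stated above; this is not copied from the source):

// Minimal sketch of the no-op behaviour described above.
status_t AudioHardwareBase::setParameters(const String8& keyValuePairs)
{
    // nothing is applied at the global hardware level in this implementation
    return NO_ERROR;
}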


// hold a strong ref on thread in case closeOutput() or closeInput() is called
// and the thread is exited once the lock is released
sp<ThreadBase> thread;
{
Mutex::Autolock _l(mLock);
thread = checkPlaybackThread_l(ioHandle);
if (thread == NULL) {
thread = checkRecordThread_l(ioHandle);
}
}
if (thread != NULL) {
result = thread->setParameters(keyValuePairs);
+++++++++++++++++++++++++++++++AudioFlinger::ThreadBase::setParameters+++++++++++++++++++++++++++++++++
status_t AudioFlinger::ThreadBase::setParameters(const String8& keyValuePairs)
{
status_t status;


LOGV("ThreadBase::setParameters() %s", keyValuePairs.string());
Mutex::Autolock _l(mLock);


// mNewParameters is consumed by checkForNewParameters_l(), e.g. AudioFlinger::MixerThread::checkForNewParameters_l() shown below.
mNewParameters.add(keyValuePairs);
+++++++++++++++++++++++++++++++AudioFlinger::MixerThread::checkForNewParameters_l+++++++++++++++++++++++++++++++++
// checkForNewParameters_l() must be called with ThreadBase::mLock held
bool AudioFlinger::MixerThread::checkForNewParameters_l()
{
bool reconfig = false;


while (!mNewParameters.isEmpty()) {
status_t status = NO_ERROR;
String8 keyValuePair = mNewParameters[0];
AudioParameter param = AudioParameter(keyValuePair);
int value;


if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) {
reconfig = true;
}
if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) {
if (value != AudioSystem::PCM_16_BIT) {
status = BAD_VALUE;
} else {
reconfig = true;
}
}
if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
if (value != AudioSystem::CHANNEL_OUT_STEREO) {
status = BAD_VALUE;
} else {
reconfig = true;
}
}
if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
// do not accept frame count changes if tracks are open as the track buffer
// size depends on frame count and correct behavior would not be guaranteed
// if frame count is changed after track creation
if (!mTracks.isEmpty()) {
status = INVALID_OPERATION;
} else {
reconfig = true;
}
}
if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
// forward device change to effects that have requested to be
// aware of attached audio device.
mDevice = (uint32_t)value;
for (size_t i = 0; i < mEffectChains.size(); i++) {
mEffectChains[i]->setDevice_l(mDevice);
}
}


if (status == NO_ERROR) {
// mOutput is assigned in the constructor of the parent class PlaybackThread, from a constructor argument.
// The MixerThread object is created in AudioFlinger::openOutput(), where the output stream
// was obtained from AudioHardwareALSA::openOutputStream().
// Its setParameters() simply delegates to the parent class ALSAStreamOps::setParameters().
status = mOutput->setParameters(keyValuePair);
++++++++++++++++++++++++++++++AudioStreamOutALSA::setParameters++++++++++++++++++++++++++++++++++
virtual status_t setParameters(const String8& keyValuePairs) {
return ALSAStreamOps::setParameters(keyValuePairs);
++++++++++++++++++++++++++++++ALSAStreamOps::setParameters++++++++++++++++++++++++++++++++++
status_t ALSAStreamOps::setParameters(const String8& keyValuePairs)
{
AudioParameter param = AudioParameter(keyValuePairs);
String8 key = String8(AudioParameter::keyRouting);
status_t status = NO_ERROR;
int device;
LOGV("setParameters() %s", keyValuePairs.string());


if (param.getInt(key, device) == NO_ERROR) {
// mParent is actually the AudioHardwareALSA object.
// mALSADevice is assigned in the AudioHardwareALSA constructor by calling module->methods->open,
// which ends up in s_device_open (a sketch of s_device_open() follows after the s_route excerpt below).
// So mParent->mALSADevice->route is in fact the function s_route.
mParent->mALSADevice->route(mHandle, (uint32_t)device, mParent->mode());
++++++++++++++++++++++++++++++s_route++++++++++++++++++++++++++++++++++
static status_t s_route(alsa_handle_t *handle, uint32_t devices, int mode)
{
status_t status = NO_ERROR;


LOGD("route called for devices %08x in mode %d...", devices, mode);
// The checks below always end up doing nothing, so we just open the device every time.
#if 0
if (handle->handle && handle->curDev == devices && handle->curMode == mode)
; // Nothing to do
else if (handle->handle && (handle->devices & devices))
setAlsaControls(handle, devices, mode);
else {
LOGE("Why are we routing to a device that isn't supported by this object?!?!?!?!");
status = s_open(handle, devices, mode);
}
#else
/* Fix me if the judgement about the current sound card here is wrong. In fact, s_open() will reconfigure the controls anyway. */
setAlsaControls(handle, devices, mode);
+++++++++++++++++++++++++++++++++++setAlsaControls+++++++++++++++++++++++++++++
void setAlsaControls(alsa_handle_t *handle, uint32_t devices, int mode)
{
AlsaControlSet set = (AlsaControlSet) handle->modPrivate;
const char *card = deviceName(handle, devices, mode, 0);
set(devices, mode, card);
}
-----------------------------------setAlsaControls-----------------------------
status = s_open(handle, devices, mode);
#endif
return status;
}
------------------------------s_route----------------------------------
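As promised in the comment before the route() call, here is a sketch of how the route hook gets wired up when the ALSA module is opened. The field names follow the legacy alsa_sound HAL (alsa_default.cpp) and should be read as an approximation, not the exact source:

// Sketch of s_device_open(): the module's open() fills in the alsa_device_t function table,
// which is why mALSADevice->route later resolves to s_route.
static status_t s_device_open(const hw_module_t* module, const char* name, hw_device_t** device)
{
    alsa_device_t *dev = (alsa_device_t *)calloc(1, sizeof(*dev));
    if (!dev) return -ENOMEM;

    dev->common.tag    = HARDWARE_DEVICE_TAG;
    dev->common.module = (hw_module_t *)module;
    dev->common.close  = s_device_close;
    dev->init  = s_init;
    dev->open  = s_open;    // used by ALSAStreamOps to (re)open the PCM device
    dev->close = s_close;
    dev->route = s_route;   // the function expanded above

    *device = &dev->common;
    return NO_ERROR;
}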
param.remove(key);
}


if (param.size()) {
status = BAD_VALUE;
}
return status;
}
------------------------------ALSAStreamOps::setParameters----------------------------------
}
------------------------------AudioStreamOutALSA::setParameters----------------------------------
if (!mStandby && status == INVALID_OPERATION) {
mOutput->standby();
mStandby = true;
mBytesWritten = 0;
status = mOutput->setParameters(keyValuePair);
}
if (status == NO_ERROR && reconfig) {
delete mAudioMixer;
readOutputParameters();
+++++++++++++++++++++++++++++++AudioFlinger::PlaybackThread::readOutputParameters+++++++++++++++++++++++++++++++++
void AudioFlinger::PlaybackThread::readOutputParameters()
{
mSampleRate = mOutput->sampleRate();
mChannels = mOutput->channels();
mChannelCount = (uint16_t)AudioSystem::popCount(mChannels);
mFormat = mOutput->format();
mFrameSize = (uint16_t)mOutput->frameSize();
mFrameCount = mOutput->bufferSize() / mFrameSize;


// FIXME - Current mixer implementation only supports stereo output: Always
// Allocate a stereo buffer even if HW output is mono.
if (mMixBuffer != NULL) delete[] mMixBuffer;
mMixBuffer = new int16_t[mFrameCount * 2];
memset(mMixBuffer, 0, mFrameCount * 2 * sizeof(int16_t));


// force reconfiguration of effect chains and engines to take new buffer size and audio
// parameters into account
// Note that mLock is not held when readOutputParameters() is called from the constructor
// but in this case nothing is done below as no audio sessions have effect yet so it doesn't
// matter.
// create a copy of mEffectChains as calling moveEffectChain_l() can reorder some effect chains
Vector< sp<EffectChain> > effectChains = mEffectChains;
for (size_t i = 0; i < effectChains.size(); i ++) {
mAudioFlinger->moveEffectChain_l(effectChains[i]->sessionId(), this, this, false);
}
}
-------------------------------AudioFlinger::PlaybackThread::readOutputParameters---------------------------------
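To make the sizing in readOutputParameters() concrete, a quick worked example with hypothetical hardware values (the numbers are illustrative, not from the source):

// Hypothetical stereo, 16-bit PCM output:
uint32_t channelCount   = 2;
uint32_t bytesPerSample = 2;                              // PCM_16_BIT
uint32_t frameSize      = channelCount * bytesPerSample;  // 4 bytes per frame
uint32_t bufferSize     = 4800;                           // assumed mOutput->bufferSize()
uint32_t frameCount     = bufferSize / frameSize;         // 1200 frames per mixer pass
// mMixBuffer is then frameCount * 2 int16_t samples, i.e. 1200 * 2 * 2 = 4800 bytes.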
mAudioMixer = new AudioMixer(mFrameCount, mSampleRate);
for (size_t i = 0; i < mTracks.size() ; i++) {
int name = getTrackName_l();
if (name < 0) break;
mTracks[i]->mName = name;
// limit track sample rate to 2 x new output sample rate
if (mTracks[i]->mCblk->sampleRate > 2 * sampleRate()) {
mTracks[i]->mCblk->sampleRate = 2 * sampleRate();
}
}
sendConfigEvent_l(AudioSystem::OUTPUT_CONFIG_CHANGED);
++++++++++++++++++++++++++++++++++AudioFlinger::ThreadBase::sendConfigEvent_l++++++++++++++++++++++++++++++
// sendConfigEvent_l() must be called with ThreadBase::mLock held
void AudioFlinger::ThreadBase::sendConfigEvent_l(int event, int param)
{
ConfigEvent *configEvent = new ConfigEvent();
configEvent->mEvent = event;
configEvent->mParam = param;
// The events added to mConfigEvents are handled by AudioFlinger::ThreadBase::processConfigEvents().
mConfigEvents.add(configEvent);
++++++++++++++++++++++++++++++++++AudioFlinger::ThreadBase::processConfigEvents++++++++++++++++++++++++++++++
void AudioFlinger::ThreadBase::processConfigEvents()
{
mLock.lock();
while(!mConfigEvents.isEmpty()) {
LOGV("processConfigEvents() remaining events %d", mConfigEvents.size());
ConfigEvent *configEvent = mConfigEvents[0];
mConfigEvents.removeAt(0);
// release mLock before locking AudioFlinger mLock: lock order is always
// AudioFlinger then ThreadBase to avoid cross deadlock
mLock.unlock();
mAudioFlinger->mLock.lock();
audioConfigChanged_l(configEvent->mEvent, configEvent->mParam);
+++++++++++++++++++++++++++++++++++AudioFlinger::audioConfigChanged_l+++++++++++++++++++++++++++++
// audioConfigChanged_l() must be called with AudioFlinger::mLock held
void AudioFlinger::audioConfigChanged_l(int event, int ioHandle, void *param2)
{
size_t size = mNotificationClients.size();
for (size_t i = 0; i < size; i++) {
// mNotificationClients is populated by AudioFlinger::registerClient().
mNotificationClients.valueAt(i)->client()->ioConfigChanged(event, ioHandle, param2);
++++++++++++++++++++++++++++++++++AudioFlinger::registerClient++++++++++++++++++++++++++++++
void AudioFlinger::registerClient(const sp<IAudioFlingerClient>& client)
{


Mutex::Autolock _l(mLock);


int pid = IPCThreadState::self()->getCallingPid();
if (mNotificationClients.indexOfKey(pid) < 0) {
sp<NotificationClient> notificationClient = new NotificationClient(this,
client,
pid);
LOGV("registerClient() client %p, pid %d", notificationClient.get(), pid);


mNotificationClients.add(pid, notificationClient);


sp<IBinder> binder = client->asBinder();
binder->linkToDeath(notificationClient);


// the config change is always sent from playback or record threads to avoid deadlock
// with AudioSystem::gLock
for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
mPlaybackThreads.valueAt(i)->sendConfigEvent(AudioSystem::OUTPUT_OPENED);
}


for (size_t i = 0; i < mRecordThreads.size(); i++) {
mRecordThreads.valueAt(i)->sendConfigEvent(AudioSystem::INPUT_OPENED);
}
}
}
AudioFlinger::registerClient() is called from AudioSystem::get_audio_flinger(), which is expanded below.
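In other words, registration happens as a side effect of the first AudioSystem call made in a process. A minimal client-side sketch of such a trigger (getOutputSamplingRate() is just one example of a call that can go through get_audio_flinger(); treat this as an illustration):

// Hypothetical client-side code: the first AudioSystem call in this process ends up in
// get_audio_flinger(), which creates and registers the process-wide AudioFlingerClient,
// so later ioConfigChanged() callbacks reach this process.
int sampleRate = 0;
AudioSystem::getOutputSamplingRate(&sampleRate, AudioSystem::MUSIC);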
+++++++++++++++++++++++++++++++AudioSystem::get_audio_flinger+++++++++++++++++++++++++++++++++
// establish binder interface to AudioFlinger service
const sp<IAudioFlinger>& AudioSystem::get_audio_flinger()
{
Mutex::Autolock _l(gLock);
if (gAudioFlinger.get() == 0) {
sp<IServiceManager> sm = defaultServiceManager();
sp<IBinder> binder;
do {
binder = sm->getService(String16("media.audio_flinger"));
if (binder != 0)
break;
LOGW("AudioFlinger not published, waiting...");
usleep(500000); // 0.5 s
} while(true);
if (gAudioFlingerClient == NULL) {
// The ioConfigChanged() called above actually resolves to AudioSystem::AudioFlingerClient::ioConfigChanged().
gAudioFlingerClient = new AudioFlingerClient();
++++++++++++++++++++++++++++++AudioSystem::AudioFlingerClient::ioConfigChanged++++++++++++++++++++++++++++++++++
void AudioSystem::AudioFlingerClient::ioConfigChanged(int event, int ioHandle, void *param2) {
LOGV("ioConfigChanged() event %d", event);
OutputDescriptor *desc;
uint32_t stream;


if (ioHandle == 0) return;


Mutex::Autolock _l(AudioSystem::gLock);


switch (event) {
case STREAM_CONFIG_CHANGED:
if (param2 == 0) break;
stream = *(uint32_t *)param2;
LOGV("ioConfigChanged() STREAM_CONFIG_CHANGED stream %d, output %d", stream, ioHandle);
if (gStreamOutputMap.indexOfKey(stream) >= 0) {
gStreamOutputMap.replaceValueFor(stream, ioHandle);
}
break;
case OUTPUT_OPENED: {
if (gOutputs.indexOfKey(ioHandle) >= 0) {
LOGV("ioConfigChanged() opening already existing output! %d", ioHandle);
break;
}
if (param2 == 0) break;
desc = (OutputDescriptor *)param2;


OutputDescriptor *outputDesc = new OutputDescriptor(*desc);
gOutputs.add(ioHandle, outputDesc);
LOGV("ioConfigChanged() new output samplingRate %d, format %d channels %d frameCount %d latency %d",
outputDesc->samplingRate, outputDesc->format, outputDesc->channels, outputDesc->frameCount, outputDesc->latency);
} break;
case OUTPUT_CLOSED: {
if (gOutputs.indexOfKey(ioHandle) < 0) {
LOGW("ioConfigChanged() closing unknow output! %d", ioHandle);
break;
}
LOGV("ioConfigChanged() output %d closed", ioHandle);


gOutputs.removeItem(ioHandle);
for (int i = gStreamOutputMap.size() - 1; i >= 0 ; i--) {
if (gStreamOutputMap.valueAt(i) == ioHandle) {
gStreamOutputMap.removeItemsAt(i);
}
}
} break;
// OUTPUT_CONFIG_CHANGED is the event we added above.
case OUTPUT_CONFIG_CHANGED: {
int index = gOutputs.indexOfKey(ioHandle);
if (index < 0) {
LOGW("ioConfigChanged() modifying unknow output! %d", ioHandle);
break;
}
if (param2 == 0) break;
desc = (OutputDescriptor *)param2;


LOGV("ioConfigChanged() new config for output %d samplingRate %d, format %d channels %d frameCount %d latency %d",
ioHandle, desc->samplingRate, desc->format,
desc->channels, desc->frameCount, desc->latency);
OutputDescriptor *outputDesc = gOutputs.valueAt(index);
delete outputDesc;
outputDesc = new OutputDescriptor(*desc);
gOutputs.replaceValueFor(ioHandle, outputDesc);
} break;
case INPUT_OPENED:
case INPUT_CLOSED:
case INPUT_CONFIG_CHANGED:
break;


}
}
-------------------------------AudioSystem::AudioFlingerClient::ioConfigChanged---------------------------------
} else {
if (gAudioErrorCallback) {
gAudioErrorCallback(NO_ERROR);
}
}
binder->linkToDeath(gAudioFlingerClient);
gAudioFlinger = interface_cast<IAudioFlinger>(binder);
gAudioFlinger->registerClient(gAudioFlingerClient);
}
LOGE_IF(gAudioFlinger==0, "no AudioFlinger!?");


return gAudioFlinger;
}
-------------------------------AudioSystem::get_audio_flinger---------------------------------
----------------------------------AudioFlinger::registerClient------------------------------
}
}
-----------------------------------AudioFlinger::audioConfigChanged_l-----------------------------
mAudioFlinger->mLock.unlock();
delete configEvent;
mLock.lock();
}
mLock.unlock();
}
----------------------------------AudioFlinger::ThreadBase::processConfigEvents------------------------------
LOGV("sendConfigEvent() num events %d event %d, param %d", mConfigEvents.size(), event, param);
mWaitWorkCV.signal();
}
----------------------------------AudioFlinger::ThreadBase::sendConfigEvent_l------------------------------
}
}


mNewParameters.removeAt(0);


mParamStatus = status;
mParamCond.signal();
mWaitWorkCV.wait(mLock);
}
return reconfig;
}
-------------------------------AudioFlinger::MixerThread::checkForNewParameters_l---------------------------------
mWaitWorkCV.signal();
// wait condition with timeout in case the thread loop has exited
// before the request could be processed
if (mParamCond.waitRelative(mLock, seconds(2)) == NO_ERROR) {
status = mParamStatus;
mWaitWorkCV.signal();
} else {
status = TIMED_OUT;
}
return status;
}
-------------------------------AudioFlinger::ThreadBase::setParameters---------------------------------
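The two condition variables seen above, mWaitWorkCV and mParamCond, implement a small rendezvous between the binder thread running ThreadBase::setParameters() and the mixer thread running checkForNewParameters_l(). The standalone program below reproduces just that handshake with standard C++ primitives; it is an illustration of the pattern, not Android code:

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <string>
#include <thread>
#include <vector>

// Requester queues a string, wakes the worker, and blocks until the worker publishes a status;
// the worker then waits until the requester has read that status before moving on.
std::mutex gLock;                      // plays the role of ThreadBase::mLock
std::condition_variable gWaitWork;     // plays the role of mWaitWorkCV
std::condition_variable gParamCond;    // plays the role of mParamCond
std::vector<std::string> gNewParams;   // plays the role of mNewParameters
int gParamStatus = 0;                  // plays the role of mParamStatus

int setParametersLike(const std::string& keyValuePairs) {
    std::unique_lock<std::mutex> l(gLock);
    gNewParams.push_back(keyValuePairs);
    gWaitWork.notify_one();            // like mWaitWorkCV.signal()
    gParamCond.wait(l);                // like mParamCond.waitRelative(mLock, seconds(2))
    int status = gParamStatus;
    gWaitWork.notify_one();            // let the worker move on to the next request
    return status;
}

void workerLoopLike() {
    std::unique_lock<std::mutex> l(gLock);
    while (gNewParams.empty()) gWaitWork.wait(l);   // threadLoop() sleeping on mWaitWorkCV
    while (!gNewParams.empty()) {
        std::string kv = gNewParams.front();
        gNewParams.erase(gNewParams.begin());
        std::printf("applying: %s\n", kv.c_str());  // stand-in for mOutput->setParameters()
        gParamStatus = 0;                           // NO_ERROR
        gParamCond.notify_one();                    // hand the status back to the requester
        gWaitWork.wait(l);                          // wait until the requester has read it
    }
}

int main() {
    std::thread worker(workerLoopLike);
    setParametersLike("routing=2");
    worker.join();
    return 0;
}

Note that the real code guards the requester's wait with a 2-second timeout (waitRelative) in case the mixer thread has already exited, as shown in the setParameters() excerpt above.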
#ifdef LVMX
if ((NO_ERROR == result) && (device != -1)) {
LifeVibes::setDevice(LifeVibes::threadIdToAudioOutputType(thread->id()), device);
}
#endif
return result;
}
return BAD_VALUE;
}
--------------------------------AudioFlinger::setParameters--------------------------------
}
--------------------------------AudioSystem::setParameters--------------------------------
if (command->mWaitStatus) {
command->mCond.signal();
mWaitWorkCV.wait(mLock);
}
delete data;
}break;
--------------------------------case SET_PARAMETERS--------------------------------
LOGV("AudioCommandThread() adding set parameter string %s, io %d ,delay %d",
keyValuePairs.string(), ioHandle, delayMs);
mWaitWorkCV.signal();
if (command->mWaitStatus) {
command->mCond.wait(mLock);
status = command->mStatus;
mWaitWorkCV.signal();
}
return status;
}
-----------------------------AudioPolicyService::AudioCommandThread::parametersCommand-----------------------------------
}
-----------------------------AudioPolicyService::setParameters-----------------------------------
// update stream volumes according to new device
applyStreamVolumes(output, device, delayMs);
+++++++++++++++++++++++++++++AudioPolicyManagerBase::applyStreamVolumes+++++++++++++++++++++++++++++++++++
void AudioPolicyManagerBase::applyStreamVolumes(audio_io_handle_t output, uint32_t device, int delayMs)
{
LOGV("applyStreamVolumes() for output %d and device %x", output, device);


for (int stream = 0; stream < AudioSystem::NUM_STREAM_TYPES; stream++) {
checkAndSetVolume(stream, mStreams[stream].mIndexCur, output, device, delayMs);
}
}
-----------------------------AudioPolicyManagerBase::applyStreamVolumes-----------------------------------


// if changing from a combined headset + speaker route, unmute media streams
if (output == mHardwareOutput && AudioSystem::popCount(prevDevice) == 2) {
setStrategyMute(STRATEGY_MEDIA, false, output, delayMs);
}
}
----------------------------AudioPolicyManagerBase::setOutputDevice------------------------------------


// handle special case for sonification while in call
if (isInCall()) {
handleIncallSonification(stream, true, false);
}


// apply volume rules for current stream and device if necessary
checkAndSetVolume(stream, mStreams[stream].mIndexCur, output, outputDesc->device());


return NO_ERROR;
}
---------------------------AudioPolicyManagerBase::startOutput-------------------------------------
}
-----------------------AudioPolicyService::startOutput-----------------------------------------
}
-----------------------AudioSystem::startOutput-----------------------------------------
thread->mLock.lock();
}
if (status == NO_ERROR) {
PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
playbackThread->addTrack_l(this);
} else {
mState = state;
}
} else {
status = BAD_VALUE;
}
return status;
}
----------------------------AudioFlinger::PlaybackThread::Track::start------------------------------------
}
---------------------------AudioFlinger::TrackHandle::start-------------------------------------
}
// If the status is DEAD_OBJECT (the server-side track has died, typically because the mediaserver
// process restarted), re-create the IAudioTrack via createTrack() and call start() on the new one.
if (status == DEAD_OBJECT) {
LOGV("start() dead IAudioTrack: creating a new one");
status = createTrack(mStreamType, mCblk->sampleRate, mFormat, mChannelCount,
mFrameCount, mFlags, mSharedBuffer, getOutput(), false);
if (status == NO_ERROR) {
status = mAudioTrack->start();
if (status == NO_ERROR) {
mNewPosition = mCblk->server + mUpdatePeriod;
}
}
}
if (status != NO_ERROR) {
LOGV("start() failed");
android_atomic_and(~1, &mActive);
if (t != 0) {
t->requestExit();
} else {
setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_NORMAL);
}
}
}


if (t != 0) {
t->mLock.unlock();
}
}
------------------------------AudioTrack::start----------------------------------
}
------------------------------android_media_AudioTrack_start----------------------------------
mPlayState = PLAYSTATE_PLAYING;
}
}
------------------------------play----------------------------------
Thread.sleep(100);
log(TEST_NAME, "position ="+ track.getPlaybackHeadPosition());
assertTrue(TEST_NAME, track.getPlaybackHeadPosition() > 0);
//-------- tear down --------------
track.release();
}
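Back in the test, the assertion is a very loose lower bound. A rough estimate of the expected position after the sleep, using the test's own constants (the exact value depends on latency and scheduling, so this is only an order-of-magnitude check):

// getPlaybackHeadPosition() is expressed in frames, so after ~100 ms of playback:
const int kSampleRateHz   = 22050;                            // TEST_SR
const int kSleepMs        = 100;                              // Thread.sleep(100)
const int kExpectedFrames = kSampleRateHz * kSleepMs / 1000;  // roughly 2205 frames
// so a healthy track should report a position around 2000 frames, comfortably above 0.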
###########################################################
