
Let's start with a diagram, so the wall of code that follows is less off-putting.


Once prepare has completed, start can be called to begin playback. To keep things simple we will not analyze the call chain leading up to start; we simply list the implementations of the methods along the way.
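For orientation, this is the sequence being analyzed, sketched against the native MediaPlayer client API (a minimal illustration of mine, not from the original article; the fd and length are placeholders for a real media file):

#include <media/mediaplayer.h>

using namespace android;

// Drive the playback sequence this article analyzes.
void playFd(int fd, int64_t length) {
    sp<MediaPlayer> mp = new MediaPlayer();
    mp->setDataSource(fd, 0 /* offset */, length);
    mp->prepare();   // synchronous prepare
    mp->start();     // the entry point analyzed below
}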

public void start() throws IllegalStateException {
    if (isRestricted()) {
        _setVolume(0, 0);
    }
    stayAwake(true);
    _start();
}

static void
android_media_MediaPlayer_start(JNIEnv *env, jobject thiz)
{
    ALOGV("start");
    sp<MediaPlayer> mp = getMediaPlayer(env, thiz);
    process_media_player_call( env, thiz, mp->start(), NULL, NULL );
}
status_t MediaPlayer::start()
{
    status_t ret = NO_ERROR;
    Mutex::Autolock _l(mLock);
    mLockThreadId = getThreadId();
    if (mCurrentState & MEDIA_PLAYER_STARTED) {
        ret = NO_ERROR;
    } else if ( (mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER_PREPARED |
                    MEDIA_PLAYER_PLAYBACK_COMPLETE | MEDIA_PLAYER_PAUSED ) ) ) {
        mPlayer->setLooping(mLoop);
        mPlayer->setVolume(mLeftVolume, mRightVolume);
        mPlayer->setAuxEffectSendLevel(mSendLevel);
        mCurrentState = MEDIA_PLAYER_STARTED;
        ret = mPlayer->start();
        if (ret != NO_ERROR) {
            mCurrentState = MEDIA_PLAYER_STATE_ERROR;
        } else {
            if (mCurrentState == MEDIA_PLAYER_PLAYBACK_COMPLETE) {
                ALOGV("playback completed immediately following start()");
            }
        }
    } else {
        ALOGE("start called in state %d", mCurrentState);
        ret = INVALID_OPERATION;
    }
    mLockThreadId = 0;
    return ret;
}
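The state test above works because mCurrentState is a bitmask. For reference, the player states (from mediaplayer.h, abridged) each occupy a distinct bit, so ANDing mCurrentState against an ORed set of states checks membership in that set:

enum media_player_states {
    MEDIA_PLAYER_STATE_ERROR        = 0,
    MEDIA_PLAYER_IDLE               = 1 << 0,
    MEDIA_PLAYER_INITIALIZED        = 1 << 1,
    MEDIA_PLAYER_PREPARING          = 1 << 2,
    MEDIA_PLAYER_PREPARED           = 1 << 3,
    MEDIA_PLAYER_STARTED            = 1 << 4,
    MEDIA_PLAYER_PAUSED             = 1 << 5,
    MEDIA_PLAYER_STOPPED            = 1 << 6,
    MEDIA_PLAYER_PLAYBACK_COMPLETE  = 1 << 7
};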
status_t StagefrightPlayer::start() {
    return mPlayer->play();
}

status_t AwesomePlayer::play() {
    ATRACE_CALL();

    Mutex::Autolock autoLock(mLock);
    modifyFlags(CACHE_UNDERRUN, CLEAR);
    return play_l();
}

The real work of start begins in AwesomePlayer::play_l. There, createAudioPlayer_l creates the audio player, and startAudioPlayer_l then starts it playing. We analyze these two methods below:

status_t AwesomePlayer::play_l() {
    modifyFlags(SEEK_PREVIEW, CLEAR);
    mMediaRenderingStartGeneration = ++mStartGeneration;
    if (!(mFlags & PREPARED)) {
        status_t err = prepare_l();
    }
    modifyFlags(PLAYING, SET);
    modifyFlags(FIRST_FRAME, SET);
    if (mAudioSource != NULL) {
        if (mAudioPlayer == NULL) {
            createAudioPlayer_l();
        }
        CHECK(!(mFlags & AUDIO_RUNNING));
        if (mVideoSource == NULL) {
            // We don't want to post an error notification at this point,
            // the error returned from MediaPlayer::start() will suffice.
            status_t err = startAudioPlayer_l(
                    false /* sendErrorNotification */);
        }
    }
    if (mFlags & AT_EOS) {
        // Legacy behaviour, if a stream finishes playing and then
        // is started again, we play from the start...
        seekTo_l(0);
    }
    return OK;
}

createAudioPlayer_l is relatively simple: it constructs an AudioPlayer and hands it mAudioSource as input. mAudioSource is the decoder, i.e. the OMXCodec, and it ends up stored in the AudioPlayer's mSource member.
Also note mAudioSink: you may remember it from earlier, it is the AudioOutput created back in the setDataSource phase. In other words, two important things happen here: the input mAudioSource is attached to the AudioPlayer, and mAudioSink, the hardware-facing output, is handed to the AudioPlayer as well.

void AwesomePlayer::createAudioPlayer_l()
{
    mAudioPlayer = new AudioPlayer(mAudioSink, flags, this);
    mAudioPlayer->setSource(mAudioSource);

    // If there was a seek request before we ever started,
    // honor the request now.
    // Make sure to do this before starting the audio player
    // to avoid a race condition.
    seekAudioIfNecessary_l();
}

AudioPlayer::AudioPlayer(
        const sp<MediaPlayerBase::AudioSink> &audioSink,
        uint32_t flags,
        AwesomePlayer *observer)
    : mInputBuffer(NULL),
      mSampleRate(0),
      mLatencyUs(0),
      mFrameSize(0),
      mNumFramesPlayed(0),
      mNumFramesPlayedSysTimeUs(ALooper::GetNowUs()),
      mPositionTimeMediaUs(-1),
      mPositionTimeRealUs(-1),
      mSeeking(false),
      mReachedEOS(false),
      mFinalStatus(OK),
      mSeekTimeUs(0),
      mStarted(false),
      mIsFirstBuffer(false),
      mFirstBufferResult(OK),
      mFirstBuffer(NULL),
      mAudioSink(audioSink),
      mObserver(observer),
      mPinnedTimeUs(-1ll),
      mPlaying(false),
      mStartPosUs(0),
      mCreateFlags(flags) {
}

void AudioPlayer::setSource(const sp<MediaSource> &source) {
    CHECK(mSource == NULL);
    mSource = source;
}

Once the AudioPlayer has been created, startAudioPlayer_l can call its start method to begin playback.

status_t AwesomePlayer::startAudioPlayer_l(bool sendErrorNotification) {
    status_t err = OK;
    if (!(mFlags & AUDIOPLAYER_STARTED)) {
        bool wasSeeking = mAudioPlayer->isSeeking();

        // We've already started the MediaSource in order to enable
        // the prefetcher to read its data.
        err = mAudioPlayer->start(true /* sourceAlreadyStarted */);
        if (err != OK) {
            return err;
        }

        modifyFlags(AUDIOPLAYER_STARTED, SET);

        if (wasSeeking) {
            CHECK(!mAudioPlayer->isSeeking());

            // We will have finished the seek while starting the audio player.
            postAudioSeekComplete();
        } else {
            notifyIfMediaStarted_l();
        }
    }
    return err;
}

AudioPlayer::start first reads the first chunk of decoded data via mSource->read(&mFirstBuffer, &options); decoding the first frame effectively kicks off the decode loop. It then calls mAudioSink->open and mAudioSink->start() to begin playback.

status_t AudioPlayer::start(bool sourceAlreadyStarted) {
    mFirstBufferResult = mSource->read(&mFirstBuffer, &options);
    if (mFirstBufferResult == INFO_FORMAT_CHANGED) {
        mFirstBufferResult = OK;
        mIsFirstBuffer = false;
    } else {
        mIsFirstBuffer = true;
    }

    audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
    if (mAudioSink.get() != NULL) {
        status_t err = mAudioSink->open(
                mSampleRate, numChannels, channelMask, audioFormat,
                DEFAULT_AUDIOSINK_BUFFERCOUNT,
                &AudioPlayer::AudioSinkCallback,
                this,
                (audio_output_flags_t)flags,
                useOffload() ? &offloadInfo : NULL);
        if (err == OK) {
            mLatencyUs = (int64_t)mAudioSink->latency() * 1000;
            mFrameSize = mAudioSink->frameSize();
            err = mAudioSink->start();
            // do not alter behavior for non offloaded tracks: ignore start status.
            if (!useOffload()) {
                err = OK;
            }
        }
    } else {
        // ... (when no AudioSink is set, a raw AudioTrack is created here)
    }
    return OK;
}


status_t OMXCodec::read(MediaBuffer **buffer, const ReadOptions *options) {
    status_t err;
    if (mInitialBufferSubmit) {
        mInitialBufferSubmit = false;
        drainInputBuffers();
        if (mState == EXECUTING) {
            // Otherwise mState == RECONFIGURING and this code will trigger
            // after the output port is reenabled.
            fillOutputBuffers();
        }
    }

    // Wait until a buffer has been filled.
    while (mState != ERROR && !mNoMoreOutputData && mFilledBuffers.empty()) {
        if ((err = waitForBufferFilled_l()) != OK) {
            return err;
        }
    }

    // If the queue is still empty at this point, decoding has finished.
    if (mFilledBuffers.empty()) {
        return mSignalledEOS ? mFinalStatus : ERROR_END_OF_STREAM;
    }

    // Take the first filled buffer off the queue.
    size_t index = *mFilledBuffers.begin();
    mFilledBuffers.erase(mFilledBuffers.begin());

    BufferInfo *info = &mPortBuffers[kPortIndexOutput].editItemAt(index);
    CHECK_EQ((int)info->mStatus, (int)OWNED_BY_US);
    info->mStatus = OWNED_BY_CLIENT;

    info->mMediaBuffer->add_ref();
    if (mSkipCutBuffer != NULL) {
        mSkipCutBuffer->submit(info->mMediaBuffer);
    }
    *buffer = info->mMediaBuffer;
    return OK;
}
void OMXCodec::drainInputBuffers() {
    CHECK(mState == EXECUTING || mState == RECONFIGURING);

    if (mFlags & kUseSecureInputBuffers) {
        Vector<BufferInfo> *buffers = &mPortBuffers[kPortIndexInput];
        for (size_t i = 0; i < buffers->size(); ++i) {
            if (!drainAnyInputBuffer()
                    || (mFlags & kOnlySubmitOneInputBufferAtOneTime)) {
                break;
            }
        }
    } else {
        Vector<BufferInfo> *buffers = &mPortBuffers[kPortIndexInput];
        for (size_t i = 0; i < buffers->size(); ++i) {
            BufferInfo *info = &buffers->editItemAt(i);
            if (info->mStatus != OWNED_BY_US) {
                continue;
            }
            if (!drainInputBuffer(info)) {
                break;
            }
            if (mFlags & kOnlySubmitOneInputBufferAtOneTime) {
                break;
            }
        }
    }
}

bool OMXCodec::drainAnyInputBuffer() {
    return drainInputBuffer((BufferInfo *)NULL);
}

bool OMXCodec::drainInputBuffer(BufferInfo *info) {
    for (;;) {
        MediaBuffer *srcBuffer;
        if (mSeekTimeUs >= 0) {
            // ...
        } else if (mLeftOverBuffer) {
            // ...
        } else {
            err = mSource->read(&srcBuffer);
        }
        // ... (copies srcBuffer into the OMX input buffer; breaks out
        // of the loop when the buffer is full or the stream ends)
    }
    return true;
}
status_t MP3Source::read(MediaBuffer **out, const ReadOptions *options) {
    MediaBuffer *buffer;
    status_t err = mGroup->acquire_buffer(&buffer);
    size_t frame_size;
    int bitrate;
    int num_samples;
    int sample_rate;
    // Scan forward until we find a valid frame header (sync).
    for (;;) {
        ssize_t n = mDataSource->readAt(mCurrentPos, buffer->data(), 4);
        uint32_t header = U32_AT((const uint8_t *)buffer->data());
        if ((header & kMask) == (mFixedHeader & kMask)
                && GetMPEGAudioFrameSize(
                    header, &frame_size, &sample_rate, NULL,
                    &bitrate, &num_samples)) {
            // re-calculate mCurrentTimeUs because we might have called Resync()
            if (seekCBR) {
                mCurrentTimeUs = (mCurrentPos - mFirstFramePos) * 8000 / bitrate;
                mBasisTimeUs = mCurrentTimeUs;
            }
            break;
        }
    }
    CHECK(frame_size <= buffer->size());

    // Read one full frame of compressed data.
    ssize_t n = mDataSource->readAt(mCurrentPos, buffer->data(), frame_size);
    buffer->set_range(0, frame_size);
    buffer->meta_data()->setInt64(kKeyTime, mCurrentTimeUs);
    buffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);
    mCurrentPos += frame_size;
    mSamplesRead += num_samples;
    mCurrentTimeUs = mBasisTimeUs + ((mSamplesRead * 1000000) / sample_rate);
    *out = buffer;
    return OK;
}
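As a quick sanity check of the timestamp arithmetic above (assuming a typical 44.1 kHz MPEG-1 Layer III stream, which carries 1152 samples per frame), each frame advances mCurrentTimeUs by roughly 26 ms:

#include <cstdint>
#include <cstdio>

int main() {
    // Assumption: MPEG-1 Layer III carries 1152 samples per frame.
    const int64_t kSamplesPerFrame = 1152;
    const int64_t kSampleRate = 44100;
    // Same formula as mCurrentTimeUs above: samples * 1e6 / sample_rate.
    int64_t frameDurationUs = kSamplesPerFrame * 1000000 / kSampleRate;
    printf("%lld us per frame\n", (long long)frameDurationUs);  // ~26122 us
    return 0;
}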

The next step is handed over to mAudioSink->open and mAudioSink->start().
Recall that mAudioSink is the AudioOutput created in MediaPlayerService::Client::setDataSource_pre:

sp<MediaPlayerBase> MediaPlayerService::Client::setDataSource_pre(
        player_type playerType)
{
    // ...
    if (!p->hardwareOutput()) {
        Mutex::Autolock l(mLock);
        mAudioOutput = new AudioOutput(mAudioSessionId,
                IPCThreadState::self()->getCallingUid(), mPid, mAudioAttributes);
        static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput);
    }
    return p;
}

Now that we know what mAudioSink is, we can analyze its open method. Note that among the parameters passed to mAudioSink->open is the function pointer AudioPlayer::AudioSinkCallback. Its job is to be called back periodically, while the AudioOutput is playing PCM, to fill in more data; the pointer is saved in mCallback. There is another important detail here: when the AudioTrack object is constructed, CallbackWrapper is passed in as the AudioTrack's callback, and it is this function the AudioTrack invokes whenever it needs data:

status_t MediaPlayerService::AudioOutput::open(
        uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
        audio_format_t format, int bufferCount,
        AudioCallback cb, void *cookie,
        audio_output_flags_t flags,
        const audio_offload_info_t *offloadInfo,
        bool doNotReconnect,
        uint32_t suggestedFrameCount)
{
    sp<AudioTrack> t;
    CallbackData *newcbd = NULL;
    // Save the callback in mCallback.
    mCallback = cb;
    mCallbackCookie = cookie;
    // We don't attempt to create a new track if we are recycling an
    // offloaded track. But, if we are recycling a non-offloaded or we
    // are switching where one is offloaded and one isn't then we create
    // the new track in advance so that we can read additional stream info
    if (!(reuse && bothOffloaded)) {
        ALOGV("creating new AudioTrack");
        if (mCallback != NULL) {
            newcbd = new CallbackData(this);
            // Create a new AudioTrack.
            t = new AudioTrack(
                    mStreamType,
                    sampleRate,
                    format,
                    channelMask,
                    frameCount,
                    flags,
                    CallbackWrapper,   // invoked whenever the track needs data
                    newcbd,
                    0,  // notification frames
                    mSessionId,
                    AudioTrack::TRANSFER_CALLBACK,
                    offloadInfo,
                    mUid,
                    mPid,
                    mAttributes,
                    doNotReconnect);
        } else {
            // ...
        }
    }
    mCallbackData = newcbd;
    // Keep the newly created AudioTrack in mTrack.
    mTrack = t;
    return res;
}

As mentioned above, while AudioOutput is playing PCM it periodically invokes the callback AudioPlayer::AudioSinkCallback to fill in data. But if you trace mCallback through the code you will find it is never called directly; the calls actually happen inside AudioOutput::CallbackWrapper.
Next, let's take a closer look at how CallbackWrapper is registered, and how it invokes AudioPlayer::AudioSinkCallback to operate on the buffer.

First, the AudioTrack constructor:
It does little more than call the set method; CallbackWrapper arrives as its seventh parameter (cbf).

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect)
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            frameCount, flags, cbf, user, notificationFrames,
            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
            offloadInfo, uid, pid, pAttributes, doNotReconnect);
}

In set we can see that CallbackWrapper is ultimately assigned to mCbf; set also spawns the AudioTrackThread and calls createTrack_l to create the IAudioTrack.

status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect)
{
    switch (transferType) {
    case TRANSFER_DEFAULT:
        // ...
    case TRANSFER_CALLBACK:
        if (cbf == NULL || sharedBuffer != 0) {
            return BAD_VALUE;
        }
        break;
    // ...
    default:
        return BAD_VALUE;
    }
    mCbf = cbf;
    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
        // thread begins in paused state, and will not reference us until start()
    }

    // create the IAudioTrack
    status_t status = createTrack_l();
    return NO_ERROR;
}
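To see the TRANSFER_CALLBACK pull model in isolation, here is a minimal standalone sketch of my own (not framework code; kSampleRate and sineCallback are names invented here) that plays a sine tone. With a non-NULL callback and no shared buffer, the constructor's default TRANSFER_DEFAULT resolves to TRANSFER_CALLBACK, and the track fires EVENT_MORE_DATA into the callback to be filled, which is exactly the role CallbackWrapper plays for AudioOutput:

#include <math.h>
#include <unistd.h>
#include <media/AudioTrack.h>

using namespace android;

static const uint32_t kSampleRate = 44100;  // invented for this example

// Fills each EVENT_MORE_DATA buffer with a 440 Hz sine wave.
static void sineCallback(int event, void * /* user */, void *info) {
    if (event != AudioTrack::EVENT_MORE_DATA) return;
    AudioTrack::Buffer *buffer = static_cast<AudioTrack::Buffer *>(info);
    static double phase = 0.0;
    for (size_t i = 0; i < buffer->frameCount; ++i) {
        buffer->i16[i] = (int16_t)(3000 * sin(phase));
        phase += 2.0 * M_PI * 440.0 / kSampleRate;
    }
    // Leaving buffer->size untouched reports the whole buffer as filled.
}

int main() {
    sp<AudioTrack> track = new AudioTrack(
            AUDIO_STREAM_MUSIC, kSampleRate, AUDIO_FORMAT_PCM_16_BIT,
            AUDIO_CHANNEL_OUT_MONO, 0 /* frameCount: let the track pick */,
            AUDIO_OUTPUT_FLAG_NONE, sineCallback, NULL /* user */);
    if (track->initCheck() != NO_ERROR) return -1;
    track->start();   // resumes AudioTrackThread; callbacks start firing
    sleep(3);         // let the tone play
    track->stop();
    return 0;
}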

AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false),
      mPausedNs(0LL), mIgnoreNextPausedInt(false)
{
}

bool AudioTrack::AudioTrackThread::threadLoop()
{
    // ...
    nsecs_t ns = mReceiver.processAudioBuffer();
    switch (ns) {
    case 0:
        return true;
    case NS_INACTIVE:
        pauseInternal();
        return true;
    case NS_NEVER:
        return false;
    case NS_WHENEVER:
        // Event driven: call wake() when callback notifications conditions change.
        ns = INT64_MAX;
        // fall through
    default:
        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
        pauseInternal(ns);
        return true;
    }
}

Next, processAudioBuffer. This is a crucial method, but here we only care about the parts involving mCbf, i.e. the mCbf(...) calls in the listing below.

nsecs_t AudioTrack::processAudioBuffer()
{
    if (waitStreamEnd) {
        // FIXME: Instead of blocking in proxy->waitStreamEndDone(), Callback thread
        // should wait on proxy futex and handle CBLK_STREAM_END_DONE within this function
        // (and make sure we don't callback for more data while we're stopping).
        // This helps with position, marker notifications, and track invalidation.
        struct timespec timeout;
        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
        timeout.tv_nsec = 0;

        status_t status = proxy->waitStreamEndDone(&timeout);
        switch (status) {
        case NO_ERROR:
        case DEAD_OBJECT:
        case TIMED_OUT:
            mCbf(EVENT_STREAM_END, mUserData, NULL);
            {
                AutoMutex lock(mLock);
                // The previously assigned value of waitStreamEnd is no longer valid,
                // since the mutex has been unlocked and either the callback handler
                // or another thread could have re-started the AudioTrack during that time.
                waitStreamEnd = mState == STATE_STOPPING;
                if (waitStreamEnd) {
                    mState = STATE_STOPPED;
                    mReleased = 0;
                }
            }
            break;
        }
        return 0;
    }

    if (newUnderrun) {
        mCbf(EVENT_UNDERRUN, mUserData, NULL);
    }
    while (loopCountNotifications > 0) {
        mCbf(EVENT_LOOP_END, mUserData, NULL);
        --loopCountNotifications;
    }
    if (flags & CBLK_BUFFER_END) {
        mCbf(EVENT_BUFFER_END, mUserData, NULL);
    }
    if (markerReached) {
        mCbf(EVENT_MARKER, mUserData, &markerPosition);
    }
    while (newPosCount > 0) {
        size_t temp = newPosition;
        mCbf(EVENT_NEW_POS, mUserData, &temp);
        newPosition += updatePeriod;
        newPosCount--;
    }
    if (mObservedSequence != sequence) {
        mObservedSequence = sequence;
        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
        // for offloaded tracks, just wait for the upper layers to recreate the track
        if (isOffloadedOrDirect()) {
            return NS_INACTIVE;
        }
    }

    size_t reqSize = audioBuffer.size;
    mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
    size_t writtenSize = audioBuffer.size;

    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
    return 0;
}

From the code above we can see that constructing an AudioTrack starts the AudioTrackThread, whose threadLoop calls processAudioBuffer, which in turn fires callbacks such as mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer). These land in AudioOutput::CallbackWrapper, which obtains the AudioOutput via data->getOutput() and then calls through *me->mCallback into the corresponding branch of AudioPlayer::AudioSinkCallback.

void MediaPlayerService::AudioOutput::CallbackWrapper(
        int event, void *cookie, void *info) {
    //ALOGV("callbackwrapper");
    CallbackData *data = (CallbackData*)cookie;
    // lock to ensure we aren't caught in the middle of a track switch.
    data->lock();
    AudioOutput *me = data->getOutput();
    AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
    switch(event) {
    case AudioTrack::EVENT_MORE_DATA: {
        size_t actualSize = (*me->mCallback)(
                me, buffer->raw, buffer->size, me->mCallbackCookie,
                CB_EVENT_FILL_BUFFER);
        // Log when no data is returned from the callback.
        // (1) We may have no data (especially with network streaming sources).
        // (2) We may have reached the EOS and the audio track is not stopped yet.
        // Note that AwesomePlayer/AudioPlayer will only return zero size when it reaches the EOS.
        // NuPlayerRenderer will return zero when it doesn't have data (it doesn't block to fill).
        //
        // This is a benign busy-wait, with the next data request generated 10 ms or more later;
        // nevertheless for power reasons, we don't want to see too many of these.
        me->mBytesWritten += actualSize; // benign race with reader.
        buffer->size = actualSize;
        } break;

    case AudioTrack::EVENT_STREAM_END:
        // currently only occurs for offloaded callbacks
        ALOGV("callbackwrapper: deliver EVENT_STREAM_END");
        (*me->mCallback)(me, NULL /* buffer */, 0 /* size */,
                me->mCallbackCookie, CB_EVENT_STREAM_END);
        break;

    case AudioTrack::EVENT_NEW_IAUDIOTRACK:
        ALOGV("callbackwrapper: deliver EVENT_TEAR_DOWN");
        (*me->mCallback)(me, NULL /* buffer */, 0 /* size */,
                me->mCallbackCookie, CB_EVENT_TEAR_DOWN);
        break;

    case AudioTrack::EVENT_UNDERRUN:
        // This occurs when there is no data available, typically
        // when there is a failure to supply data to the AudioTrack. It can also
        // occur in non-offloaded mode when the audio device comes out of standby.
        //
        // If an AudioTrack underruns it outputs silence. Since this happens suddenly
        // it may sound like an audible pop or glitch.
        //
        // The underrun event is sent once per track underrun; the condition is reset
        // when more data is sent to the AudioTrack.
        break;

    default:
        ALOGE("received unknown event type: %d inside CallbackWrapper !", event);
    }
    data->unlock();
}

Below is the implementation of AudioPlayer::AudioSinkCallback. Suppose the event delivered is CB_EVENT_FILL_BUFFER; AudioPlayer::fillBuffer is then called to fill the buffer with data.
Inside AudioPlayer::fillBuffer the work boils down to err = mSource->read(&mInputBuffer, &options); that is, it pulls decoded data from the decoder's MediaBuffer. A heavily abridged sketch of it follows the listing below.

// static
size_t AudioPlayer::AudioSinkCallback(
        MediaPlayerBase::AudioSink * /* audioSink */,
        void *buffer, size_t size, void *cookie,
        MediaPlayerBase::AudioSink::cb_event_t event) {
    AudioPlayer *me = (AudioPlayer *)cookie;
    switch(event) {
    case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER:
        return me->fillBuffer(buffer, size);

    case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
        ALOGV("AudioSinkCallback: stream end");
        me->mReachedEOS = true;
        me->notifyAudioEOS();
        break;

    case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
        ALOGV("AudioSinkCallback: Tear down event");
        me->mObserver->postAudioTearDown();
        break;
    }
    return 0;
}
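For completeness, here is a heavily abridged sketch of AudioPlayer::fillBuffer (error handling, seeking, and clock bookkeeping omitted; treat it as an outline rather than the verbatim source). It drains the pre-fetched mFirstBuffer first, then keeps calling mSource->read until the sink's buffer is full:

size_t AudioPlayer::fillBuffer(void *data, size_t size) {
    MediaSource::ReadOptions options;  // (seek options omitted)
    size_t size_done = 0;
    size_t size_remaining = size;
    while (size_remaining > 0) {
        if (mInputBuffer == NULL) {
            status_t err;
            if (mIsFirstBuffer) {
                // Consume the buffer that start() pre-fetched.
                mInputBuffer = mFirstBuffer;
                mFirstBuffer = NULL;
                err = mFirstBufferResult;
                mIsFirstBuffer = false;
            } else {
                // Pull the next decoded buffer from the OMXCodec.
                err = mSource->read(&mInputBuffer, &options);
            }
            if (err != OK) {
                // ... (EOS / error handling omitted)
                break;
            }
        }

        size_t copy = size_remaining;
        if (copy > mInputBuffer->range_length()) {
            copy = mInputBuffer->range_length();
        }
        memcpy((char *)data + size_done,
               (const char *)mInputBuffer->data() + mInputBuffer->range_offset(),
               copy);
        mInputBuffer->set_range(mInputBuffer->range_offset() + copy,
                                mInputBuffer->range_length() - copy);
        size_done += copy;
        size_remaining -= copy;
        // ... (release mInputBuffer once fully consumed)
    }
    return size_done;
}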

Having walked through the open flow, let's look at the start method. It is straightforward: it simply calls mTrack->start().

status_t MediaPlayerService::AudioOutput::start()
{
    if (mTrack != 0) {
        mTrack->setVolume(mLeftVolume, mRightVolume);
        mTrack->setAuxEffectSendLevel(mSendLevel);
        return mTrack->start();
    }
    return NO_INIT;
}

Inside AudioTrack::start, mAudioTrack->start() is invoked:

status_t AudioTrack::start()
{
    State previousState = mState;
    if (previousState == STATE_PAUSED_STOPPING) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_ACTIVE;
    }
    (void) updateAndGetPosition_l();
    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        if (previousState == STATE_STOPPING) {
            mProxy->interrupt();
        } else {
            t->resume();
        }
    } else {
        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
        get_sched_policy(0, &mPreviousSchedulingGroup);
        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
    }

    status_t status = NO_ERROR;
    if (!(flags & CBLK_INVALID)) {
        status = mAudioTrack->start();
        if (status == DEAD_OBJECT) {
            flags |= CBLK_INVALID;
        }
    }
    return status;
}

Tracing the source shows that mAudioTrack is created in createTrack_l(), via audioFlinger->createTrack, as listed below. With all of the above in place, once the AudioTrack is started it periodically invokes the callback chain to pull data from the decoder for output.

// must be called with mLock held
status_t AudioTrack::createTrack_l()
{
    // ...
    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
                                                      mSampleRate,
                                                      mFormat,
                                                      mChannelMask,
                                                      &temp,
                                                      &trackFlags,
                                                      mSharedBuffer,
                                                      output,
                                                      tid,
                                                      &mSessionId,
                                                      mClientUid,
                                                      &status);
    // ...
    mAudioTrack = track;
    return status;
}
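To recap the whole pull chain in one self-contained toy program (purely illustrative; none of these types are Android's): the render loop stands in for AudioTrackThread/processAudioBuffer, MockPlayer for AudioPlayer::fillBuffer, and MockSource for OMXCodec::read:

#include <cstdio>
#include <cstring>
#include <vector>

struct MockSource {                       // stands in for OMXCodec
    int framesLeft = 3;
    bool read(std::vector<char> *out) {   // decode loop: one buffer per call
        if (framesLeft-- <= 0) return false;
        out->assign(4, 'x');
        return true;
    }
};

struct MockPlayer {                       // stands in for AudioPlayer
    MockSource source;
    size_t fillBuffer(char *buf, size_t size) {
        std::vector<char> pcm;
        if (!source.read(&pcm)) return 0; // EOS: report zero bytes
        size_t n = pcm.size() < size ? pcm.size() : size;
        memcpy(buf, pcm.data(), n);
        return n;
    }
};

// Stands in for AudioTrackThread -> processAudioBuffer -> mCbf ->
// CallbackWrapper -> mCallback -> AudioSinkCallback -> fillBuffer.
int main() {
    MockPlayer player;
    char buf[8];
    size_t n;
    while ((n = player.fillBuffer(buf, sizeof(buf))) > 0) {
        printf("rendered %zu bytes\n", n);
    }
    printf("stream end\n");
    return 0;
}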

No picture, no truth, so here is the diagram one more time! It should be much clearer this time around. If you still have questions, feel free to email me.
