Stagefright Framework Explained (Part 1): The Audio/Video Playback Flow

Last updated: 2022-04-01 14:45:56

Please credit the source when reposting: http://blog.csdn.net/itachi85/article/details/7216639

Google introduced Stagefright in Android 2.0, and in Android 2.3 it replaced OpenCore as the default media framework. Stagefright lives in Android as a shared library (libstagefright.so), and within it AwesomePlayer is the class used to play video/audio. AwesomePlayer exposes many APIs that the upper-layer application (Java/JNI) can call. Here I will briefly walk through the video playback flow, based on the Android 2.2 source code.

In Java, to play a video we usually write:

~~~
MediaPlayer mp = new MediaPlayer();
mp.setDataSource(PATH_TO_FILE);
mp.prepare();
mp.start();
~~~

In Stagefright, the corresponding handling looks like this:

1. Assign the absolute path of the media file to uri:

~~~
status_t AwesomePlayer::setDataSource(
        const char *uri, const KeyedVector<String8, String8> *headers) {
    Mutex::Autolock autoLock(mLock);
    return setDataSource_l(uri, headers);
}

status_t AwesomePlayer::setDataSource_l(
        const char *uri, const KeyedVector<String8, String8> *headers) {
    reset_l();

    mUri = uri;

    if (headers) {
        mUriHeaders = *headers;
    }

    // The actual work will be done during preparation in the call to
    // ::finishSetDataSource_l to avoid blocking the calling thread in
    // setDataSource for any significant time.

    return OK;
}
~~~

2. Start mQueue:

~~~
status_t AwesomePlayer::prepare() {
    Mutex::Autolock autoLock(mLock);
    return prepare_l();
}

status_t AwesomePlayer::prepare_l() {
    if (mFlags & PREPARED) {
        return OK;
    }

    if (mFlags & PREPARING) {
        return UNKNOWN_ERROR;
    }

    mIsAsyncPrepare = false;
    status_t err = prepareAsync_l();

    if (err != OK) {
        return err;
    }

    while (mFlags & PREPARING) {
        mPreparedCondition.wait(mLock);
    }

    return mPrepareResult;
}

status_t AwesomePlayer::prepareAsync() {
    Mutex::Autolock autoLock(mLock);

    if (mFlags & PREPARING) {
        return UNKNOWN_ERROR;  // async prepare already pending
    }

    mIsAsyncPrepare = true;
    return prepareAsync_l();
}

status_t AwesomePlayer::prepareAsync_l() {
    if (mFlags & PREPARING) {
        return UNKNOWN_ERROR;  // async prepare already pending
    }

    if (!mQueueStarted) {
        mQueue.start();
        mQueueStarted = true;
    }

    mFlags |= PREPARING;
    mAsyncPrepareEvent = new AwesomeEvent(
            this, &AwesomePlayer::onPrepareAsyncEvent);

    mQueue.postEvent(mAsyncPrepareEvent);

    return OK;
}
~~~
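Here mQueue is AwesomePlayer's TimedEventQueue: prepareAsync_l() wraps onPrepareAsyncEvent in an AwesomeEvent, posts it, and a dedicated event thread fires it later. As a rough mental model only (not the actual Stagefright implementation), a delayed-event queue can be sketched with the C++11 standard library as below; the class and member names are made up for illustration:

~~~
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>

// Illustrative stand-in for a timed event queue: events run on a worker
// thread at (or after) their scheduled time, the way mQueue fires
// AwesomeEvent objects such as mAsyncPrepareEvent and mVideoEvent.
class DelayedEventQueue {
public:
    using Clock = std::chrono::steady_clock;
    using Event = std::function<void()>;

    void start() {
        mRunning = true;
        mThread = std::thread([this] { threadLoop(); });
    }

    void stop() {
        {
            std::lock_guard<std::mutex> lock(mMutex);
            mRunning = false;
        }
        mCondition.notify_all();
        if (mThread.joinable()) mThread.join();
    }

    // postEvent(event) is simply postEventWithDelay(event, 0).
    void postEventWithDelay(Event event, int64_t delayUs) {
        std::lock_guard<std::mutex> lock(mMutex);
        mQueue.push({Clock::now() + std::chrono::microseconds(delayUs),
                     std::move(event)});
        mCondition.notify_all();
    }

private:
    struct QueueItem {
        Clock::time_point when;
        Event event;
        // Earliest deadline first: std::priority_queue is a max-heap,
        // so invert the comparison.
        bool operator<(const QueueItem &other) const {
            return when > other.when;
        }
    };

    void threadLoop() {
        std::unique_lock<std::mutex> lock(mMutex);
        while (mRunning) {
            if (mQueue.empty()) {
                mCondition.wait(lock);
                continue;
            }
            Clock::time_point when = mQueue.top().when;
            if (Clock::now() < when) {
                mCondition.wait_until(lock, when);
                continue;  // re-check: a nearer event may have been posted
            }
            Event event = mQueue.top().event;
            mQueue.pop();
            lock.unlock();
            event();       // fire outside the lock, like onPrepareAsyncEvent
            lock.lock();
        }
    }

    std::priority_queue<QueueItem> mQueue;
    std::mutex mMutex;
    std::condition_variable mCondition;
    std::thread mThread;
    bool mRunning = false;
};
~~~

With that model in mind, step 3 below is simply the callback that runs once the posted prepare event reaches the head of the queue.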
3. onPrepareAsyncEvent is triggered. Based on the header of the incoming file it creates the matching extractor (parser) and initializes the audio and video decoders:

~~~
void AwesomePlayer::onPrepareAsyncEvent() {
    sp<Prefetcher> prefetcher;

    {
        Mutex::Autolock autoLock(mLock);

        if (mFlags & PREPARE_CANCELLED) {
            LOGI("prepare was cancelled before doing anything");
            abortPrepare(UNKNOWN_ERROR);
            return;
        }

        if (mUri.size() > 0) {
            // The extractor is created inside this method.
            status_t err = finishSetDataSource_l();

            if (err != OK) {
                abortPrepare(err);
                return;
            }
        }

        if (mVideoTrack != NULL && mVideoSource == NULL) {
            // Initialize the video decoder.
            status_t err = initVideoDecoder();

            if (err != OK) {
                abortPrepare(err);
                return;
            }
        }

        if (mAudioTrack != NULL && mAudioSource == NULL) {
            // Initialize the audio decoder.
            status_t err = initAudioDecoder();

            if (err != OK) {
                abortPrepare(err);
                return;
            }
        }

        prefetcher = mPrefetcher;
    }

    if (prefetcher != NULL) {
        {
            Mutex::Autolock autoLock(mLock);
            if (mFlags & PREPARE_CANCELLED) {
                LOGI("prepare was cancelled before preparing the prefetcher");

                prefetcher.clear();
                abortPrepare(UNKNOWN_ERROR);
                return;
            }
        }

        LOGI("calling prefetcher->prepare()");
        status_t result =
            prefetcher->prepare(&AwesomePlayer::ContinuePreparation, this);

        prefetcher.clear();

        if (result == OK) {
            LOGI("prefetcher is done preparing");
        } else {
            Mutex::Autolock autoLock(mLock);

            CHECK_EQ(result, -EINTR);

            LOGI("prefetcher->prepare() was cancelled early.");
            abortPrepare(UNKNOWN_ERROR);
            return;
        }
    }

    Mutex::Autolock autoLock(mLock);

    if (mIsAsyncPrepare) {
        if (mVideoWidth < 0 || mVideoHeight < 0) {
            notifyListener_l(MEDIA_SET_VIDEO_SIZE, 0, 0);
        } else {
            notifyListener_l(MEDIA_SET_VIDEO_SIZE, mVideoWidth, mVideoHeight);
        }

        notifyListener_l(MEDIA_PREPARED);
    }

    mPrepareResult = OK;
    mFlags &= ~(PREPARING|PREPARE_CANCELLED);
    mFlags |= PREPARED;
    mAsyncPrepareEvent = NULL;
    mPreparedCondition.broadcast();

    postBufferingEvent_l();
}
~~~

~~~
status_t AwesomePlayer::finishSetDataSource_l() {
    sp<DataSource> dataSource;

    if (!strncasecmp("http://", mUri.string(), 7)) {
        mConnectingDataSource = new HTTPDataSource(mUri, &mUriHeaders);

        mLock.unlock();
        status_t err = mConnectingDataSource->connect();
        mLock.lock();

        if (err != OK) {
            mConnectingDataSource.clear();

            LOGI("mConnectingDataSource->connect() returned %d", err);
            return err;
        }

        dataSource = new CachingDataSource(
                mConnectingDataSource, 64 * 1024, 10);

        mConnectingDataSource.clear();
    } else {
        dataSource = DataSource::CreateFromURI(mUri.string(), &mUriHeaders);
    }

    if (dataSource == NULL) {
        return UNKNOWN_ERROR;
    }

    // Create the extractor that matches the container format.
    sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource);

    if (extractor == NULL) {
        return UNKNOWN_ERROR;
    }

    dataSource->getDrmInfo(&mDecryptHandle, &mDrmManagerClient);
    if (mDecryptHandle != NULL
            && RightsStatus::RIGHTS_VALID != mDecryptHandle->status) {
        notifyListener_l(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, ERROR_NO_LICENSE);
    }

    if (dataSource->flags() & DataSource::kWantsPrefetching) {
        mPrefetcher = new Prefetcher;
    }

    return setDataSource_l(extractor);
}
~~~

4. Use the extractor to split the file into audio and video tracks (A/V demuxing):

~~~
status_t AwesomePlayer::setDataSource_l(const sp<MediaExtractor> &extractor) {
    bool haveAudio = false;
    bool haveVideo = false;

    for (size_t i = 0; i < extractor->countTracks(); ++i) {
        sp<MetaData> meta = extractor->getTrackMetaData(i);

        const char *mime;
        CHECK(meta->findCString(kKeyMIMEType, &mime));

        if (!haveVideo && !strncasecmp(mime, "video/", 6)) {
            setVideoSource(extractor->getTrack(i));
            haveVideo = true;
        } else if (!haveAudio && !strncasecmp(mime, "audio/", 6)) {
            setAudioSource(extractor->getTrack(i));
            haveAudio = true;
        }

        if (haveAudio && haveVideo) {
            break;
        }
    }

    if (!haveAudio && !haveVideo) {
        return UNKNOWN_ERROR;
    }

    mExtractorFlags = extractor->flags();

    return OK;
}
~~~

5. Hand the demuxed video and audio tracks to mVideoTrack and mAudioTrack respectively:

~~~
void AwesomePlayer::setVideoSource(sp<MediaSource> source) {
    CHECK(source != NULL);

    if (mPrefetcher != NULL) {
        source = mPrefetcher->addSource(source);
    }

    mVideoTrack = source;
}

void AwesomePlayer::setAudioSource(sp<MediaSource> source) {
    CHECK(source != NULL);

    if (mPrefetcher != NULL) {
        source = mPrefetcher->addSource(source);
    }

    mAudioTrack = source;
}
~~~

6. Select the video decoder based on the codec type found in mVideoTrack, and likewise select the audio decoder based on the codec type in mAudioTrack:

~~~
status_t AwesomePlayer::initVideoDecoder() {
    mVideoSource = OMXCodec::Create(
            mClient.interface(), mVideoTrack->getFormat(),
            false, // createEncoder
            mVideoTrack);

    if (mVideoSource != NULL) {
        int64_t durationUs;
        if (mVideoTrack->getFormat()->findInt64(kKeyDuration, &durationUs)) {
            Mutex::Autolock autoLock(mMiscStateLock);
            if (mDurationUs < 0 || durationUs > mDurationUs) {
                mDurationUs = durationUs;
            }
        }

        CHECK(mVideoTrack->getFormat()->findInt32(kKeyWidth, &mVideoWidth));
        CHECK(mVideoTrack->getFormat()->findInt32(kKeyHeight, &mVideoHeight));

        status_t err = mVideoSource->start();

        if (err != OK) {
            mVideoSource.clear();
            return err;
        }
    }

    return mVideoSource != NULL ? OK : UNKNOWN_ERROR;
}
~~~
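OMXCodec::Create() picks a concrete decoder by matching the MIME type taken from the track's format (kKeyMIMEType) against a table of known OMX components and instantiating a candidate. A hypothetical sketch of that lookup idea follows; the table contents and component names here are invented for illustration and are not the real OMXCodec tables:

~~~
#include <cstdio>
#include <string>
#include <strings.h>
#include <vector>

// Maps a MIME type to a candidate decoder component (illustrative only).
struct DecoderEntry {
    const char *mime;
    const char *component;
};

static const DecoderEntry kDecoderTable[] = {
    { "video/avc",       "OMX.example.avc.decoder"   },
    { "video/mp4v-es",   "OMX.example.mpeg4.decoder" },
    { "audio/mp4a-latm", "OMX.example.aac.decoder"   },
    { "audio/mpeg",      "OMX.example.mp3.decoder"   },
};

// Returns the candidate component names for a MIME type, in table order.
std::vector<std::string> findMatchingCodecs(const char *mime) {
    std::vector<std::string> matches;
    for (const DecoderEntry &entry : kDecoderTable) {
        if (!strcasecmp(entry.mime, mime)) {
            matches.push_back(entry.component);
        }
    }
    return matches;
}

int main() {
    // e.g. the MIME type stored in mVideoTrack->getFormat() for H.264 video
    for (const std::string &name : findMatchingCodecs("video/avc")) {
        std::printf("candidate decoder: %s\n", name.c_str());
    }
    return 0;
}
~~~

In the real framework a MIME type usually maps to several candidates (hardware components before the software fallback), and they are tried in order until one can be allocated and configured.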
7. Post mVideoEvent into mQueue, start decoding and playback, and let mVideoRenderer draw the video frames. The audio data is managed by AudioPlayer, which ultimately hands the decoded data to AudioTrack; AudioTrack then interacts with AudioFlinger, which finally delivers the data to the audio HAL layer (we will cover that part in a later article):

~~~
status_t AwesomePlayer::play() {
    Mutex::Autolock autoLock(mLock);
    return play_l();
}

status_t AwesomePlayer::play_l() {
    if (mFlags & PLAYING) {
        return OK;
    }

    if (!(mFlags & PREPARED)) {
        status_t err = prepare_l();

        if (err != OK) {
            return err;
        }
    }

    mFlags |= PLAYING;
    mFlags |= FIRST_FRAME;

    bool deferredAudioSeek = false;

    if (mAudioSource != NULL) {
        if (mAudioPlayer == NULL) {
            if (mAudioSink != NULL) {
                // The audio data is managed by AudioPlayer.
                mAudioPlayer = new AudioPlayer(mAudioSink);
                mAudioPlayer->setSource(mAudioSource);

                // We've already started the MediaSource in order to enable
                // the prefetcher to read its data.
                // AudioPlayer::start() drives the audio source to decode,
                // hands the decoded data to AudioTrack, and calls
                // AudioTrack::start() so it can interact with AudioFlinger.
                status_t err = mAudioPlayer->start(
                        true /* sourceAlreadyStarted */);

                if (err != OK) {
                    delete mAudioPlayer;
                    mAudioPlayer = NULL;

                    mFlags &= ~(PLAYING | FIRST_FRAME);

                    return err;
                }

                delete mTimeSource;
                mTimeSource = mAudioPlayer;

                deferredAudioSeek = true;

                mWatchForAudioSeekComplete = false;
                mWatchForAudioEOS = true;
            }
        } else {
            mAudioPlayer->resume();
        }

        postCheckAudioStatusEvent_l();
    }

    if (mTimeSource == NULL && mAudioPlayer == NULL) {
        mTimeSource = new SystemTimeSource;
    }

    if (mVideoSource != NULL) {
        // Kick off video playback by posting mVideoEvent into the queue.
        postVideoEvent_l();
    }

    if (deferredAudioSeek) {
        // If there was a seek request while we were paused
        // and we're just starting up again, honor the request now.
        seekAudioIfNecessary_l();
    }

    if (mFlags & AT_EOS) {
        // Legacy behaviour, if a stream finishes playing and then
        // is started again, we play from the start...
        seekTo_l(0);
    }

    if (mDecryptHandle != NULL) {
        int64_t position;
        getPosition(&position);
        mDrmManagerClient->setPlaybackStatus(mDecryptHandle,
                Playback::START, position / 1000);
    }

    return OK;
}

void AwesomePlayer::postVideoEvent_l(int64_t delayUs) {
    if (mVideoEventPending) {
        return;
    }

    mVideoEventPending = true;
    mQueue.postEventWithDelay(mVideoEvent, delayUs < 0 ? 10000 : delayUs);
}
~~~

When mVideoEvent fires, onVideoEvent (shown here in simplified form) reads a decoded frame from mVideoSource, hands it to mVideoRenderer to draw, and posts the next video event:

~~~
void AwesomePlayer::onVideoEvent() {
    mVideoSource->read(&mVideoBuffer, &options);
    mVideoRenderer->render(mVideoBuffer);
    postVideoEvent_l();
}
~~~
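The real onVideoEvent additionally performs A/V synchronization: each decoded frame carries a timestamp (kKeyTime), which is compared against the time source (the AudioPlayer clock when audio is present, SystemTimeSource otherwise); frames that are already too late are dropped, and frames that arrive too early cause the next postVideoEvent_l() to be delayed. A minimal standalone sketch of that decision, with helper names of my own and thresholds roughly mirroring the Android 2.2 code:

~~~
#include <cstdint>
#include <cstdio>

// What to do with the frame just read from mVideoSource.
enum class FrameAction { Render, Drop, TooEarly };

// Conceptual sketch of the sync decision inside the real onVideoEvent.
// clockUs is the playback clock from the time source; frameTimeUs is the
// frame timestamp stored under kKeyTime.
FrameAction classifyFrame(int64_t frameTimeUs, int64_t clockUs) {
    const int64_t latenessUs = clockUs - frameTimeUs;

    if (latenessUs > 40000) {
        return FrameAction::Drop;      // too late: skip to catch up
    }
    if (latenessUs < -10000) {
        return FrameAction::TooEarly;  // repost the video event and retry
    }
    return FrameAction::Render;        // on time: hand it to mVideoRenderer
}

int main() {
    const int64_t clockUs = 1000000;   // pretend the audio clock is at 1 s
    const int64_t frames[] = {940000, 995000, 1050000};

    for (int64_t ts : frames) {
        switch (classifyFrame(ts, clockUs)) {
            case FrameAction::Drop:
                std::printf("%lld us: drop\n", (long long)ts);
                break;
            case FrameAction::TooEarly:
                std::printf("%lld us: too early, repost in 10 ms\n", (long long)ts);
                break;
            case FrameAction::Render:
                std::printf("%lld us: render\n", (long long)ts);
                break;
        }
    }
    return 0;
}
~~~

This is why playback stays in sync even though video frames are pulled by a timer-driven event rather than pushed by the decoder: the audio clock is the master, and the video event simply schedules itself around it.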