2025-02-17

In the earlier testWriteByte sample, after creating the AudioTrack object we called its write function to play the audio.
Today let's look at how that write function is implemented.


*****************************************Source*************************************************
    public int write(byte[] audioData,int offsetInBytes, int sizeInBytes) {
        if ((mDataLoadMode == MODE_STATIC)
                && (mState == STATE_NO_STATIC_DATA)
                && (sizeInBytes > 0)) {
            mState = STATE_INITIALIZED;
        }


        if (mState != STATE_INITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }


        if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
                || (offsetInBytes + sizeInBytes > audioData.length)) {
            return ERROR_BAD_VALUE;
        }


        return native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat);
    }
**********************************************************************************************
Source path:
frameworks\base\media\java\android\media\AudioTrack.java
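Before diving into the implementation, here is a minimal usage sketch (mine, not from the original post) of the stream-mode write loop this walkthrough analyzes; readPcmFromSource() is a hypothetical helper standing in for whatever supplies the PCM data:

    int sampleRate = 44100;
    int minBuf = AudioTrack.getMinBufferSize(sampleRate,
            AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
    AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate,
            AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
            minBuf, AudioTrack.MODE_STREAM);
    track.play();
    byte[] pcm = new byte[minBuf];
    int len;
    while ((len = readPcmFromSource(pcm)) > 0) {     // hypothetical data source
        int written = track.write(pcm, 0, len);      // offset 0: play from the start of pcm
        if (written < 0) break;                      // ERROR_INVALID_OPERATION / ERROR_BAD_VALUE
    }
    track.stop();
    track.release();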


#################Commentary################################################
    /**
     * Writes the audio data to the audio hardware for playback.
     * @param audioData the array that holds the data to play.
     * @param offsetInBytes the offset expressed in bytes in audioData where the data to play
     *    starts.
     * @param sizeInBytes the number of bytes to read in audioData after the offset.
     * @return the number of bytes that were written or {@link #ERROR_INVALID_OPERATION}
     *    if the object wasn't properly initialized, or {@link #ERROR_BAD_VALUE} if
     *    the parameters don't resolve to valid data and indexes.
     */
// First, a note on the Javadoc: offsetInBytes is the position within audioData at which the data to be played starts.
    public int write(byte[] audioData,int offsetInBytes, int sizeInBytes) {
        if ((mDataLoadMode == MODE_STATIC)
                && (mState == STATE_NO_STATIC_DATA)
                && (sizeInBytes > 0)) {
            mState = STATE_INITIALIZED;
        }


        if (mState != STATE_INITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }


        if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
                || (offsetInBytes + sizeInBytes > audioData.length)) {
            return ERROR_BAD_VALUE;
        }


// The code above only checks state and parameters; the real work is done in native code.
        return native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat);
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Path: frameworks\base\core\jni\android_media_AudioTrack.cpp
The corresponding native function is android_media_AudioTrack_native_write, implemented as follows:
static jint android_media_AudioTrack_native_write(JNIEnv *env,  jobject thiz,
                                                  jbyteArray javaAudioData,
                                                  jint offsetInBytes, jint sizeInBytes,
                                                  jint javaAudioFormat) {
    jbyte* cAudioData = NULL;
    AudioTrack *lpTrack = NULL;
    //LOGV("android_media_AudioTrack_native_write(offset=%d, sizeInBytes=%d) called",
    //    offsetInBytes, sizeInBytes);
   
    // get the audio track to load with samples
// When the native AudioTrack object was created, its pointer was saved in the Java object;
// now that we need it, fetch it back.
    lpTrack = (AudioTrack *)env->GetIntField(thiz, javaAudioTrackFields.nativeTrackInJavaObj);
    if (lpTrack == NULL) {
        jniThrowException(env, "java/lang/IllegalStateException",
            "Unable to retrieve AudioTrack pointer for write()");
        return 0;
    }


    // get the pointer for the audio data from the java array
    if (javaAudioData) {
        cAudioData = (jbyte *)env->GetPrimitiveArrayCritical(javaAudioData, NULL);
        if (cAudioData == NULL) {
            LOGE("Error retrieving source of audio data to play, can't play");
            return 0; // out of memory or no data to load
        }
    } else {
        LOGE("NULL java array of audio data to play, can't play");
        return 0;
    }


    jint written = writeToTrack(lpTrack, javaAudioFormat, cAudioData, offsetInBytes, sizeInBytes);

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
jint writeToTrack(AudioTrack* pTrack, jint audioFormat, jbyte* data,
                  jint offsetInBytes, jint sizeInBytes) {
    // give the data to the native AudioTrack object (the data starts at the offset)
    ssize_t written = 0;
    // regular write() or copy the data to the AudioTrack's shared memory?
// Check whether sharedBuffer() is 0.
// In stream mode sharedBuffer is 0, i.e. no shared memory is used, because the data arrives write by write during playback.
// In static mode (MODE_STATIC on the Java side), shared memory is used: the data is written once up front, and playback then just reads it from the shared memory.
    if (pTrack->sharedBuffer() == 0) {
// In stream mode, call the native AudioTrack object's write function directly.
        written = pTrack->write(data + offsetInBytes, sizeInBytes);
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
ssize_t AudioTrack::write(const void* buffer, size_t userSize)
{
// If a shared buffer exists, we should never have ended up here.
    if (mSharedBuffer != 0) return INVALID_OPERATION;


// Never trust the caller's input.
    if (ssize_t(userSize) < 0) {
        // sanity-check. user is most-likely passing an error code.
        LOGE("AudioTrack::write(buffer=%p, size=%u (%d)",
                buffer, userSize, userSize);
        return BAD_VALUE;
    }


    LOGV("write %p: %d bytes, mActive=%d", this, userSize, mActive);


    ssize_t written = 0;
    const int8_t *src = (const int8_t *)buffer;
    Buffer audioBuffer;
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Here is the definition of Buffer:
    /* Create Buffer on the stack and pass it to obtainBuffer()
     * and releaseBuffer().
     */


    class Buffer
    {
    public:
        enum {
            MUTE    = 0x00000001
        };
        uint32_t    flags;
        int         channelCount;
        int         format;
        size_t      frameCount;
        size_t      size;
        union {
            void*       raw;
            short*      i16;
            int8_t*     i8;
        };
    };
----------------------------------------------------------------


    do {
        audioBuffer.frameCount = userSize/frameSize();


        // Calling obtainBuffer() with a negative wait count causes
        // an (almost) infinite wait time.
// Obtain a buffer to write the data into.
        status_t err = obtainBuffer(&audioBuffer, -1);
        if (err < 0) {
            // out of buffers, return #bytes written
            if (err == status_t(NO_MORE_BUFFERS))
                break;
            return ssize_t(err);
        }
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
{
    int active;
    status_t result;
    audio_track_cblk_t* cblk = mCblk;
    uint32_t framesReq = audioBuffer->frameCount;
    uint32_t waitTimeMs = (waitCount < 0) ? cblk->bufferTimeoutMs : WAIT_PERIOD_MS;


    audioBuffer->frameCount  = 0;
    audioBuffer->size = 0;


// What exactly is audio_track_cblk_t? It is actually quite an important structure.
// We have run into it before; this time let's pin down exactly where it is born.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
The audio_track_cblk_t object is created when the AudioTrack itself is created.
How execution reaches the AudioTrack constructor was covered earlier, so I won't repeat it; the call chain from the constructor onward is:
1. The AudioTrack constructor calls AudioTrack::set:
    mStatus = set(streamType, sampleRate, format, channels,
            0, flags, cbf, user, notificationFrames,
            sharedBuffer, false, sessionId);
2. AudioTrack::set calls AudioTrack::createTrack:
    // create the IAudioTrack
    status_t status = createTrack(streamType, sampleRate, format, channelCount,
                                  frameCount, flags, sharedBuffer, output, true);
3. AudioTrack::createTrack calls AudioFlinger::createTrack:
    sp<IAudioTrack> track = audioFlinger->createTrack(getpid(),
                                                      streamType,
                                                      sampleRate,
                                                      format,
                                                      channelCount,
                                                      frameCount,
                                                      ((uint16_t)flags) << 16,
                                                      sharedBuffer,
                                                      output,
                                                      &mSessionId,
                                                      &status);
and then assigns the member variable mCblk:
    mCblk = static_cast<audio_track_cblk_t*>(cblk->pointer());
    mCblk->flags |= CBLK_DIRECTION_OUT;
4. AudioFlinger::createTrack calls AudioFlinger::PlaybackThread::createTrack_l:
        track = thread->createTrack_l(client, streamType, sampleRate, format,
                channelCount, frameCount, sharedBuffer, lSessionId, &lStatus);
5. AudioFlinger::PlaybackThread::createTrack_l creates the AudioFlinger::PlaybackThread::Track object:
        track = new Track(this, client, streamType, sampleRate, format,
                channelCount, frameCount, sharedBuffer, sessionId);
6. The class AudioFlinger::PlaybackThread::Track is a subclass of AudioFlinger::ThreadBase::TrackBase.
7. The final birthplace is in the constructor of AudioFlinger::ThreadBase::TrackBase:
mCblkMemory = client->heap()->allocate(size);
mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer());
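To make step 7 concrete, here is a toy model of that allocation (my numbers; CBLK_SIZE is a placeholder for the real sizeof(audio_track_cblk_t)): in stream mode one shared-memory block holds the control block, immediately followed by the PCM data area.

    int CBLK_SIZE = 64;                        // stand-in for sizeof(audio_track_cblk_t)
    int frameCount = 512, channelCount = 2, bytesPerSample = 2;
    int bufferSize = frameCount * channelCount * bytesPerSample;
    int size = CBLK_SIZE + bufferSize;         // cblk header + data area in one block
    // client->heap()->allocate(size) returns this block; the cblk sits at its
    // start and the data buffer begins right after the header.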
----------------------------------------------------------------
    uint32_t framesAvail = cblk->framesAvailable();
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
uint32_t audio_track_cblk_t::framesAvailable()
{
    Mutex::Autolock _l(lock);
    return framesAvailable_l();
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
uint32_t audio_track_cblk_t::framesAvailable_l()
{
    uint64_t u = this->user;
    uint64_t s = this->server;


++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#define CBLK_DIRECTION_MSK      0x0002
#define CBLK_DIRECTION_OUT      0x0002  // this cblk is for an AudioTrack
#define CBLK_DIRECTION_IN       0x0000  // this cblk is for an AudioRecord
----------------------------------------------------------------
// As you can see, CBLK_DIRECTION_MSK and CBLK_DIRECTION_OUT have the same value,
// so testing CBLK_DIRECTION_MSK amounts to testing CBLK_DIRECTION_OUT.
// We are using the track for playback, so here the flag is of course CBLK_DIRECTION_OUT.
    if (flags & CBLK_DIRECTION_MSK) {
        uint64_t limit = (s < loopStart) ? s : loopStart;
        return limit + frameCount - u;
    } else {
        return frameCount + u - s;
    }
}
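Plugging in some concrete numbers (mine, for illustration) shows what the playback branch computes, assuming no loop is set so that limit == server:

    // user/server are monotonically increasing frame counters.
    long user = 1000;       // frames the app has written so far
    long server = 744;      // frames the mixer has consumed so far
    long frameCount = 512;  // capacity of the ring buffer, in frames
    long limit = server;    // min(server, loopStart) with no loop active
    long framesAvail = limit + frameCount - user;   // 744 + 512 - 1000 = 256
    // i.e. capacity minus the 256 frames written but not yet consumed.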
----------------------------------------------------------------
}
----------------------------------------------------------------


// If no frames are available, loop here until framesAvail becomes non-zero.
    if (framesAvail == 0) {
        cblk->lock.lock();
        goto start_loop_here;
        while (framesAvail == 0) {
            active = mActive;
            if (UNLIKELY(!active)) {
                LOGV("Not active and NO_MORE_BUFFERS");
                cblk->lock.unlock();
                return NO_MORE_BUFFERS;
            }
            if (UNLIKELY(!waitCount)) {
                cblk->lock.unlock();
                return WOULD_BLOCK;
            }
            if (!(cblk->flags & CBLK_INVALID_MSK)) {
                result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs));
            }
            if (cblk->flags & CBLK_INVALID_MSK) {
                LOGW("obtainBuffer() track %p invalidated, creating a new one", this);
                // no need to clear the invalid flag as this cblk will not be used anymore
                cblk->lock.unlock();
                goto create_new_track;
            }
            if (__builtin_expect(result!=NO_ERROR, false)) {
                cblk->waitTimeMs += waitTimeMs;
                if (cblk->waitTimeMs >= cblk->bufferTimeoutMs) {
                    // timing out when a loop has been set and we have already written upto loop end
                    // is a normal condition: no need to wake AudioFlinger up.
                    if (cblk->user < cblk->loopEnd) {
                        LOGW(   "obtainBuffer timed out (is the CPU pegged?) %p "
                                "user=%08llx, server=%08llx", this, cblk->user, cblk->server);
                        //unlock cblk mutex before calling mAudioTrack->start() (see issue #1617140)
                        cblk->lock.unlock();
                        result = mAudioTrack->start();
                        if (result == DEAD_OBJECT) {
                            LOGW("obtainBuffer() dead IAudioTrack: creating a new one");
create_new_track:
                            result = createTrack(mStreamType, cblk->sampleRate, mFormat, mChannelCount,
                                                 mFrameCount, mFlags, mSharedBuffer, getOutput(), false);
                            if (result == NO_ERROR) {
                                cblk = mCblk;
                                cblk->bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
                                mAudioTrack->start();
                            }
                        }
                        cblk->lock.lock();
                    }
                    cblk->waitTimeMs = 0;
                }


                if (--waitCount == 0) {
                    cblk->lock.unlock();
                    return TIMED_OUT;
                }
            }
            // read the server count again
        start_loop_here:
            framesAvail = cblk->framesAvailable_l();
        }
        cblk->lock.unlock();
    }


    // restart track if it was disabled by audioflinger due to previous underrun
    if (cblk->flags & CBLK_DISABLED_MSK) {
        cblk->flags &= ~CBLK_DISABLED_ON;
        LOGW("obtainBuffer() track %p disabled, restarting", this);
        mAudioTrack->start();
    }


    cblk->waitTimeMs = 0;


    if (framesReq > framesAvail) {
        framesReq = framesAvail;
    }


    uint64_t u = cblk->user;
    uint64_t bufferEnd = cblk->userBase + cblk->frameCount;


    if (u + framesReq > bufferEnd) {
        framesReq = (uint32_t)(bufferEnd - u);
    }


    audioBuffer->flags = mMuted ? Buffer::MUTE : 0;
    audioBuffer->channelCount = mChannelCount;
    audioBuffer->frameCount = framesReq;
    audioBuffer->size = framesReq * cblk->frameSize;
    if (AudioSystem::isLinearPCM(mFormat)) {
        audioBuffer->format = AudioSystem::PCM_16_BIT;
    } else {
        audioBuffer->format = mFormat;
    }
    audioBuffer->raw = (int8_t *)cblk->buffer(u);
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
void* audio_track_cblk_t::buffer(uint64_t offset) const
{
    return (int8_t *)this->buffers + (offset - userBase) * this->frameSize;
}
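In other words, the monotonically increasing offset is first rebased into the current window by subtracting userBase, then scaled to bytes. A small worked example (my numbers):

    long user = 1000, userBase = 512;   // frame counters
    int frameSize = 4;                  // e.g. 16-bit stereo
    long byteOffset = (user - userBase) * frameSize;   // (1000 - 512) * 4 = 1952
    // write pointer = start of the data area + byteOffset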
----------------------------------------------------------------
    active = mActive;
    return active ? status_t(NO_ERROR) : status_t(STOPPED);
}
----------------------------------------------------------------


        size_t toWrite;


        if (mFormat == AudioSystem::PCM_8_BIT && !(mFlags & AudioSystem::OUTPUT_FLAG_DIRECT)) {
            // Divide capacity by 2 to take expansion into account
            toWrite = audioBuffer.size>>1;
            // 8 to 16 bit conversion
            int count = toWrite;
            int16_t *dst = (int16_t *)(audioBuffer.i8);
            while(count--) {
                *dst++ = (int16_t)(*src++^0x80) << 8;
            }
        } else {
            toWrite = audioBuffer.size;
            memcpy(audioBuffer.i8, src, toWrite);
            src += toWrite;
        }
        userSize -= toWrite;
        written += toWrite;


        releaseBuffer(&audioBuffer);
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
void AudioTrack::releaseBuffer(Buffer* audioBuffer)
{
    audio_track_cblk_t* cblk = mCblk;
    cblk->stepUser(audioBuffer->frameCount);
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
uint64_t audio_track_cblk_t::stepUser(uint32_t frameCount)
{
    uint64_t u = this->user;


    u += frameCount;
    // Ensure that user is never ahead of server for AudioRecord
    if (flags & CBLK_DIRECTION_MSK) {
        // If stepServer() has been called once, switch to normal obtainBuffer() timeout period
        if (bufferTimeoutMs == MAX_STARTUP_TIMEOUT_MS-1) {
            bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
        }
    } else if (u > this->server) {
        LOGW("stepServer occured after track reset");
        u = this->server;
    }


    if (u >= userBase + this->frameCount) {
        userBase += this->frameCount;
    }


    this->user = u;


    // Clear flow control error condition as new data has been written/read to/from buffer.
    flags &= ~CBLK_UNDERRUN_MSK;


    return u;
}
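This userBase bookkeeping is what keeps (user - userBase) inside the buffer. Continuing the numbers from above (mine):

    long user = 1000, userBase = 512, frameCount = 512;
    user += 40;                             // released 40 frames -> user = 1040
    if (user >= userBase + frameCount) {    // 1040 >= 1024: crossed the window end
        userBase += frameCount;             // slide the window: userBase = 1024
    }
    // (user - userBase) == 16: the next write starts 16 frames into the buffer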
----------------------------------------------------------------
}
----------------------------------------------------------------
    } while (userSize);


    return written;
}
----------------------------------------------------------------
    } else {
// In static mode, copy the data into the shared memory.
// Note: PCM 8-bit data needs extra handling (expansion to 16-bit).
        if (audioFormat == javaAudioTrackFields.PCM16) {
            // writing to shared memory, check for capacity
            if ((size_t)sizeInBytes > pTrack->sharedBuffer()->size()) {
                sizeInBytes = pTrack->sharedBuffer()->size();
            }
            memcpy(pTrack->sharedBuffer()->pointer(), data + offsetInBytes, sizeInBytes);
            written = sizeInBytes;
        } else if (audioFormat == javaAudioTrackFields.PCM8) {
            // data contains 8bit data we need to expand to 16bit before copying
            // to the shared memory
            // writing to shared memory, check for capacity,
            // note that input data will occupy 2X the input space due to 8 to 16bit conversion
            if (((size_t)sizeInBytes)*2 > pTrack->sharedBuffer()->size()) {
                sizeInBytes = pTrack->sharedBuffer()->size() / 2;
            }
            int count = sizeInBytes;
            int16_t *dst = (int16_t *)pTrack->sharedBuffer()->pointer();
            const int8_t *src = (const int8_t *)(data + offsetInBytes);
            while(count--) {
                *dst++ = (int16_t)(*src++^0x80) << 8;
            }
            // even though we wrote 2*sizeInBytes, we only report sizeInBytes as written to hide
            // the 8bit mixer restriction from the user of this function
            written = sizeInBytes;
        }
    }
    return written;


}
----------------------------------------------------------------
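Both copy paths above (the stream-mode write and the static-mode memcpy branch) use the same 8-to-16-bit expansion: 8-bit PCM is unsigned with silence at 0x80, so XOR-ing with 0x80 recenters it to signed, and shifting left by 8 scales it to the 16-bit range. A quick check in Java (the & 0xFF is needed because Java's byte is signed):

    byte sample8 = (byte) 0x80;                                   // 8-bit silence
    short sample16 = (short) (((sample8 & 0xFF) ^ 0x80) << 8);    // -> 0, 16-bit silence
    // 0xFF (max) -> 0x7F00 (32512); 0x00 (min) -> 0x8000 (-32768)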


    env->ReleasePrimitiveArrayCritical(javaAudioData, cAudioData, 0);


    //LOGV("write wrote %d (tried %d) bytes in the native AudioTrack with offset %d",
    //     (int)written, (int)(sizeInBytes), (int)offsetInBytes);
    return written;
}
----------------------------------------------------------------
    }
###################################################################


&&&&&&&&&&&Summary&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1. Writing playback data ultimately means writing into an audio_track_cblk_t structure.
2. The audio_track_cblk_t structure is created in the constructor of AudioFlinger's TrackBase class.
    A block of memory is first allocated from the Client's heap, and its address is then cast to an audio_track_cblk_t pointer.
The last member of audio_track_cblk_t is the pointer to the data itself.
3. So far the data has only been handed over to AudioFlinger; how AudioFlinger consumes it to actually produce sound is something we still have to study.
&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&

Excerpted from: 江風的專欄 (Jiangfeng's column)
