程式人生 > Android跨程序通訊Binder原理分析(二)

Android跨程序通訊Binder原理分析(二)

文章目錄

1 Binder原始碼分析

1.1 Service的註冊流程

    Android 系統中將大量的核心服務以 Service 的形式來對外提供,Service 只有註冊到 ServiceManager 後,Client 端才能通過 ServiceManager 獲取到 Service 的代理物件,從而使用到 Service 提供的服務。下面以MediaServer為例來分析Service 的註冊流程。先看一下MediaServer的main方法。

// frameworks/av/media/mediaserver/main_mediaserver.cpp
// Entry point of mediaserver: opens the binder driver, obtains the
// ServiceManager proxy, registers the media services, then joins the
// binder thread pool so this thread serves incoming transactions.
// (Reformatted: the original snippet was collapsed onto one line by
// extraction; the <1>/<2> article markers are now comments.)
int main(int argc __unused, char **argv __unused)
{
    signal(SIGPIPE, SIG_IGN);   // writing to a dead binder peer must not kill us

    sp<ProcessState> proc(ProcessState::self());                  // <1>
    sp<IServiceManager> sm(defaultServiceManager());              // <2>
    ALOGI("ServiceManager: %p", sm.get());
    InitializeIcuOrDie();
    MediaPlayerService::instantiate();      // registers "media.player"
    ResourceManagerService::instantiate();
    registerExtensions();
    ProcessState::self()->startThreadPool();
    IPCThreadState::self()->joinThreadPool();   // never returns in normal operation
}

// frameworks/av/media/libmediaplayerservice/MediaPlayerService.cpp
void MediaPlayerService::instantiate() { defaultServiceManager()->addService( String16("media.player"), new MediaPlayerService()); <3> }

<1> 首先呼叫了ProcessState::self()函式

// frameworks/native/libs/binder/ProcessState.cpp
// Process-wide singleton accessor: creates the ProcessState (and thereby
// opens "/dev/binder") on first use, under the global mutex.
sp<ProcessState> ProcessState::self()
{
    Mutex::Autolock _l(gProcessMutex);
    if (gProcess == NULL) {
        gProcess = new ProcessState("/dev/binder");
    }
    return gProcess;
}

ProcessState::self()建立了一個singleton的ProcessState

// frameworks/native/libs/binder/ProcessState.cpp
// Per-process binder state: opens the driver node and mmaps the buffer
// used to receive transaction data from the kernel.
ProcessState::ProcessState(const char *driver)
    : mDriverName(String8(driver))
    , mDriverFD(open_driver(driver))      // open_driver opens the device node "/dev/binder"
    , mVMStart(MAP_FAILED)
    , mThreadCountLock(PTHREAD_MUTEX_INITIALIZER)
    , mThreadCountDecrement(PTHREAD_COND_INITIALIZER)
    , mExecutingThreadsCount(0)
    , mMaxThreads(DEFAULT_MAX_BINDER_THREADS)
    , mStarvationStartTimeMs(0)
    , mManagesContexts(false)
    , mBinderContextCheckFunc(NULL)
    , mBinderContextUserData(NULL)
    , mThreadPoolStarted(false)
    , mThreadPoolSeq(1)
{
    if (mDriverFD >= 0) {
        // mmap the binder, providing a chunk of virtual address space to receive transactions.
        // BINDER_VM_SIZE = ((1*1024*1024) - (4096 *2)), i.e. 1MB - 8KB; payloads sent
        // over binder should stay below this limit.
        mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
        if (mVMStart == MAP_FAILED) {
            // *sigh*
            ALOGE("Using %s failed: unable to mmap transaction memory.\n", mDriverName.c_str());
            close(mDriverFD);
            mDriverFD = -1;
            mDriverName.clear();
        }
    }

    LOG_ALWAYS_FATAL_IF(mDriverFD < 0, "Binder driver could not be opened.  Terminating.");
}

// Opens the binder device node, verifies that the driver speaks the same
// protocol version as userspace, and configures the max binder thread count.
// Returns the open fd, or -1 on any failure.
static int open_driver(const char *driver)
{
    int fd = open(driver, O_RDWR | O_CLOEXEC);
    if (fd >= 0) {
        int vers = 0;
        status_t result = ioctl(fd, BINDER_VERSION, &vers);
        if (result == -1) {
            ALOGE("Binder ioctl to obtain version failed: %s", strerror(errno));
            close(fd);
            fd = -1;
        } else if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {
            // Fix: originally this branch was a second independent `if`, so after
            // the ioctl failure above it closed the already-closed fd a second time.
            ALOGE("Binder driver protocol(%d) does not match user space protocol(%d)! ioctl() return value: %d",
                vers, BINDER_CURRENT_PROTOCOL_VERSION, result);
            close(fd);
            fd = -1;
        }
        if (fd >= 0) {
            // Fix: only issue further ioctls on a still-valid fd.
            // DEFAULT_MAX_BINDER_THREADS = 15: max threads the driver may request.
            size_t maxThreads = DEFAULT_MAX_BINDER_THREADS;
            result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
            if (result == -1) {
                ALOGE("Binder ioctl to set max threads failed: %s", strerror(errno));
            }
        }
    } else {
        ALOGW("Opening '%s' failed: %s\n", driver, strerror(errno));
    }
    return fd;
}

在ProcessState的建構函式中首先呼叫open_driver開啟裝置節點"/dev/binder",之後呼叫函式mmap(),它通過對映的方式使程序的使用者空間和核心空間訪問的是同一塊實體記憶體,從而減少了一次拷貝過程(Binder一次資料拷貝見《Android跨程序通訊Binder原理分析(一)》)。

<2> 接下來分析函式defaultServiceManager()

// frameworks/native/libs/binder/IServiceManager.cpp
// Returns the process-wide IServiceManager proxy, creating it on first use.
sp<IServiceManager> defaultServiceManager()
{
    // Fast path: proxy already created.
    if (gDefaultServiceManager != NULL) return gDefaultServiceManager;

    {
        AutoMutex _l(gDefaultServiceManagerLock);
        while (gDefaultServiceManager == NULL) {
            // getContextObject(NULL) yields a BpBinder for handle 0 (servicemanager).
            sp<IBinder> ctx = ProcessState::self()->getContextObject(NULL);
            gDefaultServiceManager = interface_cast<IServiceManager>(ctx);
            // Still NULL means servicemanager has not finished initializing; retry.
            if (gDefaultServiceManager == NULL)
                sleep(1);
        }
    }

    return gDefaultServiceManager;
}

在defaultServiceManager()中首先呼叫了ProcessState::self()->getContextObject(NULL)獲取IBinder物件,然後呼叫interface_cast()函式將IBinder物件轉換成IServiceManager物件(interface_cast見《Android跨程序通訊Binder原理分析(一)》)

// frameworks/native/libs/binder/ProcessState.cpp
// Returns a proxy for the context manager. By convention handle 0 always
// refers to ServiceManager, so the caller argument is ignored.
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/)
{
    const int32_t kContextManagerHandle = 0;
    return getStrongProxyForHandle(kContextManagerHandle);
}

// Returns (creating and caching if needed) a BpBinder proxy for the given
// driver handle. ("..." marks lookup/caching code elided by the article.)
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
    sp<IBinder> result;
    ...
    b = new BpBinder(handle); // the handle is stored in BpBinder's member mHandle and later passed as an argument to IPCThreadState::transact()
    e->binder = b;
    if (b) e->refs = b->getWeakRefs();
    result = b;
    ...        
    return result;
}

這裡建立了BpBinder物件，handle的值為0，代表這個BpBinder是ServiceManager的代理端。
<3> 接下來分析defaultServiceManager()->addService()函式

// frameworks/native/libs/binder/IServiceManager.cpp
// Client-side addService: marshals the interface token, service name, the
// binder object itself and the isolation flag into a Parcel, then sends
// ADD_SERVICE_TRANSACTION through the remote() BpBinder.
virtual status_t addService(const String16& name, const sp<IBinder>& service,
        bool allowIsolated)
{
    Parcel data;
    Parcel reply;
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    data.writeString16(name);
    data.writeStrongBinder(service);
    data.writeInt32(allowIsolated ? 1 : 0);
    const status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
    if (err != NO_ERROR) return err;
    return reply.readExceptionCode();
}

通過 remote 函式得到儲存在 BpRefBase 中的成員變數mRemote中 ,這裡mRemote就是BpBinder物件,然後呼叫其 transact 函式(mRemote見《Android跨程序通訊Binder原理分析(一)》)

//frameworks/native/libs/binder/BpBinder.cpp
// Forwards the transaction to IPCThreadState, which performs the actual
// driver interaction for this proxy's handle.
status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // Once a binder has died, it will never come back to life.
    if (!mAlive) return DEAD_OBJECT;

    // mHandle is 0 when this proxy refers to ServiceManager.
    const status_t status = IPCThreadState::self()->transact(
        mHandle, code, data, reply, flags);
    if (status == DEAD_OBJECT) mAlive = 0;
    return status;
}

IPCThreadState是程序中真正幹活的“夥計”，其具體實現程式碼如下:

// frameworks/native/libs/binder/IPCThreadState.cpp
// Returns the calling thread's IPCThreadState, creating it on first use.
// Each thread owns one instance, stored in thread-local storage under gTLS.
// ("..." marks code elided by the article: first-time TLS key creation.)
IPCThreadState* IPCThreadState::self()
{
    if (gHaveTLS) {
restart:
        const pthread_key_t k = gTLS;
        IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
        if (st) return st;
        return new IPCThreadState;  // the constructor stores itself into TLS
    }
    ...

    pthread_mutex_unlock(&gTLSMutex);
    goto restart;
}

這裡建立了IPCThreadState物件,接下來我們分析IPCThreadState的建構函式

// frameworks/native/libs/binder/IPCThreadState.cpp
// Per-thread IPC state. mIn buffers data received from the binder driver,
// mOut buffers data queued to be sent to it (both pre-sized to 256 bytes).
// NOTE(review): the article truncates this snippet — the constructor's
// closing brace is missing below.
IPCThreadState::IPCThreadState()
    : mProcess(ProcessState::self()),   // binds this thread to the process-wide driver fd
      mStrictModePolicy(0),
      mLastTransactionBinderFlags(0)
{
    pthread_setspecific(gTLS, this);    // store in TLS so self() can find this instance
    clearCaller();
    mIn.setDataCapacity(256);
    mOut.setDataCapacity(256);
#ifdef OUTPUT_DEBUG_LOG
    // Debug builds keep a small ring-buffer history of read/write commands.
    mWriteHistoryIndex = 0;
    mReadHistoryIndex = 0;
    for (int i = 0; i < CMD_HISTORY_SIZE; i++) {
        mWriteHistory[i].when.tv_sec = 0;
        mWriteHistory[i].size = 0;
        mReadHistory[i].when.tv_sec = 0;
        mReadHistory[i].size = 0;
    }
#endif

由此可見每一個執行緒都有一個IPCThreadState,每個IPCThreadState中都有一個mIn和mOut,其中mIn是用來接收Binder裝置資料的,而mOut則是用來儲存發往Binder裝置資料的。
傳輸工作是很辛苦的,BpBinder的transact()呼叫了IPCThreadState的transact()函式,這個函式實際完成了與Binder通訊的工作,我們看看IPCThreadState的transact()函式的實現。

// frameworks/native/libs/binder/IPCThreadState.cpp
// Sends one transaction to the binder driver: queues it via
// writeTransactionData() and then blocks in waitForResponse() until the
// reply arrives. ("..." marks code elided by the article.)
status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    ...
    if (err == NO_ERROR) {
        LOG_ONEWAY(">>>> SEND from pid %d uid %d %s", getpid(), getuid(),
            (flags & TF_ONE_WAY) == 0 ? "READ REPLY" : "ONE WAY");
        // BC_TRANSACTION is the userspace -> driver command carrying the payload.
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    }
    ...
    if (reply) {
        err = waitForResponse(reply);
    } else {
        // Caller does not want the reply, but we still must wait for completion.
        Parcel fakeReply;
        err = waitForResponse(&fakeReply);
    }
    ....
}

IPCThreadState函式負責與Binder驅動進行互動,具體的互動過程這裡就先不進行分析了,感興趣的可以去看看程式碼。這裡我們只需要知道在建立Service的時候,在Binder驅動中會建立一個flat_binder_object結構體,該結構體中儲存著一個handle變數。
Android 中每個Service都有一個handle(根據建立時間遞增,handle = 0的是ServiceManager),Binder Driver可以通過這個handle的值去找到指定的Service;

// bionic/libc/kernel/uapi/linux/android/binder.h
// Wire representation of a binder object as it crosses the driver boundary.
struct flat_binder_object {
 unsigned long type;     // BINDER_TYPE_BINDER / BINDER_TYPE_HANDLE / ...
 unsigned long flags;
 union {
    void *binder;        // local object: pointer valid in the owning process
    signed long handle;  // remote object: driver-assigned handle (0 = ServiceManager)
 };
 void *cookie;           // extra data for the owning process
};

到此為止就完成了MediaServer的註冊工作。

1.2 Service的獲取流程

接下來分析MediaServer的Client端是如何連結到MediaServer的Server端的,我們先看下面的程式碼

// frameworks/av/media/libmedia/mediametadataretriever.cpp
// Returns the cached IMediaPlayerService proxy, looking it up through
// ServiceManager (and installing a death notifier) on first use.
const sp<IMediaPlayerService> MediaMetadataRetriever::getService()
{
    Mutex::Autolock lock(sServiceLock);
    if (sService == 0) {
        sp<IServiceManager> sm = defaultServiceManager();
        sp<IBinder> binder;
        // Poll until mediaserver has published "media.player".
        while (true) {
            binder = sm->getService(String16("media.player"));     <1>
            if (binder != 0) break;
            ALOGW("MediaPlayerService not published, waiting...");
            usleep(500000); // 0.5 s
        }
        if (sDeathNotifier == NULL) {
            sDeathNotifier = new DeathNotifier();
        }
        binder->linkToDeath(sDeathNotifier);
        // interface_cast (see part one of this series) converts the IBinder
        // proxy into an IMediaPlayerService.
        sService = interface_cast<IMediaPlayerService>(binder);
    }
    ALOGE_IF(sService == 0, "no MediaPlayerService!?");
    return sService;
}

<1> 這裡呼叫了ServiceManager的getService()函式,引數為"media.player"與之前addService()傳的引數是一樣的。看getService()的實現

// frameworks/native/libs/binder/IServiceManager.cpp
// Client-side getService: retries checkService() up to 5 times, sleeping
// one second between attempts in case the service is not yet registered.
virtual sp<IBinder> getService(const String16& name) const
{
    for (unsigned attempt = 0; attempt < 5; attempt++) {
        if (attempt > 0) {
            const bool isVendorBinder =
                !strcmp(ProcessState::self()->getDriverName().c_str(), "/dev/vndbinder");
            if (isVendorBinder) {
                ALOGI("Waiting for vendor service %s...", String8(name).string());
                CallStack stack(LOG_TAG);
            } else {
                ALOGI("Waiting for service %s...", String8(name).string());
            }
            sleep(1);
        }
        sp<IBinder> svc = checkService(name);
        if (svc != NULL) return svc;
    }
    return NULL;
}

// One-shot lookup: marshals the interface token and service name, issues
// CHECK_SERVICE_TRANSACTION, and unmarshals the returned binder (if any).
virtual sp<IBinder> checkService( const String16& name) const
{
    Parcel data;
    Parcel reply;
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    data.writeString16(name);
    remote()->transact(CHECK_SERVICE_TRANSACTION, data, &reply);
    return reply.readStrongBinder();
}

getService()函式呼叫了內部的checkService(),在checkService()中把”media.player“封裝在Parcel物件裡之後呼叫remote()->transact(),該函式在分析addService()的時候分析過,只不過這裡傳的引數不同為CHECK_SERVICE_TRANSACTION。transact()會和Binder Driver進行互動並將返回結果儲存在reply中,下面分析函式readStrongBinder()。

// frameworks/native/libs/binder/Parcel.cpp
// Like readNullableStrongBinder(), except that a NULL binder is an error.
status_t Parcel::readStrongBinder(sp<IBinder>* val) const
{
    const status_t status = readNullableStrongBinder(val);
    if (status != OK) return status;
    return val->get() ? OK : UNEXPECTED_NULL;
}

// Reads a binder object from the parcel; NULL values are permitted.
status_t Parcel::readNullableStrongBinder(sp<IBinder>* val) const
{
    return unflatten_binder(ProcessState::self(), *this, val);
}

這裡只是一些轉呼叫,接著看unflatten_binder函式

// frameworks/native/libs/binder/Parcel.cpp
// Rebuilds an sp<IBinder> from the flat_binder_object stored in the parcel.
// For BINDER_TYPE_HANDLE the driver has rewritten the object to carry a
// handle, which is turned back into a BpBinder proxy here.
// ("...case" reflects code elided by the article; the snippet is abridged.)
status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, sp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);
    ...case BINDER_TYPE_HANDLE:
                *out = proc->getStrongProxyForHandle(flat->handle);
                return finish_unflatten_binder(
                    static_cast<BpBinder*>(out->get()), *flat, in);
        }        
    }
    return BAD_TYPE;
}

這裡出現了flat_binder_object結構體,前面addService()時會初始化這個結構體,這裡用到了flat_binder_object結構體中的handle,接下來看getStrongProxyForHandle函式

// frameworks/native/libs/binder/ProcessState.cpp
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
    sp<IBinder> result;

    AutoMutex _l(mLock);

    handle_entry* e = lookupHandleLocked(handle);

    if (e != NULL) {
        IBinder* b = e->binder;
        if (