
The Android Binder Mechanism: Starting ServiceManager

Based on the Android 7.0 source code, this article analyzes the startup process of ServiceManager.

The binder driver itself is initialized in its binder_init function.

I. Overview

  ServiceManager is the daemon of the whole Binder IPC mechanism, and it is itself a Binder service. It does not, however, use the multi-threaded model from libbinder to talk to the Binder driver; instead it has its own binder.c that speaks to the driver directly, and it uses a single loop, binder_loop, to read and handle transactions. The benefit of this design is that it is simple and efficient.
  ServiceManager's own job is not complicated; it essentially does two things: looking up services and registering services.
  The ServiceManager module lives in the frameworks\native\cmds\servicemanager directory; see the Android.mk file there for the details. Its entry point is the main function in frameworks\native\cmds\servicemanager\service_manager.c.

int main()
{
    struct binder_state *bs;
    bs = binder_open(128*1024); // open the binder driver


    if (binder_become_context_manager(bs)) { // become the context manager
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }


    binder_loop(bs, svcmgr_handler); // enter an endless loop handling client requests; note the svcmgr_handler function pointer
    return 0;
}

1. Flow Chart

(flow chart image omitted)

2. Sequence Diagram

(sequence diagram image: ServiceManager的啟動.png)

II. Flow Analysis

1. binder_open

[===>frameworks\native\cmds\servicemanager\binder.c]

struct binder_state *binder_open(size_t mapsize)
{
    struct binder_state *bs;
    struct binder_version vers;

    bs = malloc(sizeof(*bs)); // allocate the binder_state
    if (!bs) {
        errno = ENOMEM;
        return NULL;
    }

    bs->fd = open("/dev/binder", O_RDWR | O_CLOEXEC); // open the binder device driver
    if (bs->fd < 0) {
        ...
        goto fail_open;
    }

    if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
        (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) { // query the driver's version via ioctl
        ...
        goto fail_open;
    }

    bs->mapsize = mapsize; // mapsize is 128*1024
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        ...
        goto fail_map;
    }

    return bs;

fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return NULL;
}

  Here malloc allocates the binder_state, open opens the binder device, ioctl queries the binder version, and mmap maps memory. We will not dig into how these four system calls work internally; for now it is enough to know that open, ioctl and mmap eventually land in binder_open, binder_ioctl and binder_mmap in the binder driver (kernel\drivers\staging\android\binder.c).
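  As a quick reference, binder_init registers these entry points with the kernel through the driver's file_operations table; in the binder driver of this era that table looks roughly like the following (field order may differ slightly between kernel versions):

static const struct file_operations binder_fops = {
    .owner = THIS_MODULE,
    .poll = binder_poll,
    .unlocked_ioctl = binder_ioctl,
    .compat_ioctl = binder_ioctl,
    .mmap = binder_mmap,
    .open = binder_open,
    .flush = binder_flush,
    .release = binder_release,
};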
  The function above returns a binder_state struct; the table below shows what it contains after this call to binder_open.

binder_state member | value | notes
int fd | /dev/binder | file descriptor of the /dev/binder device
void *mapped | mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0) | start address at which /dev/binder is mapped into the process address space
size_t mapsize | 128*1024 | size of that memory-mapped area
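  For reference, the binder_state struct itself is defined near the top of frameworks\native\cmds\servicemanager\binder.c, roughly as below (comments added here for clarity):

struct binder_state
{
    int fd;          /* file descriptor of /dev/binder */
    void *mapped;    /* start address returned by mmap */
    size_t mapsize;  /* size of the mapped area, 128*1024 here */
};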

2. binder_become_context_manager

[===>frameworks\native\cmds\servicemanager\binder.c]

int binder_become_context_manager(struct binder_state *bs)
{
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}

  This makes the caller the one and only context manager in the whole system. All it does here is pass BINDER_SET_CONTEXT_MGR to the binder driver; the ioctl is then handled by binder_ioctl in the driver.

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;

    ...

    switch (cmd) {
    ...
    case BINDER_SET_CONTEXT_MGR:
        ret = binder_ioctl_set_ctx_mgr(filp);
        if (ret)
            goto err;
        ret = security_binder_set_context_mgr(proc->tsk);
        if (ret < 0)
            goto err;
        break;
    ...
    }
    ...
}

  binder_ioctl in turn hands the job of setting the context manager over to binder_ioctl_set_ctx_mgr.

static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    kuid_t curr_euid = current_euid();

    if (binder_context_mgr_node != NULL) {
        pr_err("BINDER_SET_CONTEXT_MGR already set\n");
        ret = -EBUSY;
        goto out;
    }
    ...
    binder_context_mgr_node = binder_new_node(proc, 0, 0);
    if (binder_context_mgr_node == NULL) {
        ret = -ENOMEM;
        goto out;
    }
    binder_context_mgr_node->local_weak_refs++;
    binder_context_mgr_node->local_strong_refs++;
    binder_context_mgr_node->has_strong_ref = 1;
    binder_context_mgr_node->has_weak_ref = 1;
out:
    return ret;
}

  A binder_node is created here with binder_new_node, and the newly created node is stored in the static variable binder_context_mgr_node.
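  To make "creating a binder_node" more concrete, below is an abridged sketch of binder_new_node as it appears in the binder driver of this era (debug output and statistics trimmed): the node is allocated with kzalloc, linked into the owning process's proc->nodes red-black tree keyed by ptr, and initialized. For the context manager both ptr and cookie are 0.

static struct binder_node *binder_new_node(struct binder_proc *proc,
                                           binder_uintptr_t ptr,
                                           binder_uintptr_t cookie)
{
    struct rb_node **p = &proc->nodes.rb_node;
    struct rb_node *parent = NULL;
    struct binder_node *node;

    while (*p) { /* find the insertion point in proc->nodes, keyed by ptr */
        parent = *p;
        node = rb_entry(parent, struct binder_node, rb_node);
        if (ptr < node->ptr)
            p = &(*p)->rb_left;
        else if (ptr > node->ptr)
            p = &(*p)->rb_right;
        else
            return NULL; /* a node for this ptr already exists */
    }

    node = kzalloc(sizeof(*node), GFP_KERNEL);
    if (node == NULL)
        return NULL;
    rb_link_node(&node->rb_node, parent, p);
    rb_insert_color(&node->rb_node, &proc->nodes);
    node->proc = proc;    /* the node belongs to servicemanager's binder_proc */
    node->ptr = ptr;      /* 0 for the context manager */
    node->cookie = cookie;
    node->work.type = BINDER_WORK_NODE;
    INIT_LIST_HEAD(&node->work.entry);
    INIT_LIST_HEAD(&node->async_todo);
    return node;
}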

3. binder_loop

[===>frameworks\native\cmds\servicemanager\binder.c]

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t)); // notify the binder driver that this thread is entering the looper

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); // read from (and write to) the binder device on each iteration

        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}

  Note that, as explained earlier, func here is the svcmgr_handler function pointer.
  The function starts by setting up a binder_write_read struct, whose members are as follows:

binder_write_read member | type | notes
write_size | binder_size_t | size of the data to be written to the binder device
write_consumed | binder_size_t | how much of write_buffer the driver has consumed
write_buffer | binder_uintptr_t | buffer holding the data to be written to the binder device
read_size | binder_size_t | capacity of the buffer used for reading from the binder device
read_consumed | binder_size_t | how much data the driver actually placed in read_buffer
read_buffer | binder_uintptr_t | buffer that receives the data read from the binder device

  The binder_write_read struct acts as the descriptor for one read/write exchange with the binder driver. To read from the driver, the "write"-related members are set to 0; the data that is read ends up in read_buffer, read_size gives the buffer's capacity, and read_consumed reports how much was actually read. To write to the driver, the "read"-related members are set to 0, and write_buffer and write_size describe the data to be written.
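  For reference, the struct is declared in the binder UAPI header (include/uapi/linux/android/binder.h) as:

struct binder_write_read {
    binder_size_t       write_size;     /* bytes to write */
    binder_size_t       write_consumed; /* bytes consumed by driver */
    binder_uintptr_t    write_buffer;
    binder_size_t       read_size;      /* bytes to read */
    binder_size_t       read_consumed;  /* bytes consumed by driver */
    binder_uintptr_t    read_buffer;
};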

3.1 BC_ENTER_LOOPER

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));

3.1.1 binder_write

[===>frameworks\native\cmds\servicemanager\binder.c]

int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;

    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data;
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0) {
        ...
    }
    return res;
}

  binder_write itself fills in a binder_write_read struct, zeroing the "read"-related members, and hands the BC_ENTER_LOOPER command to the binder driver through the BINDER_WRITE_READ ioctl.

3.1.2 binder_ioctl

[===>kernel\drivers\android\binder.c]

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;//the binder_write_read struct passed in from user space, as described above

    ...
    thread = binder_get_thread(proc);
    if (thread == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    switch (cmd) {
    case BINDER_WRITE_READ:
        ret = binder_ioctl_write_read(filp, cmd, arg, thread);
        if (ret)
            goto err;
        break;
    ...
    }
    ...
}

  The arg parameter here carries the binder_write_read struct passed in from user space; its "read" part is 0 and its "write" part contains the BC_ENTER_LOOPER command.

3.1.3 binder_ioctl_write_read

[===>kernel\drivers\android\binder.c]

static int binder_ioctl_write_read(struct file *filp,
                unsigned int cmd, unsigned long arg,
                struct binder_thread *thread)
{
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;
    struct binder_write_read bwr;

    if (size != sizeof(struct binder_write_read)) {
        ret = -EINVAL;
        goto out;
    }
    if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {//copy the binder_write_read struct from user space into kernel space
        ret = -EFAULT;
        goto out;
    }
    binder_debug(BINDER_DEBUG_READ_WRITE,
             "%d:%d write %lld at %016llx, read %lld at %016llx\n",
             proc->pid, thread->pid,
             (u64)bwr.write_size, (u64)bwr.write_buffer,
             (u64)bwr.read_size, (u64)bwr.read_buffer);

    if (bwr.write_size > 0) {
        ret = binder_thread_write(proc, thread,
                      bwr.write_buffer,
                      bwr.write_size,
                      &bwr.write_consumed);
        trace_binder_write_done(ret);
        if (ret < 0) {
            bwr.read_consumed = 0;
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto out;
        }
    }
    if (bwr.read_size > 0) {
        ...
    }
    binder_debug(BINDER_DEBUG_READ_WRITE,
             "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
             proc->pid, thread->pid,
             (u64)bwr.write_consumed, (u64)bwr.write_size,
             (u64)bwr.read_consumed, (u64)bwr.read_size);
    if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }
out:
    return ret;
}

  After the binder_write_read struct has been copied in from user space, its "write" part is non-empty and contains BC_ENTER_LOOPER while its "read" part is 0, so binder_thread_write is executed.

3.1.4 binder_thread_write

[===>kernel\drivers\android\binder.c]

static int binder_thread_write(struct binder_proc *proc,
            struct binder_thread *thread,
            binder_uintptr_t binder_buffer, size_t size,
            binder_size_t *consumed)
{
    uint32_t cmd;
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    while (ptr < end && thread->return_error == BR_OK) {
        if (get_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        trace_binder_command(cmd);
        if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
            binder_stats.bc[_IOC_NR(cmd)]++;
            proc->stats.bc[_IOC_NR(cmd)]++;
            thread->stats.bc[_IOC_NR(cmd)]++;
        }
        switch (cmd) {
        ...
        case BC_ENTER_LOOPER:
            binder_debug(BINDER_DEBUG_THREADS,
                     "%d:%d BC_ENTER_LOOPER\n",
                     proc->pid, thread->pid);
            if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
                thread->looper |= BINDER_LOOPER_STATE_INVALID;
                binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
                    proc->pid, thread->pid);
            }
            thread->looper |= BINDER_LOOPER_STATE_ENTERED;
            break;
        ...
        }
    }
    return 0;
}

  Stripping away the unrelated commands and keeping only BC_ENTER_LOOPER, the handling is straightforward: the looper state of the binder_thread describing the current thread gets the BINDER_LOOPER_STATE_ENTERED bit set, marking that this thread has entered its looper loop.
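  For reference, the looper state bits used here are defined in the driver roughly as follows (values as found in the binder driver of this era):

enum {
    BINDER_LOOPER_STATE_REGISTERED  = 0x01,
    BINDER_LOOPER_STATE_ENTERED     = 0x02,
    BINDER_LOOPER_STATE_EXITED      = 0x04,
    BINDER_LOOPER_STATE_INVALID     = 0x08,
    BINDER_LOOPER_STATE_WAITING     = 0x10,
    BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};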

3.2 BINDER_WRITE_READ

        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

  This time the "write"-related members of binder_write_read are 0 and the "read"-related members are not. The flow is largely the same as in 3.1; only the branches taken in the code differ.

3.2.1 binder_ioctl

  Same as 3.1.2.

3.2.2 binder_ioctl_write_read

[===>kernel\drivers\android\binder.c]

static int binder_ioctl_write_read(struct file *filp,
                unsigned int cmd, unsigned long arg,
                struct binder_thread *thread)
{
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;
    struct binder_write_read bwr;

    if (size != sizeof(struct binder_write_read)) {
        ret = -EINVAL;
        goto out;
    }
    if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {//copy the binder_write_read struct from user space into kernel space
        ret = -EFAULT;
        goto out;
    }
    binder_debug(BINDER_DEBUG_READ_WRITE,
             "%d:%d write %lld at %016llx, read %lld at %016llx\n",
             proc->pid, thread->pid,
             (u64)bwr.write_size, (u64)bwr.write_buffer,
             (u64)bwr.read_size, (u64)bwr.read_buffer);

    if (bwr.write_size > 0) {
        ...
    }
    if (bwr.read_size > 0) {
        ret = binder_thread_read(proc, thread, bwr.read_buffer,
                     bwr.read_size,
                     &bwr.read_consumed,
                     filp->f_flags & O_NONBLOCK);
        trace_binder_read_done(ret);
        if (!list_empty(&proc->todo))
            wake_up_interruptible(&proc->wait);
        if (ret < 0) {
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto out;
        }
    }
    binder_debug(BINDER_DEBUG_READ_WRITE,
             "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
             proc->pid, thread->pid,
             (u64)bwr.write_consumed, (u64)bwr.write_size,
             (u64)bwr.read_consumed, (u64)bwr.read_size);
    if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
        ret = -EFAULT;
        goto out;
    }
out:
    return ret;
}

  Since the "read" part is non-zero this time, binder_thread_read is executed.

3.2.3 binder_thread_read

  I will not claim to have fully worked through this part. The key point is that the BR_* commands and data produced by the driver are written into the caller's read_buffer (inside binder_thread_read, via put_user/copy_to_user), and the binder_write_read bookkeeping struct itself is then copied back to the user-space binder_write_read with copy_to_user.
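  To at least give a rough picture, here is a heavily abridged sketch of binder_thread_read (kernel\drivers\android\binder.c), with most of the work-item handling elided and explanatory comments added; treat it as an outline rather than the full function:

static int binder_thread_read(struct binder_proc *proc,
                struct binder_thread *thread,
                binder_uintptr_t binder_buffer, size_t size,
                binder_size_t *consumed, int non_block)
{
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;
    int ret = 0;
    ...
    /* a fresh read always starts with a BR_NOOP in the caller's read_buffer */
    if (*consumed == 0) {
        if (put_user(BR_NOOP, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
    }
    ...
    /* servicemanager's main thread waits for process-level work: with nothing
       on the todo lists, a blocking reader sleeps on proc->wait until another
       process queues a transaction and wakes it up */
    if (non_block) {
        if (!binder_has_proc_work(proc, thread))
            ret = -EAGAIN;
    } else
        ret = wait_event_freezable_exclusive(proc->wait,
                binder_has_proc_work(proc, thread));
    ...
    /* each pending work item is then translated into a BR_* command
       (BR_TRANSACTION, BR_REPLY, ...) plus its payload and written into the
       caller's read_buffer via put_user/copy_to_user */
    ...
    *consumed = ptr - buffer;   /* report how much data was written back */
    return 0;
}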

3.3 binder_parse

[===>frameworks\native\cmds\servicemanager\binder.c]

        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);

  Once ioctl returns, the binder_write_read struct copied back from kernel space describes the data that was read: read_buffer holds the commands and read_consumed says how many bytes were filled in. binder_parse is then used to walk through that data.
  In this particular scenario the bwr handed to res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr); carries no BC_XXX command on its write side. On the read side, binder_thread_read always puts at least a BR_NOOP at the head of the read buffer and otherwise blocks until real work arrives, so on an idle pass binder_parse simply skips the BR_NOOP and returns without doing anything; the interesting handling only kicks in once commands such as BR_TRANSACTION start coming back.

int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) {
        uint32_t cmd = *(uint32_t *) ptr;
        ptr += sizeof(uint32_t);
#if TRACE
        fprintf(stderr,"%s:\n", cmd_name(cmd));
#endif
        switch(cmd) {
        case BR_NOOP:
            break;
        case BR_TRANSACTION_COMPLETE:
            break;
        case BR_INCREFS:
        case BR_ACQUIRE:
        case BR_RELEASE:
        case BR_DECREFS:
#if TRACE
            fprintf(stderr,"  %p, %p\n", (void *)ptr, (void *)(ptr + sizeof(void *)));
#endif
            ptr += sizeof(struct binder_ptr_cookie);
            break;
        case BR_TRANSACTION: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn);
                res = func(bs, txn, &msg, &reply);
                binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
            }
            ptr += sizeof(*txn);
            break;
        }
        case BR_REPLY: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: reply too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (bio) {
                bio_init_from_txn(bio, txn);
                bio = 0;
            } else {
                /* todo FREE BUFFER */
            }
            ptr += sizeof(*txn);
            r = 0;
            break;
        }
        case BR_DEAD_BINDER: {
            struct binder_death *death = (struct binder_death *)(uintptr_t) *(binder_uintptr_t *)ptr;
            ptr += sizeof(binder_uintptr_t);
            death->func(bs, death->ptr);
            break;
        }
        case BR_FAILED_REPLY:
            r = -1;
            break;
        case BR_DEAD_REPLY:
            r = -1;
            break;
        default:
            ALOGE("parse: OOPS %d\n", cmd);
            return -1;
        }
    }

    return r;
}

  Back to the binder_loop function.

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

        ...

        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        ...
    }
}

  binder_loop keeps reading data from the binder driver in an endless loop and parsing it, and under the right circumstances the parsing calls back into svcmgr_handler. When exactly is that? As the binder_parse code above shows, it is when the driver hands back a BR_TRANSACTION work item; later Binder articles will analyze this with concrete use cases.

int svcmgr_handler(struct binder_state *bs,
                   struct binder_transaction_data *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    size_t len;
    uint32_t handle;
    uint32_t strict_policy;
    int allow_isolated;

    //ALOGI("target=%p code=%d pid=%d uid=%d\n",
    //      (void*) txn->target.ptr, txn->code, txn->sender_pid, txn->sender_euid);

    if (txn->target.ptr != BINDER_SERVICE_MANAGER)
        return -1;

    if (txn->code == PING_TRANSACTION)
        return 0;

    // Equivalent to Parcel::enforceInterface(), reading the RPC
    // header with the strict mode policy mask and the interface name.
    // Note that we ignore the strict_policy and don't propagate it
    // further (since we do no outbound RPCs anyway).
    strict_policy = bio_get_uint32(msg);
    s = bio_get_string16(msg, &len);
    if (s == NULL) {
        return -1;
    }

    if ((len != (sizeof(svcmgr_id) / 2)) ||
        memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
        fprintf(stderr,"invalid id %s\n", str8(s, len));
        return -1;
    }

    if (sehandle && selinux_status_updated() > 0) {
        struct selabel_handle *tmp_sehandle = selinux_android_service_context_handle();
        if (tmp_sehandle) {
            selabel_close(sehandle);
            sehandle = tmp_sehandle;
        }
    }

    switch(txn->code) {
    case SVC_MGR_GET_SERVICE:
    case SVC_MGR_CHECK_SERVICE:
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
        handle = do_find_service(bs, s, len, txn->sender_euid, txn->sender_pid);
        if (!handle)
            break;
        bio_put_ref(reply, handle);
        return 0;

    case SVC_MGR_ADD_SERVICE:
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
        handle = bio_get_ref(msg);
        allow_isolated = bio_get_uint32(msg) ? 1 : 0;
        if (do_add_service(bs, s, len, handle, txn->sender_euid,
            allow_isolated, txn->sender_pid))
            return -1;
        break;

    case SVC_MGR_LIST_SERVICES: {
        uint32_t n = bio_get_uint32(msg);

        if (!svc_can_list(txn->sender_pid)) {
            ALOGE("list_service() uid=%d - PERMISSION DENIED\n",
                    txn->sender_euid);
            return -1;
        }
        si = svclist;
        while ((n-- > 0) && si)
            si = si->next;
        if (si) {
            bio_put_string16(reply, si->name);
            return 0;
        }
        return -1;
    }
    default:
        ALOGE("unknown code %d\n", txn->code);
        return -1;
    }

    bio_put_uint32(reply, 0);
    return 0;
}
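  Looking slightly ahead: SVC_MGR_GET_SERVICE/SVC_MGR_CHECK_SERVICE and SVC_MGR_ADD_SERVICE ultimately operate on svclist, a simple singly linked list of svcinfo entries kept by service_manager.c. The lookup helper there looks roughly like this (shown here only for orientation; do_find_service and do_add_service build on it):

struct svcinfo *find_svc(const uint16_t *s16, size_t len)
{
    struct svcinfo *si;

    /* svclist is a singly linked list of every registered service */
    for (si = svclist; si; si = si->next) {
        if ((len == si->len) &&
            !memcmp(s16, si->name, len * sizeof(uint16_t))) {
            return si;
        }
    }
    return NULL;
}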