The relevant source files are:

Path | Description |
---|---|
frameworks/native/cmds/servicemanager/service_manager.c | Startup file of the servicemanager daemon |
frameworks/native/cmds/servicemanager/binder.c | servicemanager does not link against libbinder, so the functions that talk to the binder driver are implemented in this file |
Startup
int main(int argc, char** argv)
{
    struct binder_state *bs;
    char *driver;

    // The default binder device node is /dev/binder
    if (argc > 1) {
        driver = argv[1];
    } else {
        driver = "/dev/binder";
    }

    // Open the /dev/binder device node
    bs = binder_open(driver, 128*1024);
    if (!bs) {
        // Device vendors may use their own binder device node (e.g. /dev/vndbinder)
#ifdef VENDORSERVICEMANAGER
        ALOGW("failed to open binder driver %s\n", driver);
        while (true) {
            sleep(UINT_MAX);
        }
#else
        ALOGE("failed to open binder driver %s\n", driver);
#endif
        return -1;
    }

    // Register servicemanager as the manager of all binder services
    if (binder_become_context_manager(bs)) {
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }

    // SELinux-related setup, omitted here
    ...

    // Enter the loop and keep handling events from clients;
    // each event is dispatched to svcmgr_handler()
    binder_loop(bs, svcmgr_handler);

    return 0;
}
Opening the binder driver: binder_open()
struct binder_state *binder_open(const char* driver, size_t mapsize)
{
    struct binder_state *bs;
    struct binder_version vers;

    bs = malloc(sizeof(*bs));
    if (!bs) {
        errno = ENOMEM;
        return NULL;
    }

    // Open the device node
    bs->fd = open(driver, O_RDWR | O_CLOEXEC);
    if (bs->fd < 0) {
        fprintf(stderr, "binder: cannot open %s (%s)\n", driver, strerror(errno));
        goto fail_open;
    }

    // Check the driver's protocol version
    if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
        (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
        fprintf(stderr, "binder: kernel driver version (%d) differs from user space version (%d)\n",
                vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
        goto fail_map;  // the fd is already open, so close it on the way out
    }

    // Set up the shared memory region
    bs->mapsize = mapsize;
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr, "binder: cannot map device (%s)\n", strerror(errno));
        goto fail_map;
    }

    return bs;

fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return NULL;
}
The open sequence is completely standard. To exchange data with the binder driver efficiently, mmap() is used; note that the region is mapped PROT_READ, so user space only ever reads from it: the driver copies incoming transaction data into the region and servicemanager consumes it in place (a sketch of how such buffers are handed back to the driver follows the struct below). The opened file descriptor is kept in the dynamically allocated binder_state.
struct binder_state
{
    int fd;          // fd of the opened binder device node
    void *mapped;    // start address of the mmap()'ed region
    size_t mapsize;  // size of the mapped region
};
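Why the mapping matters becomes clear when a transaction arrives: the driver hands user space a pointer into this mapped region (the BR_TRANSACTION data seen by binder_parse() later), so the payload is read in place without an extra copy. Once servicemanager is done with such a buffer, it returns it to the driver with a BC_FREE_BUFFER command. The helper below matches binder_free_buffer() from the same binder.c; it relies on binder_write(), which is shown later in the section on the binder read/write buffer:

void binder_free_buffer(struct binder_state *bs,
                        binder_uintptr_t buffer_to_free)
{
    // A BC_FREE_BUFFER command followed by the address of the buffer
    // being released back to the mapped region.
    struct {
        uint32_t cmd_free;
        binder_uintptr_t buffer;
    } __attribute__((packed)) data;

    data.cmd_free = BC_FREE_BUFFER;
    data.buffer = buffer_to_free;
    binder_write(bs, &data, sizeof(data));
}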
Becoming the binder context manager: binder_become_context_manager()
int binder_become_context_manager(struct binder_state *bs)
{
return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}
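This single ioctl makes the calling process the context manager: the process behind the well-known handle 0 that every binder client can address. As an aside, newer kernels also provide BINDER_SET_CONTEXT_MGR_EXT, which takes a struct flat_binder_object and lets the caller request extras such as receiving the sender's SELinux context with every transaction. A minimal sketch, assuming a kernel that knows this ioctl (the helper name is ours, not from the sources above; needs <string.h> and the UAPI header <linux/android/binder.h>):

// Sketch: become context manager via the extended ioctl where available,
// falling back to the classic BINDER_SET_CONTEXT_MGR otherwise.
int binder_become_context_manager_ext(struct binder_state *bs)
{
    struct flat_binder_object obj;

    memset(&obj, 0, sizeof(obj));
    // Ask the driver to attach the caller's security context
    // to every incoming transaction.
    obj.flags = FLAT_BINDER_FLAG_TXN_SECURITY_CTX;
    if (ioctl(bs->fd, BINDER_SET_CONTEXT_MGR_EXT, &obj) == 0)
        return 0;
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}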
The event loop: binder_loop()
With all the preparation done, servicemanager enters its loop, continuously reading messages from the driver and handling events one by one.
The binder read/write buffer
Before stepping into binder_loop(), let's first look at the core data structure used for reading from and writing to the binder driver, struct binder_write_read, referred to below as the binder read/write buffer.
struct binder_write_read {
    binder_size_t write_size;
    binder_size_t write_consumed;
    binder_uintptr_t write_buffer;
    binder_size_t read_size;
    binder_size_t read_consumed;
    binder_uintptr_t read_buffer;
};
The members fall into a write half and a read half. For the write half, write_size is the number of bytes to write, write_consumed is an out parameter into which the driver puts the number of bytes it actually consumed, and write_buffer points to the data to be written. For the read half, read_size is the capacity of read_buffer, read_consumed is updated by the driver to report how many bytes were actually placed in read_buffer, and read_buffer points to the read buffer itself.
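A write-only use of this structure can be seen in binder_write(), which binder_loop() below uses to send the BC_ENTER_LOOPER command. This is essentially its implementation in binder.c:

int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;

    // Fill in only the write half; read_size == 0 means the ioctl
    // returns without waiting for incoming data.
    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data;
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0) {
        fprintf(stderr, "binder_write: ioctl failed (%s)\n", strerror(errno));
    }
    return res;
}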
binder_loop()
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    // Nothing to write
    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    // Tell the driver this thread is entering the loop
    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        // The write buffer has no space while the read buffer has
        // 32 * 4 = 128 bytes, so the ioctl() below only reads data
        bwr.read_size = sizeof(readbuf);
        // The driver records the number of bytes actually placed
        // into the read buffer in read_consumed
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        // Blocking read of the binder device; it wakes up when a client
        // wants to communicate with servicemanager
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        // Parse and handle the commands that were read; the core event
        // handler func is svcmgr_handler
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}
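binder_parse() walks through the commands the driver placed in readbuf and dispatches each one. The real function in binder.c handles quite a few BR_* commands; the sketch below keeps only the overall structure, and the BR_TRANSACTION body, which unpacks the transaction, invokes func (i.e. svcmgr_handler) and sends the reply, is abbreviated to comments:

// Structural sketch of binder_parse(); most command handlers elided.
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) {
        // Every item in the read buffer starts with a 32-bit BR_* code.
        uint32_t cmd = *(uint32_t *) ptr;
        ptr += sizeof(uint32_t);
        switch (cmd) {
        case BR_NOOP:
            break;
        case BR_TRANSACTION: {
            struct binder_transaction_data *txn =
                    (struct binder_transaction_data *) ptr;
            // ... wrap txn in binder_io structures, call
            // func(bs, txn, &msg, &reply), then send the result back
            // with binder_send_reply(), which also frees the buffer ...
            ptr += sizeof(*txn);
            break;
        }
        default:
            // The full version also handles BR_REPLY, BR_DEAD_BINDER, etc.
            ALOGE("parse: OOPS %d\n", cmd);
            return -1;
        }
    }
    return r;
}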
Source: CSDN
Author: fanxiaoyu321
Link: https://blog.csdn.net/fanxiaoyu321/article/details/104046299